def main():
    """Run N4 bias-field correction on every matching NIfTI file.

    Files are corrected in parallel, then each original image's header is
    copied back onto its bias-corrected output (the correction step may
    alter the header), overwriting the corrected file in place.
    """
    # Parse input arguments and get sanitised version
    sargs = mm_parse_inputs.SanitisedArgs(mm_parse_inputs.parse_input_arguments())
    # Enable calling NiftyReg and NiftySeg
    nk = mm_niftk.MM_Niftk()
    mmfn.check_create_directories([sargs.output_directory])

    # Set default parameters
    if sargs.number is not None:
        iterations = sargs.number
    else:
        iterations = 200
    fwhm = 0.15  # default 0.15
    subsampleFactor = 4  # default 4; recommend 2 for in vivo mouse brains with resolution >100 micrometres.
    nlevels = 4  # default 4
    conv = 0.001  # default 0.001
    nhistbins = 256  # default 200

    # Get list of files (use file_name_filter)
    nifti_files_list = sorted(glob.glob(os.path.join(
        sargs.input_directory, sargs.file_name_filter + '.nii*')))
    print(" Processing {0} files: \n - - -\n \t{1}\n - - -\n ...".format(
        len(nifti_files_list),
        '\n\t'.join([str(item) for item in nifti_files_list])))

    TASKS = [(nk, sargs, nf_path, iterations, fwhm, subsampleFactor,
              nlevels, conv, nhistbins) for nf_path in nifti_files_list]

    # Set the number of parallel processes to use.  np.int was removed in
    # NumPy 1.24, so use the builtin int; max(1, ...) guards against a
    # zero-sized pool on single-core machines.
    pool = multiprocessing.Pool(max(1, int(multiprocessing.cpu_count() / 2)))
    # The _star function will unpack TASKS to use in the actual function.
    # Using pool.map because we do care about the order of the results.
    all_output_paths = pool.map(nuc_n4_debias_star, TASKS)
    pool.close()
    pool.join()
    # BUG FIX: previously the parallel results above were discarded
    # (all_output_paths was reset to []) and every file was re-processed
    # serially with nuc_n4_debias(); that redundant serial loop is removed.
    print(" all_output_paths: {0}".format(all_output_paths))

    print(" Copying headers from original images to bias-corrected images ...")
    for counter, nifti_file in enumerate(nifti_files_list):
        print(" Processing {0} / {1}: {2} ...".format(
            counter + 1, len(nifti_files_list), nifti_file))
        original_nf_name = os.path.basename(nifti_file).split(os.extsep)[0]
        original_nifti = nib.load(nifti_file)
        bias_corrected_path = mmfn.get_corresponding_file(
            sargs.output_directory, original_nf_name, path_only=True)
        print("Bias corrected result path: '{0}'".format(bias_corrected_path))
        updated_nii = mmfn.copy_header(nib.load(bias_corrected_path), original_nifti)
        # This will overwrite the bias corrected files
        nib.save(updated_nii, bias_corrected_path)

    print(" Bias field correction completed; files saved to: \n{0}".format(
        '\n\t'.join([str(item) for item in all_output_paths])))
def main():
    """Run N4 bias-field correction on every matching NIfTI file.

    Files are corrected in parallel, then each original image's header is
    copied back onto its bias-corrected output (the correction step may
    alter the header), overwriting the corrected file in place.
    """
    # Parse input arguments and get sanitised version
    sargs = mm_parse_inputs.SanitisedArgs(mm_parse_inputs.parse_input_arguments())
    # Enable calling NiftyReg and NiftySeg
    nk = mm_niftk.MM_Niftk()
    mmfn.check_create_directories([sargs.output_directory])

    # Set default parameters
    if sargs.number is not None:
        iterations = sargs.number
    else:
        iterations = 200
    fwhm = 0.15  # default 0.15
    subsampleFactor = 4  # default 4; recommend 2 for in vivo mouse brains with resolution >100 micrometres.
    nlevels = 4  # default 4
    conv = 0.001  # default 0.001
    nhistbins = 256  # default 200

    # Get list of files (use file_name_filter)
    nifti_files_list = sorted(glob.glob(os.path.join(
        sargs.input_directory, sargs.file_name_filter + '.nii*')))
    print(" Processing {0} files: \n - - -\n \t{1}\n - - -\n ...".format(
        len(nifti_files_list),
        '\n\t'.join([str(item) for item in nifti_files_list])))

    TASKS = [(nk, sargs, nf_path, iterations, fwhm, subsampleFactor,
              nlevels, conv, nhistbins) for nf_path in nifti_files_list]

    # Set the number of parallel processes to use.  np.int was removed in
    # NumPy 1.24, so use the builtin int; max(1, ...) guards against a
    # zero-sized pool on single-core machines.
    pool = multiprocessing.Pool(max(1, int(multiprocessing.cpu_count() / 2)))
    # The _star function will unpack TASKS to use in the actual function.
    # Using pool.map because we do care about the order of the results.
    all_output_paths = pool.map(nuc_n4_debias_star, TASKS)
    pool.close()
    pool.join()
    # BUG FIX: previously the parallel results above were discarded
    # (all_output_paths was reset to []) and every file was re-processed
    # serially with nuc_n4_debias(); that redundant serial loop is removed.
    print(" all_output_paths: {0}".format(all_output_paths))

    print(" Copying headers from original images to bias-corrected images ...")
    for counter, nifti_file in enumerate(nifti_files_list):
        print(" Processing {0} / {1}: {2} ...".format(
            counter + 1, len(nifti_files_list), nifti_file))
        original_nf_name = os.path.basename(nifti_file).split(os.extsep)[0]
        original_nifti = nib.load(nifti_file)
        bias_corrected_path = mmfn.get_corresponding_file(
            sargs.output_directory, original_nf_name, path_only=True)
        print("Bias corrected result path: '{0}'".format(bias_corrected_path))
        updated_nii = mmfn.copy_header(nib.load(bias_corrected_path), original_nifti)
        # This will overwrite the bias corrected files
        nib.save(updated_nii, bias_corrected_path)

    print(" Bias field correction completed; files saved to: \n{0}".format(
        '\n\t'.join([str(item) for item in all_output_paths])))
def sanitise_arguments(self, args):
    """Normalise parsed command-line arguments in place and return them.

    Derives directory paths and filename filters from the raw ``input``,
    ``output``, ``input_2``, ``mask``, ``tpm`` and ``list`` arguments
    (optional ones are probed with hasattr), creating the output
    directory if it does not exist.

    Returns:
        The same ``args`` namespace, mutated.

    Raises:
        Exception: if ``input`` (or ``input_2``) is neither an existing
            file nor an existing directory.
    """
    args.in_ext_filter = '.nii*'
    if not args.unzip:
        args.ext = '.nii.gz'
    else:
        args.ext = '.nii'

    if args.input:
        if os.path.isdir(args.input):
            # use wildcards if provided a directory alone
            args.input_name_filter = '*' + args.in_filter + '*'
            args.input_name_filter_exact = args.in_filter
            args.input_directory = os.path.normpath(os.path.join(args.input))
        elif os.path.isfile(args.input):
            # Get the filename, removing path and 1+ extensions
            args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0]
            args.input_directory = os.path.dirname(args.input)
        else:
            raise Exception("Input not recognised or does not exist: {0}".format(args.input))
    else:
        args.input_directory = os.getcwd()

    if not args.no_output:
        if args.output:
            if os.path.isdir(args.output):
                args.output_directory = os.path.normpath(os.path.join(args.output))
            else:
                # BUG FIX: Python-2 print statements converted to the
                # print() function already used elsewhere in this file.
                print("Specified output ({0}) is not a directory; creating it ...".format(args.output))
                args.output_directory = os.path.normpath(os.path.join(args.output))
                mmfn.check_create_directories([args.output_directory])
        else:
            print("No output directory specified. Setting to input directory ({0}) in case it is required.".format(args.input_directory))
            args.output_directory = args.input_directory

    if hasattr(args, 'input_2'):
        if args.input_2:
            if os.path.isdir(args.input_2):
                # use wildcards if provided a directory alone
                args.input_name_filter_2 = '*' + args.in_filter + '*'
                args.input_name_filter_exact_2 = args.in_filter
                args.input_directory_2 = os.path.normpath(os.path.join(args.input_2))
            elif os.path.isfile(args.input_2):
                # Get the filename, removing path and 1+ extensions
                args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0]
                args.input_directory_2 = os.path.dirname(args.input_2)
            else:
                raise Exception("Input 2 not recognised or does not exist: {0}".format(args.input_2))
        else:
            args.input_directory_2 = os.getcwd()

    if hasattr(args, 'mask'):
        if args.mask:
            args.mask = os.path.normpath(args.mask)
            if os.path.isdir(args.mask):
                # use wildcards if provided a directory alone
                args.mask_name_filter = '*' + args.mn_filter + '*'
                args.mask_name_filter_exact = args.mn_filter
                args.mask_directory = os.path.normpath(os.path.join(args.mask))
            elif os.path.isfile(args.mask):
                # Get the filename, removing path and 1+ extensions
                args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0]
                args.mask_directory = os.path.dirname(args.mask)
        else:
            args.mask_directory = None

    if hasattr(args, 'tpm'):
        if args.tpm:
            args.tpm = os.path.normpath(args.tpm)
            if os.path.isdir(args.tpm):
                # use wildcards if provided a directory alone
                args.tpm_name_filter = '*' + args.tn_filter + '*'
                args.tpm_name_filter_exact = args.tn_filter
                args.tpm_directory = os.path.normpath(os.path.join(args.tpm))
            elif os.path.isfile(args.tpm):
                # Get the filename, removing path and 1+ extensions
                args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0]
                args.tpm_directory = os.path.dirname(args.tpm)
        else:
            args.tpm_directory = None

    # Either get a list of strings as file names from a directory, or from
    # a given column of a .CSV file
    if hasattr(args, 'list'):
        if args.list:
            args.list = os.path.normpath(args.list)
            if os.path.isdir(os.path.normpath(args.list)):
                args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter)
            # elif os.path.isfile(os.path.normpath(args.list)):
            #     args.column
    return args
def sanitise_arguments(self, args):
    """Normalise parsed command-line arguments in place and return them.

    Derives directory paths and filename filters from the raw ``input``,
    ``output``, ``input_2``, ``mask``, ``tpm`` and ``list`` arguments
    (optional ones are probed with hasattr), creating the output
    directory if it does not exist.

    Returns:
        The same ``args`` namespace, mutated.

    Raises:
        Exception: if ``input`` (or ``input_2``) is neither an existing
            file nor an existing directory.
    """
    args.in_ext_filter = '.nii*'
    if not args.unzip:
        args.ext = '.nii.gz'
    else:
        args.ext = '.nii'

    if args.input:
        if os.path.isdir(args.input):
            # use wildcards if provided a directory alone
            args.input_name_filter = '*' + args.in_filter + '*'
            args.input_name_filter_exact = args.in_filter
            args.input_directory = os.path.normpath(os.path.join(args.input))
        elif os.path.isfile(args.input):
            # Get the filename, removing path and 1+ extensions
            args.input_name_filter = os.path.basename(args.input).split(os.extsep)[0]
            args.input_directory = os.path.dirname(args.input)
        else:
            raise Exception("Input not recognised or does not exist: {0}".format(args.input))
    else:
        args.input_directory = os.getcwd()

    if not args.no_output:
        if args.output:
            if os.path.isdir(args.output):
                args.output_directory = os.path.normpath(os.path.join(args.output))
            else:
                # BUG FIX: Python-2 print statements converted to the
                # print() function already used elsewhere in this file.
                print("Specified output ({0}) is not a directory; creating it ...".format(args.output))
                args.output_directory = os.path.normpath(os.path.join(args.output))
                mmfn.check_create_directories([args.output_directory])
        else:
            print("No output directory specified. Setting to input directory ({0}) in case it is required.".format(args.input_directory))
            args.output_directory = args.input_directory

    if hasattr(args, 'input_2'):
        if args.input_2:
            if os.path.isdir(args.input_2):
                # use wildcards if provided a directory alone
                args.input_name_filter_2 = '*' + args.in_filter + '*'
                args.input_name_filter_exact_2 = args.in_filter
                args.input_directory_2 = os.path.normpath(os.path.join(args.input_2))
            elif os.path.isfile(args.input_2):
                # Get the filename, removing path and 1+ extensions
                args.input_name_filter_2 = os.path.basename(args.input_2).split(os.extsep)[0]
                args.input_directory_2 = os.path.dirname(args.input_2)
            else:
                raise Exception("Input 2 not recognised or does not exist: {0}".format(args.input_2))
        else:
            args.input_directory_2 = os.getcwd()

    if hasattr(args, 'mask'):
        if args.mask:
            args.mask = os.path.normpath(args.mask)
            if os.path.isdir(args.mask):
                # use wildcards if provided a directory alone
                args.mask_name_filter = '*' + args.mn_filter + '*'
                args.mask_name_filter_exact = args.mn_filter
                args.mask_directory = os.path.normpath(os.path.join(args.mask))
            elif os.path.isfile(args.mask):
                # Get the filename, removing path and 1+ extensions
                args.mask_name_filter = os.path.basename(args.mask).split(os.extsep)[0]
                args.mask_directory = os.path.dirname(args.mask)
        else:
            args.mask_directory = None

    if hasattr(args, 'tpm'):
        if args.tpm:
            args.tpm = os.path.normpath(args.tpm)
            if os.path.isdir(args.tpm):
                # use wildcards if provided a directory alone
                args.tpm_name_filter = '*' + args.tn_filter + '*'
                args.tpm_name_filter_exact = args.tn_filter
                args.tpm_directory = os.path.normpath(os.path.join(args.tpm))
            elif os.path.isfile(args.tpm):
                # Get the filename, removing path and 1+ extensions
                args.tpm_name_filter = os.path.basename(args.tpm).split(os.extsep)[0]
                args.tpm_directory = os.path.dirname(args.tpm)
        else:
            args.tpm_directory = None

    # Either get a list of strings as file names from a directory, or from
    # a given column of a .CSV file
    if hasattr(args, 'list'):
        if args.list:
            args.list = os.path.normpath(args.list)
            if os.path.isdir(os.path.normpath(args.list)):
                args.list_names = mmfn.get_names_list(args.list, args.list_filter, extension=args.in_ext_filter)
            # elif os.path.isfile(os.path.normpath(args.list)):
            #     args.column
    return args