def main(args=None): if args is None: args = sys.argv[1:] # Get parser parser = get_parser() arguments = parser.parse(args) # Set param arguments ad inputted by user fname_in = arguments["-i"] contrast = arguments["-c"] # Segmentation or Centerline line if '-s' in arguments: fname_seg = arguments['-s'] if not os.path.isfile(fname_seg): fname_seg = None sct.printv('WARNING: -s input file: "' + arguments['-s'] + '" does not exist.\nDetecting PMJ without using segmentation information', 1, 'warning') else: fname_seg = None # Output Folder if '-ofolder' in arguments: path_results = arguments["-ofolder"] if not os.path.isdir(path_results) and os.path.exists(path_results): sct.printv("ERROR output directory %s is not a valid directory" % path_results, 1, 'error') if not os.path.exists(path_results): os.makedirs(path_results) else: path_results = '.' path_qc = arguments.get("-qc", None) # Remove temp folder rm_tmp = bool(int(arguments.get("-r", 1))) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # Initialize DetectPMJ detector = DetectPMJ(fname_im=fname_in, contrast=contrast, fname_seg=fname_seg, path_out=path_results, verbose=verbose) # run the extraction fname_out, tmp_dir = detector.apply() # Remove tmp_dir if rm_tmp: sct.rmtree(tmp_dir) # View results if fname_out is not None: if path_qc is not None: generate_qc(fname_in, fname_seg=fname_out, args=args, path_qc=os.path.abspath(path_qc), process='sct_detect_pmj') sct.display_viewer_syntax([fname_in, fname_out], colormaps=['gray', 'red'])
def main(args = None): if not args: args = sys.argv[1:] # initialization file_mask = '' # Get parser info parser = get_parser() arguments = parser.parse(sys.argv[1:]) fname_in = arguments['-i'] fname_bvals = arguments['-bval'] fname_bvecs = arguments['-bvec'] prefix = arguments['-o'] method = arguments['-method'] evecs = int(arguments['-evecs']) if "-m" in arguments: file_mask = arguments['-m'] param.verbose = int(arguments.get('-v')) sct.init_sct(log_level=param.verbose, update=True) # Update log level # compute DTI if not compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs, file_mask): sct.printv('ERROR in compute_dti()', 1, 'error')
def main(args=None): # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) input_filename = arguments["-i"] fname_dest = arguments["-d"] warp_filename = arguments["-w"] transform = Transform(input_filename=input_filename, fname_dest=fname_dest, warp=warp_filename) if "-crop" in arguments: transform.crop = arguments["-crop"] if "-o" in arguments: transform.output_filename = arguments["-o"] if "-x" in arguments: transform.interp = arguments["-x"] if "-r" in arguments: transform.remove_temp_files = int(arguments["-r"]) transform.verbose = int(arguments.get('-v')) sct.init_sct(log_level=transform.verbose, update=True) # Update log level transform.apply()
def main(args=None): if args is None: args = sys.argv[1:] param = Param() # Check input parameters parser = get_parser() arguments = parser.parse(args) param.fname_data = os.path.abspath(arguments['-i']) if '-p' in arguments: param.process = arguments['-p'] if param.process[0] not in param.process_list: sct.printv(parser.usage.generate(error='ERROR: Process ' + param.process[0] + ' is not recognized.')) if '-size' in arguments: param.size = arguments['-size'] if '-f' in arguments: param.shape = arguments['-f'] if '-o' in arguments: param.fname_out = os.path.abspath(arguments['-o']) if '-r' in arguments: param.remove_temp_files = int(arguments['-r']) param.verbose = int(arguments.get('-v')) sct.init_sct(log_level=param.verbose, update=True) # Update log level # run main program create_mask(param)
def run_main(): sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) # Input filename fname_input_data = arguments["-i"] fname_data = os.path.abspath(fname_input_data) # Method used method = 'optic' if "-method" in arguments: method = arguments["-method"] # Contrast type contrast_type = '' if "-c" in arguments: contrast_type = arguments["-c"] if method == 'optic' and not contrast_type: # Contrast must be error = 'ERROR: -c is a mandatory argument when using Optic method.' sct.printv(error, type='error') return # Ga between slices interslice_gap = 10.0 if "-gap" in arguments: interslice_gap = float(arguments["-gap"]) # Output folder if "-o" in arguments: file_output = arguments["-o"] else: path_data, file_data, ext_data = sct.extract_fname(fname_data) file_output = os.path.join(path_data, file_data + '_centerline') verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level if method == 'viewer': im_labels = _call_viewer_centerline(Image(fname_data), interslice_gap=interslice_gap) im_centerline, arr_centerline, _ = \ get_centerline(im_labels, algo_fitting='polyfit', degree=3, minmax=True, verbose=verbose) else: im_centerline, arr_centerline, _ = \ get_centerline(Image(fname_data), algo_fitting='optic', contrast=contrast_type, minmax=True, verbose=verbose) # save centerline as nifti (discrete) and csv (continuous) files im_centerline.save(file_output + '.nii.gz') np.savetxt(file_output + '.csv', arr_centerline.transpose(), delimiter=",") sct.display_viewer_syntax([fname_input_data, file_output+'.nii.gz'], colormaps=['gray', 'red'], opacities=['', '1'])
def main(args=None): if args is None: args = sys.argv[1:] # create param object param = Param() param_glcm = ParamGLCM() # get parser parser = get_parser() arguments = parser.parse(args) # set param arguments ad inputted by user param.fname_im = arguments["-i"] param.fname_seg = arguments["-m"] if '-ofolder' in arguments: param.path_results = arguments["-ofolder"] if not os.path.isdir(param.path_results) and os.path.exists(param.path_results): sct.printv("ERROR output directory %s is not a valid directory" % param.path_results, 1, 'error') if not os.path.exists(param.path_results): os.makedirs(param.path_results) if '-feature' in arguments: param_glcm.feature = arguments['-feature'] if '-distance' in arguments: param_glcm.distance = int(arguments['-distance']) if '-angle' in arguments: param_glcm.angle = arguments['-angle'] if '-dim' in arguments: param.dim = arguments['-dim'] if '-r' in arguments: param.rm_tmp = bool(int(arguments['-r'])) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # create the GLCM constructor glcm = ExtractGLCM(param=param, param_glcm=param_glcm) # run the extraction fname_out_lst = glcm.extract() # remove tmp_dir if param.rm_tmp: sct.rmtree(glcm.tmp_dir) sct.printv('\nDone! To view results, type:', param.verbose) sct.printv('fslview ' + arguments["-i"] + ' ' + ' -l Red-Yellow -t 0.7 '.join(fname_out_lst) + ' -l Red-Yellow -t 0.7 & \n', param.verbose, 'info')
def main(args=None): parser = get_parser() arguments = parser.parse(sys.argv[1:]) fname_src = arguments["-d"] fname_transfo = arguments["-w"] warp_atlas = int(arguments["-a"]) warp_spinal_levels = int(arguments["-s"]) folder_out = arguments['-ofolder'] path_template = arguments['-t'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level path_qc = arguments.get("-qc", None) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) # call main function w = WarpTemplate(fname_src, fname_transfo, warp_atlas, warp_spinal_levels, folder_out, path_template, verbose) path_template = os.path.join(w.folder_out, w.folder_template) # Only deal with QC and verbose if white matter was warped (meaning, everything under template/) if "white matter" in spinalcordtoolbox.metadata.get_indiv_label_names(path_template): # Deal with QC report if path_qc is not None: fname_wm = os.path.join(w.folder_out, w.folder_template, spinalcordtoolbox.metadata.get_file_label(path_template, 'white matter')) generate_qc(fname_src, fname_seg=fname_wm, args=sys.argv[1:], path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_warp_template') # Deal with verbose sct.display_viewer_syntax( [ fname_src, spinalcordtoolbox.metadata.get_file_label(path_template, 'T2-weighted template', output="filewithpath"), spinalcordtoolbox.metadata.get_file_label(path_template, 'gray matter', output="filewithpath"), spinalcordtoolbox.metadata.get_file_label(path_template, 'white matter', output="filewithpath") ], colormaps=['gray', 'gray', 'red-yellow', 'blue-lightblue'], opacities=['1', '1', '0.5', '0.5'], minmax=['', '0,4000', '0.4,1', '0.4,1'], verbose=verbose, ) else: if path_qc is not None: sct.printv("QC not generated since expected labels are missing from template", type="warning" )
def main(args): import sct_utils as sct from spinalcordtoolbox.mtsat import mtsat verbose = args.v sct.init_sct(log_level=verbose, update=True) # Update log level fname_mtsat, fname_t1map = mtsat.compute_mtsat_from_file( args.mt, args.pd, args.t1, args.trmt, args.trpd, args.trt1, args.famt, args.fapd, args.fat1, fname_b1map=args.b1map, fname_mtsat=args.omtsat, fname_t1map=args.ot1map, verbose=verbose) sct.display_viewer_syntax([fname_mtsat, fname_t1map], colormaps=['gray', 'gray'], minmax=['-10,10', '0, 3'], opacities=['1', '1'], verbose=verbose)
def run_main(): parser = get_parser() arguments = parser.parse(sys.argv[1:]) input_filename = arguments["-i"] try: output_filename = arguments["-o"] except KeyError: output_filename = sct.add_suffix(input_filename, '_gmseg') use_tta = "-t" in arguments model_name = arguments["-m"] threshold = arguments['-thr'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level if threshold > 1.0 or threshold < 0.0: raise RuntimeError("Threshold should be between 0.0 and 1.0.") # Threshold zero means no thresholding if threshold == 0.0: threshold = None from spinalcordtoolbox.deepseg_gm import deepseg_gm deepseg_gm.check_backend() out_fname = deepseg_gm.segment_file(input_filename, output_filename, model_name, threshold, int(verbose), use_tta) path_qc = arguments.get("-qc", None) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) if path_qc is not None: generate_qc(fname_in1=input_filename, fname_seg=out_fname, args=sys.argv[1:], path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_deepseg_gm') sct.display_viewer_syntax([input_filename, format(out_fname)], colormaps=['gray', 'red'], opacities=['1', '0.7'], verbose=verbose)
def main(args=None): import numpy as np import spinalcordtoolbox.image as msct_image # Initialization fname_mt0 = '' fname_mt1 = '' fname_mtr = '' # register = param.register # remove_temp_files = param.remove_temp_files # verbose = param.verbose # check user arguments if not args: args = sys.argv[1:] # Check input parameters parser = get_parser() arguments = parser.parse(args) fname_mt0 = arguments['-mt0'] fname_mt1 = arguments['-mt1'] fname_mtr = arguments['-o'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # compute MTR sct.printv('\nCompute MTR...', verbose) nii_mt1 = msct_image.Image(fname_mt1) data_mt1 = nii_mt1.data data_mt0 = msct_image.Image(fname_mt0).data data_mtr = 100 * (data_mt0 - data_mt1) / data_mt0 # save MTR file nii_mtr = nii_mt1 nii_mtr.data = data_mtr nii_mtr.save(fname_mtr) # sct.run(fsloutput+'fslmaths -dt double mt0.nii -sub mt1.nii -mul 100 -div mt0.nii -thr 0 -uthr 100 fname_mtr', verbose) sct.display_viewer_syntax([fname_mt0, fname_mt1, fname_mtr])
def run_main(): parser = get_parser() arguments = parser.parse(sys.argv[1:]) param.fname_data = arguments["-i"] arg = 0 if "-f" in arguments: param.new_size = arguments["-f"] param.new_size_type = 'factor' arg += 1 elif "-mm" in arguments: param.new_size = arguments["-mm"] param.new_size_type = 'mm' arg += 1 elif "-vox" in arguments: param.new_size = arguments["-vox"] param.new_size_type = 'vox' arg += 1 elif "-ref" in arguments: param.ref = arguments["-ref"] arg += 1 else: sct.printv(parser.usage.generate(error='ERROR: you need to specify one of those three arguments : -f, -mm or -vox')) if arg > 1: sct.printv(parser.usage.generate(error='ERROR: you need to specify ONLY one of those three arguments : -f, -mm or -vox')) if "-o" in arguments: param.fname_out = arguments["-o"] if "-x" in arguments: if len(arguments["-x"]) == 1: param.interpolation = int(arguments["-x"]) else: param.interpolation = arguments["-x"] param.verbose = int(arguments.get('-v')) sct.init_sct(log_level=param.verbose, update=True) # Update log level spinalcordtoolbox.resampling.resample_file(param.fname_data, param.fname_out, param.new_size, param.new_size_type, param.interpolation, param.verbose, fname_ref=param.ref)
def main(args=None): if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(sys.argv[1:]) fname_in = arguments['-bvec'] if '-o' in arguments: fname_out = arguments['-o'] else: fname_out = '' verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # get bvecs in proper orientation from dipy.io import read_bvals_bvecs bvals, bvecs = read_bvals_bvecs(None, fname_in) # # Transpose bvecs # printv('Transpose bvecs...', verbose) # # from numpy import transpose # bvecs = bvecs.transpose() # Write new file if fname_out == '': path_in, file_in, ext_in = extract_fname(fname_in) fname_out = path_in + file_in + ext_in fid = open(fname_out, 'w') for iLine in range(bvecs.shape[0]): fid.write(' '.join(str(i) for i in bvecs[iLine, :]) + '\n') fid.close() # display message printv('Created file:\n--> ' + fname_out + '\n', verbose, 'info')
def main(args=None): if args is None: args = sys.argv[1:] # create param objects param = Param() # get parser parser = get_parser() arguments = parser.parse(args) # set param arguments ad inputted by user list_fname_src = arguments["-i"] fname_dest = arguments["-d"] list_fname_warp = arguments["-w"] param.fname_out = arguments["-o"] # if '-ofolder' in arguments: # path_results = arguments['-ofolder'] if '-x' in arguments: param.interp = arguments['-x'] if '-r' in arguments: param.rm_tmp = bool(int(arguments['-r'])) param.verbose = int(arguments.get('-v')) sct.init_sct(log_level=param.verbose, update=True) # Update log level # check if list of input files and warping fields have same length assert len(list_fname_src) == len(list_fname_warp), "ERROR: list of files are not of the same length" # merge src images to destination image try: merge_images(list_fname_src, fname_dest, list_fname_warp, param) except Exception as e: sct.printv(str(e), 1, 'error') sct.display_viewer_syntax([fname_dest, os.path.abspath(param.fname_out)])
"c2c3_disc_models": [ "https://github.com/sct-data/c2c3_disc_models/releases/download/r20190117/20190117_c2c3_disc_models.zip", "https://osf.io/t97ap/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20190117_c2c3_disc_models.zip", ], } if args is None: args = sys.argv[1:] # Get parser info parser = get_parser(dict_url) arguments = parser.parse(args) data_name = arguments['-d'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level dest_folder = arguments.get( '-o', os.path.join(os.path.abspath(os.curdir), data_name)) url = dict_url[data_name] install_data(url, dest_folder, keep=arguments.get("-k", False)) sct.printv('Done!\n', verbose) return 0 if __name__ == "__main__": sct.init_sct() res = main() raise SystemExit(res)
def main(): """Main function.""" sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) fname_image = arguments['-i'] contrast_type = arguments['-c'] ctr_algo = arguments["-centerline"] brain_bool = bool(int(arguments["-brain"])) if "-brain" not in args and contrast_type in ['t2s', 't2_ax']: brain_bool = False if '-ofolder' not in args: output_folder = os.getcwd() else: output_folder = arguments["-ofolder"] if ctr_algo == 'file' and "-file_centerline" not in args: sct.log.error( 'Please use the flag -file_centerline to indicate the centerline filename.' ) sys.exit(1) if "-file_centerline" in args: manual_centerline_fname = arguments["-file_centerline"] ctr_algo = 'file' else: manual_centerline_fname = None remove_temp_files = int(arguments['-r']) verbose = int(arguments['-v']) algo_config_stg = '\nMethod:' algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo) algo_config_stg += '\n\tAssumes brain section included in the image: ' + str( brain_bool) + '\n' sct.printv(algo_config_stg) im_image = Image(fname_image) im_seg, im_labels_viewer, im_ctr = deep_segmentation_MSlesion( im_image, contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname, brain_bool=brain_bool, remove_temp_files=remove_temp_files, verbose=verbose) # Save segmentation fname_seg = os.path.abspath( os.path.join( output_folder, sct.extract_fname(fname_image)[1] + '_lesionseg' + sct.extract_fname(fname_image)[2])) im_seg.save(fname_seg) if ctr_algo == 'viewer': # Save labels fname_labels = os.path.abspath( os.path.join( output_folder, sct.extract_fname(fname_image)[1] + '_labels-centerline' + sct.extract_fname(fname_image)[2])) im_labels_viewer.save(fname_labels) if verbose == 2: # Save ctr fname_ctr = os.path.abspath( os.path.join( output_folder, sct.extract_fname(fname_image)[1] + '_centerline' + sct.extract_fname(fname_image)[2])) im_ctr.save(fname_ctr) sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
def main(args=None): # initialization start_time = time.time() path_out = '.' param = Param() # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(sys.argv[1:]) param.fname_data = arguments['-i'] param.fname_bvecs = arguments['-bvec'] if '-bval' in arguments: param.fname_bvals = arguments['-bval'] if '-bvalmin' in arguments: param.bval_min = arguments['-bvalmin'] if '-g' in arguments: param.group_size = arguments['-g'] if '-m' in arguments: param.fname_mask = arguments['-m'] if '-param' in arguments: param.update(arguments['-param']) if '-thr' in arguments: param.otsu = arguments['-thr'] if '-x' in arguments: param.interp = arguments['-x'] if '-ofolder' in arguments: path_out = arguments['-ofolder'] if '-r' in arguments: param.remove_temp_files = int(arguments['-r']) param.verbose = int(arguments.get('-v')) sct.init_sct(log_level=param.verbose, update=True) # Update log level # Get full path param.fname_data = os.path.abspath(param.fname_data) param.fname_bvecs = os.path.abspath(param.fname_bvecs) if param.fname_bvals != '': param.fname_bvals = os.path.abspath(param.fname_bvals) if param.fname_mask != '': param.fname_mask = os.path.abspath(param.fname_mask) # Extract path, file and extension path_data, file_data, ext_data = sct.extract_fname(param.fname_data) path_mask, file_mask, ext_mask = sct.extract_fname(param.fname_mask) path_tmp = sct.tmp_create(basename="dmri_moco", verbose=param.verbose) # names of files in temporary folder mask_name = 'mask' bvecs_fname = 'bvecs.txt' # Copying input data to tmp folder sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose) convert(param.fname_data, os.path.join(path_tmp, "dmri.nii")) sct.copy(param.fname_bvecs, os.path.join(path_tmp, bvecs_fname), verbose=param.verbose) if param.fname_mask != '': sct.copy(param.fname_mask, os.path.join(path_tmp, mask_name + ext_mask), verbose=param.verbose) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # update field in param (because used later). # TODO: make this cleaner... if param.fname_mask != '': param.fname_mask = mask_name + ext_mask # run moco fname_data_moco_tmp = dmri_moco(param) # generate b0_moco_mean and dwi_moco_mean args = ['-i', fname_data_moco_tmp, '-bvec', 'bvecs.txt', '-a', '1', '-v', '0'] if not param.fname_bvals == '': # if bvals file is provided args += ['-bval', param.fname_bvals] fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean = sct_dmri_separate_b0_and_dwi.main(args=args) # come back os.chdir(curdir) # Generate output files fname_dmri_moco = os.path.join(path_out, file_data + param.suffix + ext_data) fname_dmri_moco_b0_mean = sct.add_suffix(fname_dmri_moco, '_b0_mean') fname_dmri_moco_dwi_mean = sct.add_suffix(fname_dmri_moco, '_dwi_mean') sct.create_folder(path_out) sct.printv('\nGenerate output files...', param.verbose) sct.generate_output_file(fname_data_moco_tmp, fname_dmri_moco, param.verbose) sct.generate_output_file(fname_b0_mean, fname_dmri_moco_b0_mean, param.verbose) sct.generate_output_file(fname_dwi_mean, fname_dmri_moco_dwi_mean, param.verbose) # Delete temporary files if param.remove_temp_files == 1: sct.printv('\nDelete temporary files...', param.verbose) sct.rmtree(path_tmp, verbose=param.verbose) # display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', param.verbose) sct.display_viewer_syntax([fname_dmri_moco, file_data], mode='ortho,ortho')
def main(): """Main function.""" sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) fname_image = os.path.abspath(arguments['-i']) contrast_type = arguments['-c'] ctr_algo = arguments["-centerline"] if "-brain" not in args: if contrast_type in ['t2s', 'dwi']: brain_bool = False if contrast_type in ['t1', 't2']: brain_bool = True else: brain_bool = bool(int(arguments["-brain"])) kernel_size = arguments["-kernel"] if kernel_size == '3d' and contrast_type == 'dwi': kernel_size = '2d' sct.printv('3D kernel model for dwi contrast is not available. 2D kernel model is used instead.', type="warning") if '-ofolder' not in args: output_folder = os.getcwd() else: output_folder = arguments["-ofolder"] if ctr_algo == 'file' and "-file_centerline" not in args: logger.warning('Please use the flag -file_centerline to indicate the centerline filename.') sys.exit(1) if "-file_centerline" in args: manual_centerline_fname = arguments["-file_centerline"] ctr_algo = 'file' else: manual_centerline_fname = None remove_temp_files = int(arguments['-r']) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level path_qc = arguments.get("-qc", None) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) algo_config_stg = '\nMethod:' algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo) algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(brain_bool) algo_config_stg += '\n\tDimension of the segmentation kernel convolutions: ' + kernel_size + '\n' sct.printv(algo_config_stg) im_image = Image(fname_image) # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp, im_labels_viewer, im_ctr = deep_segmentation_spinalcord( im_image.copy(), contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname, brain_bool=brain_bool, kernel_size=kernel_size, remove_temp_files=remove_temp_files, verbose=verbose) # Save segmentation fname_seg = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_seg' + sct.extract_fname(fname_image)[2])) im_seg.save(fname_seg) if ctr_algo == 'viewer': # Save labels fname_labels = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_labels-centerline' + sct.extract_fname(fname_image)[2])) im_labels_viewer.save(fname_labels) if verbose == 2: # Save ctr fname_ctr = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_centerline' + sct.extract_fname(fname_image)[2])) im_ctr.save(fname_ctr) if path_qc is not None: generate_qc(fname_image, fname_seg=fname_seg, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_deepseg_sc') sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
def main(args): parser = get_parser() arguments = parser.parse(args) # Initialization slices = '' group_funcs = (('MEAN', func_wa), ('STD', func_std) ) # functions to perform when aggregating metrics along S-I fname_segmentation = sct.get_absolute_path(arguments['-i']) fname_vert_levels = '' if '-o' in arguments: file_out = os.path.abspath(arguments['-o']) else: file_out = '' if '-append' in arguments: append = int(arguments['-append']) else: append = 0 if '-vert' in arguments: vert_levels = arguments['-vert'] else: vert_levels = '' if '-r' in arguments: remove_temp_files = arguments['-r'] if '-vertfile' in arguments: fname_vert_levels = arguments['-vertfile'] if '-perlevel' in arguments: perlevel = arguments['-perlevel'] else: perlevel = None if '-z' in arguments: slices = arguments['-z'] if '-perslice' in arguments: perslice = arguments['-perslice'] else: perslice = None if '-angle-corr' in arguments: if arguments['-angle-corr'] == '1': angle_correction = True elif arguments['-angle-corr'] == '0': angle_correction = False param_centerline = ParamCenterline( algo_fitting=arguments['-centerline-algo'], smooth=arguments['-centerline-smooth'], minmax=True) path_qc = arguments.get("-qc", None) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # update fields metrics_agg = {} if not file_out: file_out = 'csa.csv' metrics, fit_results = process_seg.compute_shape( fname_segmentation, angle_correction=angle_correction, param_centerline=param_centerline, verbose=verbose) for key in metrics: if key == 'length': # For computing cord length, slice-wise length needs to be summed across slices metrics_agg[key] = aggregate_per_slice_or_level( metrics[key], slices=parse_num_list(slices), levels=parse_num_list(vert_levels), perslice=perslice, perlevel=perlevel, vert_level=fname_vert_levels, group_funcs=(('SUM', func_sum), )) else: # For other metrics, we compute the average and standard deviation across slices metrics_agg[key] = aggregate_per_slice_or_level( metrics[key], slices=parse_num_list(slices), levels=parse_num_list(vert_levels), perslice=perslice, perlevel=perlevel, vert_level=fname_vert_levels, group_funcs=group_funcs) metrics_agg_merged = _merge_dict(metrics_agg) save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append) # QC report (only show CSA for clarity) if path_qc is not None: generate_qc(fname_segmentation, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, path_img=_make_figure(metrics_agg_merged, fit_results), process='sct_process_segmentation') sct.display_open(file_out)
def main(args=None): """ Main function :param args: :return: """ dim_list = ['x', 'y', 'z', 't'] # Get parser args if args is None: args = None if sys.argv[1:] else ['--help'] parser = get_parser() arguments = parser.parse_args(args=args) fname_in = arguments.i fname_out = arguments.o verbose = arguments.v init_sct(log_level=verbose, update=True) # Update log level if '-type' in arguments: output_type = arguments.type else: output_type = None # Open file(s) im = Image(fname_in) data = im.data # 3d or 4d numpy array dim = im.dim # run command if arguments.otsu is not None: param = arguments.otsu data_out = otsu(data, param) elif arguments.adap is not None: param = convert_list_str(arguments.adap, "int") data_out = adap(data, param[0], param[1]) elif arguments.otsu_median is not None: param = convert_list_str(arguments.otsu_median, "int") data_out = otsu_median(data, param[0], param[1]) elif arguments.thr is not None: param = arguments.thr data_out = threshold(data, param) elif arguments.percent is not None: param = arguments.percent data_out = perc(data, param) elif arguments.bin is not None: bin_thr = arguments.bin data_out = binarise(data, bin_thr=bin_thr) elif arguments.add is not None: from numpy import sum data2 = get_data_or_scalar(arguments.add, data) data_concat = concatenate_along_4th_dimension(data, data2) data_out = sum(data_concat, axis=3) elif arguments.sub is not None: data2 = get_data_or_scalar(arguments.sub, data) data_out = data - data2 elif arguments.laplacian is not None: sigmas = convert_list_str(arguments.laplacian, "float") if len(sigmas) == 1: sigmas = [sigmas for i in range(len(data.shape))] elif len(sigmas) != len(data.shape): printv( parser.error( 'ERROR: -laplacian need the same number of inputs as the number of image dimension OR only one input' )) # adjust sigma based on voxel size sigmas = [sigmas[i] / dim[i + 4] for i in range(3)] # smooth data data_out = laplacian(data, sigmas) elif arguments.mul is not None: from numpy import prod data2 = get_data_or_scalar(arguments.mul, data) data_concat = concatenate_along_4th_dimension(data, data2) data_out = prod(data_concat, axis=3) elif arguments.div is not None: from numpy import divide data2 = get_data_or_scalar(arguments.div, data) data_out = divide(data, data2) elif arguments.mean is not None: from numpy import mean dim = dim_list.index(arguments.mean) if dim + 1 > len( np.shape(data)): # in case input volume is 3d and dim=t data = data[..., np.newaxis] data_out = mean(data, dim) elif arguments.rms is not None: from numpy import mean, sqrt, square dim = dim_list.index(arguments.rms) if dim + 1 > len( np.shape(data)): # in case input volume is 3d and dim=t data = data[..., np.newaxis] data_out = sqrt(mean(square(data.astype(float)), dim)) elif arguments.std is not None: from numpy import std dim = dim_list.index(arguments.std) if dim + 1 > len( np.shape(data)): # in case input volume is 3d and dim=t data = data[..., np.newaxis] data_out = std(data, dim, ddof=1) elif arguments.smooth is not None: sigmas = convert_list_str(arguments.smooth, "float") if len(sigmas) == 1: sigmas = [sigmas[0] for i in range(len(data.shape))] elif len(sigmas) != len(data.shape): printv( parser.error( 'ERROR: -smooth need the same number of inputs as the number of image dimension OR only one input' )) # adjust sigma based on voxel size sigmas = [sigmas[i] / dim[i + 4] for i in range(3)] # smooth data data_out = smooth(data, sigmas) elif arguments.dilate is not None: data_out = sct.math.dilate(data, size=arguments.dilate, 
shape=arguments.shape, dim=arguments.dim) elif arguments.erode is not None: data_out = sct.math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim) elif arguments.denoise is not None: # parse denoising arguments p, b = 1, 5 # default arguments list_denoise = (arguments.denoise).split(",") for i in list_denoise: if 'p' in i: p = int(i.split('=')[1]) if 'b' in i: b = int(i.split('=')[1]) data_out = denoise_nlmeans(data, patch_radius=p, block_radius=b) elif arguments.symmetrize is not None: data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2) elif arguments.mi is not None: # input 1 = from flag -i --> im # input 2 = from flag -mi im_2 = Image(arguments.mi) compute_similarity(im.data, im_2.data, fname_out, metric='mi', verbose=verbose) data_out = None elif arguments.minorm is not None: im_2 = Image(arguments.minorm) compute_similarity(im.data, im_2.data, fname_out, metric='minorm', verbose=verbose) data_out = None elif arguments.corr is not None: # input 1 = from flag -i --> im # input 2 = from flag -mi im_2 = Image(arguments.corr) compute_similarity(im.data, im_2.data, fname_out, metric='corr', verbose=verbose) data_out = None # if no flag is set else: data_out = None printv( parser.error( 'ERROR: you need to specify an operation to do on the input image' )) if data_out is not None: # Write output nii_out = Image(fname_in) # use header of input file nii_out.data = data_out nii_out.save(fname_out, dtype=output_type) # TODO: case of multiple outputs # assert len(data_out) == n_out # if n_in == n_out: # for im_in, d_out, fn_out in zip(nii, data_out, fname_out): # im_in.data = d_out # im_in.absolutepath = fn_out # if "-w" in arguments: # im_in.hdr.set_intent('vector', (), '') # im_in.save() # elif n_out == 1: # nii[0].data = data_out[0] # nii[0].absolutepath = fname_out[0] # if "-w" in arguments: # nii[0].hdr.set_intent('vector', (), '') # nii[0].save() # elif n_out > n_in: # for dat_out, name_out in zip(data_out, fname_out): # im_out = nii[0].copy() # im_out.data = dat_out # im_out.absolutepath = name_out # if "-w" in arguments: # im_out.hdr.set_intent('vector', (), '') # im_out.save() # else: # printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs')) # display message if data_out is not None: display_viewer_syntax([fname_out], verbose=verbose) else: printv('\nDone! File created: ' + fname_out, verbose, 'info')
def main(args=None): """ Main function :param args: :return: """ # get parser args if args is None: args = None if sys.argv[1:] else ['--help'] parser = get_parser() arguments = parser.parse_args(args=args) fname_mask = arguments.m fname_sc = arguments.s fname_ref = arguments.i # Path to template path_template = arguments.f # TODO: check this in the parser # if not os.path.isdir(path_template) and os.path.exists(path_template): # path_template = None # printv("ERROR output directory %s is not a valid directory" % path_template, 1, 'error') # Output Folder path_results = arguments.ofolder # if not os.path.isdir(path_results) and os.path.exists(path_results): # printv("ERROR output directory %s is not a valid directory" % path_results, 1, 'error') if not os.path.exists(path_results): os.makedirs(path_results) # Remove temp folder if arguments.r is not None: rm_tmp = bool(arguments.r) else: rm_tmp = True # Verbosity verbose = arguments.v sct.init_sct(log_level=verbose, update=True) # Update log level # create the Lesion constructor lesion_obj = AnalyzeLeion(fname_mask=fname_mask, fname_sc=fname_sc, fname_ref=fname_ref, path_template=path_template, path_ofolder=path_results, verbose=verbose) # run the analyze lesion_obj.analyze() # remove tmp_dir if rm_tmp: sct.rmtree(lesion_obj.tmp_dir) printv( '\nDone! To view the labeled lesion file (one value per lesion), type:', verbose) if fname_ref is not None: printv( 'fsleyes ' + fname_mask + ' ' + os.path.join(path_results, lesion_obj.fname_label) + ' -cm red-yellow -a 70.0 & \n', verbose, 'info') else: printv( 'fsleyes ' + os.path.join(path_results, lesion_obj.fname_label) + ' -cm red-yellow -a 70.0 & \n', verbose, 'info')
def propseg(img_input, options_dict): """ :param img_input: source image, to be segmented :param options_dict: arguments as dictionary :return: segmented Image """ arguments = options_dict fname_input_data = img_input.absolutepath fname_data = os.path.abspath(fname_input_data) contrast_type = arguments["-c"] contrast_type_conversion = {'t1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1'} contrast_type_propseg = contrast_type_conversion[contrast_type] # Starting building the command cmd = ['isct_propseg', '-t', contrast_type_propseg] if "-ofolder" in arguments: folder_output = arguments["-ofolder"] else: folder_output = './' cmd += ['-o', folder_output] if not os.path.isdir(folder_output) and os.path.exists(folder_output): logger.error("output directory %s is not a valid directory" % folder_output) if not os.path.exists(folder_output): os.makedirs(folder_output) if "-down" in arguments: cmd += ["-down", str(arguments["-down"])] if "-up" in arguments: cmd += ["-up", str(arguments["-up"])] remove_temp_files = 1 if "-r" in arguments: remove_temp_files = int(arguments["-r"]) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # Update for propseg binary if verbose > 0: cmd += ["-verbose"] # Output options if "-mesh" in arguments: cmd += ["-mesh"] if "-centerline-binary" in arguments: cmd += ["-centerline-binary"] if "-CSF" in arguments: cmd += ["-CSF"] if "-centerline-coord" in arguments: cmd += ["-centerline-coord"] if "-cross" in arguments: cmd += ["-cross"] if "-init-tube" in arguments: cmd += ["-init-tube"] if "-low-resolution-mesh" in arguments: cmd += ["-low-resolution-mesh"] if "-detect-nii" in arguments: cmd += ["-detect-nii"] if "-detect-png" in arguments: cmd += ["-detect-png"] # Helping options use_viewer = None use_optic = True # enabled by default init_option = None rescale_header = arguments["-rescale"] if "-init" in arguments: init_option = float(arguments["-init"]) if init_option < 0: sct.printv('Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error') sys.exit(1) if "-init-centerline" in arguments: if str(arguments["-init-centerline"]) == "viewer": use_viewer = "centerline" elif str(arguments["-init-centerline"]) == "hough": use_optic = False else: if rescale_header is not 1: fname_labels_viewer = func_rescale_header(str(arguments["-init-centerline"]), rescale_header, verbose=verbose) else: fname_labels_viewer = str(arguments["-init-centerline"]) cmd += ["-init-centerline", fname_labels_viewer] use_optic = False if "-init-mask" in arguments: if str(arguments["-init-mask"]) == "viewer": use_viewer = "mask" else: if rescale_header is not 1: fname_labels_viewer = func_rescale_header(str(arguments["-init-mask"]), rescale_header) else: fname_labels_viewer = str(arguments["-init-mask"]) cmd += ["-init-mask", fname_labels_viewer] use_optic = False if "-mask-correction" in arguments: cmd += ["-mask-correction", str(arguments["-mask-correction"])] if "-radius" in arguments: cmd += ["-radius", str(arguments["-radius"])] if "-detect-n" in arguments: cmd += ["-detect-n", str(arguments["-detect-n"])] if "-detect-gap" in arguments: cmd += ["-detect-gap", str(arguments["-detect-gap"])] if "-init-validation" in arguments: cmd += ["-init-validation"] if "-nbiter" in arguments: cmd += ["-nbiter", str(arguments["-nbiter"])] if "-max-area" in arguments: cmd += ["-max-area", str(arguments["-max-area"])] if "-max-deformation" in arguments: cmd += ["-max-deformation", str(arguments["-max-deformation"])] if 
"-min-contrast" in arguments: cmd += ["-min-contrast", str(arguments["-min-contrast"])] if "-d" in arguments: cmd += ["-d", str(arguments["-d"])] if "-distance-search" in arguments: cmd += ["-dsearch", str(arguments["-distance-search"])] if "-alpha" in arguments: cmd += ["-alpha", str(arguments["-alpha"])] # check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one. image_input = Image(fname_data) image_input_rpi = image_input.copy().change_orientation('RPI') nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim if nt > 1: sct.printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error') path_data, file_data, ext_data = sct.extract_fname(fname_data) path_tmp = sct.tmp_create(basename="label_vertebrae", verbose=verbose) # rescale header (see issue #1406) if rescale_header is not 1: fname_data_propseg = func_rescale_header(fname_data, rescale_header) else: fname_data_propseg = fname_data # add to command cmd += ['-i', fname_data_propseg] # if centerline or mask is asked using viewer if use_viewer: from spinalcordtoolbox.gui.base import AnatomicalParams from spinalcordtoolbox.gui.centerline import launch_centerline_dialog params = AnatomicalParams() if use_viewer == 'mask': params.num_points = 3 params.interval_in_mm = 15 # superior-inferior interval between two consecutive labels params.starting_slice = 'midfovminusinterval' if use_viewer == 'centerline': # setting maximum number of points to a reasonable value params.num_points = 20 params.interval_in_mm = 30 params.starting_slice = 'top' im_data = Image(fname_data_propseg) im_mask_viewer = msct_image.zeros_like(im_data) # im_mask_viewer.absolutepath = sct.add_suffix(fname_data_propseg, '_labels_viewer') controller = launch_centerline_dialog(im_data, im_mask_viewer, params) fname_labels_viewer = sct.add_suffix(fname_data_propseg, '_labels_viewer') if not controller.saved: sct.printv('The viewer has been closed before entering all manual points. Please try again.', 1, 'error') sys.exit(1) # save labels controller.as_niftii(fname_labels_viewer) # add mask filename to parameters string if use_viewer == "centerline": cmd += ["-init-centerline", fname_labels_viewer] elif use_viewer == "mask": cmd += ["-init-mask", fname_labels_viewer] # If using OptiC elif use_optic: image_centerline = optic.detect_centerline(image_input, contrast_type, verbose) fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz') image_centerline.save(fname_centerline_optic) cmd += ["-init-centerline", fname_centerline_optic] if init_option is not None: if init_option > 1: init_option /= (nz - 1) cmd += ['-init', str(init_option)] # enabling centerline extraction by default (needed by check_and_correct_segmentation() ) cmd += ['-centerline-binary'] # run propseg status, output = sct.run(cmd, verbose, raise_exception=False, is_sct_binary=True) # check status is not 0 if not status == 0: sct.printv('Automatic cord detection failed. 
Please initialize using -init-centerline or -init-mask (see help)', 1, 'error') sys.exit(1) # build output filename fname_seg = os.path.join(folder_output, os.path.basename(sct.add_suffix(fname_data, "_seg"))) fname_centerline = os.path.join(folder_output, os.path.basename(sct.add_suffix(fname_data, "_centerline"))) # in case header was rescaled, we need to update the output file names by removing the "_rescaled" if rescale_header is not 1: sct.mv(os.path.join(folder_output, sct.add_suffix(os.path.basename(fname_data_propseg), "_seg")), fname_seg) sct.mv(os.path.join(folder_output, sct.add_suffix(os.path.basename(fname_data_propseg), "_centerline")), fname_centerline) # if user was used, copy the labelled points to the output folder (they will then be scaled back) if use_viewer: fname_labels_viewer_new = os.path.join(folder_output, os.path.basename(sct.add_suffix(fname_data, "_labels_viewer"))) sct.copy(fname_labels_viewer, fname_labels_viewer_new) # update variable (used later) fname_labels_viewer = fname_labels_viewer_new # check consistency of segmentation if arguments["-correct-seg"] == "1": check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output, threshold_distance=3.0, remove_temp_files=remove_temp_files, verbose=verbose) # copy header from input to segmentation to make sure qform is the same sct.printv("Copy header input --> output(s) to make sure qform is the same.", verbose) list_fname = [fname_seg, fname_centerline] if use_viewer: list_fname.append(fname_labels_viewer) for fname in list_fname: im = Image(fname) im.header = image_input.header im.save(dtype='int8') # they are all binary masks hence fine to save as int8 return Image(fname_seg)
def main(args=None): if args is None: args = sys.argv[1:] # create param objects param_seg = ParamSeg() param_data = ParamData() param_model = ParamModel() param = Param() # get parser parser = get_parser() arguments = parser.parse(args) # set param arguments ad inputted by user param_seg.fname_im = arguments["-i"] param_seg.fname_im_original = arguments["-i"] param_seg.fname_seg = arguments["-s"] if '-vertfile' in arguments: if extract_fname(arguments['-vertfile'])[1].lower() == "none": param_seg.fname_level = None elif os.path.isfile(arguments['-vertfile']): param_seg.fname_level = arguments['-vertfile'] else: param_seg.fname_level = None printv('WARNING: -vertfile input file: "' + arguments['-vertfile'] + '" does not exist.\nSegmenting GM without using vertebral information', 1, 'warning') if '-denoising' in arguments: param_data.denoising = bool(int(arguments['-denoising'])) if '-normalization' in arguments: param_data.normalization = bool(int(arguments['-normalization'])) if '-p' in arguments: param_data.register_param = arguments['-p'] if '-w-levels' in arguments: param_seg.weight_level = arguments['-w-levels'] if '-w-coordi' in arguments: param_seg.weight_coord = arguments['-w-coordi'] if '-thr-sim' in arguments: param_seg.thr_similarity = arguments['-thr-sim'] if '-model' in arguments: param_model.path_model_to_load = os.path.abspath(arguments['-model']) if '-res-type' in arguments: param_seg.type_seg = arguments['-res-type'] if '-ref' in arguments: param_seg.fname_manual_gmseg = arguments['-ref'] if '-ofolder' in arguments: param_seg.path_results = os.path.abspath(arguments['-ofolder']) param_seg.qc = arguments.get("-qc", None) if '-r' in arguments: param.rm_tmp = bool(int(arguments['-r'])) param.verbose = int(arguments.get('-v')) sct.init_sct(log_level=param.verbose, update=True) # Update log level start_time = time.time() seg_gm = SegmentGM(param_seg=param_seg, param_data=param_data, param_model=param_model, param=param) seg_gm.segment() elapsed_time = time.time() - start_time printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', param.verbose) # save quality control and sct.printv(info) if param_seg.type_seg == 'bin': wm_col = 'red' gm_col = 'blue' b = '0,1' else: wm_col = 'blue-lightblue' gm_col = 'red-yellow' b = '0.4,1' if param_seg.qc is not None: generate_qc(param_seg.fname_im_original, seg_gm.fname_res_gmseg, seg_gm.fname_res_wmseg, param_seg, args, os.path.abspath(param_seg.qc)) if param.rm_tmp: # remove tmp_dir sct.rmtree(seg_gm.tmp_dir) sct.display_viewer_syntax([param_seg.fname_im_original, seg_gm.fname_res_gmseg, seg_gm.fname_res_wmseg], colormaps=['gray', gm_col, wm_col], minmax=['', b, b], opacities=['1', '0.7', '0.7'], verbose=param.verbose)
def main(args=None): if not args: args = sys.argv[1:] # initialize parameters param = Param() # call main function parser = get_parser() arguments = parser.parse(args) fname_data = arguments['-i'] fname_bvecs = arguments['-bvec'] average = arguments['-a'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level remove_temp_files = int(arguments['-r']) path_out = arguments['-ofolder'] if '-bval' in arguments: fname_bvals = arguments['-bval'] else: fname_bvals = '' if '-bvalmin' in arguments: param.bval_min = arguments['-bvalmin'] # Initialization start_time = time.time() # sct.printv(arguments) sct.printv('\nInput parameters:', verbose) sct.printv(' input file ............' + fname_data, verbose) sct.printv(' bvecs file ............' + fname_bvecs, verbose) sct.printv(' bvals file ............' + fname_bvals, verbose) sct.printv(' average ...............' + str(average), verbose) # Get full path fname_data = os.path.abspath(fname_data) fname_bvecs = os.path.abspath(fname_bvecs) if fname_bvals: fname_bvals = os.path.abspath(fname_bvals) # Extract path, file and extension path_data, file_data, ext_data = sct.extract_fname(fname_data) # create temporary folder path_tmp = sct.tmp_create(basename="dmri_separate", verbose=verbose) # copy files into tmp folder and convert to nifti sct.printv('\nCopy files into temporary folder...', verbose) ext = '.nii' dmri_name = 'dmri' b0_name = file_data + '_b0' b0_mean_name = b0_name + '_mean' dwi_name = file_data + '_dwi' dwi_mean_name = dwi_name + '_mean' if not convert(fname_data, os.path.join(path_tmp, dmri_name + ext)): sct.printv('ERROR in convert.', 1, 'error') sct.copy(fname_bvecs, os.path.join(path_tmp, "bvecs"), verbose=verbose) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # Get size of data im_dmri = Image(dmri_name + ext) sct.printv('\nGet dimensions data...', verbose) nx, ny, nz, nt, px, py, pz, pt = im_dmri.dim sct.printv('.. 
' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt), verbose) # Identify b=0 and DWI images sct.printv(fname_bvals) index_b0, index_dwi, nb_b0, nb_dwi = identify_b0(fname_bvecs, fname_bvals, param.bval_min, verbose) # Split into T dimension sct.printv('\nSplit along T dimension...', verbose) im_dmri_split_list = split_data(im_dmri, 3) for im_d in im_dmri_split_list: im_d.save() # Merge b=0 images sct.printv('\nMerge b=0...', verbose) from sct_image import concat_data l = [] for it in range(nb_b0): l.append(dmri_name + '_T' + str(index_b0[it]).zfill(4) + ext) im_out = concat_data(l, 3).save(b0_name + ext) # Average b=0 images if average: sct.printv('\nAverage b=0...', verbose) sct.run(['sct_maths', '-i', b0_name + ext, '-o', b0_mean_name + ext, '-mean', 't'], verbose) # Merge DWI l = [] for it in range(nb_dwi): l.append(dmri_name + '_T' + str(index_dwi[it]).zfill(4) + ext) im_out = concat_data(l, 3).save(dwi_name + ext) # Average DWI images if average: sct.printv('\nAverage DWI...', verbose) sct.run(['sct_maths', '-i', dwi_name + ext, '-o', dwi_mean_name + ext, '-mean', 't'], verbose) # come back os.chdir(curdir) # Generate output files fname_b0 = os.path.abspath(os.path.join(path_out, b0_name + ext_data)) fname_dwi = os.path.abspath(os.path.join(path_out, dwi_name + ext_data)) fname_b0_mean = os.path.abspath(os.path.join(path_out, b0_mean_name + ext_data)) fname_dwi_mean = os.path.abspath(os.path.join(path_out, dwi_mean_name + ext_data)) sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(os.path.join(path_tmp, b0_name + ext), fname_b0, verbose=verbose) sct.generate_output_file(os.path.join(path_tmp, dwi_name + ext), fname_dwi, verbose=verbose) if average: sct.generate_output_file(os.path.join(path_tmp, b0_mean_name + ext), fname_b0_mean, verbose=verbose) sct.generate_output_file(os.path.join(path_tmp, dwi_mean_name + ext), fname_dwi_mean, verbose=verbose) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...', verbose) sct.rmtree(path_tmp, verbose=verbose) # display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose) return fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean
def main(args = None): dim_list = ['x', 'y', 'z', 't'] if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) fname_in = arguments["-i"] fname_out = arguments["-o"] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level if '-type' in arguments: output_type = arguments['-type'] else: output_type = None # Open file(s) im = Image(fname_in) data = im.data # 3d or 4d numpy array dim = im.dim # run command if '-otsu' in arguments: param = arguments['-otsu'] data_out = otsu(data, param) elif '-otsu_adap' in arguments: param = arguments['-otsu_adap'] data_out = otsu_adap(data, param[0], param[1]) elif '-otsu_median' in arguments: param = arguments['-otsu_median'] data_out = otsu_median(data, param[0], param[1]) elif '-thr' in arguments: param = arguments['-thr'] data_out = threshold(data, param) elif '-percent' in arguments: param = arguments['-percent'] data_out = perc(data, param) elif '-bin' in arguments: bin_thr = arguments['-bin'] data_out = binarise(data, bin_thr=bin_thr) elif '-add' in arguments: from numpy import sum data2 = get_data_or_scalar(arguments["-add"], data) data_concat = concatenate_along_4th_dimension(data, data2) data_out = sum(data_concat, axis=3) elif '-sub' in arguments: data2 = get_data_or_scalar(arguments['-sub'], data) data_out = data - data2 elif "-laplacian" in arguments: sigmas = arguments["-laplacian"] if len(sigmas) == 1: sigmas = [sigmas for i in range(len(data.shape))] elif len(sigmas) != len(data.shape): printv(parser.usage.generate(error='ERROR: -laplacian need the same number of inputs as the number of image dimension OR only one input')) # adjust sigma based on voxel size sigmas = [sigmas[i] / dim[i + 4] for i in range(3)] # smooth data data_out = laplacian(data, sigmas) elif '-mul' in arguments: from numpy import prod data2 = get_data_or_scalar(arguments["-mul"], data) data_concat = concatenate_along_4th_dimension(data, data2) data_out = prod(data_concat, axis=3) elif '-div' in arguments: from numpy import divide data2 = get_data_or_scalar(arguments["-div"], data) data_out = divide(data, data2) elif '-mean' in arguments: from numpy import mean dim = dim_list.index(arguments['-mean']) if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t data = data[..., np.newaxis] data_out = mean(data, dim) elif '-rms' in arguments: from numpy import mean, sqrt, square dim = dim_list.index(arguments['-rms']) if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t data = data[..., np.newaxis] data_out = sqrt(mean(square(data.astype(float)), dim)) elif '-std' in arguments: from numpy import std dim = dim_list.index(arguments['-std']) if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t data = data[..., np.newaxis] data_out = std(data, dim, ddof=1) elif "-smooth" in arguments: sigmas = arguments["-smooth"] if len(sigmas) == 1: sigmas = [sigmas[0] for i in range(len(data.shape))] elif len(sigmas) != len(data.shape): printv(parser.usage.generate(error='ERROR: -smooth need the same number of inputs as the number of image dimension OR only one input')) # adjust sigma based on voxel size sigmas = [sigmas[i] / dim[i + 4] for i in range(3)] # smooth data data_out = smooth(data, sigmas) elif '-dilate' in arguments: data_out = dilate(data, arguments['-dilate']) elif '-erode' in arguments: data_out = erode(data, arguments['-erode']) elif '-denoise' in arguments: # parse denoising arguments p, b = 1, 5 # default arguments list_denoise = 
arguments['-denoise'] for i in list_denoise: if 'p' in i: p = int(i.split('=')[1]) if 'b' in i: b = int(i.split('=')[1]) data_out = denoise_nlmeans(data, patch_radius=p, block_radius=b) elif '-symmetrize' in arguments: data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2) elif '-mi' in arguments: # input 1 = from flag -i --> im # input 2 = from flag -mi im_2 = Image(arguments['-mi']) compute_similarity(im.data, im_2.data, fname_out, metric='mi', verbose=verbose) data_out = None elif '-minorm' in arguments: im_2 = Image(arguments['-minorm']) compute_similarity(im.data, im_2.data, fname_out, metric='minorm', verbose=verbose) data_out = None elif '-corr' in arguments: # input 1 = from flag -i --> im # input 2 = from flag -mi im_2 = Image(arguments['-corr']) compute_similarity(im.data, im_2.data, fname_out, metric='corr', verbose=verbose) data_out = None # if no flag is set else: data_out = None printv(parser.usage.generate(error='ERROR: you need to specify an operation to do on the input image')) if data_out is not None: # Write output nii_out = Image(fname_in) # use header of input file nii_out.data = data_out nii_out.save(fname_out, dtype=output_type) # TODO: case of multiple outputs # assert len(data_out) == n_out # if n_in == n_out: # for im_in, d_out, fn_out in zip(nii, data_out, fname_out): # im_in.data = d_out # im_in.absolutepath = fn_out # if "-w" in arguments: # im_in.hdr.set_intent('vector', (), '') # im_in.save() # elif n_out == 1: # nii[0].data = data_out[0] # nii[0].absolutepath = fname_out[0] # if "-w" in arguments: # nii[0].hdr.set_intent('vector', (), '') # nii[0].save() # elif n_out > n_in: # for dat_out, name_out in zip(data_out, fname_out): # im_out = nii[0].copy() # im_out.data = dat_out # im_out.absolutepath = name_out # if "-w" in arguments: # im_out.hdr.set_intent('vector', (), '') # im_out.save() # else: # printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs')) # display message if data_out is not None: sct.display_viewer_syntax([fname_out], verbose=verbose) else: printv('\nDone! File created: ' + fname_out, verbose, 'info')
def main(): """Main function.""" sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) fname_image = os.path.abspath(arguments['-i']) contrast_type = arguments['-c'] ctr_algo = arguments["-centerline"] if "-brain" not in args: if contrast_type in ['t2s', 'dwi']: brain_bool = False if contrast_type in ['t1', 't2']: brain_bool = True else: brain_bool = bool(int(arguments["-brain"])) kernel_size = arguments["-kernel"] if kernel_size == '3d' and contrast_type == 'dwi': kernel_size = '2d' sct.printv( '3D kernel model for dwi contrast is not available. 2D kernel model is used instead.', type="warning") if '-ofolder' not in args: output_folder = os.getcwd() else: output_folder = arguments["-ofolder"] if ctr_algo == 'manual' and "-file_centerline" not in args: sct.log.warning( 'Please use the flag -file_centerline to indicate the centerline filename.' ) sys.exit(1) if "-file_centerline" in args: manual_centerline_fname = arguments["-file_centerline"] ctr_algo = 'manual' else: manual_centerline_fname = None remove_temp_files = int(arguments['-r']) verbose = arguments['-v'] path_qc = arguments.get("-qc", None) algo_config_stg = '\nMethod:' algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo) algo_config_stg += '\n\tAssumes brain section included in the image: ' + str( brain_bool) algo_config_stg += '\n\tDimension of the segmentation kernel convolutions: ' + kernel_size + '\n' sct.printv(algo_config_stg) im_image = Image(fname_image) # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp = deep_segmentation_spinalcord( im_image.copy(), contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname, brain_bool=brain_bool, kernel_size=kernel_size, remove_temp_files=remove_temp_files, verbose=verbose) # Save segmentation fname_seg = os.path.abspath( os.path.join( output_folder, sct.extract_fname(fname_image)[1] + '_seg' + sct.extract_fname(fname_image)[2])) im_seg.save(fname_seg) if path_qc is not None: generate_qc(im_image, im_seg, args, os.path.abspath(path_qc)) sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
def main(args=None): # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) input_filename = arguments['-i'] input_fname_output = None input_fname_ref = None input_cross_radius = 5 input_dilate = False input_coordinates = None vertebral_levels = None value = None if '-add' in arguments: process_type = 'add' value = arguments['-add'] elif '-create' in arguments: process_type = 'create' input_coordinates = arguments['-create'] elif '-create-add' in arguments: process_type = 'create-add' input_coordinates = arguments['-create-add'] elif '-create-seg' in arguments: process_type = 'create-seg' input_coordinates = arguments['-create-seg'] elif '-cross' in arguments: process_type = 'cross' input_cross_radius = arguments['-cross'] elif '-cubic-to-point' in arguments: process_type = 'cubic-to-point' elif '-display' in arguments: process_type = 'display-voxel' elif '-increment' in arguments: process_type = 'increment' elif '-vert-body' in arguments: process_type = 'vert-body' vertebral_levels = arguments['-vert-body'] # elif '-vert-disc' in arguments: # process_type = 'vert-disc' # vertebral_levels = arguments['-vert-disc'] elif '-vert-continuous' in arguments: process_type = 'vert-continuous' elif '-MSE' in arguments: process_type = 'MSE' input_fname_ref = arguments['-r'] elif '-remove-reference' in arguments: process_type = 'remove-reference' input_fname_ref = arguments['-remove-reference'] elif '-remove-symm' in arguments: process_type = 'remove-symm' input_fname_ref = arguments['-r'] elif '-create-viewer' in arguments: process_type = 'create-viewer' value = arguments['-create-viewer'] elif '-remove' in arguments: process_type = 'remove' value = arguments['-remove'] elif '-keep' in arguments: process_type = 'keep' value = arguments['-keep'] else: # no process chosen sct.printv('ERROR: No process was chosen.', 1, 'error') if '-msg' in arguments: msg = arguments['-msg'] + "\n" else: msg = "" if '-o' in arguments: input_fname_output = arguments['-o'] if '-ilabel' in arguments: input_fname_previous = arguments['-ilabel'] else: input_fname_previous = None verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level processor = ProcessLabels(input_filename, fname_output=input_fname_output, fname_ref=input_fname_ref, cross_radius=input_cross_radius, dilate=input_dilate, coordinates=input_coordinates, verbose=verbose, vertebral_levels=vertebral_levels, value=value, msg=msg, fname_previous=input_fname_previous) processor.process(process_type) # return based on process type if process_type == 'display-voxel': return processor.useful_notation path_qc = arguments.get("-qc", None) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) if path_qc is not None: generate_qc(fname_in1=input_filename, fname_seg=input_fname_output[0], args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_label_utils')
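# ProcessLabels above receives coordinates parsed from flags such as -create ('x,y,z,value' strings).
# The numpy sketch below illustrates what creating point labels in a volume amounts to; it is an
# assumption-level example, not the ProcessLabels implementation.
import numpy as np

def create_point_labels(shape, coordinates):
    """Return a volume of zeros with the requested values set at single voxels.
    `coordinates` is an iterable of 'x,y,z,value' strings (illustrative sketch)."""
    labels = np.zeros(shape, dtype=np.int32)
    for coord in coordinates:
        x, y, z, value = (int(v) for v in coord.split(','))
        labels[x, y, z] = value
    return labels

lab = create_point_labels((10, 10, 10), ['2,3,4,50', '5,5,5,99'])
print(np.transpose(np.nonzero(lab)))  # coordinates of the two labels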
def main(args=None): """ Main function :param args: :return: """ # initializations output_type = None dim_list = ['x', 'y', 'z', 't'] # Get parser args if args is None: args = None if sys.argv[1:] else ['--help'] parser = get_parser() arguments = parser.parse_args(args=args) fname_in = arguments.i n_in = len(fname_in) verbose = arguments.v sct.init_sct(log_level=verbose, update=True) # Update log level if arguments.o is not None: fname_out = arguments.o else: fname_out = None # run command if arguments.concat is not None: dim = arguments.concat assert dim in dim_list dim = dim_list.index(dim) im_out = [concat_data(fname_in, dim)] # TODO: adapt to fname_in elif arguments.copy_header is not None: im_in = Image(fname_in[0]) im_dest = Image(arguments.copy_header) im_dest_new = im_in.copy() im_dest_new.data = im_dest.data.copy() # im_dest.header = im_in.header im_dest_new.absolutepath = im_dest.absolutepath im_out = [im_dest_new] fname_out = arguments.copy_header elif arguments.display_warp: im_in = fname_in[0] visualize_warp(im_in, fname_grid=None, step=3, rm_tmp=True) im_out = None elif arguments.getorient: im_in = Image(fname_in[0]) orient = im_in.orientation im_out = None elif arguments.keep_vol is not None: index_vol = (arguments.keep_vol).split(',') for iindex_vol, vol in enumerate(index_vol): index_vol[iindex_vol] = int(vol) im_in = Image(fname_in[0]) im_out = [remove_vol(im_in, index_vol, todo='keep')] elif arguments.mcs: im_in = Image(fname_in[0]) if n_in != 1: sct.printv(parser.usage.generate(error='ERROR: -mcs need only one input')) if len(im_in.data.shape) != 5: sct.printv(parser.usage.generate(error='ERROR: -mcs input need to be a multi-component image')) im_out = multicomponent_split(im_in) elif arguments.omc: im_ref = Image(fname_in[0]) for fname in fname_in: im = Image(fname) if im.data.shape != im_ref.data.shape: sct.printv(parser.usage.generate(error='ERROR: -omc inputs need to have all the same shapes')) del im im_out = [multicomponent_merge(fname_in)] # TODO: adapt to fname_in elif arguments.pad is not None: im_in = Image(fname_in[0]) ndims = len(im_in.data.shape) if ndims != 3: sct.printv('ERROR: you need to specify a 3D input file.', 1, 'error') return pad_arguments = arguments.pad.split(',') if len(pad_arguments) != 3: sct.printv('ERROR: you need to specify 3 padding values.', 1, 'error') padx, pady, padz = pad_arguments padx, pady, padz = int(padx), int(pady), int(padz) im_out = [pad_image(im_in, pad_x_i=padx, pad_x_f=padx, pad_y_i=pady, pad_y_f=pady, pad_z_i=padz, pad_z_f=padz)] elif arguments.pad_asym is not None: im_in = Image(fname_in[0]) ndims = len(im_in.data.shape) if ndims != 3: sct.printv('ERROR: you need to specify a 3D input file.', 1, 'error') return pad_arguments = arguments.pad_asym.split(',') if len(pad_arguments) != 6: sct.printv('ERROR: you need to specify 6 padding values.', 1, 'error') padxi, padxf, padyi, padyf, padzi, padzf = pad_arguments padxi, padxf, padyi, padyf, padzi, padzf = int(padxi), int(padxf), int(padyi), int(padyf), int(padzi), int(padzf) im_out = [pad_image(im_in, pad_x_i=padxi, pad_x_f=padxf, pad_y_i=padyi, pad_y_f=padyf, pad_z_i=padzi, pad_z_f=padzf)] elif arguments.remove_vol is not None: index_vol = (arguments.remove_vol).split(',') for iindex_vol, vol in enumerate(index_vol): index_vol[iindex_vol] = int(vol) im_in = Image(fname_in[0]) im_out = [remove_vol(im_in, index_vol, todo='remove')] elif arguments.setorient is not None: sct.printv(fname_in[0]) im_in = Image(fname_in[0]) im_out = [msct_image.change_orientation(im_in, 
arguments.setorient)] elif arguments.setorient_data is not None: im_in = Image(fname_in[0]) im_out = [msct_image.change_orientation(im_in, arguments.setorient_data, data_only=True)] elif arguments.split is not None: dim = arguments.split assert dim in dim_list im_in = Image(fname_in[0]) dim = dim_list.index(dim) im_out = split_data(im_in, dim) elif arguments.type is not None: output_type = arguments.type im_in = Image(fname_in[0]) im_out = [im_in] # TODO: adapt to fname_in else: im_out = None sct.printv(parser.usage.generate(error='ERROR: you need to specify an operation to do on the input image')) # in case fname_out is not defined, use first element of input file name list if fname_out is None: fname_out = fname_in[0] # Write output if im_out is not None: sct.printv('Generate output files...', verbose) # if only one output if len(im_out) == 1 and not '-split' in arguments: im_out[0].save(fname_out, dtype=output_type, verbose=verbose) sct.display_viewer_syntax([fname_out], verbose=verbose) if arguments.mcs: # use input file name and add _X, _Y _Z. Keep the same extension l_fname_out = [] for i_dim in range(3): l_fname_out.append(sct.add_suffix(fname_out or fname_in[0], '_' + dim_list[i_dim].upper())) im_out[i_dim].save(l_fname_out[i_dim], verbose=verbose) sct.display_viewer_syntax(fname_out) if arguments.split is not None: # use input file name and add _"DIM+NUMBER". Keep the same extension l_fname_out = [] for i, im in enumerate(im_out): l_fname_out.append(sct.add_suffix(fname_out or fname_in[0], '_' + dim_list[dim].upper() + str(i).zfill(4))) im.save(l_fname_out[i]) sct.display_viewer_syntax(l_fname_out) elif arguments.getorient: sct.printv(orient) elif arguments.display_warp: sct.printv('Warping grid generated.', verbose, 'info')
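# The -pad and -pad-asym branches above parse comma-separated values and zero-pad the 3D volume.
# A minimal numpy sketch of that logic (illustrative only, not the toolbox's pad_image):
import numpy as np

def pad_volume(data, pad_str):
    """Zero-pad a 3D array. `pad_str` is 'padx,pady,padz' (symmetric) or a
    6-value 'xi,xf,yi,yf,zi,zf' string (asymmetric). Illustrative sketch only."""
    values = [int(v) for v in pad_str.split(',')]
    if len(values) == 3:
        pads = [(v, v) for v in values]
    elif len(values) == 6:
        pads = [(values[0], values[1]), (values[2], values[3]), (values[4], values[5])]
    else:
        raise ValueError('Expected 3 or 6 comma-separated padding values')
    return np.pad(data, pads, mode='constant', constant_values=0)

vol = np.ones((4, 4, 4))
print(pad_volume(vol, '1,2,3').shape)        # (6, 8, 10)
print(pad_volume(vol, '1,0,0,2,0,0').shape)  # (5, 6, 4)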
indzero = np.where(data_smooth == 0) data_smooth[indzero] = data_input[indzero] nii_smooth.data = data_smooth nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii') # come back os.chdir(curdir) # Generate output file sct.printv('\nGenerate output file...') sct.generate_output_file(os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"), file_anat + '_smooth' + ext_anat) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...') sct.rmtree(path_tmp) # Display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n') sct.display_viewer_syntax([file_anat, file_anat + '_smooth'], verbose=verbose) # START PROGRAM # ========================================================================================== if __name__ == "__main__": sct.init_sct() main()
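# The smoothing step above copies the original intensities back wherever the smoothed volume is
# exactly zero, presumably to avoid leaving empty voxels in the output. A minimal numpy sketch of
# that restoration step (illustration only):
import numpy as np

def restore_zero_voxels(data_smooth, data_input):
    """Where the smoothed volume is exactly zero, copy back the original value
    (mirrors the indzero step above; minimal illustrative sketch)."""
    out = data_smooth.copy()
    mask_zero = (out == 0)
    out[mask_zero] = data_input[mask_zero]
    return out

a = np.array([0.0, 1.5, 0.0, 2.0])
b = np.array([7.0, 1.0, 9.0, 2.0])
print(restore_zero_voxels(a, b))  # [7.  1.5 9.  2. ]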
def main(): """Main function.""" parser = get_parser() args = parser.parse_args(args=None if sys.argv[1:] else ['--help']) fname_image = os.path.abspath(args.i) contrast_type = args.c ctr_algo = args.centerline if args.brain is None: if contrast_type in ['t2s', 'dwi']: brain_bool = False if contrast_type in ['t1', 't2']: brain_bool = True else: brain_bool = bool(args.brain) kernel_size = args.kernel if kernel_size == '3d' and contrast_type == 'dwi': kernel_size = '2d' sct.printv( '3D kernel model for dwi contrast is not available. 2D kernel model is used instead.', type="warning") if ctr_algo == 'file' and args.file_centerline is None: sct.printv( 'Please use the flag -file_centerline to indicate the centerline filename.', 1, 'warning') sys.exit(1) if args.file_centerline is not None: manual_centerline_fname = args.file_centerline ctr_algo = 'file' else: manual_centerline_fname = None threshold = args.thr if threshold is not None: if threshold > 1.0 or (threshold < 0.0 and threshold != -1.0): raise SyntaxError( "Threshold should be between 0 and 1, or equal to -1 (no threshold)" ) remove_temp_files = args.r verbose = args.v sct.init_sct(log_level=verbose, update=True) # Update log level path_qc = args.qc qc_dataset = args.qc_dataset qc_subject = args.qc_subject output_folder = args.ofolder # Segment image from spinalcordtoolbox.image import Image from spinalcordtoolbox.deepseg_sc.core import deep_segmentation_spinalcord from spinalcordtoolbox.reports.qc import generate_qc im_image = Image(fname_image) # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp = \ deep_segmentation_spinalcord(im_image.copy(), contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname, brain_bool=brain_bool, kernel_size=kernel_size, threshold_seg=threshold, remove_temp_files=remove_temp_files, verbose=verbose) # Save segmentation fname_seg = os.path.abspath( os.path.join( output_folder, sct.extract_fname(fname_image)[1] + '_seg' + sct.extract_fname(fname_image)[2])) im_seg.save(fname_seg) # Generate QC report if path_qc is not None: generate_qc(fname_image, fname_seg=fname_seg, args=sys.argv[1:], path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_deepseg_sc') sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
# result = hough_ellipse(edges, accuracy=20, min_size=5, max_size=20) # result.sort(order='accumulator') # # Estimated parameters for the ellipse # best = list(result[-1]) # yc, xc, a, b = [int(round(x)) for x in best[1:5]] # orientation = best[5] # # Draw the ellipse on the original image # from matplotlib.pylab import * # from skimage.draw import ellipse_perimeter # cy, cx = ellipse_perimeter(yc, xc, a, b, orientation) # # image_rgb[cy, cx] = (0, 0, 255) # # Draw the edge (white) and the resulting ellipse (red) # # edges = color.gray2rgb(edges) # data2d[cy, cx] = 1000 # # detect edges # from skimage.feature import canny # from skimage import morphology, measure # data2d = data3d[:, :, 8] # edges = canny(data2d, sigma=3.0) # contours = measure.find_contours(edges, 1, fully_connected='low') # mask = morphology.closing(edges, morphology.square(3), out=None) # k-means clustering # from sklearn.cluster import KMeans if __name__ == "__main__": init_sct() main()
def main(args=None): # initializations initz = '' initcenter = '' fname_initlabel = '' file_labelz = 'labelz.nii.gz' param = Param() # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) fname_in = os.path.abspath(arguments["-i"]) fname_seg = os.path.abspath(arguments['-s']) contrast = arguments['-c'] path_template = os.path.abspath(arguments['-t']) scale_dist = arguments['-scale-dist'] if '-ofolder' in arguments: path_output = arguments['-ofolder'] else: path_output = os.curdir param.path_qc = arguments.get("-qc", None) if '-discfile' in arguments: fname_disc = os.path.abspath(arguments['-discfile']) else: fname_disc = None if '-initz' in arguments: initz = arguments['-initz'] if '-initcenter' in arguments: initcenter = arguments['-initcenter'] # if user provided text file, parse and overwrite arguments if '-initfile' in arguments: file = open(arguments['-initfile'], 'r') initfile = ' ' + file.read().replace('\n', '') arg_initfile = initfile.split(' ') for idx_arg, arg in enumerate(arg_initfile): if arg == '-initz': initz = [int(x) for x in arg_initfile[idx_arg + 1].split(',')] if arg == '-initcenter': initcenter = int(arg_initfile[idx_arg + 1]) if '-initlabel' in arguments: # get absolute path of label fname_initlabel = os.path.abspath(arguments['-initlabel']) if '-param' in arguments: param.update(arguments['-param'][0]) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level remove_temp_files = int(arguments['-r']) denoise = int(arguments['-denoise']) laplacian = int(arguments['-laplacian']) path_tmp = sct.tmp_create(basename="label_vertebrae", verbose=verbose) # Copying input data to tmp folder sct.printv('\nCopying input data to tmp folder...', verbose) Image(fname_in).save(os.path.join(path_tmp, "data.nii")) Image(fname_seg).save(os.path.join(path_tmp, "segmentation.nii")) # Go go temp folder curdir = os.getcwd() os.chdir(path_tmp) # Straighten spinal cord sct.printv('\nStraighten spinal cord...', verbose) # check if warp_curve2straight and warp_straight2curve already exist (i.e. 
no need to do it another time) cache_sig = sct.cache_signature( input_files=[fname_in, fname_seg], ) cachefile = os.path.join(curdir, "straightening.cache") if sct.cache_valid(cachefile, cache_sig) and os.path.isfile(os.path.join(curdir, "warp_curve2straight.nii.gz")) and os.path.isfile(os.path.join(curdir, "warp_straight2curve.nii.gz")) and os.path.isfile(os.path.join(curdir, "straight_ref.nii.gz")): # if they exist, copy them into current folder sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning') sct.copy(os.path.join(curdir, "warp_curve2straight.nii.gz"), 'warp_curve2straight.nii.gz') sct.copy(os.path.join(curdir, "warp_straight2curve.nii.gz"), 'warp_straight2curve.nii.gz') sct.copy(os.path.join(curdir, "straight_ref.nii.gz"), 'straight_ref.nii.gz') # apply straightening s, o = sct.run(['sct_apply_transfo', '-i', 'data.nii', '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'data_straight.nii']) else: sct_straighten_spinalcord.main(args=[ '-i', 'data.nii', '-s', 'segmentation.nii', '-r', str(remove_temp_files), '-v', str(verbose), ]) sct.cache_save(cachefile, cache_sig) # resample to 0.5mm isotropic to match template resolution sct.printv('\nResample to 0.5mm isotropic...', verbose) s, o = sct.run(['sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x', 'linear', '-o', 'data_straightr.nii'], verbose=verbose) # Apply straightening to segmentation # N.B. Output is RPI sct.printv('\nApply straightening to segmentation...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % ('segmentation.nii', 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'segmentation_straight.nii', 'Linear'), verbose=verbose, is_sct_binary=True, ) # Threshold segmentation at 0.5 sct.run(['sct_maths', '-i', 'segmentation_straight.nii', '-thr', '0.5', '-o', 'segmentation_straight.nii'], verbose) # If disc label file is provided, label vertebrae using that file instead of automatically if fname_disc: # Apply straightening to disc-label sct.printv('\nApply straightening to disc labels...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % (fname_disc, 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'labeldisc_straight.nii.gz', 'NearestNeighbor'), verbose=verbose, is_sct_binary=True, ) label_vert('segmentation_straight.nii', 'labeldisc_straight.nii.gz', verbose=1) else: # create label to identify disc sct.printv('\nCreate label to identify disc...', verbose) fname_labelz = os.path.join(path_tmp, file_labelz) if initz or initcenter: if initcenter: # find z centered in FOV nii = Image('segmentation.nii').change_orientation("RPI") nx, ny, nz, nt, px, py, pz, pt = nii.dim # Get dimensions z_center = int(np.round(nz / 2)) # get z_center initz = [z_center, initcenter] # create single label and output as labels.nii.gz label = ProcessLabels('segmentation.nii', fname_output='tmp.labelz.nii.gz', coordinates=['{},{}'.format(initz[0], initz[1])]) im_label = label.process('create-seg') im_label.data = dilate(im_label.data, 3, 'ball') # TODO: create a dilation method specific to labels, # which does not apply a convolution across all voxels (highly inneficient) im_label.save(fname_labelz) elif fname_initlabel: Image(fname_initlabel).save(fname_labelz) else: # automatically finds C2-C3 disc im_data = Image('data.nii') im_seg = Image('segmentation.nii') if not remove_temp_files: # because verbose is here also used for keeping temp files verbose_detect_c2c3 = 2 else: verbose_detect_c2c3 = 0 
im_label_c2c3 = detect_c2c3(im_data, im_seg, contrast, verbose=verbose_detect_c2c3) ind_label = np.where(im_label_c2c3.data) if not np.size(ind_label) == 0: im_label_c2c3.data[ind_label] = 3 else: sct.printv('Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils', 1, 'error') sys.exit() im_label_c2c3.save(fname_labelz) # dilate label so it is not lost when applying warping dilate(Image(fname_labelz), 3, 'ball').save(fname_labelz) # Apply straightening to z-label sct.printv('\nAnd apply straightening to label...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % (file_labelz, 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'labelz_straight.nii.gz', 'NearestNeighbor'), verbose=verbose, is_sct_binary=True, ) # get z value and disk value to initialize labeling sct.printv('\nGet z and disc values from straight label...', verbose) init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz') sct.printv('.. ' + str(init_disc), verbose) # denoise data if denoise: sct.printv('\nDenoise data...', verbose) sct.run(['sct_maths', '-i', 'data_straightr.nii', '-denoise', 'h=0.05', '-o', 'data_straightr.nii'], verbose) # apply laplacian filtering if laplacian: sct.printv('\nApply Laplacian filter...', verbose) sct.run(['sct_maths', '-i', 'data_straightr.nii', '-laplacian', '1', '-o', 'data_straightr.nii'], verbose) # detect vertebral levels on straight spinal cord init_disc[1]=init_disc[1]-1 vertebral_detection('data_straightr.nii', 'segmentation_straight.nii', contrast, param, init_disc=init_disc, verbose=verbose, path_template=path_template, path_output=path_output, scale_dist=scale_dist) # un-straighten labeled spinal cord sct.printv('\nUn-straighten labeling...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % ('segmentation_straight_labeled.nii', 'segmentation.nii', 'warp_straight2curve.nii.gz', 'segmentation_labeled.nii', 'NearestNeighbor'), verbose=verbose, is_sct_binary=True, ) # Clean labeled segmentation sct.printv('\nClean labeled segmentation (correct interpolation errors)...', verbose) clean_labeled_segmentation('segmentation_labeled.nii', 'segmentation.nii', 'segmentation_labeled.nii') # label discs sct.printv('\nLabel discs...', verbose) label_discs('segmentation_labeled.nii', verbose=verbose) # come back os.chdir(curdir) # Generate output files path_seg, file_seg, ext_seg = sct.extract_fname(fname_seg) fname_seg_labeled = os.path.join(path_output, file_seg + '_labeled' + ext_seg) sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(os.path.join(path_tmp, "segmentation_labeled.nii"), fname_seg_labeled) sct.generate_output_file(os.path.join(path_tmp, "segmentation_labeled_disc.nii"), os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg)) # copy straightening files in case subsequent SCT functions need them sct.generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose=verbose) sct.generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, "warp_straight2curve.nii.gz"), verbose=verbose) sct.generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose=verbose) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...', verbose) sct.rmtree(path_tmp) # Generate QC report if param.path_qc is not None: path_qc = os.path.abspath(param.path_qc) 
qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) labeled_seg_file = os.path.join(path_output, file_seg + '_labeled' + ext_seg) generate_qc(fname_in, fname_seg=labeled_seg_file, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_label_vertebrae') sct.display_viewer_syntax([fname_in, fname_seg_labeled], colormaps=['', 'subcortical'], opacities=['1', '0.5'])
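# The vertebral-labeling pipeline above reuses warp_curve2straight/warp_straight2curve when a cached
# signature of the input files is still valid (see the straightening.cache logic). The following is a
# minimal sketch of how such a signature could be computed and checked with hashlib; these helpers are
# assumptions for illustration, not the toolbox's sct.cache_signature/cache_valid/cache_save.
import hashlib
import os

def cache_signature(input_files):
    """Hash file paths, sizes and modification times into a short signature
    (hypothetical stand-in for the toolbox's cache helpers)."""
    h = hashlib.sha256()
    for fname in input_files:
        st = os.stat(fname)
        h.update("{}:{}:{}".format(os.path.abspath(fname), st.st_size, st.st_mtime_ns).encode())
    return h.hexdigest()

def cache_valid(cachefile, signature):
    """True if the cache file exists and stores the same signature."""
    if not os.path.isfile(cachefile):
        return False
    with open(cachefile) as f:
        return f.read().strip() == signature

def cache_save(cachefile, signature):
    """Persist the signature so the next run can skip the expensive step."""
    with open(cachefile, 'w') as f:
        f.write(signature)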
def main(args=None): if args is None: args = sys.argv[1:] # initialization # note: mirror servers are listed in order of priority dict_url = { 'sct_example_data': [ 'https://osf.io/kjcgs/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180525_sct_example_data.zip' ], 'sct_testing_data': [ 'https://osf.io/z8gaj/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180125_sct_testing_data.zip' ], 'PAM50': [ 'https://osf.io/kc3jx/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181214_PAM50.zip' ], 'MNI-Poly-AMU': [ 'https://osf.io/sh6h4/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170310_MNI-Poly-AMU.zip' ], 'gm_model': [ 'https://osf.io/ugscu/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20160922_gm_model.zip' ], 'optic_models': [ 'https://osf.io/g4fwn/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170413_optic_models.zip' ], 'pmj_models': [ 'https://osf.io/4gufr/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170922_pmj_models.zip' ], 'binaries_debian': [ 'https://osf.io/z72vn/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181204_sct_binaries_linux.tar.gz' ], 'binaries_centos': [ 'https://osf.io/97ybd/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181204_sct_binaries_linux_centos6.tar.gz' ], 'binaries_osx': [ 'https://osf.io/zjv4c/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181204_sct_binaries_osx.tar.gz' ], 'course_hawaii17': 'https://osf.io/6exht/?action=download', 'course_paris18': [ 'https://osf.io/9bmn5/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180612_sct_course-paris18.zip' ], 'course_london19': [ 'https://osf.io/4q3u7/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190121_sct_course-london19.zip' ], 'deepseg_gm_models': [ 'https://osf.io/b9y4x/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180205_deepseg_gm_models.zip' ], 'deepseg_sc_models': [ 'https://osf.io/avf97/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180610_deepseg_sc_models.zip' ], 'deepseg_lesion_models': [ 'https://osf.io/eg7v9/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180613_deepseg_lesion_models.zip' ], 'c2c3_disc_models': [ 'https://osf.io/t97ap/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190117_c2c3_disc_models.zip' ] } # Get parser info parser = get_parser() arguments = parser.parse(args) data_name = arguments['-d'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level dest_folder = arguments.get('-o', os.path.abspath(os.curdir)) # Download data url = dict_url[data_name] tmp_file = download_data(url, verbose) # Check if folder already exists sct.printv('\nCheck if folder already exists...', verbose) if os.path.isdir(data_name): sct.printv( 'WARNING: Folder ' + data_name + ' already exists. Removing it...', 1, 'warning') sct.rmtree(data_name) # unzip unzip(tmp_file, dest_folder, verbose) sct.printv('\nRemove temporary file...', verbose) os.remove(tmp_file) sct.printv('Done!\n', verbose) return 0
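# dict_url above lists mirror URLs in order of priority. A minimal sketch of trying each mirror in
# turn with the standard library until one succeeds (illustrative only, not the toolbox's
# download_data):
import urllib.request

def download_with_mirrors(urls, dest_file):
    """Try each mirror in order; return on the first success, raise after the last.
    Illustrative sketch only."""
    if isinstance(urls, str):
        urls = [urls]
    last_error = None
    for url in urls:
        try:
            urllib.request.urlretrieve(url, dest_file)
            return dest_file
        except Exception as e:  # keep trying the next mirror
            last_error = e
    raise RuntimeError("All mirrors failed: {}".format(last_error))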
def run_main(): sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) # Input filename fname_input_data = arguments["-i"] fname_data = os.path.abspath(fname_input_data) # Method used method = 'optic' if "-method" in arguments: method = arguments["-method"] # Contrast type contrast_type = '' if "-c" in arguments: contrast_type = arguments["-c"] if method == 'optic' and not contrast_type: # Contrast must be specified when using the OptiC method error = 'ERROR: -c is a mandatory argument when using Optic method.' sct.printv(error, type='error') return # Gap between slices interslice_gap = 10.0 if "-gap" in arguments: interslice_gap = float(arguments["-gap"]) # Output folder if "-ofolder" in arguments: folder_output = arguments["-ofolder"] else: folder_output = '.' # Remove temporary files remove_temp_files = True if "-r" in arguments: remove_temp_files = bool(int(arguments["-r"])) # Outputs a ROI file output_roi = False if "-roi" in arguments: output_roi = bool(int(arguments["-roi"])) # Verbosity verbose = 0 if "-v" in arguments: verbose = int(arguments["-v"]) if method == 'viewer': fname_labels_viewer = _call_viewer_centerline( fname_in=fname_data, interslice_gap=interslice_gap) centerline_filename = extract_centerline( fname_labels_viewer, remove_temp_files=remove_temp_files, verbose=verbose, algo_fitting='nurbs', nurbs_pts_number=8000) else: # condition on verbose when using OptiC if verbose == 1: verbose = 2 # OptiC models path_script = os.path.dirname(__file__) path_sct = os.path.dirname(path_script) optic_models_path = os.path.join(path_sct, 'data', 'optic_models', '{}_model'.format(contrast_type)) # Execute OptiC binary _, centerline_filename = optic.detect_centerline( image_fname=fname_data, contrast_type=contrast_type, optic_models_path=optic_models_path, folder_output=folder_output, remove_temp_files=remove_temp_files, output_roi=output_roi, verbose=verbose) sct.display_viewer_syntax([fname_input_data, centerline_filename], colormaps=['gray', 'red'], opacities=['', '1'])
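# In viewer mode above, -gap sets the distance (in mm) between the axial slices proposed for manual
# labeling. Converting that gap into slice indices for a given slice thickness could look like the
# hypothetical sketch below (an assumption for illustration, not _call_viewer_centerline):
def slices_for_manual_labeling(n_slices, slice_thickness_mm, interslice_gap_mm=10.0):
    """Return evenly spaced slice indices, roughly `interslice_gap_mm` apart.
    Illustrative sketch only."""
    step = max(1, int(round(interslice_gap_mm / slice_thickness_mm)))
    return list(range(0, n_slices, step))

print(slices_for_manual_labeling(n_slices=40, slice_thickness_mm=2.5))  # every 4th slice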
def main(args=None): # initializations param = Param() # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) fname_data = arguments['-i'] fname_seg = arguments['-s'] if '-l' in arguments: fname_landmarks = arguments['-l'] label_type = 'body' elif '-ldisc' in arguments: fname_landmarks = arguments['-ldisc'] label_type = 'disc' elif '-lspinal' in arguments: fname_landmarks = arguments['-lspinal'] label_type = 'spinal' else: sct.printv('ERROR: Labels should be provided.', 1, 'error') if '-ofolder' in arguments: path_output = arguments['-ofolder'] else: path_output = '' param.path_qc = arguments.get("-qc", None) path_template = arguments['-t'] contrast_template = arguments['-c'] ref = arguments['-ref'] param.remove_temp_files = int(arguments.get('-r')) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level param.verbose = verbose # TODO: not clean, unify verbose or param.verbose in code, but not both param_centerline = ParamCenterline( algo_fitting=arguments['-centerline-algo'], smooth=arguments['-centerline-smooth']) # registration parameters if '-param' in arguments: # reset parameters but keep step=0 (might be overwritten if user specified step=0) paramregmulti = ParamregMultiStep([step0]) if ref == 'subject': paramregmulti.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz' # add user parameters for paramStep in arguments['-param']: paramregmulti.addStep(paramStep) else: paramregmulti = ParamregMultiStep([step0, step1, step2]) # if ref=subject, initialize registration using different affine parameters if ref == 'subject': paramregmulti.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz' # initialize other parameters zsubsample = param.zsubsample # retrieve template file names if label_type == 'spinal': file_template_labeling = get_file_label( os.path.join(path_template, 'template'), id_label=14) # label = point-wise spinal level labels else: file_template_labeling = get_file_label( os.path.join(path_template, 'template'), id_label=7 ) # label = spinal cord mask with discrete vertebral levels id_label_dct = {'T1': 0, 'T2': 1, 'T2S': 2} file_template = get_file_label( os.path.join(path_template, 'template'), id_label=id_label_dct[ contrast_template.upper()]) # label = *-weighted template file_template_seg = get_file_label( os.path.join(path_template, 'template'), id_label=3) # label = spinal cord mask (binary) # start timer start_time = time.time() # get fname of the template + template objects fname_template = os.path.join(path_template, 'template', file_template) fname_template_labeling = os.path.join(path_template, 'template', file_template_labeling) fname_template_seg = os.path.join(path_template, 'template', file_template_seg) fname_template_disc_labeling = os.path.join(path_template, 'template', 'PAM50_label_disc.nii.gz') # check file existence # TODO: no need to do that! 
sct.printv('\nCheck template files...') sct.check_file_exist(fname_template, verbose) sct.check_file_exist(fname_template_labeling, verbose) sct.check_file_exist(fname_template_seg, verbose) path_data, file_data, ext_data = sct.extract_fname(fname_data) # sct.printv(arguments) sct.printv('\nCheck parameters:', verbose) sct.printv(' Data: ' + fname_data, verbose) sct.printv(' Landmarks: ' + fname_landmarks, verbose) sct.printv(' Segmentation: ' + fname_seg, verbose) sct.printv(' Path template: ' + path_template, verbose) sct.printv(' Remove temp files: ' + str(param.remove_temp_files), verbose) # check input labels labels = check_labels(fname_landmarks, label_type=label_type) level_alignment = False if len(labels) > 2 and label_type in ['disc', 'spinal']: level_alignment = True path_tmp = sct.tmp_create(basename="register_to_template", verbose=verbose) # set temporary file names ftmp_data = 'data.nii' ftmp_seg = 'seg.nii.gz' ftmp_label = 'label.nii.gz' ftmp_template = 'template.nii' ftmp_template_seg = 'template_seg.nii.gz' ftmp_template_label = 'template_label.nii.gz' # copy files to temporary folder sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) Image(fname_data).save(os.path.join(path_tmp, ftmp_data)) Image(fname_seg).save(os.path.join(path_tmp, ftmp_seg)) Image(fname_landmarks).save(os.path.join(path_tmp, ftmp_label)) Image(fname_template).save(os.path.join(path_tmp, ftmp_template)) Image(fname_template_seg).save(os.path.join(path_tmp, ftmp_template_seg)) Image(fname_template_labeling).save( os.path.join(path_tmp, ftmp_template_label)) if label_type == 'disc': Image(fname_template_disc_labeling).save( os.path.join(path_tmp, ftmp_template_label)) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # Generate labels from template vertebral labeling if label_type == 'body': sct.printv('\nGenerate labels from template vertebral labeling', verbose) ftmp_template_label_, ftmp_template_label = ftmp_template_label, sct.add_suffix( ftmp_template_label, "_body") sct_label_utils.main(args=[ '-i', ftmp_template_label_, '-vert-body', '0', '-o', ftmp_template_label ]) # check if provided labels are available in the template sct.printv('\nCheck if provided labels are available in the template', verbose) image_label_template = Image(ftmp_template_label) labels_template = image_label_template.getNonZeroCoordinates( sorting='value') if labels[-1].value > labels_template[-1].value: sct.printv( 'ERROR: Wrong landmarks input. Labels must have correspondence in template space. \nLabel max ' 'provided: ' + str(labels[-1].value) + '\nLabel max from template: ' + str(labels_template[-1].value), verbose, 'error') # if only one label is present, force affine transformation to be Tx,Ty,Tz only (no scaling) if len(labels) == 1: paramregmulti.steps['0'].dof = 'Tx_Ty_Tz' sct.printv( 'WARNING: Only one label is present. Forcing initial transformation to: ' + paramregmulti.steps['0'].dof, 1, 'warning') # Project labels onto the spinal cord centerline because later, an affine transformation is estimated between the # template's labels (centered in the cord) and the subject's labels (assumed to be centered in the cord). 
# If labels are not centered, mis-registration errors are observed (see issue #1826) ftmp_label = project_labels_on_spinalcord(ftmp_label, ftmp_seg, param_centerline) # binarize segmentation (in case it has values below 0 caused by manual editing) sct.printv('\nBinarize segmentation', verbose) ftmp_seg_, ftmp_seg = ftmp_seg, sct.add_suffix(ftmp_seg, "_bin") sct_maths.main(['-i', ftmp_seg_, '-bin', '0.5', '-o', ftmp_seg]) # Switch between modes: subject->template or template->subject if ref == 'template': # resample data to 1mm isotropic sct.printv('\nResample data to 1mm isotropic...', verbose) resample_file(ftmp_data, add_suffix(ftmp_data, '_1mm'), '1.0x1.0x1.0', 'mm', 'linear', verbose) ftmp_data = add_suffix(ftmp_data, '_1mm') resample_file(ftmp_seg, add_suffix(ftmp_seg, '_1mm'), '1.0x1.0x1.0', 'mm', 'linear', verbose) ftmp_seg = add_suffix(ftmp_seg, '_1mm') # N.B. resampling of labels is more complicated, because they are single-point labels, therefore resampling # with nearest neighbour can make them disappear. resample_labels(ftmp_label, ftmp_data, add_suffix(ftmp_label, '_1mm')) ftmp_label = add_suffix(ftmp_label, '_1mm') # Change orientation of input images to RPI sct.printv('\nChange orientation of input images to RPI...', verbose) ftmp_data = Image(ftmp_data).change_orientation( "RPI", generate_path=True).save().absolutepath ftmp_seg = Image(ftmp_seg).change_orientation( "RPI", generate_path=True).save().absolutepath ftmp_label = Image(ftmp_label).change_orientation( "RPI", generate_path=True).save().absolutepath ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_crop') if level_alignment: # cropping the segmentation based on the label coverage to ensure good registration with level alignment # See https://github.com/neuropoly/spinalcordtoolbox/pull/1669 for details image_labels = Image(ftmp_label) coordinates_labels = image_labels.getNonZeroCoordinates( sorting='z') nx, ny, nz, nt, px, py, pz, pt = image_labels.dim offset_crop = 10.0 * pz # cropping the image 10 mm above and below the highest and lowest label cropping_slices = [ coordinates_labels[0].z - offset_crop, coordinates_labels[-1].z + offset_crop ] # make sure that the cropping slices do not extend outside of the slice range (issue #1811) if cropping_slices[0] < 0: cropping_slices[0] = 0 if cropping_slices[1] > nz: cropping_slices[1] = nz msct_image.spatial_crop( Image(ftmp_seg_), dict(((2, np.int32(np.round(cropping_slices))), ))).save(ftmp_seg) else: # if we do not align the vertebral levels, we crop the segmentation from top to bottom im_seg_rpi = Image(ftmp_seg_) bottom = 0 for data in msct_image.SlicerOneAxis(im_seg_rpi, "IS"): if (data != 0).any(): break bottom += 1 top = im_seg_rpi.data.shape[2] for data in msct_image.SlicerOneAxis(im_seg_rpi, "SI"): if (data != 0).any(): break top -= 1 msct_image.spatial_crop(im_seg_rpi, dict( ((2, (bottom, top)), ))).save(ftmp_seg) # straighten segmentation sct.printv( '\nStraighten the spinal cord using centerline/segmentation...', verbose) # check if warp_curve2straight and warp_straight2curve already exist (i.e. 
no need to do it another time) fn_warp_curve2straight = os.path.join(curdir, "warp_curve2straight.nii.gz") fn_warp_straight2curve = os.path.join(curdir, "warp_straight2curve.nii.gz") fn_straight_ref = os.path.join(curdir, "straight_ref.nii.gz") cache_input_files = [ftmp_seg] if level_alignment: cache_input_files += [ ftmp_template_seg, ftmp_label, ftmp_template_label, ] cache_sig = sct.cache_signature(input_files=cache_input_files, ) cachefile = os.path.join(curdir, "straightening.cache") if sct.cache_valid( cachefile, cache_sig ) and os.path.isfile(fn_warp_curve2straight) and os.path.isfile( fn_warp_straight2curve) and os.path.isfile(fn_straight_ref): sct.printv( 'Reusing existing warping field which seems to be valid', verbose, 'warning') sct.copy(fn_warp_curve2straight, 'warp_curve2straight.nii.gz') sct.copy(fn_warp_straight2curve, 'warp_straight2curve.nii.gz') sct.copy(fn_straight_ref, 'straight_ref.nii.gz') # apply straightening sct_apply_transfo.main(args=[ '-i', ftmp_seg, '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', add_suffix(ftmp_seg, '_straight') ]) else: from spinalcordtoolbox.straightening import SpinalCordStraightener sc_straight = SpinalCordStraightener(ftmp_seg, ftmp_seg) sc_straight.param_centerline = param_centerline sc_straight.output_filename = add_suffix(ftmp_seg, '_straight') sc_straight.path_output = './' sc_straight.qc = '0' sc_straight.remove_temp_files = param.remove_temp_files sc_straight.verbose = verbose if level_alignment: sc_straight.centerline_reference_filename = ftmp_template_seg sc_straight.use_straight_reference = True sc_straight.discs_input_filename = ftmp_label sc_straight.discs_ref_filename = ftmp_template_label sc_straight.straighten() sct.cache_save(cachefile, cache_sig) # N.B. DO NOT UPDATE VARIABLE ftmp_seg BECAUSE TEMPORARY USED LATER # re-define warping field using non-cropped space (to avoid issue #367) sct_concat_transfo.main(args=[ '-w', 'warp_straight2curve.nii.gz', '-d', ftmp_data, '-o', 'warp_straight2curve.nii.gz' ]) if level_alignment: sct.copy('warp_curve2straight.nii.gz', 'warp_curve2straightAffine.nii.gz') else: # Label preparation: # -------------------------------------------------------------------------------- # Remove unused label on template. Keep only label present in the input label image sct.printv( '\nRemove unused label on template. Keep only label present in the input label image...', verbose) sct.run([ 'sct_label_utils', '-i', ftmp_template_label, '-o', ftmp_template_label, '-remove-reference', ftmp_label ]) # Dilating the input label so they can be straighten without losing them sct.printv('\nDilating input labels using 3vox ball radius') sct_maths.main([ '-i', ftmp_label, '-dilate', '3', '-o', add_suffix(ftmp_label, '_dilate') ]) ftmp_label = add_suffix(ftmp_label, '_dilate') # Apply straightening to labels sct.printv('\nApply straightening to labels...', verbose) sct_apply_transfo.main(args=[ '-i', ftmp_label, '-o', add_suffix(ftmp_label, '_straight'), '-d', add_suffix(ftmp_seg, '_straight'), '-w', 'warp_curve2straight.nii.gz', '-x', 'nn' ]) ftmp_label = add_suffix(ftmp_label, '_straight') # Compute rigid transformation straight landmarks --> template landmarks sct.printv('\nEstimate transformation for step #0...', verbose) try: register_landmarks(ftmp_label, ftmp_template_label, paramregmulti.steps['0'].dof, fname_affine='straight2templateAffine.txt', verbose=verbose) except RuntimeError: raise ( 'Input labels do not seem to be at the right place. Please check the position of the labels. 
' 'See documentation for more details: https://www.slideshare.net/neuropoly/sct-course-20190121/42' ) # Concatenate transformations: curve --> straight --> affine sct.printv( '\nConcatenate transformations: curve --> straight --> affine...', verbose) sct_concat_transfo.main(args=[ '-w', ['warp_curve2straight.nii.gz', 'straight2templateAffine.txt'], '-d', 'template.nii', '-o', 'warp_curve2straightAffine.nii.gz' ]) # Apply transformation sct.printv('\nApply transformation...', verbose) sct_apply_transfo.main(args=[ '-i', ftmp_data, '-o', add_suffix(ftmp_data, '_straightAffine'), '-d', ftmp_template, '-w', 'warp_curve2straightAffine.nii.gz' ]) ftmp_data = add_suffix(ftmp_data, '_straightAffine') sct_apply_transfo.main(args=[ '-i', ftmp_seg, '-o', add_suffix(ftmp_seg, '_straightAffine'), '-d', ftmp_template, '-w', 'warp_curve2straightAffine.nii.gz', '-x', 'linear' ]) ftmp_seg = add_suffix(ftmp_seg, '_straightAffine') """ # Benjamin: Issue from Allan Martin, about the z=0 slice that is screwed up, caused by the affine transform. # Solution found: remove slices below and above landmarks to avoid rotation effects points_straight = [] for coord in landmark_template: points_straight.append(coord.z) min_point, max_point = int(np.round(np.min(points_straight))), int(np.round(np.max(points_straight))) ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_black') msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, (min_point,max_point)),))).save(ftmp_seg) """ # open segmentation im = Image(ftmp_seg) im_new = msct_image.empty_like(im) # binarize im_new.data = im.data > 0.5 # find min-max of anat2template (for subsequent cropping) zmin_template, zmax_template = msct_image.find_zmin_zmax(im_new, threshold=0.5) # save binarized segmentation im_new.save(add_suffix(ftmp_seg, '_bin')) # unused? 
# crop template in z-direction (for faster processing) # TODO: refactor to use python module instead of doing i/o sct.printv('\nCrop data in template space (for faster processing)...', verbose) ftmp_template_, ftmp_template = ftmp_template, add_suffix( ftmp_template, '_crop') msct_image.spatial_crop(Image(ftmp_template_), dict( ((2, (zmin_template, zmax_template)), ))).save(ftmp_template) ftmp_template_seg_, ftmp_template_seg = ftmp_template_seg, add_suffix( ftmp_template_seg, '_crop') msct_image.spatial_crop( Image(ftmp_template_seg_), dict(((2, (zmin_template, zmax_template)), ))).save(ftmp_template_seg) ftmp_data_, ftmp_data = ftmp_data, add_suffix(ftmp_data, '_crop') msct_image.spatial_crop(Image(ftmp_data_), dict(((2, (zmin_template, zmax_template)), ))).save(ftmp_data) ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_crop') msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, (zmin_template, zmax_template)), ))).save(ftmp_seg) # sub-sample in z-direction # TODO: refactor to use python module instead of doing i/o sct.printv('\nSub-sample in z-direction (for faster processing)...', verbose) sct.run([ 'sct_resample', '-i', ftmp_template, '-o', add_suffix(ftmp_template, '_sub'), '-f', '1x1x' + zsubsample ], verbose) ftmp_template = add_suffix(ftmp_template, '_sub') sct.run([ 'sct_resample', '-i', ftmp_template_seg, '-o', add_suffix(ftmp_template_seg, '_sub'), '-f', '1x1x' + zsubsample ], verbose) ftmp_template_seg = add_suffix(ftmp_template_seg, '_sub') sct.run([ 'sct_resample', '-i', ftmp_data, '-o', add_suffix(ftmp_data, '_sub'), '-f', '1x1x' + zsubsample ], verbose) ftmp_data = add_suffix(ftmp_data, '_sub') sct.run([ 'sct_resample', '-i', ftmp_seg, '-o', add_suffix(ftmp_seg, '_sub'), '-f', '1x1x' + zsubsample ], verbose) ftmp_seg = add_suffix(ftmp_seg, '_sub') # Registration straight spinal cord to template sct.printv('\nRegister straight spinal cord to template...', verbose) # TODO: find a way to input initwarp, corresponding to straightening warp # Set the angle of the template orientation to 0 (destination image) for key in list(paramregmulti.steps.keys()): paramregmulti.steps[key].rot_dest = 0 fname_src2dest, fname_dest2src, warp_forward, warp_inverse = register_wrapper( ftmp_data, ftmp_template, param, paramregmulti, fname_src_seg=ftmp_seg, fname_dest_seg=ftmp_template_seg, same_space=True) # Concatenate transformations: anat --> template sct.printv('\nConcatenate transformations: anat --> template...', verbose) sct_concat_transfo.main(args=[ '-w', ['warp_curve2straightAffine.nii.gz', warp_forward], '-d', 'template.nii', '-o', 'warp_anat2template.nii.gz' ]) # Concatenate transformations: template --> anat sct.printv('\nConcatenate transformations: template --> anat...', verbose) # TODO: make sure the commented code below is consistent with the new implementation # warp_inverse.reverse() if level_alignment: sct_concat_transfo.main(args=[ '-w', [warp_inverse, 'warp_straight2curve.nii.gz'], '-d', 'data.nii', '-o', 'warp_template2anat.nii.gz' ]) else: sct_concat_transfo.main(args=[ '-w', [ warp_inverse, 'straight2templateAffine.txt', 'warp_straight2curve.nii.gz' ], '-winv', ['straight2templateAffine.txt'], '-d', 'data.nii', '-o', 'warp_template2anat.nii.gz' ]) # register template->subject elif ref == 'subject': # Change orientation of input images to RPI sct.printv('\nChange orientation of input images to RPI...', verbose) ftmp_data = Image(ftmp_data).change_orientation( "RPI", generate_path=True).save().absolutepath ftmp_seg = Image(ftmp_seg).change_orientation( "RPI", 
generate_path=True).save().absolutepath ftmp_label = Image(ftmp_label).change_orientation( "RPI", generate_path=True).save().absolutepath # Remove unused label on template. Keep only label present in the input label image sct.printv( '\nRemove unused label on template. Keep only label present in the input label image...', verbose) sct.run([ 'sct_label_utils', '-i', ftmp_template_label, '-o', ftmp_template_label, '-remove-reference', ftmp_label ]) # Add one label because at least 3 orthogonal labels are required to estimate an affine transformation. This # new label is added at the level of the upper most label (lowest value), at 1cm to the right. for i_file in [ftmp_label, ftmp_template_label]: im_label = Image(i_file) coord_label = im_label.getCoordinatesAveragedByValue( ) # N.B. landmarks are sorted by value # Create new label from copy import deepcopy new_label = deepcopy(coord_label[0]) # move it 5mm to the left (orientation is RAS) nx, ny, nz, nt, px, py, pz, pt = im_label.dim new_label.x = np.round(coord_label[0].x + 5.0 / px) # assign value 99 new_label.value = 99 # Add to existing image im_label.data[int(new_label.x), int(new_label.y), int(new_label.z)] = new_label.value # Overwrite label file # im_label.absolutepath = 'label_rpi_modif.nii.gz' im_label.save() # Set the angle of the template orientation to 0 (source image) for key in list(paramregmulti.steps.keys()): paramregmulti.steps[key].rot_src = 0 fname_src2dest, fname_dest2src, warp_forward, warp_inverse = register_wrapper( ftmp_template, ftmp_data, param, paramregmulti, fname_src_seg=ftmp_template_seg, fname_dest_seg=ftmp_seg, fname_src_label=ftmp_template_label, fname_dest_label=ftmp_label, same_space=False) # Renaming for code compatibility os.rename(warp_forward, 'warp_template2anat.nii.gz') os.rename(warp_inverse, 'warp_anat2template.nii.gz') # Apply warping fields to anat and template sct.run([ 'sct_apply_transfo', '-i', 'template.nii', '-o', 'template2anat.nii.gz', '-d', 'data.nii', '-w', 'warp_template2anat.nii.gz', '-crop', '0' ], verbose) sct.run([ 'sct_apply_transfo', '-i', 'data.nii', '-o', 'anat2template.nii.gz', '-d', 'template.nii', '-w', 'warp_anat2template.nii.gz', '-crop', '0' ], verbose) # come back os.chdir(curdir) # Generate output files sct.printv('\nGenerate output files...', verbose) fname_template2anat = os.path.join(path_output, 'template2anat' + ext_data) fname_anat2template = os.path.join(path_output, 'anat2template' + ext_data) sct.generate_output_file( os.path.join(path_tmp, "warp_template2anat.nii.gz"), os.path.join(path_output, "warp_template2anat.nii.gz"), verbose) sct.generate_output_file( os.path.join(path_tmp, "warp_anat2template.nii.gz"), os.path.join(path_output, "warp_anat2template.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "template2anat.nii.gz"), fname_template2anat, verbose) sct.generate_output_file(os.path.join(path_tmp, "anat2template.nii.gz"), fname_anat2template, verbose) if ref == 'template': # copy straightening files in case subsequent SCT functions need them sct.generate_output_file( os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose) sct.generate_output_file( os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, "warp_straight2curve.nii.gz"), verbose) sct.generate_output_file( os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose) # Delete temporary files if param.remove_temp_files: sct.printv('\nDelete 
temporary files...', verbose) sct.rmtree(path_tmp, verbose=verbose) # display elapsed time elapsed_time = time.time() - start_time sct.printv( '\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) if param.path_qc is not None: generate_qc(fname_data, fname_in2=fname_template2anat, fname_seg=fname_seg, args=args, path_qc=os.path.abspath(param.path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_register_to_template') sct.display_viewer_syntax([fname_data, fname_template2anat], verbose=verbose) sct.display_viewer_syntax([fname_template, fname_anat2template], verbose=verbose)
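# When vertebral levels are not aligned, the registration pipeline above crops the segmentation to
# its non-empty extent along the z axis before straightening. A compact numpy sketch of finding that
# extent (illustrative only):
import numpy as np

def nonzero_z_extent(seg_data):
    """Return (bottom, top) indices of the first and last non-empty z-slices
    of a 3D mask (illustrative sketch of the cropping logic above)."""
    nonzero_z = np.where(np.any(seg_data, axis=(0, 1)))[0]
    if nonzero_z.size == 0:
        raise ValueError("Segmentation is empty")
    return int(nonzero_z[0]), int(nonzero_z[-1])

seg = np.zeros((5, 5, 10))
seg[2, 2, 3:7] = 1
print(nonzero_z_extent(seg))  # (3, 6)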
def run_main(): sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) # Input filename fname_input_data = arguments["-i"] fname_data = os.path.abspath(fname_input_data) # Method used method = 'optic' if "-method" in arguments: method = arguments["-method"] # Contrast type contrast_type = '' if "-c" in arguments: contrast_type = arguments["-c"] if method == 'optic' and not contrast_type: # Contrast must be specified when using the OptiC method error = 'ERROR: -c is a mandatory argument when using Optic method.' sct.printv(error, type='error') return # Gap between slices interslice_gap = 10.0 if "-gap" in arguments: interslice_gap = float(arguments["-gap"]) param_centerline = ParamCenterline( algo_fitting=arguments['-centerline-algo'], smooth=arguments['-centerline-smooth'], minmax=True) # Output folder if "-o" in arguments: file_output = arguments["-o"] else: path_data, file_data, ext_data = sct.extract_fname(fname_data) file_output = os.path.join(path_data, file_data + '_centerline') verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level if method == 'viewer': # Manual labeling of cord centerline im_labels = _call_viewer_centerline(Image(fname_data), interslice_gap=interslice_gap) else: # Automatic detection of cord centerline im_labels = Image(fname_data) param_centerline.algo_fitting = 'optic' param_centerline.contrast = contrast_type # Extrapolate and regularize (or detect if optic) cord centerline im_centerline, arr_centerline, _, _ = get_centerline(im_labels, param=param_centerline, verbose=verbose) # save centerline as nifti (discrete) and csv (continuous) files im_centerline.save(file_output + '.nii.gz') np.savetxt(file_output + '.csv', arr_centerline.transpose(), delimiter=",") sct.display_viewer_syntax([fname_input_data, file_output+'.nii.gz'], colormaps=['gray', 'red'], opacities=['', '1'])
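# The centerline above is written both as a discrete NIfTI mask and as a continuous CSV via
# np.savetxt. A small round-trip sketch of the CSV part, using a hypothetical coordinate array
# (illustration only):
import numpy as np

# Hypothetical centerline array: one (x, y, z) row per slice.
arr_centerline = np.array([[10.2, 11.0, 0.0],
                           [10.4, 11.1, 1.0],
                           [10.7, 11.3, 2.0]])

np.savetxt('centerline_example.csv', arr_centerline, delimiter=',')
reloaded = np.loadtxt('centerline_example.csv', delimiter=',')
print(np.allclose(arr_centerline, reloaded))  # True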
def main(args=None): if args is None: args = sys.argv[1:] # initialization # note: mirror servers are listed in order of priority dict_url = { 'sct_example_data': [ 'https://osf.io/kjcgs/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180525_sct_example_data.zip' ], 'sct_testing_data': [ 'https://osf.io/yutrj/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20191031_sct_testing_data.zip' ], 'PAM50': [ 'https://osf.io/u79sr/?pid=6zbyf/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20191029_PAM50.zip' ], 'MNI-Poly-AMU': [ 'https://osf.io/sh6h4/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170310_MNI-Poly-AMU.zip' ], 'gm_model': [ 'https://osf.io/ugscu/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20160922_gm_model.zip' ], 'optic_models': [ 'https://osf.io/g4fwn/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170413_optic_models.zip' ], 'pmj_models': [ 'https://osf.io/4gufr/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170922_pmj_models.zip' ], 'binaries_debian': [ 'https://osf.io/bt58d/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190930_sct_binaries_linux.tar.gz' ], 'binaries_centos': [ 'https://osf.io/8kpt4/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190930_sct_binaries_linux_centos6.tar.gz' ], 'binaries_osx': [ 'https://osf.io/msjb5/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190930_sct_binaries_osx.tar.gz' ], 'course_hawaii17': 'https://osf.io/6exht/?action=download', 'course_paris18': [ 'https://osf.io/9bmn5/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180612_sct_course-paris18.zip' ], 'course_london19': [ 'https://osf.io/4q3u7/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190121_sct_course-london19.zip' ], 'course_beijing19': [ 'https://osf.io/ef4xz/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190802_sct_course-beijing19.zip' ], 'deepseg_gm_models': [ 'https://osf.io/b9y4x/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180205_deepseg_gm_models.zip' ], 'deepseg_sc_models': [ 'https://osf.io/avf97/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180610_deepseg_sc_models.zip' ], 'deepseg_lesion_models': [ 'https://osf.io/eg7v9/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180613_deepseg_lesion_models.zip' ], 'c2c3_disc_models': [ 'https://osf.io/t97ap/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190117_c2c3_disc_models.zip' ] } # Get parser info parser = get_parser() arguments = parser.parse(args) data_name = arguments['-d'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level dest_folder = arguments.get('-o', os.path.abspath(os.curdir)) # Download data url = dict_url[data_name] tmp_file = download_data(url, verbose) # unzip dest_tmp_folder = sct.tmp_create() unzip(tmp_file, dest_tmp_folder, verbose) extracted_files_paths = [] # Get the name of the extracted files and directories extracted_files = os.listdir(dest_tmp_folder) for extracted_file in extracted_files: extracted_files_paths.append( os.path.join(os.path.abspath(dest_tmp_folder), extracted_file)) # Check if files and folder already exists sct.printv('\nCheck if folder already exists on the destination path...', verbose) for data_extracted_name in 
extracted_files: fullpath_dest = os.path.join(dest_folder, data_extracted_name) if os.path.isdir(fullpath_dest): sct.printv( "Folder {} already exists. Removing it...".format( data_extracted_name), 1, 'warning') rmtree(fullpath_dest) # Destination path # for source_path in extracted_files_paths: # Copy the content of source to destination (and create destination folder) copy_tree(dest_tmp_folder, dest_folder) sct.printv("\nRemove temporary folders...", verbose) try: rmtree(os.path.split(tmp_file)[0]) rmtree(dest_tmp_folder) except Exception as error: print("Cannot remove temp folder: " + repr(error)) sct.printv('Done!\n', verbose) return 0
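# After unzipping into a temporary folder, the content above is merged into the destination,
# removing any same-named folder first. A small standard-library sketch of that merge (illustrative
# only, not the copy_tree call used above):
import os
import shutil

def merge_into_destination(src_dir, dest_dir):
    """Copy the content of src_dir into dest_dir, replacing same-named folders.
    Illustrative sketch only."""
    os.makedirs(dest_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        target = os.path.join(dest_dir, name)
        if os.path.isdir(target):
            shutil.rmtree(target)  # mirrors the "already exists, removing it" warning above
        src = os.path.join(src_dir, name)
        if os.path.isdir(src):
            shutil.copytree(src, target)
        else:
            shutil.copy2(src, target)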
import os import sys import pytest import numpy as np from spinalcordtoolbox import __sct_dir__ sys.path.append(os.path.join(__sct_dir__, 'scripts')) from spinalcordtoolbox.centerline.core import ParamCenterline, get_centerline, find_and_sort_coord, round_and_clip from spinalcordtoolbox.image import Image from create_test_data import dummy_centerline from sct_utils import init_sct init_sct(log_level=2) # Set logger in debug mode VERBOSE = 0 # Set to 2 to save images, 0 otherwise # Generate a list of fake centerlines: (dummy_segmentation(params), dict of expected results) im_ctl_find_and_sort_coord = [ (dummy_centerline(size_arr=(41, 7, 9), subsampling=1, orientation='LPI'), None), ] im_ctl_zeroslice = [ (dummy_centerline(size_arr=(15, 7, 9), zeroslice=[0, 1], orientation='LPI'), (3, 7)), (dummy_centerline(size_arr=(15, 7, 9), zeroslice=[], orientation='LPI'), (3, 9)), ] im_centerlines = [ (dummy_centerline(size_arr=(41, 7, 9), subsampling=1, orientation='SAL'),
def main(args=None): # initializations param = Param() # check user arguments if args is None: args = sys.argv[1:] # get parser info parser = get_parser() arguments = parser.parse_args(args) param.download = int(arguments.download) param.path_data = arguments.path functions_to_test = arguments.function param.remove_tmp_file = int(arguments.remove_temps) jobs = arguments.jobs param.verbose = arguments.verbose sct.init_sct(log_level=param.verbose, update=True) # Update log level start_time = time.time() # get absolute path and add slash at the end param.path_data = os.path.abspath(param.path_data) # check existence of testing data folder if not os.path.isdir(param.path_data) or param.download: downloaddata(param) # display path to data sct.printv('\nPath to testing data: ' + param.path_data, param.verbose) # create temp folder that will have all results and go in it path_tmp = os.path.abspath(arguments.execution_folder or sct.tmp_create(verbose=param.verbose)) curdir = os.getcwd() os.chdir(path_tmp) functions_parallel = list() functions_serial = list() if functions_to_test: for f in functions_to_test: if f in get_functions_parallelizable(): functions_parallel.append(f) elif f in get_functions_nonparallelizable(): functions_serial.append(f) else: sct.printv('Command-line usage error: Function "%s" is not part of the list of testing functions' % f, type='error') jobs = min(jobs, len(functions_parallel)) else: functions_parallel = get_functions_parallelizable() functions_serial = get_functions_nonparallelizable() if arguments.continue_from: first_func = arguments.continue_from if first_func in functions_parallel: functions_serial = [] functions_parallel = functions_parallel[functions_parallel.index(first_func):] elif first_func in functions_serial: functions_serial = functions_serial[functions_serial.index(first_func):] if arguments.check_filesystem and jobs != 1: print("Check filesystem used -> jobs forced to 1") jobs = 1 print("Will run through the following tests:") if functions_serial: print("- sequentially: {}".format(" ".join(functions_serial))) if functions_parallel: print("- in parallel with {} jobs: {}".format(jobs, " ".join(functions_parallel))) list_status = [] for name, functions in ( ("serial", functions_serial), ("parallel", functions_parallel), ): if not functions: continue if any([s for (f, s) in list_status]) and arguments.abort_on_failure: break try: if functions == functions_parallel and jobs != 1: pool = multiprocessing.Pool(processes=jobs) results = list() # loop across functions and run tests for f in functions: func_param = copy.deepcopy(param) func_param.path_output = f res = pool.apply_async(process_function_multiproc, (f, func_param,)) results.append(res) else: pool = None for idx_function, f in enumerate(functions): print_line('Checking ' + f) if functions == functions_serial or jobs == 1: if arguments.check_filesystem: if os.path.exists(os.path.join(path_tmp, f)): shutil.rmtree(os.path.join(path_tmp, f)) sig_0 = fs_signature(path_tmp) func_param = copy.deepcopy(param) func_param.path_output = f res = process_function(f, func_param) if arguments.check_filesystem: sig_1 = fs_signature(path_tmp) fs_ok(sig_0, sig_1, exclude=(f,)) else: res = results[idx_function].get() list_output, list_status_function = res # manage status if any(list_status_function): if 1 in list_status_function: print_fail() status = (f, 1) else: print_warning() status = (f, 99) for output in list_output: for line in output.splitlines(): print(" %s" % line) else: print_ok() if param.verbose: for 
output in list_output: for line in output.splitlines(): print(" %s" % line) status = (f, 0) # append status function to global list of status list_status.append(status) if any([s for (f, s) in list_status]) and arguments.abort_on_failure: break except KeyboardInterrupt: raise finally: if pool: pool.terminate() pool.join() print('status: ' + str([s for (f, s) in list_status])) if any([s for (f, s) in list_status]): print("Failures: {}".format(" ".join([f for (f, s) in list_status if s]))) # display elapsed time elapsed_time = time.time() - start_time sct.printv('Finished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n') # come back os.chdir(curdir) # remove temp files if param.remove_tmp_file and arguments.execution_folder is None: sct.printv('\nRemove temporary files...', 0) sct.rmtree(path_tmp) e = 0 if any([s for (f, s) in list_status]): e = 1 # print(e) sys.exit(e)
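# Illustrative sketch (not from the original script) of the dispatch pattern
# used above: non-parallelizable functions run in a plain loop, while
# parallelizable ones are submitted to a multiprocessing.Pool with
# apply_async and their (name, status) results are collected afterwards.
# The toy _check() stands in for process_function(); names are hypothetical.
import multiprocessing


def _check(name):
    # pretend to test one function; return (name, status), 0 meaning success
    return (name, 0)


def _run_tests(serial, parallel, jobs=2):
    statuses = [_check(f) for f in serial]  # sequential part
    with multiprocessing.Pool(processes=jobs) as pool:
        results = [pool.apply_async(_check, (f,)) for f in parallel]
        statuses += [r.get() for r in results]  # blocks until all workers finish
    return statuses

# e.g. _run_tests(['func_a'], ['func_b', 'func_c'], jobs=2) -> [('func_a', 0), ('func_b', 0), ('func_c', 0)]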
def propseg(img_input, options_dict): """ :param img_input: source image, to be segmented :param options_dict: arguments as dictionary :return: segmented Image """ arguments = options_dict fname_input_data = img_input.absolutepath fname_data = os.path.abspath(fname_input_data) contrast_type = arguments["-c"] contrast_type_conversion = { 't1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1' } contrast_type_propseg = contrast_type_conversion[contrast_type] # Starting building the command cmd = ['isct_propseg', '-t', contrast_type_propseg] if "-ofolder" in arguments: folder_output = arguments["-ofolder"] else: folder_output = './' cmd += ['-o', folder_output] if not os.path.isdir(folder_output) and os.path.exists(folder_output): logger.error("output directory %s is not a valid directory" % folder_output) if not os.path.exists(folder_output): os.makedirs(folder_output) if "-down" in arguments: cmd += ["-down", str(arguments["-down"])] if "-up" in arguments: cmd += ["-up", str(arguments["-up"])] remove_temp_files = 1 if "-r" in arguments: remove_temp_files = int(arguments["-r"]) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # Update for propseg binary if verbose > 0: cmd += ["-verbose"] # Output options if "-mesh" in arguments: cmd += ["-mesh"] if "-centerline-binary" in arguments: cmd += ["-centerline-binary"] if "-CSF" in arguments: cmd += ["-CSF"] if "-centerline-coord" in arguments: cmd += ["-centerline-coord"] if "-cross" in arguments: cmd += ["-cross"] if "-init-tube" in arguments: cmd += ["-init-tube"] if "-low-resolution-mesh" in arguments: cmd += ["-low-resolution-mesh"] if "-detect-nii" in arguments: cmd += ["-detect-nii"] if "-detect-png" in arguments: cmd += ["-detect-png"] # Helping options use_viewer = None use_optic = True # enabled by default init_option = None rescale_header = arguments["-rescale"] if "-init" in arguments: init_option = float(arguments["-init"]) if init_option < 0: sct.printv( 'Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error') sys.exit(1) if "-init-centerline" in arguments: if str(arguments["-init-centerline"]) == "viewer": use_viewer = "centerline" elif str(arguments["-init-centerline"]) == "hough": use_optic = False else: if rescale_header is not 1: fname_labels_viewer = func_rescale_header(str( arguments["-init-centerline"]), rescale_header, verbose=verbose) else: fname_labels_viewer = str(arguments["-init-centerline"]) cmd += ["-init-centerline", fname_labels_viewer] use_optic = False if "-init-mask" in arguments: if str(arguments["-init-mask"]) == "viewer": use_viewer = "mask" else: if rescale_header is not 1: fname_labels_viewer = func_rescale_header( str(arguments["-init-mask"]), rescale_header) else: fname_labels_viewer = str(arguments["-init-mask"]) cmd += ["-init-mask", fname_labels_viewer] use_optic = False if "-mask-correction" in arguments: cmd += ["-mask-correction", str(arguments["-mask-correction"])] if "-radius" in arguments: cmd += ["-radius", str(arguments["-radius"])] if "-detect-n" in arguments: cmd += ["-detect-n", str(arguments["-detect-n"])] if "-detect-gap" in arguments: cmd += ["-detect-gap", str(arguments["-detect-gap"])] if "-init-validation" in arguments: cmd += ["-init-validation"] if "-nbiter" in arguments: cmd += ["-nbiter", str(arguments["-nbiter"])] if "-max-area" in arguments: cmd += ["-max-area", str(arguments["-max-area"])] if "-max-deformation" in arguments: cmd += ["-max-deformation", str(arguments["-max-deformation"])] if 
"-min-contrast" in arguments: cmd += ["-min-contrast", str(arguments["-min-contrast"])] if "-d" in arguments: cmd += ["-d", str(arguments["-d"])] if "-distance-search" in arguments: cmd += ["-dsearch", str(arguments["-distance-search"])] if "-alpha" in arguments: cmd += ["-alpha", str(arguments["-alpha"])] # check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one. image_input = Image(fname_data) image_input_rpi = image_input.copy().change_orientation('RPI') nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim if nt > 1: sct.printv( 'ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error') path_data, file_data, ext_data = sct.extract_fname(fname_data) path_tmp = sct.tmp_create(basename="label_vertebrae", verbose=verbose) # rescale header (see issue #1406) if rescale_header is not 1: fname_data_propseg = func_rescale_header(fname_data, rescale_header) else: fname_data_propseg = fname_data # add to command cmd += ['-i', fname_data_propseg] # if centerline or mask is asked using viewer if use_viewer: from spinalcordtoolbox.gui.base import AnatomicalParams from spinalcordtoolbox.gui.centerline import launch_centerline_dialog params = AnatomicalParams() if use_viewer == 'mask': params.num_points = 3 params.interval_in_mm = 15 # superior-inferior interval between two consecutive labels params.starting_slice = 'midfovminusinterval' if use_viewer == 'centerline': # setting maximum number of points to a reasonable value params.num_points = 20 params.interval_in_mm = 30 params.starting_slice = 'top' im_data = Image(fname_data_propseg) im_mask_viewer = msct_image.zeros_like(im_data) # im_mask_viewer.absolutepath = sct.add_suffix(fname_data_propseg, '_labels_viewer') controller = launch_centerline_dialog(im_data, im_mask_viewer, params) fname_labels_viewer = sct.add_suffix(fname_data_propseg, '_labels_viewer') if not controller.saved: sct.printv( 'The viewer has been closed before entering all manual points. Please try again.', 1, 'error') sys.exit(1) # save labels controller.as_niftii(fname_labels_viewer) # add mask filename to parameters string if use_viewer == "centerline": cmd += ["-init-centerline", fname_labels_viewer] elif use_viewer == "mask": cmd += ["-init-mask", fname_labels_viewer] # If using OptiC elif use_optic: image_centerline = optic.detect_centerline(image_input, contrast_type, verbose) fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz') image_centerline.save(fname_centerline_optic) cmd += ["-init-centerline", fname_centerline_optic] if init_option is not None: if init_option > 1: init_option /= (nz - 1) cmd += ['-init', str(init_option)] # enabling centerline extraction by default (needed by check_and_correct_segmentation() ) cmd += ['-centerline-binary'] # run propseg status, output = sct.run(cmd, verbose, raise_exception=False, is_sct_binary=True) # check status is not 0 if not status == 0: sct.printv( 'Automatic cord detection failed. 
Please initialize using -init-centerline or -init-mask (see help)', 1, 'error') sys.exit(1) # build output filename fname_seg = os.path.join( folder_output, os.path.basename(sct.add_suffix(fname_data, "_seg"))) fname_centerline = os.path.join( folder_output, os.path.basename(sct.add_suffix(fname_data, "_centerline"))) # in case header was rescaled, we need to update the output file names by removing the "_rescaled" if rescale_header is not 1: sct.mv( os.path.join( folder_output, sct.add_suffix(os.path.basename(fname_data_propseg), "_seg")), fname_seg) sct.mv( os.path.join( folder_output, sct.add_suffix(os.path.basename(fname_data_propseg), "_centerline")), fname_centerline) # if user was used, copy the labelled points to the output folder (they will then be scaled back) if use_viewer: fname_labels_viewer_new = os.path.join( folder_output, os.path.basename(sct.add_suffix(fname_data, "_labels_viewer"))) sct.copy(fname_labels_viewer, fname_labels_viewer_new) # update variable (used later) fname_labels_viewer = fname_labels_viewer_new # check consistency of segmentation if arguments["-correct-seg"] == "1": check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output, threshold_distance=3.0, remove_temp_files=remove_temp_files, verbose=verbose) # copy header from input to segmentation to make sure qform is the same sct.printv( "Copy header input --> output(s) to make sure qform is the same.", verbose) list_fname = [fname_seg, fname_centerline] if use_viewer: list_fname.append(fname_labels_viewer) for fname in list_fname: im = Image(fname) im.header = image_input.header im.save(dtype='int8' ) # they are all binary masks hence fine to save as int8 return Image(fname_seg)
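# Illustrative sketch (not from the original script) of the command-composition
# pattern used in propseg() above: optional flags are appended to a list only
# when they were provided, and the list is then executed as an external call.
# The original uses sct.run() on the 'isct_propseg' binary; subprocess is shown
# here purely for illustration, and _build_propseg_cmd is a hypothetical helper.
# The option names are taken from the arguments parsed above.
import subprocess


def _build_propseg_cmd(fname_in, contrast, folder_output='./', options=None):
    cmd = ['isct_propseg', '-t', contrast, '-o', folder_output, '-i', fname_in]
    for flag, value in (options or {}).items():
        # boolean flags are passed alone; valued flags carry their argument
        cmd += [flag] if value is True else [flag, str(value)]
    return cmd

# e.g. cmd = _build_propseg_cmd('t2.nii.gz', 't2', options={'-radius': 4, '-CSF': True})
# could then be run with subprocess.run(cmd, check=True)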
                          help="Remove temporary files.",
                          required=False,
                          default=1,
                          choices=(0, 1))
    optional.add_argument('-v',
                          type=int,
                          help='Verbose.',
                          required=False,
                          default=1,
                          choices=(0, 1))
    return parser


if __name__ == "__main__":
    sct.init_sct()
    parser = get_parser()
    arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
    fname_input1 = arguments.i
    fname_input2 = arguments.d
    verbose = arguments.v
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    tmp_dir = sct.tmp_create(verbose=verbose)  # create tmp directory
    tmp_dir = os.path.abspath(tmp_dir)

    # copy input files to tmp directory
    # for fname in [fname_input1, fname_input2]:
    sct.copy(fname_input1, tmp_dir)
def main(args=None): # initializations param = Param() # check user arguments if args is None: args = sys.argv[1:] # get parser info parser = get_parser() arguments = parser.parse_args(args) param.download = int(arguments.download) param.path_data = arguments.path functions_to_test = arguments.function param.remove_tmp_file = int(arguments.remove_temps) jobs = arguments.jobs param.verbose = arguments.verbose sct.init_sct(log_level=param.verbose, update=True) # Update log level start_time = time.time() # get absolute path and add slash at the end param.path_data = os.path.abspath(param.path_data) # check existence of testing data folder if not os.path.isdir(param.path_data) or param.download: downloaddata(param) # display path to data sct.printv('\nPath to testing data: ' + param.path_data, param.verbose) # create temp folder that will have all results path_tmp = os.path.abspath(arguments.execution_folder or sct.tmp_create(verbose=param.verbose)) # go in path data (where all scripts will be run) curdir = os.getcwd() os.chdir(param.path_data) functions_parallel = list() functions_serial = list() if functions_to_test: for f in functions_to_test: if f in get_functions_parallelizable(): functions_parallel.append(f) elif f in get_functions_nonparallelizable(): functions_serial.append(f) else: sct.printv( 'Command-line usage error: Function "%s" is not part of the list of testing functions' % f, type='error') jobs = min(jobs, len(functions_parallel)) else: functions_parallel = get_functions_parallelizable() functions_serial = get_functions_nonparallelizable() if arguments.continue_from: first_func = arguments.continue_from if first_func in functions_parallel: functions_serial = [] functions_parallel = functions_parallel[functions_parallel. index(first_func):] elif first_func in functions_serial: functions_serial = functions_serial[functions_serial. 
index(first_func):] if arguments.check_filesystem and jobs != 1: print("Check filesystem used -> jobs forced to 1") jobs = 1 print("Will run through the following tests:") if functions_serial: print("- sequentially: {}".format(" ".join(functions_serial))) if functions_parallel: print("- in parallel with {} jobs: {}".format( jobs, " ".join(functions_parallel))) list_status = [] for name, functions in ( ("serial", functions_serial), ("parallel", functions_parallel), ): if not functions: continue if any([s for (f, s) in list_status]) and arguments.abort_on_failure: break try: if functions == functions_parallel and jobs != 1: pool = multiprocessing.Pool(processes=jobs) results = list() # loop across functions and run tests for f in functions: func_param = copy.deepcopy(param) func_param.path_output = f res = pool.apply_async(process_function_multiproc, ( f, func_param, )) results.append(res) else: pool = None for idx_function, f in enumerate(functions): print_line('Checking ' + f) if functions == functions_serial or jobs == 1: if arguments.check_filesystem: if os.path.exists(os.path.join(path_tmp, f)): shutil.rmtree(os.path.join(path_tmp, f)) sig_0 = fs_signature(path_tmp) func_param = copy.deepcopy(param) func_param.path_output = f res = process_function(f, func_param) if arguments.check_filesystem: sig_1 = fs_signature(path_tmp) fs_ok(sig_0, sig_1, exclude=(f, )) else: res = results[idx_function].get() list_output, list_status_function = res # manage status if any(list_status_function): if 1 in list_status_function: print_fail() status = (f, 1) else: print_warning() status = (f, 99) for output in list_output: for line in output.splitlines(): print(" %s" % line) else: print_ok() if param.verbose: for output in list_output: for line in output.splitlines(): print(" %s" % line) status = (f, 0) # append status function to global list of status list_status.append(status) if any([s for (f, s) in list_status ]) and arguments.abort_on_failure: break except KeyboardInterrupt: raise finally: if pool: pool.terminate() pool.join() print('status: ' + str([s for (f, s) in list_status])) if any([s for (f, s) in list_status]): print("Failures: {}".format(" ".join( [f for (f, s) in list_status if s]))) # display elapsed time elapsed_time = time.time() - start_time sct.printv('Finished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n') # come back os.chdir(curdir) # remove temp files if param.remove_tmp_file and arguments.execution_folder is None: sct.printv('\nRemove temporary files...', 0) sct.rmtree(path_tmp) e = 0 if any([s for (f, s) in list_status]): e = 1 # print(e) sys.exit(e)
def main(args=None): if args is None: args = sys.argv[1:] # initialize parameters param = Param() # Initialization fname_output = '' path_out = '' fname_src_seg = '' fname_dest_seg = '' fname_src_label = '' fname_dest_label = '' generate_warpinv = 1 start_time = time.time() # get default registration parameters # step1 = Paramreg(step='1', type='im', algo='syn', metric='MI', iter='5', shrink='1', smooth='0', gradStep='0.5') step0 = Paramreg( step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5', slicewise='0', dof='Tx_Ty_Tz_Rx_Ry_Rz') # only used to put src into dest space step1 = Paramreg(step='1', type='im') paramreg = ParamregMultiStep([step0, step1]) parser = get_parser(paramreg=paramreg) arguments = parser.parse(args) # get arguments fname_src = arguments['-i'] fname_dest = arguments['-d'] if '-iseg' in arguments: fname_src_seg = arguments['-iseg'] if '-dseg' in arguments: fname_dest_seg = arguments['-dseg'] if '-ilabel' in arguments: fname_src_label = arguments['-ilabel'] if '-dlabel' in arguments: fname_dest_label = arguments['-dlabel'] if '-o' in arguments: fname_output = arguments['-o'] if '-ofolder' in arguments: path_out = arguments['-ofolder'] if '-owarp' in arguments: fname_output_warp = arguments['-owarp'] else: fname_output_warp = '' if '-initwarp' in arguments: fname_initwarp = os.path.abspath(arguments['-initwarp']) else: fname_initwarp = '' if '-initwarpinv' in arguments: fname_initwarpinv = os.path.abspath(arguments['-initwarpinv']) else: fname_initwarpinv = '' if '-m' in arguments: fname_mask = arguments['-m'] else: fname_mask = '' padding = arguments['-z'] if "-param" in arguments: paramreg_user = arguments['-param'] # update registration parameters for paramStep in paramreg_user: paramreg.addStep(paramStep) path_qc = arguments.get("-qc", None) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) identity = int(arguments['-identity']) interp = arguments['-x'] remove_temp_files = int(arguments['-r']) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # sct.printv(arguments) sct.printv('\nInput parameters:') sct.printv(' Source .............. ' + fname_src) sct.printv(' Destination ......... ' + fname_dest) sct.printv(' Init transfo ........ ' + fname_initwarp) sct.printv(' Mask ................ ' + fname_mask) sct.printv(' Output name ......... ' + fname_output) # sct.printv(' Algorithm ........... '+paramreg.algo) # sct.printv(' Number of iterations '+paramreg.iter) # sct.printv(' Metric .............. '+paramreg.metric) sct.printv(' Remove temp files ... ' + str(remove_temp_files)) sct.printv(' Verbose ............. 
' + str(verbose)) # update param param.verbose = verbose param.padding = padding param.fname_mask = fname_mask param.remove_temp_files = remove_temp_files # Get if input is 3D sct.printv('\nCheck if input data are 3D...', verbose) sct.check_if_3d(fname_src) sct.check_if_3d(fname_dest) # Check if user selected type=seg, but did not input segmentation data if 'paramreg_user' in locals(): if True in [ 'type=seg' in paramreg_user[i] for i in range(len(paramreg_user)) ]: if fname_src_seg == '' or fname_dest_seg == '': sct.printv( '\nERROR: if you select type=seg you must specify -iseg and -dseg flags.\n', 1, 'error') # Extract path, file and extension path_src, file_src, ext_src = sct.extract_fname(fname_src) path_dest, file_dest, ext_dest = sct.extract_fname(fname_dest) # check if source and destination images have the same name (related to issue #373) # If so, change names to avoid conflict of result files and warns the user suffix_src, suffix_dest = '_reg', '_reg' if file_src == file_dest: suffix_src, suffix_dest = '_src_reg', '_dest_reg' # define output folder and file name if fname_output == '': path_out = '' if not path_out else path_out # output in user's current directory file_out = file_src + suffix_src file_out_inv = file_dest + suffix_dest ext_out = ext_src else: path, file_out, ext_out = sct.extract_fname(fname_output) path_out = path if not path_out else path_out file_out_inv = file_out + '_inv' # create temporary folder path_tmp = sct.tmp_create() sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) Image(fname_src).save(os.path.join(path_tmp, "src.nii")) Image(fname_dest).save(os.path.join(path_tmp, "dest.nii")) if fname_src_seg: Image(fname_src_seg).save(os.path.join(path_tmp, "src_seg.nii")) if fname_dest_seg: Image(fname_dest_seg).save(os.path.join(path_tmp, "dest_seg.nii")) if fname_src_label: Image(fname_src_label).save(os.path.join(path_tmp, "src_label.nii")) Image(fname_dest_label).save(os.path.join(path_tmp, "dest_label.nii")) if fname_mask != '': Image(fname_mask).save(os.path.join(path_tmp, "mask.nii.gz")) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # reorient destination to RPI Image('dest.nii').change_orientation("RPI").save('dest_RPI.nii') if fname_dest_seg: Image('dest_seg.nii').change_orientation("RPI").save( 'dest_seg_RPI.nii') if fname_dest_label: Image('dest_label.nii').change_orientation("RPI").save( 'dest_label_RPI.nii') if identity: # overwrite paramreg and only do one identity transformation step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5') paramreg = ParamregMultiStep([step0]) # Put source into destination space using header (no estimation -- purely based on header) # TODO: Check if necessary to do that # TODO: use that as step=0 # sct.printv('\nPut source into destination space using header...', verbose) # sct.run('isct_antsRegistration -d 3 -t Translation[0] -m MI[dest_pad.nii,src.nii,1,16] -c 0 -f 1 -s 0 -o # [regAffine,src_regAffine.nii] -n BSpline[3]', verbose) # if segmentation, also do it for seg # initialize list of warping fields warp_forward = [] warp_forward_winv = [] warp_inverse = [] warp_inverse_winv = [] # initial warping is specified, update list of warping fields and skip step=0 if fname_initwarp: sct.printv('\nSkip step=0 and replace with initial transformations: ', param.verbose) sct.printv(' ' + fname_initwarp, param.verbose) # sct.copy(fname_initwarp, 'warp_forward_0.nii.gz') warp_forward = [fname_initwarp] start_step = 1 if 
fname_initwarpinv: warp_inverse = [fname_initwarpinv] else: sct.printv( '\nWARNING: No initial inverse warping field was specified, therefore the inverse warping field ' 'will NOT be generated.', param.verbose, 'warning') generate_warpinv = 0 else: start_step = 0 # loop across registration steps for i_step in range(start_step, len(paramreg.steps)): sct.printv('\n--\nESTIMATE TRANSFORMATION FOR STEP #' + str(i_step), param.verbose) # identify which is the src and dest if paramreg.steps[str(i_step)].type == 'im': src = 'src.nii' dest = 'dest_RPI.nii' interp_step = 'spline' elif paramreg.steps[str(i_step)].type == 'seg': src = 'src_seg.nii' dest = 'dest_seg_RPI.nii' interp_step = 'nn' elif paramreg.steps[str(i_step)].type == 'label': src = 'src_label.nii' dest = 'dest_label_RPI.nii' interp_step = 'nn' else: # src = dest = interp_step = None sct.printv('ERROR: Wrong image type.', 1, 'error') # if step>0, apply warp_forward_concat to the src image to be used if i_step > 0: sct.printv('\nApply transformation from previous step', param.verbose) sct_apply_transfo.main(args=[ '-i', src, '-d', dest, '-w', warp_forward, '-o', sct.add_suffix(src, '_reg'), '-x', interp_step ]) src = sct.add_suffix(src, '_reg') # register src --> dest warp_forward_out, warp_inverse_out = register(src, dest, paramreg, param, str(i_step)) # deal with transformations with "-" as prefix. They should be inverted with calling sct_concat_transfo. if warp_forward_out[0] == "-": warp_forward_out = warp_forward_out[1:] warp_forward_winv.append(warp_forward_out) if warp_inverse_out[0] == "-": warp_inverse_out = warp_inverse_out[1:] warp_inverse_winv.append(warp_inverse_out) # update list of forward/inverse transformations warp_forward.append(warp_forward_out) warp_inverse.insert(0, warp_inverse_out) # Concatenate transformations sct.printv('\nConcatenate transformations...', verbose) sct_concat_transfo.main(args=[ '-w', warp_forward, '-winv', warp_forward_winv, '-d', 'dest.nii', '-o', 'warp_src2dest.nii.gz' ]) sct_concat_transfo.main(args=[ '-w', warp_inverse, '-winv', warp_inverse_winv, '-d', 'src.nii', '-o', 'warp_dest2src.nii.gz' ]) # Apply warping field to src data sct.printv('\nApply transfo source --> dest...', verbose) sct_apply_transfo.main(args=[ '-i', 'src.nii', '-d', 'dest.nii', '-w', 'warp_src2dest.nii.gz', '-o', 'src_reg.nii', '-x', interp ]) sct.printv('\nApply transfo dest --> source...', verbose) sct_apply_transfo.main(args=[ '-i', 'dest.nii', '-d', 'src.nii', '-w', 'warp_dest2src.nii.gz', '-o', 'dest_reg.nii', '-x', interp ]) # come back os.chdir(curdir) # Generate output files sct.printv('\nGenerate output files...', verbose) # generate: src_reg fname_src2dest = sct.generate_output_file( os.path.join(path_tmp, "src_reg.nii"), os.path.join(path_out, file_out + ext_out), verbose) # generate: forward warping field if fname_output_warp == '': fname_output_warp = os.path.join( path_out, 'warp_' + file_src + '2' + file_dest + '.nii.gz') sct.generate_output_file(os.path.join(path_tmp, "warp_src2dest.nii.gz"), fname_output_warp, verbose) if generate_warpinv: # generate: dest_reg fname_dest2src = sct.generate_output_file( os.path.join(path_tmp, "dest_reg.nii"), os.path.join(path_out, file_out_inv + ext_dest), verbose) # generate: inverse warping field sct.generate_output_file( os.path.join(path_tmp, "warp_dest2src.nii.gz"), os.path.join(path_out, 'warp_' + file_dest + '2' + file_src + '.nii.gz'), verbose) # Delete temporary files if remove_temp_files: sct.printv('\nRemove temporary files...', verbose) sct.rmtree(path_tmp, 
verbose=verbose) # display elapsed time elapsed_time = time.time() - start_time sct.printv( '\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose) if path_qc is not None: if fname_dest_seg: generate_qc(fname_src2dest, fname_in2=fname_dest, fname_seg=fname_dest_seg, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_register_multimodal') else: sct.printv( 'WARNING: Cannot generate QC because it requires destination segmentation.', 1, 'warning') if generate_warpinv: sct.display_viewer_syntax([fname_src, fname_dest2src], verbose=verbose) sct.display_viewer_syntax([fname_dest, fname_src2dest], verbose=verbose)
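# Illustrative sketch (not from the original script) of the warping-field
# bookkeeping used above: forward warps are appended in estimation order,
# while inverse warps are prepended, so the concatenated inverse chain is
# applied in the reverse order of the forward chain. Names are hypothetical.
def _collect_warps(n_steps):
    warp_forward, warp_inverse = [], []
    for i in range(n_steps):
        warp_forward.append('warp_forward_{}.nii.gz'.format(i))
        warp_inverse.insert(0, 'warp_inverse_{}.nii.gz'.format(i))
    return warp_forward, warp_inverse

# _collect_warps(3) -> forward: [step0, step1, step2], inverse: [step2, step1, step0]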
def main(args=None):
    # initialization
    start_time = time.time()
    param = Param()
    path_out = '.'  # default output folder: current directory (overridden by -ofolder)

    # check user arguments
    if not args:
        args = sys.argv[1:]

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(args)  # parse the args passed to main(), not sys.argv directly
    param.fname_data = arguments['-i']
    if '-g' in arguments:
        param.group_size = arguments['-g']
    if '-m' in arguments:
        param.fname_mask = arguments['-m']
    if '-param' in arguments:
        param.update(arguments['-param'])
    if '-x' in arguments:
        param.interp = arguments['-x']
    if '-ofolder' in arguments:
        path_out = arguments['-ofolder']
    if '-r' in arguments:
        param.remove_temp_files = int(arguments['-r'])
    param.verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=param.verbose, update=True)  # Update log level

    sct.printv('\nInput parameters:', param.verbose)
    sct.printv(' input file ............' + param.fname_data, param.verbose)

    # Get full path
    param.fname_data = os.path.abspath(param.fname_data)
    if param.fname_mask != '':
        param.fname_mask = os.path.abspath(param.fname_mask)

    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(param.fname_data)

    path_tmp = sct.tmp_create(basename="fmri_moco", verbose=param.verbose)

    # Copying input data to tmp folder and convert to nii
    # TODO: no need to do that (takes time for nothing)
    sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
    convert(param.fname_data, os.path.join(path_tmp, "fmri.nii"), squeeze_data=False)

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # run moco
    fmri_moco(param)

    # come back
    os.chdir(curdir)

    # Generate output files
    fname_fmri_moco = os.path.join(path_out, file_data + param.suffix + ext_data)
    sct.create_folder(path_out)
    sct.printv('\nGenerate output files...', param.verbose)
    sct.generate_output_file(os.path.join(path_tmp, "fmri" + param.suffix + '.nii'), fname_fmri_moco, param.verbose)
    sct.generate_output_file(os.path.join(path_tmp, "fmri" + param.suffix + '_mean.nii'),
                             os.path.join(path_out, file_data + param.suffix + '_mean' + ext_data), param.verbose)

    # Delete temporary files
    if param.remove_temp_files == 1:
        sct.printv('\nDelete temporary files...', param.verbose)
        sct.rmtree(path_tmp, verbose=param.verbose)

    # display elapsed time
    elapsed_time = time.time() - start_time
    sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', param.verbose)

    sct.display_viewer_syntax([fname_fmri_moco, file_data], mode='ortho,ortho')
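# Illustrative sketch (not from the original script): param.group_size ('-g')
# above controls how many consecutive volumes are grouped together during
# motion correction; the grouping itself happens inside fmri_moco(), which is
# not shown here. A minimal NumPy illustration of that kind of grouping,
# assuming a 4D array with time as the last axis (names are hypothetical):
import numpy as np


def _group_volumes(data_4d, group_size):
    n_vol = data_4d.shape[3]
    groups = [data_4d[..., i:i + group_size] for i in range(0, n_vol, group_size)]
    # one mean image per group (the last group may hold fewer volumes)
    return [g.mean(axis=3) for g in groups]

# _group_volumes(np.random.rand(4, 4, 3, 10), group_size=3) -> 4 mean volumes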
def main():
    # Default params
    param = Param()

    # Get parser info
    parser = get_parser()
    arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
    fname_data = arguments.i
    if arguments.m is not None:
        fname_mask = arguments.m
    else:
        fname_mask = ''
    method = arguments.method
    if arguments.vol is not None:
        index_vol_user = arguments.vol
    else:
        index_vol_user = ''
    verbose = arguments.v
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # Check parameters
    if method == 'diff':
        if not fname_mask:
            sct.printv('You need to provide a mask with -method diff. Exit.', 1, type='error')

    # Load data and orient to RPI
    im_data = Image(fname_data).change_orientation('RPI')
    data = im_data.data
    if fname_mask:
        mask = Image(fname_mask).change_orientation('RPI').data

    # Retrieve selected volumes
    if index_vol_user:
        index_vol = parse_num_list(index_vol_user)
    else:
        index_vol = range(data.shape[3])

    # Make sure user selected 2 volumes with diff method
    if method == 'diff':
        if not len(index_vol) == 2:
            sct.printv('Method "diff" should be used with exactly two volumes (specify with flag "-vol").', 1, 'error')

    # Compute SNR
    # NB: "time" is assumed to be the 4th dimension of the variable "data"
    if method == 'mult':
        # Compute mean and STD across time
        data_mean = np.mean(data[:, :, :, index_vol], axis=3)
        data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
        # Generate mask where std is different from 0
        mask_std_nonzero = np.where(data_std > param.almost_zero)
        snr_map = np.zeros_like(data_mean)
        snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
        # Output SNR map
        fname_snr = sct.add_suffix(fname_data, '_SNR-' + method)
        im_snr = empty_like(im_data)
        im_snr.data = snr_map
        im_snr.save(fname_snr, dtype=np.float32)
        # Output non-zero mask
        fname_stdnonzero = sct.add_suffix(fname_data, '_mask-STD-nonzero' + method)
        im_stdnonzero = empty_like(im_data)
        data_stdnonzero = np.zeros_like(data_mean)
        data_stdnonzero[mask_std_nonzero] = 1
        im_stdnonzero.data = data_stdnonzero
        im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
        # Compute SNR in ROI
        if fname_mask:
            mean_in_roi = np.average(data_mean[mask_std_nonzero], weights=mask[mask_std_nonzero])
            std_in_roi = np.average(data_std[mask_std_nonzero], weights=mask[mask_std_nonzero])
            snr_roi = mean_in_roi / std_in_roi
            # snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])

    elif method == 'diff':
        data_2vol = np.take(data, index_vol, axis=3)
        # Compute mean in ROI
        data_mean = np.mean(data_2vol, axis=3)
        mean_in_roi = np.average(data_mean, weights=mask)
        data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
        _, std_in_roi = weighted_avg_and_std(data_sub, mask)
        # Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
        snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi

    # Display result
    if fname_mask:
        sct.printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
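# Worked numerical sketch (not from the original script) of the two SNR
# estimators used above, on synthetic data: 'mult' is mean/std across repeated
# volumes; 'diff' is the mean of two volumes divided by the std of their
# difference, scaled by 2/sqrt(2) to account for the noise added by the
# subtraction (Dietrich et al., eq. 7, as referenced in the code above).
# Array shapes and values here are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
signal, sigma = 100.0, 5.0
data = signal + sigma * rng.standard_normal((8, 8, 4, 20))  # x, y, z, t
roi = np.ones(data.shape[:3], dtype=bool)                   # whole-volume ROI

# method 'mult': voxel-wise mean/std across time, averaged over the ROI
snr_mult = (data.mean(axis=3) / data.std(axis=3, ddof=1))[roi].mean()

# method 'diff' on the first two volumes
v0, v1 = data[..., 0], data[..., 1]
mean_in_roi = ((v0 + v1) / 2.0)[roi].mean()
std_in_roi = (v1 - v0)[roi].std(ddof=1)
snr_diff = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi

# both estimates should be close to signal / sigma = 20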
def main(args=None):
    # check user arguments
    if not args:
        args = sys.argv[1:]

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(args)
    input_filename = arguments['-i']
    input_fname_output = None
    input_fname_ref = None
    input_cross_radius = 5
    input_dilate = False
    input_coordinates = None
    vertebral_levels = None
    value = None

    if '-add' in arguments:
        process_type = 'add'
        value = arguments['-add']
    elif '-create' in arguments:
        process_type = 'create'
        input_coordinates = arguments['-create']
    elif '-create-add' in arguments:
        process_type = 'create-add'
        input_coordinates = arguments['-create-add']
    elif '-create-seg' in arguments:
        process_type = 'create-seg'
        input_coordinates = arguments['-create-seg']
    elif '-cross' in arguments:
        process_type = 'cross'
        input_cross_radius = arguments['-cross']
    elif '-cubic-to-point' in arguments:
        process_type = 'cubic-to-point'
    elif '-display' in arguments:
        process_type = 'display-voxel'
    elif '-increment' in arguments:
        process_type = 'increment'
    elif '-vert-body' in arguments:
        process_type = 'vert-body'
        vertebral_levels = arguments['-vert-body']
    # elif '-vert-disc' in arguments:
    #     process_type = 'vert-disc'
    #     vertebral_levels = arguments['-vert-disc']
    elif '-vert-continuous' in arguments:
        process_type = 'vert-continuous'
    elif '-MSE' in arguments:
        process_type = 'MSE'
        input_fname_ref = arguments['-r']
    elif '-remove-reference' in arguments:
        process_type = 'remove-reference'
        input_fname_ref = arguments['-remove-reference']
    elif '-remove-symm' in arguments:
        process_type = 'remove-symm'
        input_fname_ref = arguments['-r']
    elif '-create-viewer' in arguments:
        process_type = 'create-viewer'
        value = arguments['-create-viewer']
    elif '-remove' in arguments:
        process_type = 'remove'
        value = arguments['-remove']
    elif '-keep' in arguments:
        process_type = 'keep'
        value = arguments['-keep']
    else:
        # no process chosen
        sct.printv('ERROR: No process was chosen.', 1, 'error')

    if '-msg' in arguments:
        msg = arguments['-msg'] + "\n"
    else:
        msg = ""

    if '-o' in arguments:
        input_fname_output = arguments['-o']
    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    processor = ProcessLabels(input_filename,
                              fname_output=input_fname_output,
                              fname_ref=input_fname_ref,
                              cross_radius=input_cross_radius,
                              dilate=input_dilate,
                              coordinates=input_coordinates,
                              verbose=verbose,
                              vertebral_levels=vertebral_levels,
                              value=value,
                              msg=msg)
    processor.process(process_type)

    # return based on process type
    if process_type == 'display-voxel':
        return processor.useful_notation
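# Illustrative sketch (not from the original script): the '-create'-style
# processes above ultimately write single voxels with integer values into an
# otherwise empty volume; the real work, including the coordinate format, is
# handled by ProcessLabels on Image objects. Names and coordinates below are
# hypothetical.
import numpy as np


def _create_point_labels(shape, points):
    # points: iterable of (x, y, z, value)
    labels = np.zeros(shape, dtype=np.uint8)
    for x, y, z, value in points:
        labels[x, y, z] = value
    return labels

# _create_point_labels((64, 64, 32), [(30, 32, 10, 3), (31, 33, 20, 4)])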
def main(args=None): # Initialization param = Param() start_time = time.time() parser = get_parser() arguments = parser.parse(sys.argv[1:]) fname_anat = arguments['-i'] fname_centerline = arguments['-s'] if '-smooth' in arguments: sigma = arguments['-smooth'] if '-param' in arguments: param.update(arguments['-param']) if '-r' in arguments: remove_temp_files = int(arguments['-r']) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # Display arguments sct.printv('\nCheck input arguments...') sct.printv(' Volume to smooth .................. ' + fname_anat) sct.printv(' Centerline ........................ ' + fname_centerline) sct.printv(' Sigma (mm) ........................ ' + str(sigma)) sct.printv(' Verbose ........................... ' + str(verbose)) # Check that input is 3D: from spinalcordtoolbox.image import Image nx, ny, nz, nt, px, py, pz, pt = Image(fname_anat).dim dim = 4 # by default, will be adjusted later if nt == 1: dim = 3 if nz == 1: dim = 2 if dim == 4: sct.printv( 'WARNING: the input image is 4D, please split your image to 3D before smoothing spinalcord using :\n' 'sct_image -i ' + fname_anat + ' -split t -o ' + fname_anat, verbose, 'warning') sct.printv('4D images not supported, aborting ...', verbose, 'error') # Extract path/file/extension path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat) path_centerline, file_centerline, ext_centerline = sct.extract_fname( fname_centerline) path_tmp = sct.tmp_create(basename="smooth_spinalcord", verbose=verbose) # Copying input data to tmp folder sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) sct.copy(fname_anat, os.path.join(path_tmp, "anat" + ext_anat)) sct.copy(fname_centerline, os.path.join(path_tmp, "centerline" + ext_centerline)) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # convert to nii format convert('anat' + ext_anat, 'anat.nii') convert('centerline' + ext_centerline, 'centerline.nii') # Change orientation of the input image into RPI sct.printv('\nOrient input volume to RPI orientation...') fname_anat_rpi = msct_image.Image("anat.nii") \ .change_orientation("RPI", generate_path=True) \ .save() \ .absolutepath # Change orientation of the input image into RPI sct.printv('\nOrient centerline to RPI orientation...') fname_centerline_rpi = msct_image.Image("centerline.nii") \ .change_orientation("RPI", generate_path=True) \ .save() \ .absolutepath # Straighten the spinal cord # straighten segmentation sct.printv('\nStraighten the spinal cord using centerline/segmentation...', verbose) cache_sig = sct.cache_signature( input_files=[fname_anat_rpi, fname_centerline_rpi], input_params={"x": "spline"}) cachefile = os.path.join(curdir, "straightening.cache") if sct.cache_valid(cachefile, cache_sig) and os.path.isfile( os.path.join( curdir, 'warp_curve2straight.nii.gz')) and os.path.isfile( os.path.join( curdir, 'warp_straight2curve.nii.gz')) and os.path.isfile( os.path.join(curdir, 'straight_ref.nii.gz')): # if they exist, copy them into current folder sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning') sct.copy(os.path.join(curdir, 'warp_curve2straight.nii.gz'), 'warp_curve2straight.nii.gz') sct.copy(os.path.join(curdir, 'warp_straight2curve.nii.gz'), 'warp_straight2curve.nii.gz') sct.copy(os.path.join(curdir, 'straight_ref.nii.gz'), 'straight_ref.nii.gz') # apply straightening sct.run([ 'sct_apply_transfo', '-i', fname_anat_rpi, '-w', 'warp_curve2straight.nii.gz', '-d', 
'straight_ref.nii.gz', '-o', 'anat_rpi_straight.nii', '-x', 'spline' ], verbose) else: sct.run([ 'sct_straighten_spinalcord', '-i', fname_anat_rpi, '-o', 'anat_rpi_straight.nii', '-s', fname_centerline_rpi, '-x', 'spline', '-param', 'algo_fitting=' + param.algo_fitting ], verbose) sct.cache_save(cachefile, cache_sig) # move warping fields locally (to use caching next time) sct.copy('warp_curve2straight.nii.gz', os.path.join(curdir, 'warp_curve2straight.nii.gz')) sct.copy('warp_straight2curve.nii.gz', os.path.join(curdir, 'warp_straight2curve.nii.gz')) # Smooth the straightened image along z sct.printv('\nSmooth the straightened image...') sigma_smooth = ",".join([str(i) for i in sigma]) sct_maths.main(args=[ '-i', 'anat_rpi_straight.nii', '-smooth', sigma_smooth, '-o', 'anat_rpi_straight_smooth.nii', '-v', '0' ]) # Apply the reversed warping field to get back the curved spinal cord sct.printv( '\nApply the reversed warping field to get back the curved spinal cord...' ) sct.run([ 'sct_apply_transfo', '-i', 'anat_rpi_straight_smooth.nii', '-o', 'anat_rpi_straight_smooth_curved.nii', '-d', 'anat.nii', '-w', 'warp_straight2curve.nii.gz', '-x', 'spline' ], verbose) # replace zeroed voxels by original image (issue #937) sct.printv('\nReplace zeroed voxels by original image...', verbose) nii_smooth = Image('anat_rpi_straight_smooth_curved.nii') data_smooth = nii_smooth.data data_input = Image('anat.nii').data indzero = np.where(data_smooth == 0) data_smooth[indzero] = data_input[indzero] nii_smooth.data = data_smooth nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii') # come back os.chdir(curdir) # Generate output file sct.printv('\nGenerate output file...') sct.generate_output_file( os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"), file_anat + '_smooth' + ext_anat) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...') sct.rmtree(path_tmp) # Display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n') sct.display_viewer_syntax([file_anat, file_anat + '_smooth'], verbose=verbose)
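# Illustrative sketch (not from the original script) of the core smoothing
# step above: an anisotropic Gaussian filter along x, y, z with one sigma per
# axis. The original delegates to sct_maths -smooth on the straightened image;
# the mm-to-voxel conversion shown here is an assumption for illustration, and
# the helper name is hypothetical.
import numpy as np
from scipy.ndimage import gaussian_filter


def _smooth_anisotropic(data_3d, sigma_mm, voxel_size_mm):
    # convert per-axis sigma from mm to voxel units, then filter
    sigma_vox = [s / p for s, p in zip(sigma_mm, voxel_size_mm)]
    return gaussian_filter(data_3d, sigma=sigma_vox)

# e.g. smooth mostly along z (the slice direction):
# _smooth_anisotropic(np.random.rand(64, 64, 40), sigma_mm=(0, 0, 5), voxel_size_mm=(1, 1, 1))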
    # open mask
    data = Image(fname_mask).data
    data_nonzero = nonzero(data)

    # find min and max boundaries of the mask
    dim = len(data_nonzero)
    ind_start = [min(data_nonzero[i]) for i in range(dim)]
    ind_end = [max(data_nonzero[i]) for i in range(dim)]

    # create string indices
    # ind_start = ','.join(str(i) for i in xyzmin)
    # ind_end = ','.join(str(i) for i in xyzmax)

    # return values
    return ind_start, ind_end, list(range(dim))


if __name__ == "__main__":
    sct.init_sct()
    parser = get_parser()

    # Fetching script arguments
    arguments = parser.parse(sys.argv[1:])

    # assigning variables to arguments
    input_filename = arguments["-i"]
    exec_choice = 0
    if "-g" in arguments:
        exec_choice = bool(int(arguments["-g"]))  # cropping with GUI

    cropper = ImageCropper(input_filename)
    cropper.verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=cropper.verbose, update=True)  # Update log level
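# Minimal NumPy illustration (not from the original script) of the
# bounding-box logic above: np.nonzero returns one index array per dimension,
# and the min/max of each array give the start/end of the mask's bounding box.
import numpy as np

mask = np.zeros((10, 10, 10), dtype=np.uint8)
mask[2:5, 3:7, 4:6] = 1
nz = np.nonzero(mask)
ind_start = [int(idx.min()) for idx in nz]  # [2, 3, 4]
ind_end = [int(idx.max()) for idx in nz]    # [4, 6, 5]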
def main(args=None): # Dictionary containing list of URLs for data names. # Mirror servers are listed in order of decreasing priority. # If exists, favour release artifact straight from github dict_url = { "sct_example_data": [ "https://github.com/sct-data/sct_example_data/releases/download/r20180525/20180525_sct_example_data.zip", "https://osf.io/kjcgs/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20180525_sct_example_data.zip", ], "sct_testing_data": [ "https://github.com/sct-data/sct_testing_data/releases/download/r20200707232231/sct_testing_data-r20200707232231.zip", "https://osf.io/download/5f053c176cbca300dad363d3/" ], "PAM50": [ "https://github.com/sct-data/PAM50/releases/download/r20191029/20191029_pam50.zip", "https://osf.io/u79sr/download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20191029_PAM50.zip", ], "MNI-Poly-AMU": [ "https://github.com/sct-data/MNI-Poly-AMU/releases/download/r20170310/20170310_MNI-Poly-AMU.zip", "https://osf.io/sh6h4/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20170310_MNI-Poly-AMU.zip", ], "gm_model": [ "https://osf.io/ugscu/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20160922_gm_model.zip", ], "optic_models": [ "https://github.com/sct-data/optic_models/releases/download/r20170413/20170413_optic_models.zip", "https://osf.io/g4fwn/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20170413_optic_models.zip", ], "pmj_models": [ "https://github.com/sct-data/pmj_models/releases/download/r20170922/20170922_pmj_models.zip", "https://osf.io/4gufr/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20170922_pmj_models.zip", ], "binaries_linux": [ "https://osf.io/cs6zt/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20200801_sct_binaries_linux.tar.gz", ], "binaries_osx": [ "https://osf.io/874cy?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20200801_sct_binaries_osx.tar.gz", ], "course_hawaii17": "https://osf.io/6exht/?action=download", "course_paris18": [ "https://osf.io/9bmn5/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20180612_sct_course-paris18.zip", ], "course_london19": [ "https://osf.io/4q3u7/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20190121_sct_course-london19.zip", ], "course_beijing19": [ "https://osf.io/ef4xz/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20190802_sct_course-beijing19.zip", ], "deepseg_gm_models": [ "https://github.com/sct-data/deepseg_gm_models/releases/download/r20180205/20180205_deepseg_gm_models.zip", "https://osf.io/b9y4x/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20180205_deepseg_gm_models.zip", ], "deepseg_sc_models": [ "https://github.com/sct-data/deepseg_sc_models/releases/download/r20180610/20180610_deepseg_sc_models.zip", "https://osf.io/avf97/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20180610_deepseg_sc_models.zip", ], "deepseg_lesion_models": [ "https://github.com/sct-data/deepseg_lesion_models/releases/download/r20180613/20180613_deepseg_lesion_models.zip", "https://osf.io/eg7v9/?action=download", "https://www.neuro.polymtl.ca/_media/downloads/sct/20180613_deepseg_lesion_models.zip", ], "c2c3_disc_models": [ "https://github.com/sct-data/c2c3_disc_models/releases/download/r20190117/20190117_c2c3_disc_models.zip", "https://osf.io/t97ap/?action=download", 
"https://www.neuro.polymtl.ca/_media/downloads/sct/20190117_c2c3_disc_models.zip", ], } if args is None: args = sys.argv[1:] # Get parser info parser = get_parser(dict_url) arguments = parser.parse(args) data_name = arguments['-d'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level dest_folder = arguments.get( '-o', os.path.join(os.path.abspath(os.curdir), data_name)) url = dict_url[data_name] install_data(url, dest_folder, keep=arguments.get("-k", False)) sct.printv('Done!\n', verbose) return 0
def main(argv): # Print the sct startup info sct.init_sct() # Parse the command line arguments parser = get_parser() args = parser.parse_args(argv if argv else ['--help']) # See if there's a configuration file and import those options if args.config is not None: print('configuring') with open(args.config, 'r') as conf: _, ext = os.path.splitext(args.config) if ext == '.json': config = json.load(conf) if ext == '.yml' or ext == '.yaml': config = yaml.load(conf, Loader=yaml.Loader) # Warn people if they're overriding their config file if len(argv) > 2: warnings.warn( UserWarning( 'Using the `-config|-c` flag with additional arguments is discouraged' )) # Check for unsupported arguments orig_keys = set(vars(args).keys()) config_keys = set(config.keys()) if orig_keys != config_keys: for k in config_keys.difference(orig_keys): del config[k] # Remove the unknown key warnings.warn( UserWarning( 'Unknown key "{}" found in your configuration file, ignoring.' .format(k))) # Update the default to match the config parser.set_defaults(**config) # Reparse the arguments args = parser.parse_args(argv) # Set up email notifications if desired do_email = args.email_to is not None if do_email: email_to = args.email_to if args.email_from is not None: email_from = args.email_from else: email_from = args.email_to smtp_host, smtp_port = args.email_host.split(":") smtp_port = int(smtp_port) email_pass = getpass('Please input your email password:\n') def send_notification(subject, message): send_email(email_to, email_from, subject=subject, message=message, passwd=email_pass, smtp_host=smtp_host, smtp_port=smtp_port) while True: send_test = input( 'Would you like to send a test email to validate your settings? [Y/n]:\n' ) if send_test.lower() in ['', 'y', 'n']: break else: print('Please input y or n') if send_test.lower() in ['', 'y']: send_notification('sct_run_batch: test notification', 'Looks good') # Set up output directories and create them if they don't already exist path_output = os.path.abspath(os.path.expanduser(args.path_output)) path_results = os.path.join(path_output, 'results') path_data_processed = os.path.join(path_output, 'data_processed') path_log = os.path.join(path_output, 'log') path_qc = os.path.join(path_output, 'qc') path_segmanual = os.path.abspath(os.path.expanduser(args.path_segmanual)) script = os.path.abspath(os.path.expanduser(args.script)) for pth in [ path_output, path_results, path_data_processed, path_log, path_qc ]: os.makedirs(pth, exist_ok=True) # Check that the script can be found if not os.path.exists(script): raise FileNotFoundError( 'Couldn\'t find the script script at {}'.format(script)) # Setup overall log batch_log = open(os.path.join(path_log, args.batch_log), 'w') # Duplicate init_sct message to batch_log print('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__), file=batch_log, flush=True) # Tee IO to batch_log and std(out/err) orig_stdout = sys.stdout orig_stderr = sys.stderr sys.stdout = Tee(batch_log, orig_stdout) sys.stderr = Tee(batch_log, orig_stderr) def reset_streams(): sys.stdout = orig_stdout sys.stderr = orig_stderr # Display OS print("INFO SYSTEM") print("-----------") platform_running = sys.platform if platform_running.find('darwin') != -1: os_running = 'osx' elif platform_running.find('linux') != -1: os_running = 'linux' print('OS: ' + os_running + ' (' + platform.platform() + ')') # Display number of CPU cores print('CPU cores: Available: {} | Threads used by ITK Programs: {}'.format( multiprocessing.cpu_count(), args.itk_threads)) # Display RAM 
available print("RAM: Total {} MB | Available {} MB | Used {} MB".format( int(psutil.virtual_memory().total / 1024 / 1024), int(psutil.virtual_memory().available / 1024 / 1024), int(psutil.virtual_memory().used / 1024 / 1024), )) # Log the current arguments (in yaml because it's cleaner) print('\nINPUT ARGUMENTS') print("---------------") print(yaml.dump(vars(args))) # Display script version info print("SCRIPT") print("------") print("git commit: {}".format( __get_commit(path_to_git_folder=os.path.split(args.script)[0]))) print("Copying script to output folder...") try: shutil.copy(args.script, args.path_output) print("{} -> {}".format(args.script, os.path.abspath(args.path_output))) except shutil.SameFileError: print("Input and output folder are the same. Skipping copy.") pass except IsADirectoryError: print("Input folder is a directory (not a file). Skipping copy.") pass # Display data version info print("\nDATA") print("----") print("git commit: {}\n".format( __get_commit(path_to_git_folder=args.path_data))) # Find subjects and process inclusion/exclusions path_data = os.path.abspath(os.path.expanduser(args.path_data)) subject_dirs = [ f for f in os.listdir(path_data) if f.startswith(args.subject_prefix) ] # Handle inclusion lists assert not ((args.include is not None) and (args.include_list is not None)),\ 'Only one of `include` and `include-list` can be used' if args.include is not None: subject_dirs = [ f for f in subject_dirs if re.search(args.include, f) is not None ] if args.include_list is not None: # TODO decide if we should warn users if one of their inclusions isn't around subject_dirs = [f for f in subject_dirs if f in args.include_list] # Handle exclusions assert not ((args.exclude is not None) and (args.exclude_list is not None)),\ 'Only one of `exclude` and `exclude-list` can be used' if args.exclude is not None: subject_dirs = [ f for f in subject_dirs if re.search(args.exclude, f) is None ] if args.exclude_list is not None: subject_dirs = [f for f in subject_dirs if f not in args.exclude_list] # Determine the number of jobs we can run simulataneously if args.jobs < 1: jobs = multiprocessing.cpu_count() + args.jobs else: jobs = args.jobs print("RUNNING") print("-------") print("Running {} jobs in parallel.\n".format(jobs)) # Run the jobs, recording start and end times start = datetime.datetime.now() # Trap errors to send an email if a script fails. try: with multiprocessing.Pool(jobs) as p: run_single_dir = functools.partial( run_single, script=script, script_args=args.script_args, path_segmanual=path_segmanual, path_data=path_data, path_data_processed=path_data_processed, path_results=path_results, path_log=path_log, path_qc=path_qc, itk_threads=args.itk_threads, continue_on_error=args.continue_on_error) results = list(p.imap(run_single_dir, subject_dirs)) except Exception as e: if do_email: message = ( 'Oh no there has been the following error in your pipeline:\n\n' '{}'.format(e)) try: # I consider the multiprocessing error more significant than a potential email error, this # ensures that the multiprocessing error is signalled. send_notification('sct_run_batch errored', message) except Exception: raise e raise e else: raise e end = datetime.datetime.now() # Check for failed subjects fails = [ sd for (sd, ret) in zip(subject_dirs, results) if ret.returncode != 0 ] if len(fails) == 0: status_message = '\nHooray! 
your batch completed successfully :-)\n' else: status_message = ( '\nYour batch completed but some subjects may have not completed ' 'successfully, please consult the logs for:\n' '{}\n'.format('\n'.join(fails))) print(status_message) # Display timing duration = end - start timing_message = ('Started: {} | Ended: {} | Duration: {}\n'.format( start.strftime('%Hh%Mm%Ss'), end.strftime('%Hh%Mm%Ss'), (datetime.datetime.utcfromtimestamp(0) + duration).strftime('%Hh%Mm%Ss'))) print(timing_message) if do_email: send_notification('sct_run_batch: Run completed', status_message + timing_message) open_cmd = 'open' if sys.platform == 'darwin' else 'xdg-open' print( 'To open the Quality Control (QC) report on a web-browser, run the following:\n' '{} {}/index.html'.format(open_cmd, path_qc)) if args.zip: file_zip = 'sct_run_batch_{}'.format(time.strftime('%Y%m%d%H%M%S')) path_tmp = os.path.join(tempfile.mkdtemp(), file_zip) os.makedirs(os.path.join(path_tmp, file_zip)) for folder in [path_log, path_qc, path_results]: shutil.copytree( folder, os.path.join(path_tmp, file_zip, os.path.split(folder)[-1])) shutil.make_archive(os.path.join(path_output, file_zip), 'zip', path_tmp) shutil.rmtree(path_tmp) print("\nOutput zip archive: {}.zip".format( os.path.join(path_output, file_zip))) reset_streams() batch_log.close()
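# Illustrative sketch (not from the original script) of what the Tee objects
# used above typically do: duplicate every write to both the batch log and the
# original stream, so console output is also captured in the log file. The
# real Tee implementation in the toolbox may differ; the class name below is
# hypothetical.
class _Tee(object):
    def __init__(self, *streams):
        self.streams = streams

    def write(self, text):
        for stream in self.streams:
            stream.write(text)

    def flush(self):
        for stream in self.streams:
            stream.flush()

# usage sketch: sys.stdout = _Tee(open('batch.log', 'w'), sys.__stdout__)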
def main(args=None): # initializations initz = '' initcenter = '' fname_initlabel = '' file_labelz = 'labelz.nii.gz' param = Param() # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) fname_in = os.path.abspath(arguments["-i"]) fname_seg = os.path.abspath(arguments['-s']) contrast = arguments['-c'] path_template = arguments['-t'] scale_dist = arguments['-scale-dist'] if '-ofolder' in arguments: path_output = arguments['-ofolder'] else: path_output = os.curdir param.path_qc = arguments.get("-qc", None) if '-discfile' in arguments: fname_disc = os.path.abspath(arguments['-discfile']) else: fname_disc = None if '-initz' in arguments: initz = arguments['-initz'] if '-initcenter' in arguments: initcenter = arguments['-initcenter'] # if user provided text file, parse and overwrite arguments if '-initfile' in arguments: file = open(arguments['-initfile'], 'r') initfile = ' ' + file.read().replace('\n', '') arg_initfile = initfile.split(' ') for idx_arg, arg in enumerate(arg_initfile): if arg == '-initz': initz = [int(x) for x in arg_initfile[idx_arg + 1].split(',')] if arg == '-initcenter': initcenter = int(arg_initfile[idx_arg + 1]) if '-initlabel' in arguments: # get absolute path of label fname_initlabel = os.path.abspath(arguments['-initlabel']) if '-param' in arguments: param.update(arguments['-param'][0]) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level remove_temp_files = int(arguments['-r']) denoise = int(arguments['-denoise']) laplacian = int(arguments['-laplacian']) path_tmp = sct.tmp_create(basename="label_vertebrae", verbose=verbose) # Copying input data to tmp folder sct.printv('\nCopying input data to tmp folder...', verbose) Image(fname_in).save(os.path.join(path_tmp, "data.nii")) Image(fname_seg).save(os.path.join(path_tmp, "segmentation.nii")) # Go go temp folder curdir = os.getcwd() os.chdir(path_tmp) # Straighten spinal cord sct.printv('\nStraighten spinal cord...', verbose) # check if warp_curve2straight and warp_straight2curve already exist (i.e. 
no need to do it another time) cache_sig = sct.cache_signature( input_files=[fname_in, fname_seg], ) cachefile = os.path.join(curdir, "straightening.cache") if sct.cache_valid(cachefile, cache_sig) and os.path.isfile(os.path.join(curdir, "warp_curve2straight.nii.gz")) and os.path.isfile(os.path.join(curdir, "warp_straight2curve.nii.gz")) and os.path.isfile(os.path.join(curdir, "straight_ref.nii.gz")): # if they exist, copy them into current folder sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning') sct.copy(os.path.join(curdir, "warp_curve2straight.nii.gz"), 'warp_curve2straight.nii.gz') sct.copy(os.path.join(curdir, "warp_straight2curve.nii.gz"), 'warp_straight2curve.nii.gz') sct.copy(os.path.join(curdir, "straight_ref.nii.gz"), 'straight_ref.nii.gz') # apply straightening s, o = sct.run(['sct_apply_transfo', '-i', 'data.nii', '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'data_straight.nii']) else: cmd = ['sct_straighten_spinalcord', '-i', 'data.nii', '-s', 'segmentation.nii', '-r', str(remove_temp_files)] if param.path_qc is not None and os.environ.get("SCT_RECURSIVE_QC", None) == "1": cmd += ['-qc', param.path_qc] s, o = sct.run(cmd) sct.cache_save(cachefile, cache_sig) # resample to 0.5mm isotropic to match template resolution sct.printv('\nResample to 0.5mm isotropic...', verbose) s, o = sct.run(['sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x', 'linear', '-o', 'data_straightr.nii'], verbose=verbose) # Apply straightening to segmentation # N.B. Output is RPI sct.printv('\nApply straightening to segmentation...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % ('segmentation.nii', 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'segmentation_straight.nii', 'Linear'), verbose=verbose, is_sct_binary=True, ) # Threshold segmentation at 0.5 sct.run(['sct_maths', '-i', 'segmentation_straight.nii', '-thr', '0.5', '-o', 'segmentation_straight.nii'], verbose) # If disc label file is provided, label vertebrae using that file instead of automatically if fname_disc: # Apply straightening to disc-label sct.printv('\nApply straightening to disc labels...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % (fname_disc, 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'labeldisc_straight.nii.gz', 'NearestNeighbor'), verbose=verbose, is_sct_binary=True, ) label_vert('segmentation_straight.nii', 'labeldisc_straight.nii.gz', verbose=1) else: # create label to identify disc sct.printv('\nCreate label to identify disc...', verbose) fname_labelz = os.path.join(path_tmp, file_labelz) if initz or initcenter: if initcenter: # find z centered in FOV nii = Image('segmentation.nii').change_orientation("RPI") nx, ny, nz, nt, px, py, pz, pt = nii.dim # Get dimensions z_center = int(np.round(nz / 2)) # get z_center initz = [z_center, initcenter] # create single label and output as labels.nii.gz label = ProcessLabels('segmentation.nii', fname_output='tmp.labelz.nii.gz', coordinates=['{},{}'.format(initz[0], initz[1])]) im_label = label.process('create-seg') im_label.data = sct_maths.dilate(im_label.data, [3]) # TODO: create a dilation method specific to labels, # which does not apply a convolution across all voxels (highly inneficient) im_label.save(fname_labelz) elif fname_initlabel: import sct_label_utils # subtract "1" to label value because due to legacy, in this code the disc C2-C3 has value "2", whereas in the # recent version of SCT it is defined as "3". 
Therefore, when asking the user to define a label, we point to the # new definition of labels (i.e., C2-C3 = 3). sct_label_utils.main(['-i', fname_initlabel, '-add', '-1', '-o', fname_labelz]) else: # automatically finds C2-C3 disc im_data = Image('data.nii') im_seg = Image('segmentation.nii') im_label_c2c3 = detect_c2c3(im_data, im_seg, contrast) ind_label = np.where(im_label_c2c3.data) if not np.size(ind_label) == 0: # subtract "1" to label value because due to legacy, in this code the disc C2-C3 has value "2", whereas in the # recent version of SCT it is defined as "3". im_label_c2c3.data[ind_label] = 2 else: sct.printv('Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils', 1, 'error') im_label_c2c3.save(fname_labelz) # dilate label so it is not lost when applying warping sct_maths.main(['-i', fname_labelz, '-dilate', '3', '-o', fname_labelz]) # Apply straightening to z-label sct.printv('\nAnd apply straightening to label...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % (file_labelz, 'data_straightr.nii', 'warp_curve2straight.nii.gz', 'labelz_straight.nii.gz', 'NearestNeighbor'), verbose=verbose, is_sct_binary=True, ) # get z value and disk value to initialize labeling sct.printv('\nGet z and disc values from straight label...', verbose) init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz') sct.printv('.. ' + str(init_disc), verbose) # denoise data if denoise: sct.printv('\nDenoise data...', verbose) sct.run(['sct_maths', '-i', 'data_straightr.nii', '-denoise', 'h=0.05', '-o', 'data_straightr.nii'], verbose) # apply laplacian filtering if laplacian: sct.printv('\nApply Laplacian filter...', verbose) sct.run(['sct_maths', '-i', 'data_straightr.nii', '-laplacian', '1', '-o', 'data_straightr.nii'], verbose) # detect vertebral levels on straight spinal cord vertebral_detection('data_straightr.nii', 'segmentation_straight.nii', contrast, param, init_disc=init_disc, verbose=verbose, path_template=path_template, path_output=path_output, scale_dist=scale_dist) # un-straighten labeled spinal cord sct.printv('\nUn-straighten labeling...', verbose) sct.run('isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' % ('segmentation_straight_labeled.nii', 'segmentation.nii', 'warp_straight2curve.nii.gz', 'segmentation_labeled.nii', 'NearestNeighbor'), verbose=verbose, is_sct_binary=True, ) # Clean labeled segmentation sct.printv('\nClean labeled segmentation (correct interpolation errors)...', verbose) clean_labeled_segmentation('segmentation_labeled.nii', 'segmentation.nii', 'segmentation_labeled.nii') # label discs sct.printv('\nLabel discs...', verbose) label_discs('segmentation_labeled.nii', verbose=verbose) # come back os.chdir(curdir) # Generate output files path_seg, file_seg, ext_seg = sct.extract_fname(fname_seg) fname_seg_labeled = os.path.join(path_output, file_seg + '_labeled' + ext_seg) sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(os.path.join(path_tmp, "segmentation_labeled.nii"), fname_seg_labeled) sct.generate_output_file(os.path.join(path_tmp, "segmentation_labeled_disc.nii"), os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg)) # copy straightening files in case subsequent SCT functions need them sct.generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, 
"warp_straight2curve.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...', verbose) sct.rmtree(path_tmp) # Generate QC report if param.path_qc is not None: path_qc = os.path.abspath(param.path_qc) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) labeled_seg_file = os.path.join(path_output, file_seg + '_labeled' + ext_seg) generate_qc(fname_in, fname_seg=labeled_seg_file, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_label_vertebrae') sct.display_viewer_syntax([fname_in, fname_seg_labeled], colormaps=['', 'subcortical'], opacities=['1', '0.5'])
def main(args=None): # initializations param = Param() # check user arguments if not args: args = sys.argv[1:] # Get parser info parser = get_parser() arguments = parser.parse(args) fname_data = arguments['-i'] fname_seg = arguments['-s'] if '-l' in arguments: fname_landmarks = arguments['-l'] label_type = 'body' elif '-ldisc' in arguments: fname_landmarks = arguments['-ldisc'] label_type = 'disc' else: sct.printv('ERROR: Labels should be provided.', 1, 'error') if '-ofolder' in arguments: path_output = arguments['-ofolder'] else: path_output = '' param.path_qc = arguments.get("-qc", None) path_template = arguments['-t'] contrast_template = arguments['-c'] ref = arguments['-ref'] param.remove_temp_files = int(arguments.get('-r')) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level param.verbose = verbose # TODO: not clean, unify verbose or param.verbose in code, but not both param.straighten_fitting = arguments['-straighten-fitting'] # if '-cpu-nb' in arguments: # arg_cpu = ' -cpu-nb '+str(arguments['-cpu-nb']) # else: # arg_cpu = '' # registration parameters if '-param' in arguments: # reset parameters but keep step=0 (might be overwritten if user specified step=0) paramreg = ParamregMultiStep([step0]) if ref == 'subject': paramreg.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz' # add user parameters for paramStep in arguments['-param']: paramreg.addStep(paramStep) else: paramreg = ParamregMultiStep([step0, step1, step2]) # if ref=subject, initialize registration using different affine parameters if ref == 'subject': paramreg.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz' # initialize other parameters zsubsample = param.zsubsample # retrieve template file names file_template_vertebral_labeling = get_file_label(os.path.join(path_template, 'template'), 'vertebral labeling') file_template = get_file_label(os.path.join(path_template, 'template'), contrast_template.upper() + '-weighted template') file_template_seg = get_file_label(os.path.join(path_template, 'template'), 'spinal cord') # start timer start_time = time.time() # get fname of the template + template objects fname_template = os.path.join(path_template, 'template', file_template) fname_template_vertebral_labeling = os.path.join(path_template, 'template', file_template_vertebral_labeling) fname_template_seg = os.path.join(path_template, 'template', file_template_seg) fname_template_disc_labeling = os.path.join(path_template, 'template', 'PAM50_label_disc.nii.gz') # check file existence # TODO: no need to do that! 
sct.printv('\nCheck template files...') sct.check_file_exist(fname_template, verbose) sct.check_file_exist(fname_template_vertebral_labeling, verbose) sct.check_file_exist(fname_template_seg, verbose) path_data, file_data, ext_data = sct.extract_fname(fname_data) # sct.printv(arguments) sct.printv('\nCheck parameters:', verbose) sct.printv(' Data: ' + fname_data, verbose) sct.printv(' Landmarks: ' + fname_landmarks, verbose) sct.printv(' Segmentation: ' + fname_seg, verbose) sct.printv(' Path template: ' + path_template, verbose) sct.printv(' Remove temp files: ' + str(param.remove_temp_files), verbose) # check input labels labels = check_labels(fname_landmarks, label_type=label_type) vertebral_alignment = False if len(labels) > 2 and label_type == 'disc': vertebral_alignment = True path_tmp = sct.tmp_create(basename="register_to_template", verbose=verbose) # set temporary file names ftmp_data = 'data.nii' ftmp_seg = 'seg.nii.gz' ftmp_label = 'label.nii.gz' ftmp_template = 'template.nii' ftmp_template_seg = 'template_seg.nii.gz' ftmp_template_label = 'template_label.nii.gz' # copy files to temporary folder sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) Image(fname_data).save(os.path.join(path_tmp, ftmp_data)) Image(fname_seg).save(os.path.join(path_tmp, ftmp_seg)) Image(fname_landmarks).save(os.path.join(path_tmp, ftmp_label)) Image(fname_template).save(os.path.join(path_tmp, ftmp_template)) Image(fname_template_seg).save(os.path.join(path_tmp, ftmp_template_seg)) Image(fname_template_vertebral_labeling).save(os.path.join(path_tmp, ftmp_template_label)) if label_type == 'disc': Image(fname_template_disc_labeling).save(os.path.join(path_tmp, ftmp_template_label)) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # Generate labels from template vertebral labeling if label_type == 'body': sct.printv('\nGenerate labels from template vertebral labeling', verbose) ftmp_template_label_, ftmp_template_label = ftmp_template_label, sct.add_suffix(ftmp_template_label, "_body") sct_label_utils.main(args=['-i', ftmp_template_label_, '-vert-body', '0', '-o', ftmp_template_label]) # check if provided labels are available in the template sct.printv('\nCheck if provided labels are available in the template', verbose) image_label_template = Image(ftmp_template_label) labels_template = image_label_template.getNonZeroCoordinates(sorting='value') if labels[-1].value > labels_template[-1].value: sct.printv('ERROR: Wrong landmarks input. Labels must have correspondence in template space. \nLabel max ' 'provided: ' + str(labels[-1].value) + '\nLabel max from template: ' + str(labels_template[-1].value), verbose, 'error') # if only one label is present, force affine transformation to be Tx,Ty,Tz only (no scaling) if len(labels) == 1: paramreg.steps['0'].dof = 'Tx_Ty_Tz' sct.printv('WARNING: Only one label is present. Forcing initial transformation to: ' + paramreg.steps['0'].dof, 1, 'warning') # Project labels onto the spinal cord centerline because later, an affine transformation is estimated between the # template's labels (centered in the cord) and the subject's labels (assumed to be centered in the cord). 
# If labels are not centered, mis-registration errors are observed (see issue #1826) ftmp_label = project_labels_on_spinalcord(ftmp_label, ftmp_seg) # binarize segmentation (in case it has values below 0 caused by manual editing) sct.printv('\nBinarize segmentation', verbose) ftmp_seg_, ftmp_seg = ftmp_seg, sct.add_suffix(ftmp_seg, "_bin") sct_maths.main(['-i', ftmp_seg_, '-bin', '0.5', '-o', ftmp_seg]) # Switch between modes: subject->template or template->subject if ref == 'template': # resample data to 1mm isotropic sct.printv('\nResample data to 1mm isotropic...', verbose) resample_file(ftmp_data, add_suffix(ftmp_data, '_1mm'), '1.0x1.0x1.0', 'mm', 'linear', verbose) ftmp_data = add_suffix(ftmp_data, '_1mm') resample_file(ftmp_seg, add_suffix(ftmp_seg, '_1mm'), '1.0x1.0x1.0', 'mm', 'linear', verbose) ftmp_seg = add_suffix(ftmp_seg, '_1mm') # N.B. resampling of labels is more complicated, because they are single-point labels, therefore resampling # with nearest neighbour can make them disappear. resample_labels(ftmp_label, ftmp_data, add_suffix(ftmp_label, '_1mm')) ftmp_label = add_suffix(ftmp_label, '_1mm') # Change orientation of input images to RPI sct.printv('\nChange orientation of input images to RPI...', verbose) ftmp_data = Image(ftmp_data).change_orientation("RPI", generate_path=True).save().absolutepath ftmp_seg = Image(ftmp_seg).change_orientation("RPI", generate_path=True).save().absolutepath ftmp_label = Image(ftmp_label).change_orientation("RPI", generate_path=True).save().absolutepath ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_crop') if vertebral_alignment: # cropping the segmentation based on the label coverage to ensure good registration with vertebral alignment # See https://github.com/neuropoly/spinalcordtoolbox/pull/1669 for details image_labels = Image(ftmp_label) coordinates_labels = image_labels.getNonZeroCoordinates(sorting='z') nx, ny, nz, nt, px, py, pz, pt = image_labels.dim offset_crop = 10.0 * pz # cropping the image 10 mm above and below the highest and lowest label cropping_slices = [coordinates_labels[0].z - offset_crop, coordinates_labels[-1].z + offset_crop] # make sure that the cropping slices do not extend outside of the slice range (issue #1811) if cropping_slices[0] < 0: cropping_slices[0] = 0 if cropping_slices[1] > nz: cropping_slices[1] = nz msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, np.int32(np.round(cropping_slices))),))).save(ftmp_seg) else: # if we do not align the vertebral levels, we crop the segmentation from top to bottom im_seg_rpi = Image(ftmp_seg_) bottom = 0 for data in msct_image.SlicerOneAxis(im_seg_rpi, "IS"): if (data != 0).any(): break bottom += 1 top = im_seg_rpi.data.shape[2] for data in msct_image.SlicerOneAxis(im_seg_rpi, "SI"): if (data != 0).any(): break top -= 1 msct_image.spatial_crop(im_seg_rpi, dict(((2, (bottom, top)),))).save(ftmp_seg) # straighten segmentation sct.printv('\nStraighten the spinal cord using centerline/segmentation...', verbose) # check if warp_curve2straight and warp_straight2curve already exist (i.e. 
no need to do it another time) fn_warp_curve2straight = os.path.join(curdir, "warp_curve2straight.nii.gz") fn_warp_straight2curve = os.path.join(curdir, "warp_straight2curve.nii.gz") fn_straight_ref = os.path.join(curdir, "straight_ref.nii.gz") cache_input_files=[ftmp_seg] if vertebral_alignment: cache_input_files += [ ftmp_template_seg, ftmp_label, ftmp_template_label, ] cache_sig = sct.cache_signature( input_files=cache_input_files, ) cachefile = os.path.join(curdir, "straightening.cache") if sct.cache_valid(cachefile, cache_sig) and os.path.isfile(fn_warp_curve2straight) and os.path.isfile(fn_warp_straight2curve) and os.path.isfile(fn_straight_ref): sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning') sct.copy(fn_warp_curve2straight, 'warp_curve2straight.nii.gz') sct.copy(fn_warp_straight2curve, 'warp_straight2curve.nii.gz') sct.copy(fn_straight_ref, 'straight_ref.nii.gz') # apply straightening sct.run(['sct_apply_transfo', '-i', ftmp_seg, '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', add_suffix(ftmp_seg, '_straight')]) else: from spinalcordtoolbox.straightening import SpinalCordStraightener sc_straight = SpinalCordStraightener(ftmp_seg, ftmp_seg) sc_straight.algo_fitting = param.straighten_fitting sc_straight.output_filename = add_suffix(ftmp_seg, '_straight') sc_straight.path_output = './' sc_straight.qc = '0' sc_straight.remove_temp_files = param.remove_temp_files sc_straight.verbose = verbose if vertebral_alignment: sc_straight.centerline_reference_filename = ftmp_template_seg sc_straight.use_straight_reference = True sc_straight.discs_input_filename = ftmp_label sc_straight.discs_ref_filename = ftmp_template_label sc_straight.straighten() sct.cache_save(cachefile, cache_sig) # N.B. DO NOT UPDATE VARIABLE ftmp_seg BECAUSE TEMPORARY USED LATER # re-define warping field using non-cropped space (to avoid issue #367) s, o = sct.run(['sct_concat_transfo', '-w', 'warp_straight2curve.nii.gz', '-d', ftmp_data, '-o', 'warp_straight2curve.nii.gz']) if vertebral_alignment: sct.copy('warp_curve2straight.nii.gz', 'warp_curve2straightAffine.nii.gz') else: # Label preparation: # -------------------------------------------------------------------------------- # Remove unused label on template. Keep only label present in the input label image sct.printv('\nRemove unused label on template. Keep only label present in the input label image...', verbose) sct.run(['sct_label_utils', '-i', ftmp_template_label, '-o', ftmp_template_label, '-remove-reference', ftmp_label]) # Dilating the input label so they can be straighten without losing them sct.printv('\nDilating input labels using 3vox ball radius') sct_maths.main(['-i', ftmp_label, '-dilate', '3', '-o', add_suffix(ftmp_label, '_dilate')]) ftmp_label = add_suffix(ftmp_label, '_dilate') # Apply straightening to labels sct.printv('\nApply straightening to labels...', verbose) sct.run(['sct_apply_transfo', '-i', ftmp_label, '-o', add_suffix(ftmp_label, '_straight'), '-d', add_suffix(ftmp_seg, '_straight'), '-w', 'warp_curve2straight.nii.gz', '-x', 'nn']) ftmp_label = add_suffix(ftmp_label, '_straight') # Compute rigid transformation straight landmarks --> template landmarks sct.printv('\nEstimate transformation for step #0...', verbose) try: register_landmarks(ftmp_label, ftmp_template_label, paramreg.steps['0'].dof, fname_affine='straight2templateAffine.txt', verbose=verbose) except RuntimeError: raise('Input labels do not seem to be at the right place. Please check the position of the labels. 
' 'See documentation for more details: https://www.slideshare.net/neuropoly/sct-course-20190121/42') # Concatenate transformations: curve --> straight --> affine sct.printv('\nConcatenate transformations: curve --> straight --> affine...', verbose) sct.run(['sct_concat_transfo', '-w', 'warp_curve2straight.nii.gz,straight2templateAffine.txt', '-d', 'template.nii', '-o', 'warp_curve2straightAffine.nii.gz']) # Apply transformation sct.printv('\nApply transformation...', verbose) sct.run(['sct_apply_transfo', '-i', ftmp_data, '-o', add_suffix(ftmp_data, '_straightAffine'), '-d', ftmp_template, '-w', 'warp_curve2straightAffine.nii.gz']) ftmp_data = add_suffix(ftmp_data, '_straightAffine') sct.run(['sct_apply_transfo', '-i', ftmp_seg, '-o', add_suffix(ftmp_seg, '_straightAffine'), '-d', ftmp_template, '-w', 'warp_curve2straightAffine.nii.gz', '-x', 'linear']) ftmp_seg = add_suffix(ftmp_seg, '_straightAffine') """ # Benjamin: Issue from Allan Martin, about the z=0 slice that is screwed up, caused by the affine transform. # Solution found: remove slices below and above landmarks to avoid rotation effects points_straight = [] for coord in landmark_template: points_straight.append(coord.z) min_point, max_point = int(np.round(np.min(points_straight))), int(np.round(np.max(points_straight))) ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_black') msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, (min_point,max_point)),))).save(ftmp_seg) """ # open segmentation im = Image(ftmp_seg) im_new = msct_image.empty_like(im) # binarize im_new.data = im.data > 0.5 # find min-max of anat2template (for subsequent cropping) zmin_template, zmax_template = msct_image.find_zmin_zmax(im_new, threshold=0.5) # save binarized segmentation im_new.save(add_suffix(ftmp_seg, '_bin')) # unused? 
# crop template in z-direction (for faster processing) # TODO: refactor to use python module instead of doing i/o sct.printv('\nCrop data in template space (for faster processing)...', verbose) ftmp_template_, ftmp_template = ftmp_template, add_suffix(ftmp_template, '_crop') msct_image.spatial_crop(Image(ftmp_template_), dict(((2, (zmin_template,zmax_template)),))).save(ftmp_template) ftmp_template_seg_, ftmp_template_seg = ftmp_template_seg, add_suffix(ftmp_template_seg, '_crop') msct_image.spatial_crop(Image(ftmp_template_seg_), dict(((2, (zmin_template,zmax_template)),))).save(ftmp_template_seg) ftmp_data_, ftmp_data = ftmp_data, add_suffix(ftmp_data, '_crop') msct_image.spatial_crop(Image(ftmp_data_), dict(((2, (zmin_template,zmax_template)),))).save(ftmp_data) ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_crop') msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, (zmin_template,zmax_template)),))).save(ftmp_seg) # sub-sample in z-direction # TODO: refactor to use python module instead of doing i/o sct.printv('\nSub-sample in z-direction (for faster processing)...', verbose) sct.run(['sct_resample', '-i', ftmp_template, '-o', add_suffix(ftmp_template, '_sub'), '-f', '1x1x' + zsubsample], verbose) ftmp_template = add_suffix(ftmp_template, '_sub') sct.run(['sct_resample', '-i', ftmp_template_seg, '-o', add_suffix(ftmp_template_seg, '_sub'), '-f', '1x1x' + zsubsample], verbose) ftmp_template_seg = add_suffix(ftmp_template_seg, '_sub') sct.run(['sct_resample', '-i', ftmp_data, '-o', add_suffix(ftmp_data, '_sub'), '-f', '1x1x' + zsubsample], verbose) ftmp_data = add_suffix(ftmp_data, '_sub') sct.run(['sct_resample', '-i', ftmp_seg, '-o', add_suffix(ftmp_seg, '_sub'), '-f', '1x1x' + zsubsample], verbose) ftmp_seg = add_suffix(ftmp_seg, '_sub') # Registration straight spinal cord to template sct.printv('\nRegister straight spinal cord to template...', verbose) # loop across registration steps warp_forward = [] warp_inverse = [] for i_step in range(1, len(paramreg.steps)): sct.printv('\nEstimate transformation for step #' + str(i_step) + '...', verbose) # identify which is the src and dest if paramreg.steps[str(i_step)].type == 'im': src = ftmp_data dest = ftmp_template interp_step = 'linear' elif paramreg.steps[str(i_step)].type == 'seg': src = ftmp_seg dest = ftmp_template_seg interp_step = 'nn' else: sct.printv('ERROR: Wrong image type.', 1, 'error') if paramreg.steps[str(i_step)].algo == 'centermassrot' and paramreg.steps[str(i_step)].rot_method == 'hog': src_seg = ftmp_seg dest_seg = ftmp_template_seg # if step>1, apply warp_forward_concat to the src image to be used if i_step > 1: # apply transformation from previous step, to use as new src for registration sct.run(['sct_apply_transfo', '-i', src, '-d', dest, '-w', ','.join(warp_forward), '-o', add_suffix(src, '_regStep' + str(i_step - 1)), '-x', interp_step], verbose) src = add_suffix(src, '_regStep' + str(i_step - 1)) if paramreg.steps[str(i_step)].algo == 'centermassrot' and paramreg.steps[str(i_step)].rot_method == 'hog': # also apply transformation to the seg sct.run(['sct_apply_transfo', '-i', src_seg, '-d', dest_seg, '-w', ','.join(warp_forward), '-o', add_suffix(src, '_regStep' + str(i_step - 1)), '-x', interp_step], verbose) src_seg = add_suffix(src_seg, '_regStep' + str(i_step - 1)) # register src --> dest # TODO: display param for debugging if paramreg.steps[str(i_step)].algo == 'centermassrot' and paramreg.steps[str(i_step)].rot_method == 'hog': # im_seg case warp_forward_out, warp_inverse_out = register([src, 
src_seg], [dest, dest_seg], paramreg, param, str(i_step)) else: warp_forward_out, warp_inverse_out = register(src, dest, paramreg, param, str(i_step)) warp_forward.append(warp_forward_out) warp_inverse.append(warp_inverse_out) # Concatenate transformations: sct.printv('\nConcatenate transformations: anat --> template...', verbose) sct.run(['sct_concat_transfo', '-w', 'warp_curve2straightAffine.nii.gz,' + ','.join(warp_forward), '-d', 'template.nii', '-o', 'warp_anat2template.nii.gz'], verbose) # sct.run('sct_concat_transfo -w warp_curve2straight.nii.gz,straight2templateAffine.txt,'+','.join(warp_forward)+' -d template.nii -o warp_anat2template.nii.gz', verbose) sct.printv('\nConcatenate transformations: template --> anat...', verbose) warp_inverse.reverse() if vertebral_alignment: sct.run(['sct_concat_transfo', '-w', ','.join(warp_inverse) + ',warp_straight2curve.nii.gz', '-d', 'data.nii', '-o', 'warp_template2anat.nii.gz'], verbose) else: sct.run(['sct_concat_transfo', '-w', ','.join(warp_inverse) + ',-straight2templateAffine.txt,warp_straight2curve.nii.gz', '-d', 'data.nii', '-o', 'warp_template2anat.nii.gz'], verbose) # register template->subject elif ref == 'subject': # Change orientation of input images to RPI sct.printv('\nChange orientation of input images to RPI...', verbose) ftmp_data = Image(ftmp_data).change_orientation("RPI", generate_path=True).save().absolutepath ftmp_seg = Image(ftmp_seg).change_orientation("RPI", generate_path=True).save().absolutepath ftmp_label = Image(ftmp_label).change_orientation("RPI", generate_path=True).save().absolutepath # Remove unused label on template. Keep only label present in the input label image sct.printv('\nRemove unused label on template. Keep only label present in the input label image...', verbose) sct.run(['sct_label_utils', '-i', ftmp_template_label, '-o', ftmp_template_label, '-remove-reference', ftmp_label]) # Add one label because at least 3 orthogonal labels are required to estimate an affine transformation. This # new label is added at the level of the upper most label (lowest value), at 1cm to the right. for i_file in [ftmp_label, ftmp_template_label]: im_label = Image(i_file) coord_label = im_label.getCoordinatesAveragedByValue() # N.B. landmarks are sorted by value # Create new label from copy import deepcopy new_label = deepcopy(coord_label[0]) # move it 5mm to the left (orientation is RAS) nx, ny, nz, nt, px, py, pz, pt = im_label.dim new_label.x = np.round(coord_label[0].x + 5.0 / px) # assign value 99 new_label.value = 99 # Add to existing image im_label.data[int(new_label.x), int(new_label.y), int(new_label.z)] = new_label.value # Overwrite label file # im_label.absolutepath = 'label_rpi_modif.nii.gz' im_label.save() # Bring template to subject space using landmark-based transformation sct.printv('\nEstimate transformation for step #0...', verbose) warp_forward = ['template2subjectAffine.txt'] warp_inverse = ['-template2subjectAffine.txt'] try: register_landmarks(ftmp_template_label, ftmp_label, paramreg.steps['0'].dof, fname_affine=warp_forward[0], verbose=verbose, path_qc="./") except Exception: sct.printv('ERROR: input labels do not seem to be at the right place. Please check the position of the labels. 
See documentation for more details: https://www.slideshare.net/neuropoly/sct-course-20190121/42', verbose=verbose, type='error') # loop across registration steps for i_step in range(1, len(paramreg.steps)): sct.printv('\nEstimate transformation for step #' + str(i_step) + '...', verbose) # identify which is the src and dest if paramreg.steps[str(i_step)].type == 'im': src = ftmp_template dest = ftmp_data interp_step = 'linear' elif paramreg.steps[str(i_step)].type == 'seg': src = ftmp_template_seg dest = ftmp_seg interp_step = 'nn' else: sct.printv('ERROR: Wrong image type.', 1, 'error') # apply transformation from previous step, to use as new src for registration sct.run(['sct_apply_transfo', '-i', src, '-d', dest, '-w', ','.join(warp_forward), '-o', add_suffix(src, '_regStep' + str(i_step - 1)), '-x', interp_step], verbose) src = add_suffix(src, '_regStep' + str(i_step - 1)) # register src --> dest # TODO: display param for debugging warp_forward_out, warp_inverse_out = register(src, dest, paramreg, param, str(i_step)) warp_forward.append(warp_forward_out) warp_inverse.insert(0, warp_inverse_out) # Concatenate transformations: sct.printv('\nConcatenate transformations: template --> subject...', verbose) sct.run(['sct_concat_transfo', '-w', ','.join(warp_forward), '-d', 'data.nii', '-o', 'warp_template2anat.nii.gz'], verbose) sct.printv('\nConcatenate transformations: subject --> template...', verbose) sct.run(['sct_concat_transfo', '-w', ','.join(warp_inverse), '-d', 'template.nii', '-o', 'warp_anat2template.nii.gz'], verbose) # Apply warping fields to anat and template sct.run(['sct_apply_transfo', '-i', 'template.nii', '-o', 'template2anat.nii.gz', '-d', 'data.nii', '-w', 'warp_template2anat.nii.gz', '-crop', '1'], verbose) sct.run(['sct_apply_transfo', '-i', 'data.nii', '-o', 'anat2template.nii.gz', '-d', 'template.nii', '-w', 'warp_anat2template.nii.gz', '-crop', '1'], verbose) # come back os.chdir(curdir) # Generate output files sct.printv('\nGenerate output files...', verbose) fname_template2anat = os.path.join(path_output, 'template2anat' + ext_data) fname_anat2template = os.path.join(path_output, 'anat2template' + ext_data) sct.generate_output_file(os.path.join(path_tmp, "warp_template2anat.nii.gz"), os.path.join(path_output, "warp_template2anat.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "warp_anat2template.nii.gz"), os.path.join(path_output, "warp_anat2template.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "template2anat.nii.gz"), fname_template2anat, verbose) sct.generate_output_file(os.path.join(path_tmp, "anat2template.nii.gz"), fname_anat2template, verbose) if ref == 'template': # copy straightening files in case subsequent SCT functions need them sct.generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, "warp_straight2curve.nii.gz"), verbose) sct.generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose) # Delete temporary files if param.remove_temp_files: sct.printv('\nDelete temporary files...', verbose) sct.rmtree(path_tmp, verbose=verbose) # display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! 
Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose) qc_dataset = arguments.get("-qc-dataset", None) qc_subject = arguments.get("-qc-subject", None) if param.path_qc is not None: generate_qc(fname_data, fname_in2=fname_template2anat, fname_seg=fname_seg, args=args, path_qc=os.path.abspath(param.path_qc), dataset=qc_dataset, subject=qc_subject, process='sct_register_to_template') sct.display_viewer_syntax([fname_data, fname_template2anat], verbose=verbose) sct.display_viewer_syntax([fname_template, fname_anat2template], verbose=verbose)
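# Because main() takes an argument list and feeds it to the parser, the whole
# registration above can also be driven programmatically with the same flags
# it parses (-i, -s, -l/-ldisc, -c, -ref, -ofolder, -param, ...). The file
# names below are hypothetical placeholders, not files shipped with SCT:
if __name__ == '__main__':
    main(['-i', 't2.nii.gz',           # anatomical image
          '-s', 't2_seg.nii.gz',       # spinal cord segmentation
          '-l', 't2_labels.nii.gz',    # vertebral body labels
          '-c', 't2',                  # contrast of the template to register to
          '-ofolder', 'reg_output'])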
def main(args=None): if args is None: args = sys.argv[1:] # initialization # note: mirror servers are listed in order of priority dict_url = { 'sct_example_data': ['https://osf.io/kjcgs/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180525_sct_example_data.zip'], 'sct_testing_data': ['https://osf.io/z8gaj/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180125_sct_testing_data.zip'], 'PAM50': ['https://osf.io/kc3jx/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181214_PAM50.zip'], 'MNI-Poly-AMU': ['https://osf.io/sh6h4/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170310_MNI-Poly-AMU.zip'], 'gm_model': ['https://osf.io/ugscu/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20160922_gm_model.zip'], 'optic_models': ['https://osf.io/g4fwn/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170413_optic_models.zip'], 'pmj_models': ['https://osf.io/4gufr/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20170922_pmj_models.zip'], 'binaries_debian': ['https://osf.io/z72vn/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181204_sct_binaries_linux.tar.gz'], 'binaries_centos': ['https://osf.io/97ybd/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181204_sct_binaries_linux_centos6.tar.gz'], 'binaries_osx': ['https://osf.io/zjv4c/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20181204_sct_binaries_osx.tar.gz'], 'course_hawaii17': 'https://osf.io/6exht/?action=download', 'course_paris18': ['https://osf.io/9bmn5/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180612_sct_course-paris18.zip'], 'course_london19': ['https://osf.io/4q3u7/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190121_sct_course-london19.zip'], 'deepseg_gm_models': ['https://osf.io/b9y4x/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180205_deepseg_gm_models.zip'], 'deepseg_sc_models': ['https://osf.io/avf97/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180610_deepseg_sc_models.zip'], 'deepseg_lesion_models': ['https://osf.io/eg7v9/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20180613_deepseg_lesion_models.zip'], 'c2c3_disc_models': ['https://osf.io/t97ap/?action=download', 'https://www.neuro.polymtl.ca/_media/downloads/sct/20190117_c2c3_disc_models.zip'] } # Get parser info parser = get_parser() arguments = parser.parse(args) data_name = arguments['-d'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level dest_folder = arguments.get('-o', os.path.abspath(os.curdir)) # Download data url = dict_url[data_name] tmp_file = download_data(url, verbose) # Check if folder already exists sct.printv('\nCheck if folder already exists...', verbose) if os.path.isdir(data_name): sct.printv('WARNING: Folder ' + data_name + ' already exists. Removing it...', 1, 'warning') sct.rmtree(data_name) # unzip unzip(tmp_file, dest_folder, verbose) sct.printv('\nRemove temporary file...', verbose) os.remove(tmp_file) sct.printv('Done!\n', verbose) return 0
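# Each entry of dict_url maps a dataset name to its mirrors, listed in order
# of priority; download_data() (defined elsewhere) is expected to fall back to
# the next mirror when one fails. An illustrative sketch of that fallback
# logic with the standard library only; fetch_first_mirror is a hypothetical
# name, not the SCT helper:
import shutil
import tempfile
import urllib.request


def fetch_first_mirror(urls, suffix='.zip'):
    # Accept either a single URL or a list of mirrors, as dict_url does above.
    if isinstance(urls, str):
        urls = [urls]
    last_error = None
    for url in urls:
        try:
            with urllib.request.urlopen(url, timeout=30) as response, \
                    tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
                shutil.copyfileobj(response, tmp)
                return tmp.name
        except OSError as err:  # urllib.error.URLError is a subclass of OSError
            last_error = err
    raise RuntimeError('All mirrors failed; last error: {}'.format(last_error))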
def main(args=None): import numpy as np import spinalcordtoolbox.image as msct_image # Initialization fname_mt0 = '' fname_mt1 = '' file_out = param.file_out # register = param.register # remove_temp_files = param.remove_temp_files # verbose = param.verbose # check user arguments if not args: args = sys.argv[1:] # Check input parameters parser = get_parser() arguments = parser.parse(args) fname_mt0 = arguments['-mt0'] fname_mt1 = arguments['-mt1'] remove_temp_files = int(arguments['-r']) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # Extract path/file/extension path_mt0, file_mt0, ext_mt0 = sct.extract_fname(fname_mt0) path_out, file_out, ext_out = '', file_out, ext_mt0 # create temporary folder path_tmp = sct.tmp_create() # Copying input data to tmp folder and convert to nii sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) from sct_convert import convert convert(fname_mt0, os.path.join(path_tmp, "mt0.nii"), dtype=np.float32) convert(fname_mt1, os.path.join(path_tmp, "mt1.nii"), dtype=np.float32) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # compute MTR sct.printv('\nCompute MTR...', verbose) nii_mt1 = msct_image.Image('mt1.nii') data_mt1 = nii_mt1.data data_mt0 = msct_image.Image('mt0.nii').data data_mtr = 100 * (data_mt0 - data_mt1) / data_mt0 # save MTR file nii_mtr = nii_mt1 nii_mtr.data = data_mtr nii_mtr.save("mtr.nii") # sct.run(fsloutput+'fslmaths -dt double mt0.nii -sub mt1.nii -mul 100 -div mt0.nii -thr 0 -uthr 100 mtr.nii', verbose) # come back os.chdir(curdir) # Generate output files sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(os.path.join(path_tmp, "mtr.nii"), os.path.join(path_out, file_out + ext_out)) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...') sct.rmtree(path_tmp) sct.display_viewer_syntax([fname_mt0, fname_mt1, file_out])
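# The MTR map above is computed voxel-wise as 100 * (MT0 - MT1) / MT0. A tiny
# numpy illustration of the same formula; the zero-MT0 guard is an extra
# precaution added here for the sketch, the script above divides directly:
import numpy as np


def compute_mtr_map(data_mt0, data_mt1):
    data_mt0 = data_mt0.astype(np.float64)
    with np.errstate(divide='ignore', invalid='ignore'):
        mtr = 100 * (data_mt0 - data_mt1) / data_mt0
    # Flag voxels with no MT0 signal as NaN instead of +/-inf.
    mtr[data_mt0 == 0] = np.nan
    return mtr


# Example: a drop from 1100 to 1000 gives an MTR of ~9.1%.
print(compute_mtr_map(np.array([1100.0]), np.array([1000.0])))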
def main(): """Main function.""" sct.init_sct() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) fname_image = arguments['-i'] contrast_type = arguments['-c'] ctr_algo = arguments["-centerline"] brain_bool = bool(int(arguments["-brain"])) if "-brain" not in args and contrast_type in ['t2s', 't2_ax']: brain_bool = False if '-ofolder' not in args: output_folder = os.getcwd() else: output_folder = arguments["-ofolder"] if ctr_algo == 'file' and "-file_centerline" not in args: sct.printv('Please use the flag -file_centerline to indicate the centerline filename.', 1, 'error') sys.exit(1) if "-file_centerline" in args: manual_centerline_fname = arguments["-file_centerline"] ctr_algo = 'file' else: manual_centerline_fname = None remove_temp_files = int(arguments['-r']) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level algo_config_stg = '\nMethod:' algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo) algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(brain_bool) + '\n' sct.printv(algo_config_stg) im_image = Image(fname_image) im_seg, im_labels_viewer, im_ctr = deep_segmentation_MSlesion(im_image, contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname, brain_bool=brain_bool, remove_temp_files=remove_temp_files, verbose=verbose) # Save segmentation fname_seg = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_lesionseg' + sct.extract_fname(fname_image)[2])) im_seg.save(fname_seg) if ctr_algo == 'viewer': # Save labels fname_labels = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_labels-centerline' + sct.extract_fname(fname_image)[2])) im_labels_viewer.save(fname_labels) if verbose == 2: # Save ctr fname_ctr = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_centerline' + sct.extract_fname(fname_image)[2])) im_ctr.save(fname_ctr) sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
def main(): parser = get_parser() arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help']) # Set param arguments ad inputted by user fname_in = arguments.i contrast = arguments.c # Segmentation or Centerline line if arguments.s is not None: fname_seg = arguments.s if not os.path.isfile(fname_seg): fname_seg = None sct.printv( 'WARNING: -s input file: "' + arguments.s + '" does not exist.\nDetecting PMJ without using segmentation information', 1, 'warning') else: fname_seg = None # Output Folder if arguments.ofolder is not None: path_results = arguments.ofolder if not os.path.isdir(path_results) and os.path.exists(path_results): sct.printv( "ERROR output directory %s is not a valid directory" % path_results, 1, 'error') if not os.path.exists(path_results): os.makedirs(path_results) else: path_results = '.' path_qc = arguments.qc # Remove temp folder rm_tmp = bool(arguments.r) verbose = arguments.v sct.init_sct(log_level=verbose, update=True) # Update log level # Initialize DetectPMJ detector = DetectPMJ(fname_im=fname_in, contrast=contrast, fname_seg=fname_seg, path_out=path_results, verbose=verbose) # run the extraction fname_out, tmp_dir = detector.apply() # Remove tmp_dir if rm_tmp: sct.rmtree(tmp_dir) # View results if fname_out is not None: if path_qc is not None: from spinalcordtoolbox.reports.qc import generate_qc generate_qc(fname_in, fname_seg=fname_out, args=sys.argv[1:], path_qc=os.path.abspath(path_qc), process='sct_detect_pmj') sct.display_viewer_syntax([fname_in, fname_out], colormaps=['gray', 'red'])
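# The argparse-based variant above relies on the idiom
#     parser.parse_args(args=None if sys.argv[1:] else ['--help'])
# so that running the script without any argument prints the help text and
# exits instead of failing on missing required options. Minimal standalone
# demonstration of that idiom (the parser below is a toy, not sct_detect_pmj's):
import argparse
import sys


def demo_parse():
    parser = argparse.ArgumentParser(description='help-on-empty-command-line demo')
    parser.add_argument('-i', required=True, help='input image')
    # With an empty command line, argparse receives ['--help'], prints the
    # usage message and raises SystemExit(0).
    return parser.parse_args(args=None if sys.argv[1:] else ['--help'])


if __name__ == '__main__':
    print(demo_parse())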
def main(args=None): if not args: args = sys.argv[1:] # initialize parameters param = Param() # call main function parser = get_parser() arguments = parser.parse(args) fname_data = arguments['-i'] fname_bvecs = arguments['-bvec'] average = arguments['-a'] verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level remove_temp_files = int(arguments['-r']) path_out = arguments['-ofolder'] if '-bval' in arguments: fname_bvals = arguments['-bval'] else: fname_bvals = '' if '-bvalmin' in arguments: param.bval_min = arguments['-bvalmin'] # Initialization start_time = time.time() # sct.printv(arguments) sct.printv('\nInput parameters:', verbose) sct.printv(' input file ............' + fname_data, verbose) sct.printv(' bvecs file ............' + fname_bvecs, verbose) sct.printv(' bvals file ............' + fname_bvals, verbose) sct.printv(' average ...............' + str(average), verbose) # Get full path fname_data = os.path.abspath(fname_data) fname_bvecs = os.path.abspath(fname_bvecs) if fname_bvals: fname_bvals = os.path.abspath(fname_bvals) # Extract path, file and extension path_data, file_data, ext_data = sct.extract_fname(fname_data) # create temporary folder path_tmp = sct.tmp_create(basename="dmri_separate", verbose=verbose) # copy files into tmp folder and convert to nifti sct.printv('\nCopy files into temporary folder...', verbose) ext = '.nii' dmri_name = 'dmri' b0_name = file_data + '_b0' b0_mean_name = b0_name + '_mean' dwi_name = file_data + '_dwi' dwi_mean_name = dwi_name + '_mean' if not convert(fname_data, os.path.join(path_tmp, dmri_name + ext)): sct.printv('ERROR in convert.', 1, 'error') sct.copy(fname_bvecs, os.path.join(path_tmp, "bvecs"), verbose=verbose) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # Get size of data im_dmri = Image(dmri_name + ext) sct.printv('\nGet dimensions data...', verbose) nx, ny, nz, nt, px, py, pz, pt = im_dmri.dim sct.printv('.. 
' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt), verbose) # Identify b=0 and DWI images sct.printv(fname_bvals) index_b0, index_dwi, nb_b0, nb_dwi = identify_b0(fname_bvecs, fname_bvals, param.bval_min, verbose) # Split into T dimension sct.printv('\nSplit along T dimension...', verbose) im_dmri_split_list = split_data(im_dmri, 3) for im_d in im_dmri_split_list: im_d.save() # Merge b=0 images sct.printv('\nMerge b=0...', verbose) from sct_image import concat_data l = [] for it in range(nb_b0): l.append(dmri_name + '_T' + str(index_b0[it]).zfill(4) + ext) im_out = concat_data(l, 3).save(b0_name + ext) # Average b=0 images if average: sct.printv('\nAverage b=0...', verbose) sct.run(['sct_maths', '-i', b0_name + ext, '-o', b0_mean_name + ext, '-mean', 't'], verbose) # Merge DWI l = [] for it in range(nb_dwi): l.append(dmri_name + '_T' + str(index_dwi[it]).zfill(4) + ext) im_out = concat_data(l, 3).save(dwi_name + ext) # Average DWI images if average: sct.printv('\nAverage DWI...', verbose) sct.run(['sct_maths', '-i', dwi_name + ext, '-o', dwi_mean_name + ext, '-mean', 't'], verbose) # come back os.chdir(curdir) # Generate output files fname_b0 = os.path.abspath(os.path.join(path_out, b0_name + ext_data)) fname_dwi = os.path.abspath(os.path.join(path_out, dwi_name + ext_data)) fname_b0_mean = os.path.abspath(os.path.join(path_out, b0_mean_name + ext_data)) fname_dwi_mean = os.path.abspath(os.path.join(path_out, dwi_mean_name + ext_data)) sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(os.path.join(path_tmp, b0_name + ext), fname_b0, verbose) sct.generate_output_file(os.path.join(path_tmp, dwi_name + ext), fname_dwi, verbose) if average: sct.generate_output_file(os.path.join(path_tmp, b0_mean_name + ext), fname_b0_mean, verbose) sct.generate_output_file(os.path.join(path_tmp, dwi_mean_name + ext), fname_dwi_mean, verbose) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...', verbose) sct.rmtree(path_tmp, verbose=verbose) # display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose) return fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean
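# identify_b0() above returns the indices of b=0 and diffusion-weighted
# volumes, which are then split, merged and optionally averaged. A simplified
# sketch of that split when a bvals file is available (the real helper also
# handles the bvecs-only case); the bval_min default below is an arbitrary
# placeholder mirroring param.bval_min, not SCT's actual default:
import numpy as np


def split_b0_dwi(fname_bvals, bval_min=100):
    bvals = np.loadtxt(fname_bvals).ravel()
    index_b0 = np.where(bvals <= bval_min)[0].tolist()
    index_dwi = np.where(bvals > bval_min)[0].tolist()
    return index_b0, index_dwi, len(index_b0), len(index_dwi)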
                      default_value='hausdorff_distance.txt',
                      example='my_hausdorff_dist.txt')
    parser.add_option(name="-v",
                      type_value="multiple_choice",
                      description="Verbose. 0: nothing, 1: basic, 2: extended.",
                      mandatory=False,
                      example=['0', '1', '2'],
                      default_value='1')
    return parser


########################################################################################################################
# ------------------------------------------------------ MAIN ------------------------------------------------------- #
########################################################################################################################
if __name__ == "__main__":
    sct.init_sct()
    param = Param()
    input_fname = None
    if param.debug:
        sct.printv('\n*** WARNING: DEBUG MODE ON ***\n')
    else:
        param_default = Param()
        parser = get_parser()
        arguments = parser.parse(sys.argv[1:])
        input_fname = arguments["-i"]
        input_second_fname = ''
        output_fname = 'hausdorff_distance.txt'
        resample_to = 0.1
        if "-d" in arguments:
def main(args=None): # Initialization param = Param() start_time = time.time() parser = get_parser() arguments = parser.parse(sys.argv[1:]) fname_anat = arguments['-i'] fname_centerline = arguments['-s'] if '-smooth' in arguments: sigma = arguments['-smooth'] if '-param' in arguments: param.update(arguments['-param']) if '-r' in arguments: remove_temp_files = int(arguments['-r']) verbose = int(arguments.get('-v')) sct.init_sct(log_level=verbose, update=True) # Update log level # Display arguments sct.printv('\nCheck input arguments...') sct.printv(' Volume to smooth .................. ' + fname_anat) sct.printv(' Centerline ........................ ' + fname_centerline) sct.printv(' Sigma (mm) ........................ ' + str(sigma)) sct.printv(' Verbose ........................... ' + str(verbose)) # Check that input is 3D: from spinalcordtoolbox.image import Image nx, ny, nz, nt, px, py, pz, pt = Image(fname_anat).dim dim = 4 # by default, will be adjusted later if nt == 1: dim = 3 if nz == 1: dim = 2 if dim == 4: sct.printv('WARNING: the input image is 4D, please split your image to 3D before smoothing spinalcord using :\n' 'sct_image -i ' + fname_anat + ' -split t -o ' + fname_anat, verbose, 'warning') sct.printv('4D images not supported, aborting ...', verbose, 'error') # Extract path/file/extension path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat) path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline) path_tmp = sct.tmp_create(basename="smooth_spinalcord", verbose=verbose) # Copying input data to tmp folder sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) sct.copy(fname_anat, os.path.join(path_tmp, "anat" + ext_anat)) sct.copy(fname_centerline, os.path.join(path_tmp, "centerline" + ext_centerline)) # go to tmp folder curdir = os.getcwd() os.chdir(path_tmp) # convert to nii format convert('anat' + ext_anat, 'anat.nii') convert('centerline' + ext_centerline, 'centerline.nii') # Change orientation of the input image into RPI sct.printv('\nOrient input volume to RPI orientation...') fname_anat_rpi = msct_image.Image("anat.nii") \ .change_orientation("RPI", generate_path=True) \ .save() \ .absolutepath # Change orientation of the input image into RPI sct.printv('\nOrient centerline to RPI orientation...') fname_centerline_rpi = msct_image.Image("centerline.nii") \ .change_orientation("RPI", generate_path=True) \ .save() \ .absolutepath # Straighten the spinal cord # straighten segmentation sct.printv('\nStraighten the spinal cord using centerline/segmentation...', verbose) cache_sig = sct.cache_signature(input_files=[fname_anat_rpi, fname_centerline_rpi], input_params={"x": "spline"}) cachefile = os.path.join(curdir, "straightening.cache") if sct.cache_valid(cachefile, cache_sig) and os.path.isfile(os.path.join(curdir, 'warp_curve2straight.nii.gz')) and os.path.isfile(os.path.join(curdir, 'warp_straight2curve.nii.gz')) and os.path.isfile(os.path.join(curdir, 'straight_ref.nii.gz')): # if they exist, copy them into current folder sct.printv('Reusing existing warping field which seems to be valid', verbose, 'warning') sct.copy(os.path.join(curdir, 'warp_curve2straight.nii.gz'), 'warp_curve2straight.nii.gz') sct.copy(os.path.join(curdir, 'warp_straight2curve.nii.gz'), 'warp_straight2curve.nii.gz') sct.copy(os.path.join(curdir, 'straight_ref.nii.gz'), 'straight_ref.nii.gz') # apply straightening sct.run(['sct_apply_transfo', '-i', fname_anat_rpi, '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 
'anat_rpi_straight.nii', '-x', 'spline'], verbose) else: sct.run(['sct_straighten_spinalcord', '-i', fname_anat_rpi, '-o', 'anat_rpi_straight.nii', '-s', fname_centerline_rpi, '-x', 'spline', '-param', 'algo_fitting='+param.algo_fitting], verbose) sct.cache_save(cachefile, cache_sig) # move warping fields locally (to use caching next time) sct.copy('warp_curve2straight.nii.gz', os.path.join(curdir, 'warp_curve2straight.nii.gz')) sct.copy('warp_straight2curve.nii.gz', os.path.join(curdir, 'warp_straight2curve.nii.gz')) # Smooth the straightened image along z sct.printv('\nSmooth the straightened image...') sigma_smooth = ",".join([str(i) for i in sigma]) sct_maths.main(args=['-i', 'anat_rpi_straight.nii', '-smooth', sigma_smooth, '-o', 'anat_rpi_straight_smooth.nii', '-v', '0']) # Apply the reversed warping field to get back the curved spinal cord sct.printv('\nApply the reversed warping field to get back the curved spinal cord...') sct.run(['sct_apply_transfo', '-i', 'anat_rpi_straight_smooth.nii', '-o', 'anat_rpi_straight_smooth_curved.nii', '-d', 'anat.nii', '-w', 'warp_straight2curve.nii.gz', '-x', 'spline'], verbose) # replace zeroed voxels by original image (issue #937) sct.printv('\nReplace zeroed voxels by original image...', verbose) nii_smooth = Image('anat_rpi_straight_smooth_curved.nii') data_smooth = nii_smooth.data data_input = Image('anat.nii').data indzero = np.where(data_smooth == 0) data_smooth[indzero] = data_input[indzero] nii_smooth.data = data_smooth nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii') # come back os.chdir(curdir) # Generate output file sct.printv('\nGenerate output file...') sct.generate_output_file(os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"), file_anat + '_smooth' + ext_anat) # Remove temporary files if remove_temp_files == 1: sct.printv('\nRemove temporary files...') sct.rmtree(path_tmp) # Display elapsed time elapsed_time = time.time() - start_time sct.printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n') sct.display_viewer_syntax([file_anat, file_anat + '_smooth'], verbose=verbose)
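# After warping the smoothed volume back to native space, voxels that came out
# as zero are filled back in from the original image (issue #937 referenced
# above). The replacement itself boils down to the small numpy operation
# illustrated here in isolation:
import numpy as np


def backfill_zeros(data_smooth, data_original):
    filled = data_smooth.copy()
    indzero = np.where(filled == 0)
    filled[indzero] = data_original[indzero]
    return filled


# Example: the zeroed first voxel takes back its original value.
print(backfill_zeros(np.array([0.0, 2.5, 3.0]), np.array([1.0, 2.0, 3.0])))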
def main(args=None): """ Main function :param args: :return: """ # Get parser args if args is None: args = None if sys.argv[1:] else ['--help'] parser = get_parser() arguments = parser.parse_args(args=args) input_filename = arguments.i centerline_file = arguments.s sc_straight = SpinalCordStraightener(input_filename, centerline_file) if arguments.dest is not None: sc_straight.use_straight_reference = True sc_straight.centerline_reference_filename = str(arguments.dest) if arguments.ldisc_input is not None: if not sc_straight.use_straight_reference: sct.printv( 'Warning: discs position are not taken into account if reference is not provided.' ) else: sc_straight.discs_input_filename = str(arguments.ldisc_input) sc_straight.precision = 4.0 if arguments.ldisc_dest is not None: if not sc_straight.use_straight_reference: sct.printv( 'Warning: discs position are not taken into account if reference is not provided.' ) else: sc_straight.discs_ref_filename = str(arguments.ldisc_dest) sc_straight.precision = 4.0 # Handling optional arguments sc_straight.remove_temp_files = arguments.r sc_straight.interpolation_warp = arguments.x sc_straight.output_filename = arguments.o sc_straight.path_output = arguments.ofolder path_qc = arguments.qc verbose = arguments.v sct.init_sct(log_level=verbose, update=True) # Update log level sc_straight.verbose = verbose # if "-cpu-nb" in arguments: # sc_straight.cpu_number = arguments.cpu-nb) if arguments.disable_straight2curved: sc_straight.straight2curved = False if arguments.disable_curved2straight: sc_straight.curved2straight = False if arguments.speed_factor: sc_straight.speed_factor = arguments.speed_factor if arguments.xy_size: sc_straight.xy_size = arguments.xy_size sc_straight.param_centerline = ParamCenterline( algo_fitting=arguments.centerline_algo, smooth=arguments.centerline_smooth) if arguments.param is not None: params_user = arguments.param # update registration parameters for param in params_user: param_split = param.split('=') if param_split[0] == 'precision': sc_straight.precision = float(param_split[1]) if param_split[0] == 'threshold_distance': sc_straight.threshold_distance = float(param_split[1]) if param_split[0] == 'accuracy_results': sc_straight.accuracy_results = int(param_split[1]) if param_split[0] == 'template_orientation': sc_straight.template_orientation = int(param_split[1]) fname_straight = sc_straight.straighten() sct.printv( "\nFinished! Elapsed time: {} s".format(sc_straight.elapsed_time), verbose) # Generate QC report if path_qc is not None: path_qc = os.path.abspath(path_qc) qc_dataset = arguments.qc_dataset qc_subject = arguments.qc_subject generate_qc(fname_straight, args=arguments, path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject, process=os.path.basename(__file__.strip('.py'))) sct.display_viewer_syntax([fname_straight], verbose=verbose)