def main():
    # Initialization
    fname_input = ''
    fname_segmentation = ''

    if param.debug:
        printv('\n*** WARNING: DEBUG MODE ON ***\n')
        fname_input = ''
        fname_segmentation = sct_test_path('t2', 't2_seg.nii.gz')
    else:
        # Check input param
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'hi:t:')
        except getopt.GetoptError as err:
            logger.error(str(err))
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i',):
                fname_input = arg
            elif opt in ('-t',):
                fname_segmentation = arg

    # display usage if a mandatory argument is not provided
    if fname_segmentation == '' or fname_input == '':
        usage()

    # check existence of input files
    check_file_exist(fname_input)
    check_file_exist(fname_segmentation)

    # read nifti input file and get the 3D array of voxel values
    img = nibabel.load(fname_input)
    data = img.get_fdata()

    # read nifti segmentation file and get the 3D array of voxel values
    img_seg = nibabel.load(fname_segmentation)
    data_seg = img_seg.get_fdata()

    # check that every non-zero voxel of the input falls inside the segmentation
    X, Y, Z = (data > 0).nonzero()
    status = 0
    for i in range(len(X)):
        if data_seg[X[i], Y[i], Z[i]] == 0:
            status = 1
            break

    if status != 0:
        printv('ERROR: detected point is not in segmentation', 1, 'warning')
    else:
        printv('OK: detected point is in segmentation')

    sys.exit(status)
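
# Hedged example (not part of the original script): the voxel-by-voxel loop in main()
# can also be written as a single vectorized NumPy test. `data` and `data_seg` stand for
# the arrays loaded above; the helper name is purely illustrative.
def _example_vectorized_inclusion_check(data, data_seg):
    import numpy as np
    # 0 if every non-zero voxel of `data` lies inside the segmentation, 1 otherwise
    return 0 if np.all(data_seg[data > 0] != 0) else 1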
def __call__(self, parser, namespace, values, option_string=None):
    file_label = os.path.join(namespace.f, Param().file_info_label)
    check_file_exist(file_label, 0)
    with open(file_label, 'r') as default_info_label:
        label_references = default_info_label.read()
    txt_label = (
        f"List of labels in {file_label}:\n"
        f"--------------------------------------------------------------------------------------\n"
        f"{label_references}"
        f"--------------------------------------------------------------------------------------\n")
    print(txt_label)
    parser.exit()
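
# Hedged sketch (not part of the original module): one way a zero-argument custom action
# like the __call__ above is typically registered on a parser. `action_cls` stands for the
# Action class that owns __call__; the '-list-labels' flag name and the '-f' default are
# assumptions for illustration only.
def _example_parser_with_list_action(action_cls):
    import argparse
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', default=os.path.join('label', 'atlas'),
                        help="Folder containing the label description file (read via namespace.f).")
    parser.add_argument('-list-labels', nargs=0, action=action_cls,
                        help="Print the labels listed in the description file, then exit.")
    return parser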
def main(argv=None):
    """
    Main function

    :param argv: command-line arguments (uses sys.argv if None)
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    param = Param()

    # Initialization
    fname_warp_final = ''  # concatenated transformations
    if arguments.o is not None:
        fname_warp_final = arguments.o
    fname_dest = arguments.d
    fname_warp_list = arguments.w
    warpinv_filename = arguments.winv

    # Parse list of warping fields
    printv('\nParse list of warping fields...', verbose)
    use_inverse = []
    fname_warp_list_invert = []
    for idx_warp, path_warp in enumerate(fname_warp_list):
        # Check if this transformation should be inverted
        if path_warp in warpinv_filename:
            use_inverse.append('-i')
            fname_warp_list_invert += [[use_inverse[idx_warp], fname_warp_list[idx_warp]]]
        else:
            use_inverse.append('')
            fname_warp_list_invert += [[path_warp]]

        path_warp = fname_warp_list[idx_warp]
        if path_warp.endswith((".nii", ".nii.gz")) \
                and Image(fname_warp_list[idx_warp]).header.get_intent()[0] != 'vector':
            raise ValueError("Displacement field in {} is invalid: should be encoded"
                             " in a 5D file with vector intent code"
                             " (see https://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h)"
                             .format(path_warp))

    # check if destination file is 3d
    check_dim(fname_dest, dim_lst=[3])

    # Reverse the order of the warp list, because sct_WarpImageMultiTransform concatenates in the reverse order
    fname_warp_list_invert.reverse()
    fname_warp_list_invert = functools.reduce(lambda x, y: x + y, fname_warp_list_invert)

    # Check file existence
    printv('\nCheck file existence...', verbose)
    check_file_exist(fname_dest, verbose)
    for i in range(len(fname_warp_list)):
        check_file_exist(fname_warp_list[i], verbose)

    # Get output folder and file name
    if fname_warp_final == '':
        path_out, file_out, ext_out = extract_fname(param.fname_warp_final)
    else:
        path_out, file_out, ext_out = extract_fname(fname_warp_final)

    # Check dimension of destination data (cf. issue #1419, #1429)
    im_dest = Image(fname_dest)
    if im_dest.dim[2] == 1:
        dimensionality = '2'
    else:
        dimensionality = '3'

    cmd = ['isct_ComposeMultiTransform', dimensionality, 'warp_final' + ext_out, '-R', fname_dest] + fname_warp_list_invert
    _, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)

    # check if output was generated
    if not os.path.isfile('warp_final' + ext_out):
        raise ValueError(f"Warping field was not generated! {output}")

    # Generate output files
    printv('\nGenerate output files...', verbose)
    generate_output_file('warp_final' + ext_out, os.path.join(path_out, file_out + ext_out))
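
# Hedged helper (not part of the original script): the intent-code check above relies on
# NIfTI header metadata. This sketch builds a toy 5D displacement field tagged with the
# 'vector' intent, which is the layout main() accepts for warping fields. The output file
# name is a placeholder.
def _example_make_vector_intent_field(fname_out='warp_toy.nii.gz'):
    import numpy as np
    import nibabel as nib
    data = np.zeros((4, 4, 4, 1, 3), dtype=np.float32)  # x, y, z, t, vector components
    img = nib.Nifti1Image(data, affine=np.eye(4))
    img.header.set_intent('vector')  # so header.get_intent()[0] == 'vector'
    nib.save(img, fname_out)
    return fname_out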
def create_mask(param):
    # parse argument for method
    method_type = param.process[0]
    # check method val
    if method_type != 'center':
        method_val = param.process[1]

    # check existence of input files
    if method_type == 'centerline':
        check_file_exist(method_val, param.verbose)

    # Extract path/file/extension
    path_data, file_data, ext_data = extract_fname(param.fname_data)

    # Get output folder and file name
    if param.fname_out == '':
        param.fname_out = os.path.abspath(param.file_prefix + file_data + ext_data)

    path_tmp = tmp_create(basename="create_mask")

    printv('\nOrientation:', param.verbose)
    orientation_input = Image(param.fname_data).orientation
    printv(' ' + orientation_input, param.verbose)

    # copy input data to tmp folder and re-orient to RPI
    Image(param.fname_data).change_orientation("RPI").save(os.path.join(path_tmp, "data_RPI.nii"))
    if method_type == 'centerline':
        Image(method_val).change_orientation("RPI").save(os.path.join(path_tmp, "centerline_RPI.nii"))
    if method_type == 'point':
        Image(method_val).change_orientation("RPI").save(os.path.join(path_tmp, "point_RPI.nii"))

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # Get dimensions of data
    im_data = Image('data_RPI.nii')
    nx, ny, nz, nt, px, py, pz, pt = im_data.dim
    printv('\nDimensions:', param.verbose)
    printv(im_data.dim, param.verbose)

    # in case user input 4d data
    if nt != 1:
        printv('WARNING in ' + os.path.basename(__file__)
               + ': Input image is 4d but output mask will be 3D from first time slice.',
               param.verbose, 'warning')
        # extract first volume to have 3d reference
        nii = empty_like(Image('data_RPI.nii'))
        data3d = nii.data[:, :, :, 0]
        nii.data = data3d
        nii.save('data_RPI.nii')

    if method_type == 'coord':
        # parse to get coordinate
        coord = [x for x in map(int, method_val.split('x'))]

    if method_type == 'point':
        # extract coordinate of point
        printv('\nExtract coordinate of point...', param.verbose)
        coord = Image("point_RPI.nii").getNonZeroCoordinates()

    if method_type == 'center':
        # set coordinate at center of FOV
        coord = np.round(float(nx) / 2), np.round(float(ny) / 2)

    if method_type == 'centerline':
        # get name of centerline from user argument
        fname_centerline = 'centerline_RPI.nii'
    else:
        # generate volume with line along Z at coordinates 'coord'
        printv('\nCreate line...', param.verbose)
        fname_centerline = create_line(param, 'data_RPI.nii', coord, nz)

    # create mask
    printv('\nCreate mask...', param.verbose)
    centerline = nibabel.load(fname_centerline)  # open centerline
    hdr = centerline.header  # get header
    hdr.set_data_dtype('uint8')  # set image type to uint8
    spacing = hdr.structarr['pixdim']
    data_centerline = np.asanyarray(centerline.dataobj)  # get centerline data
    # if data is 2D, reshape with empty third dimension
    if len(data_centerline.shape) == 2:
        data_centerline_shape = list(data_centerline.shape)
        data_centerline_shape.append(1)
        data_centerline = data_centerline.reshape(data_centerline_shape)
    z_centerline_not_null = [iz for iz in range(nz) if data_centerline[:, :, iz].any()]

    # get center of mass of the centerline
    cx = [0] * nz
    cy = [0] * nz
    for iz in range(nz):
        if iz in z_centerline_not_null:
            cx[iz], cy[iz] = ndimage.center_of_mass(np.array(data_centerline[:, :, iz]))

    # create 2d masks
    im_list = []
    for iz in range(nz):
        if iz not in z_centerline_not_null:
            im_list.append(Image(data_centerline[:, :, iz], hdr=hdr))
        else:
            center = np.array([cx[iz], cy[iz]])
            mask2d = create_mask2d(param, center, param.shape, param.size, im_data=im_data)
            im_list.append(Image(mask2d, hdr=hdr))
    im_out = concat_data(im_list, dim=2).save('mask_RPI.nii.gz')

    im_out.change_orientation(orientation_input)
    im_out.header = Image(param.fname_data).header
    im_out.save(param.fname_out)

    # come back to previous directory
    os.chdir(curdir)

    # Remove temporary files
    if param.remove_temp_files == 1:
        printv('\nRemove temporary files...', param.verbose)
        rmtree(path_tmp)

    display_viewer_syntax([param.fname_data, param.fname_out], colormaps=['gray', 'red'], opacities=['', '0.5'])
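
# Hedged usage sketch (not part of the original module): create_mask() only reads the
# Param fields assigned below. All file names and values here are placeholders, and the
# shape/size keywords are assumptions about what create_mask2d() accepts.
def _example_create_mask_call():
    param = Param()
    param.fname_data = 't2.nii.gz'        # placeholder input image
    param.fname_out = 'mask_t2.nii.gz'    # placeholder output mask
    param.process = ['center']            # or ['coord', '30x25'], ['point', 'pt.nii.gz'], ['centerline', 'ctl.nii.gz']
    param.shape = 'cylinder'              # assumed shape keyword
    param.size = '41'                     # assumed size value
    param.verbose = 1
    param.remove_temp_files = 1
    create_mask(param)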
def get_parser():
    param_default = Param()

    # Read the .txt file referencing the labels (for the extended usage description)
    file_label = os.path.join(param_default.path_label, param_default.file_info_label)
    check_file_exist(file_label, 0)
    with open(file_label, 'r') as default_info_label:
        label_references = default_info_label.read()

    description = (
        f"This program extracts metrics (e.g., DTI or MTR) within labels. Labels could be a single file or "
        f"a folder generated with 'sct_warp_template' and containing multiple label files and a label "
        f"description file (info_label.txt). The labels should be in the same space coordinates as the "
        f"input image.\n"
        f"\n"
        f"To list white matter atlas labels: {os.path.basename(__file__)} -f "
        f"{os.path.join(__data_dir__, 'atlas')}\n"
        f"\n"
        f"To compute FA within labels 0, 2 and 3 within vertebral levels C2 to C7 using the binary method: "
        f"{os.path.basename(__file__)} -i dti_FA.nii.gz -f label/atlas -l 0,2,3 -vert 2:7 -method bin\n"
        f"\n"
        f"To compute the average MTR in a region defined by a single label file (could be a binary or 0-1 "
        f"weighted mask) between slices 1 and 4: {os.path.basename(__file__)} -i mtr.nii.gz -f "
        f"my_mask.nii.gz -z 1:4 -method wa\n")
    if label_references != '':
        description += (
            f"\nList of labels in {file_label}:\n"
            f"--------------------------------------------------------------------------------------\n"
            f"{label_references}\n"
            f"--------------------------------------------------------------------------------------\n")

    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=SmartFormatter,
        add_help=None,
        prog=os.path.splitext(os.path.basename(__file__))[0])

    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Image file to extract metrics from. Example: FA.nii.gz")

    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h", "--help",
        action="help",
        help="Show this help message and exit.")
    optional.add_argument(
        '-f',
        metavar=Metavar.folder,
        default=os.path.join("label", "atlas"),
        help=(f"Single label file, or folder that contains WM tract labels. "
              f"Example: {os.path.join(__data_dir__, 'atlas')}"))
    optional.add_argument(
        '-l',
        metavar=Metavar.str,
        default='',
        help="Label IDs to extract the metric from. Default = all labels. Separate labels with ','. To select a "
             "group of consecutive labels use ':'. Example: 1:3 is equivalent to 1,2,3. Maximum Likelihood (or MAP) "
             "is computed using all tracts, but only values of the selected tracts are reported.")
    optional.add_argument(
        '-method',
        choices=['ml', 'map', 'wa', 'bin', 'max'],
        default=param_default.method,
        help="R|Method to extract metrics.\n"
             "  - ml: maximum likelihood (only use with well-defined regions and low noise)\n"
             "    N.B. ONLY USE THIS METHOD WITH THE WHITE MATTER ATLAS! The sum of all tracts should be 1 in "
             "all voxels (the algorithm doesn't normalize the atlas).\n"
             "  - map: maximum a posteriori. Mean priors are estimated by maximum likelihood within three clusters "
             "(white matter, gray matter and CSF). Tract and noise variance are set with flag -param.\n"
             "    N.B. ONLY USE THIS METHOD WITH THE WHITE MATTER ATLAS! The sum of all tracts should be 1 in "
             "all voxels (the algorithm doesn't normalize the atlas).\n"
             "  - wa: weighted average\n"
             "  - bin: binarize mask (threshold=0.5)\n"
             "  - max: for each z-slice of the input data, extract the maximum value.")
    optional.add_argument(
        '-append',
        type=int,
        choices=(0, 1),
        default=0,
        help="Whether to append results as a new line in the output csv file instead of overwriting it. "
             "0 = no, 1 = yes")
    optional.add_argument(
        '-combine',
        type=int,
        choices=(0, 1),
        default=0,
        help="Whether to combine multiple labels into a single estimation. 0 = no, 1 = yes")
    optional.add_argument(
        '-o',
        metavar=Metavar.file,
        default=param_default.fname_output,
        help="R|File name of the output result file collecting the metric estimation results. Include the '.csv' "
             "file extension in the file name. Example: extract_metric.csv")
    optional.add_argument(
        '-output-map',
        metavar=Metavar.file,
        default='',
        help="File name for an image consisting of the atlas labels multiplied by the estimated metric values, "
             "yielding the metric value map. Useful to assess the metric estimation and especially partial volume "
             "effects.")
    optional.add_argument(
        '-z',
        metavar=Metavar.str,
        default=param_default.slices_of_interest,
        help="R|Slice range to estimate the metric from. First slice is 0. Example: 5:23\n"
             "You can also select specific slices using commas. Example: 0,2,3,5,12")
    optional.add_argument(
        '-perslice',
        type=int,
        choices=(0, 1),
        default=param_default.perslice,
        help="R|Whether to output one metric per slice instead of a single output metric. 0 = no, 1 = yes.\n"
             "Please note that when methods ml or map are used, outputting a single metric per slice and then "
             "averaging them all is not the same as outputting a single metric at once across all slices.")
    optional.add_argument(
        '-vert',
        metavar=Metavar.str,
        default=param_default.vertebral_levels,
        help="Vertebral levels to estimate the metric across. Example: 2:9 (for C2 to T2)")
    optional.add_argument(
        '-vertfile',
        metavar=Metavar.file,
        default="./label/template/PAM50_levels.nii.gz",
        help="Vertebral labeling file. Only use with flag -vert. Example: PAM50_levels.nii.gz")
    optional.add_argument(
        '-perlevel',
        type=int,
        metavar=Metavar.int,
        default=0,
        help="R|Whether to output one metric per vertebral level instead of a single output metric. 0 = no, 1 = yes.\n"
             "Please note that this flag needs to be used with the -vert option.")
    optional.add_argument(
        '-v',
        choices=("0", "1"),
        default="1",
        help="Verbose. 0 = nothing, 1 = expanded")

    advanced = parser.add_argument_group("\nFOR ADVANCED USERS")
    advanced.add_argument(
        '-param',
        metavar=Metavar.str,
        default='',
        help="R|Advanced parameters for the 'map' method. All items must be listed (separated with commas).\n"
             "  - #1: standard deviation of metrics across labels\n"
             "  - #2: standard deviation of the noise (assumed Gaussian)")
    advanced.add_argument(
        '-fix-label',
        metavar=Metavar.list,
        type=list_type(',', str),
        default='',
        help="When using ML or MAP estimations, if you do not want to estimate the metric in one label and fix its "
             "value to avoid effects on other labels, specify <label_ID>,<metric_value>. Example: -fix-label 36,0 "
             "(fix the CSF value)")
    advanced.add_argument(
        '-norm-file',
        metavar=Metavar.file,
        default='',
        help="Filename of the label by which the user wants to normalize.")
    advanced.add_argument(
        '-norm-method',
        choices=['sbs', 'whole'],
        default='',
        help="R|Method to use for normalization:\n"
             "  - sbs: normalization slice-by-slice\n"
             "  - whole: normalization by the metric value in the whole label for all slices.")
    advanced.add_argument(
        '-mask-weighted',
        metavar=Metavar.file,
        default='',
        help="Nifti mask to weight each voxel during ML or MAP estimation. Example: PAM50_wm.nii.gz")
    advanced.add_argument(
        '-discard-neg-val',
        choices=('0', '1'),
        default='0',
        help="Whether to discard voxels with negative value when computing metrics statistics. 0 = no, 1 = yes")

    return parser
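
# Hedged sketch (not part of the original module): exercising the parser defined above
# with placeholder file names, using only flags declared in get_parser().
def _example_parse_extract_metric_args():
    parser = get_parser()
    args = parser.parse_args(['-i', 'mtr.nii.gz', '-f', 'my_mask.nii.gz',
                              '-method', 'wa', '-z', '1:4', '-o', 'mtr_in_mask.csv'])
    return args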
def main():
    # Initialization
    fname_data = ''
    interp_factor = param.interp_factor
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    suffix = param.suffix
    smoothing_sigma = param.smoothing_sigma

    # start timer
    start_time = time.time()

    # Parameters for debug mode
    if param.debug:
        fname_data = os.path.join(__data_dir__, 'sct_testing_data', 't2', 't2_seg.nii.gz')
        remove_temp_files = 0
        param.mask_size = 10
    else:
        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'hi:v:r:s:')
        except getopt.GetoptError:
            usage()
            raise SystemExit(2)
        if not opts:
            usage()
            raise SystemExit(2)
        for opt, arg in opts:
            if opt == '-h':
                usage()
                return
            elif opt in ('-i',):
                fname_data = arg
            elif opt in ('-r',):
                remove_temp_files = int(arg)
            elif opt in ('-s',):
                smoothing_sigma = arg
            elif opt in ('-v',):
                verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_data == '':
        usage()
        raise SystemExit(2)

    # printv(arguments)
    printv('\nCheck parameters:')
    printv(' segmentation ........... ' + fname_data)
    printv(' interp factor .......... ' + str(interp_factor))
    printv(' smoothing sigma ........ ' + str(smoothing_sigma))

    # check existence of input files
    printv('\nCheck existence of input files...')
    check_file_exist(fname_data, verbose)

    # Extract path, file and extension
    path_data, file_data, ext_data = extract_fname(fname_data)

    path_tmp = tmp_create(basename="binary_to_trilinear")

    from sct_convert import convert
    printv('\nCopying input data to tmp folder and converting to nii...', param.verbose)
    convert(fname_data, os.path.join(path_tmp, "data.nii"))

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # Get dimensions of data
    printv('\nGet dimensions of data...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image('data.nii').dim
    printv('.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # upsample data
    printv('\nUpsample data...', verbose)
    run_proc(["sct_resample",
              "-i", "data.nii",
              "-x", "linear",
              "-vox", str(nx * interp_factor) + 'x' + str(ny * interp_factor) + 'x' + str(nz * interp_factor),
              "-o", "data_up.nii"], verbose)

    # Smooth along centerline
    printv('\nSmooth along centerline...', verbose)
    run_proc(["sct_smooth_spinalcord",
              "-i", "data_up.nii",
              "-s", "data_up.nii",
              "-smooth", str(smoothing_sigma),
              "-r", str(remove_temp_files),
              "-v", str(verbose)], verbose)

    # downsample data
    printv('\nDownsample data...', verbose)
    run_proc(["sct_resample",
              "-i", "data_up_smooth.nii",
              "-x", "linear",
              "-vox", str(nx) + 'x' + str(ny) + 'x' + str(nz),
              "-o", "data_up_smooth_down.nii"], verbose)

    # come back to previous directory
    os.chdir(curdir)

    # Generate output files
    printv('\nGenerate output files...')
    fname_out = generate_output_file(os.path.join(path_tmp, "data_up_smooth_down.nii"),
                                     file_data + suffix + ext_data)

    # Delete temporary files
    if remove_temp_files == 1:
        printv('\nRemove temporary files...')
        rmtree(path_tmp)

    # display elapsed time
    elapsed_time = time.time() - start_time
    printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's')

    # to view results
    printv('\nTo view results, type:')
    printv('fslview ' + file_data + ' ' + file_data + suffix + ' &\n')