def __init__(self, arguments):
    self.dlabel_in = NibInput(arguments['--input-dlabel'])
    self.output_nifti = self.__get_output_path(arguments['--output-nifti'])
    self.volume_in = NibInput(arguments['--volume-template'])
    self.use_nearest = self.__get_surf_method(arguments['--use-nearest-vertex'])
    self.surfs = self.__get_surfs(arguments['--left-mid-surface'])
    self.map_number = str(arguments['--map-number'])
    # cifti_info returns a bool indicating whether or not there are
    # volume-space labels to deal with
    self.dlabel_in.has_volume = cifti_info(self.dlabel_in.path)['maps_to_volume']
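# A minimal sketch (not ciftify's actual cifti_info implementation) of how the
# 'maps_to_volume' check above could be done with nibabel alone: inspect the
# dlabel's brain-model axis and ask whether any of its entries are voxels
# rather than surface vertices. The helper name dlabel_maps_to_volume is
# hypothetical.
import nibabel as nib

def dlabel_maps_to_volume(dlabel_path):
    '''Return True if the dlabel file contains any volume (voxel) brain models.'''
    img = nib.load(dlabel_path)
    brain_models = img.header.get_axis(1)  # axis 0 holds labels, axis 1 brain models
    return bool(brain_models.volume_mask.any())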
def main():
    ''''''
    arguments = docopt(__doc__)
    funcfile = arguments['<func.nii.gz>']
    outputname = arguments['<output.nii.gz>']
    min_low_freq = arguments['--min-low-freq']
    max_low_freq = arguments['--max-low-freq']
    min_total_freq = arguments['--min-total-freq']
    max_total_freq = arguments['--max-total-freq']
    maskfile = arguments['--mask-file']
    calc_alff = arguments['--calc-alff']

    logger.setLevel(logging.WARNING)
    if arguments['--debug']:
        logger.setLevel(logging.DEBUG)
        logging.getLogger('ciftify').setLevel(logging.DEBUG)

    logger.info(arguments)

    with ciftify.utils.TempDir() as tmpdir:

        func = NibInput(funcfile)

        # IF INPUT IS A CIFTI FILE
        # Convert the cifti input (and mask, if given) to nifti for processing
        if func.type == "cifti":
            inputfile = convert_cifti_to_nifti(func.path, tmpdir)
            maskinput = (convert_cifti_to_nifti(maskfile, tmpdir)
                         if maskfile else None)
        # IF INPUT IS A NIFTI FILE
        # Use the input funcfile (and mask, if given) as-is
        elif func.type == "nifti":
            inputfile = func.path
            maskinput = maskfile if maskfile else None
        else:
            logger.critical("Could not read <func.nii.gz> as nifti or cifti file")
            sys.exit(1)

        falff_nifti_output = calc_nifti(inputfile, maskinput, min_low_freq,
                                        max_low_freq, min_total_freq,
                                        max_total_freq, tmpdir, calc_alff)

        # Convert the nifti output file back to a cifti output file
        if func.type == "cifti":
            convert_nifti_to_cifti(falff_nifti_output, funcfile, outputname)

        # If funcfile was a nifti file, save the nifti result to outputname
        if func.type == "nifti":
            run("mv {} {}".format(falff_nifti_output, outputname))
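# A minimal sketch, assuming Connectome Workbench (wb_command) is on the PATH,
# of what convert_cifti_to_nifti / convert_nifti_to_cifti helpers like the ones
# called above could look like; ciftify's own helpers may differ. The round
# trip uses wb_command's -cifti-convert to unroll a cifti matrix into a "fake"
# nifti and to map the result back onto the original cifti grid.
import os
import subprocess

def convert_cifti_to_nifti(cifti_path, tmpdir):
    '''Unroll a cifti file into a nifti matrix so nifti-only tools can read it.'''
    nifti_path = os.path.join(tmpdir, os.path.basename(cifti_path) + '.nii.gz')
    subprocess.check_call(['wb_command', '-cifti-convert', '-to-nifti',
                           cifti_path, nifti_path])
    return nifti_path

def convert_nifti_to_cifti(nifti_path, cifti_template, cifti_out):
    '''Map a fake-nifti result back onto the grid of the original cifti input.'''
    subprocess.check_call(['wb_command', '-cifti-convert', '-from-nifti',
                           nifti_path, cifti_template, cifti_out])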
def __get_input_file(self, user_func_input):
    '''check that input is readable and either cifti or nifti'''
    func = NibInput(user_func_input)
    if func.type == "cifti":
        if not func.path.endswith(".dtseries.nii"):
            logger.warning(
                'cifti input file should be a .dtseries.nii file {} given'.format(
                    func.path))
    if func.type not in ("cifti", "nifti"):
        logger.error(
            "ciftify_clean_img only works for nifti or cifti files {} given".format(
                func.path))
        sys.exit(1)
    return func
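# For context, a minimal sketch of the NibInput-style type detection relied on
# throughout this section; SimpleNibInput is a hypothetical stand-in for
# ciftify's NibInput class, which does more than classify a path by extension.
import os

class SimpleNibInput(object):
    '''Record a path, its base name, and whether it looks like cifti or nifti.'''
    def __init__(self, path):
        self.path = path
        basename = os.path.basename(path)
        if basename.endswith(('.dtseries.nii', '.dscalar.nii', '.dlabel.nii')):
            self.type = 'cifti'
            self.base = basename.rsplit('.', 2)[0]
        elif basename.endswith(('.nii', '.nii.gz')):
            self.type = 'nifti'
            self.base = basename.split('.nii')[0]
        else:
            self.type = None
            self.base = basename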
def run_ciftify_dlabel_report(arguments, tmpdir):

    dlabel = NibInput(arguments['<clust.dlabel.nii>'])
    dlabel_map_number = int(arguments['--map-number'])
    outputcsv = arguments['--outputcsv']
    logger.info('Outputcsv: {}'.format(outputcsv))

    surf_settings = ciftify.report.CombinedSurfaceSettings(arguments, tmpdir)
    atlas_settings = ciftify.report.define_atlas_settings()

    ## load the data
    label_data, label_dict = ciftify.io.load_LR_label(dlabel.path,
                                                      dlabel_map_number)

    ## define the outputcsv
    if not outputcsv:
        outputname = '{}_label_report.csv'.format(dlabel.base)
        outputcsv = os.path.join(os.path.dirname(dlabel.path), outputname)
    ciftify.utils.check_output_writable(outputcsv, exit_on_error=True)
    logger.info('Output table: {}'.format(outputcsv))

    ## load the vertex areas
    surf_va_LR = load_LR_vertex_areas(surf_settings)

    ## assert that the dimensions match
    if not (label_data.shape[0] == surf_va_LR.shape[0]):
        logger.error('label file vertices {} not equal to vertex areas {}'
                     ''.format(label_data.shape[0], surf_va_LR.shape[0]))
        sys.exit(1)

    ## use the label dict to start the report dataframe
    df = pd.DataFrame.from_dict(label_dict, orient="index")
    df['label_idx'] = df.index
    df = df.rename(index=str, columns={0: "label_name"})

    # calculate a column of the surface area for row ROIs
    df['area'] = -999
    for pd_idx in df.index.get_values():
        df.loc[pd_idx, 'area'] = ciftify.report.calc_cluster_area(
            pd_idx, label_data, surf_va_LR)

    for atlas in atlas_settings.values():
        df = report_atlas_overlap(df, label_data, atlas, surf_va_LR,
                                  min_percent_overlap=5)

    df.to_csv(outputcsv)
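# A minimal sketch, assuming wb_command and nibabel are available, of how a
# load_LR_vertex_areas() helper like the one called above could work: compute
# per-vertex surface areas for each hemisphere and stack left over right so the
# vector lines up with the dlabel's vertex ordering. The argument names
# (left_surface, right_surface, tmpdir) are illustrative, not ciftify's actual
# signature, which takes a surface-settings object.
import os
import subprocess
import nibabel as nib
import numpy as np

def load_LR_vertex_areas(left_surface, right_surface, tmpdir):
    '''Return one vector of vertex areas: left hemisphere then right.'''
    areas = []
    for surf in (left_surface, right_surface):
        metric_out = os.path.join(tmpdir,
                                  os.path.basename(surf) + '_va.shape.gii')
        subprocess.check_call(['wb_command', '-surface-vertex-areas',
                               surf, metric_out])
        areas.append(nib.load(metric_out).darrays[0].data)
    return np.concatenate(areas)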
def run_ciftify_statclust_report(arguments, tmpdir):

    dscalar_in = NibInput(arguments['<func.dscalar.nii>'])
    surf_distance = arguments['--surface-distance']
    outputbase = arguments['--outputbase']
    dont_output_clusters = arguments['--no-cluster-dlabel']
    output_peaktable = arguments['--output-peaks']

    surf_settings = ciftify.report.CombinedSurfaceSettings(arguments, tmpdir)
    atlas_settings = ciftify.report.define_atlas_settings()

    ## if no outputbase is given, create it from the input dscalar map
    if not outputbase:
        outputbase = os.path.join(os.path.dirname(dscalar_in.path),
                                  dscalar_in.base)
    ciftify.utils.check_output_writable(outputbase, exit_on_error=True)

    clusters_dscalar = clusterise_dscalar_input(dscalar_in.path, arguments,
                                                surf_settings, tmpdir)

    ## write the cluster dlabel to the tmpdir if it was not requested as output
    if dont_output_clusters:
        cluster_dlabel = os.path.join(tmpdir, 'clust.dlabel.nii')
    else:
        cluster_dlabel = '{}_clust.dlabel.nii'.format(outputbase)
    empty_labels = os.path.join(tmpdir, 'empty_labels.txt')
    ciftify.utils.run('touch {}'.format(empty_labels))
    ciftify.utils.run(['wb_command', '-cifti-label-import',
                       clusters_dscalar, empty_labels, cluster_dlabel])

    ## load the data
    label_data, label_dict = ciftify.niio.load_LR_label(cluster_dlabel,
                                                        map_number=1)

    ## define the outputcsv
    outputcsv = '{}_statclust_report.csv'.format(outputbase)
    logger.info('Output table: {}'.format(outputcsv))

    ## load the vertex areas
    surf_va_LR = load_LR_vertex_areas(surf_settings)

    ## assert that the dimensions match
    if not (label_data.shape[0] == surf_va_LR.shape[0]):
        logger.error('label file vertices {} not equal to vertex areas {}'
                     ''.format(label_data.shape[0], surf_va_LR.shape[0]))
        sys.exit(1)

    ## use the label dict to start the report dataframe
    df = pd.DataFrame.from_dict(label_dict, orient="index")
    df['label_idx'] = df.index
    df = df.rename(index=str, columns={0: "label_name"})

    # calculate a column of the surface area for row ROIs
    df['area'] = -999
    for pd_idx in df.index.get_values():
        df.loc[pd_idx, 'area'] = ciftify.report.calc_cluster_area(
            pd_idx, label_data, surf_va_LR)

    for atlas in atlas_settings.values():
        df = report_atlas_overlap(df, label_data, atlas, surf_va_LR,
                                  min_percent_overlap=5)

    df.to_csv(outputcsv)

    if output_peaktable:
        write_statclust_peaktable(dscalar_in.path, clusters_dscalar, outputbase,
                                  arguments, surf_settings, atlas_settings)
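# For reference, a minimal sketch of the per-label area calculation the report
# loops above rely on: sum the vertex areas of every vertex carrying a given
# label. ciftify.report.calc_cluster_area may do additional bookkeeping; this
# shows only the core idea.
import numpy as np

def calc_cluster_area(label_idx, label_data, surf_va_LR):
    '''Surface area covered by one label, given per-vertex areas.'''
    return float(np.sum(surf_va_LR[label_data == int(label_idx)]))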