def narps():
    # set up a Narps object from an existing data directory under /tmp/data
    # and attach the full metadata table
    basedir = '/tmp/data'
    assert os.path.exists(basedir)
    narps = Narps(basedir)
    narps.load_data()
    narps.metadata = pandas.read_csv(
        os.path.join(narps.dirs.dirs['metadata'],
                     'all_metadata.csv'))
    return narps
def narps():
    # set up a Narps object pointed at /tmp/data (created if needed),
    # using the DATA_URL environment variable, and write out its data
    dataurl = os.environ['DATA_URL']
    basedir = '/tmp/data'
    if not os.path.exists(basedir):
        os.mkdir(basedir)
    narps = Narps(basedir, dataurl=dataurl)
    narps.write_data()
    return narps
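# The narps() helpers above appear to be pytest fixtures (the
# @pytest.fixture decorator is not visible in these excerpts, so that is
# an assumption). If they are, a test can request the fixture by name.
# A minimal sketch, with hypothetical test names and assertions that rely
# only on attributes shown in these excerpts (dirs.dirs and metadata):
import os


def test_metadata_dir_exists(narps):
    # narps.dirs.dirs is the directory map used throughout these scripts
    assert os.path.exists(narps.dirs.dirs['metadata'])


def test_metadata_loaded(narps):
    # the first fixture attaches all_metadata.csv as a pandas DataFrame
    assert narps.metadata.shape[0] > 0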
if args.dataurl is not None:
    dataurl = args.dataurl
elif 'DATA_URL' in os.environ:
    dataurl = os.environ['DATA_URL']
    print('reading data URL from environment')
else:
    dataurl = None
    print('info.json not present - data downloading will be disabled')

# set up simulation if specified
if args.simulate:
    print('using simulated data')
    # load main class from real analysis
    narps_orig = Narps(basedir, overwrite=False)
    # create simulated data
    # setup main class from original data
    narps = Narps(basedir)
    narps.load_data()
    # Load full metadata and put into narps structure
    narps.metadata = pandas.read_csv(
        os.path.join(narps.dirs.dirs['metadata'],
                     'all_metadata.csv'))
    basedir = setup_simulated_data(narps, verbose=False)
    make_orig_image_sets(narps, basedir, verbose=True,
    action='store_true',
    help='skip creation of overlap/range/std maps')
args = parser.parse_args()

# set up base directory
if args.basedir is not None:
    basedir = args.basedir
elif 'NARPS_BASEDIR' in os.environ:
    basedir = os.environ['NARPS_BASEDIR']
    print("using basedir specified in NARPS_BASEDIR")
else:
    basedir = '/data'
    print("using default basedir:", basedir)

# setup main class
narps = Narps(basedir)
narps.load_data()

# Load full metadata and put into narps structure
narps.metadata = pandas.read_csv(
    os.path.join(narps.dirs.dirs['metadata'],
                 'all_metadata.csv'))

if not args.test:
    if not args.skip_maps:
        # create maps showing overlap of thresholded images
        mk_overlap_maps(narps)
        mk_range_maps(narps)
        mk_std_maps(narps)

    if args.detailed:
            'mean_unthresh_correlation_by_cluster.csv'),
        index=False)
    return results_df_wide


if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Get similarity summary')
    parser.add_argument('-b', '--basedir',
                        help='base directory')
    parser.add_argument('-t', '--test', action='store_true',
                        help='use testing mode (no processing)')
    args = parser.parse_args()

    # set up base directory
    if args.basedir is not None:
        basedir = args.basedir
    elif 'NARPS_BASEDIR' in os.environ:
        basedir = os.environ['NARPS_BASEDIR']
        print("using basedir specified in NARPS_BASEDIR")
    else:
        basedir = '/data'
        print("using default basedir:", basedir)

    narps = Narps(basedir)

    if not args.test:
        corr_summary = get_similarity_summary(narps)
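# get_similarity_summary() is defined elsewhere; the fragment above shows it
# returning a wide-format table and writing it to
# mean_unthresh_correlation_by_cluster.csv. A minimal sketch of the kind of
# long-to-wide reshape that produces such a table with pandas; the function
# name and column names here are hypothetical, not the project's actual code:
import pandas


def to_wide(results_df_long):
    # one row per team, one column per cluster, values are mean correlations
    return results_df_long.pivot(index='teamID',
                                 columns='cluster',
                                 values='mean_correlation').reset_index()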
            alldata_df.loc[i, 'package'] = 'Other'

    # save data for loading into R
    alldata_df.to_csv(
        os.path.join(narps.dirs.dirs['metadata'],
                     'all_metadata.csv'))


if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Generate NARPS metadata')
    parser.add_argument('-b', '--basedir',
                        help='base directory')
    args = parser.parse_args()

    # set up base directory
    if args.basedir is not None:
        basedir = args.basedir
    elif 'NARPS_BASEDIR' in os.environ:
        basedir = os.environ['NARPS_BASEDIR']
        print("using basedir specified in NARPS_BASEDIR")
    else:
        basedir = '/data'
        print("using default basedir:", basedir)

    overwrite = False

    # setup main class
    narps = Narps(basedir, overwrite=overwrite)

    prepare_metadata(narps)
            cut_coords=cut_coords,
            axes=ax[i])
    plt.savefig(os.path.join(narps.dirs.dirs['figures'],
                             'consensus_map.pdf'))
    plt.close(fig)


if __name__ == "__main__":
    # set an environment variable called NARPS_BASEDIR
    # with location of base directory
    if 'NARPS_BASEDIR' in os.environ:
        basedir = os.environ['NARPS_BASEDIR']
    else:
        basedir = '/data'

    # setup main class
    narps = Narps(basedir)
    narps.load_data()

    narps.dirs.dirs['consensus'] = os.path.join(
        narps.dirs.dirs['output'],
        'consensus_analysis')

    logfile = os.path.join(
        narps.dirs.dirs['logs'],
        '%s.txt' % sys.argv[0].split('.')[0])
    log_to_file(logfile,
                'running %s' % sys.argv[0].split('.')[0],
                flush=True)

    if not os.path.exists(narps.dirs.dirs['consensus']):
        os.mkdir(narps.dirs.dirs['consensus'])

    run_ttests(narps, logfile)
    mk_figures(narps, logfile)
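# log_to_file() comes from the project's utility module and its implementation
# is not shown in these excerpts. A minimal sketch consistent with how it is
# called above (a logfile path, a message, and a flush flag that starts a
# fresh log); this is an assumption, not the project's actual implementation:
def log_to_file(logfile, message, flush=False):
    # 'w' truncates the log when flush is requested, otherwise append
    mode = 'w' if flush else 'a'
    with open(logfile, mode) as f:
        f.write(message + '\n')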
    # save data to file
    metadata_merged.to_csv(
        os.path.join(narps.dirs.dirs['figures'],
                     'MethodsTableMetadataMerged.csv'))
    decision_wide.to_csv(
        os.path.join(narps.dirs.dirs['figures'],
                     'DecisionDataWide.csv'))
    confidence_wide.to_csv(
        os.path.join(narps.dirs.dirs['figures'],
                     'ConfidenceDataWide.csv'))


if __name__ == "__main__":
    # set an environment variable called NARPS_BASEDIR
    # with location of base directory
    if 'NARPS_BASEDIR' in os.environ:
        basedir = os.environ['NARPS_BASEDIR']
    else:
        basedir = '/tmp/data'

    # setup main class
    narps = Narps(basedir, dataurl=os.environ['DATA_URL'])
    narps.load_data()

    logfile = os.path.join(narps.dirs.dirs['logs'],
                           'MakeSupplementaryFigure1.txt')
    log_to_file(logfile,
                'running MakeSupplementaryFigure1.py',
                flush=True)

    metadata = get_all_metadata(narps)
    mk_supp_figure1(narps, metadata)
def narps(basedir):
    narps = Narps(basedir)
    narps.write_data()
    return narps