def get_project_redcap_records(config, redcap_cred):
    token = get_redcap_token(config, redcap_cred)
    redcap_url = config.get_key('REDCAPAPI')
    logger.debug("Accessing REDCap API at {}".format(redcap_url))

    payload = {'token': token,
               'format': 'json',
               'content': 'record',
               'type': 'flat'}
    response = requests.post(redcap_url, data=payload)
    if response.status_code != 200:
        logger.error("Cannot access redcap data at URL {}".format(redcap_url))
        sys.exit(1)

    current_study = config.get_key('STUDY_TAG')

    project_records = []
    for item in response.json():
        record = Record(item)
        if record.id is None:
            continue
        if record.matches_study(current_study):
            project_records.append(record)

    return project_records

def get_redcap_records(config, redcap_cred):
    token = get_token(config, redcap_cred)
    redcap_url = config.get_key('RedcapApi')
    logger.debug("Accessing REDCap API at {}".format(redcap_url))

    payload = {'token': token,
               'format': 'json',
               'content': 'record',
               'type': 'flat'}
    response = requests.post(redcap_url, data=payload)
    if response.status_code != 200:
        logger.error("Cannot access redcap data at URL {}".format(redcap_url))
        sys.exit(1)

    current_study = config.get_key('StudyTag')

    try:
        id_map = config.get_key('IdMap')
    except UndefinedSetting:
        id_map = None

    try:
        project_records = parse_records(response, current_study, id_map)
    except ValueError as e:
        logger.error("Couldn't parse redcap records for server response {}. "
                     "Reason: {}".format(response.content, e))
        project_records = []

    return project_records

def get_xnat_config(config, site):
    try:
        cred_file = config.get_key('XNAT_source_credentials', site=site)
        server = config.get_key('XNAT_source', site=site)
        archive = config.get_key('XNAT_source_archive', site=site)
    except datman.config.UndefinedSetting:
        raise KeyError("Missing configuration. Please ensure study or site "
                       "configuration defines all needed values: XNAT_source, "
                       "XNAT_source_credentials, XNAT_source_archive. See "
                       "help string for more details.")

    destination = config.get_path('zips')

    # User may provide full path or name of a file in metadata folder
    if os.path.exists(cred_file):
        credentials_path = cred_file
    else:
        credentials_path = os.path.join(config.get_path('meta'), cred_file)

    if not os.path.exists(credentials_path):
        logger.critical("Can't find credentials file at {} or {}. Please "
                        "check that 'XNAT_source_credentials' is set "
                        "correctly.".format(cred_file, credentials_path))
        sys.exit(1)

    return credentials_path, server, archive, destination

def get_xnat_config(config, site):
    try:
        cred_file = config.get_key('XNAT_source_credentials', site=site)
        server = config.get_key('XNAT_source', site=site)
        archive = config.get_key('XNAT_source_archive', site=site)
    except KeyError:
        raise KeyError("Missing configuration. Please ensure study or site "
                       "configuration defines all needed values: XNAT_source, "
                       "XNAT_source_credentials, XNAT_source_archive. See "
                       "help string for more details.")

    destination = config.get_path('zips')

    # User may provide full path or name of a file in metadata folder
    if os.path.exists(cred_file):
        credentials_path = cred_file
    else:
        credentials_path = os.path.join(config.get_path('meta'), cred_file)

    if not os.path.exists(credentials_path):
        logger.critical("Can't find credentials file at {} or {}. Please "
                        "check that 'XNAT_source_credentials' is set "
                        "correctly.".format(cred_file, credentials_path))
        sys.exit(1)

    return credentials_path, server, archive, destination

def get_project_redcap_records(config, redcap_cred):
    # Read token file and get the redcap server address
    token = get_redcap_token(config, redcap_cred)
    redcap_url = config.get_key('REDCAPAPI')
    logger.debug("Accessing REDCap API at {}".format(redcap_url))

    payload = {
        'token': token,
        'format': 'json',
        'content': 'record',
        'type': 'flat'
    }

    # Submit request to REDCap
    response = requests.post(redcap_url, data=payload)
    if response.status_code != 200:
        logger.error("Cannot access redcap data at URL {}".format(redcap_url))
        sys.exit(1)

    current_study = config.get_key('STUDY_TAG')

    try:
        project_records = parse_records(response, current_study)
    except ValueError as e:
        logger.error("Couldn't parse redcap records for server response {}. "
                     "Reason: {}".format(response.content, e))
        project_records = []

    # Return list of records for selected studies
    return project_records

def update_redcap(config):
    """Update the REDCap configuration in the dashboard's database.

    Args:
        config (:obj:`datman.config.config`): A datman config for a study.
    """
    try:
        project = config.get_key("RedcapProjectId")
        instrument = config.get_key("RedcapInstrument")
        url = config.get_key("RedcapUrl")
    except UndefinedSetting:
        return

    rc_config = dashboard.queries.get_redcap_config(
        project, instrument, url, create=True)

    if not rc_config:
        logger.error(f"Failed getting config for {project} {instrument} {url}")
        return

    try:
        rc_config.token = read_token(config)
    except Exception:
        pass

    update_setting(rc_config, "date_field", config, "RedcapDate")
    update_setting(rc_config, "comment_field", config, "RedcapComments")
    update_setting(rc_config, "session_id_field", config, "RedcapSubj")
    update_setting(rc_config, "completed_field", config, "RedcapStatus")
    update_setting(rc_config, "completed_value", config, "RedcapStatusValue")
    update_setting(rc_config, "event_ids", config, "RedcapEventId")

    rc_config.save()

def update_header_diffs(scan): site = scan.session.timepoint.site_id config = datman.config.config(study=scan.get_study().id) try: tolerance = config.get_key("HeaderFieldTolerance", site=site) except Exception: tolerance = {} try: ignore = config.get_key("IgnoreHeaderFields", site=site) except Exception: ignore = [] scan.update_header_diffs(ignore=ignore, tolerance=tolerance)
def update_studies(config, skip_delete=False, delete_all=False):
    """Update the settings in the database for all studies.

    Args:
        config (:obj:`datman.config.config`): a datman config object.
        skip_delete (bool, optional): Don't prompt the user and skip deletion
            of any study records no longer defined in the config files.
        delete_all (bool, optional): Don't prompt the user and delete any
            study records no longer defined in the config files.
    """
    try:
        studies = config.get_key("Projects").keys()
    except UndefinedSetting:
        logger.debug("No configured projects detected.")
        return

    all_studies = dashboard.queries.get_studies()
    undefined = [study for study in all_studies if study.id not in studies]
    if undefined:
        delete_records(
            undefined,
            prompt=("Study {} missing from config files. If deleted any "
                    "timepoints and their contents will also be deleted."),
            skip_delete=skip_delete,
            delete_all=delete_all)

    for study in studies:
        update_study(study, config, skip_delete, delete_all)

def setup_logger(filepath, to_server, debug, config, sub_ids):
    logger.setLevel(logging.DEBUG)
    dmlogger.setLevel(logging.DEBUG)

    date = str(datetime.date.today())
    sub = '_{}'.format(sub_ids[0]) if len(sub_ids) == 1 else ''
    log_name = os.path.join(filepath, date + "-dm_to_bids{}.log".format(sub))

    fhandler = logging.FileHandler(log_name, "w")
    fhandler.setLevel(logging.DEBUG)

    shandler = logging.StreamHandler()
    if debug:
        shandler.setLevel(logging.DEBUG)
    else:
        shandler.setLevel(logging.WARN)

    formatter = logging.Formatter(
        "[%(name)s] %(asctime)s - %(levelname)s: %(message)s")
    fhandler.setFormatter(formatter)
    shandler.setFormatter(formatter)

    logger.addHandler(fhandler)
    logger.addHandler(shandler)
    dmlogger.addHandler(shandler)

    if to_server:
        server_ip = config.get_key('LOGSERVER')
        server_handler = logging.handlers.SocketHandler(
            server_ip, logging.handlers.DEFAULT_TCP_LOGGING_PORT)
        server_handler.setLevel(logging.CRITICAL)
        logger.addHandler(server_handler)

def link_shared_ids(config, connection, record): try: xnat_archive = config.get_key('XNAT_Archive', site=record.id.site) except datman.config.UndefinedSetting: logger.error("Can't find XNAT_Archive for subject {}" "".format(record.id)) return project = connection.select.project(xnat_archive) subject = project.subject(str(record.id)) experiment = get_experiment(subject) if not experiment: logger.error("Redcap or XNAT record may be misnamed - no " "matching experiments found on XNAT for redcap subject " "{}. Skipping".format(record.id)) return logger.debug("Working on subject {} in project {}".format(record.id, xnat_archive)) if record.comment and not DRYRUN: update_xnat_comment(experiment, subject, record) if record.shared_ids and not DRYRUN: update_xnat_shared_ids(subject, record) make_links(record)
def link_shared_ids(config, connection, record): try: xnat_archive = config.get_key('XNAT_Archive', site=record.id.site) except datman.config.UndefinedSetting: logger.error("Can't find XNAT_Archive for subject {}".format( record.id)) return project = connection.select.project(xnat_archive) subject = project.subject(str(record.id)) experiment = get_experiment(subject) if not experiment: logger.error("No matching experiments for subject {}." \ " Skipping".format(record.id)) return logger.debug("Working on subject {} in project {}".format( record.id, xnat_archive)) if record.comment and not DRYRUN: update_xnat_comment(experiment, subject, record) #If a shared ID is found in REDCAP update XNAT's shared IDs if record.shared_ids and not DRYRUN: update_xnat_shared_ids(subject, record) make_links(record)
def update_setting(record, attribute, config, key, site=None):
    try:
        value = config.get_key(key, site=site)
    except UndefinedSetting:
        pass
    else:
        setattr(record, attribute, value)

def test_get_number_of_templates_returns_expected_value(self, mock_exists):
    mock_exists.return_value = True
    config = self.__make_mock_config()

    maget_config = maget.MagetConfig(config)

    expected_num = config.get_key('magetbrain')['templates']
    assert maget_config.num_templates == expected_num

def get_regex(config):
    try:
        regex = config.get_key('TASK_REGEX')
    except datman.config.UndefinedSetting:
        logger.warn("'TASK_REGEX' not defined in settings, using default "
                    "regex to locate task files.")
        regex = 'behav|\.edat2'  # noqa: W605
    return regex

def add_server_handler(config):
    try:
        server_ip = config.get_key('LOGSERVER')
    except KeyError:
        raise KeyError("'LOGSERVER' not defined in site config file.")
    server_handler = logging.handlers.SocketHandler(
        server_ip, logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logger.addHandler(server_handler)

def add_server_handler(config):
    try:
        server_ip = config.get_key('LOGSERVER')
    except datman.config.UndefinedSetting:
        raise KeyError("'LOGSERVER' not defined in site config file.")
    server_handler = logging.handlers.SocketHandler(
        server_ip, logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logger.addHandler(server_handler)

def get_search_paths(config, ignored_paths):
    paths = config.get_key('paths')
    try:
        path_keys = paths.keys()
    except AttributeError:
        logger.info("No paths set for {}".format(config.study_name))
        return []
    search_paths = [path for path in path_keys if path not in ignored_paths]
    return search_paths

def collect_settings(config, key_map, site=None):
    all_vals = {}
    for attr_name in key_map:
        try:
            val = config.get_key(key_map[attr_name], site=site)
        except UndefinedSetting:
            val = None
        all_vals[attr_name] = val
    return all_vals

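# A minimal usage sketch for collect_settings, mirroring the call made by
# update_site further down. The key map and site code here are hypothetical
# example values; any key missing from the config comes back as None rather
# than raising.
def example_collect_site_settings(config):
    return collect_settings(
        config,
        {"code": "StudyTag", "notes": "UsesTechNotes"},
        site="SITE1")  # "SITE1" is a hypothetical site code
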
def test_get_subject_tags_returns_expected_tag_dict(self, mock_exists):
    mock_exists.return_value = True
    config = self.__make_mock_config()
    expected_dict = config.get_key('magetbrain')['subject_tags']
    expected_dict['T1'] = 't1'

    maget_config = maget.MagetConfig(config)

    assert maget_config.subject_tags == expected_dict

def get_regex(config): try: regex = config.get_key("TaskRegex") except datman.config.UndefinedSetting: logger.warn( "'TaskRegex' not defined in settings, using default " "regex to locate task files." ) regex = "behav|\.edat2" # noqa: W605 return regex
def test_get_subject_tags_adds_default_T1_tag_when_not_given(
        self, mock_exists):
    mock_exists.return_value = True
    config = self.__make_mock_config()
    settings_tag_dict = config.get_key('magetbrain')['subject_tags']
    assert 'T1' not in settings_tag_dict.keys()

    maget_config = maget.MagetConfig(config)

    assert maget_config.subject_tags['T1'] == 't1'

def notes_expected(site, study_name): """ Grabs 'USES_TECHNOTES' key in study config file to determine whether technotes are expected """ try: technotes = config.get_key('USES_TECHNOTES', site=site) except KeyError: technotes = False return technotes
def notes_expected(site, study_name): """ Grabs 'UsesTechNotes' key in study config file to determine whether technotes are expected """ try: technotes = config.get_key('UsesTechNotes', site=site) except datman.config.UndefinedSetting: technotes = False return technotes
def update_study(study_id, config, skip_delete=False, delete_all=False):
    """Update all settings stored in the database for the given study.

    Args:
        study_id (str): The ID of the study to update.
        config (:obj:`datman.config.config`): a Datman config object.
        skip_delete (bool, optional): Don't prompt the user and skip deletion
            of any records no longer defined in the config files.
        delete_all (bool, optional): Don't prompt the user and delete any
            records no longer defined in the config files.
    """
    try:
        config.set_study(study_id)
    except Exception as e:
        logger.error(f"Can't access config for {study_id}. Reason - {e}")
        return

    try:
        ignore = config.get_key("DbIgnore")
    except UndefinedSetting:
        ignore = False

    if ignore:
        return

    study = dashboard.queries.get_studies(study_id, create=True)[0]

    update_setting(study, "description", config, "Description")
    update_setting(study, "name", config, "FullName")
    update_setting(study, "is_open", config, "IsOpen")

    update_redcap(config)

    try:
        sites = config.get_sites()
    except UndefinedSetting:
        logger.error(f"No sites defined for {study_id}")
        return

    undefined = [site_id for site_id in study.sites if site_id not in sites]
    delete_records(
        undefined,
        prompt=("Site {} will be deleted from study "
                f"{study.id}. Any records referencing this study/site pair "
                "will be removed."),
        delete_func=lambda x: study.delete_site(x),
        skip_delete=skip_delete,
        delete_all=delete_all)

    for site_id in sites:
        update_site(study, site_id, config,
                    skip_delete=skip_delete, delete_all=delete_all)

def main():
    global LOG_DIR

    arguments = docopt(__doc__)
    LOG_DIR = arguments['--log-dir']
    host = arguments['--host']
    port = arguments['--port']

    config = datman.config.config()
    if LOG_DIR is None:
        LOG_DIR = config.get_key('SERVER_LOG_DIR')
    if host is None:
        host = config.get_key('LOGSERVER')
    if port is None:
        port = logging.handlers.DEFAULT_TCP_LOGGING_PORT

    # Start server
    tcpserver = LogRecordSocketReceiver(host, port)
    tcpserver.serve_until_stopped()

def update_header_diffs(scan): site = scan.session.timepoint.site_id config = datman.config.config(study=scan.get_study().id) try: tolerance = config.get_key("HeaderFieldTolerance", site=site) except Exception: tolerance = {} try: ignore = config.get_key("IgnoreHeaderFields", site=site) except Exception: ignore = [] tags = config.get_tags(site=site) try: qc_type = tags.get(scan.tag, "qc_type") except KeyError: check_bvals = False else: check_bvals = qc_type == 'dti' scan.update_header_diffs(ignore=ignore, tolerance=tolerance, bvals=check_bvals)
def get_projects(config):
    """Find all XNAT projects and the list of scan sites uploaded to each one.

    Args:
        config (:obj:`datman.config.config`): The config for a study.

    Returns:
        dict: A map of XNAT project names to the set of scan sites that
            upload to that project.
    """
    projects = {}
    for site in config.get_sites():
        xnat_project = config.get_key("XNAT_Archive", site=site)
        projects.setdefault(xnat_project, set()).add(site)
    return projects

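# A hedged usage sketch for get_projects: report any XNAT project that
# receives uploads from more than one scan site. Assumes an initialized
# datman config; no settings beyond XNAT_Archive (which get_projects already
# reads) are required.
def report_shared_projects(config):
    for xnat_project, sites in get_projects(config).items():
        if len(sites) > 1:
            logger.info("XNAT project {} is shared by sites {}".format(
                xnat_project, sorted(sites)))
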
def read_token(config):
    """Read the REDCap token from a file defined by the Datman config.

    Args:
        config (:obj:`datman.config.config`): A datman config object for a
            specific study.

    Returns:
        str: The token, or None if the token file could not be read.
    """
    metadata = config.get_path("meta")
    token_file = config.get_key("RedcapToken")
    token_path = os.path.join(metadata, token_file)
    try:
        with open(token_path, "r") as fh:
            return fh.readline().strip()
    except Exception as e:
        logger.error(
            f"Failed to read RedCap token at {token_path}. Reason - {e}")

def validate_subject_id(subject_id, config):
    """Ensures subject ID correctness based on configuration settings.

    This checks that a given ID:
        1. Matches a supported naming convention
        2. Matches a study tag that's defined in the configuration file
           for the current study
        3. Matches a site that is defined for the study tag

    Args:
        subject_id (:obj:`str`): A subject ID to check.
        config (:obj:`datman.config.config`): A datman config instance that
            has been initialized to the study the subject ID should
            belong to.

    Raises:
        ParseException: When an ID is given that does not match any supported
            convention or that contains incorrect fields for the current
            study.

    Returns:
        :obj:`datman.scanid.Identifier`: A parsed datman identifier matching
            subject_id
    """
    try:
        settings = config.get_key("ID_MAP")
    except datman.config.UndefinedSetting:
        settings = None

    new_subject_id = scanid.parse(subject_id, settings)

    valid_tags = config.get_study_tags()
    try:
        sites = valid_tags[new_subject_id.study]
    except KeyError:
        raise ParseException(
            f"Subject id {new_subject_id} has undefined "
            f"study code {new_subject_id.study}"
        )

    if new_subject_id.site not in sites:
        raise ParseException(
            f"Subject id {new_subject_id} has undefined "
            f"site {new_subject_id.site} for study "
            f"{new_subject_id.study}"
        )

    return new_subject_id

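# A hedged usage sketch for validate_subject_id. The study name and subject
# ID below are hypothetical; a real ID must use a study tag and site defined
# in the study's config files or ParseException is raised.
def example_validate_subject_id():
    config = datman.config.config(study="STUDY")  # hypothetical study name
    try:
        ident = validate_subject_id("STUDY_SITE1_0001_01", config)
    except ParseException as e:
        logger.error("Rejected subject ID: {}".format(e))
        return None
    return ident
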
def update_tags(config, skip_delete=False, delete_all=False):
    """Update the tags defined in the database.

    Args:
        config (:obj:`datman.config.config`): A datman config object.
        skip_delete (bool, optional): Don't prompt the user and skip deletion
            of any scantype records no longer defined in the config files.
        delete_all (bool, optional): Don't prompt the user and delete any
            scantype records no longer defined in the config files.
    """
    try:
        tag_settings = config.get_key("ExportSettings")
    except UndefinedSetting:
        logger.info("No defined tags found, skipping tag update.")
        return

    for tag in tag_settings:
        db_entry = dashboard.queries.get_scantypes(tag, create=True)[0]

        try:
            qc_type = tag_settings[tag]["QcType"]
        except KeyError:
            qc_type = None

        try:
            pha_type = tag_settings[tag]["QcPha"]
        except KeyError:
            pha_type = None

        db_entry.qc_type = qc_type
        db_entry.pha_type = pha_type
        db_entry.save()

    all_tags = dashboard.queries.get_scantypes()
    undefined = [
        record for record in all_tags if record.tag not in tag_settings
    ]

    if not undefined:
        return

    delete_records(
        undefined,
        prompt=("Tag {} undefined. If deleted any scan records with this "
                "tag will also be removed."),
        skip_delete=skip_delete,
        delete_all=delete_all)

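# A sketch of the "ExportSettings" structure that update_tags consumes. The
# tag names and values here are hypothetical examples; QcType and QcPha are
# both optional and default to None when a tag omits them.
EXAMPLE_EXPORT_SETTINGS = {
    "T1": {"QcType": "anat"},
    "DTI60": {"QcType": "dti", "QcPha": "qa_dti"},
}
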
def update_site(study, site_id, config, skip_delete=False, delete_all=False):
    """Update the settings in the database for a study's scan site.

    Args:
        study (:obj:`dashboard.models.Study`): A study from the database.
        site_id (:obj:`str`): The name of a site that should be associated
            with this study or a site from the study that should have its
            settings updated.
        config (:obj:`datman.config.config`): A datman config instance for
            the study.
        skip_delete (bool, optional): Don't prompt the user and skip deletion
            of any site records no longer in the config files.
        delete_all (bool, optional): Don't prompt the user and delete any
            site records no longer in the config files.
    """
    settings = collect_settings(
        config,
        {
            "code": "StudyTag",
            "redcap": "UsesRedcap",
            "notes": "UsesTechNotes",
            "xnat_archive": "XnatArchive",
            "xnat_convention": "XnatConvention"
        },
        site=site_id)

    try:
        xnat_fname = config.get_key("XnatCredentials", site=site_id)
        settings["xnat_credentials"] = os.path.join(
            config.get_path("meta"), xnat_fname)
    except UndefinedSetting:
        pass

    try:
        settings["xnat_url"] = get_server(config)
    except UndefinedSetting:
        pass

    try:
        study.update_site(site_id, create=True, **settings)
    except Exception as e:
        logger.error(f"Failed updating settings for study {study} and site "
                     f"{site_id}. Reason - {e}")

    update_expected_scans(study, site_id, config, skip_delete, delete_all)

def link_shared_ids(config, connection, record):
    xnat_archive = config.get_key('XNAT_Archive', site=record.id.site)
    project = connection.select.project(xnat_archive)
    subject = project.subject(str(record.id))
    experiment = get_experiment(subject)
    if not experiment:
        logger.error("No matching experiments for subject {}."
                     " Skipping".format(record.id))
        return

    logger.debug("Working on subject {} in project {}".format(
        record.id, xnat_archive))

    if record.comment and not DRYRUN:
        update_xnat_comment(experiment, subject, record)

    if record.shared_ids and not DRYRUN:
        update_xnat_shared_ids(subject, record)
        make_links(record)

def get_xnat_url(config):
    url = config.get_key('XNATSERVER')
    if 'https' not in url:
        url = "https://" + url
    return url

def main():
    # Parse arguments
    arguments = docopt(__doc__)
    study = arguments['<study>']
    out = arguments['<out>']
    bids_json = arguments['<json>']
    subjects = arguments['--subject']
    exclude = arguments['--exclude']
    quiet = arguments['--quiet']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    rewrite = arguments['--rewrite']
    tmp_dir = arguments['--tmp-dir'] or '/tmp/'
    bids_dir = arguments['--bids-dir'] or tmp_dir
    log_dir = arguments['--log']
    DRYRUN = arguments['--DRYRUN']
    walltime = arguments['--walltime']

    # Strategy pattern dictionary
    strat_dict = {
        'FMRIPREP': fmriprep_fork,
        'MRIQC': mriqc_fork,
        'FMRIPREP_CIFTIFY': ciftify_fork
    }
    thread_dict = {
        'FMRIPREP': '--nthreads',
        'MRIQC': '--n_procs',
        'FMRIPREP_CIFTIFY': '--n_cpus'
    }

    # Configuration
    config = get_datman_config(study)
    configure_logger(quiet, verbose, debug)
    try:
        queue = config.site_config['SystemSettings'][
            os.environ['DM_SYSTEM']]['QUEUE']
    except KeyError as e:
        logger.error('Config exception, key not found: {}'.format(e))
        sys.exit(1)

    # JSON parsing, formatting, and validating
    jargs = get_json_args(bids_json)
    jargs = validate_json_args(jargs, strat_dict)
    try:
        jargs.update({'keeprecon': config.get_key('KeepRecon')})
    except KeyError:
        jargs.update({'keeprecon': True})
    n_thread = get_requested_threads(jargs, thread_dict)

    # Get redirect command string and exclusion list
    log_cmd = partial(gen_log_redirect, log_dir=log_dir, out_dir=out)
    exclude_cmd_list = get_exclusion_cmd(exclude) if exclude else ['']

    # Get subjects, filter if not rewrite, and group if longitudinal
    subjects = subjects or [s for s in os.listdir(config.get_path('nii'))
                            if 'PHA' not in s]
    subjects = subjects if rewrite else filter_subjects(subjects, out,
                                                        jargs['app'])
    logger.info('Running {}'.format(subjects))
    subjects = group_subjects(subjects)

    # Process subject groups
    for s in subjects.keys():
        # Get subject directory and log tag
        log_tag = log_cmd(subject=s, app_name=jargs['app'])

        # Get commands
        init_cmd_list = get_init_cmd(study, s, bids_dir, tmp_dir, out,
                                     jargs['img'], log_tag)
        n2b_cmd = get_nii_to_bids_cmd(study, subjects[s], log_tag)
        bids_cmd_list = strat_dict[jargs['app']](jargs, log_tag, out, s)

        # Write commands to executable and submit
        master_cmd = (init_cmd_list + [n2b_cmd] + exclude_cmd_list +
                      bids_cmd_list + ['\n cleanup \n'])
        fd, job_file = tempfile.mkstemp(suffix='datman_BIDS_job', dir=tmp_dir)
        os.close(fd)
        write_executable(job_file, master_cmd)

        if not DRYRUN:
            submit_jobfile(job_file, s, queue, walltime, n_thread)

def main():
    global dryrun

    arguments = docopt(__doc__)
    study = arguments['<study>']
    config = arguments['--config']
    system = arguments['--system']
    QC_file = arguments['--QC-transfer']
    FA_tag = arguments['--FA-tag']
    subject_filter = arguments['--subject-filter']
    FA_filter = arguments['--FA-filter']
    CALC_MD = arguments['--calc-MD']
    CALC_ALL = arguments['--calc-all']
    walltime = arguments['--walltime']
    walltime_post = arguments['--walltime-post']
    POST_ONLY = arguments['--post-only']
    NO_POST = arguments['--no-post']
    quiet = arguments['--quiet']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    DRYRUN = arguments['--dry-run']

    if quiet:
        logger.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)

    config = datman.config.config(filename=config, system=system, study=study)

    ## make the output directory if it doesn't exist
    input_dir = config.get_path('dtifit')
    output_dir = config.get_path('enigmaDTI')
    log_dir = os.path.join(output_dir, 'logs')
    run_dir = os.path.join(output_dir, 'bin')
    dm.utils.makedirs(log_dir)
    dm.utils.makedirs(run_dir)

    logger.debug(arguments)

    if FA_tag is None:
        FA_tag = '_FA.nii.gz'

    subjects = dm.proc.get_subject_list(input_dir, subject_filter, QC_file)

    # check if we have any work to do, exit if not
    if len(subjects) == 0:
        logger.info('No outstanding scans to process.')
        sys.exit(1)

    # grab the prefix from the subid if not given
    prefix = config.get_key('STUDY_TAG')

    ## write and check the run scripts
    script_names = ['run_engimadti.sh', 'concatresults.sh']
    write_run_scripts(script_names, run_dir, output_dir, CALC_MD, CALC_ALL,
                      debug)

    checklist_file = os.path.normpath(
        output_dir + '/ENIGMA-DTI-checklist.csv')
    checklist_cols = ['id', 'FA_nii', 'date_ran', 'qc_rator', 'qc_rating',
                      'notes']
    checklist = dm.proc.load_checklist(checklist_file, checklist_cols)
    checklist = dm.proc.add_new_subjects_to_checklist(subjects, checklist,
                                                      checklist_cols)

    # Update checklist with new FA files to process listed under FA_nii column
    checklist = dm.proc.find_images(checklist, 'FA_nii', input_dir, FA_tag,
                                    subject_filter=subject_filter,
                                    image_filter=FA_filter)

    job_name_prefix = "edti{}_{}".format(
        prefix, datetime.datetime.today().strftime("%Y%m%d-%H%M%S"))
    submit_edti = False

    ## Change dir so it can be submitted without the full path
    os.chdir(run_dir)

    if not POST_ONLY:
        with make_temp_directory() as temp_dir:
            cmds_file = os.path.join(temp_dir, 'commands.txt')
            with open(cmds_file, 'w') as cmdlist:
                for i in range(0, len(checklist)):
                    subid = checklist['id'][i]

                    # make sure that second filter is being applied to the
                    # qsub bit
                    if subject_filter and subject_filter not in subid:
                        continue

                    ## make sure that a T1 has been selected for this subject
                    if pd.isnull(checklist['FA_nii'][i]):
                        continue

                    ## format contents of T1 column into recon-all command
                    ## input
                    smap = checklist['FA_nii'][i]

                    if subject_previously_completed(output_dir, subid, smap):
                        continue

                    # If POSTFS_ONLY == False, the run script will be the
                    # first or only name in the list
                    cmdlist.write(
                        "bash -l {rundir}/{script} {output} "
                        "{inputFA}\n".format(
                            rundir=run_dir,
                            script=script_names[0],
                            output=os.path.join(output_dir, subid),
                            inputFA=os.path.join(input_dir, subid, smap)))

                    ## add today's date to the checklist
                    checklist['date_ran'][i] = datetime.date.today()
                    submit_edti = True

            if submit_edti:
                qbatch_run_cmd = dm.proc.make_file_qbatch_command(
                    cmds_file, job_name_prefix, log_dir, walltime)
                os.chdir(run_dir)
                dm.utils.run(qbatch_run_cmd, DRYRUN)

    ## if any subjects have been submitted, submit a final job that will
    ## consolidate the results after they are finished
    os.chdir(run_dir)
    post_edit_cmd = 'echo bash -l {rundir}/{script}'.format(
        rundir=run_dir, script=script_names[1])
    if submit_edti:
        qbatch_post_cmd = dm.proc.make_piped_qbatch_command(
            post_edit_cmd,
            '{}_post'.format(job_name_prefix),
            log_dir,
            walltime_post,
            afterok=job_name_prefix)
        dm.utils.run(qbatch_post_cmd, DRYRUN)

    if not DRYRUN:
        ## write the checklist out to a file
        checklist.to_csv(checklist_file, sep=',', index=False)

def add_server_handler(config):
    server_ip = config.get_key('LOGSERVER')
    server_handler = logging.handlers.SocketHandler(
        server_ip, logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logger.addHandler(server_handler)