# import of built-in modules
import logging
import logging.config

# import of third party modules

# import of local modules
import hcp.hcp3t.archive as hcp3t_archive
import hcp.hcp3t.bedpostx.one_subject_completion_checker as one_subject_completion_checker
import hcp.hcp3t.subject as hcp3t_subject
import utils.file_utils as file_utils

# authorship information
__author__ = "Timothy B. Brown"
__copyright__ = "Copyright 2016, The Human Connectome Project"
__maintainer__ = "Timothy B. Brown"

# configure logging and create module logger
# Logging configuration file and logger name are both derived from this
# module's file name by the file_utils helpers.
logging.config.fileConfig(file_utils.get_logging_config_file_name(__file__))
logger = logging.getLogger(file_utils.get_logger_name(__file__))

# module constants
DNM = "---"  # Does Not Matter
NA = "N/A"  # Not Available
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'  # timestamp format used when reporting times

if __name__ == '__main__':

    # get list of subjects to check
    # The subjects file name is derived from this script's file name.
    subject_file_name = file_utils.get_subjects_file_name(__file__)
    logger.info("Retrieving subject list from: " + subject_file_name)
    # Entries in the subjects file are tab-separated.
    # NOTE(review): this fragment appears to end abruptly here — the code that
    # actually iterates over subject_list is not visible in this chunk; confirm
    # against the original source.
    subject_list = hcp3t_subject.read_subject_info_list(subject_file_name,
                                                        separator='\t')
# import of third party modules
# None

# import of local modules
import hcp.hcp7t.archive as hcp7t_archive
import hcp.hcp7t.resting_state_stats.one_subject_completion_checker as one_subject_completion_checker
import hcp.hcp7t.subject as hcp7t_subject
import utils.file_utils as file_utils

# authorship information
# NOTE(review): this section duplicates the authorship / logging-setup /
# constants block near the top of the file — the file looks like a
# concatenation of two separate scripts; confirm and split if so.
__author__ = "Timothy B. Brown"
__copyright__ = "Copyright 2016, The Human Connectome Project"
__maintainer__ = "Timothy B. Brown"

# configure logging and create module logger
logging.config.fileConfig(file_utils.get_logging_config_file_name(__file__))
logger = logging.getLogger(file_utils.get_logger_name(__file__))

# module constants
DNM = "---" # Does Not Matter
NA = "N/A" # Not Available
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'  # timestamp format used when reporting times


def get_resource_name(scan_name):
    """Return the archive resource name for a scan's RestingStateStats output.

    The resource name is simply the scan name with the ``_RSS`` suffix
    appended.
    """
    rss_suffix = '_RSS'
    return scan_name + rss_suffix


def _is_subject_complete(subject_results_dict):
    for scan, scan_results_dict in subject_results_dict.items():
        if scan_results_dict['files_exist'] == 'FALSE':
            return False
    # read the configuration file
    config_file_name = file_utils.get_config_file_name(__file__)
    print("Reading configuration from file: " + config_file_name)
    config = my_configparser.MyConfigParser()
    config.read(config_file_name)

    # process subjects in the list
    batch_submitter = BatchSubmitter()
    batch_submitter.submit_jobs(userid, password, subject_list, config,
                                force_submission)


if __name__ == '__main__':

    # Load the logging configuration before doing anything else so that
    # module-level loggers created on import remain enabled
    # (disable_existing_loggers=False).
    logging_config_file_name = file_utils.get_logging_config_file_name(
        __file__)
    print("Reading logging configuration from file: " +
          logging_config_file_name)
    logging.config.fileConfig(logging_config_file_name,
                              disable_existing_loggers=False)

    # Build the command line parser.
    # NOTE(review): `my_argparse` is not imported anywhere in this chunk —
    # confirm the import exists in the original source.
    parser = my_argparse.MyArgumentParser(
        description="Submit a batch of HCP 7T MultiRunIcaFix Jobs")

    # option arguments
    # The -f or --force option tells this program to ignore the fact that a job may
    # already be running for a specified subject/scan and submit jobs anyhow.
    # Keep in mind that this will very likely royally screw up the mechanism for
    # keeping track of whether jobs are queued or running for that subject/scan.
    # But sometimes, particularly during testing, it is useful and necessary.
    # NOTE(review): this chunk is truncated mid-call below — the remaining
    # arguments to add_argument and the rest of the script are not visible
    # here; recover them from the original source.
    parser.add_argument('-f',
# Exemple #4
# 0
# NOTE(review): the two lines above ("Exemple #4" / "0") appear to be
# extraction artifacts marking a boundary between concatenated code
# fragments — they are not Python code; confirm and remove them once the
# original sources are restored.
    # NOTE(review): this `elif` chain continues an `if args.phase == ...`
    # dispatch whose opening branch is outside this chunk — `data_retriever`,
    # `subject_info`, and `args` are presumably set up earlier in the
    # enclosing function; confirm against the original source.
    elif args.phase == "MSMALL_PREREQS":
        # Retrieve the prerequisite data for the MSMAll pipeline phase.
        data_retriever.get_msmall_prereqs(subject_info, args.output_study_dir)

    elif args.phase == "DEDRIFTANDRESAMPLE_PREREQS":
        # Retrieve the prerequisite data for the DeDriftAndResample phase.
        data_retriever.get_dedriftandresample_prereqs(subject_info,
                                                      args.output_study_dir)
        # Get the group average drift data
        # As of February 2017, the group average drift data has been moved from HCP_Staging to
        # HCP_1200
        data_retriever.get_msm_group_average_drift_data(
            "HCP_1200", args.output_study_dir)

    elif args.phase == "REAPPLYFIX_PREREQS":
        # Retrieve the prerequisite data for the ReApplyFix phase.
        data_retriever.get_reapplyfix_prereqs(subject_info,
                                              args.output_study_dir)

    if args.remove_non_subdirs:
        # remove any non-subdirectory data at the output study directory level
        data_retriever.remove_non_subdirs(args.output_study_dir)


if __name__ == '__main__':
    # Configure logging from this module's logging-config file, then hand
    # control to main().  Environment-variable overrides for the config file
    # location are deliberately disabled (use_env_variable=False), and
    # loggers created before this point stay enabled.
    config_file = file_utils.get_logging_config_file_name(
        __file__, use_env_variable=False)
    print("logging_config_file_name:", config_file)

    logging.config.fileConfig(config_file, disable_existing_loggers=False)
    main()