Example #1
def main():
    arguments = docopt(__doc__)
    url = arguments['--URL']
    study = arguments['<study>']
    output_path = arguments['--output']

    config = datman.config.config(study=study)
    meta_path = config.get_path('meta')
    token_file = 'ahrc_token'
    token_path = os.path.join(meta_path, token_file)
    output_path = os.path.join(config.get_path('data'), output_path)

    token = get_token(token_path)
    payload = get_payload(token)

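    # REDCap fields to request; make_csv() writes the returned data out under
    # the friendlier column headers defined below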
    REDCap_variables = [
        'record_id', 'redcap_event_name', 'demo_sex_birth',
        'demo_age_study_entry', 'demo_highest_grade_self', 'term_premature_yn'
    ]

    data = make_rest(url, payload, REDCap_variables)
    column_headers = [
        'record_id', 'group', 'sex', 'age', 'education', 'terminated'
    ]

    make_csv(output_path, data, column_headers)
Example #2
def main():
    global DRYRUN
    arguments = docopt(__doc__)
    project = arguments['<project>']
    series = arguments['<series>']
    blacklist = arguments['--blacklist']
    ignored_paths = arguments['--ignore-path']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    DRYRUN = arguments['--dry-run']

    if verbose:
        logger.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
    if quiet:
        logger.setLevel(logging.ERROR)

    config = datman.config.config(study=project)

    blacklist = get_blacklist(blacklist, series, config)
    logger.debug("Found blacklist data: {}".format(blacklist))

    remove_blacklisted_items(blacklist, config, ignored_paths)
Example #3
def main():
    global DRYRUN
    arguments = docopt(__doc__)
    project = arguments['<project>']
    xnat_cred = arguments['--xnat']
    redcap_cred = arguments['--redcap']
    site_config = arguments['--site-config']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    DRYRUN = arguments['--dry-run']

    # Set log format
    log_handler.setFormatter(logging.Formatter('[%(name)s] %(levelname)s - '
            '{study}: %(message)s'.format(study=project)))

    if verbose:
        logger.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
    if quiet:
        logger.setLevel(logging.ERROR)

    config = datman.config.config(filename=site_config, study=project)

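    # XNAT credentials are read from the environment (XNAT_USER / XNAT_PASS)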
    user_name, password = os.environ["XNAT_USER"], os.environ["XNAT_PASS"]
    xnat_url = get_xnat_url(config)

    scan_complete_records = get_project_redcap_records(config, redcap_cred)

    with datman.utils.XNATConnection(xnat_url, user_name,
                                     password) as connection:
        for record in scan_complete_records:
            link_shared_ids(config, connection, record)
Example #4
def main():
    arguments = docopt(__doc__)
    study = arguments['<study>']
    session = arguments['<session>']
    backupdir = arguments['--backupdir']
    purgedb = arguments['--purgedb']

    CFG.set_study(study)
    base_dir = CFG.get_study_base()
    logger.info('Searching folders:{}'.format(base_dir))
    # Create the backup folder
    outdir = os.path.realpath(os.path.join(backupdir, session))

    try:
        os.makedirs(outdir)
    except OSError:
        logger.error('Failed making backup directory:{}'.format(outdir))
        return

    if not purge_filesystem(session, base_dir, outdir):
        # something's gone wrong; end processing here.
        return

    if purgedb:
        try:
            db = datman.dashboard.dashboard(study)
            db.delete_session(session)
        except Exception:
            logger.error('Failed to delete session {} from the dashboard '
                         'database'.format(session))
            return
Example #5
def main():
    global REWRITE

    arguments = docopt(__doc__)
    use_server = arguments['--log-to-server']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    study = arguments['<study>']
    session = arguments['<session>']
    REWRITE = arguments['--rewrite']

    config = get_config(study)

    if use_server:
        add_server_handler(config)

    if quiet:
        logger.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)

    if session:
        subject = prepare_scan(session, config)
        qc_single_scan(subject, config)
        return

    qc_all_scans(config)
Example #6
def main():
    global DRYRUN, CONFIG
    arguments = docopt(__doc__)
    link_file = arguments['<link_file>']
    src_session = arguments['<src_session>']
    trg_session = arguments['<trg_session>']
    tags = arguments['<tags>']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    DRYRUN = arguments['--dry-run']
    quiet = arguments['--quiet']

    logger.setLevel(logging.WARN)

    if quiet:
        logger.setLevel(logging.ERROR)

    if verbose:
        logger.setLevel(logging.INFO)

    if debug:
        logger.setLevel(logging.DEBUG)

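    # each line of the link file is expected to provide
    # (source session, target session, tags)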
    if link_file is not None:
        logger.info("Using link file {} to make links".format(link_file))
        for line in read_link_file(link_file):
            link_session_data(line[0], line[1], line[2])
        return

    create_linked_session(src_session, trg_session, tags)
Example #7
def main():
    from datman.docopt import docopt
    import sys
    arguments = docopt(__doc__)

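    # --showheaders: print the header fields available in each archive, then exit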
    if arguments['--showheaders']:
        for archive in arguments['<archive>']:
            manifest = datman.utils.get_archive_headers(archive,
                                                        stop_after_first=False)
            filepath, headers = manifest.items()[0]
            print ",".join([archive, filepath])
            print "\t" + "\n\t".join(headers.dir())
        return

    headers = arguments['--headers'] and arguments['--headers'].split(',') or \
                default_headers[:]
    headers.insert(0, "Path")

    rows = []
    for archive in arguments['<archive>']:
        manifest = datman.utils.get_archive_headers(archive)
        sortedseries = sorted(manifest.iteritems(),
                              key=lambda x: x[1].get('SeriesNumber'))
        for path, dataset in sortedseries:
            row = dict([(header, dataset.get(header, ""))
                        for header in headers])
            row['Path'] = path
            rows.append(row)
            if arguments['--oneseries']: break

    data = pd.DataFrame(rows)
    print data.to_csv(index=False)
Example #8
def main():
    global DRYRUN, PARALLEL, LOG_DIR, SYSTEM
    arguments = docopt(__doc__)
    study = arguments['<study>']
    use_server = arguments['--log-to-server']
    scanid = arguments['--subject']
    debug = arguments['--debug']
    resubmit = arguments['--resubmit']
    PARALLEL = arguments['--parallel']
    DRYRUN = arguments['--dry-run']
    # If you add an option/argument that needs to be propagated to subject jobs
    # after a batch submit make sure to add it to create_command()
    # If it needs to be propagated to recon-all add it to
    # get_freesurfer_arguments()

    config = load_config(study)

    if use_server:
        add_server_handler(config)
    if debug:
        logger.setLevel(logging.DEBUG)

    logger.info('Starting')
    check_input_paths(config)
    qc_subjects = config.get_subject_metadata()

    fs_path = config.get_path('freesurfer')
    LOG_DIR = make_error_log_dir(fs_path)
    SYSTEM = config.system

    if scanid:
        # single subject mode
        blacklisted_series = get_blacklist(qc_subjects, scanid)
        subject = dm_scan.Scan(scanid, config)

        if subject.is_phantom:
            sys.exit(
                'Subject {} is a phantom, cannot be analyzed'.format(scanid))

        run_freesurfer(subject, blacklisted_series, config, resubmit)
        return

    # batch mode
    update_aggregate_stats(config)
    destination = os.path.join(fs_path, 'freesurfer_aggregate_log.csv')
    update_aggregate_log(config, qc_subjects, destination)

    fs_subjects = get_new_subjects(config, qc_subjects)
    logger.info("Submitting {} new subjects".format(len(fs_subjects)))

    if resubmit:
        # Filter out subjects that were just submitted to reduce search space
        remaining_subjects = filter(lambda x: x not in fs_subjects,
                                    qc_subjects)
        halted_subjects = get_halted_subjects(fs_path, remaining_subjects)
        logger.info("Resubmitting {} subjects".format(len(halted_subjects)))
        fs_subjects.extend(halted_subjects)

    submit_proc_freesurfer(fs_path, fs_subjects, arguments)
Example #9
def main():
    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    study = arguments['<study>']
    session = arguments['<session>']

    # setup logging
    logging.basicConfig()
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)

    formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                  '%(levelname)s - %(message)s')
    ch.setFormatter(formatter)

    logger.addHandler(ch)

    # setup the config object
    cfg = datman.config.config(study=study)

    dir_nii = cfg.get_path('nii')
    dir_res = cfg.get_path('resources')

    # setup the dashboard object
    db = datman.dashboard.dashboard(study=study)

    if session:
        # single session defined on command line
        sessions = [session]
    else:
        # session not defined find all sessions in a project
        sessions = os.listdir(dir_res)

    logger.info('Processing {} sessions'.format(len(sessions)))
    for session in sessions:
        try:
            process_session(cfg, db, dir_nii, dir_res, session)
        except Exception:
            logger.error('Failed processing session:{}'.format(session))
Example #10
def main():
    global cfg, DRYRUN
    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    DRYRUN = arguments['--dry-run']
    study = arguments['<study>']
    session = arguments['<session>']

    # setup logging
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)

    formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                  '%(levelname)s - %(message)s')
    ch.setFormatter(formatter)

    logger.addHandler(ch)

    # setup the config object
    logger.info('Loading config')

    cfg = datman.config.config(study=study)

    nii_dir = cfg.get_path('nii')
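    # collect PDT2 nifti files, either from a single session or by walking the
    # whole nii directory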
    images = []
    if session:
        base_dir = os.path.join(nii_dir, session)
        files = os.listdir(base_dir)
        add_session_PDT2s(files, images, base_dir)
    else:
        for root, dirs, files in os.walk(nii_dir):
            add_session_PDT2s(files, images, root)

    logger.info('Found {} splittable nifti files with tag "PDT2"'.format(
            len(images)))
    for image in images:
        split(image)
Example #11
def main():

    arguments = docopt(__doc__)
    x = abs(float(arguments['<x>']))
    y = abs(float(arguments['<y>']))
    z = abs(float(arguments['<z>']))
    files = arguments['<files>']

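    # a dimension given as 0 becomes None, i.e. no reslicing target for that axis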
    if x == 0: x = None
    if y == 0: y = None
    if z == 0: z = None
    target = [x, y, z]

    # remove all input files with ORIG_RESOLUTION in the name
    files = filter(lambda x: 'ORIG_RESOLUTION' not in x, files)

    # loop through files, reslice as necessary
    for f in files:
        reslice(target, f)
Example #12
def make_qc_command(subject_id, study):
    arguments = docopt(__doc__)
    use_server = arguments['--log-to-server']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
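    # rebuild the command line for a single-subject QC run, forwarding the
    # logging flags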
    command = " ".join([__file__, study, subject_id])
    if verbose:
        command = " ".join([command, '-v'])
    if debug:
        command = " ".join([command, '-d'])
    if quiet:
        command = " ".join([command, '-q'])
    if use_server:
        command = " ".join([command, '--log-to-server'])

    if REWRITE:
        command = command + ' --rewrite'

    return command
Example #13
def main():
    arguments = docopt(__doc__)
    project = arguments['<project>']
    output_loc = arguments['--output']
    config_file = arguments['--config_file']
    system = arguments['--system']
    xnat_cred = arguments['--xnat-credentials']
    quiet = arguments['--quiet']
    debug = arguments['--debug']
    verbose = arguments['--verbose']

    if verbose:
        logger.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
    if quiet:
        logger.setLevel(logging.ERROR)

    config = datman.config.config(filename=config_file,
                                  system=system,
                                  study=project)

    output_file = set_output_name(output_loc, config)

    xnat_url = get_xnat_url(config)
    username, password = datman.utils.get_xnat_credentials(config, xnat_cred)
    xnat_project_names = config.get_xnat_projects()
    logger.debug("Summarizing XNAT projects {}".format(xnat_project_names))

    with datman.utils.XNATConnection(xnat_url, username,
                                     password) as xnat_connection:
        overviews = get_session_overviews(xnat_connection, xnat_project_names)

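    # query the XNAT REST API directly for the MR session labels in each project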
    with requests.Session() as session:
        session.auth = (username, password)
        MR_ids = get_MR_ids(session, xnat_url, xnat_project_names)

    merged_records = merge_overview_and_labels(overviews, MR_ids)
    write_overview_csv(merged_records, output_file)
Example #14
def main():
    global DRYRUN, CONFIG
    arguments = docopt(__doc__)
    link_file = arguments['<link_file>']
    src_session = arguments['<src_session>']
    trg_session = arguments['<trg_session>']
    tags = arguments['<tags>']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    DRYRUN = arguments['--dry-run']
    quiet = arguments['--quiet']

    logger.setLevel(logging.WARN)

    if quiet:
        logger.setLevel(logging.ERROR)

    if verbose:
        logger.setLevel(logging.INFO)

    if debug:
        logger.setLevel(logging.DEBUG)

    if link_file is not None:
        logger.info("Using link file {} to make links".format(link_file))
        for line in read_link_file(link_file):
            link_session_data(line[0], line[1], line[2])
        return

    logger.info("Linking the provided source {} and target " \
            "{}".format(src_session, trg_session))
    tags = link_session_data(src_session, trg_session, tags)

    src_link_file = get_external_links_csv(src_session)
    trg_link_file = get_external_links_csv(trg_session)

    for link_file in [src_link_file, trg_link_file]:
        write_link_file(link_file, src_session, trg_session, tags)
Example #15
def main():
    global DRYRUN
    arguments = docopt(__doc__)
    study = arguments['<study>']
    subject = arguments['<subject>']
    use_server = arguments['--log-to-server']
    debug = arguments['--debug']
    DRYRUN = arguments['--dry-run']

    config = datman.config.config(study=study)

    if use_server:
        add_server_handler(config)
    if debug:
        logging.getLogger().setLevel(logging.DEBUG)

    check_environment()

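    # with a subject given, run the pipeline for just that subject;
    # otherwise process all subjects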
    if subject:
        run_pipeline(config, subject, arguments['<T1>'], arguments['<T2>'])
        return

    run_all_subjects(config, arguments)
Example #16
            continue
        make_job(f, out_path)


def main(study, session=None):
    logger.info('Processing study:{}'.format(study))
    if session:
        process_session(session)
    else:
        sessions = os.listdir(NII_PATH)
        logger.info('Found {} sessions.'.format(len(sessions)))
        for session in sessions:
            process_session(session)

if __name__ == '__main__':
    arguments = docopt(__doc__)
    study = arguments['<study>']
    session = arguments['<session>']
    ATLAS_FILE = arguments['--atlas_file']
    CLUSTER_DIR = arguments['--cluster_dir']
    MRML_FILE = arguments['--mrml_file']
    CLUSTER_PATTERN = arguments['--cluster-pattern']
    CONTAINER = arguments['--mitk_container']
    CLEANUP = arguments['--leave_temp_files']
    LOGDIR = arguments['--logDir']
    OVERWRITE = arguments['--rewrite']
    TAGS = arguments['--tags']

    TAGS = [tag.strip() for tag in TAGS.split(',')] if TAGS else []

    QUIET = False
Example #17
def main():
    """
    Runs fmri data through the specified epitome script.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    for x in config.study_config['fmri'].iteritems():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in x[1].keys():
                logger.error("fmri:{}:{} not defined in configuration file".format(x[0], k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['fmri']))
            for exp in config.study_config['fmri'].keys():
                expected_names = config.study_config['fmri'][exp]['export']
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join(['python ', __file__, study, '--subject {} '.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
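                # write a one-line bash job script that re-invokes this script
                # for a single subject, then submit it with qsub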
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #18
def main():
    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = config.get_path('freesurfer')
    hcp_dir = config.get_path('hcp')

    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)
        return

    config = cfg.config(study=study)
    qced_subjects = config.get_subject_metadata()

    # running for batch mode

    new_subjects = []
    # find subjects where at least one expected output does not exist
    for subject in qced_subjects:
        subj_dir = os.path.join(hcp_dir, subject)
        if not ciftify_outputs_exist(subj_dir):
            if fs_outputs_exist(os.path.join(freesurfer_dir, subject)):
                new_subjects.append(subject)

    create_indices_bm(config, study)

    # submit a list of calls to ourself, one per subject
    commands = []
    if debug:
        debugopt = '--debug'
    else:
        debugopt = ''

    for subject in new_subjects:
        commands.append(" ".join(
            [__file__, study, '--subject {} '.format(subject), debugopt]))

    if commands:
        logger.debug('queueing up the following commands:\n' +
                     '\n'.join(commands))

    for i, cmd in enumerate(commands):
        jobname = 'dm_fs2hcp_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)
        with open(jobfile, 'wb') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
            logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
Example #19
def main():
    arguments = docopt(__doc__)
    study = arguments['<study>']
    output_csv = arguments['<csv_file>']
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']

    if quiet:
        logger.setLevel(logging.ERROR)

    if verbose:
        logger.setLevel(logging.INFO)

    if debug:
        logger.setLevel(logging.DEBUG)

    logging.info('Starting')

    # Check the yaml file can be read correctly
    logger.debug('Reading yaml file.')

    cfg = datman.config.config(study=study)
    #
    # ## Read in the configuration yaml file
    # if not os.path.isfile(config_yaml):
    #     raise ValueError("configuration file {} not found. Try again."
    #                      .format(config_yaml))
    #
    # ## load the yml file
    # with open(config_yaml, 'r') as stream:
    #     CONFIG = yaml.load(stream)
    #
    # ## check that the required keys are there
    # ExpectedKeys = ['paths']
    # diffs = set(ExpectedKeys) - set(CONFIG.keys())
    # if len(diffs) > 0:
    #     raise ImportError("configuration file missing {}".format(diffs))

    #dcm_dir = CONFIG['paths']['dcm']
    dcm_dir = cfg.get_path('dcm')

    logger.debug('Getting scan list for {}'.format(dcm_dir))
    scans = datman.utils.get_folder_headers(dcm_dir)
    logger.info('Found {} scans'.format(len(scans)))

    headers = [
        "FOLDER", "SUBJECT", "SESSION", "SCANDATE", "SITE",
        "SUBJECT/REPEAT/PHANTOM"
    ]

    results = []
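    # each res unpacks as (subject, session, site, scan date, phantom flag, repeat flag)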
    for key, val in scans.iteritems():
        res = process_scan(key, val)
        if res:
            result = [
                key, res[0], res[1],
                datetime.strftime(res[3], '%Y-%m-%d'), res[2]
            ]
            if res[4]:
                result.append('PHANTOM')
            elif res[5]:
                result.append('REPEAT')
            else:
                result.append("SUBJECT")
            results.append(result)

    if output_csv:
        with open(output_csv, 'wb') as csvfile:
            csv_writer = csv.writer(csvfile)
            csv_writer.writerow(headers)
            for row in results:
                csv_writer.writerow(row)
    else:
        print(','.join(headers))
        for row in results:
            print(','.join(row))
Example #20
def main():

    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error(
            'study {} not defined in master configuration file\n{}'.format(
                study, NODE))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'fmri' not in config.site_config['paths']:
        logger.error(
            "paths:fmri not defined in site configuration file\n{}".format(
                NODE))
        sys.exit(1)

    fmri_dir = os.path.join(study_base, config.site_config['paths']['fmri'])

    if scanid:
        path = os.path.join(fmri_dir, scanid)
        try:
            run_analysis(scanid, config, study)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        # look for subjects with at least one fmri type missing outputs
        subjects = []

        # loop through fmri experiments defined
        for exp in config.study_config['fmri'].keys():
            expected_files = config.study_config['fmri'][exp]['conn']
            fmri_dirs = glob.glob('{}/*'.format(os.path.join(fmri_dir, exp)))

            for subj_dir in fmri_dirs:
                candidates = glob.glob('{}/*'.format(subj_dir))
                for filetype in expected_files:
                    # add subject if outputs don't already exist
                    if not filter(
                            lambda x: '{}_roi-corrs.csv'.format(filetype) in x,
                            candidates):
                        subjects.append(os.path.basename(subj_dir))
                        break

        # collapse found subjects (do not double-count) and create a list of commands
        commands = []
        subjects = list(set(subjects))
        for subject in subjects:
            commands.append(" ".join(
                [__file__, study, '--subject {}'.format(subject)]))

        if commands:
            logger.debug('queueing up the following commands:\n' +
                         '\n'.join(commands))

            for i, cmd in enumerate(commands):
                jobname = 'dm_rest_{}_{}'.format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error(
                        "Job submission failed. Output follows. {}".format(
                            NODE))
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #21
def main():
    arguments = docopt(__doc__)
    study = arguments['<study>']
    use_server = arguments['--log-to-server']
    debug = arguments['--debug']

    config = load_config(study)

    if use_server:
        add_server_handler(config)
    if debug:
        logger.setLevel(logging.DEBUG)
    ## setup some paths
    study_base_dir = config.get_study_base()
    fs_dir = config.get_path('freesurfer')
    data_dir = config.get_path('nii')
    # not sure where to put this. Potentially it could be very large
    # keeping it means existing subjects don't get re-run.
    # it could be deleted but then would need extra code to determine
    # if subjects have been run.
    working_dir = os.path.join(study_base_dir,
                               'pipelines/workingdir_reconflow')

    ## These are overrides, for testing
    base_dir = '/external/rprshnas01/tigrlab/'
    fs_dir = os.path.join(base_dir, 'scratch/twright/pipelines/freesurfer',
                          study)

    working_dir = os.path.join(
        base_dir, 'scratch/twright/pipelines/workingdir_reconflow')

    # freesurfer fails if the subjects dir doesn't exist
    check_folder_exists(fs_dir)
    # get the list of subjects that are not phantoms and have been qc'd
    subject_list = config.get_subject_metadata()
    subject_list = [
        subject for subject in subject_list
        if not dm_scanid.is_phantom(subject)
    ]

    # Need to determine if the study has T2 (or FLAIR) scans,
    # do this by looking in the study_config.yml for expected scantypes.
    # Current pipelines add T2 files if they exist on a per-subject basis
    # Nipype expects the each run of the pipeline to be the same across all subjects
    # it is possible to set some parameters on a per-subject basis (see nu-iter setting)
    # but is this desirable?
    scan_types = get_common_scan_types(config)

    if 'T1' not in scan_types:
        msg = 'Study {} does not have T1 scans, aborting.'.format(study)
        sys.exit(msg)

    templates = {'T1': '{dm_subject_id}/{dm_subject_id}_??_T1_??*.nii.gz'}
    if 'T2' in scan_types:
        templates['T2'] = '{dm_subject_id}/{dm_subject_id}_??_T2_??*.nii.gz'
    if 'FLAIR' in scan_types:
        logger.debug('FLAIR processing not yet implemented')
        #templates = {'T2': '{dm_subject_id}/{dm_subject_id}_??_FLAIR _??*.nii.gz'}

    # setup the nipype nodes
    # infosource just iterates through the list of subjects
    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    # For testing
    subject_list = ['DTI_CMH_H001_02']
    infosource.iterables = ('subject_id', subject_list)

    # sf finds the files for each subject. The dmSelectFiles class
    # overrides the nipype.SelectFiles adding checks that the numbers
    # of files matches those defined in study_config.yml
    sf = Node(dmSelectFiles(templates), name="selectFiles")

    sf.inputs.base_directory = data_dir

    # set_nuiter implements a simple function to set the iteration count
    # on a subject by subject basis
    set_nuiter = Node(Function(input_names=['subject_id'],
                               output_names=['nu_iter'],
                               function=get_nuiter_settings),
                      name='get_nuiter')

    # reconall is the interface for the recon-all freesurfer function
    # currently seem unable to specify multiple directives
    #    (e.g. -qcache and -notal-check)
    reconall = Node(ReconAll(directive='all',
                             parallel=True,
                             subjects_dir=fs_dir),
                    name='recon-all')
    # if this is running on a cluster, we can specify node specific requirements
    #  i.e. reconall runs well with lots of cores.
    reconall.plugin_args = {
        'qsub_args': '-l nodes=1:ppn=24',
        'overwrite': True
    }

    # get_summary extracts the summary information from the output of reconall
    get_summary = Node(EnigmaSummaryTask(), name='Enigma_Summaries')

    ## Create the workflow
    reconflow = Workflow(name='reconflow')
    reconflow.base_dir = working_dir

    # need a different connection pattern and param for the reconall node
    # if T2 files exist
    sf_ra_conx = [('T1', 'T1_files')]

    if 'T2' in scan_types:
        reconall.inputs.use_T2 = True
        sf_ra_conx.append(('T2', 'T2_file'))

    ## Connect the outputs from each node to the corresponding inputs
    # Basically we link the defined outputs from each node, to the inputs of the next node
    #   Each item in the list is [node1, node2, [(output_node1, input_node2)]]

    # Problem here due to incompatibilities between freesurfer 5 & 6
    # this pattern works for freesurfer 5.3.0 (without the parallel flag for reconall)
    # but fails for 6.0.0, which doesn't support the nu-iterations flag.
    # reconflow.connect([(infosource, sf, [('subject_id', 'dm_subject_id')]),
    #                    (infosource, set_nuiter, [('subject_id', 'subject_id')]),
    #                    (sf, reconall, sf_ra_conx),
    #                    (set_nuiter, reconall, [('nu_iter', 'flags')])])

    # this is the freesurfer 6 compatible version
    reconflow.connect([(infosource, sf, [('subject_id', 'dm_subject_id')]),
                       (infosource, reconall, [('subject_id', 'subject_id')]),
                       (sf, reconall, sf_ra_conx),
                       (reconall, get_summary,
                        [('subjects_dir', 'subjects_dir'),
                         ('subject_id', 'subject_id'),
                         ('subjects_dir', 'output_path')])])

    # need to use a job template to ensure the environment is set correctly
    # on the running nodes.
    # Not sure why the current env isn't being passed
    job_template = os.path.join(os.path.dirname(__file__),
                                'job_template_scc.sh')

    ## run the actual workflow.
    # the pbsGraph plugin creates jobs for each node on a PBS torque using
    # torque scheduling to keep them in order.
    # Use plugin='SGEGraph' to run on lab cluster (not sure what will happen
    #   to the reconflow node if we don't have any 24 core machines).
    # Don't specify a plugin to run on a single machine
    reconflow.run(plugin='PBSGraph', plugin_args=dict(template=job_template))
Example #22
def main():
    arguments = docopt(__doc__)

    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'ea' not in config.study_config['fmri'].keys():
        logger.error('ea not defined under fmri for study {}'.format(study))
        sys.exit(1)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in {}".format(k, config_file))
            sys.exit(1)

    ea_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                          'ea')
    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

    if subject:
        if '_PHA_' in subject:
            logger.error("{} is a phantom, cannot analyze".format(subject))
            sys.exit(1)
        analyze_subject(subject, config, study)

    else:
        # batch mode
        subjects = glob.glob('{}/*'.format(nii_dir))
        commands = []

        if debug:
            opts = '--debug'
        else:
            opts = ''

        for path in subjects:
            subject = os.path.basename(path)
            if check_complete(ea_dir, subject):
                logger.debug('{} already analysed'.format(subject))
            else:
                commands.append(" ".join(
                    [__file__, study, '--subject {}'.format(subject), opts]))

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_ea_{}_{}".format(i,
                                               time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)

                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                # qbacth method -- might bring it back, but not needed
                #fd, path = tempfile.mkstemp()
                #os.write(fd, '\n'.join(commands))
                #os.close(fd)
                #rtn, out, err = utils.run('qbatch -i --logdir {ld} -N {name} --walltime {wt} {cmds}'.format(ld=logdir, name=jobname, wt=walltime, cmds=path))
                if rtn:
                    logger.error(
                        "Job submission failed\nstdout: {}".format(out))
                    sys.exit(1)
Example #23
def main():
    arguments = docopt(__doc__)
    study     = arguments['<study>']
    scanid    = arguments['--subject']
    debug     = arguments['--debug']
    dryrun    = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = os.path.join(study_base, config.site_config['paths']['freesurfer'])
    hcp_dir = os.path.join(study_base, config.site_config['paths']['hcp'])

    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        freesurfer_dirs = glob.glob('{}/*'.format(freesurfer_dir))

        # find subjects where at least one expected output does not exist
        for path in freesurfer_dirs:
            subject = os.path.basename(path)

            hcp_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['hcp']))
            subj_dir = os.path.join(hcp_dir, subject)
            if not outputs_exist(subj_dir):
                subjects.append(subject)

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join([__file__, study, '--subject {} '.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fs2hcp_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))
                #rtn, out = utils.run('echo bash -l {}/{} {} | qbatch -N {} --logdir {} --walltime {} -'.format(bin_dir, script, subid, jobname, logs_dir, walltime))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #24
def main():
    """
    Loops through subjects, preprocesses each with the supplied script, and
    runs a first-level GLM using AFNI (tent functions, 15 s window) on all
    subjects.
    """
    arguments  = docopt(__doc__)
    project    = arguments['<project>']
    tmp_path   = arguments['<tmppath>']
    script     = arguments['<script>']
    assets     = arguments['<assets>']
    verbose    = arguments['--verbose']
    debug      = arguments['--debug']

    if verbose: 
        logger.setLevel(logging.INFO)
    if debug: 
        logger.setLevel(logging.DEBUG)

    data_path = dm.utils.define_folder(os.path.join(project, 'data'))
    nii_path = dm.utils.define_folder(os.path.join(data_path, 'nii'))
    func_path = dm.utils.define_folder(os.path.join(data_path, 'imob'))
    tmp_path = dm.utils.define_folder(tmp_path)
    _ = dm.utils.define_folder(os.path.join(project, 'logs'))
    log_path = dm.utils.define_folder(os.path.join(project, 'logs/imob'))

    list_of_names = []
    tmpdict = {}
    subjects = dm.utils.get_subjects(nii_path)

    # preprocess

    for sub in subjects:
        if dm.scanid.is_phantom(sub):
            logger.debug("Skipping phantom subject {}".format(sub))
            continue
        if os.path.isfile(os.path.join(func_path, '{sub}/{sub}_preproc-complete.log'.format(sub=sub))):
            continue
        try:
            logger.info("Preprocessing subject {}".format(sub))
            name, tmpdict = process_functional_data(sub, data_path, log_path, tmp_path, tmpdict, script)
            list_of_names.append(name)

        except ValueError as ve:
            continue

    if len(list_of_names) > 0:
        dm.utils.run_dummy_q(list_of_names)

    # export
    for sub in tmpdict:
        if os.path.isfile(os.path.join(func_path, '{sub}/{sub}_preproc-complete.log'.format(sub=sub))):
            continue
        try:
            logger.info("Exporting subject {}".format(sub))
            export_data(sub, tmpdict[sub], func_path)
        except:
            logger.error('Failed to export {}'.format(sub))
            continue
        else:
            continue

    # analyze
    for sub in subjects:
        if dm.scanid.is_phantom(sub):
            continue
        if os.path.isfile(os.path.join(func_path, '{sub}/{sub}_analysis-complete.log'.format(sub=sub))):
            continue
        try:
            logger.info("Analyzing subject {}".format(sub))
            script = generate_analysis_script(sub, func_path, assets)
            if script: 
                returncode, _, _ = dm.utils.run('bash {}'.format(script))
                dm.utils.check_returncode(returncode)
                dm.utils.run('touch {func_path}/{sub}/{sub}_analysis-complete.log'.format(func_path=func_path, sub=sub))
        except Exception:
            logger.exception('Failed to analyze IMOB data for {}.'.format(sub))
Example #25
def main():
    """
    Runs .nrrd data through unring.py.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    batch  = arguments['--batch']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nrrd']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    nrrd_dir = os.path.join(study_base, config.site_config['paths']['nrrd'])

    # runs in serial (due to MATLAB dependencies)
    if batch:
        try:
            run_all(nrrd_dir, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # default behaviour: submit self to queue in batch mode
    else:
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        cmd = 'python {} {} --batch {}'.format(__file__, study, debugopt)
        jobname = 'dm_unring_{}'.format(time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)

        with open(jobfile, 'wb') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
            logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
Example #26
def main():
    global xnat
    global cfg
    global excluded_studies
    global DRYRUN
    global dashboard

    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    study = arguments['<study>']
    server = arguments['--server']
    credfile = arguments['--credfile']
    username = arguments['--username']
    session = arguments['<session>']
    db_ignore = arguments['--dont-update-dashboard']

    if arguments['--dry-run']:
        DRYRUN = True
        db_ignore = True

    # setup logging
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - {study} - '
        '%(levelname)s - %(message)s'.format(study=study))
    ch.setFormatter(formatter)

    logger.addHandler(ch)

    # setup the config object
    logger.info('Loading config')

    cfg = datman.config.config(study=study)

    # setup the xnat object
    if not server:
        try:
            server = 'https://{}:{}'.format(cfg.get_key(['XNATSERVER']),
                                            cfg.get_key(['XNATPORT']))
        except KeyError:
            logger.error(
                'Failed to get xnat server info for study:{}'.format(study))
            return

    if username:
        password = getpass.getpass()
    else:
        #Moving away from storing credentials in text files
        """
        if not credfile:
            credfile = os.path.join(cfg.get_path('meta', study),
                                    'xnat-credentials')
        with open(credfile) as cf:
            lines = cf.readlines()
            username = lines[0].strip()
            password = lines[1].strip()
        """
        username = os.environ["XNAT_USER"]
        password = os.environ["XNAT_PASS"]

    xnat = datman.xnat.xnat(server, username, password)

    # setup the dashboard object
    if not db_ignore:
        try:
            dashboard = datman.dashboard.dashboard(study)
        except datman.dashboard.DashboardException as e:
            logger.error('Failed to initialise dashboard')

    # get the list of xnat projects linked to the datman study
    xnat_projects = cfg.get_xnat_projects(study)
    sessions = []
    if session:
        # if session has been provided on the command line, identify which
        # project it is in
        try:
            xnat_project = xnat.find_session(session, xnat_projects)
        except datman.exceptions.XnatException as e:
            raise e

        if not xnat_project:
            logger.error(
                'Failed to find session:{} in xnat projects:{}.'
                ' Ensure it is named correctly with timepoint and repeat.'
                .format(session, xnat_projects))
            return

        sessions.append((xnat_project, session))
    else:
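        # no session given: pull every session from each linked XNAT project
        # and validate its label before queueing it for processing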
        for project in xnat_projects:
            project_sessions = xnat.get_sessions(project)
            for session in project_sessions:
                try:
                    i = datman.scanid.parse(session['label'])
                    if not datman.scanid.is_phantom(
                            session['label']) and i.session == '':
                        # raise an exception if the scan is not a phantom and the session number is missing
                        raise datman.scanid.ParseException
                except datman.scanid.ParseException:
                    logger.error(
                        'Invalid session id:{} in project:{}, skipping.'.
                        format(session['label'], project))
                    continue
                sessions.append((project, session['label']))

    logger.info('Found {} sessions for study: {}'.format(len(sessions), study))

    for session in sessions:
        process_session(session)
Example #27
def main():
    """
    Essentially, analyzes the resting-state data.

    1) Runs functional data through a defined epitome script.
    2) Extracts time series from the cortex using MRI-space ROIs.
    3) Generates a correlation matrix for each subject.
    4) Generates an experiment-wide correlation matrix.
    5) Generates a set of graph metrics for each subject.
    """

    arguments = docopt(__doc__)
    project = arguments['<project>']
    script = arguments['<script>']
    atlas = arguments['<atlas>']
    subjects = arguments['<subject>']
    tags = arguments['--tags'].split(',')
    checklistfile = arguments['--checklist']
    verbose = arguments['--verbose']
    debug = arguments['--debug']

    if verbose:
        logger.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)

    # check inputs
    if not os.path.isfile(atlas):
        logger.error("Atlas {} does not exist".format(atlas))
        sys.exit(-1)

    if not os.path.isfile(script):
        logger.error("Epitome script {} does not exist".format(script))
        sys.exit(-1)

    # submit jobs if not working on single subject
    submit_mode = len(subjects) == 0
    logger.debug("Subjects: {}".format(subjects))
    logger.debug("Submit mode: {}".format(submit_mode))

    nii_path = os.path.join(project, 'data', 'nii')
    subjects = subjects or dm.utils.get_subjects(nii_path)

    if checklistfile and not os.path.exists(checklistfile): 
        logger.fatal('Checklist {} does not exist'.format(checklistfile))
        sys.exit(1)

    logger.debug('Using checklist: {}'.format(checklistfile))
    checklist = dm.checklist.load(checklistfile)

    for subject in subjects:
        if checklist.is_blacklisted(STAGE_NAME, subject): 
            logger.info("Subject {} blacklisted. Skipping.".format(subject))
            continue
            
        if is_complete(project, subject):
            logger.info("Subject {} processed. Skipping.".format(subject))
            continue

        if dm.scanid.is_phantom(subject):
            logger.debug("Subject {} is a phantom. Skipping.".format(subject))
            continue

        try:
            data = get_required_data(project, subject, tags, checklist)
        except MissingDataException as e:
            logger.error(e.message)
            continue

        if submit_mode:
            opts = ''
            opts += verbose and ' --verbose' or ''
            opts += debug and ' --debug' or ''
            opts += tags and ' --tags=' + ','.join(tags) or ''

            cmd = "{me} {opts} {project} {script} {atlas} {subject}".format(
                me=__file__,
                opts=opts,
                project=project,
                script=script,
                atlas=atlas,
                subject=subject)
            job_name = 'dm_rest_{}'.format(subject)
            memopts = 'h_vmem=3G,mem_free=3G,virtual_free=3G'
            stamp = time.strftime("%Y%m%d-%H%M%S")
            logfile = '{name}-{stamp}.log'.format(name=job_name, stamp=stamp)
            logpath = os.path.join(project, 'logs', 'rest', logfile)
            qsub = 'qsub -V -N {name} -l {memopts} -o {logpath} -j y -b y {cmd}'.format(
                name=job_name,
                memopts=memopts,
                logpath=logpath,
                cmd=cmd)

            logger.debug('exec: {}'.format(qsub))
            dm.utils.run(qsub)
        else:
            process_subject(project, data, subject, tags, atlas, script)
Example #28
def main():
    # make the already_linked dict global as we are going to use it a lot
    global already_linked
    global lookup
    global DRYRUN

    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    DRYRUN = arguments['--dry-run']
    quiet = arguments['--quiet']
    study = arguments['<study>']
    lookup_path = arguments['--lookup']
    scanid_field = arguments['--scanid-field']
    zipfile = arguments['<zipfile>']

    # setup logging
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - {study} - '
        '%(levelname)s - %(message)s'.format(study=study))
    ch.setFormatter(formatter)

    logger.addHandler(ch)

    # setup the config object
    cfg = datman.config.config(study=study)
    if not lookup_path:
        lookup_path = os.path.join(cfg.get_path('meta'), 'scans.csv')

    dicom_path = cfg.get_path('dicom')
    zips_path = cfg.get_path('zips')

    if not os.path.isdir(dicom_path):
        logger.warning("Dicom path:{} doesn't exist".format(dicom_path))
        try:
            os.makedirs(dicom_path)
        except OSError:
            logger.error('Failed to create dicom path:{}'.format(dicom_path))
            return

    if not os.path.isdir(zips_path):
        logger.error("Zips path:{} doesn't exist".format(zips_path))
        return

    try:
        lookup = pd.read_table(lookup_path, sep='\s+', dtype=str)
    except IOError:
        logger.error('Lookup file:{} not found'.format(lookup_path))
        return

    # identify which zip files have already been linked
    already_linked = {
        os.path.realpath(f): f
        for f in glob.glob(os.path.join(dicom_path, '*')) if os.path.islink(f)
    }

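    # restrict to the zip files named on the command line, if any;
    # otherwise link every .zip archive in the zips directory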
    if zipfile:
        if isinstance(zipfile, basestring):
            zipfile = [zipfile]
        archives = [os.path.join(zips_path, zip) for zip in zipfile]
    else:
        archives = [
            os.path.join(zips_path, archive)
            for archive in os.listdir(zips_path)
            if os.path.splitext(archive)[1] == '.zip'
        ]

    logger.info('Found {} archives'.format(len(archives)))
    for archive in archives:
        link_archive(archive, dicom_path, scanid_field)
Example #29
def main():
    """
    1) Runs functional data through a custom epitome script.
    2) Extracts block onsets, durations, and parametric modulators from
       behavioural log files collected at the scanner (and stored in RESOURCES).
    3) Writes out AFNI-formatted timing files as well as a GLM script per
       subject.
    4) Executes this script, producing beta-weights for each subject.
    """

    arguments  = docopt(__doc__)
    project    = arguments['<project>']
    tmp_path   = arguments['<tmppath>']
    script     = arguments['<script>']
    assets     = arguments['<assets>']
    verbose    = arguments['--verbose']
    debug      = arguments['--debug']

    if verbose: 
        logging.getLogger().setLevel(logging.INFO)
    if debug: 
        logging.getLogger().setLevel(logging.DEBUG)

    data_path = dm.utils.define_folder(os.path.join(project, 'data'))
    nii_path = dm.utils.define_folder(os.path.join(data_path, 'nii'))
    func_path = dm.utils.define_folder(os.path.join(data_path, 'ea'))
    tmp_path = dm.utils.define_folder(tmp_path)
    _ = dm.utils.define_folder(os.path.join(project, 'logs'))
    log_path = dm.utils.define_folder(os.path.join(project, 'logs/ea'))

    list_of_names = []
    tmpdict = {}

    # preprocess
    subjects = dm.utils.get_subjects(nii_path)
    for sub in subjects:
        if dm.scanid.is_phantom(sub):
            continue
        if os.path.isfile(os.path.join(func_path, '{sub}/{sub}_preproc-complete.log'.format(sub=sub))):
            continue
        try:
            name, tmpdict = process_functional_data(sub, data_path, log_path, tmp_path, tmpdict, script)
            list_of_names.append(name)
        except ValueError:
            continue

    if len(list_of_names) > 0:
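        # run_dummy_q presumably blocks here until the queued preprocessing
        # jobs have finished, so the export step below can see their output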
        dm.utils.run_dummy_q(list_of_names)

    # export
    for sub in tmpdict:
        if os.path.isfile(os.path.join(func_path, '{sub}/{sub}_preproc-complete.log'.format(sub=sub))):
            continue
        try:
            export_data(sub, tmpdict[sub], func_path)
        except Exception:
            logger.error('Failed to export {}'.format(sub))
            continue

    # analyze
    subjects = dm.utils.get_subjects(func_path)

    for sub in subjects:
        if dm.scanid.is_phantom(sub):
            continue
        if os.path.isfile('{func_path}/{sub}/{sub}_analysis-complete.log'.format(func_path=func_path, sub=sub)):
            continue
        
        # get all the log files for a subject
        try:
            resdirs = glob.glob(os.path.join(data_path, 'RESOURCES', sub + '_??'))
            resources = []
            for resdir in resdirs:
                resfiles = [os.path.join(dp, f) for 
                                      dp, dn, fn in os.walk(resdir) for f in fn]
                resources.extend(resfiles)

            logs = sorted(f for f in resources if '.log' in f and 'UCLAEmpAcc' in f)
        except Exception:
            logger.error('No BEHAV data for {}.'.format(sub))
            continue

        if len(logs) != 3:
            logger.error('Did not find exactly 3 logs for {}.'.format(sub))
            continue

        # extract all of the data from the logs
        on_all, dur_all, corr_all, push_all = [], [], [], []

        try:
            for log in logs:
                on, dur, corr, push = process_behav_data(log, assets, func_path, sub, 'vid')
                on_all.extend(on)
                dur_all.extend(dur)
                corr_all.extend(corr)
                push_all.extend(push)
        except Exception as e:
            logger.error('Failed to parse logs for {}, log={}: {}'.format(sub, log, e))
            continue

        # write data to stimulus timing file for AFNI, and a QC csv
        try:
            # write each stimulus time:
            #         [start_time]*[amplitude],[buttonpushes]:[block_length]
            #         30*5,0.002:12

            # OFFSET 4 TRs == 8 Seconds!
            # on = on - 8.0
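            # e.g. an onset at 40.0 s with correlation 0.85, 14 button pushes
            # and a 40 s block is written (after the 8 s offset) as:
            #     32.00*0.85,14:40.00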
            # stim timing file
            f1_path = '{func_path}/{sub}/{sub}_block-times_ea.1D'.format(func_path=func_path, sub=sub)
            # r values and number of button pushes per minute
            f2_path = '{func_path}/{sub}/{sub}_corr_push.csv'.format(func_path=func_path, sub=sub)
            with open(f1_path, 'w') as f1, open(f2_path, 'w') as f2:
                f2.write('correlation,n-pushes-per-minute\n')
                for i in range(len(on_all)):
                    f1.write('{o:.2f}*{r:.2f},{p}:{d:.2f} '.format(o=on_all[i]-8.0, r=corr_all[i], p=push_all[i], d=dur_all[i]))
                    f2.write('{r:.2f},{p}\n'.format(r=corr_all[i], p=push_all[i]))
                f1.write('\n')  # add newline at the end of each run (up to 3 runs)
        except Exception:
            logger.error('Failed to write block_times & corr_push for {}'.format(sub))
            continue

        # analyze the data
        try:
            generate_analysis_script(sub, func_path)
            dm.utils.run('bash {func_path}/{sub}/{sub}_glm_1stlevel_cmd.sh'.format(func_path=func_path, sub=sub))
            dm.utils.run('touch {func_path}/{sub}/{sub}_analysis-complete.log'.format(func_path=func_path, sub=sub))
        except Exception:
            logger.error('Analysis failed for {}'.format(sub))
            continue
Example #30
def main():
    """
    Loops through subjects, preprocessing using supplied script, and runs a
    first-level GLM using AFNI (tent functions, 15 s window) on all subjects.
    """
    arguments = docopt(__doc__)
    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logger.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)
    imob_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                            'imob')

    # process a single subject
    if subject:

        # get required inputs from each
        files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
        inputs = get_inputs(files, config)

        # check if subject has already been processed
        if check_complete(imob_dir, subject):
            logger.info('{} already analysed'.format(subject))
            sys.exit(0)

        # first level GLM for inputs
        for input_type in inputs.keys():
            script = generate_analysis_script(subject, inputs, input_type,
                                              config, study)
            rtn, out = utils.run('chmod 754 {}'.format(script))
            rtn, out = utils.run(script)
            if rtn:
                logger.error(
                    'Script {} failed to run on subject {} with error:\n{}'.
                    format(script, subject, out))
                sys.exit(1)

    # process all subjects
    else:
        commands = []
        for path in glob.glob('{}/*'.format(imob_dir)):
            subject = os.path.basename(path)

            # add subject if any of the expected outputs do not exist
            files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
            try:
                inputs = get_inputs(files, config)
            except Exception:
                logger.debug('Invalid inputs for {}'.format(subject))
                continue
            # queue the subject if any expected first-level output is missing
            for exp in inputs:
                expected_output = '{}_glm_IM_1stlvl_{}'.format(subject, exp)
                if not any(expected_output in x for x in files):
                    commands.append(" ".join(
                        [__file__, study, '--subject {}'.format(subject)]))
                    break

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            #fd, path = tempfile.mkstemp()
            #os.write(fd, '\n'.join(commands))
            #os.close(fd)
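            # each command is wrapped in a tiny bash script under /tmp and
            # submitted to the queue with qsub below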
            for i, cmd in enumerate(commands):
                jobname = "dm_imob_{}_{}".format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                #rtn, out, err = utils.run('qbatch -i --logdir {logdir} -N {name} --walltime {wt} {cmds}'.format(logdir = log_path, name = jobname, wt = walltime, cmds = path))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #31
def main():
    global username
    global server
    global password
    global XNAT
    global CFG

    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    quiet = arguments['--quiet']
    study = arguments['<study>']
    server = arguments['--server']
    credfile = arguments['--credfile']
    username = arguments['--username']
    archive = arguments['<archive>']

    # setup logging
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - {study} - '
        '%(levelname)s - %(message)s'.format(study=study))
    ch.setFormatter(formatter)

    logger.addHandler(ch)

    # setup the config object
    logger.info('Loading config')

    CFG = datman.config.config(study=study)

    XNAT = get_xnat(server=server, credfile=credfile, username=username)

    dicom_dir = CFG.get_path('dicom', study)
    # deal with a single archive specified on the command line,
    # otherwise process all files in dicom_dir
    if archive:
        # check if the specified archive is a valid file
        if os.path.isfile(archive):
            dicom_dir = os.path.dirname(os.path.normpath(archive))
            archives = [os.path.basename(os.path.normpath(archive))]
        elif datman.scanid.is_scanid_with_session(archive):
            # a session id could have been provided; let's be nice and handle that
            archives = [datman.utils.splitext(archive)[0] + '.zip']
        else:
            logger.error("Can't find archive:{}".format(archive))
            return
    else:
        archives = os.listdir(dicom_dir)

    logger.debug('Processing files in:{}'.format(dicom_dir))
    logger.info('Processing {} files'.format(len(archives)))

    for archivefile in archives:
        process_archive(os.path.join(dicom_dir, archivefile))
Example #32
def main():
    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']
    quiet = arguments['--quiet']
    study = arguments['<study>']

    # setup logging
    ch = logging.StreamHandler(sys.stdout)
    log_level = logging.WARN

    if quiet:
        log_level = logging.ERROR
    if verbose:
        log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    logger.setLevel(log_level)
    ch.setLevel(log_level)
    logging.getLogger("paramiko").setLevel(log_level)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - {study} - '
        '%(levelname)s - %(message)s'.format(study=study))
    ch.setFormatter(formatter)

    logger.addHandler(ch)

    # setup the config object
    cfg = datman.config.config(study=study)

    # get folder information from the config object
    mrusers = cfg.get_key(['MRUSER'])
    mrfolders = cfg.get_key(['MRFOLDER'])
    mrserver = cfg.get_key(['FTPSERVER'])

    zips_path = cfg.get_path('zips')
    meta_path = cfg.get_path('meta')

    # Check the local project zips dir exists, create if not
    if not os.path.isdir(zips_path):
        logger.warning(
            'Zips directory: {} not found; creating.'.format(zips_path))
        if not dryrun:
            os.mkdir(zips_path)

    # MRfolders entry in config file should be a list, but could be a string
    if isinstance(mrfolders, basestring):
        mrfolders = [mrfolders]

    # MRUSER entry in config file should be a list, but could be a string
    if isinstance(mrusers, basestring):
        mrusers = [mrusers]

    # load the password
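    # mrftppass.txt is expected to hold one password per line, in the same
    # order as the MRUSER entries in the site config (see the assert below)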
    pass_file = os.path.join(meta_path, 'mrftppass.txt')
    if not os.path.isfile(pass_file):
        logger.error('Password file: {} not found'.format(pass_file))
        raise IOError('Password file {} not found'.format(pass_file))

    passwords = []
    with open(pass_file, 'r') as pass_fh:
        for password in pass_fh:
            password = password.strip()
            if password:
                passwords.append(password)

    # actually do the copying
    assert len(passwords) == len(mrusers), \
        'Each mruser in config should have an entry in the password file'

    for mruser, password in zip(mrusers, passwords):
        with pysftp.Connection(mrserver, username=mruser,
                               password=password) as sftp:

            valid_dirs = get_valid_remote_dirs(sftp, mrfolders)
            if len(valid_dirs) < 1:
                logger.error('Source folders:{} not found'.format(mrfolders))

            for valid_dir in valid_dirs:
                #  process each folder in turn
                logger.debug('Copying from:{}  to:{}'.format(
                    valid_dir, zips_path))
                process_dir(sftp, valid_dir, zips_path)
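
# The helpers get_valid_remote_dirs and process_dir are defined elsewhere in
# this script. As a hedged sketch only (not the original implementation),
# built on standard pysftp calls (exists, isdir, listdir, get), they could
# look roughly like this:
def get_valid_remote_dirs_sketch(sftp, mrfolders):
    # keep only the configured remote folders that actually exist
    return [d for d in mrfolders if sftp.exists(d) and sftp.isdir(d)]


def process_dir_sketch(sftp, remote_dir, zips_path):
    # download any remote zip that is not already present locally
    for item in sftp.listdir(remote_dir):
        if not item.lower().endswith('.zip'):
            continue
        local_file = os.path.join(zips_path, item)
        if os.path.isfile(local_file):
            continue
        logger.info('Fetching {} from {}'.format(item, remote_dir))
        sftp.get('{}/{}'.format(remote_dir, item), local_file)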