예제 #1
0
def load_config(study):
    """Load the datman configuration for *study*.

    Logs an error and terminates the process with exit status 1 when the
    study is not defined in the configuration.
    """
    try:
        return cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)
예제 #2
0
def main():
    """Convert a study's nifti exports to BIDS, skipping phantom scans."""
    args = docopt(__doc__)

    study = args["<study>"]
    study_cfg = config.config(study=study)

    subjects = args["--subject"]
    bids_dir = args["--bids-dir"] or study_cfg.get_path("bids")
    yml = args["--yaml"] or YAML
    rewrite = args["--rewrite"]

    # Module-level switch consumed elsewhere during processing.
    global ALLOW_INCOMPLETE
    ALLOW_INCOMPLETE = args["--allow-incomplete"]

    enforcer = BIDSEnforcer(yml)

    if args["--debug"]:
        logger.setLevel(logging.DEBUG)

    make_dataset_description(bids_dir, study, enforcer.version)

    # Default to every subject found in the nii export directory.
    if not subjects:
        subjects = os.listdir(study_cfg.get_path("nii"))

    for subject in subjects:
        if "PHA" in subject:
            logger.info("{} is a Phantom scan - skipping...".format(subject))
            continue

        logger.info("Processing: {}".format(subject))
        process_subject(subject, study_cfg, enforcer, bids_dir, rewrite)
예제 #3
0
def load_config(study):
    """Return the datman config for *study*; exit(1) if it is undefined."""
    try:
        loaded = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)
    return loaded
예제 #4
0
def main():
    """Set up directories and logging, then prepare a study for BIDS export.

    Directory arguments fall back to the study's configured paths when they
    are not supplied on the command line. Exits with status 1 if the study
    has no sites defined.
    """
    arguments = docopt(__doc__)

    study = arguments['<study>']
    sub_ids = arguments['<sub-id>']
    nii_dir = arguments['--nii-dir']
    bids_dir = arguments['--bids-dir']
    fmriprep_dir = arguments['--fmriprep-out-dir']
    fs_dir = arguments['--freesurfer-dir']
    rewrite = arguments['--rewrite']
    to_server = arguments['--log-to-server']
    debug = arguments['--debug']
    queue = arguments['--use-queue']

    cfg = config.config(study=study)
    logger.info("Study to convert to BIDS Format: {}".format(study))

    if not bids_dir:
        bids_dir = os.path.join(cfg.get_path('data'), "bids/")
    create_dir(bids_dir)

    log_dir = os.path.join(bids_dir, 'logs')
    create_dir(log_dir)

    setup_logger(log_dir, to_server, debug, cfg, sub_ids)
    logger.info("BIDS folder will be {}".format(bids_dir))

    if not nii_dir:
        nii_dir = cfg.get_path('nii')
        logger.info(
            "Nii files to be converted to BIDS format will be from: {}".format(
                nii_dir))

    if fmriprep_dir:
        fmriprep_fs_dir = os.path.join(fmriprep_dir, 'freesurfer')
        create_dir(fmriprep_fs_dir)
        logger.info(
            'Fmriprep freesurfer dir will be:{}'.format(fmriprep_fs_dir))
    else:
        fmriprep_fs_dir = None

    if not fs_dir:
        fs_dir = cfg.get_path('freesurfer')
        logger.info('Freesurfer dir is: {}'.format(fs_dir))

    all_tags = init_setup(study, cfg, bids_dir)
    create_task_json(bids_dir, tag_map['fmri'])

    to_delete = set()

    try:
        sites = cfg.get_sites()
    except KeyError as err:  # was py2-only "except KeyError, err" syntax
        logger.error(err)
        sys.exit(1)
예제 #5
0
def main():
    """Look up and print the configured path named <path> for a study."""
    args = docopt(__doc__)
    study = args['<study>']
    path_name = args['<path>']

    try:
        conf = config.config()
        resolved = conf.get_path(path_name, study=study)
        print(resolved)
    except Exception as exc:
        # Report the failure on stderr instead of raising.
        eprint(str(exc))
예제 #6
0
def main():
    """Push each configured project's IsOpen flag to the dashboard."""
    conf = cfg.config()

    for study in conf.get_key('Projects').keys():
        # Best-effort: a study that fails to load is still reported using
        # whatever IsOpen value get_key returns afterwards.
        try:
            conf.set_study(study)
        except Exception:
            pass
        dashboard.set_study_status(study, conf.get_key('IsOpen'))
예제 #7
0
def main():
    """Interactively report, per site, the most recent scan upload for a study.

    Python 2 script (raw_input and a print statement). Connects to XNAT with
    credentials from the XNAT_USER / XNAT_PASS environment variables and
    loops until the user answers "y" to the quit prompt.
    """
    quit = "n"

    username = os.environ["XNAT_USER"]
    password = os.environ["XNAT_PASS"]
    central = Interface(server="https://xnat.imaging-genetics.camh.ca",
                        user=username,
                        password=password)

    while (quit != "y"):
        study = raw_input("Which study do you want to track scans for? ")

        con = CON.config()

        try:
            projects = set(con.get_xnat_projects(study))
        except ValueError:
            print "Study does not exist"
            return 0

        # Maps site name -> summary dict of the latest upload for that site.
        tracking_table = dict()

        for project in projects:
            constraints = [('xnat:mrSessionData/PROJECT', '=', project)]
            table = central.select('xnat:mrSessionData', [
                'xnat:mrSessionData/SUBJECT_LABEL', 'xnat:mrSessionData/DATE',
                'xnat:mrSessionData/INSERT_DATE'
            ]).where(constraints)
            # Sort by insert date (column index 2) so newer uploads are seen
            # last and win the comparison below.
            sort = sorted(table.items(), key=operator.itemgetter(2))
            for item in sort:
                #print(item)
                site_name = scanid.parse(item[0]).site
                if scanid.is_phantom(item[0]):
                    # Phantoms are tracked under separate keys, further split
                    # by the FBN/ADN tag in the session label.
                    site_name += "_PHA"
                    if "FBN" in item[0]:
                        site_name += "_FBN"
                    elif "ADN" in item[0]:
                        site_name += "_ADN"
                # NOTE(review): uploaddate/date/uploaddiff/datetimeformat and
                # dttostr/printdict are not defined in this function — they
                # are presumably module-level names; verify before refactoring.
                site_dict = tracking_table.setdefault(site_name, dict())
                last_update = site_dict.setdefault(uploaddate, datetime.min)
                current_update = datetime.strptime(item[2], datetimeformat)
                if last_update < current_update:
                    # A newer upload: record its scan date and upload time.
                    site_dict[date] = item[1]
                    site_dict[uploaddate] = current_update
                    if last_update == datetime.min:
                        site_dict[uploaddiff] = "No Other Uploads"
                    else:
                        site_dict[uploaddiff] = dttostr(current_update -
                                                        last_update)
                #break
        printdict(tracking_table)

        quit = raw_input("Quit? y/n ")
예제 #8
0
def main():
    """Report QC status for every timepoint in each project under a root dir.

    Flags timepoints with no checklist entry, missing QC docs, QC docs older
    than the newest exported data (with --show-newer), and unsigned QC docs.
    """
    arguments = docopt.docopt(__doc__)
    rootdir = arguments['--root']
    if arguments['--study']:
        cfg = config.config()
        rootdir = cfg.get_study_base(arguments['--study'])

    for projectdir in get_project_dirs(rootdir):
        checklist = os.path.join(projectdir, 'metadata', 'checklist.csv')

        checklistdict = read_checklist(checklist)

        for timepointdir in sorted(glob.glob(projectdir + '/data/nii/*')):
            if '_PHA_' in timepointdir:
                continue

            timepoint = os.path.basename(timepointdir)
            qcdocname = 'qc_' + timepoint
            qcdoc = os.path.join(projectdir, 'qc', timepoint,
                                 (qcdocname + '.html'))

            # Newest mtime among the exported nii files and the folder itself.
            data_mtime = max(
                map(get_mtime,
                    glob.glob(timepointdir + '/*.nii.gz') + [timepointdir]))

            # notify about missing QC reports or those with no checklist entry
            if qcdocname not in checklistdict:
                print('No checklist entry for {}'.format(timepointdir))
                continue
            elif not os.path.exists(qcdoc):
                print('No QC doc generated for {}'.format(timepointdir))
                continue

            # find QC documents that are older than the most recent data export
            if arguments['--show-newer'] and data_mtime > os.path.getmtime(
                    qcdoc):
                # list() so the emptiness check works on Python 3, where
                # filter() returns a lazy iterator that is always truthy.
                newer = list(filter(
                    lambda x: os.path.getmtime(x) > os.path.getmtime(qcdoc),
                    glob.glob(timepointdir + '/*')))
                if newer != []:
                    print('{}: QC doc is older than data in folder {} {} {}'.
                          format(qcdoc, timepointdir, data_mtime,
                                 os.path.getmtime(qcdoc)))
                    print('\t' + '\n\t'.join(newer))

            # notify about unchecked QC reports; print() form runs on both
            # Python 2 and 3 (this was a py2-only print statement)
            if not checklistdict[qcdocname]:
                print('{}: QC doc not signed off on'.format(qcdoc))
예제 #9
0
def get_nuiter_settings(subject_id):
    """
    Returns the site specific nu_iter settings for the pipeline
    Note as this is run as a function node I'm not sure how to handle logging

    The function returns a full '-nuiterations N' flag string, not a bare
    number (the previous doctests expected ints and could never pass):
    >>> get_nuiter_settings('SPN01_CMH_0001_01')
    '-nuiterations 4'
    >>> get_nuiter_settings('SPN01_MRC_0001_01')
    '-nuiterations 8'
    """
    import datman.config as cfg
    import datman.scanid as scanid

    default_value = '-nuiterations 4'

    config = cfg.config()

    ident = scanid.parse(subject_id)
    site = ident.site

    try:
        study = config.map_xnat_archive_to_project(ident.study)
        config.set_study(study)
    except ValueError:
        # Study not defined in config; fall back to the default.
        return default_value

    try:
        settings = config.get_key('freesurfer')
        nu_iter_settings = settings['nu_iter']
    except KeyError:
        # Freesurfer setting not found; fall back to the default.
        return default_value

    try:
        if site in nu_iter_settings:
            iter_count = nu_iter_settings[site]
        elif 'DEFAULT' in nu_iter_settings:
            iter_count = nu_iter_settings['DEFAULT']
        else:
            # Neither a site entry nor a DEFAULT key: previously this fell
            # through to an UnboundLocalError at the return below.
            return default_value
    except TypeError:
        # in case nu_iter isn't defined as a dict
        iter_count = nu_iter_settings

    return '-nuiterations {}'.format(iter_count)
예제 #10
0
def main():
    """Run EA analysis for one subject or for every non-phantom subject."""
    arguments = docopt(__doc__)

    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    # NOTE(review): config_file is not defined in this function — it is
    # presumably a module-level name; verify before relying on these messages.
    if 'ea' not in config.study_config['fmri'].keys():
        logger.error('ea not defined in fmri in {}'.format(config_file))
        sys.exit(1)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in {}".format(k, config_file))
            sys.exit(1)

    ea_dir = os.path.join(study_base, config.get_path('fmri'), 'ea')
    nii_dir = os.path.join(study_base, config.get_path('nii'))

    if subject:
        subjects = [subject]
    else:
        subjects = glob.glob('{}/*'.format(nii_dir))

    for s in subjects:
        # Check the item being iterated, not the --subject flag: in batch
        # mode `subject` is None and `'_PHA_' in subject` raised a TypeError.
        if '_PHA_' in s:
            logger.error("{} is a phantom, cannot analyze".format(s))
            continue
        analyze_subject(s, config, study)
예제 #11
0
def main():
    """Report missing, stale, or unsigned QC docs for each timepoint."""
    arguments = docopt(__doc__)
    rootdir = arguments['--root']
    if arguments['--study']:
        cfg = config.config()
        rootdir = cfg.get_study_base(arguments['--study'])

    for projectdir in get_project_dirs(rootdir):
        checklist = os.path.join(projectdir, 'metadata', 'checklist.csv')

        checklistdict = datman.utils.read_checklist(path=checklist)

        for timepointdir in sorted(glob.glob(os.path.join(projectdir, 'data',
                'nii', '*'))):
            if '_PHA_' in timepointdir:
                continue

            timepoint = os.path.basename(timepointdir)
            qcdocname = 'qc_' + timepoint
            qcdoc = os.path.join(projectdir, 'qc', timepoint, (qcdocname + '.html'))

            # Newest mtime among the exported nii files and the folder itself.
            data_mtime = max(map(get_mtime, glob.glob(timepointdir + '/*.nii.gz')+[timepointdir]))

            # notify about missing QC reports or those with no checklist entry
            if timepoint not in checklistdict:
                print('No checklist entry for {}'.format(timepointdir))
                continue
            elif not os.path.exists(qcdoc):
                print('No QC doc generated for {}'.format(timepointdir))
                continue

            # find QC documents that are older than the most recent data export
            if arguments['--show-newer'] and data_mtime > os.path.getmtime(qcdoc):
                # list() so the emptiness check works on Python 3, where
                # filter() returns a lazy iterator that is always truthy.
                newer = list(filter(lambda x: os.path.getmtime(x) > os.path.getmtime(qcdoc), glob.glob(timepointdir + '/*')))
                if newer != []:
                    print('{}: QC doc is older than data in folder {} {} {}'.format(qcdoc, timepointdir, data_mtime, os.path.getmtime(qcdoc)))
                    print('\t' + '\n\t'.join(newer))

            # notify about unchecked QC reports; print() form runs on both
            # Python 2 and 3 (this was a py2-only print statement)
            if not checklistdict[timepoint]:
                print('{}: QC doc not signed off on'.format(qcdoc))
예제 #12
0
def main():
    """
    Runs fmri data through the specified epitome script.

    Validates the study configuration, then either processes a single
    subject (--subject) or queues one job per subject missing outputs.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    # items() instead of py2-only iteritems() so this also runs on Python 3
    for x in config.study_config['fmri'].items():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in x[1].keys():
                logger.error("fmri:{}:{} not defined in configuration file".format(x[0], k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['fmri']))
            for exp in config.study_config['fmri'].keys():
                expected_names = config.study_config['fmri'][exp]['export']
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join(['python ', __file__, study, '--subject {} '.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                # text mode ('w'): writing str to a 'wb' handle fails on py3
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
예제 #13
0
def main():
    """
    Runs .nrrd data through unring.py.

    With --batch, processes everything serially; otherwise submits itself
    to the queue in batch mode.
    """
    arguments = docopt(__doc__)

    study = arguments['<study>']
    batch = arguments['--batch']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nrrd']:
        # NOTE(review): get_path('Paths') looks odd — other scripts use
        # get_key('Paths'); verify against datman.config before changing.
        if k not in config.get_path('Paths'):
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    nrrd_dir = os.path.join(study_base, config.get_path('nrrd'))

    # runs in serial (due to MATLAB dependencies)
    if batch:
        try:
            run_all(nrrd_dir, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # default behaviour: submit self to queue in batch mode
    else:
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        cmd = 'python {} {} --batch {}'.format(__file__, study, debugopt)
        jobname = 'dm_unring_{}'.format(time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)

        # text mode ('w'): writing str to a 'wb' handle fails on Python 3
        with open(jobfile, 'w') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        # Submit only AFTER the with-block closes the file, so the job
        # script is flushed to disk before qsub reads it. Previously the
        # submission ran inside the with-block, racing unflushed buffers.
        rtn, out = utils.run(
            'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
예제 #14
0
def main():
    """Run resting-state analysis for one subject, or queue missing ones."""
    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error(
            'study {} not defined in master configuration file\n{}'.format(
                study, NODE))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'fmri' not in config.site_config['paths']:
        logger.error(
            "paths:fmri not defined in site configuration file\n{}".format(
                NODE))
        sys.exit(1)

    fmri_dir = os.path.join(study_base, config.site_config['paths']['fmri'])

    if scanid:
        path = os.path.join(fmri_dir, scanid)
        try:
            run_analysis(scanid, config, study)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        # look for subjects with at least one fmri type missing outputs
        subjects = []

        # loop through fmri experiments defined
        for exp in config.study_config['fmri'].keys():
            expected_files = config.study_config['fmri'][exp]['conn']
            fmri_dirs = glob.glob('{}/*'.format(os.path.join(fmri_dir, exp)))

            for subj_dir in fmri_dirs:
                candidates = glob.glob('{}/*'.format(subj_dir))
                for filetype in expected_files:
                    # add subject if outputs don't already exist. any() is
                    # correct on Python 3 too; the previous bare filter()
                    # object was always truthy there, so `not filter(...)`
                    # never fired and no subjects were ever queued.
                    if not any('{}_roi-corrs.csv'.format(filetype) in x
                               for x in candidates):
                        subjects.append(os.path.basename(subj_dir))
                        break

        # collapse found subjects (do not double-count) and create a list of commands
        commands = []
        subjects = list(set(subjects))
        for subject in subjects:
            commands.append(" ".join(
                [__file__, study, '--subject {}'.format(subject)]))

        if commands:
            logger.debug('queueing up the following commands:\n' +
                         '\n'.join(commands))

            for i, cmd in enumerate(commands):
                jobname = 'dm_rest_{}_{}'.format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                # text mode ('w'): writing str to a 'wb' handle fails on py3
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error(
                        "Job submission failed. Output follows. {}".format(
                            NODE))
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
예제 #15
0
def main():
    """
    Runs fmri data through the specified epitome script.

    Optionally restricts processing to one --task and/or one --subject;
    otherwise queues one job per subject with missing outputs.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']
    output = arguments['--output']
    exports = arguments['--exports']
    task = arguments['--task']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    # Parse optional arguments
    output_dir = output if output else os.path.join(study_base, config.get_path('fmri'))
    opt_exports = [e for e in exports.split(',')] if exports else []

    # Check if task is available
    if task:
        try:
            config.study_config['fmri'][task]
        except KeyError:
            logger.error('Task {} not found in study config!'.format(task))
            sys.exit(1)
        # items() instead of py2-only iteritems() so this runs on Python 3
        tasks = {k: v for k, v in config.study_config['fmri'].items() if k == task}
    else:
        tasks = config.study_config['fmri']

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    # items() instead of py2-only iteritems()
    for x in tasks.items():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in x[1].keys():
                logger.error("fmri:{}:{} not defined in configuration file".format(x[0], k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.get_path('nii'))

    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study, output_dir, opt_exports, tasks)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(output_dir)
            for exp in config.study_config['fmri'].keys():
                expected_names = set(config.study_config['fmri'][exp]['export'] + opt_exports)
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []

        g_opts = ' --output {} --exports {}'.format(output_dir, exports)

        if task:
            g_opts += ' --task {}'.format(task)
        if debug:
            g_opts += ' --debug'

        for subject in subjects:
            sub_tag = ' --subject {}'.format(subject)
            commands.append(" ".join(['python ', __file__, study, g_opts, sub_tag]))

        if commands:
            logger.debug('queueing up the following commands:\n' + '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                # text mode ('w'): writing str to a 'wb' handle fails on py3
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
예제 #16
0
    # Comma-separated --tags string parsed into a list of clean tag names.
    TAGS = arguments['--tags']

    TAGS = [tag.strip() for tag in TAGS.split(',')]

    # Verbosity flags: --debug wins over --quiet; default level is WARN.
    QUIET = False
    DEBUG = False
    if arguments['--debug']:
        DEBUG = True
        logger.setLevel(logging.DEBUG)
    elif arguments['--quiet']:
        QUIET = True
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.WARN)

    CFG = config.config(study=study)

    DTIPREP_PATH = CFG.get_path('dtiprep')
    NII_PATH = CFG.get_path('nii')

    # Default the log directory to <dtiprep>/tractmap_logs and create it
    # if it does not exist; abort with a message if creation fails.
    if not LOGDIR:
        LOGDIR = os.path.join(DTIPREP_PATH, 'tractmap_logs')
    if not os.path.isdir(LOGDIR):
        logger.info("Creating log dir:{}".format(LOGDIR))
        try:
            os.mkdir(LOGDIR)
        except OSError:
            # NOTE(review): the message appears to be missing a closing
            # quote after 'directory"{}' — likely a typo; left unchanged.
            msg = 'Failed creating log directory"{}'.format(LOGDIR)
            logger.error(msg)
            sys.exit(msg)
예제 #17
0
def test_initialise_from_environ():
    """config() should initialise from DM_CONFIG/DM_SYSTEM env vars alone."""
    os.environ['DM_CONFIG'] = os.path.join(FIXTURE_DIR, 'site_config.yml')
    os.environ['DM_SYSTEM'] = 'test'
    # The return value was bound to an unused local; success here simply
    # means construction does not raise.
    config.config()
예제 #18
0
def main():
    """Run or queue freesurfer-to-HCP (ciftify) conversion for a study."""
    args = docopt(__doc__)
    study = args['<study>']
    scanid = args['--subject']
    debug = args['--debug']
    dryrun = args['--dry-run']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # Load the study configuration, bailing out if the study is unknown.
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for required in ['freesurfer', 'hcp']:
        if required not in config.get_key('Paths'):
            logger.error("paths:{} not defined in site config".format(required))
            sys.exit(1)

    freesurfer_dir = config.get_path('freesurfer')
    hcp_dir = config.get_path('hcp')
    logs_dir = make_error_log_dir(hcp_dir)

    # Single-subject mode: convert directly and return.
    if scanid:
        fs_path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(fs_path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)
        return

    qced_subjects = utils.get_subject_metadata(config)

    # Batch mode: queue each subject that still needs conversion or QC pages,
    # skipping any whose freesurfer outputs are not complete yet.
    commands = []
    for subject in qced_subjects:
        if not fs_outputs_exist(os.path.join(freesurfer_dir, subject)):
            continue
        subj_dir = os.path.join(hcp_dir, subject)
        if not ciftify_outputs_exist(subj_dir):
            commands.append(make_subject_cmd(study, subject, debug=debug))
        elif not qc_outputs_exist(subj_dir):
            commands.append(make_vis_cmd(hcp_dir, subject, debug=debug))

    if commands:
        logger.debug('queueing up the following commands:\n' + '\n'.join(commands))

    # Submit one job per command.
    for idx, command in enumerate(commands):
        job_name = 'dm_fs2hcp_{}_{}'.format(idx, time.strftime("%Y%m%d-%H%M%S"))
        utils.submit_job(command, job_name, logs_dir,
                         system=config.system, dryrun=dryrun)

    create_indices_bm(config, study)
예제 #19
0
    # Comma-separated --tags string parsed into a list of clean tag names.
    TAGS = arguments['--tags']

    TAGS = [tag.strip() for tag in TAGS.split(',')]

    # Verbosity flags: --debug wins over --quiet; default level is WARN.
    QUIET = False
    DEBUG = False
    if arguments['--debug']:
        DEBUG = True
        logger.setLevel(logging.DEBUG)
    elif arguments['--quiet']:
        QUIET = True
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.WARN)

    CFG = config.config(study=study)

    DTIPREP_PATH = CFG.get_path('dtiprep')
    NII_PATH = CFG.get_path('nii')

    # Default the log directory to <dtiprep>/tractmap_logs and create it
    # if it does not exist; abort with a message if creation fails.
    if not LOGDIR:
        LOGDIR = os.path.join(DTIPREP_PATH, 'tractmap_logs')
    if not os.path.isdir(LOGDIR):
        logger.info("Creating log dir:{}".format(LOGDIR))
        try:
            os.mkdir(LOGDIR)
        except OSError:
            # NOTE(review): the message appears to be missing a closing
            # quote after 'directory"{}' — likely a typo; left unchanged.
            msg = 'Failed creating log directory"{}'.format(LOGDIR)
            logger.error(msg)
            sys.exit(msg)
예제 #20
0
def test_initialise_from_environ():
    """config() should pick up DM_CONFIG and DM_SYSTEM from the environment."""
    os.environ['DM_SYSTEM'] = 'test'
    os.environ['DM_CONFIG'] = os.path.join(FIXTURE_DIR, 'site_config.yml')
    config.config()
예제 #21
0
def main():
    """Convert freesurfer outputs to HCP format for one subject or in batch."""
    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = config.get_path('freesurfer')
    hcp_dir = config.get_path('hcp')

    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)
        return

    # config was already loaded (with error handling) above; the redundant
    # second cfg.config(study=study) call that used to be here was removed
    qced_subjects = config.get_subject_metadata()

    # running for batch mode

    new_subjects = []
    # find subjects where at least one expected output does not exist
    for subject in qced_subjects:
        subj_dir = os.path.join(hcp_dir, subject)
        if not ciftify_outputs_exist(subj_dir):
            if fs_outputs_exist(os.path.join(freesurfer_dir, subject)):
                new_subjects.append(subject)

    create_indices_bm(config, study)

    # submit a list of calls to ourself, one per subject
    commands = []
    if debug:
        debugopt = '--debug'
    else:
        debugopt = ''

    for subject in new_subjects:
        commands.append(" ".join(
            [__file__, study, '--subject {} '.format(subject), debugopt]))

    if commands:
        logger.debug('queueing up the following commands:\n' +
                     '\n'.join(commands))

    for i, cmd in enumerate(commands):
        jobname = 'dm_fs2hcp_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)
        # text mode ('w'): writing str to a 'wb' handle fails on Python 3
        with open(jobfile, 'w') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
            logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
예제 #22
0
def main():
    """
    Runs .nrrd data through unring.py.

    With --batch, processes every subject serially (MATLAB dependency).
    Otherwise writes a small shell script that re-invokes this file with
    --batch and submits it to the SGE queue via qsub.
    """
    arguments = docopt(__doc__)

    study = arguments['<study>']
    batch = arguments['--batch']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    # BUGFIX: was `k not in config.get_path('Paths')`, which tests substring
    # membership in a single path string. Check the paths mapping instead,
    # as the sibling scripts in this codebase do.
    for k in ['nrrd']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    nrrd_dir = os.path.join(study_base, config.get_path('nrrd'))

    # runs in serial (due to MATLAB dependencies)
    if batch:
        try:
            run_all(nrrd_dir, config, study)
        except Exception as e:
            # use the module logger, not the root logger, for consistency
            logger.error(e)
            sys.exit(1)

    # default behaviour: submit self to queue in batch mode
    else:
        debugopt = '--debug' if debug else ''

        cmd = 'python {} {} --batch {}'.format(__file__, study, debugopt)
        jobname = 'dm_unring_{}'.format(time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)

        # BUGFIX: the job script must be flushed and closed BEFORE qsub
        # reads it -- previously the qsub call sat inside the `with` block.
        # Text mode 'w': these are str writes; 'wb' fails on Python 3.
        with open(jobfile, 'w') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
            logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
예제 #23
0
def main():
    arguments = docopt(__doc__)

    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'ea' not in config.study_config['fmri'].keys():
        logger.error('ea not defined in fmri in {}'.format(config_file))
        sys.exit(1)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in {}".format(k, config_file))
            sys.exit(1)

    ea_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                          'ea')
    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

    if subject:
        if '_PHA_' in subject:
            logger.error("{} is a phantom, cannot analyze".format(subject))
            sys.exit(1)
        analyze_subject(subject, config, study)

    else:
        # batch mode
        subjects = glob.glob('{}/*'.format(nii_dir))
        commands = []

        if debug:
            opts = '--debug'
        else:
            opts = ''

        for path in subjects:
            subject = os.path.basename(path)
            if check_complete(ea_dir, subject):
                logger.debug('{} already analysed'.format(subject))
            else:
                commands.append(" ".join(
                    [__file__, study, '--subject {}'.format(subject), opts]))

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_ea_{}_{}".format(i,
                                               time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)

                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                # qbacth method -- might bring it back, but not needed
                #fd, path = tempfile.mkstemp()
                #os.write(fd, '\n'.join(commands))
                #os.close(fd)
                #rtn, out, err = utils.run('qbatch -i --logdir {ld} -N {name} --walltime {wt} {cmds}'.format(ld=logdir, name=jobname, wt=walltime, cmds=path))
                if rtn:
                    logger.error(
                        "Job submission failed\nstdout: {}".format(out))
                    sys.exit(1)
예제 #24
0
def main():
    """
    Loops through subjects, preprocessing using supplied script, and runs a
    first-level GLM using AFNI (tent functions, 15 s window) on all subjects.

    With --subject, analyses that one subject; otherwise queues one job per
    subject whose expected first-level outputs are missing.
    """
    arguments = docopt(__doc__)
    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)
    imob_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                            'imob')

    # process a single subject
    if subject:

        # get required inputs from each
        files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
        inputs = get_inputs(files, config)

        # check if subject has already been processed
        if check_complete(imob_dir, subject):
            logger.info('{} already analysed'.format(subject))
            sys.exit(0)

        # first level GLM for inputs
        for input_type in inputs.keys():
            script = generate_analysis_script(subject, inputs, input_type,
                                              config, study)
            rtn, out = utils.run('chmod 754 {}'.format(script))
            rtn, out = utils.run(script)
            if rtn:
                logger.error(
                    'Script {} failed to run on subject {} with error:\n{}'.
                    format(script, subject, out))
                sys.exit(1)

    # process all subjects
    else:
        commands = []
        for path in glob.glob('{}/*'.format(imob_dir)):
            subject = os.path.basename(path)

            # add subject if any of the expected outputs do not exist
            files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
            # BUGFIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt and SystemExit.
            try:
                inputs = get_inputs(files, config)
            except Exception:
                logger.debug('Invalid inputs for {}'.format(subject))
                continue

            # BUGFIX: `if not filter(...)` is always False on Python 3
            # (filter returns a truthy iterator), so no subject was ever
            # queued. any() expresses the membership test portably.
            for exp in inputs.keys():
                token = '{}_glm_IM_1stlvl_{}'.format(subject, exp)
                if not any(token in f for f in files):
                    commands.append(" ".join(
                        [__file__, study, '--subject {}'.format(subject)]))
                    break

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_imob_{}_{}".format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                # Text mode 'w': str writes fail under 'wb' on Python 3.
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
예제 #25
0
import datman.config as cfg
import datman.scan

# Necessary to silence all logging from dm_qc_report during tests.
logging.disable(logging.CRITICAL)

# Import the script under test as a module (it lives in bin/, not a package).
qc = importlib.import_module('bin.dm_qc_report')

# Shared fixture: a minimal project settings tree checked into the repo.
FIXTURE = "tests/fixture_project_settings"

site_config_path = os.path.join(FIXTURE, 'site_config.yaml')
system = 'local'
study = 'STUDY'

# Module-level config shared by the test classes below.
config = cfg.config(filename=site_config_path, system=system, study=study)

class GetConfig(unittest.TestCase):
    """Tests for bin.dm_qc_report's get_config() error handling."""

    # NOTE: decorator order matters -- decorators apply bottom-up, so
    # @patch injects the mock first and @nose.tools.raises wraps the result.
    @nose.tools.raises(SystemExit)
    def test_exits_gracefully_with_bad_study(self):
        config = qc.get_config(study="madeupcode")

    @nose.tools.raises(SystemExit)
    @patch('datman.config.config')
    def test_exits_gracefully_when_paths_missing_from_config(self, mock_config):
        # The mocked get_path knows only 'dcm' and 'nii'; any other key
        # raises KeyError, which get_config should turn into SystemExit.
        mock_config.return_value.get_path.side_effect = lambda path: {'dcm': '',
                'nii': ''}[path]
        config = qc.get_config("STUDY")

class VerifyInputPaths(unittest.TestCase):
예제 #26
0
def main():
    """
    Loops through subjects, preprocessing using supplied script, and runs a
    first-level GLM using AFNI (tent functions, 15 s window) on all subjects.

    With --subject, analyses that one subject; otherwise queues one job per
    subject whose expected first-level outputs are missing.
    """
    arguments = docopt(__doc__)
    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)
    imob_dir = os.path.join(study_base, config.get_path('fmri'), 'imob')

    # process a single subject
    if subject:

        # get required inputs from each
        files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
        inputs = get_inputs(files, config)

        # check if subject has already been processed
        if check_complete(imob_dir, subject):
            logger.info('{} already analysed'.format(subject))
            sys.exit(0)

        # first level GLM for inputs
        for input_type in inputs.keys():
            script = generate_analysis_script(subject, inputs, input_type,
                                              config, study)
            rtn, out = utils.run('chmod 754 {}'.format(script))
            rtn, out = utils.run(script)
            if rtn:
                logger.error(
                    'Script {} failed to run on subject {} with error:\n{}'.format(
                        script, subject, out))
                sys.exit(1)

    # process all subjects
    else:
        commands = []
        for path in glob.glob('{}/*'.format(imob_dir)):
            subject = os.path.basename(path)

            # add subject if any of the expected outputs do not exist
            files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
            # BUGFIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt and SystemExit.
            try:
                inputs = get_inputs(files, config)
            except Exception:
                logger.debug('Invalid inputs for {}'.format(subject))
                continue

            # BUGFIX: `if not filter(...)` is always False on Python 3
            # (filter returns a truthy iterator), so no subject was ever
            # queued. any() expresses the membership test portably.
            for exp in inputs.keys():
                token = '{}_glm_IM_1stlvl_{}'.format(subject, exp)
                if not any(token in f for f in files):
                    commands.append(" ".join(
                        [__file__, study, '--subject {}'.format(subject)]))
                    break

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_imob_{}_{}".format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                # Text mode 'w': str writes fail under 'wb' on Python 3.
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
예제 #27
0
 def __init__(self, templates, **kwargs):
     """Load the datman site config, then defer to the parent constructor.

     NOTE(review): the enclosing class header is outside this chunk;
     presumably a nipype SelectFiles subclass -- confirm against caller.
     """
     self.dm_config = dm_cfg.config()
     super(dmSelectFiles, self).__init__(templates, **kwargs)
예제 #28
0
class TestScan(unittest.TestCase):
    """Tests for datman.scan.Scan construction and nifti discovery."""

    # Shared fixture IDs: a valid subject ID missing its session suffix,
    # a malformed ID, and a phantom scan ID.
    good_name = "STUDY_CMH_9999_01"
    bad_name = "STUDYCMH_9999"
    phantom = "STUDY_CMH_PHA_XXX9999"
    # NOTE(review): site_config, system and study are module-level names
    # defined elsewhere in this test module.
    config = cfg.config(filename=site_config, system=system, study=study)

    def test_raises_parse_exception_with_bad_subject_id(self):
        with pytest.raises(datman.scanid.ParseException):
            datman.scan.Scan(self.bad_name, self.config)

    def test_makes_scan_instance_for_id_without_session(self):
        subject = datman.scan.Scan(self.good_name, self.config)

        assert subject is not None
        # Check that the missing session was set to the default
        assert subject.session == '01'

    def test_makes_scan_instance_for_phantom(self):
        subject = datman.scan.Scan(self.phantom, self.config)

        assert subject is not None
        assert subject.full_id == self.phantom

    def test_is_phantom_sets_correctly(self):
        subject = datman.scan.Scan(self.good_name, self.config)
        phantom = datman.scan.Scan(self.phantom, self.config)

        assert not subject.is_phantom
        assert phantom.is_phantom

    @patch('os.path.exists')
    def test_resources_paths_uses_full_id_plus_session(self, mock_exists):
        # Pretend the resources folder exists so the path is computed.
        mock_exists.return_value = True
        subject = datman.scan.Scan(self.good_name, self.config)

        expected_path = self.config.get_path('resources') + \
            "STUDY_CMH_9999_01_01"
        assert subject.resource_path == expected_path

    def test_returns_expected_subject_paths(self):
        subject = datman.scan.Scan(self.good_name, self.config)

        expected_nii = self.config.get_path('nii') + self.good_name
        expected_qc = self.config.get_path('qc') + self.good_name

        assert subject.nii_path == expected_nii
        assert subject.qc_path == expected_qc

    def test_niftis_set_to_empty_list_when_broken_path(self):
        subject = datman.scan.Scan(self.good_name, self.config)

        assert subject.niftis == []

    @patch('glob.glob')
    def test_niftis_with_either_extension_type_found(self, mock_glob):
        # Both .nii and .nii.gz should be picked up; other extensions not.
        simple_ext = "{}_01_T1_02_SagT1-BRAVO.nii".format(self.good_name)
        complex_ext = "{}_01_DTI60-1000_05_Ax-DTI-60.nii.gz".format(
            self.good_name)
        wrong_ext = "{}_01_DTI60-1000_05_Ax-DTI-60.bvec".format(self.good_name)

        nii_list = [simple_ext, complex_ext, wrong_ext]
        mock_glob.return_value = nii_list

        subject = datman.scan.Scan(self.good_name, self.config)

        found_niftis = [series.path for series in subject.niftis]
        expected = [simple_ext, complex_ext]

        assert sorted(found_niftis) == sorted(expected)

    @patch('glob.glob')
    def test_subject_series_with_nondatman_name_causes_parse_exception(
            self,
            mock_glob):
        well_named = "{}_01_T1_02_SagT1-BRAVO.nii".format(self.good_name)
        badly_named1 = "{}_01_DTI60-1000_05_Ax-DTI-60.nii".format(
            self.bad_name)
        badly_named2 = "{}_01_T2_07.nii".format(self.good_name)

        nii_list = [well_named, badly_named1, badly_named2]
        mock_glob.return_value = nii_list

        with pytest.raises(datman.scanid.ParseException):
            datman.scan.Scan(self.good_name, self.config)

    @patch('glob.glob')
    def test_nii_tags_lists_all_tags(self, mock_glob):
        T1 = "STUDY_CAMH_9999_01_01_T1_02_SagT1-BRAVO.nii"
        DTI = "STUDY_CAMH_9999_01_01_DTI60-1000_05_Ax-DTI-60.nii"

        mock_glob.return_value = [T1, DTI]

        subject = datman.scan.Scan(self.good_name, self.config)

        assert sorted(subject.nii_tags) == sorted(['T1', 'DTI60-1000'])


    @patch('glob.glob')
    def test_get_tagged_nii_finds_all_matching_series(self, mock_glob):
        T1_1 = "STUDY_CAMH_9999_01_01_T1_02_SagT1-BRAVO.nii"
        T1_2 = "STUDY_CAMH_9999_01_01_T1_03_SagT1-BRAVO.nii.gz"
        DTI = "STUDY_CAMH_9999_01_01_DTI_05_Ax-DTI-60.nii"

        mock_glob.return_value = [T1_1, DTI, T1_2]

        subject = datman.scan.Scan(self.good_name, self.config)

        actual_T1s = [series.path for series in subject.get_tagged_nii('T1')]
        expected = [T1_1, T1_2]
        assert sorted(actual_T1s) == sorted(expected)

        actual_DTIs = [series.path for series in subject.get_tagged_nii('DTI')]
        expected = [DTI]
        assert actual_DTIs == expected

    @patch('glob.glob')
    def test_get_tagged_X_returns_empty_list_when_no_tag_files(self,
                                                               mock_glob):
        nifti = "STUDY_CAMH_9999_01_01_T1_03_SagT1-BRAVO.nii.gz"

        mock_glob.return_value = [nifti]

        subject = datman.scan.Scan(self.good_name, self.config)

        assert subject.get_tagged_nii('DTI') == []
예제 #29
0
def main():
    """
    Converts freesurfer output to HCP format for one subject (--subject),
    or queues one job per subject whose HCP outputs are missing.
    """
    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = os.path.join(study_base,
                                  config.site_config['paths']['freesurfer'])
    hcp_dir = os.path.join(study_base, config.site_config['paths']['hcp'])

    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            # use the module logger, not the root logger, for consistency
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        freesurfer_dirs = glob.glob('{}/*'.format(freesurfer_dir))

        # Hoisted out of the loop: the folder to create is loop-invariant.
        hcp_dir = utils.define_folder(hcp_dir)

        # find subjects where at least one expected output does not exist.
        # BUGFIX: this loop iterated an undefined `nii_dirs` and tested an
        # undefined `subj_dir`, so batch mode always died with NameError.
        for path in freesurfer_dirs:
            subject = os.path.basename(path)
            subj_dir = os.path.join(hcp_dir, subject)
            if not outputs_exist(subj_dir):
                subjects.append(subject)

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        debugopt = '--debug' if debug else ''

        for subject in subjects:
            commands.append(" ".join(
                [__file__, study, '--subject {} '.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n' +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fs2hcp_{}_{}'.format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                # Text mode 'w': str writes fail under 'wb' on Python 3.
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
예제 #30
0
# Necessary to silence all logging from dm_qc_report during tests.
logging.disable(logging.CRITICAL)

# Turn off testing of integrated dashboard functions for now
datman.dashboard.dash_found = False

# Import the script under test as a module (it lives in bin/, not a package).
qc = importlib.import_module('bin.dm_qc_report')

# Shared fixture: a minimal project settings tree checked into the repo.
FIXTURE = "tests/fixture_project_settings"

site_config_path = os.path.join(FIXTURE, 'site_config.yaml')
system = 'local'
study = 'STUDY'

# Module-level config shared by the test classes below.
config = cfg.config(filename=site_config_path, system=system, study=study)


class GetConfig(unittest.TestCase):
    """Tests for bin.dm_qc_report's get_config() error handling."""

    def test_exits_gracefully_with_bad_study(self):
        # An unrecognized study code must trigger sys.exit, not a traceback.
        with pytest.raises(SystemExit):
            qc.get_config(study="madeupcode")

    @patch('datman.config.config')
    def test_exits_gracefully_when_paths_missing_from_config(
            self, config_mock):
        # Simulate a config that only knows the 'nii' path: every other
        # lookup raises KeyError, which get_config should turn into exit.
        known_paths = {'nii': ''}
        config_mock.return_value.get_path.side_effect = known_paths.__getitem__

        with pytest.raises(SystemExit):
            qc.get_config("STUDY")
예제 #31
0
def main():
    """
    Analyzes empathic-accuracy (EA) fMRI data for one subject (--subject),
    or, in batch mode, submits one queue job per subject not yet analysed.
    """
    arguments = docopt(__doc__)

    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    # BUGFIX: these error messages formatted an undefined name
    # `config_file`, raising NameError whenever the branch fired.
    if 'ea' not in config.study_config['fmri'].keys():
        logger.error('ea not defined in fmri settings for study {}'.format(
            study))
        sys.exit(1)

    for k in ['nii', 'fmri', 'hcp']:
        # NOTE(review): get_key('Paths') is assumed to return the paths
        # mapping from the study config -- confirm against datman.config.
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in study config".format(k))
            sys.exit(1)

    ea_dir = os.path.join(study_base, config.get_path('fmri'), 'ea')
    nii_dir = os.path.join(study_base, config.get_path('nii'))

    if subject:
        if '_PHA_' in subject:
            logger.error("{} is a phantom, cannot analyze".format(subject))
            sys.exit(1)
        analyze_subject(subject, config, study)

    else:
        # batch mode: queue one job per subject missing outputs
        subjects = glob.glob('{}/*'.format(nii_dir))
        commands = []

        opts = '--debug' if debug else ''

        for path in subjects:
            subject = os.path.basename(path)
            if check_complete(ea_dir, subject):
                logger.debug('{} already analysed'.format(subject))
            else:
                commands.append(" ".join(
                    [__file__, study, '--subject {}'.format(subject), opts]))

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_ea_{}_{}".format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)

                # Text mode 'w': str writes fail under 'wb' on Python 3.
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error(
                        "Job submission failed\nstdout: {}".format(out))
                    sys.exit(1)