Example #1
def submit_job(cmd, i, walltime="23:00:00"):
    if DRYRUN:
        return

    job_name = 'dm_freesurfer_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))

    # Bit of an ugly hack to allow job submission on the scc. Should be replaced
    # with drmaa or some other queue interface later
    if SYSTEM == 'kimel':
        job_file = '/tmp/{}'.format(job_name)

        with open(job_file, 'wb') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)
        job = "qsub -V -q main.q -N {} {}".format(job_name, job_file)
        rtn, out = utils.run(job)
    else:
        job = "echo {} | qbatch -N {} --logdir {} --walltime {} -".format(
            cmd, job_name, LOG_DIR, walltime)
        rtn, out = utils.run(job, specialquote=False)

    if rtn:
        logger.error("Job submission failed.")
        if out:
            logger.error("stdout: {}".format(out))
        sys.exit(1)
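# A minimal usage sketch (hypothetical values; assumes DRYRUN, SYSTEM, LOG_DIR
# and utils are configured as above):
#   submit_job('recon-all -all -subjid STUDY_SITE_0001_01', 0)
# On the 'kimel' system this writes a tiny bash wrapper to /tmp and submits
# it with qsub; elsewhere the command is piped straight to qbatch.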
Example #2
def update_aggregate_stats(config):
    logger.info("Updating aggregate stats")
    freesurfer_dir = config.get_path('freesurfer')
    enigma_ctx = os.path.join(config.system_config['DATMAN_ASSETSDIR'],
            'ENGIMA_ExtractCortical.sh')
    enigma_sub = os.path.join(config.system_config['DATMAN_ASSETSDIR'],
            'ENGIMA_ExtractSubcortical.sh')
    utils.run('{} {} {}'.format(enigma_ctx, freesurfer_dir,
            config.study_config['STUDY_TAG']), dryrun=DRYRUN)
    utils.run('{} {} {}'.format(enigma_sub, freesurfer_dir,
            config.study_config['STUDY_TAG']), dryrun=DRYRUN)
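# For illustration, with DATMAN_ASSETSDIR=/assets and STUDY_TAG=STUDY
# (hypothetical values), the first call expands to:
#   /assets/ENGIMA_ExtractCortical.sh <freesurfer_dir> STUDY
# and is skipped when DRYRUN is set, via utils.run's dryrun flag.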
Example #3
def update_aggregate_stats(config):
    logger.info("Updating aggregate stats")
    freesurfer_dir = config.get_path('freesurfer')
    enigma_ctx = os.path.join(config.system_config['DATMAN_ASSETSDIR'],
            'ENGIMA_ExtractCortical.sh')
    enigma_sub = os.path.join(config.system_config['DATMAN_ASSETSDIR'],
            'ENGIMA_ExtractSubcortical.sh')
    utils.run('{} {} {}'.format(enigma_ctx, freesurfer_dir,
            config.study_config['STUDY_TAG']), dryrun=DRYRUN)
    utils.run('{} {} {}'.format(enigma_sub, freesurfer_dir,
            config.study_config['STUDY_TAG']), dryrun=DRYRUN)
Example #4
def make_epitome_folders(path, n_runs):
    """
    Makes an epitome-compatible folder structure with functional data FUNC of
    n runs, and a single T1.

    This works assuming we've run everything through freesurfer.

    If we need multisession, it might make sense to run this multiple times
    (once per session).
    """
    utils.run('mkdir -p ' + path + '/TEMP/SUBJ/T1/SESS01/RUN01')
    for r in np.arange(n_runs)+1:
        num = "{:0>2}".format(str(r))
        utils.run('mkdir -p ' + path + '/TEMP/SUBJ/FUNC/SESS01/RUN' + num)
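# For example, make_epitome_folders('/tmp/epi', 2) creates:
#   /tmp/epi/TEMP/SUBJ/T1/SESS01/RUN01
#   /tmp/epi/TEMP/SUBJ/FUNC/SESS01/RUN01
#   /tmp/epi/TEMP/SUBJ/FUNC/SESS01/RUN02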
Example #5
def make_epitome_folders(path, n_runs):
    """
    Makes an epitome-compatible folder structure with functional data FUNC of
    n runs, and a single T1.

    This works assuming we've run everything through freesurfer.

    If we need multisession, it might make sense to run this multiple times
    (once per session).
    """
    utils.run('mkdir -p ' + path + '/TEMP/SUBJ/T1/SESS01/RUN01')
    for r in np.arange(n_runs) + 1:
        num = "{:0>2}".format(str(r))
        utils.run('mkdir -p ' + path + '/TEMP/SUBJ/FUNC/SESS01/RUN' + num)
Example #6
def run_hcp_convert(path, config, study):
    """Runs fs2hcp on the input subject"""
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    freesurfer_dir = os.path.join(study_base, config.site_config['paths']['freesurfer'])
    hcp_dir = os.path.join(study_base, config.site_config['paths']['hcp'])
    output_dir = utils.define_folder(os.path.join(hcp_dir, subject))

    if outputs_exist(output_dir):
        logger.debug('outputs found in {}'.format(path))
        sys.exit()

    # reset / remove error.log
    error_log = os.path.join(output_dir, 'error.log')
    if os.path.isfile(error_log):
        os.remove(error_log)

    # run fs2hcp
    command = 'fs2hcp --FSpath={} --HCPpath={} --subject={}'.format(freesurfer_dir, hcp_dir, subject)
    rtn, out = utils.run(command)
    if rtn:
        error_message = "fs2hcp failed: {}\n{}".format(command, out)
        logger.debug(error_message)
        with open(error_log, 'wb') as f:
            f.write('{}\n{}'.format(error_message, NODE))
Example #7
def create_indices_bm(config, study):
    hcp_dir = config.get_path('hcp')
    command = 'cifti_vis_recon_all index --hcp-data-dir {}'.format(hcp_dir)
    rtn, out = utils.run(command)
    if rtn:
        error_message = "fs2hcp failed: {}\n{}".format(command, out)
        logger.debug(error_message)
Example #8
def create_indices_bm(config, study):
    hcp_dir = config.get_path('hcp')
    if os.path.exists(os.path.join(hcp_dir, 'qc_recon_all')):
        command = 'cifti_vis_recon_all index --hcp-data-dir {}'.format(hcp_dir)
        rtn, out = utils.run(command)
        if rtn:
            error_message = "qc index creation failed: {}\n{}".format(command, out)
            logger.debug(error_message)
    else:
        logger.debug('qc_recon_all directory does not exist, not generating index')
Example #9
def run_pipeline(config, subject, t1, t2):
    if not input_exists(t1) or not input_exists(t2):
        sys.exit(1)
    base_dir = utils.define_folder(config.get_path('hcp_fs'))
    dest_dir = utils.define_folder(os.path.join(base_dir, subject))
    with utils.cd(dest_dir):
        hcp_pipeline = "hcp-freesurfer.sh {} {} {} {}".format(base_dir, subject,
                t1, t2)
        rtn, out = utils.run(hcp_pipeline, dryrun=DRYRUN)
        if rtn:
            logger.error("hcp-freesurfer.sh exited with non-zero status code. "
                    "Output: {}".format(out))
Example #10
def run_hcp_convert(path, config, study):
    """Runs fs2hcp on the input subject"""
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    freesurfer_dir = config.get_path('freesurfer')
    hcp_dir = config.get_path('hcp')
    output_dir = os.path.join(hcp_dir, subject)

    # run fs2hcp
    #command = 'fs2hcp --FSpath={} --HCPpath={} --subject={}'.format(freesurfer_dir, hcp_dir, subject)
    command = 'ciftify_recon_all --fs-subjects-dir {} --hcp-data-dir {} {}'.format(freesurfer_dir, hcp_dir, subject)
    rtn, out = utils.run(command)
    if rtn:
        error_message = "ciftify_recon_all failed: {}\n{}".format(command, out)
        logger.debug(error_message)

    command2 = make_vis_cmd(hcp_dir, subject)
    rtn, out = utils.run(command2)
    if rtn:
        error_message = "cifti_vis_recon_all snaps failed: {}\n{}".format(command2, out)
        logger.debug(error_message)
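# make_vis_cmd is not shown in this excerpt; a minimal sketch, assuming it
# simply wraps the literal command used in Example #13, would be:
def make_vis_cmd(hcp_dir, subject):
    # hypothetical helper: builds the cifti_vis_recon_all QC snapshot command
    return 'cifti_vis_recon_all snaps --hcp-data-dir {} {}'.format(
        hcp_dir, subject)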
Example #11
def run_pipeline(config, subject, t1, t2):
    if not input_exists(t1) or not input_exists(t2):
        sys.exit(1)
    base_dir = utils.define_folder(config.get_path('hcp_fs'))
    dest_dir = utils.define_folder(os.path.join(base_dir, subject))
    with utils.cd(dest_dir):
        hcp_pipeline = "hcp-freesurfer.sh {} {} {} {}".format(
            base_dir, subject, t1, t2)
        rtn, out = utils.run(hcp_pipeline, dryrun=DRYRUN)
        if rtn:
            logger.error("hcp-freesurfer.sh exited with non-zero status code. "
                         "Output: {}".format(out))
Example #12
def submit_job(cmd, subid, log_dir, walltime="36:00:00"):
    job_name = "dm_hcp_freesurfer_{}_{}".format(subid,
            time.strftime("%Y%m%d-%H%M%S"))

    rtn, out = utils.run("echo {} | qbatch -N {} --walltime {} "
            "--logdir {} -".format(cmd, job_name, walltime, log_dir),
            specialquote=False, dryrun=DRYRUN)

    if rtn:
        logger.error("Job submission failed.")
        if out:
            logger.error("stdout: {}".format(out))
        sys.exit(1)
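# With hypothetical arguments, the submitted shell line has the shape:
#   echo <cmd> | qbatch -N dm_hcp_freesurfer_<subid>_<timestamp> \
#       --walltime 36:00:00 --logdir <log_dir> -
# qbatch reads the command from stdin ('-') and writes job logs to log_dir.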
Example #13
def run_hcp_convert(path, config, study):
    """Runs fs2hcp on the input subject"""
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    freesurfer_dir = config.get_path('freesurfer')
    hcp_dir = config.get_path('hcp')
    output_dir = os.path.join(hcp_dir, subject)

    # run fs2hcp
    #command = 'fs2hcp --FSpath={} --HCPpath={} --subject={}'.format(freesurfer_dir, hcp_dir, subject)
    command = 'ciftify_recon_all --fs-subjects-dir {} --hcp-data-dir {} {}'.format(
        freesurfer_dir, hcp_dir, subject)
    rtn, out = utils.run(command)
    if rtn:
        error_message = "fs2hcp failed: {}\n{}".format(command, out)
        logger.debug(error_message)

    command2 = 'cifti_vis_recon_all snaps --hcp-data-dir {} {}'.format(
        hcp_dir, subject)
    rtn, out = utils.run(command2)
    if rtn:
        error_message = "fs2hcp failed: {}\n{}".format(command2, out)
        logger.debug(error_message)
Example #14
def submit_job(cmd, i, walltime="24:00:00"):
    if DRYRUN:
        return

    job_name = 'dm_freesurfer_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
    job_file = '/tmp/{}'.format(job_name)

    with open(job_file, 'wb') as fid:
        fid.write('#!/bin/bash\n')
        fid.write(cmd)

    rtn, out = utils.run("qsub -V -q main.q -N {} {}".format(
        job_name, job_file))

    if rtn:
        logger.error("Job submission failed.")
        if out:
            logger.error("stdout: {}".format(out))
        sys.exit(1)
Example #15
def run_all(nrrd_dir, config, study):
    """Finds all non-phantom input nrrds and run unring.py in serial."""
    study_base = config.get_study_base(study)
    subjects = os.listdir(nrrd_dir)
    subjects = filter(lambda x: '_PHA_' not in x, subjects)
    unring_dir = utils.define_folder(
        os.path.join(study_base, config.get_path('unring')))
    tags = config.study_config['unring']['tags']

    for subject in subjects:
        output_dir = utils.define_folder(os.path.join(unring_dir, subject))
        inputs = os.listdir(os.path.join(nrrd_dir, subject))
        inputs = select_inputs(inputs,
                               tags)  # selects inputs with matching tag

        for input_file in inputs:

            # don't run if the outputs of unring already exist
            if outputs_exist(output_dir, input_file):
                continue

            # reset / remove error.log
            error_log = os.path.join(output_dir, 'error.log')
            if os.path.isfile(error_log):
                os.remove(error_log)

            output_fname = os.path.join(output_dir, input_file)
            input_fname = os.path.join(nrrd_dir, subject, input_file)
            cmd = 'unring.py {} {} -v'.format(input_fname, output_fname)
            logger.debug('running {}'.format(cmd))
            rtn, out = utils.run(cmd)
            if rtn:
                error_message = "unring.py failed: {}\n{}".format(cmd, out)
                logger.info(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                continue
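# Each call has the shape (hypothetical filenames):
#   unring.py <nrrd_dir>/<subject>/<scan>.nrrd <unring_dir>/<subject>/<scan>.nrrd -v
# and a failure is recorded in <output_dir>/error.log rather than aborting
# the loop.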
Example #16
def run_freesurfer(subject, blacklist, config, resubmit=False):
    """Finds the inputs for subject and runs freesurfer."""
    freesurfer_path = config.get_path('freesurfer')
    output_dir = os.path.join(freesurfer_path, subject.full_id)

    if outputs_exist(output_dir) and not resubmit:
        return

    # reset / remove error.log
    error_log = os.path.join(LOG_DIR, '{}_error.log'.format(subject.full_id))
    if os.path.isfile(error_log):
        os.remove(error_log)

    args = get_freesurfer_arguments(config, subject.site)

    scripts_dir = os.path.join(output_dir, 'scripts')

    if outputs_exist(output_dir):
        # If outputs exist and the script didn't return above, it means
        # 'resubmit' == True and the subject must be restarted
        remove_IsRunning(scripts_dir)
        input_files = []
    else:
        input_files = get_anatomical_images('tags', 'i', subject, blacklist, config,
                error_log)
        optional_files = get_optional_images(subject, blacklist, config,
                error_log)
        input_files.extend(optional_files)

    command = "recon-all {args} -subjid {subid} {inputs}".format(args=args,
            subid=subject.full_id, inputs=" ".join(input_files))
    logger.info('Running recon-all')
    rtn, out = utils.run(command, dryrun=DRYRUN)
    if rtn:
        error_message = 'freesurfer failed: {}\n{}'.format(command, out)
        logger.debug(error_message)
        write_lines(error_log, '{}\n{}'.format(error_message, NODE))
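# The assembled command has the shape (hypothetical values):
#   recon-all <site args> -subjid STUDY_SITE_0001_01 <input arguments>
# where get_freesurfer_arguments() supplies site-specific flags and, given
# the 'i' flag name passed above, get_anatomical_images() is assumed to
# return ready-made '-i <file>' arguments.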
Example #17
def run_freesurfer(subject, blacklist, config, resubmit=False):
    """Finds the inputs for subject and runs freesurfer."""
    freesurfer_path = config.get_path('freesurfer')
    output_dir = os.path.join(freesurfer_path, subject.full_id)

    if outputs_exist(output_dir) and not resubmit:
        return

    # reset / remove error.log
    error_log = os.path.join(LOG_DIR, '{}_error.log'.format(subject.full_id))
    if os.path.isfile(error_log):
        os.remove(error_log)

    args = get_freesurfer_arguments(config, subject.site)

    scripts_dir = os.path.join(output_dir, 'scripts')

    if outputs_exist(output_dir):
        # If outputs exist and the script didn't return above, it means
        # 'resubmit' == True and the subject must be restarted
        remove_IsRunning(scripts_dir)
        input_files = []
    else:
        input_files = get_anatomical_images('tags', 'i', subject, blacklist,
                                            config, error_log)
        optional_files = get_optional_images(subject, blacklist, config,
                                             error_log)
        input_files.extend(optional_files)

    command = "recon-all {args} -subjid {subid} {inputs}".format(
        args=args, subid=subject.full_id, inputs=" ".join(input_files))
    logger.info('Running recon-all')
    rtn, out = utils.run(command, dryrun=DRYRUN)
    if rtn:
        error_message = 'freesurfer failed: {}\n{}'.format(command, out)
        logger.debug(error_message)
        write_lines(error_log, '{}\n{}'.format(error_message, NODE))
Example #18
def run_all(nrrd_dir, config, study):
    """Finds all non-phantom input nrrds and run unring.py in serial."""
    study_base = config.get_study_base(study)
    subjects = os.listdir(nrrd_dir)
    subjects = filter(lambda x: '_PHA_' not in x, subjects)
    unring_dir = utils.define_folder(os.path.join(study_base, config.get_path('unring')))
    tags = config.study_config['unring']['tags']

    for subject in subjects:
        output_dir = utils.define_folder(os.path.join(unring_dir, subject))
        inputs = os.listdir(os.path.join(nrrd_dir, subject))
        inputs = select_inputs(inputs, tags) # selects inputs with matching tag

        for input_file in inputs:

            # don't run if the outputs of unring already exist
            if outputs_exist(output_dir, input_file):
                continue

            # reset / remove error.log
            error_log = os.path.join(output_dir, 'error.log')
            if os.path.isfile(error_log):
                os.remove(error_log)

            output_fname = os.path.join(output_dir, input_file)
            input_fname = os.path.join(nrrd_dir, subject, input_file)
            cmd = 'unring.py {} {} -v'.format(input_fname, output_fname)
            logger.debug('running {}'.format(cmd))
            rtn, out = utils.run(cmd)
            if rtn:
                error_message = "unring.py failed: {}\n{}".format(cmd, out)
                logger.info(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                continue
Example #19
def main():
    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = config.get_path('freesurfer')
    hcp_dir = config.get_path('hcp')

    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)
        return

    qced_subjects = config.get_subject_metadata()

    # running for batch mode

    new_subjects = []
    # find subjects where at least one expected output does not exist
    for subject in qced_subjects:
        subj_dir = os.path.join(hcp_dir, subject)
        if not ciftify_outputs_exist(subj_dir):
            if fs_outputs_exist(os.path.join(freesurfer_dir, subject)):
                new_subjects.append(subject)

    create_indices_bm(config, study)

    # submit a list of calls to ourself, one per subject
    commands = []
    if debug:
        debugopt = '--debug'
    else:
        debugopt = ''

    for subject in new_subjects:
        commands.append(" ".join(
            [__file__, study, '--subject {} '.format(subject), debugopt]))

    if commands:
        logger.debug('queueing up the following commands:\n' +
                     '\n'.join(commands))

    for i, cmd in enumerate(commands):
        jobname = 'dm_fs2hcp_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)
        with open(jobfile, 'wb') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
            logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
Example #20
def main():
    """
    Loops through subjects, preprocesses each with the supplied script, and
    runs a first-level GLM using AFNI (tent functions, 15 s window) on all
    subjects.
    """
    arguments = docopt(__doc__)
    study     = arguments['<study>']
    subject   = arguments['--subject']
    debug     = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)
    imob_dir = os.path.join(study_base, config.get_path('fmri'), 'imob')

    # process a single subject
    if subject:

        # get required inputs from each
        files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
        inputs = get_inputs(files, config)

        # check if subject has already been processed
        if check_complete(imob_dir, subject):
            logger.info('{} already analysed'.format(subject))
            sys.exit(0)

        # first level GLM for inputs
        for input_type in inputs.keys():
            script = generate_analysis_script(subject, inputs, input_type, config, study)
            rtn, out = utils.run('chmod 754 {}'.format(script))
            rtn, out = utils.run(script)
            if rtn:
                logger.error('Script {} failed to run on subject {} with error:\n{}'.format(
                    script, subject, out))
                sys.exit(1)

    # process all subjects
    else:
        commands = []
        for path in glob.glob('{}/*'.format(imob_dir)):
            subject = os.path.basename(path)

            # add subject if any of the expected outputs do not exist
            files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
            try:
                inputs = get_inputs(files, config)
            except Exception:
                logger.debug('Invalid inputs for {}'.format(subject))
                continue
            expected = inputs.keys()

            for exp in expected:
                if not filter(lambda x: '{}_glm_IM_1stlvl_{}'.format(subject, exp) in x, files):
                    commands.append(" ".join([__file__, study, '--subject {}'.format(subject)]))
                    break

        if commands:
            logger.debug("queueing up the following commands:\n"+'\n'.join(commands))
            #fd, path = tempfile.mkstemp()
            #os.write(fd, '\n'.join(commands))
            #os.close(fd)
            for i, cmd in enumerate(commands):
                jobname = "dm_imob_{}_{}".format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))
                #rtn, out, err = utils.run('qbatch -i --logdir {logdir} -N {name} --walltime {wt} {cmds}'.format(logdir = log_path, name = jobname, wt = walltime, cmds = path))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #21
            logger.error(msg)
        finally:
            f1.close()
            f2.close()
            f3.close()
            f4.close()

    # run the GLM
    files = glob.glob(os.path.join(ea_dir, subject + '/*.nii.gz'))
    inputs = get_inputs(files, config)

    for input_type in inputs.keys():

        script = generate_analysis_script(subject, inputs, input_type, config,
                                          study)
        rtn, out = utils.run('chmod 754 {}'.format(script))
        rtn, out = utils.run(script)
        if rtn:
            logger.error(
                'Script {} failed to run on subject {} with error:\n{}'.format(
                    script, subject, out))
            sys.exit(1)


def main():
    arguments = docopt(__doc__)

    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']
Example #22
def main():
    """
    Runs fmri data through the specified epitome script.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    for x in config.study_config['fmri'].iteritems():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in x[1].keys():
                logger.error("fmri:{}:{} not defined in configuration file".format(x[0], k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['fmri']))
            for exp in config.study_config['fmri'].keys():
                expected_names = config.study_config['fmri'][exp]['export']
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join(['python ', __file__, study, '--subject {} '.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #23
def run_epitome(path, config, study):
    """
    Finds the appropriate inputs for the input subject, builds a temporary epitome
    folder, runs epitome, and finally copies the outputs to the fmri_dir.
    """
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])
    t1_dir = os.path.join(study_base, config.site_config['paths']['hcp'])
    fmri_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['fmri']))
    experiments = config.study_config['fmri'].keys()

    # run file collection --> epitome --> export for each study
    logger.debug('experiments found {}'.format(experiments))
    for exp in experiments:
        logger.debug('running experiment {}'.format(exp))
        # collect the files needed for each experiment
        expected_names = config.study_config['fmri'][exp]['export']
        expected_tags = config.study_config['fmri'][exp]['tags']
        output_dir = utils.define_folder(os.path.join(fmri_dir, exp, subject))

        # don't run if the outputs of epitome already exist
        if outputs_exist(output_dir, expected_names):
            continue

        # reset / remove error.log
        error_log = os.path.join(output_dir, 'error.log')
        if os.path.isfile(error_log):
            os.remove(error_log)

        failed = False

        if isinstance(expected_tags, str):
            expected_tags = [expected_tags]

        # locate functional data
        files = glob.glob(path + '/*')
        functionals = []
        for tag in expected_tags:
            candidates = filter(lambda x: tag in x, files)
            candidates = utils.filter_niftis(candidates)
            candidates.sort()
            logger.debug('checking functional inputs {}'.format(candidates))
            try:
                check_inputs(config, tag, path, candidates)
            except Exception as m:
                error_message = 'Did not find the correct number of fMRI inputs:\n{}'.format(m)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            functionals.extend(candidates)

        # locate anatomical data
        anat_path = os.path.join(t1_dir, os.path.basename(path), 'T1w')
        files = glob.glob(anat_path + '/*')
        anatomicals = []
        for anat in ['aparc+aseg.nii.gz', 'aparc.a2009s+aseg.nii.gz', 'T1w_brain.nii.gz']:
            if not filter(lambda x: anat in x, files):
                error_message = 'expected anatomical {} not found in {}'.format(anat, anat_path)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            anatomicals.append(os.path.join(anat_path, anat))

        # don't run epitome if any of the inputs are missing
        if failed:
            continue

        # create and populate epitome directory
        epi_dir = tempfile.mkdtemp()
        utils.make_epitome_folders(epi_dir, len(functionals))
        epi_t1_dir = '{}/TEMP/SUBJ/T1/SESS01'.format(epi_dir)
        epi_func_dir = '{}/TEMP/SUBJ/FUNC/SESS01'.format(epi_dir)

        try:
            shutil.copyfile(anatomicals[0], '{}/anat_aparc_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[1], '{}/anat_aparc2009_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[2], '{}/anat_T1_brain.nii.gz'.format(epi_t1_dir))
            for i, d in enumerate(functionals):
                shutil.copyfile(d, '{}/RUN{}/FUNC.nii.gz'.format(epi_func_dir, '%02d' % (i + 1)))
        except IOError as e:
            error_message = 'unable to copy files to {}\n{}'.format(epi_dir, e)
            logger.error(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # collect command line options
        dims = config.study_config['fmri'][exp]['dims']
        tr = config.study_config['fmri'][exp]['tr']
        delete = config.study_config['fmri'][exp]['del']
        pipeline = config.study_config['fmri'][exp]['pipeline']

        pipeline = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'assets/{}'.format(pipeline))
        if not os.path.isfile(pipeline):
            raise Exception('invalid pipeline {} defined!'.format(pipeline))

        # run epitome
        command = '{} {} {} {} {}'.format(pipeline, epi_dir, delete, tr, dims)
        rtn, out = utils.run(command)
        if rtn:
            error_message = "epitome script failed: {}\n{}".format(command, out)
            logger.debug(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # export fmri data
        epitome_outputs = glob.glob(epi_func_dir + '/*')
        for name in expected_names:
            try:
                matches = filter(lambda x: 'func_' + name in x, epitome_outputs)
                matches.sort()

                # attempt to export the defined epitome stages for all runs
                if len(matches) != len(functionals):
                    error_message = 'epitome output {} not created for all inputs'.format(name)
                    logger.error(error_message)
                    with open(error_log, 'wb') as f:
                        f.write('{}\n{}'.format(error_message, NODE))
                    continue
                for i, match in enumerate(matches):
                    func_basename = utils.splitext(os.path.basename(functionals[i]))[0]
                    func_output = os.path.join(output_dir, func_basename + '_{}.nii.gz'.format(name))
                    export_file(match, func_output)

                # export all anatomical / registration information
                export_file_list('anat_', epitome_outputs, output_dir)
                export_file_list('reg_',  epitome_outputs, output_dir)
                export_file_list('mat_',  epitome_outputs, output_dir)

                # export PARAMS folder
                export_directory(os.path.join(epi_func_dir, 'PARAMS'), os.path.join(output_dir, 'PARAMS'))

            except ProcessingError as p:
                error_message = 'error exporting: {}'.format(p)
                logger.error(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                continue

        # remove temporary directory
        shutil.rmtree(epi_dir)
Example #24
def generate_analysis_script(subject, inputs, input_type, config, study):
    """
    This writes the analysis script to replicate the methods in Harvey et al
    2013 Schizophrenia Bulletin. It expects timing files to exist.

    Briefly, this method uses the correlation between the empathic ratings of
    the participant and the actor from each video to generate an amplitude-
    modulated box-car model to be fit to each time-series. This model is
    convolved with an HRF, and is run alongside a standard boxcar. This allows
    us to detect regions that modulate their 'activation strength' with
    empathic accuracy, and those that generally track the watching of
    emotionally-valenced videos (but do not parametrically modulate).
    Since each video is of a different length, each block is encoded as such
    in the stimulus-timing file (all times in seconds):
        [start_time]*[amplitude]:[block_length]
        30*5:12
    See '-stim_times_AM2' in AFNI's 3dDeconvolve 'help' for more.
    """
    study_base = config.get_study_base(study)
    subject_dir = os.path.join(study_base, config.get_path('fmri'), 'ea', subject)
    script = '{subject_dir}/{subject}_glm_1stlevel_{input_type}.sh'.format(
        subject_dir=subject_dir, subject=subject, input_type=input_type)

    # combine motion parameters (glob because run does not expand * any longer)
    f1 = glob.glob('{}/PARAMS/motion.*.01.1D'.format(subject_dir))[0]
    f2 = glob.glob('{}/PARAMS/motion.*.02.1D'.format(subject_dir))[0]
    f3 = glob.glob('{}/PARAMS/motion.*.03.1D'.format(subject_dir))[0]
    rtn, out = utils.run('cat {} {} {} > {}/{}_motion.1D'.format(
        f1, f2, f3, subject_dir, subject), specialquote=False)

    # get input data, turn into a single string
    input_list = inputs[input_type]
    input_list.sort()

    input_data = ''
    for i in input_list:
        input_data += '{} '.format(i)

    # open up the master script, write common variables
    f = open(script, 'wb')
    f.write("""#!/bin/bash

# clean up
rm {subject_dir}/*_glm_*

# Empathic accuracy (with amplitude modulation) GLM for {sub}.
3dDeconvolve \\
    -input {input_data} \\
    -mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/{sub}_motion.1D motion_paramaters \\
    -polort 4 \\
    -num_stimts 1 \\
    -local_times \\
    -jobs 4 \\
    -x1D {subject_dir}/{sub}_glm_vid_1stlevel_design.mat \\
    -stim_times_AM2 1 {subject_dir}/{sub}_vid_block-times_ea.1D \'dmBLOCK(1)\' \\
    -stim_label 1 empathic_accuracy \\
    -fitts {subject_dir}/{sub}_glm_vid_1stlevel_explained.nii.gz \\
    -errts {subject_dir}/{sub}_glm_vid_1stlevel_residuals.nii.gz \\
    -bucket {subject_dir}/{sub}_glm_vid_1stlevel.nii.gz \\
    -cbucket {subject_dir}/{sub}_glm_vid_1stlevel_coeffs.nii.gz \\
    -fout \\
    -tout \\
    -xjpeg {subject_dir}/{sub}_glm_vid_1stlevel_matrix.jpg

# Colour discrimination (with amplitude modulation) GLM for {sub}.
3dDeconvolve \\
    -input {input_data} \\
    -mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/{sub}_motion.1D motion_paramaters \\
    -polort 4 \\
    -num_stimts 1 \\
    -local_times \\
    -jobs 4 \\
    -x1D {subject_dir}/{sub}_glm_cvid_1stlevel_design.mat \\
    -stim_times_AM2 1 {subject_dir}/{sub}_cvid_block-times_ea.1D \'dmBLOCK(1)\' \\
    -stim_label 1 color_videos \\
    -fitts {subject_dir}/{sub}_glm_cvid_1stlevel_explained.nii.gz \\
    -errts {subject_dir}/{sub}_glm_cvid_1stlevel_residuals.nii.gz \\
    -bucket {subject_dir}/{sub}_glm_cvid_1stlevel.nii.gz \\
    -cbucket {subject_dir}/{sub}_glm_cvid_1stlevel_coeffs.nii.gz \\
    -fout \\
    -tout \\
    -xjpeg {subject_dir}/{sub}_glm_cvid_1stlevel_matrix.jpg

""".format(input_data=input_data, subject_dir=subject_dir, sub=subject))
    f.close()

    return script
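# The returned script is executed by the caller, e.g. (the pattern used in
# Example #20):
#   rtn, out = utils.run('chmod 754 {}'.format(script))
#   rtn, out = utils.run(script)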
Example #25
def main():
    arguments = docopt(__doc__)
    study     = arguments['<study>']
    scanid    = arguments['--subject']
    debug     = arguments['--debug']
    dryrun    = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = os.path.join(study_base, config.site_config['paths']['freesurfer'])
    hcp_dir = os.path.join(study_base, config.site_config['paths']['hcp'])

    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        freesurfer_dirs = glob.glob('{}/*'.format(freesurfer_dir))

        # find subjects where at least one expected output does not exist
        for path in freesurfer_dirs:
            subject = os.path.basename(path)

            hcp_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['hcp']))
            subj_dir = os.path.join(hcp_dir, subject)
            if not outputs_exist(subj_dir):
                subjects.append(subject)

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join([__file__, study, '--subject {} '.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fs2hcp_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))
                #rtn, out = utils.run('echo bash -l {}/{} {} | qbatch -N {} --logdir {} --walltime {} -'.format(bin_dir, script, subid, jobname, logs_dir, walltime))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #26
def main():
    """
    Runs .nrrd data through unring.py.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    batch  = arguments['--batch']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nrrd']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    nrrd_dir = os.path.join(study_base, config.get_path('nrrd'))

    # runs in serial (due to MATLAB dependencies)
    if batch:
        try:
            run_all(nrrd_dir, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # default behaviour: submit self to queue in batch mode
    else:
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        cmd = 'python {} {} --batch {}'.format(__file__, study, debugopt)
        jobname = 'dm_unring_{}'.format(time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)

        with open(jobfile, 'wb') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
            logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
Example #27
def main():

    arguments = docopt(__doc__)
    study = arguments['<study>']
    scanid = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error(
            'study {} not defined in master configuration file\n{}'.format(
                study, NODE))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'fmri' not in config.site_config['paths']:
        logger.error(
            "paths:fmri not defined in site configuration file\n{}".format(
                NODE))
        sys.exit(1)

    fmri_dir = os.path.join(study_base, config.site_config['paths']['fmri'])

    if scanid:
        path = os.path.join(fmri_dir, scanid)
        try:
            run_analysis(scanid, config, study)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        # look for subjects with at least one fmri type missing outputs
        subjects = []

        # loop through fmri experiments defined
        for exp in config.study_config['fmri'].keys():
            expected_files = config.study_config['fmri'][exp]['conn']
            fmri_dirs = glob.glob('{}/*'.format(os.path.join(fmri_dir, exp)))

            for subj_dir in fmri_dirs:
                candidates = glob.glob('{}/*'.format(subj_dir))
                for filetype in expected_files:
                    # add subject if outputs don't already exist
                    if not filter(
                            lambda x: '{}_roi-corrs.csv'.format(filetype) in x,
                            candidates):
                        subjects.append(os.path.basename(subj_dir))
                        break

        # collapse found subjects (do not double-count) and create a list of commands
        commands = []
        subjects = list(set(subjects))
        for subject in subjects:
            commands.append(" ".join(
                [__file__, study, '--subject {}'.format(subject)]))

        if commands:
            logger.debug('queueing up the following commands:\n' +
                         '\n'.join(commands))

            for i, cmd in enumerate(commands):
                jobname = 'dm_rest_{}_{}'.format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error(
                        "Job submission failed. Output follows. {}".format(
                            NODE))
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #28
def generate_analysis_script(subject, inputs, input_type, config, study):
    """
    This writes the analysis script to replicate the methods in Harvey et al
    2013 Schizophrenia Bulletin. It expects timing files to exist.

    Briefly, this method uses the correlation between the empathic ratings of
    the participant and the actor from each video to generate an amplitude-
    modulated box-car model to be fit to each time-series. This model is
    convolved with an HRF, and is run alongside a standard boxcar. This allows
    us to detect regions that modulate their 'activation strength' with
    empathic accuracy, and those that generally track the watching of
    emotionally-valenced videos (but do not parametrically modulate).
    Since each video is of a different length, each block is encoded as such
    in the stimulus-timing file (all times in seconds):
        [start_time]*[amplitude]:[block_length]
        30*5:12
    See '-stim_times_AM2' in AFNI's 3dDeconvolve 'help' for more.
    """
    study_base = config.get_study_base(study)
    subject_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                               'ea', subject)
    script = '{subject_dir}/{subject}_glm_1stlevel_{input_type}.sh'.format(
        subject_dir=subject_dir, subject=subject, input_type=input_type)

    # combine motion parameters (glob because run does not expand * any longer)
    f1 = glob.glob('{}/PARAMS/motion.*.01.1D'.format(subject_dir))[0]
    f2 = glob.glob('{}/PARAMS/motion.*.02.1D'.format(subject_dir))[0]
    f3 = glob.glob('{}/PARAMS/motion.*.03.1D'.format(subject_dir))[0]
    rtn, out = utils.run('cat {} {} {} > {}/{}_motion.1D'.format(
        f1, f2, f3, subject_dir, subject),
                         specialquote=False)

    # get input data, turn into a single string
    input_list = inputs[input_type]
    input_list.sort()

    input_data = ''
    for i in input_list:
        input_data += '{} '.format(i)

    # open up the master script, write common variables
    f = open(script, 'wb')
    f.write("""#!/bin/bash

# clean up
rm {subject_dir}/*_glm_*

# Empathic accuracy (with amplitude modulation) GLM for {sub}.
3dDeconvolve \\
    -input {input_data} \\
    -mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/{sub}_motion.1D motion_paramaters \\
    -polort 4 \\
    -num_stimts 1 \\
    -local_times \\
    -jobs 4 \\
    -x1D {subject_dir}/{sub}_glm_vid_1stlevel_design.mat \\
    -stim_times_AM2 1 {subject_dir}/{sub}_vid_block-times_ea.1D \'dmBLOCK(1)\' \\
    -stim_label 1 empathic_accuracy \\
    -fitts {subject_dir}/{sub}_glm_vid_1stlevel_explained.nii.gz \\
    -errts {subject_dir}/{sub}_glm_vid_1stlevel_residuals.nii.gz \\
    -bucket {subject_dir}/{sub}_glm_vid_1stlevel.nii.gz \\
    -cbucket {subject_dir}/{sub}_glm_vid_1stlevel_coeffs.nii.gz \\
    -fout \\
    -tout \\
    -xjpeg {subject_dir}/{sub}_glm_vid_1stlevel_matrix.jpg

# Colour discrimination (with amplitude modulation) GLM for {sub}.
3dDeconvolve \\
    -input {input_data} \\
    -mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/{sub}_motion.1D motion_paramaters \\
    -polort 4 \\
    -num_stimts 1 \\
    -local_times \\
    -jobs 4 \\
    -x1D {subject_dir}/{sub}_glm_cvid_1stlevel_design.mat \\
    -stim_times_AM2 1 {subject_dir}/{sub}_cvid_block-times_ea.1D \'dmBLOCK(1)\' \\
    -stim_label 1 color_videos \\
    -fitts {subject_dir}/{sub}_glm_cvid_1stlevel_explained.nii.gz \\
    -errts {subject_dir}/{sub}_glm_cvid_1stlevel_residuals.nii.gz \\
    -bucket {subject_dir}/{sub}_glm_cvid_1stlevel.nii.gz \\
    -cbucket {subject_dir}/{sub}_glm_cvid_1stlevel_coeffs.nii.gz \\
    -fout \\
    -tout \\
    -xjpeg {subject_dir}/{sub}_glm_cvid_1stlevel_matrix.jpg

""".format(input_data=input_data, subject_dir=subject_dir, sub=subject))
    f.close()

    return script
Example #29
def main():
    """
    Runs fmri data through the specified epitome script.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']
    output = arguments['--output']
    exports = arguments['--exports']
    task = arguments['--task']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    # Parse optional arguments
    output_dir = output if output else os.path.join(study_base, config.get_path('fmri'))
    opt_exports = exports.split(',') if exports else []

    # Check if task is available
    if task:
        try:
            config.study_config['fmri'][task]
        except KeyError:
            logger.error('Task {} not found in study config!'.format(task))
            sys.exit(1)
        tasks = {k: v for k, v in config.study_config['fmri'].iteritems()
                 if k == task}

    else:
        tasks = config.study_config['fmri']

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    for x in tasks.iteritems():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in x[1].keys():
                logger.error("fmri:{}:{} not defined in configuration file".format(x[0], k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.get_path('nii'))

    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study, output_dir, opt_exports, tasks)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(output_dir)
            for exp in config.study_config['fmri'].keys():
                expected_names = set(config.study_config['fmri'][exp]['export'] + opt_exports)
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []

        g_opts = ' --output {}'.format(output_dir)
        if exports:
            g_opts += ' --exports {}'.format(exports)
        if task:
            g_opts += ' --task {}'.format(task)
        if debug:
            g_opts += ' --debug'

        for subject in subjects:
            sub_tag = ' --subject {}'.format(subject)
            commands.append(" ".join(['python ', __file__, study,g_opts,sub_tag]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #30
def run_epitome(path, config, study, output, exports, tasks):
    """
    Finds the appropriate inputs for the input subject, builds a temporary epitome
    folder, runs epitome, and finally copies the outputs to the fmri_dir.
    """
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    nii_dir = os.path.join(study_base, config.get_path('nii'))
    t1_dir = os.path.join(study_base, config.get_path('hcp'))

    fmri_dir = utils.define_folder(output)
    experiments = tasks.keys()

    # run file collection --> epitome --> export for each study
    logger.debug('experiments found {}'.format(experiments))
    for exp in experiments:
        logger.debug('running experiment {}'.format(exp))
        # collect the files needed for each experiment
        expected_names = set(config.study_config['fmri'][exp]['export'] + exports)
        expected_tags = config.study_config['fmri'][exp]['tags']
        output_dir = utils.define_folder(os.path.join(fmri_dir, exp, subject))

        # don't run if the outputs of epitome already exist
        if outputs_exist(output_dir, expected_names):
            continue

        # reset / remove error.log
        error_log = os.path.join(output_dir, 'error.log')
        if os.path.isfile(error_log):
            os.remove(error_log)

        failed = False

        if isinstance(expected_tags, str):
            expected_tags = [expected_tags]

        # locate functional data
        files = glob.glob(path + '/*')
        functionals = []
        for tag in expected_tags:
            candidates = filter(lambda x: tag in x, files)
            candidates = utils.filter_niftis(candidates)
            candidates.sort()
            logger.debug('checking functional inputs {}'.format(candidates))
            try:
                check_inputs(config, tag, path, candidates)
            except Exception as m:
                error_message = 'Did not find the correct number of fMRI inputs:\n{}'.format(m)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            functionals.extend(candidates)

        # locate anatomical data
        anat_path = os.path.join(t1_dir, os.path.basename(path), 'T1w')
        files = glob.glob(anat_path + '/*')
        anatomicals = []
        for anat in ['aparc+aseg.nii.gz', 'aparc.a2009s+aseg.nii.gz', 'T1w_brain.nii.gz']:
            if not filter(lambda x: anat in x, files):
                error_message = 'expected anatomical {} not found in {}'.format(anat, anat_path)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            anatomicals.append(os.path.join(anat_path, anat))

        # don't run epitome if any of the inputs are missing
        if failed:
            continue

        # create and populate epitome directory
        epi_dir = tempfile.mkdtemp()
        make_epitome_folders(epi_dir, len(functionals))
        epi_t1_dir = '{}/TEMP/SUBJ/T1/SESS01'.format(epi_dir)
        epi_func_dir = '{}/TEMP/SUBJ/FUNC/SESS01'.format(epi_dir)

        try:
            shutil.copyfile(anatomicals[0], '{}/anat_aparc_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[1], '{}/anat_aparc2009_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[2], '{}/anat_T1_brain.nii.gz'.format(epi_t1_dir))
            for i, d in enumerate(functionals):
                shutil.copyfile(d, '{}/RUN{}/FUNC.nii.gz'.format(epi_func_dir, '%02d' % (i + 1)))
        except IOError as e:
            error_message = 'unable to copy files to {}\n{}'.format(epi_dir, e)
            logger.error(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # collect command line options
        dims = config.study_config['fmri'][exp]['dims']
        tr = config.study_config['fmri'][exp]['tr']
        delete = config.study_config['fmri'][exp]['del']
        pipeline = config.study_config['fmri'][exp]['pipeline']

        pipeline = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                os.pardir, 'assets/{}'.format(pipeline))
        if not os.path.isfile(pipeline):
            raise Exception('invalid pipeline {} defined!'.format(pipeline))

        # run epitome: the pipeline script takes the staging directory,
        # delete count, TR, and voxel dimensions as positional arguments
        command = '{} {} {} {} {}'.format(pipeline, epi_dir, delete, tr, dims)
        rtn, out = utils.run(command)
        if rtn:
            error_message = "epitome script failed: {}\n{}".format(command, out)
            logger.debug(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue
        else:
            pass

        # export fmri data
        epitome_outputs = glob.glob(epi_func_dir + '/*')
        for name in expected_names:
            try:
                matches = sorted(x for x in epitome_outputs if 'func_' + name in x)

                # attempt to export the defined epitome stages for all runs
                if len(matches) != len(functionals):
                    error_message = 'epitome output {} not created for all inputs'.format(name)
                    logger.error(error_message)
                    with open(error_log, 'wb') as f:
                        f.write('{}\n{}'.format(error_message, NODE))
                    continue
                for i, match in enumerate(matches):
                    func_basename = utils.splitext(os.path.basename(functionals[i]))[0]
                    func_output = os.path.join(output_dir, func_basename + '_{}.nii.gz'.format(name))
                    export_file(match, func_output)

                # export all anatomical / registration information
                export_file_list('anat_', epitome_outputs, output_dir)
                export_file_list('reg_',  epitome_outputs, output_dir)
                export_file_list('mat_',  epitome_outputs, output_dir)

                # export PARAMS folder
                export_directory(os.path.join(epi_func_dir, 'PARAMS'), os.path.join(output_dir, 'PARAMS'))

            except ProcessingError as p:
                error_message = 'error exporting: {}'.format(p)
                logger.error(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                continue

        # remove temporary directory
        shutil.rmtree(epi_dir)
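
The export loop above reads six per-experiment settings out of config.study_config['fmri']. A sketch of that block, assuming nested-dict settings as used throughout these examples; the key names come from the code above, but every value below is illustrative:

# Illustrative study-config fragment; only the key names are grounded in
# the code above, the values are hypothetical.
study_config = {
    'fmri': {
        'rest': {
            'export': ['scaled', 'lowpass'],  # epitome stages to export
            'tags': 'RST',                    # tag(s) identifying functional inputs
            'dims': 3.0,                      # voxel dimensions passed to the pipeline
            'tr': 2.0,                        # repetition time
            'del': 4,                         # delete count passed to the pipeline
            'pipeline': 'rest.sh',            # script resolved under assets/
        },
    },
}

outputs_exist() is defined elsewhere; a minimal sketch consistent with its call site and with the exported filenames (func_basename + '_<name>.nii.gz') could be:

import glob
import os

def outputs_exist(output_dir, expected_names):
    """Sketch only: True if every expected export name already matches at
    least one file in output_dir. The real helper may be stricter, e.g.
    requiring one file per functional run."""
    for name in expected_names:
        if not glob.glob(os.path.join(output_dir, '*_{}.nii.gz'.format(name))):
            return False
    return True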
Example #31
def main():
    arguments = docopt(__doc__)

    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'ea' not in config.study_config['fmri']:
        logger.error('ea not defined in fmri settings for study {}'.format(study))
        sys.exit(1)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config for study {}".format(k, study))
            sys.exit(1)

    ea_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                          'ea')
    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

    if subject:
        if '_PHA_' in subject:
            logger.error("{} is a phantom, cannot analyze".format(subject))
            sys.exit(1)
        analyze_subject(subject, config, study)

    else:
        # batch mode
        subjects = glob.glob('{}/*'.format(nii_dir))
        commands = []

        if debug:
            opts = '--debug'
        else:
            opts = ''

        for path in subjects:
            subject = os.path.basename(path)
            if check_complete(ea_dir, subject):
                logger.debug('{} already analysed'.format(subject))
            else:
                commands.append(" ".join(
                    [__file__, study, '--subject {}'.format(subject), opts]))

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_ea_{}_{}".format(i,
                                               time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)

                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                # qbatch method -- might bring it back, but not needed
                #fd, path = tempfile.mkstemp()
                #os.write(fd, '\n'.join(commands))
                #os.close(fd)
                #rtn, out, err = utils.run('qbatch -i --logdir {ld} -N {name} --walltime {wt} {cmds}'.format(ld=logdir, name=jobname, wt=walltime, cmds=path))
                if rtn:
                    logger.error(
                        "Job submission failed\nstdout: {}".format(out))
                    sys.exit(1)
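
Each main() in these examples parses the same command line through docopt(__doc__), but the module docstring is never shown. A minimal usage block satisfying the keys read above (<study>, --subject, --debug) might look like the following; the script name is an assumption taken from the dm_ea job prefix:

"""
Usage:
    dm_ea.py [options] <study>

Arguments:
    <study>            study name defined in the site configuration

Options:
    --subject=<id>     analyze one subject instead of running in batch mode
    --debug            enable debug logging
"""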
Example #32
            msg = 'Failed to open block_times & corr_push for {} with error {}'.format(subject, e.strerror)
            logger.error(msg)
        finally:
            f1.close()
            f2.close()
            f3.close()
            f4.close()

    # run the GLM
    files = glob.glob(os.path.join(ea_dir, subject + '/*.nii.gz'))
    inputs = get_inputs(files, config)

    for input_type in inputs.keys():

        script = generate_analysis_script(subject, inputs, input_type, config, study)
        rtn, out = utils.run('chmod 754 {}'.format(script))
        rtn, out = utils.run(script)
        if rtn:
            logger.error('Script {} failed to run on subject {} with error:\n{}'.format(
                script, subject, out))
            sys.exit(1)

def main():
    arguments   = docopt(__doc__)

    study   = arguments['<study>']
    subject = arguments['--subject']
    debug   = arguments['--debug']

    logging.info('Starting')
    if debug:
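
get_inputs(files, config) is called here and in later examples to map each input type to the subject's matching NIfTI files, but its definition is not included. A minimal sketch, assuming the types come from the study config's export list (the config lookup is hypothetical) and reusing the '_<name>.nii.gz' export naming seen earlier:

def get_inputs(files, config):
    """Sketch only: group a subject's exported NIfTIs by the epitome stage
    name embedded in the filename. The real helper may also validate the
    number of files found."""
    inputs = {}
    for input_type in config.study_config['fmri']['ea']['export']:  # hypothetical lookup
        matched = [f for f in files if '_{}.nii.gz'.format(input_type) in f]
        if matched:
            inputs[input_type] = sorted(matched)
    return inputs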
Example #33
def run_analysis(scanid, config, study):
    """
    Extracts: time series, correlation matricies using defined atlas.
    """
    study_base = config.get_study_base(study)
    fmri_dir = os.path.join(study_base, config.site_config['paths']['fmri'])
    experiments = config.study_config['fmri'].keys()
    atlas = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         os.pardir, 'assets/shen_2mm_268_parcellation.nii.gz')

    if not os.path.isfile(atlas):
        logger.error('atlas file {} not found'.format(atlas))
        sys.exit(1)

    for exp in experiments:
        path = os.path.join(fmri_dir, exp, scanid)

        # get filetypes to analyze, ignoring ROI files
        inputs = get_inputs(config, path, exp, scanid)

        for filename in inputs:
            basename = os.path.basename(utils.splitext(filename)[0])

            # if the final correlation matrix exists, skip processing
            if os.path.isfile(os.path.join(path, basename + '_roi-corrs.csv')):
                continue

            # generate ROI file in register with subject's data
            roi_file = os.path.join(path, basename + '_rois.nii.gz')
            if not os.path.isfile(roi_file):
                rtn, out = utils.run(
                    '3dresample -master {} -prefix {} -inset {}'.format(
                        filename, roi_file, atlas))
                if rtn:
                    logger.error('{}\n{}'.format(out, NODE))
                    raise Exception(
                        'Error resampling atlas {} to match {}.'.format(
                            atlas, filename))

            rois, _, _, _ = utils.loadnii(roi_file)
            data, _, _, _ = utils.loadnii(filename)

            n_rois = len(np.unique(rois[rois > 0]))
            dims = np.shape(data)

            # loop through all ROIs, extracting mean timeseries.
            output = np.zeros((n_rois, dims[1]))

            for i, roi in enumerate(np.unique(rois[rois > 0])):
                idx = np.where(rois == roi)[0]

                if len(idx) > 0:
                    output[i, :] = np.mean(data[idx, :], axis=0)

            # save the raw time series
            np.savetxt(os.path.join(path, basename + '_roi-timeseries.csv'),
                       output,
                       delimiter=',')

            # save the full correlation matrix
            corrs = np.corrcoef(output)
            np.savetxt(os.path.join(path, basename + '_roi-corrs.csv'),
                       corrs,
                       delimiter=',')
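
The extraction above reduces a voxels-by-TRs matrix to one mean time series per ROI and then correlates the rows; np.corrcoef treats each row as a variable. A self-contained toy version of the same arithmetic (the shapes and labels are made up):

import numpy as np

# toy data: 6 voxels x 10 TRs, with the voxels labelled into 2 ROIs
data = np.random.randn(6, 10)
rois = np.array([1, 1, 1, 2, 2, 2])

labels = np.unique(rois[rois > 0])
timeseries = np.zeros((len(labels), data.shape[1]))
for i, roi in enumerate(labels):
    idx = np.where(rois == roi)[0]
    timeseries[i, :] = np.mean(data[idx, :], axis=0)  # mean series per ROI

corrs = np.corrcoef(timeseries)  # ROI-by-ROI correlation matrix
print(corrs.shape)               # (2, 2)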
Example #34
def main():
    arguments   = docopt(__doc__)

    study   = arguments['<study>']
    subject = arguments['--subject']
    debug   = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    if 'ea' not in config.study_config['fmri']:
        logger.error('ea not defined in fmri settings for study {}'.format(study))
        sys.exit(1)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in study config for {}".format(k, study))
            sys.exit(1)

    ea_dir = os.path.join(study_base, config.get_path('fmri'), 'ea')
    nii_dir = os.path.join(study_base, config.get_path('nii'))

    if subject:
        if '_PHA_' in subject:
            logger.error("{} is a phantom, cannot analyze".format(subject))
            sys.exit(1)
        analyze_subject(subject, config, study)

    else:
        # batch mode
        subjects = glob.glob('{}/*'.format(nii_dir))
        commands = []

        if debug:
            opts = '--debug'
        else:
            opts = ''

        for path in subjects:
            subject = os.path.basename(path)
            if check_complete(ea_dir, subject):
                logger.debug('{} already analysed'.format(subject))
            else:
                commands.append(" ".join([__file__, study, '--subject {}'.format(subject), opts]))

        if commands:
            logger.debug("queueing up the following commands:\n"+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_ea_{}_{}".format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)

                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))
                # qbatch method -- might bring it back, but not needed
                #fd, path = tempfile.mkstemp()
                #os.write(fd, '\n'.join(commands))
                #os.close(fd)
                #rtn, out, err = utils.run('qbatch -i --logdir {ld} -N {name} --walltime {wt} {cmds}'.format(ld=logdir, name=jobname, wt=walltime, cmds=path))
                if rtn:
                    logger.error("Job submission failed\nstdout: {}".format(out))
                    sys.exit(1)
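
check_complete() gates job submission in both batch loops but is defined elsewhere. A conservative sketch, assuming a subject counts as analysed once its output folder holds any NIfTI results; the real check likely looks for specific GLM outputs:

import glob
import os

def check_complete(output_dir, subject):
    """Sketch only: returns True if any NIfTI outputs exist for the
    subject. The real helper may test for specific expected files."""
    return bool(glob.glob(os.path.join(output_dir, subject, '*.nii.gz')))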
Example #35
def main():
    """
    Runs .nrrd data through unring.py.
    """
    arguments = docopt(__doc__)

    study = arguments['<study>']
    batch = arguments['--batch']
    debug = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nrrd']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    nrrd_dir = os.path.join(study_base, config.get_path('nrrd'))

    # runs in serial (due to MATLAB dependencies)
    if batch:
        try:
            run_all(nrrd_dir, config, study)
        except Exception as e:
            logging.error(e)
            sys.exit(1)

    # default behaviour: submit self to queue in batch mode
    else:
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        cmd = 'python {} {} --batch {}'.format(__file__, study, debugopt)
        jobname = 'dm_unring_{}'.format(time.strftime("%Y%m%d-%H%M%S"))
        jobfile = '/tmp/{}'.format(jobname)
        logfile = '/tmp/{}.log'.format(jobname)
        errfile = '/tmp/{}.err'.format(jobname)

        with open(jobfile, 'wb') as fid:
            fid.write('#!/bin/bash\n')
            fid.write(cmd)

        # submit only after the job file has been closed and flushed to disk
        rtn, out = utils.run(
            'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                logfile, errfile, jobname, jobfile))

        if rtn:
            logger.error("Job submission failed. Output follows.")
            logger.error("stdout: {}".format(out))
            sys.exit(1)
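
run_all() is referenced above but not shown. A sketch of the serial loop the docstring describes, walking the nrrd directory and unringing each file; the unring.py command line here is purely hypothetical:

import glob
import os

def run_all(nrrd_dir, config, study):
    """Sketch only: process every .nrrd in serial, since the MATLAB
    dependency rules out parallel jobs. The real helper and command line
    may differ."""
    for nrrd in sorted(glob.glob(os.path.join(nrrd_dir, '*', '*.nrrd'))):
        rtn, out = utils.run('unring.py {}'.format(nrrd))  # hypothetical invocation
        if rtn:
            raise Exception('unring failed on {}:\n{}'.format(nrrd, out))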
Example #36
def main():
    """
    Loops through subjects, preprocessing using supplied script, and runs a
    first-level GLM using AFNI (tent functions, 15 s window) on all subjects.
    """
    arguments = docopt(__doc__)
    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)
    imob_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                            'imob')

    # process a single subject
    if subject:

        # get required inputs from each
        files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
        inputs = get_inputs(files, config)

        # check if subject has already been processed
        if check_complete(imob_dir, subject):
            logger.info('{} already analysed'.format(subject))
            sys.exit(0)

        # first level GLM for inputs
        for input_type in inputs.keys():
            script = generate_analysis_script(subject, inputs, input_type,
                                              config, study)
            rtn, out = utils.run('chmod 754 {}'.format(script))
            rtn, out = utils.run(script)
            if rtn:
                logger.error(
                    'Script {} failed to run on subject {} with error:\n{}'.
                    format(script, subject, out))
                sys.exit(1)

    # process all subjects
    else:
        commands = []
        for path in glob.glob('{}/*'.format(imob_dir)):
            subject = os.path.basename(path)

            # add subject if any of the expected outputs do not exist
            files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
            try:
                inputs = get_inputs(files, config)
            except Exception:
                logger.debug('Invalid inputs for {}'.format(subject))
                continue
            expected = inputs.keys()

            for exp in expected:
                if not any('{}_glm_IM_1stlvl_{}'.format(subject, exp) in x
                           for x in files):
                    commands.append(" ".join(
                        [__file__, study, '--subject {}'.format(subject)]))
                    break

        if commands:
            logger.debug("queueing up the following commands:\n" +
                         '\n'.join(commands))
            #fd, path = tempfile.mkstemp()
            #os.write(fd, '\n'.join(commands))
            #os.close(fd)
            for i, cmd in enumerate(commands):
                jobname = "dm_imob_{}_{}".format(
                    i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run(
                    'qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                        logfile, errfile, jobname, jobfile))
                #rtn, out, err = utils.run('qbatch -i --logdir {logdir} -N {name} --walltime {wt} {cmds}'.format(logdir = log_path, name = jobname, wt = walltime, cmds = path))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
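
generate_analysis_script() is not shown; the docstring says it sets up a first-level GLM with AFNI tent functions over a 15 s window. A heavily hedged sketch of the kind of script it might emit; the onset file, stim label, knot count, and polort order below are assumptions, apart from the {subject}_glm_IM_1stlvl_{type} naming checked in the batch loop:

def generate_analysis_script(subject, inputs, input_type, config, study):
    """Sketch only: write a shell script that runs an AFNI first-level GLM
    with a TENT basis spanning 0-15 s. Paths and labels are illustrative."""
    script = '/tmp/{}_glm_{}.sh'.format(subject, input_type)
    with open(script, 'w') as f:
        f.write('#!/bin/bash\n')
        cmd = ('3dDeconvolve '
               '-input {inputs} '
               '-polort 4 '
               '-num_stimts 1 '
               "-stim_times 1 onsets.1D 'TENT(0,15,6)' "  # hypothetical onsets
               '-stim_label 1 task '
               '-bucket {subj}_glm_IM_1stlvl_{type}\n').format(
                   inputs=' '.join(inputs[input_type]),
                   subj=subject, type=input_type)
        f.write(cmd)
    return script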