Example #1
def run_pipeline(config, subject, t1, t2):
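    """Runs hcp-freesurfer.sh on one subject's T1 and T2 inputs."""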
    if not input_exists(t1) or not input_exists(t2):
        sys.exit(1)
    base_dir = utils.define_folder(config.get_path('hcp_fs'))
    dest_dir = utils.define_folder(os.path.join(base_dir, subject))
    with utils.cd(dest_dir):
        hcp_pipeline = "hcp-freesurfer.sh {} {} {} {}".format(
            base_dir, subject, t1, t2)
        rtn, out = utils.run(hcp_pipeline, dryrun=DRYRUN)
        if rtn:
            logger.error("hcp-freesurfer.sh exited with non-zero status code. "
                         "Output: {}".format(out))
Example #2
def run_hcp_convert(path, config, study):
    """Runs fs2hcp on the input subject"""
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    freesurfer_dir = os.path.join(study_base, config.site_config['paths']['freesurfer'])
    hcp_dir = os.path.join(study_base, config.site_config['paths']['hcp'])
    output_dir = utils.define_folder(os.path.join(hcp_dir, subject))

    if outputs_exist(output_dir):
        logger.debug('outputs found in {}'.format(path))
        sys.exit()

    # reset / remove error.log
    error_log = os.path.join(output_dir, 'error.log')
    if os.path.isfile(error_log):
        os.remove(error_log)

    # run fs2hcp
    command = 'fs2hcp --FSpath={} --HCPpath={} --subject={}'.format(freesurfer_dir, hcp_dir, subject)
    rtn, out = utils.run(command)
    if rtn:
        error_message = "fs2hcp failed: {}\n{}".format(command, out)
        logger.debug(error_message)
        with open(error_log, 'wb') as f:
            f.write('{}\n{}'.format(error_message, NODE))
Example #3
def get_new_subjects(config, qc_subjects):
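    """Returns QC'd subjects that do not yet have FreeSurfer outputs."""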
    fs_subjects = []
    freesurfer_dir = utils.define_folder(config.get_path('freesurfer'))
    for subject in qc_subjects:
        if sid.is_phantom(subject):
            logger.debug("Subject {} is a phantom. Skipping.".format(subject))
            continue
        fs_subject_dir = os.path.join(freesurfer_dir, subject)
        if not outputs_exist(fs_subject_dir):
            fs_subjects.append(subject)
    return fs_subjects
Example #4
def run_all(nrrd_dir, config, study):
    """Finds all non-phantom input nrrds and run unring.py in serial."""
    study_base = config.get_study_base(study)
    subjects = os.listdir(nrrd_dir)
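    # exclude phantom scans, which carry '_PHA_' in their names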
    subjects = filter(lambda x: '_PHA_' not in x, subjects)
    unring_dir = utils.define_folder(
        os.path.join(study_base, config.get_path('unring')))
    tags = config.study_config['unring']['tags']

    for subject in subjects:
        output_dir = utils.define_folder(os.path.join(unring_dir, subject))
        inputs = os.listdir(os.path.join(nrrd_dir, subject))
        inputs = select_inputs(inputs, tags)  # selects inputs with matching tag

        for input_file in inputs:

            # don't run if the outputs of unring already exist
            if outputs_exist(output_dir, input_file):
                continue

            # reset / remove error.log
            error_log = os.path.join(output_dir, 'error.log')
            if os.path.isfile(error_log):
                os.remove(error_log)

            output_fname = os.path.join(output_dir, input_file)
            input_fname = os.path.join(nrrd_dir, subject, input_file)
            cmd = 'unring.py {} {} -v'.format(input_fname, output_fname)
            logger.debug('running {}'.format(cmd))
            rtn, out = utils.run(cmd)
            if rtn:
                error_message = "unring.py failed: {}\n{}".format(cmd, out)
                logger.info(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
Example #5
def main():
    """
    Runs fmri data through the specified epitome script.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']

    # configure logging
    logger.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    for exp, settings in config.study_config['fmri'].iteritems():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in settings:
                logger.error("fmri:{}:{} not defined in configuration file".format(exp, k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])

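    # run a single subject when --subject was given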
    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['fmri']))
            for exp in config.study_config['fmri'].keys():
                expected_names = config.study_config['fmri'][exp]['export']
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join(['python', __file__, study, '--subject {}'.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
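                # wrap the command in a one-line bash script and submit it with qsub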
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #6
def run_epitome(path, config, study):
    """
    Finds the appropriate inputs for input subject, builds a temporary epitome
    folder, runs epitome, and finally copies the outputs to the fmri_dir.
    """
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    nii_dir = os.path.join(study_base, config.site_config['paths']['nii'])
    t1_dir = os.path.join(study_base, config.site_config['paths']['hcp'])
    fmri_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['fmri']))
    experiments = config.study_config['fmri'].keys()

    # run file collection --> epitome --> export for each study
    logger.debug('experiments found {}'.format(experiments))
    for exp in experiments:
        logger.debug('running experiment {}'.format(exp))
        # collect the files needed for each experiment
        expected_names = config.study_config['fmri'][exp]['export']
        expected_tags = config.study_config['fmri'][exp]['tags']
        output_dir = utils.define_folder(os.path.join(fmri_dir, exp, subject))

        # don't run if the outputs of epitome already exist
        if outputs_exist(output_dir, expected_names):
            continue

        # reset / remove error.log
        error_log = os.path.join(output_dir, 'error.log')
        if os.path.isfile(error_log):
            os.remove(error_log)

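        # tracks whether any required input for this experiment is missing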
        failed = False

        if isinstance(expected_tags, str):
            expected_tags = [expected_tags]

        # locate functional data
        files = glob.glob(path + '/*')
        functionals = []
        for tag in expected_tags:
            candidates = filter(lambda x: tag in x, files)
            candidates = utils.filter_niftis(candidates)
            candidates.sort()
            logger.debug('checking functional inputs {}'.format(candidates))
            try:
                check_inputs(config, tag, path, candidates)
            except Exception as m:
                error_message = 'Did not find the correct number of fMRI inputs:\n{}'.format(m)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            functionals.extend(candidates)

        # locate anatomical data
        anat_path = os.path.join(t1_dir, os.path.basename(path), 'T1w')
        files = glob.glob(anat_path + '/*')
        anatomicals = []
        for anat in ['aparc+aseg.nii.gz', 'aparc.a2009s+aseg.nii.gz', 'T1w_brain.nii.gz']:
            if not filter(lambda x: anat in x, files):
                error_message = 'expected anatomical {} not found in {}'.format(anat, anat_path)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            anatomicals.append(os.path.join(anat_path, anat))

        # don't run epitome if all of the inputs do not exist
        if failed:
            continue

        # create and populate epitome directory
        epi_dir = tempfile.mkdtemp()
        utils.make_epitome_folders(epi_dir, len(functionals))
        epi_t1_dir = '{}/TEMP/SUBJ/T1/SESS01'.format(epi_dir)
        epi_func_dir = '{}/TEMP/SUBJ/FUNC/SESS01'.format(epi_dir)

        try:
            shutil.copyfile(anatomicals[0], '{}/anat_aparc_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[1], '{}/anat_aparc2009_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[2], '{}/anat_T1_brain.nii.gz'.format(epi_t1_dir))
            for i, d in enumerate(functionals):
                shutil.copyfile(d, '{}/RUN{}/FUNC.nii.gz'.format(epi_func_dir, '%02d' % (i + 1)))
        except IOError as e:
            error_message = 'unable to copy files to {}\n{}'.format(epi_dir, e)
            logger.error(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # collect command line options
        dims = config.study_config['fmri'][exp]['dims']
        tr = config.study_config['fmri'][exp]['tr']
        delete = config.study_config['fmri'][exp]['del']
        pipeline = config.study_config['fmri'][exp]['pipeline']

        pipeline = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'assets/{}'.format(pipeline))
        if not os.path.isfile(pipeline):
            raise Exception('invalid pipeline {} defined!'.format(pipeline))

        # run epitome
        command = '{} {} {} {} {}'.format(pipeline, epi_dir, delete, tr, dims)
        rtn, out = utils.run(command)
        if rtn:
            error_message = "epitome script failed: {}\n{}".format(command, out)
            logger.debug(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # export fmri data
        epitome_outputs = glob.glob(epi_func_dir + '/*')
        for name in expected_names:
            try:
                matches = filter(lambda x: 'func_' + name in x, epitome_outputs)
                matches.sort()

                # attempt to export the defined epitome stages for all runs
                if len(matches) != len(functionals):
                    error_message = 'epitome output {} not created for all inputs'.format(name)
                    logger.error(error_message)
                    with open(error_log, 'wb') as f:
                        f.write('{}\n{}'.format(error_message, NODE))
                    continue
                for i, match in enumerate(matches):
                    func_basename = utils.splitext(os.path.basename(functionals[i]))[0]
                    func_output = os.path.join(output_dir, func_basename + '_{}.nii.gz'.format(name))
                    export_file(match, func_output)

                # export all anatomical / registration information
                export_file_list('anat_', epitome_outputs, output_dir)
                export_file_list('reg_',  epitome_outputs, output_dir)
                export_file_list('mat_',  epitome_outputs, output_dir)

                # export PARAMS folder
                export_directory(os.path.join(epi_func_dir, 'PARAMS'), os.path.join(output_dir, 'PARAMS'))

            except ProcessingError as p:
                error_message = 'error exporting: {}'.format(p)
                logger.error(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                continue

        # remove temporary directory
        shutil.rmtree(epi_dir)
Example #7
def main():
    """
    Runs fmri data through the specified epitome script.
    """
    arguments = docopt(__doc__)

    study  = arguments['<study>']
    scanid = arguments['--subject']
    debug  = arguments['--debug']
    dryrun = arguments['--dry-run']
    output = arguments['--output']
    exports = arguments['--exports']
    task = arguments['--task']

    # configure logging
    logger.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    # parse optional arguments
    output_dir = output if output else os.path.join(study_base, config.get_path('fmri'))
    opt_exports = exports.split(',') if exports else []

    # check if the requested task is available
    if task:
        if task not in config.study_config['fmri']:
            logger.error('Task {} not found in study config!'.format(task))
            sys.exit(1)
        tasks = {task: config.study_config['fmri'][task]}

    else:
        tasks = config.study_config['fmri']

    for k in ['nii', 'fmri', 'hcp']:
        if k not in config.get_key('Paths'):
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    for exp, settings in tasks.iteritems():
        for k in ['dims', 'del', 'pipeline', 'tags', 'export', 'tr']:
            if k not in settings:
                logger.error("fmri:{}:{} not defined in configuration file".format(exp, k))
                sys.exit(1)

    nii_dir = os.path.join(study_base, config.get_path('nii'))


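    # run a single subject when --subject was given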
    if scanid:
        path = os.path.join(nii_dir, scanid)
        if '_PHA_' in scanid:
            sys.exit('Subject {} is a phantom, cannot be analyzed'.format(scanid))
        try:
            run_epitome(path, config, study, output_dir, opt_exports, tasks)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        nii_dirs = glob.glob('{}/*'.format(nii_dir))

        # find subjects where at least one expected output does not exist
        for path in nii_dirs:
            subject = os.path.basename(path)

            if sid.is_phantom(subject):
                logger.debug("Subject {} is a phantom. Skipping.".format(subject))
                continue

            fmri_dir = utils.define_folder(output_dir)
            for exp in config.study_config['fmri'].keys():
                expected_names = set(config.study_config['fmri'][exp]['export'] + opt_exports)
                subj_dir = os.path.join(fmri_dir, exp, subject)
                if not outputs_exist(subj_dir, expected_names):
                    subjects.append(subject)
                    break

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []

        g_opts = '--output {}'.format(output_dir)
        if exports:
            g_opts += ' --exports {}'.format(exports)
        if task:
            g_opts += ' --task {}'.format(task)
        if debug:
            g_opts += ' --debug'

        for subject in subjects:
            sub_tag = '--subject {}'.format(subject)
            commands.append(" ".join(['python', __file__, study, g_opts, sub_tag]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fmri_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
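                # wrap the command in a one-line bash script and submit it with qsub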
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))

                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #8
def run_epitome(path, config, study, output, exports, tasks):
    """
    Finds the appropriate inputs for input subject, builds a temporary epitome
    folder, runs epitome, and finally copies the outputs to the fmri_dir.
    """
    study_base = config.get_study_base(study)
    subject = os.path.basename(path)
    nii_dir = os.path.join(study_base, config.get_path('nii'))
    t1_dir = os.path.join(study_base, config.get_path('hcp'))

    fmri_dir = utils.define_folder(output)
    experiments = tasks.keys()

    # run file collection --> epitome --> export for each study
    logger.debug('experiments found {}'.format(experiments))
    for exp in experiments:
        logger.debug('running experiment {}'.format(exp))
        # collect the files needed for each experiment
        expected_names = set(config.study_config['fmri'][exp]['export'] + exports)
        expected_tags = config.study_config['fmri'][exp]['tags']
        output_dir = utils.define_folder(os.path.join(fmri_dir, exp, subject))

        # don't run if the outputs of epitome already exist
        if outputs_exist(output_dir, expected_names):
            continue

        # reset / remove error.log
        error_log = os.path.join(output_dir, 'error.log')
        if os.path.isfile(error_log):
            os.remove(error_log)

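        # tracks whether any required input for this experiment is missing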
        failed = False

        if isinstance(expected_tags, str):
            expected_tags = [expected_tags]

        # locate functional data
        files = glob.glob(path + '/*')
        functionals = []
        for tag in expected_tags:
            candidates = filter(lambda x: tag in x, files)
            candidates = utils.filter_niftis(candidates)
            candidates.sort()
            logger.debug('checking functional inputs {}'.format(candidates))
            try:
                check_inputs(config, tag, path, candidates)
            except Exception as m:
                error_message = 'Did not find the correct number of fMRI inputs:\n{}'.format(m)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            functionals.extend(candidates)

        # locate anatomical data
        anat_path = os.path.join(t1_dir, os.path.basename(path), 'T1w')
        files = glob.glob(anat_path + '/*')
        anatomicals = []
        for anat in ['aparc+aseg.nii.gz', 'aparc.a2009s+aseg.nii.gz', 'T1w_brain.nii.gz']:
            if not filter(lambda x: anat in x, files):
                error_message = 'expected anatomical {} not found in {}'.format(anat, anat_path)
                logger.debug(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                failed = True
                break
            anatomicals.append(os.path.join(anat_path, anat))

        # don't run epitome if all of the inputs do not exist
        if failed:
            continue

        # create and populate epitome directory
        epi_dir = tempfile.mkdtemp()
        make_epitome_folders(epi_dir, len(functionals))
        epi_t1_dir = '{}/TEMP/SUBJ/T1/SESS01'.format(epi_dir)
        epi_func_dir = '{}/TEMP/SUBJ/FUNC/SESS01'.format(epi_dir)

        try:
            shutil.copyfile(anatomicals[0], '{}/anat_aparc_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[1], '{}/anat_aparc2009_brain.nii.gz'.format(epi_t1_dir))
            shutil.copyfile(anatomicals[2], '{}/anat_T1_brain.nii.gz'.format(epi_t1_dir))
            for i, d in enumerate(functionals):
                shutil.copyfile(d, '{}/RUN{}/FUNC.nii.gz'.format(epi_func_dir, '%02d' % (i + 1)))
        except IOError as e:
            error_message = 'unable to copy files to {}\n{}'.format(epi_dir, e)
            logger.error(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # collect command line options
        dims = config.study_config['fmri'][exp]['dims']
        tr = config.study_config['fmri'][exp]['tr']
        delete = config.study_config['fmri'][exp]['del']
        pipeline = config.study_config['fmri'][exp]['pipeline']

        pipeline = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'assets/{}'.format(pipeline))
        if not os.path.isfile(pipeline):
            raise Exception('invalid pipeline {} defined!'.format(pipeline))

        # run epitome
        command = '{} {} {} {} {}'.format(pipeline, epi_dir, delete, tr, dims)
        rtn, out = utils.run(command)
        if rtn:
            error_message = "epitome script failed: {}\n{}".format(command, out)
            logger.debug(error_message)
            with open(error_log, 'wb') as f:
                f.write('{}\n{}'.format(error_message, NODE))
            continue

        # export fmri data
        epitome_outputs = glob.glob(epi_func_dir + '/*')
        for name in expected_names:
            try:
                matches = filter(lambda x: 'func_' + name in x, epitome_outputs)
                matches.sort()

                # attempt to export the defined epitome stages for all runs
                if len(matches) != len(functionals):
                    error_message = 'epitome output {} not created for all inputs'.format(name)
                    logger.error(error_message)
                    with open(error_log, 'wb') as f:
                        f.write('{}\n{}'.format(error_message, NODE))
                    continue
                for i, match in enumerate(matches):
                    func_basename = utils.splitext(os.path.basename(functionals[i]))[0]
                    func_output = os.path.join(output_dir, func_basename + '_{}.nii.gz'.format(name))
                    export_file(match, func_output)

                # export all anatomical / registration information
                export_file_list('anat_', epitome_outputs, output_dir)
                export_file_list('reg_',  epitome_outputs, output_dir)
                export_file_list('mat_',  epitome_outputs, output_dir)

                # export PARAMS folder
                export_directory(os.path.join(epi_func_dir, 'PARAMS'), os.path.join(output_dir, 'PARAMS'))

            except ProcessingError as p:
                error_message = 'error exporting: {}'.format(p)
                logger.error(error_message)
                with open(error_log, 'wb') as f:
                    f.write('{}\n{}'.format(error_message, NODE))
                continue

        # remove temporary directory
        shutil.rmtree(epi_dir)
Example #9
def analyze_subject(subject, config, study):
    """
    1) finds the behavioural log files
    2) generates the stimulus timing files from these logs
    3) finds the pre-processed fmri data
    4) runs the standard GLM analysis on these data
    """
    study_base = config.get_study_base(study)
    resources_dir = os.path.join(study_base, config.get_path('resources'))
    ea_dir = os.path.join(study_base, config.get_path('fmri'), 'ea')
    output_dir = utils.define_folder(os.path.join(study_base, config.get_path('fmri'), 'ea', subject))

    # check if subject has already been processed
    if check_complete(ea_dir, subject):
        msg = '{} already analysed'.format(subject)
        logger.info(msg)
        sys.exit(0)

    # reset / remove error.log
    error_log = os.path.join(output_dir, 'error.log')
    if os.path.isfile(error_log):
        os.remove(error_log)

    # find the behavioural data, and exit if we fail to find it
    try:
        resdirs = glob.glob(os.path.join(resources_dir, subject + '_??'))
        resources = []
        for resdir in resdirs:
            resfiles = [os.path.join(dp, f) for dp, dn, fn in os.walk(resdir) for f in fn]
            resources.extend(resfiles)
        logs = filter(lambda x: '.log' in x and 'UCLAEmpAcc' in x, resources)
        logs.sort()
    except Exception:
        logger.error('No BEHAV data for {}.'.format(subject))
        sys.exit(1)

    # if we have the wrong number of logs, don't guess which to use, just fail
    if len(logs) != 3:
        error_message = 'Did not find exactly 3 logs for {}\nfound:{}.'.format(subject, logs)
        logger.error(error_message)
        with open(error_log, 'wb') as f:
            f.write('{}\n{}'.format(error_message, NODE))
        sys.exit(1)

    # parse and write the logs separately for each experiment condition (video or shapes/colours video)
    for test_type in ['vid','cvid']:
        # extract all of the data from the logs
        on_all, dur_all, corr_all, push_all, timings_all = [], [], [], [], []
        try:
            logger.info('Parsing {} logfiles for subject {}'.format(len(logs), subject))
            for log in logs:
                # extract the block id from the logfilename
                block_id = os.path.splitext(os.path.basename(log))[0][-1]
                on, dur, corr, push, timings = process_behav_data(log, output_dir, subject, test_type, block_id)
                on_all.extend(on)
                dur_all.extend(dur)
                corr_all.extend(corr)
                push_all.extend(push)
                timings_all.extend(timings)
        except Exception as e:
            msg = 'Failed to parse logs for {}, with {}.'.format(subject, str(e))
            logger.error(msg)
            sys.exit(1)

        # write data to stimulus timing file for AFNI, and a QC csv
        # on_all = sorted(on_all, key=lambda x:x[1])
        timings_all = sorted(timings_all, key=lambda x: (x[2], x[3], x[1]))    # put the responses into order
        f1 = f2 = f3 = f4 = None
        try:
            logger.info('Writing stimulus data')
            # write each stimulus time:
            #         [start_time]*[amplitude],[buttonpushes]:[block_length]
            #         30*5,0.002:12
            # OFFSET 4 TRs == 8 Seconds!
            # on = on - 8.0
            f1 = open('{}/{}_{}_block-times_ea.1D'.format(output_dir, subject, test_type), 'wb') # stim timing file
            f2 = open('{}/{}_{}_corr_push.csv'.format(output_dir, subject, test_type), 'wb')     # r values and num pushes / minute
            f3 = open('{}/{}_{}_button-times.csv'.format(output_dir, subject, test_type), 'wb')  # button responses and timings
            f4 = open('{}/{}_{}_vid-onsets.csv'.format(output_dir, subject, test_type), 'wb')    # video onset times
            f2.write('correlation,n-pushes-per-minute\n')
            f3.write('Block_ID,Video,Response,Timing\n')
            f4.write('Block_ID,Video,Onset\n')

            for i in range(len(on_all)):
                f1.write('{o:.2f}*{r:.2f},{p}:{d:.2f} '.format(o=on_all[i][1]-8.0, r=corr_all[i], p=push_all[i], d=dur_all[i]))
                f2.write('{r:.2f},{p}\n'.format(r=corr_all[i], p=push_all[i]))
            for timing in timings_all:
                f3.write('{b},{v},{r},{t:.2f}\n'.format(b=timing[2], v=timing[3], r=timing[0], t=timing[1]))
            for onset in on_all:
                f4.write('{b},{r},{t:.2f}\n'.format(b=onset[2], r=onset[0], t=onset[1]))
            f1.write('\n') # add newline at the end of each run (up to 3 runs.)
        except IOError as e:
            msg = 'Failed to write stimulus timing files for {}: {}'.format(subject, e.strerror)
            logger.error(msg)
        finally:
            # close whichever files were successfully opened
            for fh in (f1, f2, f3, f4):
                if fh:
                    fh.close()
Example #10
def main():
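    """Runs fs2hcp on subjects that are missing HCP outputs, directly or via qsub."""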
    arguments = docopt(__doc__)
    study     = arguments['<study>']
    scanid    = arguments['--subject']
    debug     = arguments['--debug']
    dryrun    = arguments['--dry-run']

    # configure logging
    logger.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)

    for k in ['freesurfer', 'hcp']:
        if k not in config.site_config['paths']:
            logger.error("paths:{} not defined in site config".format(k))
            sys.exit(1)

    freesurfer_dir = os.path.join(study_base, config.site_config['paths']['freesurfer'])
    hcp_dir = os.path.join(study_base, config.site_config['paths']['hcp'])

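    # run a single subject when --subject was given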
    if scanid:
        path = os.path.join(freesurfer_dir, scanid)
        try:
            run_hcp_convert(path, config, study)
        except Exception as e:
            logger.error(e)
            sys.exit(1)

    # run in batch mode
    else:
        subjects = []
        freesurfer_dirs = glob.glob('{}/*'.format(freesurfer_dir))

        # find subjects where at least one expected output does not exist
        for path in freesurfer_dirs:
            subject = os.path.basename(path)

            hcp_dir = utils.define_folder(os.path.join(study_base, config.site_config['paths']['hcp']))
            subj_dir = os.path.join(hcp_dir, subject)
            if not outputs_exist(subj_dir):
                subjects.append(subject)

        subjects = list(set(subjects))

        # submit a list of calls to ourself, one per subject
        commands = []
        if debug:
            debugopt = '--debug'
        else:
            debugopt = ''

        for subject in subjects:
            commands.append(" ".join([__file__, study, '--subject {}'.format(subject), debugopt]))

        if commands:
            logger.debug('queueing up the following commands:\n'+'\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = 'dm_fs2hcp_{}_{}'.format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)
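                # wrap the command in a one-line bash script and submit it with qsub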
                with open(jobfile, 'wb') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
Example #11
def analyze_subject(subject, config, study):
    """
    1) finds the behavioural log files
    2) generates the stimulus timing files from these logs
    3) finds the pre-processed fmri data
    4) runs the standard GLM analysis on these data
    """
    study_base = config.get_study_base(study)
    resources_dir = os.path.join(study_base,
                                 config.site_config['paths']['resources'])
    ea_dir = os.path.join(study_base, config.site_config['paths']['fmri'],
                          'ea')
    output_dir = utils.define_folder(
        os.path.join(study_base, config.site_config['paths']['fmri'], 'ea',
                     subject))

    # check if subject has already been processed
    if check_complete(ea_dir, subject):
        msg = '{} already analysed'.format(subject)
        logger.info(msg)
        sys.exit(0)

    # reset / remove error.log
    error_log = os.path.join(output_dir, 'error.log')
    if os.path.isfile(error_log):
        os.remove(error_log)

    # find the behavioural data, and exit if we fail to find it
    try:
        resdirs = glob.glob(os.path.join(resources_dir, subject + '_??'))
        resources = []
        for resdir in resdirs:
            resfiles = [
                os.path.join(dp, f) for dp, dn, fn in os.walk(resdir)
                for f in fn
            ]
            resources.extend(resfiles)
        logs = filter(lambda x: '.log' in x and 'UCLAEmpAcc' in x, resources)
        logs.sort()
    except Exception:
        logger.error('No BEHAV data for {}.'.format(subject))
        sys.exit(1)

    # if we have the wrong number of logs, don't guess which to use, just fail
    if len(logs) != 3:
        error_message = 'Did not find exactly 3 logs for {}\nfound:{}.'.format(
            subject, logs)
        logger.error(error_message)
        with open(error_log, 'wb') as f:
            f.write('{}\n{}'.format(error_message, NODE))
        sys.exit(1)

    # parse and write the logs separately for each experiment condition (video or shapes/colours video)
    for test_type in ['vid', 'cvid']:
        # extract all of the data from the logs
        on_all, dur_all, corr_all, push_all, timings_all = [], [], [], [], []
        try:
            logger.info('Parsing {} logfiles for subject {}'.format(
                len(logs), subject))
            for log in logs:
                # extract the block id from the logfilename
                block_id = os.path.splitext(os.path.basename(log))[0][-1]
                on, dur, corr, push, timings = process_behav_data(
                    log, output_dir, subject, test_type, block_id)
                on_all.extend(on)
                dur_all.extend(dur)
                corr_all.extend(corr)
                push_all.extend(push)
                timings_all.extend(timings)
        except Exception as e:
            msg = 'Failed to parse logs for {}, with {}.'.format(
                subject, str(e))
            logger.error(msg)
            sys.exit(1)

        # write data to stimulus timing file for AFNI, and a QC csv
        # on_all = sorted(on_all, key=lambda x:x[1])
        timings_all = sorted(
            timings_all, key=lambda x:
            (x[2], x[3], x[1]))  # put the responses into order
        f1 = f2 = f3 = f4 = None
        try:
            logger.info('Writing stimulus data')
            # write each stimulus time:
            #         [start_time]*[amplitude],[buttonpushes]:[block_length]
            #         30*5,0.002:12
            # OFFSET 4 TRs == 8 Seconds!
            # on = on - 8.0
            f1 = open('{}/{}_{}_block-times_ea.1D'.format(
                output_dir, subject, test_type), 'wb')  # stim timing file
            f2 = open('{}/{}_{}_corr_push.csv'.format(output_dir, subject,
                                                      test_type),
                      'wb')  # r values and num pushes / minute
            f3 = open('{}/{}_{}_button-times.csv'.format(
                output_dir, subject, test_type),
                      'wb')  # button responses and timings
            f4 = open('{}/{}_{}_vid-onsets.csv'.format(output_dir, subject,
                                                       test_type),
                      'wb')  # video onset times
            f2.write('correlation,n-pushes-per-minute\n')
            f3.write('Block_ID,Video,Response,Timing\n')
            f4.write('Block_ID,Video,Onset\n')

            for i in range(len(on_all)):
                f1.write('{o:.2f}*{r:.2f},{p}:{d:.2f} '.format(o=on_all[i][1] -
                                                               8.0,
                                                               r=corr_all[i],
                                                               p=push_all[i],
                                                               d=dur_all[i]))
                f2.write('{r:.2f},{p}\n'.format(r=corr_all[i], p=push_all[i]))
            for timing in timings_all:
                f3.write('{b},{v},{r},{t:.2f}\n'.format(b=timing[2],
                                                        v=timing[3],
                                                        r=timing[0],
                                                        t=timing[1]))
            for onset in on_all:
                f4.write('{b},{r},{t:.2f}\n'.format(b=onset[2],
                                                    r=onset[0],
                                                    t=onset[1]))
            f1.write(
                '\n')  # add newline at the end of each run (up to 3 runs.)
        except IOError as e:
            msg = 'Failed to write stimulus timing files for {}: {}'.format(
                subject, e.strerror)
            logger.error(msg)
        finally:
            # close whichever files were successfully opened
            for fh in (f1, f2, f3, f4):
                if fh:
                    fh.close()