def run_subject_preproc(jobfile, subject, session=None):
    """ Create jobfile and run it on """
    output_name = os.path.join(
        '/tmp', os.path.basename(jobfile)[:-4] + '_%s.ini' % subject)
    _adapt_jobfile(jobfile, subject, output_name, session)
    # Read the jobfile
    list_subjects, params = _generate_preproc_pipeline(output_name)
    # Preproc and Dump data
    subject_data = do_subjects_preproc(output_name, report=True)
    return subject_data
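
A minimal usage sketch for the helper above, assuming the pypreprocess-style
functions it relies on (_adapt_jobfile, _generate_preproc_pipeline,
do_subjects_preproc) are importable; the jobfile path and subject label are
placeholders.

jobfile = '/path/to/preproc_config.ini'  # placeholder .ini jobfile
subject_data = run_subject_preproc(jobfile, 'sub01', session='ses01')
# Each element is expected to be a SubjectData-like object (an assumption
# based on how subject_data is consumed elsewhere in this listing).
for subject in subject_data:
    print(subject.output_dir)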
Example #2
def generate_glm_input(jobfile):
    """ retrun a list of dictionaries that represent the data available
    for GLM analysis"""
    list_subjects, params = _generate_preproc_pipeline(jobfile)
    output = []
    for subject in list_subjects:
        output_dir = subject.output_dir
        reports_output_dir = os.path.join(output_dir, 'reports')
        basenames = [
            'wr' + os.path.basename(func_)[:-3] for func_ in subject.func
        ]
        gii_basenames = [
            'r' + os.path.basename(func_).split('.')[0] + '_fsaverage_lh.gii'
            for func_ in subject.func
        ]
        gii_basenames += [
            'r' + os.path.basename(func_).split('.')[0] + '_fsaverage_rh.gii'
            for func_ in subject.func
        ]
        func = [
            os.path.join(output_dir, 'freesurfer', basename)
            for basename in gii_basenames
        ]
        realignment_parameters = [
            os.path.join(session_output_dir, 'rp_' + basename[2:-4] + '.txt')
            for (session_output_dir,
                 basename) in zip(subject.session_output_dirs, basenames)
        ] * 2
        session_ids = [
            session_id
            for (session_id, onset) in zip(subject.session_id, subject.onset)
            if onset is not None
        ]
        onsets = [onset for onset in subject.onset if onset is not None]
        subject_ = {
            'output_dir': output_dir,
            'session_output_dirs': subject.session_output_dirs,
            'subject_id': subject.subject_id,
            'session_id': session_ids * 2,
            'TR': subject.TR,
            'drift_model': subject.drift_model,
            'hfcut': subject.hfcut,
            'time_units': subject.time_units,
            'hrf_model': subject.hrf_model,
            'onset': onsets * 2,
            'report': True,
            'reports_output_dir': reports_output_dir,
            'basenames': gii_basenames,
            'func': func,
            'realignment_parameters': realignment_parameters,
        }
        output.append(subject_)
    return output
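
A short sketch of inspecting the dictionaries this variant produces; the
jobfile path is a placeholder. Left- and right-hemisphere .gii runs are
concatenated in 'func', which is why session ids, onsets, and realignment
parameters are duplicated with * 2 above.

for subject_dic in generate_glm_input('/path/to/jobfile.ini'):
    # One entry per hemisphere per run, hence twice the number of raw runs.
    print(subject_dic['subject_id'], len(subject_dic['func']))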
Example #3
def generate_glm_input(jobfile, smooth=None, lowres=False):
    """ retrun a list of dictionaries that represent the data available
    for GLM analysis"""
    list_subjects, params = _generate_preproc_pipeline(jobfile)
    output = []
    for subject in list_subjects:
        if lowres:
            output_dir = subject.output_dir.replace('derivatives', '3mm')
        elif smooth is not None:
            output_dir = subject.output_dir.replace('derivatives',
                                                    'smooth_derivatives')
        else:
            output_dir = subject.output_dir
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        anat = glob.glob(
            os.path.join(subject.anat_output_dir, 'wsub*_T1w.nii.gz'))[0]
        reports_output_dir = os.path.join(output_dir, 'reports')
        report_log_filename = os.path.join(reports_output_dir,
                                           'report_log.html')
        report_preproc_filename = os.path.join(reports_output_dir,
                                               'report_preproc.html')
        report_filename = os.path.join(reports_output_dir, 'report.html')
        tmp_output_dir = os.path.join(output_dir, 'tmp')
        basenames = [
            'wr' + os.path.basename(func_)[:-3] for func_ in subject.func
        ]
        func = [
            os.path.join(session_output_dir, basename + '.gz')
            for (session_output_dir,
                 basename) in zip(subject.session_output_dirs, basenames)
        ]
        if lowres:
            func = [f.replace('derivatives', '3mm') for f in func]

        realignment_parameters = [
            os.path.join(session_output_dir, 'rp_' + basename[2:-4] + '.txt')
            for (session_output_dir,
                 basename) in zip(subject.session_output_dirs, basenames)
        ]

        hrf_model = subject.hrf_model
        if 'retino' in jobfile:
            hrf_model = 'spm'

        subject_ = {
            'scratch': output_dir,
            'output_dir': output_dir,
            'session_output_dirs': subject.session_output_dirs,
            'anat_output_dir': subject.anat_output_dir,
            'tmp_output_dir': tmp_output_dir,
            'data_dir': subject.data_dir,
            'subject_id': subject.subject_id,
            'session_id': subject.session_id,
            'TR': subject.TR,
            'drift_model': subject.drift_model,
            'high_pass': 1. / 128,
            'time_units': subject.time_units,
            'hrf_model': hrf_model,
            'anat': anat,
            'onset': subject.onset,
            'report': True,
            'reports_output_dir': reports_output_dir,
            'report_log_filename': report_log_filename,
            'report_preproc_filename': report_preproc_filename,
            'report_filename': report_filename,
            'basenames': basenames,
            'func': func,
            'n_sessions': len(func),
            'realignment_parameters': realignment_parameters,
        }
        output.append(subject_)
    return output
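
The lowres and smooth branches above only swap the 'derivatives' component of
each output path; a tiny illustration with a made-up path:

p = '/data/study/derivatives/sub01/ses01/func/wrsub01_task_bold.nii.gz'
print(p.replace('derivatives', '3mm'))                 # lowres variant
print(p.replace('derivatives', 'smooth_derivatives'))  # smoothed variant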
Example #4
    # CONFIGURATION
    protocols = [
        'WM', 'MOTOR', 'LANGUAGE', 'EMOTION', 'GAMBLING', 'RELATIONAL',
        'SOCIAL'
    ]
    slicer = 'ortho'  # slicer of activation maps QA
    cut_coords = None
    threshold = 3.
    cluster_th = 15  # minimum number of voxels in reported clusters

    ####################################
    # read input configuration
    conf_file = os.path.join(os.path.dirname(sys.argv[0]), "HCP.ini")

    for protocol in protocols:
        subjects, preproc_params = _generate_preproc_pipeline(
            conf_file, protocol=protocol)

        fwhm = preproc_params.get("fwhm")
        task_output_dir = os.path.join(os.path.dirname(subjects[0].output_dir))
        kwargs = {
            "regress_motion": True,
            "slicer": slicer,
            "threshold": threshold,
            "cluster_th": cluster_th,
            "protocol": protocol,
            "dc":
            not preproc_params.get("disable_distortion_correction", False),
            "realign": preproc_params["realign"],
            "coregister": preproc_params["coregister"],
            "segment": preproc_params["segment"],
            "normalize": preproc_params["normalize"],
Example #5
def generate_glm_input(jobfile, smooth=None, lowres=False):
    """ retrun a list of dictionaries that represent the data available
    for GLM analysis"""
    list_subjects, params = _generate_preproc_pipeline(jobfile)
    output = []
    for subject in list_subjects:
        output_dir = subject.output_dir
        print(output_dir)

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        # use normalized anat
        anat = os.path.join(os.path.dirname(subject.anat),
                            'w' + os.path.basename(subject.anat))

        # use normalized fMRI
        basenames = [
            'wr' + os.path.basename(func_)[:-3] for func_ in subject.func
        ]
        dirnames = [os.path.dirname(func_) for func_ in subject.func]
        func = [
            os.path.join(dirname, basename + '.gz')
            for (dirname, basename) in zip(dirnames, basenames)
        ]
        if lowres:
            func = [f.replace('derivatives', '3mm') for f in func]

        realignment_parameters = [
            os.path.join(dirname, 'rp_' + basename[2:-4] + '.txt')
            for (dirname, basename) in zip(dirnames, basenames)
        ]

        # misc report directories
        reports_output_dir = os.path.join(output_dir, 'reports')
        report_log_filename = os.path.join(reports_output_dir,
                                           'report_log.html')
        report_preproc_filename = os.path.join(reports_output_dir,
                                               'report_preproc.html')
        report_filename = os.path.join(reports_output_dir, 'report.html')
        tmp_output_dir = os.path.join(output_dir, 'tmp')
        subject_ = {
            'scratch': output_dir,
            'output_dir': output_dir,
            'session_output_dirs': subject.session_output_dirs,
            'anat_output_dir': subject.anat_output_dir,
            'tmp_output_dir': tmp_output_dir,
            'data_dir': subject.data_dir,
            'subject_id': subject.subject_id,
            'session_id': subject.session_id,
            'TR': subject.TR,
            'drift_model': subject.drift_model,
            'hfcut': subject.hfcut,
            'time_units': subject.time_units,
            'hrf_model': subject.hrf_model,
            'anat': anat,
            'onset': subject.onset,
            'report': True,
            'reports_output_dir': reports_output_dir,
            'report_log_filename': report_log_filename,
            'report_preproc_filename': report_preproc_filename,
            'report_filename': report_filename,
            'basenames': basenames,
            'func': func,
            'n_sessions': len(func),
            'realignment_parameters': realignment_parameters,
        }
        output.append(subject_)
    return output
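
As with the earlier variants, a hedged usage sketch; the jobfile path and the
lowres flag are illustrative:

for subject_dic in generate_glm_input('/path/to/jobfile.ini', lowres=True):
    print(subject_dic['subject_id'], subject_dic['n_sessions'])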
Example #6
        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
    )

    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename
    return z_maps


if __name__ == '__main__':
    # File containing configuration for preprocessing the data
    jobfile = (
        '/neurospin/unicog/resources/git_depot/unicog/unicogfmri/localizer'
        '/volume_glm/Step1_config.ini')
    list_subjects, params = _generate_preproc_pipeline(jobfile)

    # Preproc
    subject_data = preproc(jobfile)

    # first_level
    for dict_subject in subject_data:
        dict_subject = dict_subject.__dict__
        z_maps = first_level(dict_subject)
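
The __dict__ conversion above turns each SubjectData-like result into the
plain dictionary that first_level expects; a tiny stand-in illustration:

class _SubjectStub(object):
    # Stand-in for the objects returned by preproc(jobfile).
    def __init__(self):
        self.subject_id = 'sub01'
        self.TR = 2.0

print(_SubjectStub().__dict__)  # {'subject_id': 'sub01', 'TR': 2.0}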
Example #7
    protocols = [
        'WM', 'MOTOR', 'LANGUAGE', 'EMOTION', 'GAMBLING', 'RELATIONAL',
        'SOCIAL'
    ]
    slicer = 'ortho'  # slicer of activation maps QA
    cut_coords = None
    threshold = 3.
    cluster_th = 15  # minimum number of voxels in reported clusters

    ####################################
    # read input configuration
    conf_file = os.path.join(os.path.dirname(sys.argv[0]), "HCP.ini")

    for protocol in protocols:
        subjects, preproc_params = _generate_preproc_pipeline(
            conf_file, protocol=protocol)

        fwhm = preproc_params.get("fwhm")
        task_output_dir = os.path.join(os.path.dirname(subjects[0].output_dir))
        kwargs = {"regress_motion": True,
                  "slicer": slicer,
                  "threshold": threshold,
                  "cluster_th": cluster_th,
                  "protocol": protocol,
                  "dc": not preproc_params.get(
                "disable_distortion_correction", False),
                  "realign": preproc_params["realign"],
                  "coregister": preproc_params["coregister"],
                  "segment": preproc_params["segment"],
                  "normalize": preproc_params["normalize"],
                  'func_write_voxel_sizes': preproc_params[
        subject_id="sub001",
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_dic['session_id'],
    
        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )
    
    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename
    return z_maps

if __name__ == '__main__':
    # File containing configuration for preprocessing the data
    jobfile = sys.argv[1]
    list_subjects, params = _generate_preproc_pipeline(jobfile)

    # Preproc
    subject_data = preproc(jobfile)

    # first_level
    for dict_subject in subject_data:
        dict_subject = dict_subject.__dict__
        z_maps = first_level(dict_subject)
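
Finally, a hedged sketch of persisting the z_maps returned by first_level,
assuming (typical for this kind of pipeline, but not shown above) that z_maps
maps contrast names to nibabel images:

import os
import nibabel as nib

out_dir = dict_subject['output_dir']
for contrast, z_map in z_maps.items():
    nib.save(z_map, os.path.join(out_dir, 'z_%s.nii.gz' % contrast))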