Example #1
def create_workflow():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='data', help="Path to bids dataset")
    args = parser.parse_args()
    if not os.path.exists(args.data):
        raise IOError('Input data not found')
    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)

    # grab data from bids structure
    layout = BIDSLayout(args.data)
    subj = layout.get_subjects()[0]
    func = [
        f.filename
        for f in layout.get(subject=subj, type='bold', extensions=['nii.gz'])
    ][0]

    outfile = os.path.join(OUTDIR, 'test_{}_{}_motcor'.format(subj, ENV['os']))

    # run interface
    realign = Realign()
    realign.inputs.in_files = func
    # FIX: this has to be unique for each environment
    realign.inputs.out_prefix = outfile
    res = realign.run()

    # write out json to keep track of information
    ENV.update({'inputs': res.inputs})
    ENV.update({'nipype_version': nipype.__version__})
    #ENV.update({'outputs': res.outputs})
    # write out to json
    env_to_json(ENV, outname=outfile + '.json')
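Examples #1, #2, and #5 all rely on module-level OUTDIR and ENV globals and an env_to_json helper that the excerpts do not show. A minimal sketch of plausible definitions follows; the names and contents are assumptions for context, not the original code.

import json
import os
import platform

# Hypothetical stand-ins for the globals assumed by the workflow excerpts.
OUTDIR = os.path.abspath('output')
ENV = {'os': platform.system().lower()}


def env_to_json(env, outname):
    # Dump the provenance dict to a JSON file; default=str copes with non-serializable values.
    with open(outname, 'w') as fp:
        json.dump(env, fp, indent=2, default=str)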
Example #2
def create_workflow():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='data', help="Path to bids dataset")
    args = parser.parse_args()
    if not os.path.exists(args.data):
        raise IOError('Input data not found')
    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)

    # grab data from bids structure
    layout = BIDSLayout(args.data)
    subj = layout.get_subjects()[0]
    t1 = [
        f.filename
        for f in layout.get(subject=subj, type='T1w', extensions=['nii.gz'])
    ][0]

    outfile = os.path.join(OUTDIR, 'test_{}_{}_brain'.format(subj, ENV['os']))

    # run afni skullstrip
    skullstrip = SkullStrip()
    skullstrip.inputs.in_file = t1
    skullstrip.inputs.outputtype = 'NIFTI_GZ'
    # FIX: this has to be unique for each environment
    skullstrip.inputs.out_file = outfile + '.nii.gz'
    res = skullstrip.run()

    # write out json to keep track of information
    ENV.update({'inputs': res.inputs})
    #ENV.update({'outputs': res.outputs})
    # write out to json
    env_to_json(ENV, outname=outfile + '.json')
Example #3
    def new_project(self, ui_info):
        new_project = gui.CMP_Project_Info()
        np_res = new_project.configure_traits(view='create_view')

        if np_res and os.path.exists(new_project.base_directory):
            try:
                bids_layout = BIDSLayout(new_project.base_directory)
                for subj in bids_layout.get_subjects():
                    new_project.subjects.append('sub-' + str(subj))
                # new_project.subjects = ['sub-'+str(subj) for subj in bids_layout.get_subjects()]

                # new_project.configure_traits(subject=Enum(*subjects))
                # print new_project.subjects

                print "Default subject : " + new_project.subject
                np_res = new_project.configure_traits(view='subject_view')
                print "Selected subject : " + new_project.subject
            except:
                error(
                    message=
                    "Invalid BIDS dataset. Please see documentation for more details.",
                    title="BIDS error")

            self.pipeline = init_project(new_project, True)
            if self.pipeline != None:
                # update_last_processed(new_project, self.pipeline) # Not required as the project is new, so no update should be done on processing status
                ui_info.ui.context["object"].project_info = new_project
                ui_info.ui.context["object"].pipeline = self.pipeline
                self.project_loaded = True
Example #4
def write_scantsv(bids_dir, dicom_dir=None, live=False):
    """Make subject scan files (needed for NDA submission)"""
    # TODO: improve with metadata
    if not os.path.exists(dicom_dir):
        logging.warning('Specify valid dicom directory with [-d] flag')
        return
    layout = BIDSLayout(bids_dir)
    subs = sorted([x for x in layout.get_subjects()])
    for sid in subs:
        dcm = read_file(glob(op(dicom_dir, '*' + sid, '*'))[-1],
                        force=True).AcquisitionDate
        date = '-'.join([dcm[:4], dcm[4:6], dcm[6:]])
        logging.info("{0}'s scan date: {1}".format(sid, date))
        scans = []
        for scan in [
                f.filename
                for f in layout.get(subject=sid, extensions=['nii', 'nii.gz'])
        ]:
            paths = scan.split(os.sep)
            scans.append(os.sep.join(paths[-2:]))
            outname = op(bids_dir, paths[-3], paths[-3] + '_scans.tsv')
        if live:
            with open(outname, 'wt') as tsvfile:
                writer = csv.writer(tsvfile, delimiter='\t')
                writer.writerow(['filename', 'acq_time'])
                for scan in sorted(scans):
                    writer.writerow([scan, date])
            logging.info('Wrote {0}'.format(outname))
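A hypothetical invocation of write_scantsv. With live=False the function only reads the DICOMs and logs the scan dates, so a dry run before the real write is a natural pattern; the paths are placeholders, and the excerpt itself assumes pydicom's read_file plus an op alias for os.path.join.

import logging

logging.basicConfig(level=logging.INFO)
# Dry run: report scan dates without touching any *_scans.tsv files.
write_scantsv('/data/bids', dicom_dir='/data/dicoms', live=False)
# Real run: write one scans.tsv per subject directory.
write_scantsv('/data/bids', dicom_dir='/data/dicoms', live=True)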
Example #5
def create_workflow():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='data', help="Path to bids dataset")
    args = parser.parse_args()
    if not os.path.exists(args.data):
        raise IOError('Input data not found')
    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)

    # grab data from bids structure
    layout = BIDSLayout(args.data)
    subj = layout.get_subjects()[0]
    func = [
        f.filename
        for f in layout.get(subject=subj, type='bold', extensions=['nii.gz'])
    ][0]

    outfile = os.path.join(OUTDIR, 'test_{}_{}_motcor'.format(subj, ENV['os']))

    # run interface
    # Just SpatialRealign for the moment - TODO add time element with tr/slices
    realign = SpaceTimeRealigner()
    realign.inputs.in_file = [func]
    # no out_file input, will need to be renamed after
    res = realign.run()

    # write out json to keep track of information
    ENV.update({'inputs': res.inputs})
    ENV.update({'nipype_version': nipype.__version__})
    #ENV.update({'outputs': res.outputs})
    # write out to json
    env_to_json(ENV, outname=outfile + '.json')
Example #6
def get_participants(nip):

    from bids.grabbids import BIDSLayout

    layout = BIDSLayout(nip.input_path)
    participants = layout.get_subjects()

    return list(set(participants) - set(nip.skipped_participants))
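get_participants only needs an object exposing input_path and skipped_participants attributes; a hypothetical call with a simple namespace standing in for nip:

from types import SimpleNamespace

# Hypothetical stand-in for the `nip` argument; the path and labels are placeholders.
nip = SimpleNamespace(input_path='/data/bids', skipped_participants=['03'])
print(get_participants(nip))  # remaining subject labels, in arbitrary order (set difference)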
Example #7
    def change_subject(self, ui_info):
        changed_project = ui_info.ui.context["object"].project_info

        print "BIDS directoy : %s" % changed_project.base_directory
        try:
            bids_layout = BIDSLayout(changed_project.base_directory)
            changed_project.subjects = []
            for subj in bids_layout.get_subjects():
                changed_project.subjects.append('sub-' + str(subj))
            # changed_project.subjects = ['sub-'+str(subj) for subj in bids_layout.get_subjects()]
            print "Subjects : %s" % changed_project.subjects

            print "Previous selected subject : %s" % changed_project.subject
            changed_project.configure_traits(view='subject_view')
            print "New selected subject : %s" % changed_project.subject
        except:
            error(
                message=
                "Invalid BIDS dataset. Please see documentation for more details.",
                title="BIDS error")

        self.inputs_checked = False

        changed_project.config_file = os.path.join(
            changed_project.base_directory, 'derivatives', '%s_%s_config.ini' %
            (changed_project.subject, changed_project.process_type))

        if os.path.isfile(
                changed_project.config_file
        ):  # If existing config file / connectome data, load subject project

            print "Existing config file for subject %s: %s" % (
                changed_project.config_file, changed_project.subject)

            changed_project.process_type = get_process_detail(
                changed_project, 'Global', 'process_type')
            changed_project.diffusion_imaging_model = get_process_detail(
                changed_project, 'Global', 'diffusion_imaging_model')

            self.pipeline = init_project(changed_project, False)
            if self.pipeline != None:
                update_last_processed(changed_project, self.pipeline)
                ui_info.ui.context["object"].project_info = changed_project
                ui_info.ui.context["object"].pipeline = self.pipeline
                print "Config for subject %s loaded !" % ui_info.ui.context[
                    "object"].project_info.subject
                self.project_loaded = True

        else:
            print "Not existing config file (%s) / connectome data for subject %s - Created new project" % (
                changed_project, changed_project.subject)
            self.pipeline = init_project(changed_project, True)
            if self.pipeline != None:
                # update_last_processed(new_project, self.pipeline) # Not required as the project is new, so no update should be done on processing status
                ui_info.ui.context["object"].project_info = changed_project
                ui_info.ui.context["object"].pipeline = self.pipeline
                self.project_loaded = True
Example #8
def _get_subjects(root_input_folder):
    """
    build subject list from either input arguments (participant_label, participant_file) or
    (if participant_label and participant_file are not specified) input data in bids_input_folder,
    then remove subjects from the list according to participant_exclusion_file (if any)
    """
    layout = BIDSLayout(root_input_folder)
    return [(os.path.abspath(
        os.path.join(root_input_folder, "sub-{}".format(subject))), subject)
            for subject in layout.get_subjects()]
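As excerpted, _get_subjects ignores the participant_label / participant_file arguments mentioned in its docstring and simply returns one (absolute subject directory, subject label) pair per subject; a hypothetical iteration:

# Hypothetical usage; the dataset path is a placeholder.
for sub_dir, label in _get_subjects('/data/bids'):
    print(label, sub_dir)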
Example #9
def anon_acqtimes(dset_dir):
    """
    Anonymize acquisition datetimes for a dataset. Works for both longitudinal
    and cross-sectional studies. The time of day is preserved, but the first
    scan is set to January 1st, 1800. In a longitudinal study, each session is
    anonymized relative to the first session, so that time between sessions is
    preserved.

    Overwrites scan tsv files in dataset. Only run this *after* data collection
    is complete for the study, especially if it's longitudinal.

    Parameters
    ----------
    dset_dir : str
        Path to BIDS dataset to be anonymized.
    """
    bl_dt = parser.parse('1800-01-01')

    layout = BIDSLayout(dset_dir)
    subjects = layout.get_subjects()
    sessions = sorted(layout.get_sessions())

    for sub in subjects:
        if not sessions:
            scans_file = op.join(dset_dir,
                                 'sub-{0}/sub-{0}_scans.tsv'.format(sub))
            df = pd.read_csv(scans_file, sep='\t')
            first_scan = df['acq_time'].min()
            first_dt = parser.parse(first_scan.split('T')[0])
            diff = first_dt - bl_dt
            acq_times = df['acq_time'].apply(parser.parse)
            acq_times = (acq_times - diff).astype(str)
            df['acq_time'] = acq_times
            # df.to_csv(scans_file, sep='\t', index=False)
        else:
            # Separated from dataset sessions in case subject missed some
            sub_ses = sorted(layout.get_sessions(subject=sub))
            for i, ses in enumerate(sub_ses):
                scans_file = op.join(dset_dir,
                                     'sub-{0}/ses-{1}/sub-{0}_ses-{1}_scans.'
                                     'tsv'.format(sub, ses))
                df = pd.read_csv(scans_file, sep='\t')
                if i == 0:
                    # Anonymize in terms of first scan for subject.
                    first_scan = df['acq_time'].min()
                    first_dt = parser.parse(first_scan.split('T')[0])
                    diff = first_dt - bl_dt

                acq_times = df['acq_time'].apply(parser.parse)
                acq_times = (acq_times - diff).astype(str)
                df['acq_time'] = acq_times
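The excerpt above uses parser, pd, and op without showing their imports; presumably they are dateutil.parser, pandas, and os.path bound at module level. A sketch of those assumed imports plus a call (note that the df.to_csv write is commented out above, so nothing is overwritten as excerpted):

import os.path as op

import pandas as pd
from dateutil import parser

anon_acqtimes('/data/bids')  # placeholder path; shifts acq_time values relative to 1800-01-01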
Example #10
def get_bids_surf_data_node(path_bids):
    layout = BIDSLayout(path_bids)
    subjects = layout.get_subjects()
    sessions = layout.get_sessions()
    print("Found {} subjects and {} sessions in the dataset".format(
        len(subjects), len(sessions)))
    bids_data_grabber = Node(Function(
        function=get_bids_surf_data,
        input_names=["path_bids", "subject", "session", "output_dir"],
        output_names=[
            "lh_surf", "rh_surf", "confounds", "outputDir", "prefix"
        ]),
                             name="SurfaceDataGrabber")
    bids_data_grabber.inputs.path_bids = path_bids
    bids_data_grabber.inputs.output_dir = opj(path_bids, "derivatives",
                                              "connectivityWorkflowSurface")
    bids_data_grabber.iterables = [("subject", subjects),
                                   ("session", sessions)]
    return bids_data_grabber
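A hypothetical way to drop the grabber into a Nipype workflow; downstream nodes (not shown here) would connect to its lh_surf / rh_surf / confounds outputs.

from nipype import Workflow

grabber = get_bids_surf_data_node('/data/bids')  # placeholder dataset path
wf = Workflow(name='surface_connectivity', base_dir='/tmp/work')
wf.add_nodes([grabber])
# wf.connect(grabber, 'lh_surf', some_downstream_node, 'in_file')
wf.run(plugin='MultiProc')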
Example #11
def get_scan_duration(output_dir, modality="func", task="rest"):
    """

    """
    layout = BIDSLayout(output_dir)
    subjects_list = layout.get_subjects()

    scan_duration = pd.DataFrame([])

    #
    for sub_id in subjects_list:
        sub_dir = os.path.join(output_dir, "sub-" + sub_id)
        ses_id_list = layout.get_sessions(subject=sub_id)

        for ses_id in ses_id_list:
            sub_ses_path = os.path.join(sub_dir, "ses-" + ses_id)
            f = layout.get(subject=sub_id,
                           session=ses_id,
                           modality=modality,
                           task=task,
                           extensions='.nii.gz')
            if len(f) > 1:
                raise Exception(
                    "something went wrong, more than one %s %s file detected: %s"
                    % (modality, task, f))
            elif len(f) == 1:
                duration = (layout.get_metadata(
                    f[0].filename)["ScanDurationSec"])
                scan_duration_sub = pd.DataFrame(
                    OrderedDict([("subject_id", sub_id),
                                 ("sesssion_id", ses_id),
                                 ("scan_duration_s", [duration])]))
                scan_duration = scan_duration.append(scan_duration_sub)

    out_str = modality
    if task:
        out_str += "_" + task
    output_file = os.path.join(output_dir, "scan_duration_%s.tsv" % out_str)
    print("Writing scan duration to %s" % output_file)
    to_tsv(scan_duration, output_file)
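get_scan_duration (and several later examples) call a to_tsv helper that is not part of the excerpt; a plausible one-liner, assuming the argument is a pandas DataFrame:

def to_tsv(df, path):
    # Hypothetical helper: write a DataFrame as a tab-separated file without the index.
    df.to_csv(path, sep='\t', index=False)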
Example #12
    def create_RDD(self, sc):

        sub_dir = "tar_files"

        layout = BIDSLayout(self.bids_dataset)
        participants = layout.get_subjects()

        # Create RDD of file paths as key and tarred subject data as value
        if self.use_hdfs:
            for sub in participants:
                layout.get(subject=sub)
                self.create_tar_file(sub_dir, "sub-{0}.tar".format(sub),
                                     layout.files)

            return sc.binaryFiles("file://" + os.path.abspath(sub_dir))

        # Create RDD of (subject name, None) tuples, with no data attached
        it = iter(participants)
        empty_list = [None] * len(participants)
        list_participants = zip(it, empty_list)

        return sc.parallelize(list_participants)
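A hypothetical driver for create_RDD, assuming a local Spark context and an instance (runner) of the class this method belongs to, which is not shown in the excerpt:

from pyspark import SparkContext

sc = SparkContext(master='local[*]', appName='bids-rdd')
rdd = runner.create_RDD(sc)  # `runner` is hypothetical; its class defines bids_dataset, use_hdfs, create_tar_file
print(rdd.count())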
Example #13
def GetBidsDataGrabberNode(pathBids):
    layout = BIDSLayout(pathBids)
    subjects = layout.get_subjects()
    sessions = layout.get_sessions()
    print("Found {} subjects and {} sessions in the dataset".format(
        len(subjects), len(sessions)))
    #Initialize the dataGrabber node
    BIDSDataGrabber = Node(Function(
        function=get_BidsData,
        input_names=["pathBids", "subject", "session", "outputDir"],
        output_names=[
            "aparcaseg", "preproc", "confounds", "outputDir", "prefix"
        ]),
                           name="FunctionalDataGrabber")
    #Specify path to dataset
    BIDSDataGrabber.inputs.pathBids = pathBids
    BIDSDataGrabber.inputs.outputDir = opj(pathBids, "derivatives",
                                           "connectivityWorkflow")
    #Specify subjects and sessions to iterate over them
    #Stored in iterables for multiprocessing purpose
    BIDSDataGrabber.iterables = [("subject", subjects), ("session", sessions)]
    #Return the node
    return BIDSDataGrabber
Example #14
def generateApine(bids_dir, dset=None):
    """generateApine takes a bids directory and optionally dataset name,
    and generates an Apine JSON object.

    Parameters
    ----------
    bids_dir : str
        The BIDS data directory.
    dset : str
        The dataset name. If none is provided, the directory will be used.

    Returns
    -------
    dict
        Apine dictionary object.
    """
    bids = BIDSLayout(bids_dir)
    apine = list()

    # For every entity...
    for subid in bids.get_subjects():
        current = OrderedDict()
        current["dataset"] = bids_dir if dset is None else dset
        current["participant"] = subid

        if not op.isdir(op.join(bids_dir, 'sub-{}'.format(subid))):
            print("sub-{} detected, but no directory found!!".format(subid))
            continue

        # And for every session...
        nosesh = len(bids.get_sessions()) == 0
        sesh_array = [None] if nosesh else bids.get_sessions()
        for sesid in sesh_array:
            if not nosesh:
                current["session"] = sesid

            # And for every modality...
            for mod in bids.get_modalities():
                current["modality"] = mod

                # Get corresponding data
                if nosesh:
                    data = bids.get(subject=subid,
                                    modality=mod,
                                    extensions="nii|nii.gz")
                else:
                    data = bids.get(subject=subid,
                                    session=sesid,
                                    modality=mod,
                                    extensions="nii|nii.gz")

                # Now, for every piece of data for this participant, session, and modality...
                for dat in data:
                    # Add the filename
                    current["filename"] = op.basename(dat.filename)
                    cleanname = op.basename(dat.filename).split('.')[0]
                    current["filename_keys"] = [
                        keyval for keyval in cleanname.split("_")
                        if "sub-" not in keyval and "ses-" not in keyval
                    ]
                    tmp = deepcopy(current)
                    apine += [tmp]

    return apine
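A hypothetical usage that serializes the returned Apine records to JSON; the dataset path and output file name are placeholders:

import json

records = generateApine('/data/bids', dset='my_study')
with open('apine.json', 'w') as fp:
    json.dump(records, fp, indent=2)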
Example #15
def calc_demos(
    output_dir,
    ses_id_list,
    raw_dir,
    in_ses_folder,
    demo_file,
    pwd,
    use_new_ids=True,
    new_id_lut_file=None,
    public_output=True,
):
    '''
    Calculates demos from acq_time
    '''
    assert pwd != "", "password empty"
    demo_df = read_protected_file(demo_file, pwd, "demos.txt")

    out_demo_df = pd.DataFrame([])
    out_acq_time_df = pd.DataFrame([])

    layout = BIDSLayout(output_dir)
    new_sub_id_list = layout.get_subjects()

    for new_subject_id in new_sub_id_list:
        old_subject_id = get_private_sub_id(new_subject_id, new_id_lut_file)

        for old_ses_id in ses_id_list:
            subject_ses_folder = os.path.join(raw_dir, old_ses_id,
                                              in_ses_folder)
            os.chdir(subject_ses_folder)
            subject_folder = sorted(glob(old_subject_id + "*"))
            assert len(
                subject_folder
            ) < 2, "more than one subject folder %s" % old_subject_id

            if subject_folder:
                subject_folder = subject_folder[0]
                abs_subject_folder = os.path.abspath(subject_folder)
                os.chdir(abs_subject_folder)

                if use_new_ids:
                    bids_sub = new_subject_id
                else:
                    bids_sub = get_clean_subject_id(old_subject_id)
                bids_ses = get_clean_ses_id(old_ses_id)

                par_file_list = glob(os.path.join(abs_subject_folder, "*.par"))

                if par_file_list:
                    par_file = par_file_list[0]
                    df_subject, df_acq_time_subject = fetch_demos(
                        demo_df, old_subject_id, bids_sub, bids_ses, par_file)
                    out_demo_df = pd.concat((out_demo_df, df_subject))
                    out_acq_time_df = pd.concat(
                        (out_acq_time_df, df_acq_time_subject))

    to_tsv(out_demo_df, os.path.join(output_dir, "participants.tsv"))
    if not public_output:
        to_tsv(out_acq_time_df, os.path.join(output_dir, "acq_time.tsv"))

    print("\n\n\n\nDONE.\nExported demos for %d subjects." %
          len(new_sub_id_list))
    print(new_sub_id_list)
Example #16
def run_rsHRF():
    parser = get_parser()

    args = parser.parse_args()

    arg_groups = {}

    for group in parser._action_groups:
        group_dict = {
            a.dest: getattr(args, a.dest, None)
            for a in group._group_actions
        }
        arg_groups[group.title] = group_dict

    para = arg_groups['Parameters']

    if args.input_file is not None and args.analysis_level:
        parser.error(
            'analysis_level cannot be used with --input_file, do not supply it'
        )

    if args.input_file is not None and args.participant_label:
        parser.error(
            'participant_labels are not to be used with --input_file, do not supply it'
        )

    if args.input_file is not None and args.brainmask:
        parser.error(
            '--brainmask cannot be used with --input_file, use --atlas instead'
        )

    if args.bids_dir is not None and not args.analysis_level:
        parser.error(
            'analysis_level needs to be supplied with bids_dir, choices=[participant]'
        )

    if args.input_file is not None and (not args.input_file.endswith(
        ('.nii', '.nii.gz'))):
        parser.error('--input_file should end with .nii or .nii.gz')

    if args.atlas is not None and (not args.atlas.endswith(
        ('.nii', '.nii.gz'))):
        parser.error('--atlas should end with .nii or .nii.gz')

    if args.input_file is not None and args.atlas is not None:
        # carry analysis with input_file and atlas
        TR = spm_dep.spm.spm_vol(args.input_file).header.get_zooms()[-1]
        if TR <= 0:
            if para['TR'] <= 0:
                parser.error('Please supply a valid TR using -TR argument')
        else:
            if para['TR'] == -1:
                para['TR'] = TR
            elif para['TR'] <= 0:
                print('Invalid TR supplied, using implicit TR: {0}'.format(TR))
                para['TR'] = TR
        para['dt'] = para['TR'] / para['T']
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) +
                                1,
                                dtype='int')
        fourD_rsHRF.demo_4d_rsHRF(args.input_file,
                                  args.atlas,
                                  args.output_dir,
                                  para,
                                  args.n_jobs,
                                  mode='input w/ atlas')

    if args.bids_dir is not None and args.atlas is not None:
        # carry analysis with bids_dir and 1 atlas
        layout = BIDSLayout(args.bids_dir)

        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()

        if not subjects_to_analyze:
            parser.error(
                'Could not find participants. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )

        all_inputs = layout.get(modality='func',
                                subject=subjects_to_analyze,
                                task='rest',
                                type='preproc',
                                extensions=['nii', 'nii.gz'])
        if not all_inputs:
            parser.error(
                'There are no files of type *preproc.nii / *preproc.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        else:
            for file_count in range(len(all_inputs)):
                try:
                    TR = layout.get_metadata(
                        all_inputs[file_count].filename)['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(
                        all_inputs[file_count].filename).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(
                    np.fix(para['min_onset_search'] / para['dt']),
                    np.fix(para['max_onset_search'] / para['dt']) + 1,
                    dtype='int')
                fourD_rsHRF.demo_4d_rsHRF(all_inputs[file_count],
                                          args.atlas,
                                          args.output_dir,
                                          para,
                                          args.n_jobs,
                                          mode='bids w/ atlas')

    if args.bids_dir is not None and args.brainmask:
        # carry analysis with bids_dir and brainmask
        layout = BIDSLayout(args.bids_dir)

        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()

        if not subjects_to_analyze:
            parser.error(
                'Could not find participants. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )

        all_inputs = layout.get(modality='func',
                                subject=subjects_to_analyze,
                                task='rest',
                                type='preproc',
                                extensions=['nii', 'nii.gz'])
        all_masks = layout.get(modality='func',
                               subject=subjects_to_analyze,
                               task='rest',
                               type='brainmask',
                               extensions=['nii', 'nii.gz'])

        if not all_inputs:
            parser.error(
                'There are no files of type *preproc.nii / *preproc.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        if not all_masks:
            parser.error(
                'There are no files of type *brainmask.nii / *brainmask.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        if len(all_inputs) != len(all_masks):
            parser.error(
                'The number of *preproc.nii / .nii.gz and the number of '
                '*brainmask.nii / .nii.gz are different. Please make sure that '
                'there is one mask for each input_file present')

        all_inputs.sort()
        all_masks.sort()

        all_prefix_match = False
        prefix_match_count = 0
        for i in range(len(all_inputs)):
            input_prefix = all_inputs[i].filename.split('/')[-1].split(
                '_preproc')[0]
            mask_prefix = all_masks[i].filename.split('/')[-1].split(
                '_brainmask')[0]
            if input_prefix == mask_prefix:
                prefix_match_count += 1
            else:
                all_prefix_match = False
                break
        if prefix_match_count == len(all_inputs):
            all_prefix_match = True

        if not all_prefix_match:
            parser.error(
                'The mask and input files should have the same prefix for correspondence. '
                'Please consider renaming your files')
        else:
            for file_count in range(len(all_inputs)):
                try:
                    TR = layout.get_metadata(
                        all_inputs[file_count].filename)['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(
                        all_inputs[file_count].filename).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(
                    np.fix(para['min_onset_search'] / para['dt']),
                    np.fix(para['max_onset_search'] / para['dt']) + 1,
                    dtype='int')
                fourD_rsHRF.demo_4d_rsHRF(all_inputs[file_count],
                                          all_masks[file_count],
                                          args.output_dir,
                                          para,
                                          args.n_jobs,
                                          mode='bids')
Example #17
def run_rsHRF():
    parser     = get_parser()
    args       = parser.parse_args()
    arg_groups = {}
    for group in parser._action_groups:
        group_dict              = {a.dest: getattr(args, a.dest, None) for a in group._group_actions }
        arg_groups[group.title] = group_dict
    para          = arg_groups['Parameters']
    nargs         = len(sys.argv)
    temporal_mask = []

    if (not args.GUI) and (args.output_dir is None):
        parser.error('--output_dir is required when executing in command-line interface')

    if (not args.GUI) and (args.estimation is None):
        parser.error('--estimation rule is required when executing in command-line interface')

    if (args.GUI):
        if (nargs == 2):
            try:
                from .rsHRF_GUI import run
                run.run()
            except ModuleNotFoundError:
                parser.error('--GUI should not be used inside a Docker container')
        else:
            parser.error('No other arguments should be supplied with --GUI')

    if (args.input_file is not None or args.ts is not None) and args.analysis_level:
        parser.error('analysis_level cannot be used with --input_file or --ts, do not supply it')

    if (args.input_file is not None or args.ts is not None) and args.participant_label:
        parser.error('participant_labels are not to be used with --input_file or --ts, do not supply it')

    if args.input_file is not None and args.brainmask:
        parser.error('--brainmask cannot be used with --input_file, use --atlas instead')

    if args.ts is not None and (args.brainmask or args.atlas):
        parser.error('--atlas or --brainmask cannot be used with --ts, do not supply it')

    if args.bids_dir is not None and not (args.brainmask or args.atlas):
        parser.error('--atlas or --brainmask needs to be supplied with --bids_dir')

    if args.bids_dir is not None and not args.analysis_level:
        parser.error('analysis_level needs to be supplied with bids_dir, choices=[participant]')

    if args.input_file is not None and (not args.input_file.endswith(('.nii', '.nii.gz', '.gii', '.gii.gz'))):
        parser.error('--input_file should end with .gii, .gii.gz, .nii or .nii.gz')

    if args.atlas is not None and (not args.atlas.endswith(('.nii', '.nii.gz','.gii', '.gii.gz'))):
        parser.error('--atlas should end with .gii, .gii.gz, .nii or .nii.gz')

    if args.ts is not None and (not args.ts.endswith(('.txt'))):
        parser.error('--ts file should end with .txt')

    if args.temporal_mask is not None and (not args.temporal_mask.endswith(('.dat'))):
        parser.error('--temporal_mask file should end with ".dat"')

    if args.temporal_mask is not None:
        f = open(args.temporal_mask,'r')
        for line in f:
            for each in line:
                if each in ['0','1']:
                    temporal_mask.append(int(each))

    if args.estimation == 'sFIR' or args.estimation == 'FIR':
        para['T'] = 1

    if args.ts is not None:
        file_type = op.splitext(args.ts)
        if para['TR'] <= 0:
            parser.error('Please supply a valid TR using -TR argument')
        else:
            TR = para['TR']
        para['dt'] = para['TR'] / para['T']
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) + 1,
                                dtype='int')
        fourD_rsHRF.demo_rsHRF(args.ts, None, args.output_dir, para, args.n_jobs, file_type, mode='time-series', temporal_mask=temporal_mask, wiener=args.wiener)

    if args.input_file is not None:
        if args.atlas is not None:
            if (args.input_file.endswith(('.nii', '.nii.gz')) and args.atlas.endswith(('.gii', '.gii.gz'))) or (args.input_file.endswith(('.gii', '.gii.gz')) and args.atlas.endswith(('.nii', '.nii.gz'))):
                parser.error('--atlas and input_file should be of the same type [NIfTI or GIfTI]')

        # carry analysis with input_file and atlas
        file_type = op.splitext(args.input_file)
        if file_type[-1] == ".gz":
            file_type = op.splitext(file_type[-2])[-1] + file_type[-1]
        else:
            file_type = file_type[-1]
        if ".nii" in file_type:
            TR = (spm_dep.spm.spm_vol(args.input_file).header.get_zooms())[-1]
        else:
            if para['TR'] == -1:
                parser.error('Please supply a valid TR using -TR argument')
            else:
                TR = para['TR']
        if TR <= 0:
            if para['TR'] <= 0:
                parser.error('Please supply a valid TR using -TR argument')
        else:
            if para['TR'] == -1:
                para['TR'] = TR
            elif para['TR'] <= 0:
                print('Invalid TR supplied, using implicit TR: {0}'.format(TR))
                para['TR'] = TR
        para['dt'] = para['TR'] / para['T']
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) + 1,
                                dtype='int')
        fourD_rsHRF.demo_rsHRF(args.input_file, args.atlas, args.output_dir, para, args.n_jobs, file_type, mode='input', temporal_mask=temporal_mask, wiener=args.wiener)


    if args.bids_dir is not None and args.atlas is not None:
        # carry analysis with bids_dir and 1 atlas
        layout = BIDSLayout(args.bids_dir)

        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()

        if not subjects_to_analyze:
            parser.error('Could not find participants. Please make sure the BIDS data '
                         'structure is present and correct. Datasets can be validated online '
                         'using the BIDS Validator (http://incf.github.io/bids-validator/).')

        if not args.atlas.endswith(('.nii', '.nii.gz')):
            parser.error('--atlas should end with .nii or .nii.gz')

        all_inputs = layout.get(modality='func', subject=subjects_to_analyze, task='rest', type='preproc', extensions=['nii', 'nii.gz'])
        if not all_inputs:
            parser.error('There are no files of type *preproc.nii / *preproc.nii.gz '
                         'Please make sure to have at least one file of the above type '
                         'in the BIDS specification')
        else:
            num_errors = 0
            for file_count in range(len(all_inputs)):
                try:
                    TR = layout.get_metadata(all_inputs[file_count].filename)['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(all_inputs[file_count].filename).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                        np.fix(para['max_onset_search'] / para['dt']) + 1,
                                        dtype='int')
                num_errors += 1
                try:
                    fourD_rsHRF.demo_rsHRF(all_inputs[file_count], args.atlas, args.output_dir, para, args.n_jobs, file_type, mode='bids w/ atlas', temporal_mask=temporal_mask, wiener=args.wiener)
                    num_errors -=1
                except ValueError as err:
                    print(err.args[0])
                except:
                    print("Unexpected error:", sys.exc_info()[0])
            success = len(all_inputs) - num_errors
            if success == 0:
                raise RuntimeError('Dimensions were inconsistent for all input-mask pairs; \n'
                                   'No inputs were processed!')

    if args.bids_dir is not None and args.brainmask:
        # carry analysis with bids_dir and brainmask
        layout = BIDSLayout(args.bids_dir)

        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()

        if not subjects_to_analyze:
            parser.error('Could not find participants. Please make sure the BIDS data '
                         'structure is present and correct. Datasets can be validated online '
                         'using the BIDS Validator (http://incf.github.io/bids-validator/).')

        all_inputs = layout.get(modality='func', subject=subjects_to_analyze, task='rest', type='preproc', extensions=['nii', 'nii.gz'])
        all_masks = layout.get(modality='func', subject=subjects_to_analyze, task='rest', type='brainmask', extensions=['nii', 'nii.gz'])
        if not all_inputs:
            parser.error('There are no files of type *preproc.nii / *preproc.nii.gz '
                         'Please make sure to have at least one file of the above type '
                         'in the BIDS specification')
        if not all_masks:
            parser.error('There are no files of type *brainmask.nii / *brainmask.nii.gz '
                         'Please make sure to have at least one file of the above type '
                         'in the BIDS specification')
        if len(all_inputs) != len(all_masks):
            parser.error('The number of *preproc.nii / .nii.gz and the number of '
                         '*brainmask.nii / .nii.gz are different. Please make sure that '
                         'there is one mask for each input_file present')

        all_inputs.sort()
        all_masks.sort()

        all_prefix_match = False
        prefix_match_count = 0
        for i in range(len(all_inputs)):
            input_prefix = all_inputs[i].filename.split('/')[-1].split('_preproc')[0]
            mask_prefix = all_masks[i].filename.split('/')[-1].split('_brainmask')[0]
            if input_prefix == mask_prefix:
                prefix_match_count += 1
            else:
                all_prefix_match = False
                break
        if prefix_match_count == len(all_inputs):
            all_prefix_match = True

        if not all_prefix_match:
            parser.error('The mask and input files should have the same prefix for correspondence. '
                         'Please consider renaming your files')
        else:
            num_errors = 0
            for file_count in range(len(all_inputs)):
                file_type = op.splitext(all_inputs[file_count].filename)[1]
                if file_type == ".nii" or file_type == ".nii.gz":
                    try:
                        TR = layout.get_metadata(all_inputs[file_count].filename)['RepetitionTime']
                    except KeyError as e:
                        TR = spm_dep.spm.spm_vol(all_inputs[file_count].filename).header.get_zooms()[-1]
                    para['TR'] = TR
                else:
                    spm_dep.spm.spm_vol(all_inputs[file_count].filename)
                    TR = spm_dep.spm.spm_vol(all_inputs[file_count].filename).get_arrays_from_intent("NIFTI_INTENT_TIME_SERIES")[0].meta.get_metadata()["TimeStep"]
                    para['TR'] = float(TR) * 0.001


                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                        np.fix(para['max_onset_search'] / para['dt']) + 1,
                                        dtype='int')
                num_errors += 1
                try:
                    fourD_rsHRF.demo_rsHRF(all_inputs[file_count], all_masks[file_count], args.output_dir, para, args.n_jobs, mode='bids', temporal_mask=temporal_mask, wiener=args.wiener)
                    num_errors -=1
                except ValueError as err:
                    print(err.args[0])
                except:
                    print("Unexpected error:", sys.exc_info()[0])
            success = len(all_inputs) - num_errors
            if success == 0:
                raise RuntimeError('Dimensions were inconsistent for all input-mask pairs; \n'
                                   'No inputs were processed!')
Example #18
    def check_input(self, layout, gui=True):
        print '**** Check Inputs  ****'
        diffusion_available = False
        bvecs_available = False
        bvals_available = False
        valid_inputs = False

        if self.global_conf.subject_session == '':
            subject = self.subject
        else:
            subject = "_".join(
                (self.subject, self.global_conf.subject_session))

        dwi_file = os.path.join(self.subject_directory, 'dwi',
                                subject + '_dwi.nii.gz')
        bval_file = os.path.join(self.subject_directory, 'dwi',
                                 subject + '_dwi.bval')
        bvec_file = os.path.join(self.subject_directory, 'dwi',
                                 subject + '_dwi.bvec')

        subjid = self.subject.split("-")[1]

        try:
            layout = BIDSLayout(self.base_directory)
            print "Valid BIDS dataset with %s subjects" % len(
                layout.get_subjects())
            for subj in layout.get_subjects():
                self.global_conf.subjects.append('sub-' + str(subj))
            # self.global_conf.subjects = ['sub-'+str(subj) for subj in layout.get_subjects()]
            self.global_conf.modalities = [
                str(mod) for mod in layout.get_modalities()
            ]
            # mods = layout.get_modalities()
            types = layout.get_types()
            # print "Available modalities :"
            # for mod in mods:
            #     print "-%s" % mod

            if self.global_conf.subject_session == '':

                files = layout.get(subject=subjid,
                                   type='dwi',
                                   extensions='.nii.gz')
                if len(files) > 0:
                    dwi_file = files[0].filename
                    print dwi_file
                else:
                    error(message="Diffusion image not found for subject %s." %
                          (subjid),
                          title="Error",
                          buttons=['OK', 'Cancel'],
                          parent=None)
                    return

                files = layout.get(subject=subjid,
                                   type='dwi',
                                   extensions='.bval')
                if len(files) > 0:
                    bval_file = files[0].filename
                    print bval_file
                else:
                    error(
                        message="Diffusion bval image not found for subject %s."
                        % (subjid),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

                files = layout.get(subject=subjid,
                                   type='dwi',
                                   extensions='.bvec')
                if len(files) > 0:
                    bvec_file = files[0].filename
                    print bvec_file
                else:
                    error(
                        message="Diffusion bvec image not found for subject %s."
                        % (subjid),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return
            else:
                sessid = self.global_conf.subject_session.split("-")[1]

                files = layout.get(subject=subjid,
                                   type='dwi',
                                   extensions='.nii.gz',
                                   session=sessid)
                if len(files) > 0:
                    dwi_file = files[0].filename
                    print dwi_file
                else:
                    error(
                        message=
                        "Diffusion image not found for subject %s, session %s."
                        % (subjid, self.global_conf.subject_session),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

                files = layout.get(subject=subjid,
                                   type='dwi',
                                   extensions='.bval',
                                   session=sessid)
                if len(files) > 0:
                    bval_file = files[0].filename
                    print bval_file
                else:
                    error(
                        message=
                        "Diffusion bval image not found for subject %s, session %s."
                        % (subjid, self.global_conf.subject_session),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

                files = layout.get(subject=subjid,
                                   type='dwi',
                                   extensions='.bvec',
                                   session=sessid)
                if len(files) > 0:
                    bvec_file = files[0].filename
                    print bvec_file
                else:
                    error(
                        message=
                        "Diffusion bvec image not found for subject %s, session %s."
                        % (subjid, self.global_conf.subject_session),
                        title="Error",
                        buttons=['OK', 'Cancel'],
                        parent=None)
                    return

            print "Looking for...."
            print "dwi_file : %s" % dwi_file
            print "bvecs_file : %s" % bvec_file
            print "bvals_file : %s" % bval_file

            for typ in types:
                if typ == 'dwi' and os.path.isfile(dwi_file):
                    print "%s available" % typ
                    diffusion_available = True

        except:
            error(
                message=
                "Invalid BIDS dataset. Please see documentation for more details.",
                title="Error",
                buttons=['OK', 'Cancel'],
                parent=None)
            return

        if os.path.isfile(bval_file): bvals_available = True

        if os.path.isfile(bvec_file): bvecs_available = True

        if diffusion_available:
            if bvals_available and bvecs_available:
                self.stages[
                    'Diffusion'].config.diffusion_imaging_model_choices = self.diffusion_imaging_model

                #Copy diffusion data to derivatives / cmp  / subject / dwi
                if self.global_conf.subject_session == '':
                    out_dwi_file = os.path.join(self.derivatives_directory,
                                                'cmp', self.subject, 'dwi',
                                                subject + '_dwi.nii.gz')
                    out_bval_file = os.path.join(self.derivatives_directory,
                                                 'cmp', self.subject, 'dwi',
                                                 subject + '_dwi.bval')
                    out_bvec_file = os.path.join(self.derivatives_directory,
                                                 'cmp', self.subject, 'dwi',
                                                 subject + '_dwi.bvec')
                else:
                    out_dwi_file = os.path.join(
                        self.derivatives_directory, 'cmp', self.subject,
                        self.global_conf.subject_session, 'dwi',
                        subject + '_dwi.nii.gz')
                    out_bval_file = os.path.join(
                        self.derivatives_directory, 'cmp', self.subject,
                        self.global_conf.subject_session, 'dwi',
                        subject + '_dwi.bval')
                    out_bvec_file = os.path.join(
                        self.derivatives_directory, 'cmp', self.subject,
                        self.global_conf.subject_session, 'dwi',
                        subject + '_dwi.bvec')

                if not os.path.isfile(out_dwi_file):
                    shutil.copy(src=dwi_file, dst=out_dwi_file)
                if not os.path.isfile(out_bvec_file):
                    shutil.copy(src=bvec_file, dst=out_bvec_file)
                if not os.path.isfile(out_bval_file):
                    shutil.copy(src=bval_file, dst=out_bval_file)

                valid_inputs = True
                input_message = 'Inputs check finished successfully.\nDiffusion and morphological data available.'
            else:
                input_message = 'Error during inputs check.\nDiffusion bvec or bval files not available.'
        else:
            if self.global_conf.subject_session == '':
                input_message = 'Error during inputs check. No diffusion data available in folder ' + os.path.join(
                    self.base_directory, self.subject, 'dwi') + '!'
            else:
                input_message = 'Error during inputs check. No diffusion data available in folder ' + os.path.join(
                    self.base_directory, self.subject,
                    self.global_conf.subject_session, 'dwi') + '!'
        #diffusion_imaging_model = diffusion_imaging_model[0]

        if gui:
            #input_notification = Check_Input_Notification(message=input_message, diffusion_imaging_model_options=diffusion_imaging_model,diffusion_imaging_model=diffusion_imaging_model)
            #input_notification.configure_traits()
            print input_message
            self.global_conf.diffusion_imaging_model = self.diffusion_imaging_model

            # if diffusion_available:
            #     n_vol = nib.load(dwi_file).shape[3]
            #     if self.stages['Preprocessing'].config.end_vol == 0 or self.stages['Preprocessing'].config.end_vol == self.stages['Preprocessing'].config.max_vol or self.stages['Preprocessing'].config.end_vol >= n_vol-1:
            #         self.stages['Preprocessing'].config.end_vol = n_vol-1
            #     self.stages['Preprocessing'].config.max_vol = n_vol-1

            self.stages[
                'Registration'].config.diffusion_imaging_model = self.diffusion_imaging_model
            self.stages[
                'Diffusion'].config.diffusion_imaging_model = self.diffusion_imaging_model
        else:
            print input_message
            self.global_conf.diffusion_imaging_model = self.diffusion_imaging_model

            # if diffusion_available:
            #     n_vol = nib.load(dwi_file).shape[3]
            #     if self.stages['Preprocessing'].config.end_vol == 0 or self.stages['Preprocessing'].config.end_vol == self.stages['Preprocessing'].config.max_vol or self.stages['Preprocessing'].config.end_vol >= n_vol-1:
            #         self.stages['Preprocessing'].config.end_vol = n_vol-1
            #     self.stages['Preprocessing'].config.max_vol = n_vol-1

            self.stages[
                'Registration'].config.diffusion_imaging_model = self.diffusion_imaging_model
            self.stages[
                'Diffusion'].config.diffusion_imaging_model = self.diffusion_imaging_model

        if (diffusion_available):
            valid_inputs = True
        else:
            print "Missing required inputs."
            error(
                message=
                "Missing diffusion inputs. Please see documentation for more details.",
                title="Error",
                buttons=['OK', 'Cancel'],
                parent=None)

        for stage in self.stages.values():
            if stage.enabled:
                print stage.name
                print stage.stage_dir

        self.fill_stages_outputs()

        return valid_inputs
Example #19
def compare_par_nii(output_dir, old_sub_id_list, raw_dir, ses_id_list,
                    in_ses_folder, info_list, new_id_lut_file):
    """
    - Checks that all subjects from subject list are in sourcedata
    - Checks that par and nii filecount agrees
    - Exports nii filecount to output_dir
    """
    # first check that all subjects from id list are in the output_dir
    print("\nchecking that all subjects from id list are in the output_dir...")
    layout = BIDSLayout(output_dir)
    subjects_list = layout.get_subjects()

    for old_sub_id in old_sub_id_list:
        new_sub_id = get_public_sub_id(old_sub_id, new_id_lut_file)
        sub_dir = os.path.join(output_dir, "sub-" + new_sub_id)
        f = glob(sub_dir)
        if not f:
            raise Exception("No folder not found: %s" % sub_dir)
    print("%d subjects from list found in folder %s. Seems OK...\n" %
          (len(old_sub_id_list), output_dir))

    # compare filecount of par and nii files and export
    filecount = pd.DataFrame([])
    for new_sub_id in subjects_list:
        old_sub_id = get_private_sub_id(new_sub_id, new_id_lut_file)

        for old_ses_id in ses_id_list:
            new_ses_id = "tp" + old_ses_id[-1]
            sub_ses_par_dir = os.path.join(
                raw_dir, old_ses_id, in_ses_folder,
                old_sub_id + "_t%s_raw" % new_ses_id[-1])
            sub_ses_nii_dir = os.path.join(output_dir, "sub-" + new_sub_id,
                                           "ses-" + new_ses_id)

            n_files = OrderedDict([("subject_id", new_sub_id),
                                   ("session_id", new_ses_id)])

            for info in info_list:
                par_search_str = os.path.join(
                    sub_ses_par_dir, "*" + info["search_str"] + "*.par")
                par_f = glob(par_search_str)
                n_files_par = len(par_f)

                if "acq" in info.keys():
                    acq_str = "_acq-" + info["acq"]
                else:
                    acq_str = ""
                if "direction" in info.keys():
                    dir_str = "_dir-" + info["direction"]
                else:
                    dir_str = ""
                nii_search_str = os.path.join(
                    sub_ses_nii_dir, info["bids_modality"], "*" + acq_str +
                    "*" + dir_str + "*" + info["bids_name"] + "*.nii.gz")
                nii_f = glob(nii_search_str)
                n_files_nifti = len(nii_f)

                c = info["bids_modality"] + "_" + info["bids_name"] + \
                    acq_str.replace("-", "") + dir_str.replace("-", "")

                n_files[c] = [n_files_nifti]

                if not n_files_par == n_files_nifti:
                    raise Exception(
                        "missmatch between par and nii file count %s %s %s %s"
                        % (new_sub_id, new_ses_id, par_search_str,
                           nii_search_str))
                # TODO check physio
                if "physio" in info.keys() and info["physio"]:
                    phys_par_search_str = os.path.join(
                        sub_ses_par_dir,
                        "*" + info["search_str"] + "*_physio.log")
                    phys_par_f = glob(phys_par_search_str)
                    phys_n_files_par = len(phys_par_f)

                    phys_nii_search_str = os.path.join(
                        sub_ses_nii_dir, info["bids_modality"],
                        "*" + acq_str + "*" + dir_str + "*" +
                        info["bids_name"] + "*_physio.tsv")
                    phys_nii_f = glob(phys_nii_search_str)
                    phys_n_files_nifti = len(phys_nii_f)

                    c = info["bids_modality"] + "_" + info["bids_name"] + \
                        acq_str.replace("-", "") + dir_str.replace("-", "") + "_physio"
                    n_files[c] = [phys_n_files_nifti]

                    if phys_n_files_par != phys_n_files_nifti:
                        raise Exception(
                            "mismatch between par and nii physio file count %s %s %s %s"
                            % (new_sub_id, new_ses_id, phys_par_search_str,
                               phys_nii_search_str))

            filecount = filecount.append(pd.DataFrame(n_files))

    output_file = os.path.join(output_dir, "n_files.tsv")
    to_tsv(filecount, output_file)
    print("Compared filecount from par and nifti files. Seems OK...")
    print("Filecount written to %s" % output_file)
Exemple #20
    def load_project(self, ui_info):
        loaded_project = gui.CMP_Project_Info()
        np_res = loaded_project.configure_traits(view='open_view')

        print "Default subject : " + loaded_project.subject

        is_bids = False

        try:
            bids_layout = BIDSLayout(loaded_project.base_directory)
            is_bids = True
            loaded_project.subjects = []
            for subj in bids_layout.get_subjects():
                loaded_project.subjects.append('sub-' + str(subj))
            # loaded_project.subjects = ['sub-'+str(subj) for subj in bids_layout.get_subjects()]
        except:
            error(
                message=
                "Invalid BIDS dataset. Please see documentation for more details.",
                title="BIDS error")

        self.inputs_checked = False

        print loaded_project.subjects

        if np_res and os.path.exists(
                loaded_project.base_directory) and is_bids:
            # Retrocompatibility with v2.1.0 where only one config.ini file was created
            if os.path.exists(
                    os.path.join(loaded_project.base_directory, 'derivatives',
                                 'config.ini')):
                loaded_project.config_file = os.path.join(
                    loaded_project.base_directory, 'derivatives', 'config.ini')
            # Load new format: <process_type>_config.ini
            else:
                loaded_project.available_config = [
                    os.path.basename(s)[:-11] for s in glob.glob(
                        os.path.join(loaded_project.base_directory,
                                     'derivatives', '*_config.ini'))
                ]
                if len(loaded_project.available_config) > 1:
                    loaded_project.config_to_load = loaded_project.available_config[
                        0]
                    config_selected = loaded_project.configure_traits(
                        view='select_config_to_load')
                    if not config_selected:
                        return 0
                else:
                    loaded_project.config_to_load = loaded_project.available_config[
                        0]

                loaded_project.subject = loaded_project.config_to_load.split(
                    "_")[0]

                print "Config to load: %s" % loaded_project.config_to_load
                loaded_project.config_file = os.path.join(
                    loaded_project.base_directory, 'derivatives',
                    '%s_config.ini' % loaded_project.config_to_load)
                print "Config file: %s" % loaded_project.config_file

            loaded_project.process_type = get_process_detail(
                loaded_project, 'Global', 'process_type')
            loaded_project.diffusion_imaging_model = get_process_detail(
                loaded_project, 'Global', 'diffusion_imaging_model')
            self.pipeline = init_project(loaded_project, False)
            if self.pipeline != None:
                update_last_processed(loaded_project, self.pipeline)
                ui_info.ui.context["object"].project_info = loaded_project
                ui_info.ui.context["object"].pipeline = self.pipeline
                print "Config for subject %s loaded !" % ui_info.ui.context[
                    "object"].project_info.subject
                self.project_loaded = True
                # Move old to new config filename format
                if os.path.exists(
                        os.path.join(loaded_project.base_directory,
                                     'derivatives', 'config.ini')):
                    loaded_project.config_file = '%s_config.ini' % get_process_detail(
                        loaded_project, 'Global', 'process_type')
                    os.remove(
                        os.path.join(loaded_project.base_directory,
                                     'derivatives', 'config.ini'))
                    save_config(
                        self.pipeline,
                        ui_info.ui.context["object"].project_info.config_file)
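The per-process config naming that load_project resolves above boils down to a short glob-and-strip step. A standalone sketch with a placeholder project directory (the 11 characters stripped are exactly '_config.ini'):

import glob
import os

base_directory = '/data/my_bids_project'  # placeholder
available_config = [
    os.path.basename(p)[:-len('_config.ini')]
    for p in glob.glob(os.path.join(base_directory, 'derivatives', '*_config.ini'))
]
# e.g. 'derivatives/sub-01_Diffusion_config.ini' -> 'sub-01_Diffusion',
# and the subject is then the part before the first underscore:
# 'sub-01_Diffusion'.split('_')[0] == 'sub-01'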
Exemple #21
def bidsmri2project(directory, args):
    #Parse dataset_description.json file in BIDS directory
    if os.path.isdir(directory):
        try:
            with open(os.path.join(directory,
                                   'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical(
                "Cannot find dataset_description.json file which is required in the BIDS spec"
            )
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %
                         os.path.join(directory))
        exit("-1")

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})
        #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document
        project.add_attributes({Constants.PROV['Location']: directory})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create empty dictionary for sessions where the key is the subject id; used later to link scans to the same session as demographics
    session = {}
    participant = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory, 'participants.tsv')):
        with open(os.path.join(directory, 'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            #logic to map variables to terms.#########################################################################################################

            #first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.  For those that are not
            #we want to use the variable-term mapping functions to help the user do the mapping
            #iterate over columns
            mapping_list = []
            column_to_terms = {}
            for field in participants_data.fieldnames:

                #column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    #add column to list for column_to_terms mapping
                    mapping_list.append(field)

            #do variable-term mappings
            if ((args.json_map != False) or (args.key != None)):

                #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
                if args.json_map == None:
                    #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                    args.json_map = os.path.isfile(
                        os.path.join(directory, 'participants.json'))

                #maps variables in CSV file to terms
                temp = DataFrame(columns=mapping_list)
                column_to_terms.update(
                    map_variables_to_terms(directory=directory,
                                           df=temp,
                                           apikey=args.key,
                                           output_file=os.path.join(
                                               directory, 'participants.json'),
                                           json_file=args.json_map,
                                           owl_file=args.owl))

            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                #handles ambiguity in BIDS datasets: sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: row['participant_id']
                    }))

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(
                    person=participant[subjid]['person'],
                    role=Constants.NIDM_PARTICIPANT)

                for key, value in row.items():
                    #for variables in the participants.tsv file that have term mappings in BIDS_Constants.py, use those; add them to json_map so we don't have to map them again if the user
                    #supplied arguments to map variables
                    if key in BIDS_Constants.participants:

                        #if this was the participant_id, we already handled it above creating agent / qualified association
                        if not (BIDS_Constants.participants[key]
                                == Constants.NIDM_SUBJECTID):
                            acq_entity.add_attributes(
                                {BIDS_Constants.participants[key]: value})

                    #else if user added -mapvars flag to command line then we'll use the variable-> term mapping procedures to help user map variables to terms (also used
                    # in CSV2NIDM.py)
                    else:

                        if key in column_to_terms:
                            acq_entity.add_attributes(
                                {
                                    QualifiedName(
                                        provNamespace(
                                            Core.safe_string(None,
                                                             string=str(key)), column_to_terms[key]["url"]), ""):
                                    value
                                })
                        else:

                            acq_entity.add_attributes(
                                {Constants.BIDS[key.replace(" ", "_")]: value})

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" % subject_id)
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        #check if there's a session number.  If so, store it in the session activity
        session_dirs = bids_layout.get(target='session',
                                       subject=subject_id,
                                       return_type='dir')
        #if session_dirs has entries then get any metadata about session and store in session activity

        #bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        #bids_layout.get(extensions='.tsv',return_type='obj')

        #check whether sessions have been created (i.e. was there a participants.tsv file?); if not, create one here
        if not (subject_id in session):
            session[subject_id] = Session(project)

        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = MRAcquisition(session[subject_id])

            #check whether participant (i.e. agent) for this subject already exists (i.e. if participants.tsv file exists) else create one
            if not (subject_id in participant):
                participant[subject_id] = {}
                participant[subject_id]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: subject_id
                    }))

            #add qualified association with person
            acq.add_qualified_association(
                person=participant[subject_id]['person'],
                role=Constants.NIDM_PARTICIPANT)

            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)

                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.modality]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                #make relative link to
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })

                #add sha512 sum
                if isfile(join(directory, file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(join(directory, file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.filename))
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })
            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)

                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.modality]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                #add sha512 sum
                if isfile(join(directory, file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(join(directory, file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.filename))

                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })

                #get associated events TSV file
                if 'run' in file_tpl._fields:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task,
                                                  run=file_tpl.run)
                else:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task)
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file

                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NIDM_FILENAME:
                        getRelPathToBIDS(events_file[0].filename, directory)
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

            elif file_tpl.modality == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)

                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans["dti"]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                #add sha512 sum
                if isfile(join(directory, file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(join(directory, file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.filename))

                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })

                #for bval and bvec files, what to do with those?

                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(bids_layout.get_bval(file_tpl.filename),
                                     directory)
                })
                #add sha512 sum
                if isfile(join(directory, file_tpl.filename)):
                    acq_obj_bval.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(join(directory, file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.filename))
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(bids_layout.get_bvec(file_tpl.filename),
                                     directory)
                })
                if isfile(join(directory, file_tpl.filename)):
                    #add sha512 sum
                    acq_obj_bvec.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(join(directory, file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.filename))

                #link bval and bvec acquisition object entities together or is their association with DWI scan...

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid[1]])
                        #add qualified association with person
                        acq.add_qualified_association(
                            person=participant[subject_id]['person'],
                            role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)

                        for key, value in row.items():
                            #we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            #BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id") and (key != "")):
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes({
                            Constants.NIDM_FILENAME:
                            getRelPathToBIDS(tsv_file, directory)
                        })
                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes({
                                Constants.BIDS["data_dictionary"]:
                                getRelPathToBIDS(data_dict, directory)
                            })

    return project
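bidsmri2project above relies on the pre-1.0 pybids (bids.grabbids) API throughout. A condensed, standalone sketch of that query pattern is shown below; the dataset path is a placeholder.

from bids.grabbids import BIDSLayout  # pre-1.0 pybids API used in these examples

layout = BIDSLayout('/data/bids')     # placeholder dataset path

for subject_id in layout.get_subjects():
    # each hit is a file tuple with fields such as filename, modality, type, task and run
    for file_tpl in layout.get(subject=subject_id, extensions=['.nii', '.nii.gz']):
        sidecar = layout.get_metadata(file_tpl.filename)   # merged JSON sidecar (may be empty)
        if file_tpl.modality == 'dwi':
            bval = layout.get_bval(file_tpl.filename)      # path to the matching .bval file
            bvec = layout.get_bvec(file_tpl.filename)      # path to the matching .bvec file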
Exemple #22
    for input in descriptor_dict['inputs']:
        if input['id'] == 'analysis_level':
            levels = input['value-choices']
    assert levels, "analysis_level must have value-choices"

    invocation_dict = json.load(open(args.invocation_file))

    bids_dir = invocation_dict['bids_dir']

    layout = BIDSLayout(bids_dir)
    participants_to_analyze = []
    if 'participant_label' in invocation_dict.keys():
        participants_to_analyze = invocation_dict['participant_label']
    # for all subjects
    else:
        participants_to_analyze = layout.get_subjects()

    print(levels)
    print(participants_to_analyze)

    dep_ids = []
    for level in levels:
        id_sources = []

        if level.startswith('participant'):
            for participant in participants_to_analyze:
                filename = "subtask_%s_%s.json" % (level, participant)
                prepare_and_save_subtask(tool_class=args.app_descriptor_file,
                                         app_name=descriptor_dict['name'],
                                         filename=filename,
                                         invocation_dict=invocation_dict,
Exemple #23
def main(argv):
    parser = ArgumentParser(
        description=
        'This program will convert a BIDS MRI dataset to a NIDM-Experiment \
        RDF document.  It will parse phenotype information and simply store variables/values \
        and link to the associated json data dictionary file.')

    parser.add_argument('-d',
                        dest='directory',
                        required=True,
                        help="Path to BIDS dataset directory")
    parser.add_argument('-o',
                        dest='outputfile',
                        default="nidm.ttl",
                        help="NIDM output turtle file")
    args = parser.parse_args()

    directory = args.directory
    outputfile = args.outputfile

    #importlib.reload(sys)
    #sys.setdefaultencoding('utf8')

    #Parse dataset_description.json file in BIDS directory
    with open(os.path.join(directory,
                           'dataset_description.json')) as data_file:
        dataset = json.load(data_file)
    #print(dataset_data)

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #print(key)
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})

    #create empty dictionary for sessions where the key is the subject id; used later to link scans to the same session as demographics
    session = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    with open(os.path.join(directory, 'participants.tsv')) as csvfile:
        participants_data = csv.DictReader(csvfile, delimiter='\t')
        #print(participants_data.fieldnames)
        for row in participants_data:
            #create session object for subject to be used for participant metadata and image data
            #parse subject id from "sub-XXXX" string
            subjid = row['participant_id'].split("-")
            session[subjid[1]] = Session(project)

            #add acquisition object
            acq = Acquisition(session=session[subjid[1]])
            acq_entity = DemographicsAcquisitionObject(acquisition=acq)
            participant = acq.add_person(role=Constants.NIDM_PARTICIPANT,
                                         attributes=({
                                             Constants.NIDM_SUBJECTID:
                                             row['participant_id']
                                         }))

            for key, value in row.items():
                #for now only convert variables in participants.tsv file who have term mappings in BIDS_Constants.py
                if key in BIDS_Constants.participants:
                    acq_entity.add_attributes(
                        {BIDS_Constants.participants[key]: value})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue
        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = Acquisition(session[subject_id])

            #print(file_tpl.type)
            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                #make relative link to
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })
            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })

                #get associated events TSV file
                if 'run' in file_tpl._fields:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task,
                                                  run=file_tpl.run)
                else:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task)
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file
                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NFO["filename"]:
                        events_file[0].filename
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

            elif file_tpl.modality == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})
                    #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })

                #for bval and bvec files, what to do with those?
                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    bids_layout.get_bval(file_tpl.filename)
                })
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    bids_layout.get_bvec(file_tpl.filename)
                })
                #link bval and bvec acquisition object entities together or is their association with enclosing activity enough?

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = Acquisition(session=session[subjid[1]])
                        acq_entity = AssessmentAcquisitionObject(
                            acquisition=acq)
                        participant = acq.add_person(
                            role=Constants.NIDM_PARTICIPANT,
                            attributes=({
                                Constants.NIDM_SUBJECTID:
                                row['participant_id']
                            }))

                        for key, value in row.items():
                            if not key == "participant_id":
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes(
                            {Constants.NIDM_FILENAME: tsv_file})
                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes(
                                {Constants.BIDS["data_dictionary"]: data_dict})

    #serialize graph
    #print(project.graph.get_provn())
    with open(outputfile, 'w') as f:
        f.write(project.serializeTurtle())
        #f.write(project.graph.get_provn())
    #save a DOT graph as PNG
    project.save_DotGraph(str(outputfile + ".png"), format="png")
Exemple #24
vols = 120  # needed for volume matching. Tells how many volumes you need to keep.
number_of_skipped_volumes = 4
#  True volumes will be vols - number_of_skipped_volumes

num_proc = 4

number_of_subjects = -1
# number_of_subjects = 2 # Number of subjects you wish to work with

# ----------------------------- Getting Subjects -------------------------------
# ----------------------------------- BIDS -------------------------------------
layout = BIDSLayout(data_directory)

if number_of_subjects == -1:
    number_of_subjects = len(layout.get_subjects())

subject_list = (layout.get_subjects())[0:number_of_subjects]
# subject_list = list(map(int, subject_list))

# -----------------------------------File List----------------------------------
# group1FilesPath = ''
# group2FilesPath = ''
#
# group1FilesList = np.genfromtxt(group1FilesPath,dtype='unicode')
# group2FilesList = np.genfromtxt(group2FilesPath,dtype='unicode')
#
# fileList = group1FilesPath + group2FilesPath

# -----------------------------------------------------------------------------
keep_keys = [
    'CogAtlasID', 'ConversionSoftware', 'ConversionSoftwareVersion',
    'EchoTime', 'FlipAngle', 'ImageType', 'InversionTime',
    'MagneticFieldStrength', 'Manufacturer', 'ManufacturersModelName',
    'ProtocolName', 'RepetitionTime', 'ScanOptions', 'ScanningSequence',
    'SequenceVariant', 'SeriesNumber', 'SoftwareVersions', 'TaskName'
]
slice_times = [
    0.0023, 1.0023, 0.0499, 1.0499, 0.0975, 1.0975, 0.1451, 1.1451, 0.1927,
    1.1928, 0.2404, 1.2404, 0.288, 1.288, 0.3356, 1.3356, 0.3832, 1.3832,
    0.4308, 1.4309, 0.4785, 1.4785, 0.5261, 1.5261, 0.5737, 1.5737, 0.6213,
    1.6213, 0.6689, 1.6689, 0.7166, 1.7166, 0.7642, 1.7642, 0.8118, 1.8118,
    0.8594, 1.8594, 0.907, 1.907, 0.9547, 1.9546
]

subjects = layout.get_subjects()
for subj in subjects:
    # Functional scans
    scans = layout.get(subject=subj, extensions='nii.gz', type='bold')
    for scan in scans:
        json_file = layout.get_nearest(scan.filename, extensions='json')
        metadata = layout.get_metadata(scan.filename)
        if 'dcmmeta_shape' in metadata.keys() or not metadata:
            metadata2 = {
                key: metadata[key]
                for key in keep_keys if key in metadata.keys()
            }
            for key in keep_keys:
                if key not in metadata.keys(
                ) and key in metadata['global']['const'].keys():
                    metadata2[key] = metadata['global']['const'][key]
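The metadata-cleaning step above keeps only whitelisted sidecar keys and falls back to the dcmstack-style 'global'/'const' section. A standalone sketch of that merge with a made-up sidecar dict (the .get() fallback simply avoids a KeyError when the 'global' section is absent):

keep_keys = ['EchoTime', 'RepetitionTime', 'TaskName']  # illustrative subset
metadata = {                                            # made-up dcm2niix/dcmstack-style sidecar
    'EchoTime': 0.03,
    'dcmmeta_shape': [64, 64, 38, 120],
    'global': {'const': {'RepetitionTime': 2.0, 'TaskName': 'rest'}},
}

cleaned = {key: metadata[key] for key in keep_keys if key in metadata}
for key in keep_keys:
    if key not in metadata and key in metadata.get('global', {}).get('const', {}):
        cleaned[key] = metadata['global']['const'][key]
# cleaned == {'EchoTime': 0.03, 'RepetitionTime': 2.0, 'TaskName': 'rest'}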
Exemple #26
    def check_input1(self, gui=True):
        print '**** Check Inputs  ****'
        diffusion_available = False
        bvecs_available = False
        bvals_available = False
        t1_available = False
        t2_available = False
        valid_inputs = False

        dwi_file = os.path.join(self.subject_directory, 'dwi',
                                self.subject + '_dwi.nii.gz')
        bval_file = os.path.join(self.subject_directory, 'dwi',
                                 self.subject + '_dwi.bval')
        bvec_file = os.path.join(self.subject_directory, 'dwi',
                                 self.subject + '_dwi.bvec')
        T1_file = os.path.join(self.subject_directory, 'anat',
                               self.subject + '_T1w.nii.gz')
        T2_file = os.path.join(self.subject_directory, 'anat',
                               self.subject + '_T2w.nii.gz')

        print "Looking for...."
        print "dwi_file : %s" % dwi_file
        print "bvecs_file : %s" % bvec_file
        print "bvals_file : %s" % bval_file
        print "T1_file : %s" % T1_file
        print "T2_file : %s" % T2_file

        try:
            layout = BIDSLayout(self.base_directory)
            print "Valid BIDS dataset with %s subjects" % len(
                layout.get_subjects())
            for subj in layout.get_subjects():
                self.global_conf.subjects.append('sub-' + str(subj))
            # self.global_conf.subjects = ['sub-'+str(subj) for subj in layout.get_subjects()]
            self.global_conf.modalities = [
                str(mod) for mod in layout.get_modalities()
            ]
            # mods = layout.get_modalities()
            types = layout.get_types()
            # print "Available modalities :"
            # for mod in mods:
            #     print "-%s" % mod

            for typ in types:
                if typ == 'dwi' and os.path.isfile(dwi_file):
                    print "%s available" % typ
                    diffusion_available = True

                if typ == 'T1w' and os.path.isfile(T1_file):
                    print "%s available" % typ
                    t1_available = True

                if typ == 'T2w' and os.path.isfile(T2_file):
                    print "%s available" % typ
                    t2_available = True
        except:
            error(
                message=
                "Invalid BIDS dataset. Please see documentation for more details.",
                title="Error",
                buttons=['OK', 'Cancel'],
                parent=None)
            return

        if os.path.isfile(bval_file): bvals_available = True

        if os.path.isfile(bvec_file): bvecs_available = True

        mem = Memory(base_dir=os.path.join(self.derivatives_directory, 'cmp',
                                           self.subject, 'tmp', 'nipype'))
        swap_and_reorient = mem.cache(SwapAndReorient)

        if diffusion_available:
            if bvals_available and bvecs_available:
                self.stages[
                    'Diffusion'].config.diffusion_imaging_model_choices = self.diffusion_imaging_model

                #Copy diffusion data to derivatives / cmp  / subject / dwi
                out_dwi_file = os.path.join(self.derivatives_directory, 'cmp',
                                            self.subject, 'dwi',
                                            self.subject + '_dwi.nii.gz')
                out_bval_file = os.path.join(self.derivatives_directory, 'cmp',
                                             self.subject, 'dwi',
                                             self.subject + '_dwi.bval')
                out_bvec_file = os.path.join(self.derivatives_directory, 'cmp',
                                             self.subject, 'dwi',
                                             self.subject + '_dwi.bvec')

                shutil.copy(src=dwi_file, dst=out_dwi_file)
                shutil.copy(src=bvec_file, dst=out_bvec_file)
                shutil.copy(src=bval_file, dst=out_bval_file)

                if t2_available:
                    print "Swap and reorient T2"
                    swap_and_reorient(
                        src_file=os.path.join(self.subject_directory, 'anat',
                                              self.subject + '_T2w.nii.gz'),
                        ref_file=os.path.join(self.subject_directory, 'dwi',
                                              self.subject + '_dwi.nii.gz'),
                        out_file=os.path.join(self.derivatives_directory,
                                              'cmp', self.subject, 'anat',
                                              self.subject + '_T2w.nii.gz'))
                if t1_available:
                    swap_and_reorient(
                        src_file=os.path.join(self.subject_directory, 'anat',
                                              self.subject + '_T1w.nii.gz'),
                        ref_file=os.path.join(self.subject_directory, 'dwi',
                                              self.subject + '_dwi.nii.gz'),
                        out_file=os.path.join(self.derivatives_directory,
                                              'cmp', self.subject, 'anat',
                                              self.subject + '_T1w.nii.gz'))
                    valid_inputs = True
                    input_message = 'Inputs check finished successfully.\nDiffusion and morphological data available.'
                else:
                    input_message = 'Error during inputs check.\nMorphological data (T1) not available.'
            else:
                input_message = 'Error during inputs check.\nDiffusion bvec or bval files not available.'
        elif t1_available:
            input_message = 'Error during inputs check. \nDiffusion data not available (DSI/DTI/HARDI).'
        else:
            input_message = 'Error during inputs check. No diffusion or morphological data available in folder ' + os.path.join(
                self.base_directory, 'RAWDATA') + '!'

        #diffusion_imaging_model = diffusion_imaging_model[0]

        if gui:
            #input_notification = Check_Input_Notification(message=input_message, diffusion_imaging_model_options=diffusion_imaging_model,diffusion_imaging_model=diffusion_imaging_model)
            #input_notification.configure_traits()
            print input_message
            self.global_conf.diffusion_imaging_model = self.diffusion_imaging_model
            # diffusion_file = os.path.join(self.subject_directory,'dwi',self.subject+'_dwi.nii.gz')
            # n_vol = nib.load(diffusion_file).shape[3]
            # if self.stages['Preprocessing'].config.end_vol == 0 or self.stages['Preprocessing'].config.end_vol == self.stages['Preprocessing'].config.max_vol or self.stages['Preprocessing'].config.end_vol >= n_vol-1:
            #     self.stages['Preprocessing'].config.end_vol = n_vol-1
            # self.stages['Preprocessing'].config.max_vol = n_vol-1
            self.stages[
                'Registration'].config.diffusion_imaging_model = self.diffusion_imaging_model
            self.stages[
                'Diffusion'].config.diffusion_imaging_model = self.diffusion_imaging_model
        else:
            print input_message
            self.global_conf.diffusion_imaging_model = self.diffusion_imaging_model
            # diffusion_file = os.path.join(self.subject_directory,'dwi',self.subject+'_dwi.nii.gz')
            # n_vol = nib.load(diffusion_file).shape[3]
            # if self.stages['Preprocessing'].config.end_vol == 0 or self.stages['Preprocessing'].config.end_vol == self.stages['Preprocessing'].config.max_vol or self.stages['Preprocessing'].config.end_vol >= n_vol-1:
            #     self.stages['Preprocessing'].config.end_vol = n_vol-1
            # self.stages['Preprocessing'].config.max_vol = n_vol-1
            self.stages[
                'Registration'].config.diffusion_imaging_model = self.diffusion_imaging_model
            self.stages[
                'Diffusion'].config.diffusion_imaging_model = self.diffusion_imaging_model

        if t2_available:
            self.stages['Registration'].config.registration_mode_trait = [
                'Linear + Non-linear (FSL)'
            ]  #,'BBregister (FS)','Nonlinear (FSL)']

        if (t1_available and diffusion_available):
            valid_inputs = True
        else:
            print "Missing required inputs."
            error(
                message=
                "Missing required inputs. Please see documentation for more details.",
                title="Error",
                buttons=['OK', 'Cancel'],
                parent=None)

        for stage in self.stages.values():
            if stage.enabled:
                print stage.name
                print stage.stage_dir

        self.fill_stages_outputs()

        return valid_inputs
Exemple #27
import os
import ants
import glob
import shutil
from bids.grabbids import BIDSLayout
import time
from joblib import Parallel, delayed
import multiprocessing

derivatives = '/Volumes/data/prebiostress/data/derivatives'
layout = BIDSLayout(derivatives) # ignore the error here

# do T1
for subj in layout.get_subjects(): # loop on number of subjects
    print(subj)
    for ses in layout.get_sessions(): # loop on number of sessions
        targ = os.path.join(derivatives,"sub-"+subj,"ses-"+ses,"anat") # find path to anat folder for this subject and this session
        fpaths = glob.glob(targ+"/*T1w_dn.nii.gz") # find the T1
        fpaths = ''.join(fpaths) # collapse the glob list to a single path string; assumes at most one T1w match
        if len(fpaths) > 0:
            img = ants.image_read(fpaths)

####### TESTING ############
img = ants.image_read('/Volumes/data/prebiostress/data/derivatives/sub-01/ses-1/anat/sub-01_ses-1_T1w_dn.nii.gz',reorient='LSP')
img = ants.image_read('/Volumes/data/prebiostress/data/derivatives/sub-01/ses-1/anat/sub-01_ses-1_T1w_dn.nii.gz')
img = ants.reorient_image2(img,orientation='LSP')
ants.image_write(img,'output.nii')
# img.get_sessions()  # note: get_sessions() is a BIDSLayout method, not an ANTs image method; leftover from interactive testing

img = ants.registration.reorient_image(img,orientation='LSP')
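A tidier version of the scratch block above, using only calls that already appear there (ants.image_read, ants.reorient_image2, ants.image_write); the input path and output filename are placeholders.

import ants

in_file = '/Volumes/data/prebiostress/data/derivatives/sub-01/ses-1/anat/sub-01_ses-1_T1w_dn.nii.gz'  # placeholder

img = ants.image_read(in_file)                              # load the denoised T1w
img = ants.reorient_image2(img, orientation='LSP')          # reorient in memory
ants.image_write(img, 'sub-01_ses-1_T1w_dn_LSP.nii.gz')     # write the reoriented copy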
Exemple #28
def test_get_subjects():
    data_dir = join(dirname(__file__), 'data', '7t_trt')
    layout = BIDSLayout(data_dir)
    result = layout.get_subjects()
    predicted = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
    assert predicted == result
Exemple #29
layout = BIDSLayout(args.bids_dir)

if args.analysis_level == "participant1" and not args.participant_label:
    raise Exception(
        "For template level processing subjects must be explicitly specified")

# Select subjects
subjects_to_analyze = []
T1w_files = []

# only for a subset of subjects
if args.participant_label:
    subjects_to_analyze = args.participant_label
# for all subjects
else:
    subjects_to_analyze = layout.get_subjects()

# Convert subjects to T1W files
for subject_label in subjects_to_analyze:
    subject_T1w_files = layout.get(subject=subject_label,
                                   type='T1w',
                                   extensions=['.nii', '.nii.gz'],
                                   return_type='file')
    if len(subject_T1w_files) == 0:
        raise Exception("No T1w files found for participant %s" %
                        subject_label)
    else:
        # If template phase, limit templates to first timepoint for subjects
        if args.analysis_level == "participant1":
            T1w_files.append(subject_T1w_files[0])
        else:
            # keep every timepoint (assumed completion; the snippet is truncated here in the source)
            T1w_files.extend(subject_T1w_files)
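This snippet assumes an args namespace produced by argparse. A minimal sketch of the parser it implies; the argument names and defaults are inferred from how args is used, not taken from the original script:

import argparse

parser = argparse.ArgumentParser(description='Minimal BIDS-App style command line')
parser.add_argument('bids_dir', help='path to the BIDS dataset')
parser.add_argument('analysis_level',
                    help="processing stage, e.g. 'participant1' for the template phase")
parser.add_argument('--participant_label', nargs='+', default=[],
                    help='optional list of participant labels to restrict the analysis to')
args = parser.parse_args()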
Example #30
def main():
    parser = argparse.ArgumentParser(
        description='Example BIDS App entrypoint script.')
    parser.add_argument('app_descriptor_file', help='app descriptor')
    parser.add_argument('invocation_file', help='invocation file')
    args = parser.parse_args()

    with open(args.app_descriptor_file) as descriptor_file:
        descriptor_dict = json.load(descriptor_file)
    levels = None
    session_support = False
    for descriptor_input in descriptor_dict['inputs']:
        if descriptor_input['id'] == 'analysis_level':
            levels = descriptor_input['value-choices']
        elif descriptor_input['id'] == 'session_label':
            session_support = True
    assert levels, "analysis_level must have value-choices"
    with open(args.invocation_file) as invocation_file:
        invocation_dict = json.load(invocation_file)
    bids_dir = invocation_dict['bids_dir']
    layout = BIDSLayout(bids_dir)

    if 'participant_label' in invocation_dict.keys():
        participants_to_analyze = invocation_dict['participant_label']
    # for all subjects
    else:
        participants_to_analyze = layout.get_subjects()

    if 'session_label' in invocation_dict.keys():
        sessions_to_analyze = invocation_dict['session_label']
    # for all sessions
    else:
        sessions_to_analyze = layout.get_sessions()

    if not session_support:
        sessions_to_analyze = None

    print(levels)
    print(participants_to_analyze)
    print(sessions_to_analyze)

    dep_ids = []
    for level in levels:
        id_sources = []
        if level.startswith('session') and session_support:
            for participant in participants_to_analyze:
                for session in sessions_to_analyze:
                    filename = "level-%s_sub-%s_ses-%s_subtask.json" % (
                        level, participant, session)
                    prepare_and_save_subtask(
                        tool_class=args.app_descriptor_file,
                        app_name=descriptor_dict['name'],
                        filename=filename,
                        invocation_dict=invocation_dict,
                        participant_label=participant,
                        session_label=session,
                        analysis_level=level,
                        dep_ids=dep_ids)
                    id_sources.append(filename.replace('.json', '.*bid'))

            dep_ids = get_dep_ids(id_sources)

        elif level.startswith('participant'):
            for participant in participants_to_analyze:
                filename = "level-%s_sub-%s_subtask.json" % (level,
                                                             participant)
                prepare_and_save_subtask(tool_class=args.app_descriptor_file,
                                         app_name=descriptor_dict['name'],
                                         filename=filename,
                                         invocation_dict=invocation_dict,
                                         participant_label=participant,
                                         session_label=sessions_to_analyze,
                                         analysis_level=level,
                                         dep_ids=dep_ids)
                id_sources.append(filename.replace('.json', '.*bid'))

            dep_ids = get_dep_ids(id_sources)

        elif level.startswith('group'):
            filename = "level-%s_subtask.json" % (level)
            prepare_and_save_subtask(tool_class=args.app_descriptor_file,
                                     app_name=descriptor_dict['name'],
                                     filename=filename,
                                     invocation_dict=invocation_dict,
                                     participant_label=participants_to_analyze,
                                     session_label=sessions_to_analyze,
                                     analysis_level=level,
                                     dep_ids=dep_ids)
            id_sources.append(filename.replace('.json', '.*bid'))
            dep_ids = get_dep_ids(id_sources)
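The entrypoint above reads what appears to be a Boutiques-style app descriptor plus an invocation file. A hedged example of a minimal invocation JSON it could consume; only the keys (bids_dir, participant_label, session_label) come from the code above, the values are placeholders:

import json

invocation = {
    "bids_dir": "/data/bids_dataset",   # placeholder path to the BIDS dataset
    "participant_label": ["01", "02"],  # optional: restrict to these participants
    "session_label": ["1"],             # optional: only used when the descriptor declares session_label
}
with open("invocation.json", "w") as f:
    json.dump(invocation, f, indent=2)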