Example #1
from os.path import isfile
from pathlib import Path

from bids import BIDSLayout


def add_all_tasks(local_path, **kwargs):
    """ Add every task found in a BIDS dataset (see add_task below). """
    local_path = Path(local_path)

    # A valid BIDS dataset must ship a dataset descriptor
    assert isfile(str(local_path / 'dataset_description.json'))

    layout = BIDSLayout(str(local_path), derivatives=True)
    dataset_ids = []
    for task in layout.get_tasks():
        dataset_ids.append(add_task(task, local_path=local_path, **kwargs))
    return dataset_ids
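A minimal usage sketch; the dataset path and the subject filter forwarded through **kwargs are hypothetical:

# Hypothetical call: ingest every task in a local BIDS dataset,
# forwarding a run filter (subject='01') to add_task via **kwargs.
dataset_ids = add_all_tasks('/tmp/Data/ds000001', subject='01')
print(dataset_ids)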
Example #2
    def _run_interface(self, runtime):
        import numpy as np
        from bids.layout import BIDSLayout

        layout = BIDSLayout(self.inputs.bids_dir, derivatives=True)

        entities = []
        extensions = ['preproc_bold.nii.gz']

        # Keep only subjects that have at least one preprocessed BOLD file
        for subject in np.sort(layout.get_subjects()):
            files = layout.get(subject=subject,
                               task=layout.get_tasks(),
                               extensions=extensions)
            if files:
                entities.append({'subject': subject})  # , 'session': session

        self._results['entities'] = entities

        return runtime
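The method above follows nipype's SimpleInterface pattern (note self._results and the runtime argument). A sketch of the surrounding class, with hypothetical spec and class names:

from nipype.interfaces.base import (BaseInterfaceInputSpec, SimpleInterface,
                                    TraitedSpec, traits)


class _GetEntitiesInputSpec(BaseInterfaceInputSpec):
    bids_dir = traits.Str(mandatory=True, desc='root of the BIDS dataset')


class _GetEntitiesOutputSpec(TraitedSpec):
    entities = traits.List(desc='one dict of BIDS entities per subject')


class GetEntities(SimpleInterface):
    # Hypothetical wrapper; _run_interface from the example goes here.
    input_spec = _GetEntitiesInputSpec
    output_spec = _GetEntitiesOutputSpec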
Example #3
from bids.layout import BIDSLayout

# Directory where your data set resides.
dataDir = '/tmp/Data/ds114'

# Creating the layout object for this BIDS data set
layout = BIDSLayout(dataDir)

# subjects
subjList = layout.get_subjects()

# sessions
sesList = layout.get_sessions()

# tasks
taskList = layout.get_tasks()

# runs
runList = layout.get_runs()

# List of all fMRI data for subject 01
fMRI_sub01 = layout.get(subject='01',
                        suffix='bold',
                        extension=['nii', 'nii.gz'],
                        return_type='file')

# Let's focus on the test session
fMRI_sub01_test = layout.get(subject='01',
                             session='test',
                             suffix='bold',
                             extension=['nii', 'nii.gz'],
                             return_type='file')
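Each of these queries returns a plain Python list, and sidecar metadata for any returned file can be looked up with get_metadata. A short sketch continuing the example (assuming the test-session query above matched at least one file):

# Each helper returns a plain list of entity values
print(subjList, sesList, taskList, runList)

# RepetitionTime from the JSON sidecar of the first matched image
if fMRI_sub01_test:
    meta = layout.get_metadata(fMRI_sub01_test[0])
    print(meta.get('RepetitionTime'))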
Example #4
def add_task(task_name, dataset_name=None, local_path=None,
             dataset_address=None, preproc_address=None,
             include_predictors=None, exclude_predictors=None,
             reingest=False, scan_length=1000,
             dataset_summary=None, url=None, task_summary=None, **kwargs):
    """ Adds a BIDS dataset task to the database.
        Args:
            task_name - task to add
            dataset_name - overide dataset name
            local_path - path to local bids dataset.
            dataset_address - remote address of BIDS dataset.
            preproc_address - remote address of preprocessed files.
            include_predictors - set of predictors to ingest
            exclude_predictors - set of predictors to exclude from ingestions
            reingest - force reingesting even if dataset already exists
            scan_length - default scan length in case it cant be found in image
            dataset_summary - Dataset summary description,
            url - Dataset external link,
            task_summary - Task summary description,
            kwargs - arguments to filter runs by
        Output:
            dataset model id
    """
    cache.clear()

    if dataset_address is not None and local_path is None:
        local_path = install(
            source=dataset_address,
            path=(Path(
                current_app.config['DATASET_DIR']) / dataset_name).as_posix()
            ).path

    local_path = Path(local_path)

    assert isfile(str(local_path / 'dataset_description.json'))

    layout = BIDSLayout(str(local_path), derivatives=True)
    if task_name not in layout.get_tasks():
        raise ValueError("Task {} not found in dataset {}".format(
            task_name, local_path))

    dataset_name = dataset_name if dataset_name is not None \
        else layout.description['Name']

    # Get or create dataset model from mandatory arguments
    dataset_model, new_ds = get_or_create(Dataset, name=dataset_name)

    if new_ds:
        dataset_model.description = layout.description
        dataset_model.summary = dataset_summary
        dataset_model.url = url
        dataset_model.dataset_address = dataset_address
        dataset_model.preproc_address = preproc_address
        dataset_model.local_path = local_path.as_posix()
        db.session.commit()
    elif not reingest:
        print("Dataset found, skipping ingestion...")
        # return dataset_model.id

    # Get or create task
    task_model, new_task = get_or_create(
        Task, name=task_name, dataset_id=dataset_model.id)

    if new_task:
        task_model.description = json.load(
            (local_path / 'task-{}_bold.json'.format(task_name)).open())
        task_model.summary = task_summary
        task_model.TR = task_model.description['RepetitionTime']
        db.session.commit()

    stims_processed = {}
    """ Parse every Run """
    print("Parsing runs")
    # NOTE: filtering with desc=None and **kwargs failed here, so fetch
    # every BOLD run for the task instead
    all_runs = layout.get(task=task_name, suffix='bold', extensions='.nii.gz')
    for img in progressbar(all_runs):
        """ Extract Run information """
        # Get entities
        entities = {entity: getattr(img, entity)
                    for entity in ['subject', 'session', 'acquisition']
                    if entity in img.entities}
        run_number = img.run if hasattr(img, 'run') else None

        run_model, new = get_or_create(
            Run, dataset_id=dataset_model.id, number=run_number,
            task_id=task_model.id, **entities)
        entities['task'] = task_model.name
        if run_number:
            run_number = str(run_number).zfill(2)
            entities['run'] = run_number

        # Get duration (helps w/ transformations)
        if img.image is not None:
            run_model.duration = (img.image.shape[3] *
                                  img.image.header.get_zooms()[-1])
        else:
            run_model.duration = scan_length

        # Put back as int
        if 'run' in entities:
            entities['run'] = int(entities['run'])

        """ Extract Predictors"""
        # Assert event files exist (for DataLad)
        for e in layout.get_nearest(
                img.path, suffix='events', all_=True, strict=False):
            assert isfile(e)

        collection = layout.get_collections(
            'run', scan_length=run_model.duration, **entities)[0]

        if 'stim_file' in collection.variables:
            stims = collection.variables.pop('stim_file')
        else:
            stims = None

        add_predictor_collection(
            collection, dataset_model.id, run_model.id,
            include=include_predictors,
            exclude=exclude_predictors, TR=task_model.TR)

        """ Ingest Stimuli """
        if stims is not None:
            for i, val in enumerate(stims.values):
                stim_path = local_path / 'stimuli' / val
                if val not in stims_processed:
                    try:
                        stim_hash = hash_stim(stim_path)
                    except OSError:
                        current_app.logger.debug(
                            '{} not found.'.format(stim_path))
                        continue

                    stims_processed[val] = stim_hash
                else:
                    stim_hash = stims_processed[val]
                stim_model, _ = add_stimulus(
                    stim_hash, path=stim_path, dataset_id=dataset_model.id)

                # Get or create Run Stimulus association
                runstim, _ = get_or_create(
                    RunStimulus, stimulus_id=stim_model.id,
                    run_id=run_model.id,
                    onset=stims.onset.tolist()[i],
                    duration=stims.duration.tolist()[i])

    """ Add GroupPredictors """
    print("Adding group predictors")
    add_group_predictors(dataset_model.id, local_path / 'participants.tsv')

    return dataset_model.id
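A minimal usage sketch, assuming an active Flask application context and a local copy of the ds114 dataset used in Example #3; the task name and predictor set are hypothetical:

# Hypothetical call: ingest one task, keeping only the trial_type predictor
dataset_id = add_task(
    'fingerfootlips',
    local_path='/tmp/Data/ds114',
    include_predictors={'trial_type'},
    reingest=True)
print(dataset_id)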