Example #1
def synthetic(request):
    root = join(get_test_data_path(), 'synthetic')
    if request.param == 'preproc':
        layout = BIDSLayout(root, derivatives=True)
        dataset = load_variables(layout, skip_empty=True, desc='preproc',
                                 space='T1w')
    else:
        layout = BIDSLayout(root)
        dataset = load_variables(layout, skip_empty=True)
    yield request.param, dataset
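
This fixture branches on request.param, which implies a parametrized pytest fixture; the excerpt omits the decorator. A minimal sketch of how such a fixture is presumably declared and consumed (the non-preproc param name 'full' is an assumption based on the branch above):

import pytest

# Hypothetical declaration; the excerpt above omits the decorator, and the
# non-preproc param name ('full') is an assumption.
@pytest.fixture(params=['full', 'preproc'])
def synthetic(request):
    dataset = object()  # stand-in for the load_variables(...) result above
    yield request.param, dataset

def test_dataset_loads(synthetic):
    param, dataset = synthetic  # the fixture yields a (param, dataset) tuple
    assert param in ('full', 'preproc')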
Example #2
def test_load_participants(layout1):
    index = load_variables(layout1, types='participants')
    assert isinstance(index, NodeIndex)
    dataset = index.get_nodes(level='dataset')[0]
    assert isinstance(dataset, Node)
    assert len(dataset.variables) == 2
    assert {'age', 'sex'} == set(dataset.variables.keys())
    age = dataset.variables['age']
    assert isinstance(age, SimpleVariable)
    assert age.index.shape == (16, 2)
    assert age.values.shape == (16,)

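    # 'subject' accepts regular expressions; with 16 subjects, '^1.*'
    # presumably matches the seven IDs beginning with '1', per the asserts.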
    index = load_variables(layout1, types='participants', subject=['^1.*'])
    age = index.get_nodes(level='dataset')[0].variables['age']
    assert age.index.shape == (7, 2)
    assert age.values.shape == (7,)
Example #3
def test_downsampling(tmpdir):
    tmpdir.chdir()
    os.makedirs('sub-01/func')
    import numpy as np
    TR, newTR, nvols, newvols = 2.00000, 6.0, 90, 30
    Fs = 1 / TR
    t = np.linspace(0, int(nvols / Fs), nvols, endpoint=False)
    values = np.sin(0.025 * 2 * np.pi * t) + np.cos(0.1166 * 2 * np.pi * t)
    with open('sub-01/func/sub-01_task-task_events.tsv', 'w') as fobj:
        fobj.write('onset\tduration\tval\n')
        for idx, val in enumerate(values):
            fobj.write('%f\t%f\t%f\n' % (idx * TR, TR, val))
    with open('sub-01/func/sub-01_task-task_bold.json', 'w') as fobj:
        json.dump({'RepetitionTime': TR}, fobj)

    dataobj = np.zeros((5, 5, 5, nvols), dtype=np.int16)
    affine = np.diag((2.5, 2.5, 2.5, 1))
    img = nb.Nifti1Image(dataobj, affine)
    img.header.set_zooms((2.5, 2.5, 2.5, TR))
    img.to_filename('sub-01/func/sub-01_task-task_bold.nii.gz')

    layout = BIDSLayout('.', validate=False)
    coll = load_variables(layout).get_collections('run')[0]
    dense_var = coll.variables['val'].to_dense(1.0 / TR)
    regressor = dense_var.resample(1.0 / newTR).values
    assert regressor.shape == (newvols, 1)
    # This checks that anti-alias filtering has happened. If it has not,
    # the out-of-band 0.1166 Hz component will alias into this frequency
    # bin and have a very different amplitude.
    assert np.allclose(
        np.abs(np.fft.fft(regressor.values.ravel()))[9], 0.46298273)
    # This checks that the signal (0.025 Hz) within the new Nyquist
    # rate actually gets passed through.
    assert np.allclose(
        np.abs(np.fft.fft(regressor.values.ravel()))[4], 8.88189504)
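
The two FFT bins checked above follow from the signal's construction: the resampled series spans nvols * TR = 180 s, so bin k corresponds to k / 180 Hz. A quick sanity check of that arithmetic (the expected amplitudes themselves come from the test, not from this sketch):

duration = 90 * 2.0              # nvols * TR = 180 s of signal
df = 1.0 / duration              # FFT bin width: ~0.00556 Hz
new_nyquist = 1.0 / (2 * 6.0)    # 0.0833 Hz for the 6 s sampling interval

# The 0.025 Hz component sits below the new Nyquist rate and should pass:
print(0.025 / df)                # ~4.5 -> energy lands around bin 4

# The 0.1166 Hz component exceeds the new Nyquist rate; without filtering
# it would alias to |0.1166 - 1/6| ~= 0.05 Hz, i.e. bin 9, so a small
# amplitude there indicates the anti-alias filter worked:
print(abs(0.1166 - 1 / 6.0) / df)  # ~9.0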
Example #4
def test_load_participants(layout1):
    index = load_variables(layout1, types='participants')
    assert isinstance(index, NodeIndex)
    dataset = index.get_nodes(level='dataset')[0]
    assert isinstance(dataset, Node)
    assert len(dataset.variables) == 2
    assert {'age', 'sex'} == set(dataset.variables.keys())
    age = dataset.variables['age']
    assert isinstance(age, SimpleVariable)
    assert age.index.shape == (16, 1)
    assert age.values.shape == (16,)

    index = load_variables(layout1, types='participants', subject=['^1.*'])
    age = index.get_nodes(level='dataset')[0].variables['age']
    assert age.index.shape == (7, 1)
    assert age.values.shape == (7,)
Example #5
    def get_collections(self, level, types=None, variables=None, merge=False,
                        sampling_rate=None, skip_empty=False, **kwargs):
        """Return one or more variable Collections in the BIDS project.

        Args:
            level (str): The level of analysis to return variables for. Must be
                one of 'run', 'session', 'subject', or 'dataset'.
            types (str, list): Types of variables to retrieve. All valid
                values reflect the filename stipulated in the BIDS spec for
                each kind of variable. Valid values include: 'events',
                'physio', 'stim', 'scans', 'participants', 'sessions', and
                'regressors'.
            variables (list): Optional list of variable names to return. If
                None, all available variables are returned.
            merge (bool): If True, variables are merged across all observations
                of the current level. E.g., if level='subject', variables from
                all subjects will be merged into a single collection. If False,
                each observation is handled separately, and the result is
                returned as a list.
            sampling_rate (int, str): If level='run', the sampling rate to
                pass onto the returned BIDSRunVariableCollection.
            skip_empty (bool): Whether or not to skip empty Variables (i.e.,
                where there are no rows/records in a file after applying any
                filtering operations like dropping NaNs).
            kwargs: Optional additional arguments to pass onto load_variables.
        """
        from bids.variables import load_variables
        index = load_variables(self, types=types, levels=level,
                               skip_empty=skip_empty, **kwargs)
        return index.get_collections(level, variables, merge,
                                     sampling_rate=sampling_rate)
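
A hedged usage sketch for get_collections; the dataset path is a placeholder, and the variable name is an assumption:

from bids import BIDSLayout

layout = BIDSLayout('/data/bids_dataset')  # placeholder path

# One merged collection across all runs, with a 10 Hz sampling rate:
coll = layout.get_collections(level='run', types='events', merge=True,
                              sampling_rate=10)

# One collection per run, restricted to a named variable ('RT' is assumed):
colls = layout.get_collections(level='run', variables=['RT'], merge=False)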
Example #6
def synthetic(request):
    import bids.config
    # Pin the extension_initial_dot setting rather than testing both options
    with patch.dict('bids.config._settings'):
        bids.config.set_option('extension_initial_dot', True)
        root = join(get_test_data_path(), 'synthetic')
        if request.param == 'preproc':
            layout = BIDSLayout(root, derivatives=True)
            dataset = load_variables(layout,
                                     skip_empty=True,
                                     desc='preproc',
                                     space='T1w')
        else:
            layout = BIDSLayout(root)
            dataset = load_variables(layout, skip_empty=True)
        yield request.param, dataset
Example #7
def synthetic(request):
    path = join(get_test_data_path(), 'synthetic')
    if request.param == "preproc":
        set_option('loop_preproc', True)
        path = (path, ['bids', 'derivatives'])
    layout = BIDSLayout(path)
    yield request.param, load_variables(layout)
    set_option('loop_preproc', False)
Example #8
def test_merge_sparse_run_variables(layout1):
    dataset = load_variables(layout1, types='events', scan_length=480)
    runs = dataset.get_nodes('run')
    variables = [r.variables['RT'] for r in runs]
    n_rows = sum([len(c.values) for c in variables])
    merged = merge_variables(variables)
    assert len(merged.values) == n_rows
    assert set(merged.index.columns) == set(variables[0].index.columns)
Example #9
def test_get_collections_merged(layout1):
    dataset = load_variables(layout1, scan_length=480)
    collection = dataset.get_collections('run', merge=True)
    assert isinstance(collection, BIDSRunVariableCollection)
    assert len(collection.variables) == 8
    vals = collection.variables['RT'].values
    ents = collection.variables['RT'].index
    assert len(ents) == len(vals) == 4096
    assert set(ents.columns) == {'task', 'run', 'subject', 'suffix', 'datatype'}
Example #10
def test_merge_simple_variables(layout2):
    index = load_variables(layout2, types='sessions')
    subjects = index.get_nodes('subject')
    variables = [s.variables['panas_sad'] for s in subjects]
    n_rows = sum([len(c.values) for c in variables])
    merged = merge_variables(variables)
    assert len(merged.values) == n_rows
    assert set(merged.index.columns) == set(variables[0].index.columns)
    assert variables[3].values.iloc[1] == merged.values.iloc[7]
Example #11
def test_get_collections_merged(layout1):
    dataset = load_variables(layout1, scan_length=480)
    collection = dataset.get_collections('run', merge=True)
    assert isinstance(collection, BIDSRunVariableCollection)
    assert len(collection.variables) == 8
    vals = collection.variables['RT'].values
    ents = collection.variables['RT'].index
    assert len(ents) == len(vals) == 4096
    assert set(ents.columns) == {'task', 'run', 'subject'}
Example #12
def test_get_nodes(layout1):
    index = load_variables(layout1, scan_length=480)
    nodes = index.get_nodes('session')
    assert len(nodes) == 0
    nodes = index.get_nodes('dataset')
    assert len(nodes) == 1
    assert all([isinstance(n, Node) for n in nodes])
    nodes = index.get_nodes('run', {'subject': ['01', '02', '03']})
    assert len(nodes) == 9
    assert all([isinstance(n, RunNode) for n in nodes])
Example #13
def synthetic(request):
    root = join(get_test_data_path(), 'synthetic')
    default_preproc = get_option('loop_preproc')
    if request.param == 'preproc':
        set_option('loop_preproc', True)
        layout = BIDSLayout((root, ['bids', 'derivatives']))
    else:
        set_option('loop_preproc', default_preproc)
        layout = BIDSLayout(root, exclude='derivatives')
    yield request.param, load_variables(layout, skip_empty=True)
    set_option('loop_preproc', default_preproc)
Example #14
def test_load_events(layout1):
    index = load_variables(layout1, types='events', scan_length=480)
    runs = index.get_nodes(level='run', entities={'subject': '01'})
    assert len(runs) == 3
    assert isinstance(runs[0], RunNode)
    variables = runs[0].variables
    assert len(variables) == 8
    targ_cols = {'parametric gain', 'PTval', 'trial_type', 'respnum'}
    assert not (targ_cols - set(variables.keys()))
    assert isinstance(variables['parametric gain'], SparseRunVariable)
    assert variables['parametric gain'].index.shape == (86, 5)
    assert variables['parametric gain'].source == 'events'
Example #15
def test_load_events(layout1):
    index = load_variables(layout1, types='events', scan_length=480)
    runs = index.get_nodes(level='run', entities={'subject': '01'})
    assert len(runs) == 3
    assert isinstance(runs[0], RunNode)
    variables = runs[0].variables
    assert len(variables) == 8
    targ_cols = {'parametric gain', 'PTval', 'trial_type', 'respnum'}
    assert not (targ_cols - set(variables.keys()))
    assert isinstance(variables['parametric gain'], SparseRunVariable)
    assert variables['parametric gain'].index.shape == (86, 3)
    assert variables['parametric gain'].source == 'events'
Example #16
    def get_collections(self,
                        level,
                        types=None,
                        variables=None,
                        merge=False,
                        sampling_rate=None,
                        **kwargs):
        from bids.variables import load_variables
        index = load_variables(self, types=types, levels=level, **kwargs)
        return index.get_collections(level,
                                     variables,
                                     merge,
                                     sampling_rate=sampling_rate)
Example #17
def test_merge_densified_variables(layout1):
    SR = 10
    dataset = load_variables(layout1, types='events', scan_length=480)
    runs = dataset.get_nodes('run')
    vars_ = [r.variables['RT'].to_dense(SR) for r in runs]
    dense = merge_variables(vars_)
    assert isinstance(dense, DenseRunVariable)
    n_rows = 480 * SR
    assert dense.values.shape == (len(runs) * n_rows, 1)
    for i in range(len(runs)):
        onset = i * n_rows
        offset = onset + n_rows
        run_vals = vars_[i].values
        dense_vals = dense.values.iloc[onset:offset].reset_index(drop=True)
        assert dense_vals.equals(run_vals)
Example #18
def test_filter_simple_variable(layout2):
    dataset = load_variables(layout2, types=['scans'])
    sessions = dataset.get_nodes('session')
    variables = [s.variables['surroundings'] for s in sessions]
    merged = merge_variables(variables)
    assert merged.to_df().shape == (60, 9)
    filt = merged.filter({'acq': 'fullbrain'})
    assert filt.to_df().shape == (40, 9)
    flt1 = merged.filter({'acq': 'fullbrain', 'subject': ['01', '02']}).to_df()
    assert flt1.shape == (8, 9)
    flt2 = merged.filter(query='acq=="fullbrain" and subject in ["01", "02"]')
    flt2 = flt2.to_df()
    assert flt1.equals(flt2)
    assert merged.filter({'nonexistent': 2}, strict=True) is None
    merged.filter({'acq': 'fullbrain'}, inplace=True)
    assert merged.to_df().shape == (40, 9)
Example #19
def test_sparse_run_variable_to_dense(layout1):
    index = load_variables(layout1, types='events', scan_length=480)
    runs = index.get_nodes('run', {'subject': ['01', '02']})

    for _, run in enumerate(runs):
        var = run.variables['RT']
        dense = var.to_dense(20)

        # All sparse values must occur in the dense variable
        dense_vals = set(np.unique(dense.values.values))
        sparse_vals = set(np.unique(var.values.values)) | {0}
        assert not (sparse_vals - dense_vals)

        assert len(dense.values) > len(var.values)
        assert isinstance(dense, DenseRunVariable)
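        # 9600 rows = scan_length (480 s) x the 20 Hz densification rate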
        assert dense.values.shape == (9600, 1)
        assert len(dense.run_info) == len(var.run_info)
        assert dense.source == 'events'
Example #20
def test_filter_simple_variable(layout2):
    dataset = load_variables(layout2, types=['scans'])
    sessions = dataset.get_nodes('session')
    variables = [s.variables['surroundings'] for s in sessions]
    merged = merge_variables(variables)
    assert merged.to_df().shape == (60, 9)
    filt = merged.filter({'acquisition': 'fullbrain'})
    assert filt.to_df().shape == (40, 9)
    flt1 = merged.filter({'acquisition': 'fullbrain',
                          'subject': ['01', '02']}).to_df()
    assert flt1.shape == (8, 9)
    query = 'acquisition=="fullbrain" and subject in ["01", "02"]'
    flt2 = merged.filter(query=query)
    flt2 = flt2.to_df()
    assert flt1.equals(flt2)
    assert merged.filter({'nonexistent': 2}, strict=True) is None
    merged.filter({'acquisition': 'fullbrain'}, inplace=True)
    assert merged.to_df().shape == (40, 9)
Example #21
def test_sparse_run_variable_to_dense(layout1):
    index = load_variables(layout1, types='events', scan_length=480)
    runs = index.get_nodes('run', {'subject': ['01', '02']})

    for i, run in enumerate(runs):
        var = run.variables['RT']
        dense = var.to_dense(20)

        # Check that all unique values are identical
        sparse_vals = set(np.unique(var.values.values)) | {0}
        dense_vals = set(np.unique(dense.values.values))
        assert sparse_vals == dense_vals

        assert len(dense.values) > len(var.values)
        assert isinstance(dense, DenseRunVariable)
        assert dense.values.shape == (9600, 1)
        assert len(dense.run_info) == len(var.run_info)
        assert dense.source == 'events'
Example #22
def test_resampling_edge_case(tmpdir, TR, nvols):
    tmpdir.chdir()
    os.makedirs('sub-01/func')
    with open('sub-01/func/sub-01_task-task_events.tsv', 'w') as fobj:
        fobj.write('onset\tduration\tval\n1\t0.1\t1\n')
    with open('sub-01/func/sub-01_task-task_bold.json', 'w') as fobj:
        json.dump({'RepetitionTime': TR}, fobj)

    dataobj = np.zeros((5, 5, 5, nvols), dtype=np.int16)
    affine = np.diag((2.5, 2.5, 2.5, 1))
    img = nb.Nifti1Image(dataobj, affine)
    img.header.set_zooms((2.5, 2.5, 2.5, TR))
    img.to_filename('sub-01/func/sub-01_task-task_bold.nii.gz')

    layout = BIDSLayout('.', validate=False)
    coll = load_variables(layout).get_collections('run')[0]
    dense_var = coll.variables['val'].to_dense(coll.sampling_rate)
    regressor = dense_var.resample(1.0 / TR).values
    assert regressor.shape == (nvols, 1)
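
TR and nvols arrive as test parameters; the excerpt omits the decorator. A sketch of how this test is presumably parametrized (the specific values are illustrative, not from the source):

import pytest

# Hypothetical parameter values, chosen only to illustrate the mechanism.
@pytest.mark.parametrize("TR, nvols", [(2.0, 30), (2.00001, 30)])
def test_resampling_edge_case(tmpdir, TR, nvols):
    ...  # body as in the example above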
Example #23
def test_densify_merged_variables(layout1):
    SR = 10
    dataset = load_variables(layout1, types='events', scan_length=480)
    runs = dataset.get_nodes('run')
    vars_ = [r.variables['RT'] for r in runs]
    var = merge_variables(vars_)
    dense = var.to_dense(SR)
    assert isinstance(dense, DenseRunVariable)
    n_rows = 480 * SR
    assert dense.values.shape == (len(runs) * n_rows, 1)
    for i in range(len(runs)):
        onset = i * n_rows
        offset = onset + n_rows
        # values from the merged variable should exactly match the values
        # obtained by densifying each run separately
        run_vals = vars_[i].to_dense(SR).values.values.ravel()
        dense_vals = dense.values.values.ravel()[onset:offset]
        assert run_vals.shape == dense_vals.shape
        assert np.array_equal(run_vals, dense_vals)
Example #24
    def get_collections(self,
                        level,
                        types=None,
                        variables=None,
                        merge=False,
                        sampling_rate=None,
                        skip_empty=False,
                        **kwargs):
        ''' Return one or more Collections containing variables found in the
        BIDS project.

        Args:
            level (str): The level of analysis to return variables for. Must be
                one of 'run', 'session', 'subject', or 'dataset'.
            types (str, list): Types of variables to retrieve. All valid
                values reflect the filename stipulated in the BIDS spec for
                each kind of variable. Valid values include: 'events',
                'physio', 'stim', 'scans', 'participants', 'sessions', and
                'confounds'.
            variables (list): Optional list of variable names to return. If
                None, all available variables are returned.
            merge (bool): If True, variables are merged across all observations
                of the current level. E.g., if level='subject', variables from
                all subjects will be merged into a single collection. If False,
                each observation is handled separately, and the result is
                returned as a list.
            sampling_rate (int, str): If level='run', the sampling rate to
                pass onto the returned BIDSRunVariableCollection.
            skip_empty (bool): Whether or not to skip empty Variables (i.e.,
                where there are no rows/records in a file after applying any
                filtering operations like dropping NaNs).
            kwargs: Optional additional arguments to pass onto load_variables.

        '''
        from bids.variables import load_variables
        index = load_variables(self,
                               types=types,
                               levels=level,
                               skip_empty=skip_empty,
                               **kwargs)
        return index.get_collections(level,
                                     variables,
                                     merge,
                                     sampling_rate=sampling_rate)
Example #25
def auto_model(layout, scan_length=None, one_vs_rest=False):
    """Create a simple default model for each of the tasks in a BIDSLayout.
    Contrasts each trial type against all other trial types at the run
    level, then uses dummy contrasts at each higher level present to
    aggregate these results up.

    Parameters
    ----------
    layout : :obj:`bids.layout.BIDSLayout`
        A BIDSLayout instance
    scan_length : int
        Scan length for loading event variables in cases where the scan
        length cannot be read from the nifti. Primarily for testing.
    one_vs_rest : bool
        Set to True if you would like to autogenerate contrasts of each
        trial type against every other trial type.

    Returns
    -------
    list
        list of model dictionaries for each task
    """

    base_name = layout._root.name
    tasks = layout.entities['task'].unique()
    task_models = []

    for task_name in tasks:
        # Populate model meta-data
        model = OrderedDict()
        model["Name"] = "_".join([base_name, task_name])
        model["Description"] = ("Autogenerated model for the %s task from %s" %
                                (task_name, base_name))
        model["Input"] = {"Task": task_name}
        nodes = []

        # Make run level block
        transformations = OrderedDict(Name='Factor', Input=['trial_type'])
        run = OrderedDict(Level='Run',
                          Name='Run',
                          Transformations=[transformations])

        # Get trial types
        run_nodes = load_variables(layout,
                                   task=task_name,
                                   levels=['run'],
                                   scan_length=scan_length)

        evs = []
        for n in run_nodes.nodes:
            evs.extend(n.variables['trial_type'].values.values)
        trial_types = np.unique(evs)
        trial_type_factors = ["trial_type." + tt for tt in trial_types]

        # Add HRF
        run['Transformations'].append(
            OrderedDict(Name='Convolve', Input=trial_type_factors))

        run_model = OrderedDict(X=trial_type_factors)
        run["Model"] = run_model

        if one_vs_rest:
            # If there are multiple trial types, build contrasts
            contrasts = []
            for tt in trial_types:
                cdict = OrderedDict()
                if len(trial_types) > 1:
                    cdict["Name"] = "run_" + tt + "_vs_others"
                else:
                    cdict["Name"] = "run_" + tt
                cdict["ConditionList"] = trial_type_factors

                # Calculate weights for contrast
                weights = np.ones(len(trial_types))
                try:
                    weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
                except ZeroDivisionError:
                    pass
                cdict["Weights"] = list(weights)

                cdict["Test"] = "t"
                contrasts.append(cdict)

            run["Contrasts"] = contrasts
        nodes.append(run)

        if one_vs_rest:
            # if there are multiple sessions, t-test run level contrasts at
            # session level
            sessions = layout.get_sessions()
            if len(sessions) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
                nodes.append(
                    _make_passthrough_contrast("Session", contrast_names,
                                               "FEMA"))

            subjects = layout.get_subjects()
            if len(subjects) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
                nodes.append(
                    _make_passthrough_contrast("Subject", contrast_names,
                                               "FEMA"))

            # get contrasts names from previous block
            contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]]
            nodes.append(
                _make_passthrough_contrast("Dataset", contrast_names, "t"))

        model["Nodes"] = nodes
        task_models.append(model)

    return task_models
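
A hedged usage sketch for auto_model; the dataset path is a placeholder, and the import path may vary across pybids versions:

import json
from bids import BIDSLayout
from bids.modeling import auto_model  # location is version-dependent

layout = BIDSLayout('/data/bids_dataset')  # placeholder path
models = auto_model(layout, scan_length=480, one_vs_rest=True)

# One model dictionary per task; inspect the first one:
print(json.dumps(models[0], indent=2))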
Example #26
def auto_model(layout, scan_length=None, one_vs_rest=False):
    '''Create a simple default model for each of the tasks in a BIDSLayout.
    Contrasts each trial type against all other trial types at the run
    level, then uses t-tests at each higher level present to aggregate
    these results up.

    Args:
        layout (BIDSLayout) A BIDSLayout instance
        scan_length (Int) Scan length for loading event variables in cases
             where the scan length cannot be read from the nifti.
             Primarily for testing.
        one_vs_rest (Bool) Set to True if you would like to autogenerate
             contrasts of each trial type against every other trial type.

    Returns:
        models (list) list of model dictionaries for each task
    '''

    base_name = split(layout.root)[-1]
    tasks = layout.entities['bids.task'].unique()
    task_models = []

    for task_name in tasks:
        # Populate model meta-data
        model = OrderedDict()
        model["name"] = "_".join([base_name, task_name])
        model["description"] = ("Autogenerated model for the %s task from %s" %
                                (task_name, base_name))
        model["input"] = {"task": task_name}
        blocks = []

        # Make run level block
        transformations = OrderedDict(name='factor', input=['trial_type'])
        run = OrderedDict(level='run',
                          name='run',
                          transformations=[transformations])

        # Get trial types
        run_nodes = load_variables(layout,
                                   task=task_name,
                                   levels=['run'],
                                   scan_length=scan_length)

        evs = []
        for n in run_nodes.nodes:
            evs.extend(n.variables['trial_type'].values.values)
        trial_types = np.unique(evs)
        trial_type_factors = ["trial_type." + tt for tt in trial_types]

        run_model = OrderedDict(HRF_variables=trial_type_factors,
                                variables=['trial_type'])
        run["model"] = run_model

        if one_vs_rest:
            # if there are multiple trial types, build contrasts
            contrasts = []
            for i, tt in enumerate(trial_types):
                cdict = OrderedDict()
                if len(trial_types) > 1:
                    cdict["name"] = "run_" + tt + "_vs_others"
                else:
                    cdict["name"] = "run_" + tt
                cdict["condition_list"] = trial_type_factors

                # Calculate weights for contrast
                weights = np.ones(len(trial_types))
                try:
                    weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
                except ZeroDivisionError:
                    pass
                cdict["weights"] = list(weights)

                cdict["type"] = "T"
                contrasts.append(cdict)

            run["contrasts"] = contrasts
        blocks.append(run)

        if one_vs_rest:
            # if there are multiple sessions, t-test run level contrasts at
            # session level
            sessions = layout.get_sessions()
            if len(sessions) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["name"] for cc in blocks[-1]["contrasts"]]
                blocks.append(
                    _make_passthrough_contrast("session", contrast_names))

            subjects = layout.get_subjects()
            if len(subjects) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["name"] for cc in blocks[-1]["contrasts"]]
                blocks.append(
                    _make_passthrough_contrast("subject", contrast_names))

            # get contrasts names from previous block
            contrast_names = [cc["name"] for cc in blocks[-1]["contrasts"]]
            blocks.append(_make_passthrough_contrast("dataset",
                                                     contrast_names))

        model["blocks"] = blocks
        task_models.append(model)

    return task_models
Example #27
def test_get_collections_unmerged(layout2):
    dataset = load_variables(layout2, types=['sessions'], scan_length=480)
    colls = dataset.get_collections('subject', merge=False)
    assert len(colls) == 10
    assert len(colls[0].variables) == 94
    assert colls[0]['panas_at_ease'].values.shape == (2,)
Example #28
def synthetic():
    path = join(get_test_data_path(), 'synthetic')
    layout = BIDSLayout(path)
    return load_variables(layout)
Example #29
def auto_model(layout, scan_length=None, one_vs_rest=False):
    '''Create a simple default model for each of the tasks in a BIDSLayout.
    Contrasts each trial type against all other trial types at the run
    level, then uses t-tests at each higher level present to aggregate
    these results up.

    Args:
        layout (BIDSLayout) A BIDSLayout instance
        scan_length (Int) Scan length for loading event variables in cases
             where the scan length cannot be read from the nifti.
             Primarily for testing.
        one_vs_rest (Bool) Set to True if you would like to autogenerate
             contrasts of each trial type against every other trial type.

    Returns:
        models (list) list of model dictionaries for each task
    '''

    base_name = split(layout.root)[-1]
    tasks = layout.entities['task'].unique()
    task_models = []

    for task_name in tasks:
        # Populate model meta-data
        model = OrderedDict()
        model["Name"] = "_".join([base_name, task_name])
        model["Description"] = ("Autogenerated model for the %s task from %s" %
                                (task_name, base_name))
        model["Input"] = {"Task": task_name}
        steps = []

        # Make run level block
        transformations = OrderedDict(Name='Factor', Input=['trial_type'])
        run = OrderedDict(Level='Run', Name='Run',
                          Transformations=[transformations])

        # Get trial types
        run_nodes = load_variables(layout, task=task_name, levels=['run'],
                                   scan_length=scan_length)

        evs = []
        for n in run_nodes.nodes:
            evs.extend(n.variables['trial_type'].values.values)
        trial_types = np.unique(evs)
        trial_type_factors = ["trial_type." + tt for tt in trial_types]

        # Add HRF
        run['Transformations'].append(
                OrderedDict(Name='Convolve', Input=trial_type_factors))

        run_model = OrderedDict(X=trial_type_factors)
        run["Model"] = run_model

        if one_vs_rest:
            # if there are multiple trial types, build contrasts
            contrasts = []
            for i, tt in enumerate(trial_types):
                cdict = OrderedDict()
                if len(trial_types) > 1:
                    cdict["Name"] = "run_" + tt + "_vs_others"
                else:
                    cdict["Name"] = "run_" + tt
                cdict["ConditionList"] = trial_type_factors

                # Calculate weights for contrast
                weights = np.ones(len(trial_types))
                try:
                    weights[trial_types != tt] = -1.0 / (len(trial_types) - 1)
                except ZeroDivisionError:
                    pass
                cdict["Weights"] = list(weights)

                cdict["Type"] = "t"
                contrasts.append(cdict)

            run["Contrasts"] = contrasts
        steps.append(run)

        if one_vs_rest:
            # if there are multiple sessions, t-test run level contrasts at
            # session level
            sessions = layout.get_sessions()
            if len(sessions) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
                steps.append(_make_passthrough_contrast("Session",
                                                         contrast_names))

            subjects = layout.get_subjects()
            if len(subjects) > 1:
                # get contrasts names from previous block
                contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
                steps.append(_make_passthrough_contrast("Subject",
                                                         contrast_names))

            # get contrasts names from previous block
            contrast_names = [cc["Name"] for cc in steps[-1]["Contrasts"]]
            steps.append(_make_passthrough_contrast("Dataset",
                                                     contrast_names))

        model["Steps"] = steps
        task_models.append(model)

    return task_models
Example #30
def synthetic():
    path = join(get_test_data_path(), 'synthetic')
    layout = BIDSLayout(path, exclude='derivatives/')
    return load_variables(layout)