Code example #1
File: test_system.py  Project: shashankg7/modl
def test_get_data_dir(tmpdir):
    # Test cache-directory resolution under different environments,
    # starting from a clean state with no cache variables set
    os.environ.pop('CACHE', None)
    os.environ.pop('SHARED_CACHE', None)

    # Default: fall back to ~/cache when nothing else is configured
    expected_base_dir = os.path.expanduser('~/cache')
    data_dir = get_cache_dirs()[0]
    assert_equal(data_dir, expected_base_dir)

    # The CACHE environment variable overrides the default
    expected_base_dir = os.path.join(str(tmpdir), 'cache')
    os.environ['CACHE'] = expected_base_dir
    data_dir = get_cache_dirs()[0]
    assert_equal(data_dir, expected_base_dir)

    # SHARED_CACHE takes precedence over CACHE
    expected_base_dir = os.path.join(str(tmpdir), 'cache_shared')
    os.environ['SHARED_CACHE'] = expected_base_dir
    data_dir = get_cache_dirs()[0]
    assert_equal(data_dir, expected_base_dir)

    # An explicit argument is honored once the variables are unset
    expected_base_dir = os.path.join(str(tmpdir), 'cache')
    os.environ.pop('CACHE', None)
    os.environ.pop('SHARED_CACHE', None)
    data_dir = get_cache_dirs(expected_base_dir)[0]
    assert_equal(data_dir, expected_base_dir)
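
The test above pins down a resolution order for the cache directory: an
explicit argument is honored, SHARED_CACHE takes precedence over CACHE, and
~/cache is the fallback. A minimal sketch of a get_cache_dirs implementation
consistent with that order (the actual function in modl may differ in
details):

import os


def get_cache_dirs(cache_dir=None):
    # Sketch only: return candidate cache directories, most specific
    # first, matching the behavior asserted in test_get_data_dir
    paths = []
    if cache_dir is not None:
        paths.append(cache_dir)
    if 'SHARED_CACHE' in os.environ:
        paths.append(os.environ['SHARED_CACHE'])
    if 'CACHE' in os.environ:
        paths.append(os.environ['CACHE'])
    paths.append(os.path.expanduser('~/cache'))
    return paths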
Code example #2
File: decompose_fmri.py  Project: shashankg7/modl
def decompose_run(
    smoothing_fwhm,
    batch_size,
    learning_rate,
    verbose,
    reduction,
    alpha,
    n_jobs,
    n_epochs,
    buffer_size,
    init,
    _seed,
):
    n_components = init['n_components']
    dict_init = load_init()
    train_data, test_data, mask = load_data()

    # Cache intermediate computations on disk
    memory = Memory(cachedir=get_cache_dirs()[0], verbose=2)

    # Score the dictionary on held-out test data during fitting
    cb = rfMRIDictionaryScorer(test_data)
    dict_fact = fMRIDictFact(
        smoothing_fwhm=smoothing_fwhm,
        mask=mask,
        memory=memory,
        memory_level=2,
        verbose=verbose,
        n_epochs=n_epochs,
        n_jobs=n_jobs,
        random_state=_seed,
        n_components=n_components,
        dict_init=dict_init,
        learning_rate=learning_rate,
        batch_size=batch_size,
        reduction=reduction,
        alpha=alpha,
        buffer_size=buffer_size,
        callback=cb,
    )
    dict_fact.fit(train_data)

    # Save the learned dictionary, then plot the maps and the test-score curve
    dict_fact.components_.to_filename('components.nii.gz')
    fig = plt.figure()
    display_maps(fig, dict_fact.components_)
    fig, ax = plt.subplots(1, 1)
    ax.plot(cb.time, cb.score, marker='o')
    plt.show()
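
To inspect the result afterwards, the saved NIfTI file can be reloaded and
displayed, for instance with nilearn (a sketch; nibabel and nilearn are
assumed installed and are not part of the snippet above):

# Reload and display the dictionary written by decompose_run
import nibabel
from nilearn import plotting

components_img = nibabel.load('components.nii.gz')
plotting.plot_prob_atlas(components_img, title='Learned dictionary')
plotting.show()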
Code example #3
File: test_fmri.py  Project: xiaomaiyun/modl
def test_dict_fact(method, memory):
    if memory:
        # Exercise the on-disk caching code path
        memory = Memory(cachedir=get_cache_dirs()[0])
        memory_level = 2
    else:
        # Without caching, only the 'masked' method is tested
        if method != 'masked':
            pytest.skip()
        memory = Memory(cachedir=None)
        memory_level = 0
    data, mask_img, components, init = _make_test_data(n_subjects=10)
    dict_fact = fMRIDictFact(n_components=4,
                             random_state=0,
                             memory=memory,
                             memory_level=memory_level,
                             mask=mask_img,
                             dict_init=init,
                             method=method,
                             reduction=2,
                             smoothing_fwhm=None,
                             n_epochs=2,
                             alpha=1)
    dict_fact.fit(data)
    # Move the component axis first and flatten each map to a vector
    maps = np.rollaxis(dict_fact.components_img_.get_data(), 3, 0)
    components = np.rollaxis(components.get_data(), 3, 0)
    maps = maps.reshape((maps.shape[0], -1))
    components = components.reshape((components.shape[0], -1))

    # Normalize both sets of maps to unit l2 norm
    S = np.sqrt(np.sum(components ** 2, axis=1))
    S[S == 0] = 1
    components /= S[:, np.newaxis]

    S = np.sqrt(np.sum(maps ** 2, axis=1))
    S[S == 0] = 1
    maps /= S[:, np.newaxis]

    # Absolute cosine similarity between ground-truth and estimated maps
    G = np.abs(components.dot(maps.T))

    # Each of the 4 ground-truth maps should have a near-exact match
    recovered_maps = np.sum(G > 0.95)
    assert recovered_maps >= 4
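
The method and memory arguments are injected by pytest. A sketch of the
parametrization that would drive this test (the exact grids used in modl's
test_fmri.py may differ):

import pytest


# Hypothetical parameter grid: each method, with and without caching
@pytest.mark.parametrize("method", ['masked', 'average', 'gram'])
@pytest.mark.parametrize("memory", [False, True])
def test_dict_fact(method, memory):
    ...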
Code example #4
# Hyperparameters
reduction = 12
alpha = 1e-3
n_epochs = 2
verbose = 15
n_jobs = 2
smoothing_fwhm = 6

# Initialize the dictionary with the 20-component Smith (2009) RSN atlas
dict_init = fetch_atlas_smith_2009().rsn20

# Hold out one subject of the ADHD dataset for scoring during fitting
dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=1, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
memory = Memory(cachedir=get_cache_dirs()[0], verbose=2)

cb = rfMRIDictionaryScorer(test_imgs, test_confounds=test_confounds)
dict_fact = fMRIDictFact(
    smoothing_fwhm=smoothing_fwhm,
    method=method,
    step_size=step_size,
    mask=mask,
    memory=memory,
    memory_level=2,
    verbose=verbose,
    n_epochs=n_epochs,
    n_jobs=n_jobs,
    random_state=1,
    n_components=n_components,
    dict_init=dict_init,
Code example #5
reduction = 12
alpha = 1e-3
n_epochs = 5
verbose = 15
n_jobs = 2
smoothing_fwhm = 6

dict_init = fetch_atlas_smith_2009().rsn20

dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=1, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
memory = Memory(cachedir=get_cache_dirs()[0],
                verbose=2)

cb = rfMRIDictionaryScorer(test_imgs, test_confounds=test_confounds)
dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
                         method=method,
                         optimizer=optimizer,
                         step_size=step_size,
                         mask=mask,
                         memory=memory,
                         memory_level=2,
                         verbose=verbose,
                         n_epochs=n_epochs,
                         n_jobs=n_jobs,
                         random_state=1,
                         n_components=n_components,
Code example #6
n_epochs = 4
verbose = 15
n_jobs = 70
smoothing_fwhm = 6
components_list = [20, 40, 80, 120, 200, 300, 500]
n_runs = 20

dict_init = fetch_atlas_smith_2009().rsn20

dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=2, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
mem = Memory(location=get_cache_dirs()[0])
masker = NiftiMasker(mask_img=mask).fit()


def fit_single(train_imgs, test_imgs, n_components, random_state):
    # One model per (n_components, random_state) pair; parallelism is
    # handled by the caller, hence n_jobs=1 below
    dict_fact = fMRIDictFact(
        smoothing_fwhm=smoothing_fwhm,
        method=method,
        step_size=step_size,
        mask=mask,
        memory=mem,
        memory_level=2,
        verbose=verbose,
        n_epochs=n_epochs,
        n_jobs=1,
        random_state=random_state,
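
fit_single fixes its own random_state and runs with n_jobs=1, so the grid
over components_list and the n_runs repetitions is presumably dispatched one
level up. A sketch of such a dispatch with joblib (assumed; the original
script's dispatch code is not shown above):

# Assumed outer loop spending the n_jobs = 70 budget across runs
from joblib import Parallel, delayed

results = Parallel(n_jobs=n_jobs, verbose=verbose)(
    delayed(fit_single)(train_imgs, test_imgs, n_components, random_state)
    for n_components in components_list
    for random_state in range(n_runs))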
Code example #7
def reduce(dataset, output_dir=None, direct=False, source='hcp_rs_concat'):
    """Create a reduced version of a given dataset.

    Unmasking must have been performed beforehand.
    """
    memory = Memory(cachedir=get_cache_dirs()[0], verbose=2)
    print('Fetch data')
    this_dataset_dir = join(get_output_dir(output_dir), 'unmasked', dataset)
    masker, X = get_raw_contrast_data(this_dataset_dir)
    print('Retrieve components')
    if source == 'craddock':
        components = fetch_craddock_parcellation().parcellate400
        niimgs = masker.inverse_transform(X.values)
        label_masker = NiftiLabelsMasker(labels_img=components,
                                         smoothing_fwhm=0,
                                         mask_img=masker.mask_img_).fit()
        # components = label_masker.inverse_transform(np.eye(400))
        print('Transform and fit data')
        Xt = label_masker.transform(niimgs)
    else:
        if source == 'msdl':
            components = [fetch_atlas_msdl()['maps']]
        else:
            data = fetch_atlas_modl()
            if source == 'hcp_rs':
                components_imgs = [data.nips2017_components256]
            elif source == 'hcp_rs_concat':
                components_imgs = [
                    data.nips2017_components16, data.nips2017_components64,
                    data.nips2017_components256
                ]
            elif source == 'hcp_336':
                components_imgs = [data.nips2017_components336]
            elif source == 'hcp_new':
                components_imgs = [
                    data.positive_new_components16,
                    data.positive_new_components64,
                    data.positive_new_components128
                ]
            elif source == 'hcp_new_big':
                components_imgs = [
                    data.positive_new_components16,
                    data.positive_new_components64,
                    data.positive_new_components512
                ]
            elif source == 'hcp_rs_positive_concat':
                components_imgs = [
                    data.positive_components16, data.positive_components64,
                    data.positive_components512
                ]
            elif source == 'hcp_new_208':
                components_imgs = [data.positive_new_components208]
            else:
                raise ValueError('Unknown source: %r' % source)

            components = masker.transform(components_imgs)
        print('Transform and fit data')
        # Build the projection matrix from the components (cached on disk)
        proj, proj_inv, _ = memory.cache(make_projection_matrix)(
            components, scale_bases=True)
        if direct:
            proj = proj_inv.T
        Xt = X.dot(proj)
    Xt = pd.DataFrame(data=Xt, index=X.index)
    this_source = source
    if direct:
        this_source += '_direct'
    this_output_dir = join(get_output_dir(output_dir), 'reduced', this_source,
                           dataset)
    if not os.path.exists(this_output_dir):
        os.makedirs(this_output_dir)
    print(join(this_output_dir, 'Xt.pkl'))
    Xt.to_pickle(join(this_output_dir, 'Xt.pkl'))
    dump(masker, join(this_output_dir, 'masker.pkl'))
    np.save(join(this_output_dir, 'components'), components)
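
A typical invocation, assuming the dataset has already been unmasked into the
layout that get_raw_contrast_data expects ('archi' is a hypothetical dataset
name, not taken from the snippet):

# Hypothetical call: reduce an unmasked dataset with the default source
reduce('archi', output_dir=None, direct=False, source='hcp_rs_concat')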