Example 1
import os
from os.path import join

import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# fMRIDictFact, rfMRIDictionaryScorer, get_raw_rest_data, display_maps and
# modl_get_output_dir are helpers from the modl package / the surrounding
# experiment script; _run is the run object that sacred injects into
# captured functions.
def compute_components(n_components,
                       batch_size,
                       learning_rate,
                       smoothing_fwhm,
                       positive,
                       reduction,
                       alpha,
                       method,
                       n_epochs,
                       verbose,
                       n_jobs,
                       _run):
    artifact_dir = join(_run.observers[0].basedir, 'artifacts')
    if not os.path.exists(artifact_dir):
        os.makedirs(artifact_dir)
    raw_res_dir = join(modl_get_output_dir(), 'unmasked', 'hcp')
    masker, data = get_raw_rest_data(raw_res_dir)

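    # Hold out a single record for scoring; 1000 records are used for
    # training.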
    train_imgs, test_imgs = train_test_split(data, train_size=1000, test_size=1, random_state=0)
    train_imgs = train_imgs['filename'].values
    test_imgs = test_imgs['filename'].values

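    # The scorer is invoked as a callback during fitting and records the
    # objective on the held-out image over time.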
    cb = rfMRIDictionaryScorer(test_imgs, info=_run.info)
    dict_fact = fMRIDictFact(method=method,
                             mask=masker,
                             verbose=verbose,
                             n_epochs=n_epochs,
                             smoothing_fwhm=smoothing_fwhm,
                             n_jobs=n_jobs,
                             random_state=1,
                             n_components=n_components,
                             positive=positive,
                             learning_rate=learning_rate,
                             batch_size=batch_size,
                             reduction=reduction,
                             alpha=alpha,
                             callback=cb,
                             )
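    # Learn the dictionary online from the training images and save the
    # component maps.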
    dict_fact.fit(train_imgs)
    dict_fact.components_img_.to_filename(join(artifact_dir,
                                               'components.nii.gz'))
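    # Figures: the learned spatial maps and the held-out score over time.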
    fig = plt.figure()
    display_maps(fig, dict_fact.components_img_)
    plt.savefig(join(artifact_dir, 'components.png'))

    fig, ax = plt.subplots(1, 1)
    ax.plot(cb.time, cb.score, marker='o')
    plt.savefig(join(artifact_dir, 'score.png'))
Example 2
import os
from os.path import join

from sklearn.model_selection import train_test_split

# get_raw_rest_data, fMRIDictFact and rfMRIDictionaryScorer are helpers from
# the modl package / the surrounding experiment script.
def compute_components(n_components, batch_size, learning_rate, positive,
                       reduction, alpha, method, n_epochs, verbose,
                       smoothing_fwhm, n_jobs, raw_dir, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    info = {}

    masker, data = get_raw_rest_data(raw_dir)

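    # With train_size=None, everything but the single held-out test record
    # is used for training.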
    train_imgs, test_imgs = train_test_split(data,
                                             train_size=None,
                                             test_size=1,
                                             random_state=0)
    train_imgs = train_imgs['filename'].values
    test_imgs = test_imgs['filename'].values

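    # artifact_dir is also passed here, presumably so the scorer can dump
    # intermediate results alongside the final components.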
    cb = rfMRIDictionaryScorer(test_imgs, info=info, artifact_dir=output_dir)
    dict_fact = fMRIDictFact(
        method=method,
        mask=masker,
        smoothing_fwhm=smoothing_fwhm,
        verbose=verbose,
        n_epochs=n_epochs,
        n_jobs=n_jobs,
        random_state=1,
        n_components=n_components,
        positive=positive,
        learning_rate=learning_rate,
        batch_size=batch_size,
        reduction=reduction,
        alpha=alpha,
        callback=cb,
    )
    dict_fact.fit(train_imgs)
    dict_fact.components_img_.to_filename(join(output_dir,
                                               'components.nii.gz'))
Example 3
from joblib import Memory
from nilearn.datasets import fetch_atlas_smith_2009
from sklearn.model_selection import train_test_split

# fetch_adhd, get_cache_dirs, fMRIDictFact and rfMRIDictionaryScorer come
# from the modl package; n_components, batch_size, learning_rate, method,
# step_size, reduction and alpha are set earlier in the original script.
n_epochs = 2
verbose = 15
n_jobs = 2
smoothing_fwhm = 6

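# Initialize the dictionary with the 20 resting-state networks of
# Smith et al. (2009).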
dict_init = fetch_atlas_smith_2009().rsn20

dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
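# Each record pairs a functional image with its confounds; hold out one
# subject for the scoring callback.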
train_data, test_data = train_test_split(data, test_size=1, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
memory = Memory(cachedir=get_cache_dirs()[0], verbose=2)

cb = rfMRIDictionaryScorer(test_imgs, test_confounds=test_confounds)
dict_fact = fMRIDictFact(
    smoothing_fwhm=smoothing_fwhm,
    method=method,
    step_size=step_size,
    mask=mask,
    memory=memory,
    memory_level=2,
    verbose=verbose,
    n_epochs=n_epochs,
    n_jobs=n_jobs,
    random_state=1,
    n_components=n_components,
    dict_init=dict_init,
    positive=True,
    learning_rate=learning_rate,
    # The original snippet is truncated here; the remaining arguments and the
    # fit call are reconstructed from the parallel examples.
    batch_size=batch_size,
    reduction=reduction,
    alpha=alpha,
    callback=cb,
)
dict_fact.fit(train_imgs, confounds=train_confounds)
Example 4
import os
import json
from os.path import join

import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split

# fetch_hcp_mask, MultiRawMasker, get_data_dirs, get_output_dir,
# get_raw_rest_data, fMRIDictFact, rfMRIDictionaryScorer and display_maps
# come from the modl package; _run is the run object that sacred injects
# into captured functions.
def compute_components(n_components, batch_size, learning_rate, method,
                       reduction, alpha, step_size, n_jobs, n_epochs, verbose,
                       source, _run):
    basedir = join(_run.observers[0].basedir, str(_run._id))
    artifact_dir = join(basedir, 'artifacts')
    if not os.path.exists(artifact_dir):
        os.makedirs(artifact_dir)

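    # 'hcp' loads precomputed unmasked records from a filename mapping;
    # other sources go through get_raw_rest_data, with a fallback directory
    # name for local runs.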
    if source == 'hcp':
        # Hack to recover data from TSP
        train_size = None
        smoothing_fwhm = 3
        test_size = 2
        data_dir = get_data_dirs()[0]
        mask = fetch_hcp_mask()
        masker = MultiRawMasker(mask_img=mask,
                                smoothing_fwhm=smoothing_fwhm,
                                detrend=True,
                                standardize=True)
        with open(join(data_dir, 'HCP_unmasked', 'mapping.json'), 'r') as f:
            mapping = json.load(f)
        data = sorted(mapping.values())
        data = [join(data_dir, filename) for filename in data]
        data = pd.DataFrame(data, columns=['filename'])
    else:
        smoothing_fwhm = 6
        train_size = 4
        test_size = 4
        raw_res_dir = join(get_output_dir(), 'unmasked', source)
        try:
            masker, data = get_raw_rest_data(raw_res_dir)
        except ValueError:  # On local machine:
            raw_res_dir = join(get_output_dir(), 'unmask', source)
            masker, data = get_raw_rest_data(raw_res_dir)

    train_imgs, test_imgs = train_test_split(data,
                                             test_size=test_size,
                                             random_state=0,
                                             train_size=train_size)
    train_imgs = train_imgs['filename'].values
    test_imgs = test_imgs['filename'].values

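    # As in the other examples, fit the dictionary with a scoring callback
    # on the held-out images.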
    cb = rfMRIDictionaryScorer(test_imgs, info=_run.info)
    dict_fact = fMRIDictFact(
        method=method,
        mask=masker,
        verbose=verbose,
        n_epochs=n_epochs,
        n_jobs=n_jobs,
        random_state=1,
        n_components=n_components,
        smoothing_fwhm=smoothing_fwhm,
        learning_rate=learning_rate,
        batch_size=batch_size,
        reduction=reduction,
        step_size=step_size,
        alpha=alpha,
        callback=cb,
    )
    dict_fact.fit(train_imgs)
    dict_fact.components_img_.to_filename(
        join(artifact_dir, 'components.nii.gz'))
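    # Save the figures and log the score trajectory into the sacred run info.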
    fig = plt.figure()
    display_maps(fig, dict_fact.components_img_)
    plt.savefig(join(artifact_dir, 'components.png'))

    fig, ax = plt.subplots(1, 1)
    ax.plot(cb.cpu_time, cb.score, marker='o')
    _run.info['time'] = cb.cpu_time
    _run.info['score'] = cb.score
    _run.info['iter'] = cb.iter
    plt.savefig(join(artifact_dir, 'score.png'))