Пример #1
0
def test_fetch_neuroquery_model():
    """Model download is cached: a second fetch must not re-download."""
    # Serve a canned archive instead of touching the network.
    canned = _FileResponse("mock-neuroquery_model.zip")
    with mock.patch("requests.get", return_value=canned) as patched_get, \
            tempfile.TemporaryDirectory() as tmp_dir:
        model_dir = datasets.fetch_neuroquery_model(tmp_dir)
        model = encoding.NeuroQueryModel.from_data_dir(model_dir)
        result = model("reading words")
        assert "z_map" in result
        # Fetching again into the same directory must hit the cache,
        # so requests.get is still called exactly once overall.
        model_dir = datasets.fetch_neuroquery_model(tmp_dir)
        patched_get.assert_called_once()
Пример #2
0
def test_fetch_neuroquery_model():
    """Downloading, caching, and retry-exhaustion of the model fetch."""
    # First scenario: a working getter — one call per fetch, no caching
    # across the two fetch calls recorded by this fake getter.
    good_getter = _FileGetter("mock-neuroquery_data-master.zip")
    with mock.patch("requests.get", good_getter):
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_dir = datasets.fetch_neuroquery_model(tmp_dir)
            model = text_to_brain.TextToBrain.from_data_dir(model_dir)
            result = model("reading words")
            model_dir = datasets.fetch_neuroquery_model(tmp_dir)
        assert good_getter.n_calls == 2
        assert "z_map" in result

    # Second scenario: a getter that fails 7 times — the fetch gives up
    # with RuntimeError after 5 attempts.
    failing_getter = _FileGetter("mock-neuroquery_data-master.zip", 7)
    with mock.patch("requests.get", failing_getter):
        with tempfile.TemporaryDirectory() as tmp_dir:
            with pytest.raises(RuntimeError):
                datasets.fetch_neuroquery_model(tmp_dir)
        assert failing_getter.n_calls == 5
Пример #3
0
# Fix: `pathlib` is used below (data_dir = pathlib.Path(...)) but was
# never imported, so this script raised NameError at that line.
import pathlib

from joblib import Memory
import pandas as pd
from scipy import sparse
from nilearn import plotting

from neuroquery import datasets
from neuroquery.encoding import NeuroQueryModel
from neuroquery.img_utils import coordinates_to_maps
from neuroquery.smoothed_regression import SmoothedRegression
from neuroquery.tokenization import TextVectorizer

# Choose where to store the cache and the model once it is trained
output_directory = "trained_text_to_brain_model"
cache_directory = "cache"

# Download the pre-packaged NeuroQuery data (or reuse a cached copy) and
# work with it through pathlib.
data_dir = pathlib.Path(datasets.fetch_neuroquery_model())

# Study metadata table — presumably one row per article in the corpus;
# see the data directory's README to confirm the exact schema.
corpus_metadata = pd.read_csv(str(data_dir / "corpus_metadata.csv"))
# Text vectorizer restricted to the model's fixed vocabulary file.
vectorizer = TextVectorizer.from_vocabulary_file(
    str(data_dir / "vocabulary.csv"))

# The TFIDF features stored with NeuroQuery data correspond to the terms in
# `vocabulary.csv` and the studies in `corpus_metadata.csv`;
# see `README.md` in the data directory for details
tfidf = sparse.load_npz(str(data_dir / "corpus_tfidf.npz"))

# Peak activation coordinates reported by the studies in the corpus.
coordinates = pd.read_csv(datasets.fetch_peak_coordinates())

######################################################################
# Transform the coordinates into brain maps
# -----------------------------------------
Пример #4
0
######################################################################
# Finding terms with similar activations
# ======================================

######################################################################
# Load model and compute maps for all terms
# -----------------------------------------
# We cache the computed maps to save time when running this script again

# You can set the cache directory to a different location
cache_directory = pathlib.Path("cache")
cache_directory.mkdir(parents=True, exist_ok=True)
maps_file = cache_directory / "masked_term_maps.npy"

# NOTE(review): assumes `pathlib`, `np`, `datasets` and `NeuroQueryModel`
# were imported earlier in this script (outside the visible chunk).
encoder = NeuroQueryModel.from_data_dir(datasets.fetch_neuroquery_model())
voc = np.asarray(encoder.full_vocabulary())
if maps_file.is_file():
    # Reuse the maps cached by a previous run.
    term_maps = np.load(str(maps_file))
else:
    print("Computing brain maps for all terms in the vocabulary...")
    # voc[:, None] makes the 1-D vocabulary a column array so each term
    # is encoded as its own single-term document.
    term_maps = encoder.get_masker().transform(
        encoder.transform(voc[:, None])["brain_map"])
    print("Done")
    np.save(str(maps_file), term_maps)

######################################################################
# Obtain some example brain maps
# ------------------------------
# We load example subject-level tmaps from a localizer dataset, and also
# generate brain maps from a set of MNI coordinates.
Пример #5
0
import pathlib

import requests
from neuroquery import datasets

# Warm the local data cache: the default model, the peak-coordinate
# table, and the 2020-02-12 ensemble model.
datasets.fetch_neuroquery_model()
datasets.fetch_peak_coordinates()
datasets.fetch_neuroquery_model(model_name="ensemble_model_2020-02-12")

# Precomputed masked per-term brain maps hosted on OSF.
maps_url = "https://osf.io/n5avm/download"
data_dir = datasets.get_neuroquery_data_dir()
extra_data = pathlib.Path(data_dir) / "extra"
extra_data.mkdir(exist_ok=True, parents=True)
maps_file = extra_data / "masked_term_maps.npy"
if not maps_file.is_file():
    print("downloading neuroquery maps...")
    resp = requests.get(maps_url)
    # Fix: fail loudly on an HTTP error instead of silently writing an
    # HTML error page into the .npy file (the original wrote
    # resp.content unconditionally).
    resp.raise_for_status()
    maps_file.write_bytes(resp.content)
    print("done")
    # Release the (potentially large) response body.
    del resp
Пример #6
0
from neuroquery import datasets
from neuroquery_image_search import NeuroQueryImageSearch

# Ensure the NeuroQuery model data is available locally before use.
datasets.fetch_neuroquery_model()
# NOTE(review): constructing NeuroQueryImageSearch presumably triggers
# its own first-use setup (e.g. downloading indexes) — confirm against
# the neuroquery_image_search documentation.
NeuroQueryImageSearch()