Example #1
import pytest
from nilearn.maskers import NiftiSpheresMasker

from neuroquery import img_utils  # assumed: img_utils comes from the neuroquery package


def test_gaussian_coord_smoothing():
    coords = [(0.0, 0.0, 0.0), (10.0, -10.0, 30.0)]
    computed_img = img_utils.gaussian_coord_smoothing(coords)
    masker = NiftiSpheresMasker(coords + [(-10.0, 10.0, -30)]).fit()
    values = masker.transform(computed_img)[0]
    assert (values[:2] > computed_img.get_fdata().max() / 2.0).all()
    assert values[-1] == pytest.approx(0.0)
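
Beyond the test, the same helper can be called directly. A minimal usage sketch, assuming img_utils is importable as below (the package name is an assumption) and reusing the fwhm keyword and to_filename call that appear in Example #3:

from neuroquery import img_utils  # assumed location of img_utils

peaks = [(0.0, 0.0, 0.0), (10.0, -10.0, 30.0)]
peak_img = img_utils.gaussian_coord_smoothing(peaks, fwhm=8.0)  # fwhm kwarg as in Example #3
print(peak_img.shape)                 # the result is a 3D NIfTI image
peak_img.to_filename("peaks.nii.gz")  # same saving call as in Example #3
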
Example #2
import numpy as np
import pandas as pd

from neuroquery import img_utils  # assumed: img_utils comes from the neuroquery package


def test_coordinates_to_maps():
    coords = pd.DataFrame.from_dict({
        "pmid": [3, 17, 17, 2, 2],
        "x": [0.0, 0.0, 10.0, 5.0, 3.0],
        "y": [0.0, 0.0, -10.0, 15.0, -9.0],
        "z": [27.0, 0.0, 30.0, 17.0, 177.0],
    })
    maps, masker = img_utils.coordinates_to_maps(coords)
    assert maps.shape == (3, 28542)
    coords_17 = [(0.0, 0.0, 0.0), (10.0, -10.0, 30.0)]
    img_17 = img_utils.gaussian_coord_smoothing(coords_17, target_affine=4.0)
    assert np.allclose(masker.transform(img_17),
                       maps.loc[17, :].values,
                       atol=1e-10)
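
To inspect one article's map visually, a row of `maps` can be sent back to image space. A minimal self-contained sketch, assuming the masker returned by coordinates_to_maps is a fitted nilearn masker that supports inverse_transform:

import pandas as pd

from neuroquery import img_utils  # assumed location of img_utils

coords = pd.DataFrame({
    "pmid": [17, 17],
    "x": [0.0, 10.0],
    "y": [0.0, -10.0],
    "z": [0.0, 30.0],
})
maps, masker = img_utils.coordinates_to_maps(coords)
# Send the (1, n_voxels) row for pmid 17 back to image space; assumes the
# returned masker supports nilearn's inverse_transform.
img_17 = masker.inverse_transform(maps.loc[17, :].values[None, :])
img_17.to_filename("pmid_17_map.nii.gz")
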
    help=".csv file containing the coordinates. Must have columns"
    " 'pmid', 'x', 'y', and 'z'.",
)
parser.add_argument("output_directory",
                    help="directory where generated maps are saved.")
parser.add_argument("--fwhm",
                    type=float,
                    default=8.0,
                    help="full width at half maximum")
args = parser.parse_args()

out_dir = pathlib.Path(args.output_directory)
out_dir.mkdir(parents=True, exist_ok=True)

coordinates = pd.read_csv(args.coordinates_csv)
articles = coordinates.groupby("pmid")
for i, (pmid, article_coordinates) in enumerate(articles):
    print(
        "{:.1%} pmid: {:< 20}".format(i / len(articles), pmid),
        end="\r",
        flush=True,
    )
    img_file = out_dir / "pmid_{}.nii.gz".format(pmid)
    if not img_file.is_file():
        img = img_utils.gaussian_coord_smoothing(
            article_coordinates.loc[:, ["x", "y", "z"]].values, fwhm=args.fwhm)
        img.to_filename(str(img_file))

print("\n")
print(out_dir)
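
A quick way to exercise the script is to write a small coordinates file in the expected format. A minimal sketch (the file and script names below are placeholders, not part of the original):

import pandas as pd

# Three peaks from two articles, in the required 'pmid', 'x', 'y', 'z' columns
pd.DataFrame({
    "pmid": [3, 17, 17],
    "x": [0.0, 0.0, 10.0],
    "y": [0.0, 0.0, -10.0],
    "z": [27.0, 0.0, 30.0],
}).to_csv("coords.csv", index=False)

# then, assuming the script above is saved as make_coordinate_maps.py:
#   python make_coordinate_maps.py coords.csv output_maps --fwhm 8.0
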
Example #4
# ------------------------------
# We load example subject-level t-maps from a localizer dataset, and also
# generate a brain map from a set of MNI coordinates.

queries = {}

contrasts = ["left vs right button press", "sentence listening"]

for contrast in contrasts:
    query_map = fetch_localizer_contrasts([contrast],
                                          n_subjects=1,
                                          get_tmaps=True)["tmaps"][0]
    queries[contrast] = query_map

dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]
dmn_img = gaussian_coord_smoothing(dmn_coords, encoder.get_masker())
masked_dmn = encoder.get_masker().transform(dmn_img).ravel()
queries["DMN coordinates"] = dmn_img

######################################################################
# Discover which terms have activations similar to the query map
# --------------------------------------------------------------
# Here we simply take the dot product of the absolute values of the query map
# with each term map.

for name, query_map in queries.items():
    masked_query = encoder.get_masker().transform(query_map).ravel()
    similarities = np.abs(masked_query).dot(term_maps.T)
    # rescale by document frequencies, optional
    similarities *= np.log(1 + encoder.document_frequencies().values.ravel())
    top_20 = np.argsort(similarities)[::-1][:20]
    top_terms = voc[top_20].ravel()
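    # Added sketch: `top_terms` now holds the 20 best-matching vocabulary
    # entries for this query, highest similarity first; print a short summary.
    print(name, ":", ", ".join(map(str, top_terms[:5])))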