Example #1
def test_convert_sleuth_to_json_smoke():
    """Smoke test for Sleuth text file conversion."""
    out_file = os.path.abspath("temp.json")
    sleuth_file = os.path.join(get_test_data_path(), "test_sleuth_file.txt")
    sleuth_file2 = os.path.join(get_test_data_path(), "test_sleuth_file2.txt")
    sleuth_file3 = os.path.join(get_test_data_path(), "test_sleuth_file3.txt")
    sleuth_file4 = os.path.join(get_test_data_path(), "test_sleuth_file4.txt")
    sleuth_file5 = os.path.join(get_test_data_path(), "test_sleuth_file5.txt")
    # Use one input file
    io.convert_sleuth_to_json(sleuth_file, out_file)
    dset = nimare.dataset.Dataset(out_file)
    assert os.path.isfile(out_file)
    assert isinstance(dset, nimare.dataset.Dataset)
    assert dset.coordinates.shape[0] == 7
    assert len(dset.ids) == 3
    os.remove(out_file)
    # Use two input files
    io.convert_sleuth_to_json([sleuth_file, sleuth_file2], out_file)
    dset2 = nimare.dataset.Dataset(out_file)
    assert isinstance(dset2, nimare.dataset.Dataset)
    assert dset2.coordinates.shape[0] == 11
    assert len(dset2.ids) == 5
    # Use invalid input (number instead of file)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_json(5, out_file)
    # Use invalid input (one coordinate is a str instead of a number)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_json(sleuth_file3, out_file)
    # Use invalid input (one has x & y, but not z)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_json(sleuth_file4, out_file)
    # Use invalid input (bad space)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_json(sleuth_file5, out_file)
Example #2
def testdata_ibma(tmp_path_factory):
    """
    Load the pain dataset and copy its images into a temporary directory for IBMA tests.
    """
    tmpdir = tmp_path_factory.mktemp("testdata_ibma")

    # Load dataset
    dset_file = os.path.join(get_test_data_path(), "test_pain_dataset.json")
    dset_dir = os.path.join(get_test_data_path(), "test_pain_dataset")
    mask_file = os.path.join(dset_dir, "mask.nii.gz")
    dset = nimare.dataset.Dataset(dset_file, mask=mask_file)
    dset.update_path(dset_dir)
    # Move image contents of Dataset to temporary directory
    for c in dset.images.columns:
        if c.endswith("__relative"):
            continue
        for f in dset.images[c].values:
            if (f is None) or not os.path.isfile(f):
                continue
            new_f = f.replace(dset_dir.rstrip(os.path.sep),
                              str(tmpdir.absolute()).rstrip(os.path.sep))
            dirname = os.path.dirname(new_f)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            copyfile(f, new_f)
    dset.update_path(tmpdir)
    return dset
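Assuming testdata_ibma is registered as a pytest fixture (the decorator does not appear in this listing), a minimal sketch of how a test could consume it; the test name below is hypothetical:

def test_testdata_ibma_smoke(testdata_ibma):
    # pytest injects the Dataset returned by the fixture above
    assert isinstance(testdata_ibma, nimare.dataset.Dataset)
    # After update_path, the image table should be populated with usable paths
    assert not testdata_ibma.images.empty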
Example #3
def test_convert_neurosynth_to_dataset_smoke():
    """Smoke test for Neurosynth file conversion."""
    coordinates_file = os.path.join(
        get_test_data_path(),
        "data-neurosynth_version-7_coordinates.tsv.gz",
    )
    metadata_file = os.path.join(
        get_test_data_path(),
        "data-neurosynth_version-7_metadata.tsv.gz",
    )
    features = {
        "features":
        os.path.join(
            get_test_data_path(),
            "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
        ),
        "vocabulary":
        os.path.join(get_test_data_path(),
                     "data-neurosynth_version-7_vocab-terms_vocabulary.txt"),
    }
    dset = io.convert_neurosynth_to_dataset(
        coordinates_file,
        metadata_file,
        annotations_files=features,
    )
    assert isinstance(dset, nimare.dataset.Dataset)
    assert "terms_abstract_tfidf__abilities" in dset.annotations.columns
Example #4
def test_convert_sleuth_to_dataset_smoke():
    """
    Smoke test for Sleuth text file conversion.
    """
    sleuth_file = os.path.join(get_test_data_path(), "test_sleuth_file.txt")
    sleuth_file2 = os.path.join(get_test_data_path(), "test_sleuth_file2.txt")
    sleuth_file3 = os.path.join(get_test_data_path(), "test_sleuth_file3.txt")
    sleuth_file4 = os.path.join(get_test_data_path(), "test_sleuth_file4.txt")
    sleuth_file5 = os.path.join(get_test_data_path(), "test_sleuth_file5.txt")
    # Use one input file
    dset = io.convert_sleuth_to_dataset(sleuth_file)
    assert isinstance(dset, nimare.dataset.Dataset)
    assert dset.coordinates.shape[0] == 7
    assert len(dset.ids) == 3
    # Use two input files
    dset2 = io.convert_sleuth_to_dataset([sleuth_file, sleuth_file2])
    assert isinstance(dset2, nimare.dataset.Dataset)
    assert dset2.coordinates.shape[0] == 11
    assert len(dset2.ids) == 5
    # Use invalid input
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(5)
    # Use invalid input (one coordinate is a str instead of a number)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(sleuth_file3)
    # Use invalid input (one has x & y, but not z)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(sleuth_file4)
    # Use invalid input (bad space)
    with pytest.raises(ValueError):
        io.convert_sleuth_to_dataset(sleuth_file5)
Example #5
def test_convert_neurosynth_to_json_smoke():
    """Smoke test for Neurosynth file conversion."""
    out_file = os.path.abspath("temp.json")
    coordinates_file = os.path.join(
        get_test_data_path(),
        "data-neurosynth_version-7_coordinates.tsv.gz",
    )
    metadata_file = os.path.join(
        get_test_data_path(),
        "data-neurosynth_version-7_metadata.tsv.gz",
    )
    features = {
        "features":
        os.path.join(
            get_test_data_path(),
            "data-neurosynth_version-7_vocab-terms_source-abstract_type-tfidf_features.npz",
        ),
        "vocabulary":
        os.path.join(get_test_data_path(),
                     "data-neurosynth_version-7_vocab-terms_vocabulary.txt"),
    }
    io.convert_neurosynth_to_json(
        coordinates_file,
        metadata_file,
        out_file,
        annotations_files=features,
    )
    dset = nimare.dataset.Dataset(out_file)
    assert os.path.isfile(out_file)
    assert isinstance(dset, nimare.dataset.Dataset)
    os.remove(out_file)
Example #6
def test_dataset_smoke():
    """Smoke test for nimare.dataset.Dataset initialization and get methods."""
    db_file = op.join(get_test_data_path(), "neurosynth_dset.json")
    dset = dataset.Dataset(db_file)
    dset.update_path(get_test_data_path())
    assert isinstance(dset, nimare.dataset.Dataset)
    methods = [dset.get_images, dset.get_labels, dset.get_metadata, dset.get_texts]
    for method in methods:
        assert isinstance(method(), list)
        assert isinstance(method(ids=dset.ids[:5]), list)
        assert isinstance(method(ids=dset.ids[0]), list)
    assert isinstance(dset.get_images(imtype="beta"), list)
    assert isinstance(dset.get_metadata(field="sample_sizes"), list)
    assert isinstance(dset.get_studies_by_label("cogat_cognitive_control"), list)
    assert isinstance(dset.get_studies_by_coordinate(np.array([[20, 20, 20]])), list)
    mask_data = np.zeros(dset.masker.mask_img.shape, int)
    mask_data[40, 40, 40] = 1
    mask_img = nib.Nifti1Image(mask_data, dset.masker.mask_img.affine)
    assert isinstance(dset.get_studies_by_mask(mask_img), list)

    dset1 = dset.slice(dset.ids[:5])
    dset2 = dset.slice(dset.ids[5:])
    assert isinstance(dset1, dataset.Dataset)
    dset_merged = dset1.merge(dset2)
    assert isinstance(dset_merged, dataset.Dataset)
Example #7
def test_convert_neurosynth_to_dataset_smoke():
    """
    Smoke test for Neurosynth file conversion.
    """
    db_file = op.join(get_test_data_path(), 'test_neurosynth_database.txt')
    features_file = op.join(get_test_data_path(),
                            'test_neurosynth_features.txt')
    dset = io.convert_neurosynth_to_dataset(db_file, features_file)
    assert isinstance(dset, nimare.dataset.Dataset)
Example #8
def test_scale_workflow_function_smoke(tmp_path_factory):
    """Run smoke test of the SCALE workflow as a function."""
    tmpdir = tmp_path_factory.mktemp("test_scale_workflow_function_smoke")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"
    baseline = op.join(get_test_data_path(), "test_baseline.txt")

    # The same test is run with both workflow function and CLI
    workflows.scale_workflow(
        sleuth_file, baseline=baseline, output_dir=tmpdir, prefix=prefix, n_iters=5, n_cores=1
    )
    assert op.isfile(op.join(tmpdir, f"{prefix}_input_coordinates.txt"))
Example #9
def test_dataset_smoke():
    """
    Smoke test for nimare.dataset.Dataset initialization and get methods.
    """
    db_file = op.join(get_test_data_path(), 'neurosynth_dset.json')
    dset = dataset.Dataset(db_file)
    dset.update_path(get_test_data_path())
    assert isinstance(dset, nimare.dataset.Dataset)
    assert isinstance(dset.get_images(imtype='beta'), list)
    assert isinstance(dset.get_labels(), list)
    assert isinstance(dset.get_metadata(field='sample_sizes'), list)
    assert isinstance(dset.get_studies_by_label('cogat_cognitive_control'),
                      list)
    assert isinstance(dset.get_studies_by_coordinate(np.array([[20, 20, 20]])),
                      list)
Example #10
def test_convert_neurosynth_to_json_smoke():
    """
    Smoke test for Neurosynth file conversion.
    """
    out_file = os.path.abspath("temp.json")
    db_file = os.path.join(get_test_data_path(),
                           "test_neurosynth_database.txt")
    features_file = os.path.join(get_test_data_path(),
                                 "test_neurosynth_features.txt")
    io.convert_neurosynth_to_json(db_file,
                                  out_file,
                                  annotations_file=features_file)
    dset = nimare.dataset.Dataset(out_file)
    assert os.path.isfile(out_file)
    assert isinstance(dset, nimare.dataset.Dataset)
    os.remove(out_file)
Example #11
def testdata_laird():
    """
    Load the Neurosynth Laird studies dataset for tests.
    """
    testdata_laird = nimare.dataset.Dataset.load(
        os.path.join(get_test_data_path(), "neurosynth_laird_studies.pkl.gz"))
    return testdata_laird
Example #12
def test_convert_sleuth_to_dataset_smoke():
    """
    Smoke test for Sleuth text file conversion.
    """
    sleuth_file = op.join(get_test_data_path(), 'test_sleuth_file.txt')
    dset = io.convert_sleuth_to_dataset(sleuth_file)
    assert isinstance(dset, nimare.dataset.Dataset)
Example #13
def test_database_smoke():
    """
    Smoke test for nimare.dataset.Database initialization.
    """
    db_file = op.join(get_test_data_path(), 'neurosynth_dset.json')
    dbase = dataset.Database(db_file)
    assert isinstance(dbase, nimare.dataset.Database)
Example #14
def testdata_cbma_full():
    """Generate more complete coordinate-based dataset for tests.

    Same as above, except returns all coords, not just one per study.
    """
    dset_file = os.path.join(get_test_data_path(), "test_pain_dataset.json")
    dset = nimare.dataset.Dataset(dset_file)
    return dset
Example #15
def testdata_cbma():
    dset_file = os.path.join(get_test_data_path(), "nidm_pain_dset.json")
    dset = nimare.dataset.Dataset(dset_file)

    # Only retain one peak in each study in coordinates
    # Otherwise centers of mass will be obscured in kernel tests by overlapping
    # kernels
    dset.coordinates = dset.coordinates.drop_duplicates(subset=["id"])
    return dset
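A small illustration of what drop_duplicates(subset=["id"]) does to the coordinates table: only the first peak of each study is kept. The toy DataFrame below is an assumption, not NiMARE's actual column layout:

import pandas as pd

coords = pd.DataFrame({"id": ["study1", "study1", "study2"], "x": [10, 20, 30]})
# Keeps the first row per study id: ("study1", 10) and ("study2", 30)
print(coords.drop_duplicates(subset=["id"]))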
Example #16
def test_ale_workflow_function_smoke(tmp_path_factory):
    """Run smoke test for Sleuth ALE workflow."""
    tmpdir = tmp_path_factory.mktemp("test_ale_workflow_function_smoke")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"

    # The same test is run with both workflow function and CLI
    workflows.ale_sleuth_workflow(
        sleuth_file, output_dir=tmpdir, prefix=prefix, n_iters=10, n_cores=1
    )
    assert op.isfile(op.join(tmpdir, f"{prefix}_input_coordinates.txt"))
Example #17
def testdata_ibma_resample(tmp_path_factory):
    """Create dataset for image-based resampling tests."""
    tmpdir = tmp_path_factory.mktemp("testdata_ibma_resample")

    # Load dataset
    dset_file = os.path.join(get_test_data_path(), "test_pain_dataset.json")
    dset_dir = os.path.join(get_test_data_path(), "test_pain_dataset")
    mask_file = os.path.join(dset_dir, "mask.nii.gz")
    dset = nimare.dataset.Dataset(dset_file, mask=mask_file)
    dset.update_path(dset_dir)

    # create reproducible random number generator for resampling
    rng = np.random.default_rng(seed=123)
    # Move image contents of Dataset to temporary directory
    for c in dset.images.columns:
        if c.endswith("__relative"):
            continue

        for f in dset.images[c].values:
            if (f is None) or not os.path.isfile(f):
                continue

            new_f = f.replace(dset_dir.rstrip(os.path.sep),
                              str(tmpdir.absolute()).rstrip(os.path.sep))
            dirname = os.path.dirname(new_f)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)

            # create random affine to make images different shapes
            affine = np.eye(3)
            np.fill_diagonal(affine, rng.choice([1, 2, 3]))
            img = resample_img(
                nib.load(f),
                target_affine=affine,
                interpolation="linear",
                clip=True,
            )
            nib.save(img, new_f)
    dset.update_path(tmpdir)
    return dset
Example #18
def get_data(download_data):
    """
    Load data from dataset into global variables.
    """
    # Load dataset
    dset_file = op.join(get_test_data_path(), 'nidm_pain_dset.json')
    dset = nimare.dataset.Dataset(dset_file)
    dset.update_path(pytest.dset_dir)

    # Ugly searching until better methods are implemented.
    z_ids = [
        id_ for id_ in dset.ids if dset.get_images(id_, imtype='z') is not None
    ]
    z_files = dset.get_images(z_ids, imtype='z')
    sample_sizes = dset.get_metadata(z_ids, 'sample_sizes')
    sample_sizes = np.array([np.mean(n) for n in sample_sizes])

    # Create reduced dataset for ibma
    pytest.dset_z = dset.slice(z_ids)

    # Now get the actual data for esma
    z_imgs = [nib.load(f) for f in z_files]
    z_data = apply_mask(z_imgs, dset.masker.mask_img)
    pytest.data_z = z_data
    pytest.sample_sizes_z = sample_sizes

    # Ugly searching until better methods are implemented.
    con_ids = [
        id_ for id_ in dset.ids
        if dset.get_images(id_, imtype='con') is not None
    ]
    se_ids = [
        id_ for id_ in dset.ids
        if dset.get_images(id_, imtype='se') is not None
    ]
    conse_ids = sorted(list(set(con_ids).intersection(se_ids)))

    # Create reduced dataset for ibma
    pytest.dset_conse = dset.slice(conse_ids)

    # Now get the actual data for esma
    con_files = dset.get_images(conse_ids, imtype='con')
    se_files = dset.get_images(conse_ids, imtype='se')
    sample_sizes = dset.get_metadata(conse_ids, 'sample_sizes')
    sample_sizes = np.array([np.mean(n) for n in sample_sizes])
    con_imgs = [nib.load(f) for f in con_files]
    se_imgs = [nib.load(f) for f in se_files]
    con_data = apply_mask(con_imgs, dset.masker.mask_img)
    se_data = apply_mask(se_imgs, dset.masker.mask_img)
    pytest.data_con = con_data
    pytest.data_se = se_data
    pytest.sample_sizes_con = sample_sizes
Example #19
def test_conperm_workflow_function_smoke(testdata_ibma, tmp_path_factory):
    """Run smoke test of the contrast permutation workflow as a function."""
    tmpdir = tmp_path_factory.mktemp("test_conperm_workflow_function_smoke")
    dset = testdata_ibma
    files = dset.get_images(imtype="beta")
    mask_image = op.join(get_test_data_path(), "test_pain_dataset", "mask.nii.gz")
    prefix = "test"

    # The same test is run with both workflow function and CLI
    workflows.conperm_workflow(
        files, mask_image=mask_image, output_dir=tmpdir, prefix=prefix, n_iters=5
    )
    assert op.isfile(op.join(tmpdir, f"{prefix}_logp.nii.gz"))
Example #20
def test_scale_workflow_cli_smoke(tmp_path_factory):
    tmpdir = tmp_path_factory.mktemp("test_scale_workflow_cli_smoke")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"
    baseline = op.join(get_test_data_path(), "test_baseline.txt")

    cli._main([
        "scale",
        "--baseline",
        baseline,
        "--output_dir",
        str(tmpdir),
        "--prefix",
        prefix,
        "--n_iters",
        "5",
        "--n_cores",
        "1",
        sleuth_file,
    ])
    assert op.isfile(op.join(tmpdir,
                             "{}_input_coordinates.txt".format(prefix)))
Example #21
def test_scale_workflow_cli_smoke(tmp_path_factory):
    """Run smoke test of the SCALE workflow as a CLI."""
    tmpdir = tmp_path_factory.mktemp("test_scale_workflow_cli_smoke")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"
    baseline = op.join(get_test_data_path(), "test_baseline.txt")

    cli._main(
        [
            "scale",
            "--baseline",
            baseline,
            "--output_dir",
            str(tmpdir),
            "--prefix",
            prefix,
            "--n_iters",
            "5",
            "--n_cores",
            "1",
            sleuth_file,
        ]
    )
    assert op.isfile(op.join(tmpdir, f"{prefix}_input_coordinates.txt"))
Example #22
def get_data():
    """
    Load data from dataset into global variables.
    """
    # Load dataset
    dset_file = op.join(get_test_data_path(), 'nidm_pain_dset.json')
    with open(dset_file, 'r') as fo:
        dset_dict = json.load(fo)
    db = nimare.dataset.Database(dset_file)
    dset = db.get_dataset()
    pytest.dset_dict = dset_dict
    pytest.mask_img = dset.mask

    # Regular z maps
    z_files, ns = get_files(pytest.dset_dict, ['z', 'n'])
    z_imgs = [nib.load(f) for f in z_files]
    z_data = apply_mask(z_imgs, pytest.mask_img)

    # T maps to be converted to z
    t_files, t_ns = get_files(pytest.dset_dict, ['t!z', 'n'])
    t_imgs = [nib.load(f) for f in t_files]
    t_data_list = [apply_mask(t_img, pytest.mask_img) for t_img in t_imgs]
    tz_data_list = [
        nimare.utils.t_to_z(t_data, t_ns[i] - 1)
        for i, t_data in enumerate(t_data_list)
    ]
    tz_data = np.vstack(tz_data_list)

    # Combine
    z_data = np.vstack((z_data, tz_data))
    ns = np.concatenate((ns, t_ns))
    sample_sizes = np.array(ns)
    pytest.z_data = z_data
    pytest.sample_sizes_z = sample_sizes

    con_files, se_files, ns = get_files(dset_dict, ['con', 'se', 'n'])
    con_imgs = [nib.load(f) for f in con_files]
    se_imgs = [nib.load(f) for f in se_files]
    con_data = apply_mask(con_imgs, pytest.mask_img)
    se_data = apply_mask(se_imgs, pytest.mask_img)
    sample_sizes = np.array(ns)
    pytest.con_data = con_data
    pytest.se_data = se_data
    pytest.sample_sizes_con = sample_sizes
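The t-to-z step above relies on nimare.utils.t_to_z. As a rough sketch only (the reference implementation may handle tails and extreme values differently), a t-statistic can be mapped to a z-statistic by matching p-values:

import numpy as np
from scipy import stats

def t_to_z_sketch(t_values, dof):
    # Match one-sided p-values between the t and standard normal distributions
    p_values = stats.t.sf(t_values, dof)
    return stats.norm.isf(p_values)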
Example #23
def test_ale_workflow_function_smoke_2(tmp_path_factory):
    """
    Smoke test for Sleuth ALE workflow with subtraction analysis.
    """
    tmpdir = tmp_path_factory.mktemp("test_ale_workflow_function_smoke_2")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"

    # The same test is run with both workflow function and CLI
    workflows.ale_sleuth_workflow(
        sleuth_file,
        sleuth_file2=sleuth_file,
        output_dir=tmpdir,
        prefix=prefix,
        n_iters=10,
        n_cores=1,
    )
    assert op.isfile(
        op.join(tmpdir, "{}_group2_input_coordinates.txt".format(prefix)))
Example #24
def test_jackknife_with_custom_masker_smoke(testdata_ibma):
    """Ensure that Jackknife will work with NiftiLabelsMaskers.

    CBMAs don't work with NiftiLabelsMaskers and VarianceBasedLikelihood takes ~1 minute,
    which is too long for a single test, so I'm just using SampleSizeBasedLikelihood.
    """
    atlas = op.join(get_test_data_path(), "test_pain_dataset", "atlas.nii.gz")
    masker = NiftiLabelsMasker(atlas)

    meta = ibma.SampleSizeBasedLikelihood(mask=masker)
    res = meta.fit(testdata_ibma)

    jackknife = Jackknife(target_image="z", voxel_thresh=0.5)
    cluster_table, labeled_img = jackknife.transform(res)
    assert cluster_table.shape[0] == len(meta.inputs_["id"]) + 1

    # A Jackknife with a target_image that isn't present in the MetaResult raises a ValueError.
    with pytest.raises(ValueError):
        jackknife = Jackknife(target_image="doggy", voxel_thresh=0.5)
        jackknife.transform(res)
Example #25
def test_conperm_workflow_cli_smoke(testdata_ibma, tmp_path_factory):
    tmpdir = tmp_path_factory.mktemp("test_conperm_workflow_cli_smoke")
    dset = testdata_ibma
    files = dset.get_images(imtype="beta")
    mask_image = op.join(get_test_data_path(), "test_pain_dataset",
                         "mask.nii.gz")
    prefix = "test"

    cli._main([
        "conperm",
        "--output_dir",
        str(tmpdir),
        "--mask",
        mask_image,
        "--prefix",
        prefix,
        "--n_iters",
        "5",
    ] + files)
    assert op.isfile(op.join(tmpdir, "{}_logp.nii.gz".format(prefix)))
Example #26
def test_ale_workflow_cli_smoke(tmp_path_factory):
    """Run smoke test for Sleuth ALE workflow."""
    tmpdir = tmp_path_factory.mktemp("test_ale_workflow_cli_smoke")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"

    cli._main(
        [
            "ale",
            "--output_dir",
            str(tmpdir),
            "--prefix",
            prefix,
            "--n_iters",
            "10",
            "--n_cores",
            "1",
            sleuth_file,
        ]
    )
    assert op.isfile(op.join(tmpdir, f"{prefix}_input_coordinates.txt"))
Example #27
def get_files(ddict, types, data_dir=None):
    """
    Returns a list of files associated with a given data type
    from a set of subfolders within a directory. Allows for
    multiple data types and only returns a set of files from folders
    with all of the requested types.
    """
    if data_dir is None:
        data_dir = get_test_data_path()

    all_files = []
    for study in ddict.keys():
        files = []
        cdict = ddict[study]['contrasts']['1']
        for t in types:
            temp = _get_file(cdict, t, data_dir)
            if temp:
                files.append(temp)

        if len(files) == len(types):
            all_files.append(files)
    all_files = list(map(list, zip(*all_files)))
    return all_files
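Because of the final transpose (zip(*all_files)), get_files returns one list per requested type, aligned across studies. A hedged usage sketch, assuming a dataset dictionary like the dset_dict loaded in Example #22:

z_files, n_values = get_files(dset_dict, ['z', 'n'])
# Both lists have one entry per study that provides all requested types
assert len(z_files) == len(n_values)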
Example #28
def test_ale_workflow_cli_smoke_2(tmp_path_factory):
    """
    Smoke test for Sleuth ALE workflow with subtraction analysis.
    """
    tmpdir = tmp_path_factory.mktemp("test_ale_workflow_cli_smoke_2")
    sleuth_file = op.join(get_test_data_path(), "test_sleuth_file.txt")
    prefix = "test"
    cli._main([
        "ale",
        "--output_dir",
        str(tmpdir),
        "--prefix",
        prefix,
        "--n_iters",
        "10",
        "--n_cores",
        "1",
        "--file2",
        sleuth_file,
        sleuth_file,
    ])
    assert op.isfile(
        op.join(tmpdir, "{}_group2_input_coordinates.txt".format(prefix)))
Example #29
def test_conperm_workflow_cli_smoke(testdata_ibma, tmp_path_factory):
    """Run smoke test of the contrast permutation workflow as a CLI."""
    tmpdir = tmp_path_factory.mktemp("test_conperm_workflow_cli_smoke")
    dset = testdata_ibma
    files = dset.get_images(imtype="beta")
    mask_image = op.join(get_test_data_path(), "test_pain_dataset", "mask.nii.gz")
    prefix = "test"

    cli._main(
        [
            "conperm",
            "--output_dir",
            str(tmpdir),
            "--mask",
            mask_image,
            "--prefix",
            prefix,
            "--n_iters",
            "5",
        ]
        + files
    )
    assert op.isfile(op.join(tmpdir, f"{prefix}_logp.nii.gz"))
Example #30
def test_ibma_with_custom_masker(testdata_ibma, caplog, estimator, expectation,
                                 masker_source):
    """Ensure voxel-to-ROI reduction works, but only for Estimators that allow it.

    Notes
    -----
    Currently masker_source is not used, but ultimately we will want to test cases where the
    Dataset uses a NiftiLabelsMasker.
    """
    atlas = op.join(get_test_data_path(), "test_pain_dataset", "atlas.nii.gz")
    masker = NiftiLabelsMasker(atlas)

    dset = testdata_ibma
    meta = estimator(mask=masker)

    if expectation == "error":
        with pytest.raises(ValueError):
            meta.fit(dset)
    elif expectation == "warning":
        with caplog.at_level(logging.WARNING, logger="nimare.meta.ibma"):
            res = meta.fit(dset)
            assert "will likely produce biased results" in caplog.text
        caplog.clear()
    else:
        with caplog.at_level(logging.WARNING, logger="nimare.meta.ibma"):
            res = meta.fit(dset)
            assert "will likely produce biased results" not in caplog.text
        caplog.clear()

    # Only fit the estimator if it doesn't raise a ValueError
    if expectation != "error":
        assert isinstance(res, nimare.results.MetaResult)
        # There are five "labels", but one of them has no good data,
        # so the output has five values with NaN for that label.
        assert res.maps["z"].shape == (5,)
        assert np.isnan(res.maps["z"][0])
        assert res.get_map("z").shape == (10, 10, 10)
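The estimator, expectation, and masker_source arguments imply the test above is parametrized; the decorator does not appear in this listing. A hedged sketch of what such a decorator, placed directly above the test, could look like; the specific estimators and expectation strings are illustrative assumptions, not the project's actual parameter list:

@pytest.mark.parametrize(
    "estimator,expectation,masker_source",
    [
        (ibma.Fishers, "warning", "estimator"),
        (ibma.SampleSizeBasedLikelihood, "no warning", "estimator"),
    ],
)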