# NOTE: the import paths below are assumptions -- these RPBI helpers lived
# under nilearn.mass_univariate in the original codebase; adjust the last
# import to wherever _build_parcellations, rpbi_core, GrowableSparseArray
# and _compute_counting_statistic_from_parcel_level_scores are defined.
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
                           assert_array_almost_equal)
from scipy import sparse
from sklearn.utils import check_random_state

from nilearn.mass_univariate.rpbi import (
    GrowableSparseArray, _build_parcellations,
    _compute_counting_statistic_from_parcel_level_scores, rpbi_core)


def test_build_parcellations(random_state=0):
    """Test parcellations building.
    """
    # check random state
    rng = check_random_state(random_state)

    # Generate toy data
    # define data structure
    shape = (5, 5, 5)
    mask = np.ones(shape, dtype=bool)
    # data generation
    data1 = np.ones(shape)
    data1[1:3, 1:3, 1:3] = 2.
    data2 = np.ones(shape)
    data2[3:, 3:, 3:] = 4.
    data = np.ones((4, np.prod(shape)))  # 4 raveled images
    data[0] = np.ravel(data1)
    data[1] = np.ravel(data2)
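    # rows 2 and 3 are left constant (all ones), so every parcel average
    # computed from them should be exactly 1 (checked below)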

    # Test _build_parcellations function
    parcelled_data, labels = _build_parcellations(
        data,
        mask,
        n_parcellations=2,
        n_parcels=3,
        # make sure we use observations 1 and 2 at least once
        n_bootstrap_samples=8,
        random_state=rng)
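    # parcelled_data holds one column per (parcellation, parcel) pair,
    # i.e. n_parcels * n_parcellations columns; labels concatenates the
    # voxel-to-parcel assignments of all parcellations, with parcel ids
    # kept unique across parcellations (hence np.arange(2 * 3) below)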
    # check parcelled_data
    assert_equal(parcelled_data.shape, (4, 3 * 2))
    assert_array_equal(
        np.sort(np.unique(parcelled_data[0])),  # order is hard to predict
        [1, 2])
    assert_array_equal(
        np.sort(np.unique(parcelled_data[1])),  # order is hard to predict
        [1, 4])
    assert_array_equal(parcelled_data[2], [1, 1, 1, 1, 1, 1])
    assert_array_equal(parcelled_data[3], [1, 1, 1, 1, 1, 1])
    # check labels
    assert_equal(len(labels.shape), 1)
    assert_array_equal(np.unique(labels), np.arange(2 * 3))


def test_rpbi_core_withcovars(random_state=0):
    """Test Randomized Parcellation Based Inference core function with covars.
    """
    # check random state
    rng = check_random_state(random_state)

    # Generate toy data
    # define data structure
    shape = (5, 5, 5)
    n_voxels = np.prod(shape)
    mask = np.ones(shape, dtype=bool)
    # data generation
    data = np.zeros(shape)
    data[1:3, 1:3, 1:3] = 2.
    data = data.reshape((1, -1))
    data = np.repeat(data, 8, 0)
    # add noise to avoid constant columns
    data += 0.1 * rng.randn(data.shape[0], data.shape[1])
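    # the 2x2x2 blob at [1:3, 1:3, 1:3] is the signal the test expects
    # RPBI to recover (see expected_pvalues below)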

    # Parcellate data and extract signal averages
    n_parcellations = 2
    n_parcels = 3
    parcelled_data, labels = _build_parcellations(
        data, mask, n_parcellations=n_parcellations, n_parcels=n_parcels,
        # make sure we use observations 1 and 2 at least once
        n_bootstrap_samples=6, random_state=rng)

    # Covariates (dummy)
    covars = 0.1 * rng.randn(8).reshape((-1, 1))

    # RPBI from already parcelled data
    rng = check_random_state(random_state)
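    # threshold=0.05 / n_parcels below is presumably a Bonferroni
    # correction of the parcel-level tests across the n_parcels parcels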
    pvalues, counting_statistic_original_data, h0 = rpbi_core(
        np.ones((8, 1)), parcelled_data, n_parcellations, labels,
        n_parcels, confounding_vars=covars, model_intercept=False,
        threshold=0.05 / n_parcels, n_perm=9, random_state=rng)
    # check pvalues
    expected_pvalues = np.zeros(shape)
    expected_pvalues[1:3, 1:3, 1:3] = 1.
    expected_pvalues = np.ravel(expected_pvalues)
    assert_equal(pvalues.shape, (n_voxels,))
    assert_array_equal(pvalues, expected_pvalues)
    # check counting statistic
    assert_equal(counting_statistic_original_data.shape, (n_voxels,))
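    # each active voxel is detected once per parcellation, hence the
    # factor n_parcellations = 2 on the expected counting statistic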
    assert_array_equal(counting_statistic_original_data, 2 * expected_pvalues)
    # h0
    assert_equal(h0.shape, (9,))
    assert_array_equal(h0, np.zeros(9))

    # Same thing with model_intercept=True
    rng = check_random_state(random_state)
    pvalues, counting_statistic_original_data, h0 = rpbi_core(
        np.ones((8, 1)), parcelled_data, n_parcellations, labels,
        n_parcels, confounding_vars=covars, model_intercept=True,
        threshold=0.05 / n_parcels, n_perm=9, random_state=rng)
    # check pvalues
    expected_pvalues = np.zeros(shape)
    expected_pvalues[1:3, 1:3, 1:3] = 1.
    expected_pvalues = np.ravel(expected_pvalues)
    assert_equal(pvalues.shape, (n_voxels,))
    assert_array_almost_equal(pvalues, expected_pvalues)
    # check counting statistic
    assert_equal(counting_statistic_original_data.shape, (n_voxels,))
    assert_array_almost_equal(counting_statistic_original_data,
                              2 * expected_pvalues)
    # h0
    assert_equal(h0.shape, (9,))
    assert_array_almost_equal(h0, np.zeros(9))

    # Replace intercept test with a more complex test
    rng = check_random_state(random_state)
    tested_var = np.ones(8)
    tested_var[0:4] = 0
    parcelled_data[0:4] *= -1
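    # flipping the sign of the first four rows creates a group difference
    # aligned with tested_var, so the blob should again be detected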
    pvalues, counting_statistic_original_data, h0 = rpbi_core(
        tested_var, parcelled_data, n_parcellations, labels,
        n_parcels, confounding_vars=covars, model_intercept=False,
        threshold=0.1 / n_parcels, n_perm=9, random_state=rng)
    # check pvalues
    expected_pvalues = np.zeros(shape)
    expected_pvalues[1:3, 1:3, 1:3] = 1.
    expected_pvalues = np.ravel(expected_pvalues)
    assert_equal(pvalues.shape, (n_voxels,))
    assert_array_almost_equal(pvalues, expected_pvalues)
    # check counting statistic
    assert_equal(counting_statistic_original_data.shape, (n_voxels,))
    assert_array_almost_equal(counting_statistic_original_data,
                              2 * expected_pvalues)
    # h0
    assert_equal(h0.shape, (9,))
    assert_array_almost_equal(h0, np.zeros(9))

    # Same thing with intercept modelling
    rng = check_random_state(random_state)
    pvalues, counting_statistic_original_data, h0 = rpbi_core(
        tested_var, parcelled_data, n_parcellations, labels,
        n_parcels, confounding_vars=covars, model_intercept=True,
        threshold=0.1 / n_parcels, n_perm=9, random_state=rng)
    # check pvalues
    expected_pvalues = np.zeros(shape)
    expected_pvalues[1:3, 1:3, 1:3] = 1.
    expected_pvalues = np.ravel(expected_pvalues)
    assert_equal(pvalues.shape, (n_voxels,))
    assert_array_almost_equal(pvalues, expected_pvalues)
    # check counting statistic
    assert_equal(counting_statistic_original_data.shape, (n_voxels,))
    assert_array_almost_equal(counting_statistic_original_data,
                              2 * expected_pvalues)
    # h0
    assert_equal(h0.shape, (9,))
    assert_array_almost_equal(h0, np.zeros(9))


def test_compute_counting_statistic_from_parcel_level_scores(random_state=1):
    """Test the computation of RPBI's counting statistic.
    """
    # check random state
    rng = check_random_state(random_state)

    # Generate toy data
    # define data structure
    shape = (5, 5, 5)
    n_voxels = np.prod(shape)
    mask = np.ones(shape, dtype=bool)
    # data generation
    data1 = np.ones(shape)
    data1[1:3, 1:3, 1:3] = 2.
    data2 = np.ones(shape)
    data2[3:, 3:, 3:] = 4.
    data = np.ones((4, np.prod(shape)))  # 4 raveled images
    data[0] = np.ravel(data1)
    data[1] = np.ravel(data2)

    # Parcellate data and extract signal averages
    n_parcellations = 2
    n_parcels = 3
    parcelled_data, labels = _build_parcellations(
        data, mask, n_parcellations=n_parcellations, n_parcels=n_parcels,
        # make sure we use observations 1 and 2 at least once
        n_bootstrap_samples=6, random_state=rng)
    parcel_level_results = GrowableSparseArray(n_rows=2)
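    # keep only parcel averages >= 2: this mimics RPBI's thresholding of
    # parcel-level scores, so sub-threshold parcels contribute nothing to
    # the counting statistic (note: data_tmp is a view, so parcelled_data
    # is modified in place)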
    data_tmp = parcelled_data[0]
    data_tmp[data_tmp < 2] = 0
    parcel_level_results.append(0, data_tmp)
    data_tmp = parcelled_data[1]
    data_tmp[data_tmp < 2] = 0
    parcel_level_results.append(1, data_tmp)
    parcellation_masks = np.zeros((n_parcellations * n_parcels, n_voxels))
    for j in np.arange(n_parcellations):  # loop on parcellations
        label_slice = slice(j * n_voxels, (j + 1) * n_voxels)
        for parcel_id in np.unique(labels[label_slice]):
            parcellation_masks[parcel_id] = labels[label_slice] == parcel_id
    parcellation_masks = sparse.coo_matrix(
        parcellation_masks.astype(np.float32)).tocsr()
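    # each row of parcellation_masks is the binary voxel mask of one
    # parcel; the sparse CSR format presumably matches what the counting
    # function expects for the parcel-to-voxel projection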

    # Transform back data
    # (transformed data should be similar to the original data (up to
    # thresholding and sum across parcellations) since by construction
    # the signal is homogeneous within each parcel for each subject)
    thresholded_data = data.copy()
    thresholded_data[thresholded_data < 2] = 0.
    thresholded_data *= 2.
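    # the factor 2 accounts for the sum over the n_parcellations = 2
    # parcellations when parcel scores are projected back to voxels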
    res = _compute_counting_statistic_from_parcel_level_scores(
        parcel_level_results.get_data(), slice(0, 2), parcellation_masks,
        n_parcellations, n_parcellations * n_parcels)
    counting_stats_original_data, h0 = res
    assert_array_equal(counting_stats_original_data,
                       thresholded_data[0])
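    # h0 presumably stores, for each permutation, the maximum voxel-level
    # counting statistic; row 1 yields max 4 * 2 = 8 (parcel average 4,
    # summed over the two parcellations)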
    assert_array_equal(h0, [8])

    # Same thing but only for the permuted data
    res = _compute_counting_statistic_from_parcel_level_scores(
        parcel_level_results.get_data()[2:], slice(1, 2),
        parcellation_masks, n_parcellations, n_parcellations * n_parcels)
    counting_stats_original_data, h0 = res
    assert_array_equal(counting_stats_original_data, [])
    assert_array_equal(h0, [8])