Example #1
    def test_different_sizes_alternative(self):
        np.random.seed(314)

        A1 = er_np(100, 0.8)
        A2 = er_np(1000, 0.7)

        ldt_corrected_1 = latent_distribution_test(
            A1,
            A2,
            test="hsic",
            metric="gaussian",
            n_components=2,
            n_bootstraps=100,
            size_correction=True,
        )
        ldt_corrected_2 = latent_distribution_test(
            A2,
            A1,
            test="hsic",
            metric="gaussian",
            n_components=2,
            n_bootstraps=100,
            size_correction=True,
        )

        self.assertTrue(ldt_corrected_1[1] <= 0.05)
        self.assertTrue(ldt_corrected_2[1] <= 0.05)
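These snippets are method excerpts from graspy/graspologic test suites and leave out their imports. The sketch below lists the imports they appear to assume; the exact module paths are assumptions and vary between the older graspy namespace (which exposed class-based tests such as LatentDistributionTest) and newer graspologic releases (which expose the function-based latent_distribution_test and latent_position_test), so adjust to the installed version.

import networkx as nx
import numpy as np
import pytest
from sklearn.metrics import pairwise_distances

# Module paths assumed here; older graspy versions use the graspy.* namespace
# and class-based tests instead of these functions.
from graspologic.embed import AdjacencySpectralEmbed
from graspologic.inference import latent_distribution_test, latent_position_test
from graspologic.match import GraphMatch as GMP
from graspologic.models import DCEREstimator, DCSBMEstimator, EREstimator
from graspologic.nominate import VNviaSGM
from graspologic.simulations import er_np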
Example #2
 def test_ase_works(self):
     np.random.seed(888)
     A1 = er_np(20, 0.3)
     A2 = er_np(20, 0.3)
     tests = {"dcorr": "euclidean", "hsic": "gaussian", "mgc": "euclidean"}
     for test in tests.keys():
         ldt = latent_distribution_test(A1, A2, test, tests[test], n_bootstraps=10)
Example #3
    def test_n_bootstraps(self):
        np.random.seed(1234556)
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)

        lpt = latent_position_test(A1, A2, n_bootstraps=234, n_components=None)
        assert lpt[2]["null_distribution_1"].shape[0] == 234
Example #4
    def test_different_sizes_null(self):
        np.random.seed(314)

        A1 = er_np(100, 0.8)
        A2 = er_np(1000, 0.8)

        ldt_not_corrected = LatentDistributionTest("hsic",
                                                   "gaussian",
                                                   n_components=2,
                                                   n_bootstraps=100,
                                                   size_correction=False)
        ldt_corrected_1 = LatentDistributionTest("hsic",
                                                 "gaussian",
                                                 n_components=2,
                                                 n_bootstraps=100,
                                                 size_correction=True)
        ldt_corrected_2 = LatentDistributionTest("hsic",
                                                 "gaussian",
                                                 n_components=2,
                                                 n_bootstraps=100,
                                                 size_correction=True)

        p_not_corrected = ldt_not_corrected.fit_predict(A1, A2)
        p_corrected_1 = ldt_corrected_1.fit_predict(A1, A2)
        p_corrected_2 = ldt_corrected_2.fit_predict(A2, A1)

        self.assertTrue(p_not_corrected <= 0.05)
        self.assertTrue(p_corrected_1 > 0.05)
        self.assertTrue(p_corrected_2 > 0.05)
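Examples #4, #5, #6, #16, and #27 use the older class-based API (a LatentDistributionTest object with fit / fit_predict), while the other snippets call the newer latent_distribution_test function. A rough, hedged sketch of the correspondence, reusing arguments that appear in the examples above:

# Older class-based API (graspy): fit_predict returns the p-value directly.
ldt = LatentDistributionTest("hsic", "gaussian", n_components=2, n_bootstraps=100)
p_value = ldt.fit_predict(A1, A2)

# Newer function-based API (graspologic): the result is indexable, with the
# p-value at index 1 and a dict of extras (e.g. null distributions) at index 2.
result = latent_distribution_test(
    A1, A2, test="hsic", metric="gaussian", n_components=2, n_bootstraps=100
)
p_value = result[1]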
Example #5
 def test_distances_and_kernels(self):
     np.random.seed(123)
     A1 = er_np(20, 0.3)
     A2 = er_np(100, 0.3)
     # some valid combinations of test and metric
     # # would love to do this, but currently FutureWarning breaks this
     # with pytest.warns(None) as record:
     #     for test in self.tests.keys():
     #         ldt = LatentDistributionTest(test, self.tests[test])
     #         ldt.fit(A1, A2)
     #     ldt = LatentDistributionTest("hsic", "rbf")
     #     ldt.fit(A1, A2)
     # assert len(record) == 0
     for test in self.tests.keys():
         ldt = LatentDistributionTest(test, self.tests[test])
         ldt.fit(A1, A2)
     ldt = LatentDistributionTest("hsic", "rbf")
     ldt.fit(A1, A2)
     # some invalid combinations of test and metric
     with pytest.warns(UserWarning):
         ldt = LatentDistributionTest("hsic", "euclidean")
     with pytest.warns(UserWarning):
         ldt = LatentDistributionTest("dcorr", "gaussian")
     with pytest.warns(UserWarning):
         ldt = LatentDistributionTest("dcorr", "rbf")
Example #6
    def test_pooled(self):
        np.random.seed(123)
        A1 = er_np(20, 0.3)
        A2 = er_np(100, 0.3)

        ldt = LatentDistributionTest(pooled=True)
        ldt.fit(A1, A2)
Example #7
    def test_different_aligners(self):
        np.random.seed(314)
        A1 = er_np(100, 0.8)
        A2 = er_np(100, 0.8)
        ase_1 = AdjacencySpectralEmbed(n_components=2)
        X1 = ase_1.fit_transform(A1)
        ase_2 = AdjacencySpectralEmbed(n_components=2)
        X2 = ase_2.fit_transform(A2)
        X2 = -X2

        ldt_1 = latent_distribution_test(X1, X2, input_graph=False, align_type=None)
        self.assertTrue(ldt_1[1] < 0.05)

        ldt_2 = latent_distribution_test(
            X1, X2, input_graph=False, align_type="sign_flips"
        )
        self.assertTrue(ldt_2[1] >= 0.05)

        # also checking that kws are passed through
        ldt_3 = latent_distribution_test(
            X1,
            X2,
            input_graph=False,
            align_type="seedless_procrustes",
            align_kws={"init": "sign_flips"},
        )
        self.assertTrue(ldt_3[1] >= 0.05)
Example #8
 def test_workers(self):
     np.random.seed(888)
     A1 = er_np(20, 0.3)
     A2 = er_np(20, 0.3)
     ldt = latent_distribution_test(
         A1, A2, "dcorr", "euclidean", n_bootstraps=4, workers=4
     )
Example #9
    def test_bad_matrix_inputs(self):
        np.random.seed(1234556)
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)

        bad_matrix = [[1, 2]]
        with self.assertRaises(TypeError):
            latent_distribution_test(bad_matrix, A2, test="dcorr")
Example #10
    def test_callable_metric(self):
        np.random.seed(888)
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)

        def metric_func(X, Y=None, workers=None):
            return pairwise_distances(X, metric="euclidean") * 0.5

        ldt = latent_distribution_test(A1, A2, "dcorr", metric_func, n_bootstraps=10)
Example #11
 def setUpClass(cls):
     np.random.seed(123456)
     cls.tests = {
         "dcorr": "euclidean",
         "hsic": "gaussian",
         "mgc": "euclidean"
     }
     cls.A1 = er_np(20, 0.3)
     cls.A2 = er_np(20, 0.3)
Example #12
    def test_directed_inputs(self):
        np.random.seed(2)
        A = er_np(100, 0.3, directed=True)
        B = er_np(100, 0.3, directed=True)
        C = er_np(100, 0.3, directed=False)

        # two directed graphs is okay
        latent_distribution_test(A, B)

        # an undirected and a directed graph is not okay
        with self.assertRaises(ValueError):
            latent_distribution_test(A, C)
        with self.assertRaises(ValueError):
            latent_distribution_test(C, B)
Example #13
    def test_bad_matrix_inputs(self):
        np.random.seed(1234556)
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)
        A1[2, 0] = 1  # make asymmetric
        with pytest.raises(
                NotImplementedError):  # TODO : remove when we implement
            latent_position_test(A1, A2)

        bad_matrix = [[1, 2]]
        with pytest.raises(TypeError):
            latent_position_test(bad_matrix, A2)

        with pytest.raises(ValueError):
            latent_position_test(A1[:2, :2], A2)
Example #14
 def test_passing_networkx(self):
     np.random.seed(123)
     A1 = er_np(20, 0.8)
     A2 = er_np(20, 0.8)
     A1_nx = nx.from_numpy_matrix(A1)
     A2_nx = nx.from_numpy_matrix(A2)
     # check passing nx, when expecting embeddings
     with self.assertRaises(TypeError):
         latent_distribution_test(A1_nx, A1, input_graph=False)
     with self.assertRaises(TypeError):
         latent_distribution_test(A1, A2_nx, input_graph=False)
     with self.assertRaises(TypeError):
         latent_distribution_test(A1_nx, A2_nx, input_graph=False)
     # check that the appropriate input works
     latent_distribution_test(A1_nx, A2_nx, input_graph=True)
Example #15
File: test_models.py Project: zeou1/graspy
 def setup_class(cls):
     np.random.seed(8888)
     cls.graph = er_np(1000, 0.5)
     cls.p = 0.5
     cls.p_mat = np.full((1000, 1000), 0.5)
     cls.estimator = EREstimator(directed=True, loops=False)
     cls.estimator.fit(cls.graph)
     cls.p_hat = cls.estimator.p_
Example #16
 def test_passing_embeddings(self):
     np.random.seed(123)
     A1 = er_np(20, 0.8)
     A2 = er_np(20, 0.8)
     ase_1 = AdjacencySpectralEmbed(n_components=2)
     X1 = ase_1.fit_transform(A1)
     ase_2 = AdjacencySpectralEmbed(n_components=2)
     X2 = ase_2.fit_transform(A2)
     ase_3 = AdjacencySpectralEmbed(n_components=1)
     X3 = ase_3.fit_transform(A2)
     # check embeddings having weird ndim
     with self.assertRaises(ValueError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X1, X2.reshape(-1, 1, 1))
     with self.assertRaises(ValueError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X1.reshape(-1, 1, 1), X2)
     # check embeddings having mismatching number of components
     with self.assertRaises(ValueError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X1, X3)
     with self.assertRaises(ValueError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X3, X1)
     # check passing weird stuff as input (caught by us)
     with self.assertRaises(TypeError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict("hello there", X1)
     with self.assertRaises(TypeError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X1, "hello there")
     with self.assertRaises(TypeError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict({"hello": "there"}, X1)
     with self.assertRaises(TypeError):
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X1, {"hello": "there"})
     # check passing infinite in input (caught by check_array)
     with self.assertRaises(ValueError):
         X1_w_inf = X1.copy()
         X1_w_inf[1, 1] = np.inf
         ldt = LatentDistributionTest(input_graph=False)
         ldt.fit_predict(X1_w_inf, X2)
     # check that the appropriate input works
     ldt = LatentDistributionTest(input_graph=False)
     ldt.fit_predict(X1, X2)
Example #17
    def test_bad_kwargs(self):
        np.random.seed(888)
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)

        # check test argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, test=0)
        with self.assertRaises(ValueError):
            latent_distribution_test(A1, A2, test="foo")
        # check metric argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, metric=0)
        with self.assertRaises(ValueError):
            latent_distribution_test(A1, A2, metric="some_kind_of_kernel")
        # check n_components argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, n_components=0.5)
        with self.assertRaises(ValueError):
            latent_distribution_test(A1, A2, n_components=-100)
        # check n_bootstraps argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, n_bootstraps=0.5)
        with self.assertRaises(ValueError):
            latent_distribution_test(A1, A2, n_bootstraps=-100)
        # check workers argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, workers=0.5)
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, workers="oops")
        # check size_correction argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, size_correction=0)
        # check pooled argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, pooled=0)
        # check align_type argument
        with self.assertRaises(ValueError):
            latent_distribution_test(A1, A2, align_type="foo")
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, align_type={"not a": "string"})
        # check align_kws argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, align_kws="foo")
        # check input_graph argument
        with self.assertRaises(TypeError):
            latent_distribution_test(A1, A2, input_graph="hello")
Example #18
File: test_models.py Project: zeou1/graspy
    def test_ER_score(self):
        p_mat = self.p_mat
        graph = self.graph
        estimator = EREstimator(directed=False)
        _test_score(estimator, p_mat, graph)

        with pytest.raises(ValueError):
            estimator.score_samples(graph=er_np(500, 0.5))
Example #19
    def test_padding(self):
        n = 50
        p = 0.4

        G1 = er_np(n=n, p=p)
        G2 = G1[:-2, :-2]  # remove two nodes
        gmp_adopted = GMP(padding="adopted")
        res = gmp_adopted.fit(G1, G2)

        self.assertTrue(0.95 <= (sum(res.perm_inds_ == np.arange(n)) / n))
Example #20
    def test_padding(self):
        n = 50
        p = 0.4

        np.random.seed(1)
        G1 = er_np(n=n, p=p)
        G2 = G1[:(n - 1), :(n - 1)]  # remove one node
        gmp_adopted = GMP(padding="adopted")
        res = gmp_adopted.fit(G1, G2)

        assert 1.0 == (sum(res.perm_inds_ == np.arange(n)) / n)
Example #21
    def test_bad_kwargs(self):
        np.random.seed(1234556)
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)

        with pytest.raises(ValueError):
            latent_position_test(A1, A2, n_components=-100)
        with pytest.raises(ValueError):
            latent_position_test(A1, A2, test_case="oops")
        with pytest.raises(ValueError):
            latent_position_test(A1, A2, n_bootstraps=-100)
        with pytest.raises(ValueError):
            latent_position_test(A1, A2, embedding="oops")
        with pytest.raises(TypeError):
            latent_position_test(A1, A2, n_bootstraps=0.5)
        with pytest.raises(TypeError):
            latent_position_test(A1, A2, n_components=0.5)
        with pytest.raises(TypeError):
            latent_position_test(A1, A2, embedding=6)
        with pytest.raises(TypeError):
            latent_position_test(A1, A2, test_case=6)
Example #22
File: test_models.py Project: zeou1/graspy
    def test_ER_sample(self):
        with pytest.raises(ValueError):
            self.estimator.sample(n_samples=-1)

        with pytest.raises(TypeError):
            self.estimator.sample(n_samples="nope")
        g = er_np(100, 0.5)
        estimator = EREstimator(directed=True, loops=False)
        estimator.fit(g)
        p_mat = np.full((100, 100), 0.5)
        p_mat -= np.diag(np.diag(p_mat))
        _test_sample(estimator, p_mat)
Example #23
File: test_models.py Project: zeou1/graspy
    def test_DCER_inputs(self):
        with pytest.raises(TypeError):
            DCEREstimator(directed="hey")

        with pytest.raises(TypeError):
            DCEREstimator(loops=6)

        graph = er_np(100, 0.5)
        dcere = DCEREstimator()

        with pytest.raises(ValueError):
            dcere.fit(graph[:, :99])

        with pytest.raises(ValueError):
            dcere.fit(graph[..., np.newaxis])
Example #24
File: test_models.py Project: zeou1/graspy
    def test_DCSBM_inputs(self):
        with pytest.raises(TypeError):
            DCSBMEstimator(directed="hey")

        with pytest.raises(TypeError):
            DCSBMEstimator(loops=6)

        with pytest.raises(TypeError):
            DCSBMEstimator(n_components="XD")

        with pytest.raises(ValueError):
            DCSBMEstimator(n_components=-1)

        with pytest.raises(TypeError):
            DCSBMEstimator(min_comm="1")

        with pytest.raises(ValueError):
            DCSBMEstimator(min_comm=-1)

        with pytest.raises(TypeError):
            DCSBMEstimator(max_comm="ay")

        with pytest.raises(ValueError):
            DCSBMEstimator(max_comm=-1)

        with pytest.raises(ValueError):
            DCSBMEstimator(min_comm=4, max_comm=2)

        graph = er_np(100, 0.5)
        bad_y = np.zeros(99)
        dcsbe = DCSBMEstimator()
        with pytest.raises(ValueError):
            dcsbe.fit(graph, y=bad_y)

        with pytest.raises(ValueError):
            dcsbe.fit(graph[:, :99])

        with pytest.raises(ValueError):
            dcsbe.fit(graph[..., np.newaxis])

        with pytest.raises(TypeError):
            DCSBMEstimator(cluster_kws=1)

        with pytest.raises(TypeError):
            DCSBMEstimator(embed_kws=1)
Example #25
    def test_vn_algorithm(self):
        g1 = er_np(n=50, p=0.6)
        node_shuffle = np.random.permutation(50)

        g2 = g1[np.ix_(node_shuffle, node_shuffle)]

        kklst = [(xx, yy)
                 for xx, yy in zip(node_shuffle, np.arange(len(node_shuffle)))]
        kklst.sort(key=lambda x: x[0])
        kklst = np.array(kklst)

        voi = 7
        nseeds = 6

        vnsgm = VNviaSGM()
        nomlst = vnsgm.fit_predict(g1, g2, voi,
                                   [kklst[0:nseeds, 0], kklst[0:nseeds, 1]])

        assert nomlst[0][0] == kklst[np.where(kklst[:, 0] == voi)[0][0], 1]
Example #26
    def test_n_bootstraps(self):
        A1 = er_np(20, 0.3)
        A2 = er_np(20, 0.3)

        ldt = latent_distribution_test(A1, A2, n_bootstraps=123)
        assert ldt[2]["null_distribution"].shape[0] == 123
Example #27
 def test_pooled(self):
     np.random.seed(123)
     A1 = er_np(20, 0.3)
     A2 = er_np(100, 0.3)
     latent_distribution_test(A1, A2, pooled=True)
Example #28
 def setUpClass(cls):
     np.random.seed(1234556)
     cls.A1 = er_np(20, 0.3)
     cls.A2 = er_np(20, 0.3)
Example #29
 def test_ase_works(self):
     np.random.seed(1234556)
     A1 = er_np(20, 0.3)
     A2 = er_np(20, 0.3)
     lpt = latent_position_test(A1, A2)
Example #30
 def test_omni_works(self):
     np.random.seed(1234556)
     A1 = er_np(20, 0.3)
     A2 = er_np(20, 0.3)
     lpt = latent_position_test(A1, A2, embedding="omnibus")
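For context, a minimal end-to-end sketch of the pattern most of these tests exercise, assuming the function-based graspologic API and the import paths noted above:

import numpy as np
from graspologic.inference import latent_distribution_test
from graspologic.simulations import er_np

np.random.seed(0)
A1 = er_np(100, 0.5)  # 100-node Erdos-Renyi graph with edge probability 0.5
A2 = er_np(100, 0.5)  # a second graph drawn from the same model

# Both graphs come from the same distribution, so the p-value should
# typically be large (the null hypothesis is not rejected).
result = latent_distribution_test(
    A1, A2, test="dcorr", metric="euclidean", n_bootstraps=200
)
print("p-value:", result[1])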