def test_crema_dcm_Dianati_random_dense_20(self):

        network = mg.random_weighted_matrix_generator_dense(n=20,
                                                            sup_ext=10,
                                                            sym=False,
                                                            seed=None)
        network_bin = (network > 0).astype(int)

        g = sample.DirectedGraph(adjacency=network)

        g.solve_tool(
            model="crema",
            method="fixed-point",
            initial_guess="random",
            adjacency="dcm",
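            # adjacency="dcm" builds the binary part from the dcm null model
            # rather than from a fixed matrix (network_bin above is not used here)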
            max_steps=1000,
            verbose=False,
        )

        g._solution_error()

        # test result

        self.assertTrue(g.relative_error_strength < 1e-1)
        self.assertTrue(g.relative_error_strength < 1e-2)
    def test_decm(self):
        """DECM fit on a dense 4-node directed integer-weighted matrix
        with no zero degrees.
        """

        n, s = (4, 25)

        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sup_ext=10,
                                                      sym=False,
                                                      seed=s,
                                                      intweights=True)

        g = sample.DirectedGraph(A)

        g.solve_tool(
            model="decm",
            method="quasinewton",
            initial_guess="uniform",
            max_steps=200,
            verbose=False,
        )

        # g._solution_error()
        # debug
        # print(g.r_dseq_out)
        # print(g.r_dseq_in)
        # print(g.rnz_dseq_out)
        # print(g.rnz_dseq_in)
        # print('\ntest 0: error = {}'.format(g.error))

        # test result
        self.assertTrue(g.error < 1e-1)
    def test_quasinewton_0(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)
        # print(A)

        g = sample.DirectedGraph(A)

        g._solve_problem(
            model="decm",
            method="quasinewton",
            max_steps=100,
            verbose=False,
            initial_guess="uniform",
        )

        g._solution_error()
        # debug
        # print('\n test 1, no zeros, n = {}, error = {}'.format(n, g.error))

        # test result
        self.assertTrue(g.error < 1)
    def test_3(self):
        n, seed = (40, 35)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)
        A[0, :] = 0

        g = sample.DirectedGraph(A)

        g._solve_problem(
            model="decm_exp",
            method="quasinewton",
            max_steps=20000,
            verbose=False,
            initial_guess="uniform",
            linsearch=True,
        )

        g._solution_error()
        # debug
        # print("\n test 3, zeros, dimension n = {}, error = {}".format(
        #     n, g.error))

        # test result
        self.assertTrue(g.error < 1)
    def test_crema_original_newton_random_dense_20_undir(self):

        network = mg.random_weighted_matrix_generator_dense(n=20,
                                                            sup_ext=10,
                                                            sym=True,
                                                            seed=None)
        network_bin = (network > 0).astype(int)

        g = sample_und.UndirectedGraph(adjacency=network)

        g.solve_tool(
            model="crema",
            method="quasinewton",
            initial_guess="random",
            adjacency=network_bin,
            max_steps=1000,
            verbose=False,
        )

        g._solution_error()

        # test result

        self.assertTrue(g.relative_error_strength < 1e-1)
        self.assertTrue(g.relative_error_strength < 1e-2)
    def test_ECM_Dianati_random_dense_20_undir(self):

        network = mg.random_weighted_matrix_generator_dense(n=20,
                                                            sup_ext=10,
                                                            sym=True,
                                                            seed=None,
                                                            intweights=True)
        network_bin = (network > 0).astype(int)

        g = sample_und.UndirectedGraph(adjacency=network)

        g.solve_tool(
            model="ecm_exp",
            method="newton",
            max_steps=1000,
            verbose=False,
            initial_guess="uniform",
        )

        g._solution_error()

        # test result

        self.assertTrue(g.error < 1e-1)
        self.assertTrue(g.error < 1e-2)
    def test_newton_3(self):
        n, seed = (40, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        g = sample.DirectedGraph(A)

        g._solve_problem(
            model="decm",
            method="newton",
            max_steps=300,
            verbose=False,
            initial_guess="uniform",
        )

        g._solution_error()

        # print(g.expected_dseq)
        # print(g.dseq_out,g.dseq_in)
        # print('\n test 3, no zeros, dimension n = {} error = {}'.format(n, g.error))
        # print(g.error_dseq)

        # test result
        self.assertTrue(g.error < 1)
    def test_fixedpoint_dcm_2(self):
        # test Matrix 1
        n, seed = (40, 35)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        g = sample.DirectedGraph(A)

        g._solve_problem(
            model="decm",
            method="fixed-point",
            max_steps=25000,
            verbose=False,
            initial_guess="uniform",
            linsearch=True,
        )

        g._solution_error()
        # debug
        # print("\n test 3, no zeros, dimension n = {}, error = {}".format(n, g.error))

        # test result
        self.assertTrue(g.error < 1)
    def test_newton_4(self):
        # convergence relies heavily on x0
        n, s = (40, 35)
        # n, s = (5, 35)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sup_ext=100,
                                                      sym=False,
                                                      seed=s,
                                                      intweights=True)
        A[0, :] = 0

        bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])

        k_out = np.sum(bA, axis=1)
        k_in = np.sum(bA, axis=0)
        s_out = np.sum(A, axis=1)
        s_in = np.sum(A, axis=0)

        x0 = 0.1 * np.ones(4 * n)
        # x0 = np.concatenate((-1*np.ones(2*n), np.ones(2*n)))
        args = (k_out, k_in, s_out, s_in)
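        # constraints that are exactly zero start from a large theta: the solver
        # works in theta-space (mapped back via exp(-theta) below), so this pins
        # the corresponding expected values close to zero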
        x0[np.concatenate(args) == 0] = 1e3

        fun = lambda x: -mof.loglikelihood_prime_decm_exp(x, args)
        fun_jac = lambda x: -mof.loglikelihood_hessian_decm_exp(x, args)
        step_fun = lambda x: -mof.loglikelihood_decm_exp(x, args)
        lin_fun = lambda x: mof.linsearch_fun_DECM_exp(x, (step_fun, ))
        hes_reg = sof.matrix_regulariser_function
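        # fun, fun_jac and step_fun are the negated gradient, Hessian and
        # log-likelihood of the exponential DECM; lin_fun and hes_reg provide
        # the line search and the Hessian regularisation used by the solver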

        sol = sof.solver(
            x0,
            fun=fun,
            step_fun=step_fun,
            fun_jac=fun_jac,
            linsearch_fun=lin_fun,
            tol=1e-6,
            eps=1e-5,
            max_steps=100,
            method="newton",
            verbose=False,
            regularise=True,
            full_return=False,
            linsearch=True,
            hessian_regulariser=hes_reg,
        )
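        # map the solution back from theta to x = exp(-theta) before computing
        # the expected degree/strength sequences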
        sol = np.exp(-sol)

        ek = mof.expected_decm(sol)
        k = np.concatenate((k_out, k_in, s_out, s_in))
        err = np.max(np.abs(ek - k))
        # debug
        # print(ek)
        # print(k)
        # print('\ntest 4: error = {}'.format(err))
        # print('method: {}, matrix {}x{} with zeros'.format('newton', n,n))

        # test result
        self.assertTrue(err < 1e-1)
    def test_quasinewton_1(self):
        n, s = (4, 25)

        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sup_ext=10,
                                                      sym=False,
                                                      seed=s,
                                                      intweights=True)
        A[0, :] = 0

        bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])

        k_out = np.sum(bA, axis=1)
        k_in = np.sum(bA, axis=0)
        s_out = np.sum(A, axis=1)
        s_in = np.sum(A, axis=0)

        x0 = 0.9 * np.ones(n * 4)
        args = (k_out, k_in, s_out, s_in)

        fun = lambda x: -mof.loglikelihood_prime_decm_exp(x, args)
        fun_jac = lambda x: -mof.loglikelihood_hessian_diag_decm_exp(x, args)
        step_fun = lambda x: -mof.loglikelihood_decm_exp(x, args)
        lin_fun = lambda x: mof.linsearch_fun_DECM_exp(x, (
            mof.loglikelihood_decm_exp, args))
        hes_reg = sof.matrix_regulariser_function
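        # here the quasi-Newton step uses only the diagonal of the Hessian
        # (loglikelihood_hessian_diag_decm_exp) as fun_jac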

        sol = sof.solver(
            x0,
            fun=fun,
            step_fun=step_fun,
            fun_jac=fun_jac,
            linsearch_fun=lin_fun,
            tol=1e-6,
            eps=1e-10,
            max_steps=300,
            method="quasinewton",
            verbose=False,
            regularise=True,
            full_return=False,
            linsearch=True,
            hessian_regulariser=hes_reg,
        )
        sol = np.exp(-sol)

        ek = mof.expected_decm(sol)
        k = np.concatenate((k_out, k_in, s_out, s_in))
        err = np.max(np.abs(ek - k))
        # debug
        # print(ek)
        # print(k)
        # print('\ntest 0: error = {}'.format(err))
        # print('method = {}, matrix {}x{}'.format('quasinewton', n, n))

        # test result
        self.assertTrue(err < 1e-1)
    def test_iterative_3(self):

        n, s = (40, 35)
        # n, s = (5, 35)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sup_ext=100,
                                                      sym=False,
                                                      seed=s,
                                                      intweights=True)
        A[0, :] = 0

        bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])

        k_out = np.sum(bA, axis=1)
        k_in = np.sum(bA, axis=0)
        s_out = np.sum(A, axis=1)
        s_in = np.sum(A, axis=0)

        x0 = 0.1 * np.ones(n * 4)
        args = (k_out, k_in, s_out, s_in)
        x0[np.concatenate(args) == 0] = 1e3

        fun = lambda x: mof.iterative_decm_exp(x, args)
        step_fun = lambda x: -mof.loglikelihood_decm_exp(x, args)
        lin_fun = lambda x: mof.linsearch_fun_DECM_exp(x, (step_fun, ))
        hes_reg = sof.matrix_regulariser_function
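        # the "fixed-point" method iterates the map iterative_decm_exp directly,
        # so no fun_jac is passed to the solver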

        sol = sof.solver(
            x0,
            fun=fun,
            step_fun=step_fun,
            linsearch_fun=lin_fun,
            tol=1e-6,
            eps=1e-10,
            max_steps=7000,
            method="fixed-point",
            verbose=False,
            regularise=True,
            full_return=False,
            linsearch=True,
            hessian_regulariser=hes_reg,
        )

        sol = np.exp(-sol)
        ek = mof.expected_decm(sol)
        k = np.concatenate((k_out, k_in, s_out, s_in))
        err = np.max(np.abs(ek - k))
        # debug
        # print(ek)
        # print(k)
        # print('\ntest 6: error = {}'.format(err))
        # print('method: {}, matrix {}x{} '.format('iterative', n,n))

        # test result
        self.assertTrue(err < 1)
    def test_decm_exp_uniform(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        g = sample.DirectedGraph(A)
        g.initial_guess = 'uniform'
        g._set_initial_guess('decm_exp')
        tester = np.exp(np.ones(4 * n))
        self.assertTrue(np.allclose(g.x0, tester))
    def test_ecm(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        x0 = np.random.rand(n)
        g = sample_u.UndirectedGraph(A)
        g.initial_guess = x0
        g._set_initial_guess_crema_undirected()
        self.assertTrue(np.allclose(g.x0, x0))
    def test_decm(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        x0 = np.random.rand(4 * n)
        g = sample.DirectedGraph(A)
        g.initial_guess = x0
        g._set_initial_guess('decm')
        g._set_solved_problem_decm(x0)
        self.assertTrue(np.allclose(np.concatenate((g.x, g.y)), x0[:2 * n]))
    def test_decm_uniform(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        g = sample.DirectedGraph(A)
        g.initial_guess = 'uniform'
        g._set_initial_guess('decm')
        self.assertTrue(
            np.concatenate((g.x, g.y, g.out_strength,
                            g.in_strength)).all() == np.ones(4 * n).all())
    def test_crema_uniform(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        g = sample_u.UndirectedGraph(A)
        g.initial_guess = 'strengths_minor'
        g._set_initial_guess('crema')

        x = (g.strength_sequence > 0).astype(float) / (g.strength_sequence + 1)
        self.assertTrue(np.allclose(g.x0, x))
    def test_loglikelihood_hessian_diag_dcm_exp_zeros(self):

        # convergence relies heavily on x0
        n, s = (10, 35)
        # n, s = (5, 35)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sup_ext=100,
                                                      sym=False,
                                                      seed=s,
                                                      intweights=True)
        A[0, :] = 0
        A[:, 5] = 0

        bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])

        k_out = np.sum(bA, axis=1)
        k_in = np.sum(bA, axis=0)
        s_out = np.sum(A, axis=1)
        s_in = np.sum(A, axis=0)

        g = sample.DirectedGraph(A)
        g.initial_guess = "uniform"
        g.regularise = "identity"
        g._initialize_problem("decm", "newton")
        # theta = np.random.rand(6)
        theta = 0.5 * np.ones(n * 4)
        theta[np.concatenate((k_out, k_in, s_out, s_in)) == 0] = 1e4

        x0 = np.exp(-theta)

        f_sample = np.zeros(n * 4)
        for i in range(n * 4):
            f = lambda x: loglikelihood_prime_decm_exp(x, g.args)[i]
            f_sample[i] = approx_fprime(theta, f, epsilon=1e-6)[i]

        f_exp = loglikelihood_hessian_diag_decm_exp(theta, g.args)

        # debug
        # print(a)
        # print(theta, x0)
        # print(g.args)
        # print('approx',f_sample)
        # print('my',f_exp)
        # print('gradient', loglikelihood_prime_decm_exp(theta, g.args))
        # print('diff',f_sample - f_exp)
        # print('max',np.max(np.abs(f_sample - f_exp)))

        # test result
        self.assertTrue(np.allclose(f_sample, f_exp))
    def test_crema_uniform(self):
        n, seed = (4, 22)
        A = mg.random_weighted_matrix_generator_dense(n,
                                                      sym=False,
                                                      seed=seed,
                                                      sup_ext=100,
                                                      intweights=True)

        g = sample.DirectedGraph(A)
        g.initial_guess = 'strengths_minor'
        g._set_initial_guess_crema_directed()
        x = np.concatenate(
            (ntw_f.out_strength(A) / (ntw_f.out_strength(A) + 1),
             ntw_f.in_strength(A) / (ntw_f.in_strength(A) + 1)))
        self.assertTrue(np.allclose(g.x0, x))
    def test_ECM_quasinewton_random_dense_20_undir(self):

        network = mg.random_weighted_matrix_generator_dense(n=20,
                                                            sup_ext=10,
                                                            sym=True,
                                                            seed=10,
                                                            intweights=True)

        g = sample_und.UndirectedGraph(adjacency=network)

        g.solve_tool(
            model="ecm",
            method="quasinewton",
            max_steps=1000,
            verbose=False,
            initial_guess="random",
        )

        g._solution_error()

        # test result
        self.assertTrue(g.error < 1e-1)
    def test_0(self):
        N, seed = (10, 42)
        network = mg.random_weighted_matrix_generator_dense(n=N,
                                                            sup_ext=10,
                                                            sym=False,
                                                            seed=seed,
                                                            intweights=False)

        g = sample.DirectedGraph(network)
        network_bin = (network > 0).astype(int)

        g.solve_tool(
            model="crema",
            method="quasinewton",
            initial_guess="random",
            adjacency=network_bin,
            max_steps=1000,
            verbose=False,
        )

        # g._solution_error()
        err = g.error

        # print('\ntest 5: error = {}'.format(g.error))
        n = 100
        output_dir = "sample_crema_decm_det/"
        # random.seed(100)
        g.ensemble_sampler(n=n, output_dir=output_dir, seed=42)

        #

        dk_out = {'{}'.format(i): g.dseq_out[i] for i in range(N)}
        dk_in = {'{}'.format(i): g.dseq_in[i] for i in range(N)}
        ds_out = {'{}'.format(i): g.out_strength[i] for i in range(N)}
        ds_in = {'{}'.format(i): g.in_strength[i] for i in range(N)}

        # read all sampled graphs and check the average degree distribution is close enough
        dk_out_emp = {'{}'.format(i): 0 for i in range(N)}
        dk_in_emp = {'{}'.format(i): 0 for i in range(N)}
        ds_out_emp = {'{}'.format(i): 0 for i in range(N)}
        ds_in_emp = {'{}'.format(i): 0 for i in range(N)}

        for l in range(n):
            f = output_dir + "{}.txt".format(l)
            if not os.stat(f).st_size == 0:
                g_tmp = nx.read_edgelist(f,
                                         data=(("weight", float), ),
                                         create_using=nx.DiGraph())
                dk_out_tmp = dict(g_tmp.out_degree)
                dk_in_tmp = dict(g_tmp.in_degree)
                ds_out_tmp = dict(g_tmp.out_degree(weight='weight'))
                ds_in_tmp = dict(g_tmp.in_degree(weight='weight'))
                for item in dk_out_tmp.keys():
                    dk_out_emp[item] += dk_out_tmp[item]
                    dk_in_emp[item] += dk_in_tmp[item]
                    ds_out_emp[item] += ds_out_tmp[item]
                    ds_in_emp[item] += ds_in_tmp[item]

        for item in dk_out_emp.keys():
            dk_out_emp[item] = dk_out_emp[item] / n
            dk_in_emp[item] = dk_in_emp[item] / n
            ds_out_emp[item] = ds_out_emp[item] / n
            ds_in_emp[item] = ds_in_emp[item] / n
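        # dk_*_emp / ds_*_emp now hold the degree and strength sequences
        # averaged over the n sampled graphs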

        adk_out_diff = np.array(
            [abs(dk_out[item] - dk_out_emp[item]) for item in dk_out.keys()])
        adk_in_diff = np.array(
            [abs(dk_in[item] - dk_in_emp[item]) for item in dk_in.keys()])
        ads_out_diff = np.array(
            [abs(ds_out[item] - ds_out_emp[item]) for item in ds_out.keys()])
        ads_in_diff = np.array(
            [abs(ds_in[item] - ds_in_emp[item]) for item in ds_in.keys()])
        a_diff = np.concatenate(
            (adk_out_diff, adk_in_diff, ads_out_diff, ads_in_diff))
        # d_diff = {item:d[item] - d_emp[item] for item in d.keys()}
        # s_diff = {item:s[item] - s_emp[item] for item in s.keys()}

        ensemble_error = np.linalg.norm(a_diff, np.inf)

        # debug
        """
        for i in range(N):
            for j in range(N):
                if i!=j:
                    aux = x[i]*x[j]
                    # print("({},{}) p = {}".format(i,j,aux/(1+aux)))
        """

        # debug
        """
        print('\n original degree sequence ', dk_out, dk_in)
        print('\n original strength sequence ',ds_out, ds_in)
        print('\n ensemble average degree sequence', dk_out_emp, dk_in_emp)
        print('\n ensemble average strength sequence', ds_out_emp, ds_in_emp)
        print('\n empirical error = {}'.format(ensemble_error))
        print('\n theoretical error = {}'.format(err))
        print('\n original degree sequence ', dk_out, dk_in)
        """

        l = os.listdir(output_dir)

        for f in l:
            os.remove(output_dir + f)
        os.rmdir(output_dir)

        # test result
        self.assertTrue(ensemble_error < 4)
    def test_0(self):
        n, seed = (5, 42)
        network = mg.random_weighted_matrix_generator_dense(n=n,
                                                            sup_ext=10,
                                                            sym=True,
                                                            seed=seed,
                                                            intweights=True)
        # number of copies to generate

        g = sample.UndirectedGraph(adjacency=network)

        g.solve_tool(
            model="crema",
            method="quasinewton",
            initial_guess="random",
            adjacency="cm",
            max_steps=1000,
            verbose=False,
        )

        # g._solution_error()
        err = g.error

        # print('\ntest 5: error = {}'.format(g.error))
        n_sample = 50
        output_dir = "sample_crema_ecm_prob/"
        # random.seed(100)
        g.ensemble_sampler(n=n_sample, output_dir=output_dir, seed=42)

        d = {'{}'.format(i): g.dseq[i] for i in range(n)}
        s = {'{}'.format(i): g.strength_sequence[i] for i in range(n)}

        # read all sampled graphs and check the average degree distribution
        d_emp = {'{}'.format(i): 0 for i in range(n)}
        s_emp = {'{}'.format(i): 0 for i in range(n)}

        for l in range(n_sample):
            f = output_dir + "{}.txt".format(l)
            if not os.stat(f).st_size == 0:
                g_tmp = nx.read_edgelist(f, data=(("weight", float), ))
                d_tmp = dict(g_tmp.degree)
                s_tmp = dict(g_tmp.degree(weight='weight'))
                for item in d_tmp.keys():
                    d_emp[item] += d_tmp[item]
                    s_emp[item] += s_tmp[item]

        for item in d_emp.keys():
            d_emp[item] = d_emp[item] / n_sample
            s_emp[item] = s_emp[item] / n_sample

        ad_diff = np.array([abs(d[item] - d_emp[item]) for item in d.keys()])
        as_diff = np.array([abs(s[item] - s_emp[item]) for item in s.keys()])
        a_diff = np.concatenate((ad_diff, as_diff))
        d_diff = {item: abs(d[item] - d_emp[item]) for item in d.keys()}
        s_diff = {item: abs(s[item] - s_emp[item]) for item in s.keys()}

        ensemble_error = np.linalg.norm(a_diff, np.inf)
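        # infinity norm of the deviations: the single worst-matched
        # degree/strength constraint across all nodes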

        # debug
        """
        print('\n original degree sequence ', d)
        print('\n original strength sequence ', s)
        print('\n ensemble average strength sequence', s_emp)
        print('\n degree by degree difference vector ', d_diff)
        print('\n strength by strength difference vector ', s_diff)
        print('\n empirical error = {}'.format(ensemble_error))
        print('\n theoretical error = {}'.format(err))
        """

        l = os.listdir(output_dir)
        for f in l:
            os.remove(output_dir + f)
        os.rmdir(output_dir)

        # test result
        self.assertTrue(ensemble_error < 3)
    def test_0(self):
        """Ensemble sampler check for the exponential DECM on a dense
        50-node directed integer-weighted matrix.
        """
        """
        A = np.array(
            [
                [0, 1, 1, 0],
                [1, 0, 0, 1],
                [1, 0, 0, 0],
                [0, 1, 0, 0],
            ]
        )
        e = [(0,1), (0,2), (1,3)]
        d = [1,1,2,2]
        print(e)
        print(d)
        """
        N, seed = (50, 42)
        A = mg.random_weighted_matrix_generator_dense(n=N,
                                                      sup_ext=10,
                                                      sym=False,
                                                      seed=seed,
                                                      intweights=True)

        g = sample.DirectedGraph(A)

        g._solve_problem(
            model="decm_exp",
            method="newton",
            max_steps=100,
            verbose=False,
            linsearch=True,
            initial_guess="uniform",
        )

        x = g.x
        y = g.y
        b_out = g.b_out
        b_in = g.b_in
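        # solved DECM parameters (kept only for the commented-out debug checks below)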

        # g._solution_error()
        err = g.error

        # print('\ntest 5: error = {}'.format(g.error))
        n = 100
        output_dir = "sample_decm/"
        # random.seed(100)
        g.ensemble_sampler(n=n, output_dir=output_dir, seed=42)

        #

        dk_out = {'{}'.format(i): g.dseq_out[i] for i in range(N)}
        dk_in = {'{}'.format(i): g.dseq_in[i] for i in range(N)}
        ds_out = {'{}'.format(i): g.out_strength[i] for i in range(N)}
        ds_in = {'{}'.format(i): g.in_strength[i] for i in range(N)}

        # read all sampled graphs and check the average degree distribution is close enough
        dk_out_emp = {'{}'.format(i): 0 for i in range(N)}
        dk_in_emp = {'{}'.format(i): 0 for i in range(N)}
        ds_out_emp = {'{}'.format(i): 0 for i in range(N)}
        ds_in_emp = {'{}'.format(i): 0 for i in range(N)}

        for l in range(n):
            f = output_dir + "{}.txt".format(l)
            if not os.stat(f).st_size == 0:
                g_tmp = nx.read_edgelist(f,
                                         data=(("weight", float), ),
                                         create_using=nx.DiGraph())
                dk_out_tmp = dict(g_tmp.out_degree)
                dk_in_tmp = dict(g_tmp.in_degree)
                ds_out_tmp = dict(g_tmp.out_degree(weight='weight'))
                ds_in_tmp = dict(g_tmp.in_degree(weight='weight'))
                for item in dk_out_tmp.keys():
                    dk_out_emp[item] += dk_out_tmp[item]
                    dk_in_emp[item] += dk_in_tmp[item]
                    ds_out_emp[item] += ds_out_tmp[item]
                    ds_in_emp[item] += ds_in_tmp[item]

        for item in dk_out_emp.keys():
            dk_out_emp[item] = dk_out_emp[item] / n
            dk_in_emp[item] = dk_in_emp[item] / n
            ds_out_emp[item] = ds_out_emp[item] / n
            ds_in_emp[item] = ds_in_emp[item] / n

        adk_out_diff = np.array(
            [abs(dk_out[item] - dk_out_emp[item]) for item in dk_out.keys()])
        adk_in_diff = np.array(
            [abs(dk_in[item] - dk_in_emp[item]) for item in dk_in.keys()])
        ads_out_diff = np.array(
            [abs(ds_out[item] - ds_out_emp[item]) for item in ds_out.keys()])
        ads_in_diff = np.array(
            [abs(ds_in[item] - ds_in_emp[item]) for item in ds_in.keys()])
        a_diff = np.concatenate(
            (adk_out_diff, adk_in_diff, ads_out_diff, ads_in_diff))
        # d_diff = {item:d[item] - d_emp[item] for item in d.keys()}
        # s_diff = {item:s[item] - s_emp[item] for item in s.keys()}

        ensemble_error = np.linalg.norm(a_diff, np.inf)

        # debug
        """
        for i in range(N):
            for j in range(N):
                if i!=j:
                    aux = x[i]*x[j]
                    # print("({},{}) p = {}".format(i,j,aux/(1+aux)))
        """

        # debug
        """
        print('\n original degree sequence ', dk_out, dk_in)
        print('\n original strength sequence ',ds_out, ds_in)
        print('\n ensemble average degree sequence', dk_out_emp, dk_in_emp)
        print('\n ensemble average strength sequence', ds_out_emp, ds_in_emp)
        print('\n empirical error = {}'.format(ensemble_error))
        print('\n theoretical error = {}'.format(err))
        """

        l = os.listdir(output_dir)
        for f in l:
            os.remove(output_dir + f)
        os.rmdir(output_dir)

        # test result
        self.assertTrue(ensemble_error < 10)
    def test_0(self):
        """Ensemble sampler check for the ECM on a dense 50-node
        undirected integer-weighted matrix.
        """
        """
        A = np.array(
            [
                [0, 1, 1, 0],
                [1, 0, 0, 1],
                [1, 0, 0, 0],
                [0, 1, 0, 0],
            ]
        )
        e = [(0,1), (0,2), (1,3)]
        d = [1,1,2,2]
        print(e)
        print(d)
        """
        N, seed = (50, 42)
        A  = mg.random_weighted_matrix_generator_dense(
            n=N, sup_ext=10, sym=True, seed=seed, intweights=True
        )
        # number of copies to generate

        g = sample.UndirectedGraph(A)

        g._solve_problem(
            model="ecm",
            method="newton",
            max_steps=100,
            verbose=False,
            linsearch=True,
            initial_guess="uniform",
        )

        x = g.x
        # g._solution_error()
        err = g.error

        # print('\ntest 5: error = {}'.format(g.error))
        n = 1000
        output_dir = "sample_ecm/"
        # random.seed(100)
        g.ensemble_sampler(n=n, output_dir=output_dir, seed=42)
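        # each sampled graph is written as an edge list "<i>.txt" inside
        # output_dir; they are read back and averaged below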

        d = {'{}'.format(i): g.dseq[i] for i in range(N)}
        s = {'{}'.format(i): g.strength_sequence[i] for i in range(N)}

        # read all sampled graphs and check the average degree distribution is close enough
        d_emp = {'{}'.format(i): 0 for i in range(N)}
        s_emp = {'{}'.format(i): 0 for i in range(N)}

        for l in range(n):
            f = output_dir + "{}.txt".format(l)
            if not os.stat(f).st_size == 0:
                g_tmp = nx.read_edgelist(f, data=(("weight", float), ))
                d_tmp = dict(g_tmp.degree)
                s_tmp = dict(g_tmp.degree(weight='weight'))
                for item in d_tmp.keys():
                    d_emp[item] += d_tmp[item]
                    s_emp[item] += s_tmp[item]

        for item in d_emp.keys():
            d_emp[item] = d_emp[item] / n
            s_emp[item] = s_emp[item] / n

        ad_diff = np.array([abs(d[item] - d_emp[item]) for item in d.keys()])
        as_diff = np.array([abs(s[item] - s_emp[item]) for item in s.keys()])
        a_diff = np.concatenate((ad_diff, as_diff))
        d_diff = {item: d[item] - d_emp[item] for item in d.keys()}
        s_diff = {item: s[item] - s_emp[item] for item in s.keys()}

        ensemble_error = np.linalg.norm(a_diff, np.inf)

        # debug
        """
        for i in range(N):
            for j in range(N):
                if i!=j:
                    aux = x[i]*x[j]
                    # print("({},{}) p = {}".format(i,j,aux/(1+aux)))
        """


        # debug
        """
        print('\n original degree sequence ', d)
        print('\n original strength sequence ', s)
        print('\n ensemble average strength sequence', s_emp)
        print('\n degree by degree difference vector ', d_diff)
        print('\n strength by strength difference vector ', s_diff)
        print('\n empirical error = {}'.format(ensemble_error))
        print('\n theoretical error = {}'.format(err))
        """


        l = os.listdir(output_dir)
        for f in l:
            os.remove(output_dir + f)
        os.rmdir(output_dir)

        # test result
        self.assertTrue(ensemble_error < 3)