def test_fixedpoint_cm_9(self):
    """classes with cardinality more than 1 and zero degrees"""
    # test Matrix 1
    n, seed = (100, 22)
    A = mg.random_binary_matrix_generator_dense(n, sym=True, seed=seed)
    g = sample.UndirectedGraph(A)
    g._solve_problem(
        model="cm-new",
        method="quasinewton",
        initial_guess="random",
        max_steps=300,
        verbose=False,
        linsearch="True",
    )
    g.solution_error()
    # print('degseq = ', np.concatenate((g.dseq_out, g.dseq_in)))
    # print('expected degseq = ', g.expected_dseq)
    # debug
    # print(g.r_dseq_out)
    # print(g.r_dseq_in)
    # print(g.rnz_dseq_out)
    # print(g.rnz_dseq_in)
    # print('\ntest 9: error = {}'.format(g.error))
    # test result
    self.assertTrue(g.error < 1e-1)
def test_CReAMa_cm_quasinewton_random_dense_20(self):
    network = mg.random_weighted_matrix_generator_dense(
        n=20, sup_ext=10, sym=True, seed=None
    )
    network_bin = (network > 0).astype(int)
    g = sample_und.UndirectedGraph(adjacency=network)
    g.solve_tool(
        model="CReAMa",
        method="newton",
        initial_guess="random",
        adjacency="cm-new",
        max_steps=1000,
        verbose=False,
    )
    g.solution_error()
    # test result
    self.assertTrue(g.relative_error_strength < 1e-1)
    self.assertTrue(g.relative_error_strength < 1e-2)
def test_fixedpoint_dcm_10(self):
    # test Matrix 1
    n, seed = (40, 35)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    A[0, :] = 0
    g = sample.DirectedGraph(A)
    g._solve_problem(
        model="decm",
        method="fixed-point",
        max_steps=20000,
        verbose=False,
        initial_guess="uniform",
        linsearch=True,
    )
    g.solution_error()
    # debug
    # print("\n test 4, zeros, dimension n = {}, error = {}".format(n, g.error))
    # test result
    self.assertTrue(g.error < 1)
def test_loglikelihood_hessian_dcm_new_simmetry(self):
    n, seed = (3, 42)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "identity"
    g._initialize_problem("dcm", "newton")
    theta = np.random.rand(2 * n)
    x0 = np.exp(-theta)
    k_out = g.args[0]
    k_in = g.args[1]
    nz_index_out = g.args[2]
    nz_index_in = g.args[3]
    f = loglikelihood_hessian_dcm_new(theta, g.args)
    f_t = f.T
    # debug
    # print(a)
    # print(theta, x0)
    # print(g.args)
    # print(f - f_t)
    # test result
    self.assertTrue(np.allclose(f - f_t, np.zeros((2 * n, 2 * n))))
def test_3(self):
    n, seed = (40, 22)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    a[0, :] = 0
    k_out = np.sum(a, 1)
    k_in = np.sum(a, 0)
    g = sample.DirectedGraph(a)
    g._solve_problem(
        model="dcm_new",
        method="newton",
        max_steps=3000,
        verbose=False,
        initial_guess="uniform",
        linsearch="False",
    )
    g.solution_error()
    err = g.error
    # debug
    # print('\ntest 1: error = {}'.format(err))
    # test result
    self.assertTrue(err < 1)
def test_iterative_dcm(self):
    n, seed = (3, 42)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    # rd
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "eigenvalues"
    g._initialize_problem("dcm", "fixed-point")
    x0 = 0.5 * np.ones(4)
    f_sample = -g.fun(x0)
    g.last_model = "dcm"
    g._set_solved_problem(f_sample)
    f_full = np.concatenate((g.x, g.y))
    f_correct = -np.array([2.5, 2.5, 0, 0, 1, 1.25])
    # debug
    # print(a)
    # print(x0, x)
    # print(f_full)
    # test result
    self.assertTrue(np.allclose(f_full, f_correct))
def test_loglikelihood_hessian_diag_dcm_new(self):
    n, seed = (3, 42)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "identity"
    g._initialize_problem("dcm", "newton")
    theta = np.random.rand(n * 2)
    x0 = np.exp(-theta)
    k_out = g.args[0]
    k_in = g.args[1]
    nz_index_out = g.args[2]
    nz_index_in = g.args[3]
    f_sample = np.zeros(2 * n)
    for i in range(2 * n):
        f = lambda x: loglikelihood_prime_dcm_new(x, g.args)[i]
        f_sample[i] = approx_fprime(theta, f, epsilon=1e-6)[i]
    g.last_model = "dcm"
    f_new = loglikelihood_hessian_diag_dcm_new(theta, g.args)
    # debug
    # print(a)
    # print(theta, x0)
    # print(g.args)
    # print(f_sample)
    # print(f_new)
    # test result
    self.assertTrue(np.allclose(f_sample, f_new))
def test_CReAMa_original_Dianati_random_dense_20_dir(self):
    network = mg.random_weighted_matrix_generator_dense(
        n=20, sup_ext=10, sym=False, seed=None
    )
    network_bin = (network > 0).astype(int)
    g = sample.DirectedGraph(adjacency=network)
    g.solve_tool(
        model="CReAMa",
        method="fixed-point",
        initial_guess="random",
        adjacency=network_bin,
        max_steps=1000,
        verbose=False,
    )
    g.solution_error()
    # test result
    self.assertTrue(g.relative_error_strength < 1e-1)
    self.assertTrue(g.relative_error_strength < 1e-2)
    self.assertTrue((g._weighted_realisation() >= 0).all())
def test_loglikelihood_dcm(self):
    """
    a = np.array([[0, 1, 1],
                  [1, 0, 1],
                  [0, 1, 0]])
    """
    n, seed = (3, 42)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "eigenvalues"
    g._initialize_problem("dcm", "quasinewton")
    x0 = np.concatenate((g.r_x, g.r_y))
    # call loglikelihood function
    f_sample = -g.step_fun(x0)
    f_correct = 4 * np.log(1 / 2) - 3 * np.log(5 / 4)
    # debug
    # print(x0)
    # print(f_sample)
    # print(f_correct)
    # test result
    self.assertTrue(round(f_sample, 3) == round(f_correct, 3))
def test_fixedpoint_2(self):
    """classes with cardinality 1 and zero degrees"""
    # test Matrix 1
    n, seed = (40, 22)
    A = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    A[0, :] = 0
    g = sample.DirectedGraph(A)
    g._solve_problem(
        model="dcm",
        method="fixed-point",
        max_steps=300,
        verbose=False,
        initial_guess="uniform",
        linsearch="False",
    )
    g.solution_error()
    # print('degseq = ', np.concatenate((g.dseq_out, g.dseq_in)))
    # print('expected degseq = ', g.expected_dseq)
    # debug
    # print(g.r_dseq_out)
    # print(g.r_dseq_in)
    # print(g.rnz_dseq_out)
    # print(g.rnz_dseq_in)
    # print('\ntest 7: error = {}'.format(g.error))
    # test result
    self.assertTrue(g.error < 1e-1)
def test_newton_4(self):
    # convergence relies heavily on x0
    n, s = (40, 35)
    # n, s = (5, 35)
    A = mg.random_weighted_matrix_generator_dense(
        n, sup_ext=100, sym=False, seed=s, intweights=True
    )
    A[0, :] = 0
    bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])
    k_out = np.sum(bA, axis=1)
    k_in = np.sum(bA, axis=0)
    s_out = np.sum(A, axis=1)
    s_in = np.sum(A, axis=0)
    x0 = 0.1 * np.ones(4 * n)
    # x0 = np.concatenate((-1 * np.ones(2 * n), np.ones(2 * n)))
    args = (k_out, k_in, s_out, s_in)
    x0[np.concatenate(args) == 0] = 1e3
    fun = lambda x: -sample.loglikelihood_prime_decm_new(x, args)
    fun_jac = lambda x: -sample.loglikelihood_hessian_decm_new(x, args)
    step_fun = lambda x: -sample.loglikelihood_decm_new(x, args)
    lin_fun = lambda x: sample.linsearch_fun_DECM_new(x, (step_fun,))
    hes_reg = sample.hessian_regulariser_function
    sol = sample.solver(
        x0,
        fun=fun,
        step_fun=step_fun,
        fun_jac=fun_jac,
        linsearch_fun=lin_fun,
        tol=1e-6,
        eps=1e-5,
        max_steps=100,
        method="newton",
        verbose=False,
        regularise=True,
        full_return=False,
        linsearch=True,
        hessian_regulariser=hes_reg,
    )
    sol = np.exp(-sol)
    ek = sample.expected_decm(sol)
    k = np.concatenate((k_out, k_in, s_out, s_in))
    err = np.max(np.abs(ek - k))
    # debug
    # print(ek)
    # print(k)
    # print('\ntest 4: error = {}'.format(err))
    # print('method: {}, matrix {}x{} with zeros'.format('newton', n, n))
    # test result
    self.assertTrue(err < 1e-1)
def test_quasinewton_1(self):
    n, s = (4, 25)
    A = mg.random_weighted_matrix_generator_dense(
        n, sup_ext=10, sym=False, seed=s, intweights=True
    )
    A[0, :] = 0
    bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])
    k_out = np.sum(bA, axis=1)
    k_in = np.sum(bA, axis=0)
    s_out = np.sum(A, axis=1)
    s_in = np.sum(A, axis=0)
    x0 = 0.9 * np.ones(n * 4)
    args = (k_out, k_in, s_out, s_in)
    fun = lambda x: -sample.loglikelihood_prime_decm_new(x, args)
    fun_jac = lambda x: -sample.loglikelihood_hessian_diag_decm_new(x, args)
    step_fun = lambda x: -sample.loglikelihood_decm_new(x, args)
    lin_fun = lambda x: sample.linsearch_fun_DECM_new(x, (step_fun,))
    hes_reg = sample.hessian_regulariser_function
    sol = sample.solver(
        x0,
        fun=fun,
        step_fun=step_fun,
        fun_jac=fun_jac,
        linsearch_fun=lin_fun,
        tol=1e-6,
        eps=1e-10,
        max_steps=300,
        method="quasinewton",
        verbose=False,
        regularise=True,
        full_return=False,
        linsearch=True,
        hessian_regulariser=hes_reg,
    )
    sol = np.exp(-sol)
    ek = sample.expected_decm(sol)
    k = np.concatenate((k_out, k_in, s_out, s_in))
    err = np.max(np.abs(ek - k))
    # debug
    # print(ek)
    # print(k)
    # print('\ntest 0: error = {}'.format(err))
    # print('method = {}, matrix {}x{}'.format('quasinewton', n, n))
    # test result
    self.assertTrue(err < 1e-1)
def test_dcm_new_uniform(self):
    n, seed = (4, 22)
    A = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    g = sample.DirectedGraph(A)
    g.initial_guess = 'uniform'
    g._set_initial_guess('dcm_new')
    self.assertTrue(
        np.concatenate((g.r_x, g.r_y)).all()
        == np.array([1e3, 0.5, 0.5, 0.5, 1e3, 0.5]).all()
    )
def test_cm_uniform(self):
    n, seed = (4, 22)
    A = mg.random_binary_matrix_generator_dense(n, sym=True, seed=seed)
    g = sample_u.UndirectedGraph(A)
    g.initial_guess = 'uniform'
    g.last_model = "cm"
    g._set_initial_guess('cm')
    self.assertTrue(g.x0.all() == np.array([0.5, 0.5]).all())
def test_dcm_new(self):
    n, seed = (4, 22)
    A = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    x0 = np.random.rand(2 * n)
    g = sample.DirectedGraph(A)
    g.initial_guess = x0
    g._set_initial_guess('dcm_new')
    g._set_solved_problem_dcm(x0)
    self.assertTrue(np.concatenate((g.x, g.y)).all() == x0.all())
def test_iterative_2(self):
    n, s = (40, 35)
    # n, s = (5, 35)
    A = mg.random_weighted_matrix_generator_dense(
        n, sup_ext=100, sym=False, seed=s, intweights=True
    )
    # A[0, :] = 0
    bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])
    k_out = np.sum(bA, axis=1)
    k_in = np.sum(bA, axis=0)
    s_out = np.sum(A, axis=1)
    s_in = np.sum(A, axis=0)
    x0 = 0.1 * np.ones(n * 4)
    args = (k_out, k_in, s_out, s_in)
    x0[np.concatenate(args) == 0] = 1e3
    fun = lambda x: sample.iterative_decm_new(x, args)
    step_fun = lambda x: -sample.loglikelihood_decm_new(x, args)
    lin_fun = lambda x: sample.linsearch_fun_DECM_new(x, (step_fun,))
    hes_reg = sample.hessian_regulariser_function
    sol = sample.solver(
        x0,
        fun=fun,
        step_fun=step_fun,
        linsearch_fun=lin_fun,
        tol=1e-6,
        eps=1e-10,
        max_steps=7000,
        method="fixed-point",
        verbose=False,
        regularise=True,
        full_return=False,
        linsearch=True,
        hessian_regulariser=hes_reg,
    )
    sol = np.exp(-sol)
    ek = sample.expected_decm(sol)
    k = np.concatenate((k_out, k_in, s_out, s_in))
    err = np.max(np.abs(ek - k))
    # debug
    # print(ek)
    # print(k)
    # print('\ntest 5: error = {}'.format(err))
    # print('method: {}, matrix {}x{}'.format('iterative', n, n))
    # test result
    self.assertTrue(err < 1)
def test_cm(self):
    n, seed = (4, 22)
    A = mg.random_binary_matrix_generator_dense(n, sym=True, seed=seed)
    x0 = np.random.rand(n)
    g = sample_u.UndirectedGraph(A)
    g.initial_guess = x0
    g._set_initial_guess_cm()
    g.full_return = False
    g.last_model = "cm"
    g._set_solved_problem_cm(g.x0)
    self.assertTrue(g.x.all() == x0.all())
def test_ecm(self):
    n, seed = (4, 22)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    x0 = np.random.rand(n)
    g = sample_u.UndirectedGraph(A)
    g.initial_guess = x0
    g._set_initial_guess_CReAMa()
    self.assertTrue(g.x0.all() == x0.all())
def test_CREAMA_uniform(self):
    n, seed = (4, 22)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    g = sample_u.UndirectedGraph(A)
    g.initial_guess = 'strengths_minor'
    g._set_initial_guess('CReAMa')
    x = (g.strength_sequence > 0).astype(float) / (g.strength_sequence + 1)
    self.assertTrue(g.x0.all() == x.all())
def test_decm_new_uniform(self):
    n, seed = (4, 22)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    g = sample.DirectedGraph(A)
    g.initial_guess = 'uniform'
    g._set_initial_guess('decm_new')
    self.assertTrue(
        np.concatenate((g.x, g.y, g.out_strength, g.in_strength)).all()
        == np.ones(4 * n).all()
    )
def test_decm(self):
    n, seed = (4, 22)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    x0 = np.random.rand(4 * n)
    g = sample.DirectedGraph(A)
    g.initial_guess = x0
    g._set_initial_guess('decm')
    g._set_solved_problem_decm(x0)
    self.assertTrue(np.concatenate((g.x, g.y)).all() == x0.all())
def test_loglikelihood_hessian_diag_dcm_new_zeros(self):
    # convergence relies heavily on x0
    n, s = (10, 35)
    # n, s = (5, 35)
    A = mg.random_weighted_matrix_generator_dense(
        n, sup_ext=100, sym=False, seed=s, intweights=True
    )
    A[0, :] = 0
    A[:, 5] = 0
    bA = np.array([[1 if aa != 0 else 0 for aa in a] for a in A])
    k_out = np.sum(bA, axis=1)
    k_in = np.sum(bA, axis=0)
    s_out = np.sum(A, axis=1)
    s_in = np.sum(A, axis=0)
    g = sample.DirectedGraph(A)
    g.initial_guess = "uniform"
    g.regularise = "identity"
    g._initialize_problem("decm", "newton")
    # theta = np.random.rand(6)
    theta = 0.5 * np.ones(n * 4)
    theta[np.concatenate((k_out, k_in, s_out, s_in)) == 0] = 1e4
    x0 = np.exp(-theta)
    f_sample = np.zeros(n * 4)
    for i in range(n * 4):
        f = lambda x: loglikelihood_prime_decm_new(x, g.args)[i]
        f_sample[i] = approx_fprime(theta, f, epsilon=1e-6)[i]
    f_new = loglikelihood_hessian_diag_decm_new(theta, g.args)
    # debug
    # print(a)
    # print(theta, x0)
    # print(g.args)
    # print('approx', f_sample)
    # print('my', f_new)
    # print('gradient', loglikelihood_prime_decm_new(theta, g.args))
    # print('diff', f_sample - f_new)
    # print('max', np.max(np.abs(f_sample - f_new)))
    # test result
    self.assertTrue(np.allclose(f_sample, f_new))
def test_CREAMA(self):
    n, seed = (4, 22)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    x0 = np.random.rand(n)
    g = sample_u.UndirectedGraph(A)
    g.initial_guess = x0
    g._set_initial_guess_CReAMa()
    g.full_return = False
    g._set_solved_problem_CReAMa(g.x0)
    self.assertTrue(g.beta.all() == x0.all())
def test_CREAMA_uniform(self):
    n, seed = (4, 22)
    A = mg.random_weighted_matrix_generator_dense(
        n, sym=False, seed=seed, sup_ext=100, intweights=True
    )
    g = sample.DirectedGraph(A)
    g.initial_guess = 'strengths_minor'
    g._set_initial_guess_CReAMa()
    x = np.concatenate(
        (sample.out_strength(A) / (sample.out_strength(A) + 1),
         sample.in_strength(A) / (sample.in_strength(A) + 1))
    )
    self.assertTrue(g.x0.all() == x.all())
def test_loglikelihood_hessian_dcm(self):
    # n, s = (3, 1)
    n, s = (30, 1)
    a = mg.random_binary_matrix_generator_custom_density(
        n=n, p=0.15, sym=False, seed=s
    )
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "eigenvalues"
    g._initialize_problem("dcm", "newton")
    k_out = g.args[0]
    k_in = g.args[1]
    nz_index_out = g.args[2]
    nz_index_in = g.args[3]
    n_rd = len(k_out)
    theta = np.random.rand(2 * n_rd)
    f_sample = np.zeros((n_rd * 2, n_rd * 2))
    for i in range(n_rd * 2):
        f = lambda x: sample.loglikelihood_prime_dcm(x, g.args)[i]
        f_sample[i, :] = approx_fprime(theta, f, epsilon=1e-6)
    f_new = sample.loglikelihood_hessian_dcm(theta, g.args)
    """
    for i in range(2 * n_rd):
        for j in range(2 * n_rd):
            if np.allclose(f_new[i, j], f_sample[i, j], atol=1e-1) == False:
                print(i, j)
                print(f_new[i, j])
                print(f_sample[i, j])
                print(f_sample[i, j] / f_new[i, j])
    """
    # debug
    # print(theta, x0)
    # print(g.args)
    # print(n_rd / n)
    # print(f_sample)
    # print(f_new)
    # test result
    self.assertTrue(np.allclose(f_sample, f_new, atol=1))
def test_loglikelihood(self):
    # problem initialisation
    A = mg.random_weighted_matrix_generator_dense(n=10, sym=False)
    prova = sample.DirectedGraph(A)
    prova.initial_guess = "random"
    prova._initialize_problem("decm", "quasinewton")
    sol = np.concatenate((prova.x, prova.y, prova.b_out, prova.b_in))
    # casadi functions initialization
    x, f = casadi_loglikelihood_decm(A)
    casadi_fun = Function("f", [x], [f])
    f_og = sample.loglikelihood_decm(sol, prova.args)
    f_casadi = casadi_fun(sol)
    err_loglikelihood = abs(f_og - f_casadi)
    # print('loglikelihood og-casadi = {}'.format(err_loglikelihood))
    self.assertTrue(err_loglikelihood < 1e-10)
def test_loglikelihood_hessian_dcm_new_emi_simmetry(self):
    n, s = (50, 1)
    # n, s = (2, 1)
    a = mg.random_binary_matrix_generator_custom_density(
        n=n, p=0.15, sym=False, seed=s
    )
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "identity"
    g._initialize_problem("dcm", "newton")
    k_out = g.args[0]
    k_in = g.args[1]
    nz_index_out = g.args[2]
    nz_index_in = g.args[3]
    n_rd = len(k_out)
    # print(n_rd / n)
    theta = np.random.rand(2 * n_rd)
    f_new = loglikelihood_hessian_dcm_new(theta, g.args)
    for i in range(2 * n_rd):
        for j in range(2 * n_rd):
            if f_new[i, j] - f_new[j, i] != 0:
                print(i, j)
    # debug
    # print(a)
    # print(theta, x0)
    # print(g.args)
    # print(f_sample)
    # print(f_new)
    # test result
    self.assertTrue(
        np.allclose(f_new - f_new.T, np.zeros((2 * n_rd, 2 * n_rd)))
    )
def test_loglikelihood_prime_dcm(self):
    """
    a = np.array([[0, 1, 1],
                  [1, 0, 1],
                  [0, 1, 0]])
    """
    n, seed = (30, 42)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    # rd
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "eigenvalues"
    g._initialize_problem("dcm", "newton")
    k_out = g.args[0]
    k_in = g.args[1]
    nz_index_out = g.args[2]
    nz_index_in = g.args[3]
    n_rd = len(k_out)
    theta = np.random.rand(2 * n_rd)
    f_sample = np.zeros(n_rd * 2)
    f = lambda x: sample.loglikelihood_dcm(x, g.args)
    f_sample = approx_fprime(theta, f, epsilon=1e-6)
    f_new = sample.loglikelihood_prime_dcm(theta, g.args)
    # debug
    # print(a)
    # print(x0, x)
    # print(f_sample)
    # print(f_new)
    # for i in range(2 * n_rd):
    #     if not np.allclose(f_new[i], f_sample[i], atol=1e-1):
    #         print(i)
    # test result
    self.assertTrue(np.allclose(f_sample, f_new, atol=1e-1))
def test_loglikelihood_prime(self):
    # problem initialisation
    A = mg.random_weighted_matrix_generator_dense(n=10, sym=False)
    prova = sample.DirectedGraph(A)
    prova.initial_guess = "random"
    prova._initialize_problem("decm", "quasinewton")
    sol = np.concatenate((prova.x, prova.y, prova.b_out, prova.b_in))
    # casadi functions initialization
    x, f = casadi_loglikelihood_decm(A)
    casadi_fun = Function("f", [x], [f])
    fj = jacobian(f, x)
    casadi_fun_gradient = Function("j", [x], [fj])
    fj_og = sample.loglikelihood_prime_decm(sol, prova.args)
    fj_casadi = casadi_fun_gradient(sol)
    err_ll_prime = max(abs(fj_og - fj_casadi.__array__()[0]))
    # print('loglikelihood prime og-casadi = {}'.format(err_ll_prime))
    self.assertTrue(err_ll_prime < 1e-10)
def test_iterative_dcm_new(self):
    n, seed = (3, 42)
    a = mg.random_binary_matrix_generator_dense(n, sym=False, seed=seed)
    # rd
    g = sample.DirectedGraph(a)
    g.degree_reduction()
    g.initial_guess = "uniform"
    g.regularise = "identity"
    g._initialize_problem("dcm", "fixed-point")
    # theta = np.random.rand(6)
    theta = np.array([np.infty, 0.5, 0.5, 0.5, np.infty, 0.5])
    x0 = np.exp(-theta)
    f_sample = g.fun(x0)
    g.last_model = "dcm"
    g._set_solved_problem(f_sample)
    f_full = np.concatenate((g.x, g.y))
    f_full = -np.log(f_full)
    f_new = iterative_dcm_new(theta, g.args)
    g._set_solved_problem_dcm(f_new)
    f_new_full = np.concatenate((g.x, g.y))
    # f_new_bis = iterative_dcm_new_bis(theta, g.args)
    # print('standard ', f_new)
    # print('bis ', f_new_bis)
    # debug
    # print(a)
    # print(theta, x0)
    # print(g.args)
    # print(f_full)
    # print(f_new)
    # print(f_new_full)
    # test result
    self.assertTrue(np.allclose(f_full, f_new_full))