def setUp(self):
    np.random.seed(SEED)
    self.G_params = {}
    self.G_params['type'] = ds.SBM
    self.G_params['N'] = 32
    self.G_params['k'] = 4
    self.G_params['p'] = 0.8
    self.G_params['q'] = 0.1
    self.G_params['type_z'] = ds.RAND
    self.dest = 5
    self.Gx, self.Gy = ds.nodes_perturbated_graphs(self.G_params, self.dest,
                                                   seed=SEED)
def test_permute_graph(self):
    """With perm=True, the perturbed graph is a permuted copy of the original
    graph with the removed nodes deleted: Ax_rm = P.T @ Ay @ P."""
    dest = 30
    Gx, Gy = ds.nodes_perturbated_graphs(self.G_params, dest, perm=True,
                                         seed=SEED)
    Ax = Gx.W.todense()
    Ay = Gy.W.todense()
    P = Gy.info['perm_matrix']
    Ax_rm = np.delete(Ax, Gy.info['rm_nodes'], axis=0)
    Ax_rm = np.delete(Ax_rm, Gy.info['rm_nodes'], axis=1)
    rm_comms_X = np.delete(Gx.info['node_com'], Gy.info['rm_nodes'])
    comm_Y = Gy.info['node_com']
    self.assertFalse(np.array_equal(Ax_rm, Ay))
    self.assertFalse(np.array_equal(rm_comms_X, comm_Y))
    self.assertTrue(np.array_equal(np.eye(Gy.N), P.dot(P.T)))
    self.assertTrue(np.array_equal(Ax_rm, P.T.dot(Ay).dot(P)))
    self.assertTrue(np.array_equal(rm_comms_X, P.T.dot(comm_Y)))
def test_model(id, signals, nn_params, model_params):
    Gx, Gy = data_sets.nodes_perturbated_graphs(signals['g_params'],
                                                signals['pert'],
                                                perm=signals['perm'])

    # Define the data model
    data = data_sets.LinearDS2GSNodesPert(
        Gx, Gy,
        signals['N_samples'],
        signals['L_filter'],
        signals['g_params']['k'],   # k is n_delts
        median=signals['median'])
    data.to_unit_norm()
    data.add_noise(signals['noise'], test_only=signals['test_only'])
    data.to_tensor()

    if nn_params['arch_type'] == "basic":
        Gx.compute_laplacian('normalized')
        archit = BasicArch(Gx.L.todense(), nn_params['F'], nn_params['K'],
                           nn_params['M'], nn_params['nonlin'], ARCH_INFO)
    elif nn_params['arch_type'] == "mlp":
        archit = MLP(nn_params['F'], nn_params['nonlin'], ARCH_INFO)
    elif nn_params['arch_type'] == "conv":
        # NOTE: the original used an undefined name N here; Gx.N (number of
        # nodes of the input graph) is assumed to be the intended input size.
        archit = ConvNN(Gx.N, nn_params['F'], nn_params['K'],
                        nn_params['nonlin'], nn_params['M'], ARCH_INFO)
    elif nn_params['arch_type'] == "linear":
        archit = MLP(nn_params['F'], nn_params['nonlin'], ARCH_INFO)
    else:
        raise RuntimeError("arch_type has to be basic, mlp, conv or linear")

    model_params['arch'] = archit
    model = Model(**model_params)

    t_init = time.time()
    epochs, _, _ = model.fit(data.train_X, data.train_Y,
                             data.val_X, data.val_Y)
    t_conv = time.time() - t_init
    mean_err, med_err, mse = model.test(data.test_X, data.test_Y)

    print("DONE {}: MSE={} - Mean Err={} - Median Err={} - Params={} - "
          "t_conv={} - epochs={}".format(id, mse, mean_err, med_err,
                                         model.count_params(),
                                         round(t_conv, 4), epochs),
          flush=True)
    return mse, mean_err, med_err, model.count_params(), t_conv, epochs
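# Minimal usage sketch for test_model above. Only the dictionary keys are
# taken from the function body; every value is an illustrative assumption,
# and the contents of model_params depend on Model's constructor (not shown).
#
#   signals = {'g_params': {'type': ds.SBM, 'N': 32, 'k': 4, 'p': 0.8,
#                           'q': 0.1, 'type_z': ds.RAND},
#              'pert': 5, 'perm': True,
#              'N_samples': [2000, 500, 500], 'L_filter': 6,
#              'median': True, 'noise': 0.05, 'test_only': True}
#   nn_params = {'arch_type': 'mlp', 'F': [32, 64, 32], 'nonlin': nn.Tanh}
#   model_params = {...}  # remaining keyword arguments for Model(**model_params)
#   test_model(0, signals, nn_params, model_params)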
def test_permutated_S(self):
    """The node signals of the permuted perturbed graph should match the
    original signals (with removed nodes deleted) up to the stored
    permutation matrix."""
    n_samps = [50, 20, 20]
    L = 6
    n_delts = self.G_params['k']
    Gx, Gy = ds.nodes_perturbated_graphs(self.G_params, self.dest,
                                         perm=True, seed=SEED)
    data = ds.LinearDS2GSNodesPert(Gx, Gy, n_samps, L, n_delts)
    P = data.Gy.info['perm_matrix']
    rm_nodes = Gy.info['rm_nodes']
    train_Sx_rm = np.delete(data.train_Sx, rm_nodes, axis=1)
    val_Sx_rm = np.delete(data.val_Sx, rm_nodes, axis=1)
    test_Sx_rm = np.delete(data.test_Sx, rm_nodes, axis=1)
    self.assertFalse(np.array_equal(train_Sx_rm, data.train_Sy))
    self.assertFalse(np.array_equal(val_Sx_rm, data.val_Sy))
    self.assertFalse(np.array_equal(test_Sx_rm, data.test_Sy))
    self.assertTrue(np.array_equal(train_Sx_rm, data.train_Sy.dot(P)))
    self.assertTrue(np.array_equal(val_Sx_rm, data.val_Sy.dot(P)))
    self.assertTrue(np.array_equal(test_Sx_rm, data.test_Sy.dot(P)))
def test_perturbation(self):
    """Perturbed graphs should remain undirected, connected and free of
    self-loops, and equal the original graph once the removed nodes are
    deleted."""
    dest = 30
    for i in range(10):
        Gx, Gy = ds.nodes_perturbated_graphs(self.G_params, dest, seed=SEED)
        Ax = Gx.W.todense()
        Ay = Gy.W.todense()
        Ax_rm = np.delete(Ax, Gy.info['rm_nodes'], axis=0)
        Ax_rm = np.delete(Ax_rm, Gy.info['rm_nodes'], axis=1)
        rm_comms = np.delete(Gx.info['node_com'], Gy.info['rm_nodes'])
        self.assertFalse(Gx.is_directed())
        self.assertTrue(Gx.is_connected())
        self.assertEqual(np.sum(np.diag(Ax)), 0)
        self.assertFalse(Gy.is_directed())
        self.assertTrue(Gy.is_connected())
        self.assertEqual(np.sum(np.diag(Ay)), 0)
        self.assertEqual(Gy.N, Gx.N - dest)
        self.assertTrue(np.array_equal(Ax_rm, Ay))
        self.assertTrue(np.array_equal(rm_comms, Gy.info['node_com']))
def test_model(signals, nn_params, model_params):
    Gx, Gy = data_sets.nodes_perturbated_graphs(signals['g_params'],
                                                signals['pert'],
                                                perm=signals['perm'],
                                                seed=SEED)

    # Define the data model
    data = data_sets.LinearDS2GSNodesPert(
        Gx, Gy,
        signals['N_samples'],
        signals['L_filter'],
        signals['g_params']['k'],   # k is n_delts
        median=signals['median'])
    data.to_unit_norm()
    data.add_noise(signals['noise'], test_only=signals['test_only'])
    data.to_tensor()

    Gx.compute_laplacian('normalized')
    Gy.compute_laplacian('normalized')
    archit = GIGOArch(Gx.L.todense(), Gy.L.todense(),
                      nn_params['Fi'], nn_params['Fo'],
                      nn_params['Ki'], nn_params['Ko'],
                      nn_params['C'], nn_params['nonlin'],
                      nn_params['last_act_fn'], nn_params['batch_norm'],
                      nn_params['arch_info'])

    model_params['arch'] = archit
    model = Model(**model_params)

    t_init = time.time()
    epochs, _, _ = model.fit(data.train_X, data.train_Y,
                             data.val_X, data.val_Y)
    t_conv = time.time() - t_init
    mean_err, med_err, mse = model.test(data.test_X, data.test_Y)

    print("DONE: MSE={} - Mean Err={} - Median Err={} - Params={} - "
          "t_conv={} - epochs={}".format(mse, mean_err, med_err,
                                         model.count_params(),
                                         round(t_conv, 4), epochs))
    return mse, med_err, mean_err, model.count_params(), t_conv, epochs
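# Sketch of the nn_params expected by the GIGO variant of test_model above.
# Only the keys come from the function body; the values are illustrative
# assumptions (input/output features, filter orders per graph, etc.):
#
#   nn_params = {'Fi': [1, 16, 32], 'Fo': [32, 16, 1],
#                'Ki': 3, 'Ko': 3, 'C': [32],
#                'nonlin': nn.Tanh, 'last_act_fn': None,
#                'batch_norm': True, 'arch_info': ARCH_INFO}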
def run(id, Gs, Signals, lrn, pert):
    if Gs['params']['type'] == ds.SBM:
        Gx, Gy = ds.nodes_perturbated_graphs(Gs['params'], pert, seed=SEED,
                                             perm=True)
    elif Gs['params']['type'] == ds.BA:
        Gx = ds.create_graph(Gs['params'], SEED)
        G_params_y = Gs['params'].copy()
        G_params_y['N'] = Gs['params']['N'] - pert
        Gy = ds.create_graph(G_params_y, 2 * SEED)
    else:
        raise RuntimeError("Choose a valid graph type")

    data = ds.LinearDS2GSNodesPert(Gx, Gy, Signals['samples'], Signals['L'],
                                   Signals['deltas'],
                                   median=Signals['median'],
                                   same_coeffs=Signals['same_coeffs'],
                                   neg_coeffs=Signals['neg_coeffs'])
    data.to_unit_norm()
    data.add_noise(Signals['noise'], test_only=Signals['test_only'])
    data.to_tensor()

    epochs = 0
    params = np.zeros(N_EXPS)
    med_err = np.zeros(N_EXPS)
    mse = np.zeros(N_EXPS)
    for i, exp in enumerate(EXPS):
        if exp['type'] == 'Linear':
            model = LinearModel(exp['N'])
        elif exp['type'] == 'Enc_Dec':
            exp['n_dec'][-1] = Gy.N
            clust_x = gc.MultiResGraphClustering(Gx, exp['n_enc'],
                                                 k=exp['n_enc'][-1],
                                                 up_method=exp['downs'])
            clust_y = gc.MultiResGraphClustering(Gy, exp['n_dec'],
                                                 k=exp['n_enc'][-1],
                                                 up_method=exp['ups'])
            net = GraphEncoderDecoder(exp['f_enc'], clust_x.sizes, clust_x.Ds,
                                      exp['f_dec'], clust_y.sizes, clust_y.Us,
                                      exp['f_conv'], As_dec=clust_y.As,
                                      K_dec=exp['K_dec'], K_enc=exp['K_enc'],
                                      As_enc=clust_x.As, act_fn=lrn['af'],
                                      last_act_fn=lrn['laf'], ups=exp['ups'],
                                      downs=exp['downs'])
        elif exp['type'] == 'AutoConv':
            conv = exp['convs'][PERT.index(pert)]
            net = ConvAutoencoder(conv['f_enc'], conv['kernel_enc'],
                                  conv['f_dec'], conv['kernel_dec'])
        elif exp['type'] == 'AutoFC':
            exp['n_dec'][-1] = Gy.N
            net = FCAutoencoder(exp['n_enc'], exp['n_dec'], bias=exp['bias'])
        else:
            raise RuntimeError('Unknown experiment type')

        if exp['type'] != 'Linear':
            model = Model(net, learning_rate=lrn['lr'], decay_rate=lrn['dr'],
                          batch_size=lrn['batch'], epochs=lrn['epochs'],
                          eval_freq=EVAL_F, max_non_dec=lrn['non_dec'],
                          verbose=VERBOSE, early_stop=exp['early_stop'])
            epochs, _, _ = model.fit(data.train_X, data.train_Y,
                                     data.val_X, data.val_Y)
        _, med_err[i], mse[i] = model.test(data.test_X, data.test_Y)
        params[i] = model.count_params()
        print('G: {}, {}-{} ({}): epochs {} - mse {} - MedianErr: {}'.format(
            id, i, exp['type'], params[i], epochs, mse[i], med_err[i]))
    return params, med_err, mse
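# Hedged sketch of a call to run(). The module-level globals SEED, EXPS,
# N_EXPS, PERT, EVAL_F and VERBOSE are assumed to be defined elsewhere in the
# script; the dictionary keys come from the function body, while the values
# below are illustrative and not the original experiment settings.
#
#   Gs = {'params': {'type': ds.SBM, 'N': 32, 'k': 4, 'p': 0.8, 'q': 0.1,
#                    'type_z': ds.RAND}}
#   Signals = {'samples': [2000, 500, 500], 'L': 6, 'deltas': 4,
#              'median': True, 'same_coeffs': False, 'neg_coeffs': False,
#              'noise': 0.05, 'test_only': True}
#   lrn = {'lr': 1e-3, 'dr': 0.9, 'batch': 100, 'epochs': 100,
#          'non_dec': 10, 'af': nn.Tanh, 'laf': None}
#   params, med_err, mse = run(0, Gs, Signals, lrn, pert=5)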