Example #1
def test_set_coordinates(self):
    G = graphs.FullConnected()
    # self._rs is a NumPy random state created in the test class setup
    coords = self._rs.uniform(size=(G.N, 2))
    G.set_coordinates(coords)
    G.set_coordinates('ring2D')
    G.set_coordinates('random2D')
    G.set_coordinates('random3D')
    G.set_coordinates('spring')
    G.set_coordinates('spring', dim=3)
    G.set_coordinates('spring', dim=3, pos=G.coords)
    # 'community2D' needs community structure, which FullConnected lacks
    self.assertRaises(AttributeError, G.set_coordinates, 'community2D')
    G = graphs.Community()
    G.set_coordinates('community2D')
    self.assertRaises(ValueError, G.set_coordinates, 'invalid')
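
For quick experimentation outside the test class, the same calls work standalone. A minimal sketch, assuming only pygsp and NumPy (rs plays the role of self._rs above):

import numpy as np
from pygsp import graphs

G = graphs.FullConnected()
rs = np.random.RandomState(42)
G.set_coordinates(rs.uniform(size=(G.N, 2)))  # explicit (N, 2) coordinates
G.set_coordinates('spring', dim=3)            # or a named layout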
Example #2
import numpy as np


def keep_pooling_laplacians(L, poolings):
    """Keep only the coarsened Laplacians that the pooling layers actually use.

    :param L: List of coarsened Laplacians
    :param poolings: Pooling values
    :return: New list with only the useful Laplacians
    """
    j = 0
    Lnew = [L[0]]
    for p in poolings:
        # Each pooling by a factor p > 1 consumes log2(p) coarsening levels.
        j += int(np.log2(p)) if p > 1 else 0
        Lnew.append(L[j])
    return Lnew


if __name__ == '__main__':
    from pygsp import graphs
    from synthetic_data.data_generation import generate_spectral_samples
    from graph_utils.laplacian import initialize_laplacian_tensor

    N = 100
    G = graphs.Community(N)
    X, labels = generate_spectral_samples(5, 128, G, f_h=20, f_l=50, lambda_h=80, lambda_l=10)
    num_levels = np.log2(np.prod([2, 4, 1])).astype(int)  # poolings 2 and 4 -> log2(8) = 3 levels
    adjacencies, perm = coarsen(G.A, levels=num_levels)  # coarsens in powers of 2
    X = perm_data(X, perm)  # reorder the signal to match the coarsened vertex ordering
    L = [initialize_laplacian_tensor(A) for A in adjacencies]
    print(len(L))
    L = keep_pooling_laplacians(L, [2, 4])
    print([lap.get_shape() for lap in L])
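
A quick sanity check of the index arithmetic in keep_pooling_laplacians: with poolings [2, 4], the loop keeps L[0], advances log2(2) = 1 level to L[1], then log2(4) = 2 more levels to L[3], so four coarsening levels are consumed in total. A minimal sketch with strings standing in for the Laplacian tensors:

levels = ['L0', 'L1', 'L2', 'L3']  # stand-ins for coarsened Laplacians
print(keep_pooling_laplacians(levels, [2, 4]))  # -> ['L0', 'L1', 'L3']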
Example #3
def main(_):
    # Resolve the experiment log directory
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(FLAGS.log_dir)
        if FLAGS.action == "train":
            exp_n += 1  # training creates a fresh exp_<n> directory
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    if FLAGS.action == "train":
        if tf.io.gfile.exists(FLAGS.log_dir):
            tf.io.gfile.rmtree(FLAGS.log_dir)
        tf.io.gfile.makedirs(FLAGS.log_dir)

        # Initialize data
        G = graphs.Community(FLAGS.num_vertices, seed=FLAGS.seed)
        G.compute_laplacian("normalized")
        # Save graph
        np.save(os.path.join(FLAGS.log_dir, "graph_weights"), G.W.todense())

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        # coarsen() occasionally raises IndexError; retry until it succeeds.
        while True:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # coarsens in powers of 2
                break
            except IndexError:
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

    elif FLAGS.action == "eval":
        W = np.load(os.path.join(FLAGS.log_dir, "graph_weights.npy"))
        G = graphs.Graph(W)
        G.compute_laplacian("normalized")
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    if FLAGS.action == "train":
        #G_nx = nx.from_numpy_matrix(G.W.todense())
        if FLAGS.load_data:
            # num_train can't exceed the saved array size
            train_data, train_labels = load_from_npy('train', FLAGS.num_train)

        else:
            random.seed(FLAGS.seed)
            train_data, train_labels = epidemics_generator(
                g_nx=nx.from_numpy_matrix(G.W.todense()),
                batch_size=FLAGS.num_train,
                timesteps=FLAGS.num_frames,
                initial_nodes=random.sample(range(FLAGS.num_vertices),
                                            int(FLAGS.num_vertices / 10)))
            save_to_npy('train', train_data, train_labels)

        train_data = perm_data(train_data, perm)
        train_mb_source = MinibatchSource(train_data,
                                          train_labels,
                                          repeat=True)

    if FLAGS.load_data:
        # num_test can't exceed the saved array size
        test_data, test_labels = load_from_npy('test', FLAGS.num_test)

    else:
        test_data, test_labels = epidemics_generator(
            g_nx=nx.from_numpy_matrix(G.W.todense()),
            batch_size=FLAGS.num_test,
            timesteps=FLAGS.num_frames,
            initial_nodes=random.sample(range(FLAGS.num_vertices),
                                        int(FLAGS.num_vertices / 10)))
        save_to_npy('test', test_data, test_labels)

    test_data = perm_data(test_data, perm)
    test_mb_source = MinibatchSource(test_data, test_labels, repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
Example #4
        T = T_2

    return T


if __name__ == '__main__':
    from pygsp import graphs
    from synthetic_data.data_generation import generate_spectral_samples_hard

    N = 100
    T = 128
    f_h = 50
    f_l = 15
    lambda_h = 80
    lambda_l = 15
    G = graphs.Community(N, seed=15)
    x, _ = generate_spectral_samples_hard(1,
                                          T,
                                          G,
                                          f_h,
                                          lambda_h,
                                          f_l,
                                          lambda_l,
                                          sigma=2,
                                          sigma_n=0.001)
    # x = hp_hp_sample(T, G, f_h, lambda_h)
    plot_joint_spectrum(x[0, :, :, 0], G, "hp_hp")
    plot_temporal_matrix(x[0, :, :, 0], "hp_hp_t")
    # x = lp_lp_sample(T, G, f_l, lambda_l)
    plot_joint_spectrum(x[1, :, :, 0], G, "lp_lp")
    plot_temporal_matrix(x[1, :, :, 0], "lp_lp_t")
Example #5
def test_community(self):
    graphs.Community()
    graphs.Community(comm_density=0.2)
    graphs.Community(k_neigh=5)
    graphs.Community(N=100, Nc=3, comm_sizes=[20, 50, 30])
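
In the last call above, comm_sizes is expected to have Nc entries that sum to N (20 + 50 + 30 = 100). A minimal standalone check of the resulting graph:

from pygsp import graphs

G = graphs.Community(N=100, Nc=3, comm_sizes=[20, 50, 30])
assert G.N == 100  # the community sizes partition the 100 vertices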
Example #6
def main(_):
    # Resolve the experiment log directory
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(FLAGS.log_dir)
        if FLAGS.action == "train":
            exp_n += 1  # training creates a fresh exp_<n> directory
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    if FLAGS.action == "train":
        if tf.gfile.Exists(FLAGS.log_dir):
            tf.gfile.DeleteRecursively(FLAGS.log_dir)
        tf.gfile.MakeDirs(FLAGS.log_dir)

        # Initialize data
        G = graphs.Community(FLAGS.num_vertices,
                             seed=FLAGS.seed)  # , world_density=0.1
        G.compute_laplacian("normalized")

        # Save graph
        np.save(os.path.join(FLAGS.log_dir, "graph_weights"), G.W.todense())

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        # coarsen() occasionally raises IndexError; retry until it succeeds.
        while True:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # coarsens in powers of 2
                break
            except IndexError:
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

        # Zero-graph ablation: null out every Laplacian (presumably to test
        # the model with no graph structure at all)
        if FLAGS.zeros:
            for i in range(len(L)):
                L[i] *= 0
            print(FLAGS.zeros)
            print("0 Graph \n\n")

    elif FLAGS.action == "eval":
        W = np.load(os.path.join(FLAGS.log_dir, "graph_weights.npy"))
        G = graphs.Graph(W)
        G.compute_laplacian("normalized")
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    if FLAGS.action == "train":
        train_data, train_labels = generate_spectral_samples_hard(
            N=FLAGS.num_train // FLAGS.num_classes,
            G=G,
            T=FLAGS.num_frames,
            f_h=FLAGS.f_h,
            f_l=FLAGS.f_l,
            lambda_h=FLAGS.lambda_h,
            lambda_l=FLAGS.lambda_l,
            sigma=FLAGS.sigma,
            sigma_n=FLAGS.sigma_n)
        train_data = perm_data(train_data, perm)
        train_mb_source = MinibatchSource(train_data,
                                          train_labels,
                                          repeat=True)

    test_data, test_labels = generate_spectral_samples_hard(
        N=FLAGS.num_test // FLAGS.num_classes,
        G=G,
        T=FLAGS.num_frames,
        f_h=FLAGS.f_h,
        f_l=FLAGS.f_l,
        lambda_h=FLAGS.lambda_h,
        lambda_l=FLAGS.lambda_l,
        sigma=FLAGS.sigma,
        sigma_n=FLAGS.sigma_n)

    test_data = perm_data(test_data, perm)
    test_mb_source = MinibatchSource(test_data, test_labels, repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
Example #7
File: test_graphs.py  Project: dsacc/pygsp
def test_Community():
    G = graphs.Community()
Example #8
def test_Community():
    G = graphs.Community()
    needed_attributes_testing(G)