Example #1
def main(_):
    # Initialize tempdir
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)

    # Initialize data
    G = graphs.ErdosRenyi(FLAGS.num_vertices, 0.1, seed=42)
    G.compute_laplacian("normalized")
    L = initialize_laplacian_tensor(G.W)
    W = (G.W).astype(np.float32)

    train_data, train_labels = generate_wave_samples(FLAGS.num_train,
                                                     W,
                                                     T=FLAGS.num_frames,
                                                     sigma=FLAGS.sigma)
    train_mb_source = MinibatchSource(train_data, train_labels)

    test_data, test_labels = generate_wave_samples(FLAGS.num_test,
                                                   W,
                                                   T=FLAGS.num_frames,
                                                   sigma=FLAGS.sigma)

    # Run training and evaluation loop
    run_training(train_mb_source, L, test_data, test_labels)
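Note: initialize_laplacian_tensor is used throughout these examples but is never defined on this page. A minimal sketch of what it plausibly does, inferred from the call sites (the signature and behavior are assumptions, not the original implementation):

import numpy as np
import scipy.sparse
import tensorflow as tf

def initialize_laplacian_tensor(W):
    # Sketch: build the normalized laplacian L = I - D^{-1/2} W D^{-1/2}
    # from a dense or sparse weight matrix and wrap it as a TF constant.
    W = scipy.sparse.csr_matrix(W)
    d = np.asarray(W.sum(axis=1)).flatten()
    d_inv_sqrt = np.zeros_like(d, dtype=np.float64)
    nz = d > 0
    d_inv_sqrt[nz] = d[nz] ** -0.5
    D_inv_sqrt = scipy.sparse.diags(d_inv_sqrt)
    L = scipy.sparse.identity(W.shape[0]) - D_inv_sqrt @ W @ D_inv_sqrt
    return tf.constant(np.asarray(L.todense()), dtype=tf.float32)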
Example #2
def main(_):
    global EPOCH_SIZE
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(
            FLAGS.log_dir) + 1 if FLAGS.action == "train" else _last_exp(
                FLAGS.log_dir)
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    # Initialize data
    G = create_graph(DATASET_FILE)
    G.compute_laplacian("normalized")

    if FLAGS.action == "train":
        if tf.gfile.Exists(FLAGS.log_dir):
            tf.gfile.DeleteRecursively(FLAGS.log_dir)
        tf.gfile.MakeDirs(FLAGS.log_dir)

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        # coarsen() occasionally raises IndexError; retry until it succeeds
        error = True
        while error:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                error = False
            except IndexError:
                error = True
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

    elif FLAGS.action == "eval":
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    train_mb_source, test_mb_source = create_train_test_mb_sources(
        DATASET_FILE, FLAGS.test_size, perm=perm, n_samples=FLAGS.n_samples)
    EPOCH_SIZE = train_mb_source.dataset_length

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
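Note: _last_exp is another helper that never appears on this page. From its use above (training picks _last_exp + 1, evaluation reuses _last_exp), a hypothetical implementation could scan log_dir for "exp_N" subdirectories:

import os

def _last_exp(log_dir):
    # Hypothetical: return the largest N among existing exp_N directories,
    # or -1 when there are none, so training starts at exp_0.
    if not os.path.isdir(log_dir):
        return -1
    nums = [int(name.split("_")[-1]) for name in os.listdir(log_dir)
            if name.startswith("exp_") and name.split("_")[-1].isdigit()]
    return max(nums) if nums else -1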
Example #3
def main(_):
    # Initialize tempdir
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)

    x = gen_data(interval=5, future=7, graphDataPath=FLAGS.graphPath)

    sparsed = pd.read_csv('NY/data/' + FLAGS.graphPath, index_col=0)
    W = sparsed.values - np.identity(len(sparsed))
    G = graphs.Graph(W)

    G.compute_laplacian("normalized")
    L = initialize_laplacian_tensor(G.W)
    W = (G.W).astype(np.float32)

    print('{} nodes, {} edges'.format(G.N, G.Ne))

    scaler = StandardScaler()
    scaler.fit(x)

    seqs = np.array(list(split_seq(x.index.values, 19)))
    np.random.shuffle(seqs)
    print('{} total samples'.format(len(seqs)))

    split = int(len(seqs) * FLAGS.train_ratio)

    train_ds = DataLoader(x, seqs[:split], transform=scaler)
    train_dl = data.DataLoader(train_ds,
                               batch_size=10,
                               shuffle=True,
                               num_workers=4)

    test_ds = DataLoader(x, seqs[split:], transform=scaler)
    test_dl = data.DataLoader(test_ds,
                              batch_size=10,
                              shuffle=True,
                              num_workers=4)

    # Run training and evaluation loop

    run_training(train_dl, L, test_dl)
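Note: split_seq is not shown either. Given that Example #3 turns x.index.values into an array of fixed-length windows, a plausible sketch (assumed, not the original) is a simple non-overlapping chunker:

def split_seq(seq, n):
    # Yield consecutive non-overlapping windows of length n,
    # dropping any shorter remainder at the end.
    for i in range(0, len(seq) - n + 1, n):
        yield seq[i:i + n]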
Example #4
def main(_):
    global EPOCH_SIZE
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(
            FLAGS.log_dir) + 1 if FLAGS.action == "train" else _last_exp(
                FLAGS.log_dir)
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    # Initialize data
    X, y = load_data()
    G = create_spatial_eeg_graph(MONTAGE, q=FLAGS.q, k=FLAGS.k)
    # G = create_data_eeg_graph(MONTAGE, X)
    G.compute_laplacian("normalized")

    if FLAGS.action == "train":
        if tf.gfile.Exists(FLAGS.log_dir):
            tf.gfile.DeleteRecursively(FLAGS.log_dir)
        tf.gfile.MakeDirs(FLAGS.log_dir)

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        error = True
        while error:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                error = False
            except IndexError:
                error = True
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

    elif FLAGS.action == "eval":
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    X = perm_data(X, perm)
    train_samples, test_samples = train_validation_test_split(
        len(y), FLAGS.margin, FLAGS.test_size)

    if FLAGS.action == "train":
        EPOCH_SIZE = train_samples.shape[0]
        print(EPOCH_SIZE)
        train_mb_source = EyeMinibatchSource(X,
                                             y,
                                             train_samples,
                                             margin=FLAGS.margin,
                                             repeat=True)
        EPOCH_SIZE = train_mb_source.dataset_length

    test_mb_source = EyeMinibatchSource(X,
                                        y,
                                        test_samples,
                                        margin=FLAGS.margin,
                                        repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
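Note: perm_data is applied to the raw samples in Examples #4 through #7 after coarsening. The coarsen/perm_data pair resembles the coarsening utilities from the cnn_graph reference implementation; a sketch that reorders the vertex axis and zero-fills the fake vertices added by coarsening (assuming the vertex axis is axis 1):

import numpy as np

def perm_data(x, indices):
    # Reorder vertices according to indices; indices beyond the original
    # vertex count correspond to fake vertices and stay zero.
    if indices is None:
        return x
    n = x.shape[1]
    xnew = np.zeros((x.shape[0], len(indices)) + x.shape[2:], dtype=x.dtype)
    for new_i, old_i in enumerate(indices):
        if old_i < n:
            xnew[:, new_i] = x[:, old_i]
    return xnew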
Example #5
def keep_pooling_laplacians(L, poolings):
    """
    :param L: List of coarsened laplacians
    :param poolings: Pooling values
    :return: New list with only useful laplacians
    """
    j = 0
    Lnew = list()
    Lnew.append(L[0])
    for p in poolings:
        j += int(np.log2(p)) if p > 1 else 0
        Lnew.append(L[j])

    return Lnew


if __name__ == '__main__':
    from pygsp import graphs
    from synthetic_data.data_generation import generate_spectral_samples
    from graph_utils.laplacian import initialize_laplacian_tensor

    N = 100
    G = graphs.Community(N)
    X, labels = generate_spectral_samples(5, 128, G, f_h=20, f_l=50, lambda_h=80, lambda_l=10)
    adjacencies, perm = coarsen(G.A, levels=3)
    X = perm_data(X, perm)
    num_levels = np.log2(np.prod([2, 4, 1])).astype(int)
    adjacencies, perm = coarsen(G.A, levels=num_levels)  # Coarsens in powers of 2
    L = [initialize_laplacian_tensor(A) for A in adjacencies]
    print(len(L))
    L = keep_pooling_laplacians(L, [2, 4])
    print([lap.get_shape() for lap in L])
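Note: the inline test above computes num_levels as np.log2(np.prod([2, 4, 1])).astype(int), which suggests a one-line helper. A sketch consistent with that test (the actual _number_of_pooling_levels is not shown on this page):

import numpy as np

def _number_of_pooling_levels(vertex_poolings):
    # Total coarsening depth: log2 of the product of all pooling factors,
    # since each coarsening level halves the number of vertices.
    return int(np.log2(np.prod(vertex_poolings)))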
Example #6
def main(_):
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(
            FLAGS.log_dir) + 1 if FLAGS.action == "train" else _last_exp(
                FLAGS.log_dir)
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    if FLAGS.action == "train":
        if tf.io.gfile.exists(FLAGS.log_dir):
            tf.io.gfile.rmtree(FLAGS.log_dir)
        tf.io.gfile.makedirs(FLAGS.log_dir)

        # Initialize data
        G = graphs.Community(FLAGS.num_vertices, seed=FLAGS.seed)
        G.compute_laplacian("normalized")
        # Save graph
        np.save(os.path.join(FLAGS.log_dir, "graph_weights"), G.W.todense())

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        error = True
        while error:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                error = False
            except IndexError:
                error = True
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

    elif FLAGS.action == "eval":
        W = np.load(os.path.join(FLAGS.log_dir, "graph_weights.npy"))
        G = graphs.Graph(W)
        G.compute_laplacian("normalized")
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    if FLAGS.action == "train":
        #G_nx = nx.from_numpy_matrix(G.W.todense())
        if FLAGS.load_data:
            # requested num_train can't exceed the saved array size
            train_data, train_labels = load_from_npy('train', FLAGS.num_train)

        else:
            random.seed(FLAGS.seed)
            train_data, train_labels = epidemics_generator(
                g_nx=nx.from_numpy_matrix(G.W.todense()),
                batch_size=FLAGS.num_train,
                timesteps=FLAGS.num_frames,
                initial_nodes=random.sample(range(FLAGS.num_vertices),
                                            int(FLAGS.num_vertices / 10)))
            save_to_npy('train', train_data, train_labels)

        train_data = perm_data(train_data, perm)
        train_mb_source = MinibatchSource(train_data,
                                          train_labels,
                                          repeat=True)

    if FLAGS.load_data:
        # requested num_test can't exceed the saved array size
        test_data, test_labels = load_from_npy('test', FLAGS.num_test)

    else:
        test_data, test_labels = epidemics_generator(
            g_nx=nx.from_numpy_matrix(G.W.todense()),
            batch_size=FLAGS.num_test,
            timesteps=FLAGS.num_frames,
            initial_nodes=random.sample(range(FLAGS.num_vertices),
                                        int(FLAGS.num_vertices / 10)))
        save_to_npy('test', test_data, test_labels)

    test_data = perm_data(test_data, perm)
    test_mb_source = MinibatchSource(test_data, test_labels, repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
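Note: save_to_npy and load_from_npy are only called, never defined, here. Hypothetical counterparts consistent with the calls above (names and file layout are assumptions):

import numpy as np

def save_to_npy(prefix, data, labels):
    # Cache generated samples so later runs can reload a subset.
    np.save(prefix + "_data.npy", data)
    np.save(prefix + "_labels.npy", labels)

def load_from_npy(prefix, num_samples):
    # Slicing silently caps the result at the saved array size,
    # matching the "can't exceed" caveat in Example #6.
    data = np.load(prefix + "_data.npy")
    labels = np.load(prefix + "_labels.npy")
    return data[:num_samples], labels[:num_samples]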
Example #7
def main(_):
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(
            FLAGS.log_dir) + 1 if FLAGS.action == "train" else _last_exp(
                FLAGS.log_dir)
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    if FLAGS.action == "train":
        if tf.gfile.Exists(FLAGS.log_dir):
            tf.gfile.DeleteRecursively(FLAGS.log_dir)
        tf.gfile.MakeDirs(FLAGS.log_dir)

        # Initialize data

        G = graphs.Community(FLAGS.num_vertices,
                             seed=FLAGS.seed)  #, world_density = 0.1)

        G.compute_laplacian("normalized")

        # Save graph
        np.save(os.path.join(FLAGS.log_dir, "graph_weights"), G.W.todense())

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        error = True
        while error:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                error = False
            except IndexError:
                error = True
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

        # zero-graph sanity check: null out every laplacian
        if FLAGS.zeros:
            for i in range(len(L)):
                L[i] *= 0
            print(FLAGS.zeros)
            print("0 Graph \n\n")

    elif FLAGS.action == "eval":
        W = np.load(os.path.join(FLAGS.log_dir, "graph_weights.npy"))
        G = graphs.Graph(W)
        G.compute_laplacian("normalized")
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    if FLAGS.action == "train":
        train_data, train_labels = generate_spectral_samples_hard(
            N=FLAGS.num_train // FLAGS.num_classes,
            G=G,
            T=FLAGS.num_frames,
            f_h=FLAGS.f_h,
            f_l=FLAGS.f_h,  # note: f_l reuses FLAGS.f_h, so both bands share one frequency
            lambda_h=FLAGS.lambda_h,
            lambda_l=FLAGS.lambda_l,
            sigma=FLAGS.sigma,
            sigma_n=FLAGS.sigma_n)
        train_data = perm_data(train_data, perm)
        train_mb_source = MinibatchSource(train_data,
                                          train_labels,
                                          repeat=True)

    test_data, test_labels = generate_spectral_samples_hard(
        N=FLAGS.num_test // FLAGS.num_classes,
        G=G,
        T=FLAGS.num_frames,
        f_h=FLAGS.f_h,
            f_l=FLAGS.f_h,  # as above, f_l reuses FLAGS.f_h
        lambda_h=FLAGS.lambda_h,
        lambda_l=FLAGS.lambda_l,
        sigma=FLAGS.sigma,
        sigma_n=FLAGS.sigma_n)

    test_data = perm_data(test_data, perm)
    test_mb_source = MinibatchSource(test_data, test_labels, repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
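Note: MinibatchSource appears in Examples #1, #6 and #7 with (data, labels) and an optional repeat flag; the related EyeMinibatchSource in Example #4 also exposes a dataset_length attribute used for EPOCH_SIZE. A minimal hypothetical version satisfying those call sites:

class MinibatchSource(object):
    # Hands out fixed-size minibatches; cycles forever when repeat=True
    # (training) and stops after one pass when repeat=False (testing).
    def __init__(self, data, labels, repeat=False):
        self.data = data
        self.labels = labels
        self.repeat = repeat
        self.dataset_length = len(data)
        self._cursor = 0

    def next_batch(self, batch_size):
        start = self._cursor
        end = start + batch_size
        if end > self.dataset_length:
            if not self.repeat:
                return None
            start, end = 0, batch_size
        self._cursor = end
        return self.data[start:end], self.labels[start:end]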
Example #8
def main(_):

    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(
            FLAGS.log_dir) + 1 if FLAGS.action == "train" else _last_exp(
                FLAGS.log_dir)
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    # Make graph and fill with data
    x = gen_data(interval=FLAGS.interval, graphDataPath=FLAGS.graphPath)
    x = (x - x.mean(axis=0)) / x.std(axis=0)

    print(x.shape)

    #Normalize them
    #scaler = StandardScaler()
    #scaler.fit(x)

    #Data loading
    seqs = np.array(
        list(
            split_seq(x.index.values,
                      FLAGS.num_frames + FLAGS.num_frames_test)))

    np.random.seed()
    np.random.shuffle(seqs)
    print('{} total samples'.format(len(seqs)))

    split = int(len(seqs) * FLAGS.train_ratio)

    #x.values = scaler.transform(x)

    train_ds = DataLoader(x,
                          seqs[:split],
                          train_num=FLAGS.num_frames,
                          test_num=FLAGS.num_frames_test,
                          transform=None)
    train_dl = data.DataLoader(train_ds,
                               batch_size=FLAGS.batch_size,
                               shuffle=True,
                               num_workers=4)

    test_ds = DataLoader(x,
                         seqs[split:],
                         train_num=FLAGS.num_frames,
                         test_num=FLAGS.num_frames_test,
                         transform=None)
    test_dl = data.DataLoader(test_ds,
                              batch_size=FLAGS.batch_size,
                              shuffle=False,
                              num_workers=4)

    if FLAGS.action == "train":
        if tf.io.gfile.exists(FLAGS.log_dir):
            tf.io.gfile.rmtree(FLAGS.log_dir)
        tf.io.gfile.makedirs(FLAGS.log_dir)

        sparsed = pd.read_csv('NY/data/' + FLAGS.graphPath, index_col=0)
        W = sparsed.values
        np.fill_diagonal(W, 0)
        #W = sparsed.values - np.identity(len(sparsed))
        G = graphs.Graph(W)

        #G = graphs.Community(len(sparsed), seed=0)

        G.compute_laplacian("normalized")
        L = initialize_laplacian_tensor(G.W)
        W = (G.W).astype(np.float32)

        #print(W)

        print('{} nodes, {} edges'.format(G.N, G.Ne))

        #Log to file
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(train_dl, L, test_dl)

        #run eval

        results, truths = run_eval(test_dl)

        np.save('results', results)
        np.save('truths', truths)

    elif FLAGS.action == "eval":

        #x,y,prediction,phase,dropout,metric,metric_opt,loss = setup(L)

        results, truths = run_eval(test_dl)

        np.save('results', results)
        np.save('truths', truths)
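Note: the DataLoader class constructed in Examples #3 and #8 (not to be confused with torch.utils.data.DataLoader, which the examples use as data.DataLoader) is not defined on this page. A hypothetical torch Dataset matching the Example #8 constructor, where each index window is split into num_frames input frames and num_frames_test target frames (Example #3 presumably uses a variant with default split sizes):

import torch
from torch.utils import data

class DataLoader(data.Dataset):
    def __init__(self, x, seqs, train_num, test_num, transform=None):
        # x: pandas DataFrame of node signals; seqs: array of index windows.
        self.x = x
        self.seqs = seqs
        self.train_num = train_num
        self.test_num = test_num
        self.transform = transform

    def __len__(self):
        return len(self.seqs)

    def __getitem__(self, i):
        window = self.x.loc[self.seqs[i]].values
        if self.transform is not None:
            window = self.transform.transform(window)
        inputs = window[:self.train_num]
        targets = window[self.train_num:self.train_num + self.test_num]
        return (torch.from_numpy(inputs).float(),
                torch.from_numpy(targets).float())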