Example #1
    def _load_current_data(self, X, cp, Tim):
        # Select the current subset of samples, applying the vertex
        # permutation produced by graph coarsening when one is set.
        if self.perm is not None:
            X_current = perm_data(X[self.current_samples, :, :], self.perm)
        else:
            X_current = X[self.current_samples, :, :]

        y = _combine_params(cp, Tim)
        y_current = y[self.current_samples]

        # Optional per-sample mean-centering, currently disabled:
        # mean_X = np.mean(X_current, axis=(1, 2))
        # X_current = X_current - mean_X[:, np.newaxis, np.newaxis]
        return X_current, y_current
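The mean-centering left commented out above is a common per-sample normalization for tensors shaped (samples, vertices, time). A minimal self-contained NumPy sketch of what those two lines would do if re-enabled (the toy shapes are illustrative, not from the original code):

import numpy as np

# Toy batch: 4 samples, 8 graph vertices, 16 time steps
X_current = np.random.randn(4, 8, 16)

# Per-sample mean over the vertex and time axes, broadcast back for subtraction
mean_X = np.mean(X_current, axis=(1, 2))
X_centered = X_current - mean_X[:, np.newaxis, np.newaxis]

print(np.mean(X_centered, axis=(1, 2)))  # each entry is ~0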
Example #2
def main(_):
    global EPOCH_SIZE
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(FLAGS.log_dir)
        if FLAGS.action == "train":
            exp_n += 1
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    # Initialize data
    X, y = load_data()
    G = create_spatial_eeg_graph(MONTAGE, q=FLAGS.q, k=FLAGS.k)
    # G = create_data_eeg_graph(MONTAGE, X)
    G.compute_laplacian("normalized")

    if FLAGS.action == "train":
        if tf.gfile.Exists(FLAGS.log_dir):
            tf.gfile.DeleteRecursively(FLAGS.log_dir)
        tf.gfile.MakeDirs(FLAGS.log_dir)

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        # coarsen() can fail intermittently with an IndexError; retry until
        # it succeeds.
        while True:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                break
            except IndexError:
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

    elif FLAGS.action == "eval":
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    X = perm_data(X, perm)
    train_samples, test_samples = train_validation_test_split(
        len(y), FLAGS.margin, FLAGS.test_size)

    if FLAGS.action == "train":
        train_mb_source = EyeMinibatchSource(X,
                                             y,
                                             train_samples,
                                             margin=FLAGS.margin,
                                             repeat=True)
        EPOCH_SIZE = train_mb_source.dataset_length
        print(EPOCH_SIZE)

    test_mb_source = EyeMinibatchSource(X,
                                        y,
                                        test_samples,
                                        margin=FLAGS.margin,
                                        repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
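Each of these main functions relies on a _last_exp helper to number experiment directories (exp_1, exp_2, ...), but its body is not shown in these examples. Judging only from the call sites, a plausible sketch might look like the following; treat it as a guess, not the project's actual helper:

import os
import re

def _last_exp(log_dir):
    """Return the highest N among exp_N subdirectories of log_dir, or 0 if none."""
    if not os.path.isdir(log_dir):
        return 0
    indices = []
    for name in os.listdir(log_dir):
        match = re.fullmatch(r"exp_(\d+)", name)
        if match:
            indices.append(int(match.group(1)))
    return max(indices, default=0)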
Example #3
def main(_):
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(FLAGS.log_dir)
        if FLAGS.action == "train":
            exp_n += 1
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    if FLAGS.action == "train":
        if tf.io.gfile.exists(FLAGS.log_dir):
            tf.io.gfile.rmtree(FLAGS.log_dir)
        tf.io.gfile.makedirs(FLAGS.log_dir)

        # Initialize data
        G = graphs.Community(FLAGS.num_vertices, seed=FLAGS.seed)
        G.compute_laplacian("normalized")
        # Save graph
        np.save(os.path.join(FLAGS.log_dir, "graph_weights"), G.W.todense())

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        # coarsen() can fail intermittently with an IndexError; retry until
        # it succeeds.
        while True:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                break
            except IndexError:
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

    elif FLAGS.action == "eval":
        W = np.load(os.path.join(FLAGS.log_dir, "graph_weights.npy"))
        G = graphs.Graph(W)
        G.compute_laplacian("normalized")
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    if FLAGS.action == "train":
        if FLAGS.load_data:
            # num_train can't exceed the size of the saved array
            train_data, train_labels = load_from_npy('train', FLAGS.num_train)
        else:
            random.seed(FLAGS.seed)
            train_data, train_labels = epidemics_generator(
                g_nx=nx.from_numpy_matrix(G.W.todense()),
                batch_size=FLAGS.num_train,
                timesteps=FLAGS.num_frames,
                initial_nodes=random.sample(range(FLAGS.num_vertices),
                                            int(FLAGS.num_vertices / 10)))
            save_to_npy('train', train_data, train_labels)

        train_data = perm_data(train_data, perm)
        train_mb_source = MinibatchSource(train_data,
                                          train_labels,
                                          repeat=True)

    if FLAGS.load_data:
        # num_test can't exceed the size of the saved array
        test_data, test_labels = load_from_npy('test', FLAGS.num_test)
    else:
        test_data, test_labels = epidemics_generator(
            g_nx=nx.from_numpy_matrix(G.W.todense()),
            batch_size=FLAGS.num_test,
            timesteps=FLAGS.num_frames,
            initial_nodes=random.sample(range(FLAGS.num_vertices),
                                        int(FLAGS.num_vertices / 10)))
        save_to_npy('test', test_data, test_labels)

    test_data = perm_data(test_data, perm)
    test_mb_source = MinibatchSource(test_data, test_labels, repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
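save_to_npy and load_from_npy are likewise project helpers whose bodies are not shown. From the call sites they cache a generated (data, labels) pair and reload at most the first num_samples entries; a minimal sketch under that assumption (the file naming here is made up):

import numpy as np

def save_to_npy(prefix, data, labels):
    # Cache the generated arrays so later runs can skip regeneration
    np.save(prefix + "_data.npy", data)
    np.save(prefix + "_labels.npy", labels)

def load_from_npy(prefix, num_samples):
    # num_samples must not exceed the size of the saved arrays
    data = np.load(prefix + "_data.npy")
    labels = np.load(prefix + "_labels.npy")
    return data[:num_samples], labels[:num_samples]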
Example #4
def main(_):
    # Initialize tempdir
    if FLAGS.action == "eval" and FLAGS.read_dir is not None:
        FLAGS.log_dir = FLAGS.read_dir
    else:
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, FLAGS.model_type)
        exp_n = _last_exp(FLAGS.log_dir)
        if FLAGS.action == "train":
            exp_n += 1
        FLAGS.log_dir = os.path.join(FLAGS.log_dir, "exp_" + str(exp_n))

    print(FLAGS.log_dir)

    if FLAGS.action == "train":
        if tf.gfile.Exists(FLAGS.log_dir):
            tf.gfile.DeleteRecursively(FLAGS.log_dir)
        tf.gfile.MakeDirs(FLAGS.log_dir)

        # Initialize data
        G = graphs.Community(FLAGS.num_vertices, seed=FLAGS.seed)
        G.compute_laplacian("normalized")

        # Save graph
        np.save(os.path.join(FLAGS.log_dir, "graph_weights"), G.W.todense())

        # Prepare pooling
        num_levels = _number_of_pooling_levels(FLAGS.vertex_poolings)
        # coarsen() can fail intermittently with an IndexError; retry until
        # it succeeds.
        while True:
            try:
                adjacencies, perm = coarsen(
                    G.A, levels=num_levels)  # Coarsens in powers of 2
                break
            except IndexError:
                continue

        np.save(os.path.join(FLAGS.log_dir, "ordering"), perm)
        L = [initialize_laplacian_tensor(A) for A in adjacencies]
        L = keep_pooling_laplacians(L, FLAGS.vertex_poolings)

        # Ablation: zero every Laplacian so the model sees no graph structure
        if FLAGS.zeros:
            for i in range(len(L)):
                L[i] *= 0
            print("Using zeroed graph Laplacians\n")

    elif FLAGS.action == "eval":
        W = np.load(os.path.join(FLAGS.log_dir, "graph_weights.npy"))
        G = graphs.Graph(W)
        G.compute_laplacian("normalized")
        perm = np.load(os.path.join(FLAGS.log_dir, "ordering.npy"))

    if FLAGS.action == "train":
        train_data, train_labels = generate_spectral_samples_hard(
            N=FLAGS.num_train // FLAGS.num_classes,
            G=G,
            T=FLAGS.num_frames,
            f_h=FLAGS.f_h,
            f_l=FLAGS.f_h,  # note: f_l is set to f_h, so both bands share one temporal frequency
            lambda_h=FLAGS.lambda_h,
            lambda_l=FLAGS.lambda_l,
            sigma=FLAGS.sigma,
            sigma_n=FLAGS.sigma_n)
        train_data = perm_data(train_data, perm)
        train_mb_source = MinibatchSource(train_data,
                                          train_labels,
                                          repeat=True)

    test_data, test_labels = generate_spectral_samples_hard(
        N=FLAGS.num_test // FLAGS.num_classes,
        G=G,
        T=FLAGS.num_frames,
        f_h=FLAGS.f_h,
        f_l=FLAGS.f_h,  # note: f_l is set to f_h, matching the training call above
        lambda_h=FLAGS.lambda_h,
        lambda_l=FLAGS.lambda_l,
        sigma=FLAGS.sigma,
        sigma_n=FLAGS.sigma_n)

    test_data = perm_data(test_data, perm)
    test_mb_source = MinibatchSource(test_data, test_labels, repeat=False)

    if FLAGS.action == "train":
        params = vars(FLAGS)
        with open(os.path.join(FLAGS.log_dir, "params.json"), "w") as f:
            json.dump(params, f)

        # Run training and evaluation loop
        print("Training model...")
        run_training(L, train_mb_source, test_mb_source)
    elif FLAGS.action == "eval":
        print("Evaluating model...")
        run_eval(test_mb_source)
    else:
        raise ValueError("No valid action selected")
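All four main functions serialize the run configuration with json.dump(vars(FLAGS), f). If FLAGS is an absl FlagValues object (as in recent TensorFlow releases, where tf.app.flags wraps absl), vars() exposes internal attributes rather than flag values, so the dump can fail or contain noise; flag_values_dict() is absl's supported way to get a plain, JSON-serializable dict. A small standalone sketch (the flag defined here is only for illustration):

import json
from absl import flags

flags.DEFINE_integer("num_frames", 16, "Frames per generated sample")

FLAGS = flags.FLAGS
FLAGS(["prog"])  # parse with defaults so flag values become accessible

with open("params.json", "w") as f:
    json.dump(FLAGS.flag_values_dict(), f)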