Example #1
def read_mat_stuff_perm(N, per_subj, directory, perm):
    """Load one CSV per file matching the `directory` glob pattern and permute each matrix with the precomputed `perm`."""
    my_data_perm = [None] * per_subj * N * 2
    for counter, fname in enumerate(glob.glob(directory)):
        my_data = genfromtxt(fname, delimiter=',')
        my_data_perm[counter] = coarsening.perm_data(my_data, perm)
    return my_data_perm
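Example #1 assumes a `perm` computed beforehand. A minimal sketch of how it might be produced and the loader invoked, assuming the `graph` and `coarsening` helpers used in the other examples; the coordinates, `k`, the number of levels, and the CSV glob pattern are illustrative placeholders:

import numpy as np
import glob
import graph
import coarsening

coords = np.random.rand(64, 3).astype(np.float32)   # placeholder vertex coordinates
dist, idx = graph.distance_scipy_spatial(coords, k=8, metric='euclidean')
A = graph.adjacency(dist, idx)
graphs, perm = coarsening.coarsen(A, levels=4, self_connections=False)

my_data_perm = read_mat_stuff_perm(N=10, per_subj=5,
                                   directory='data/subjects/*.csv', perm=perm)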
Example #2
    def _coarsen(self, dense_node_inputs, adj):
        assert 'metis_' in FLAGS.coarsening
        self.num_level = get_coarsen_level()
        assert self.num_level >= 1
        graphs, perm = coarsen(sp.csr_matrix(adj), levels=self.num_level,
                               self_connections=False)
        permuted_padded_dense_node_inputs = perm_data(
            dense_node_inputs.T, perm).T
        self.sparse_permuted_padded_dense_node_inputs = self._preprocess_inputs(
            sp.csr_matrix(permuted_padded_dense_node_inputs))
        self.coarsened_laplacians = []
        for g in graphs:
            self.coarsened_laplacians.append([self._preprocess_adj(g.todense())])
        assert len(self.coarsened_laplacians) == self.num_laplacians * self.num_level + 1
Example #3
    def do_train(self, samples, perm, L, L_max, do_training=True):
        if do_training:
            self.train()
        else:
            self.eval()
        total_loss = 0
        total_acc = 0
        embedding_size = 0
        optimizer = self.update(self.options['learning_rate'])
        dropout_value = self.options['dropout_value']
        l2_regularization = self.options['l2_regularization']
        for i, data in enumerate(samples, 1):
            optimizer.zero_grad()

            train_data, train_labels = data
            train_data = perm_data(train_data, perm)
            embedding_size = len(train_data)
            # moving tensors to adequate device
            train_data = train_data.to(args.device)
            train_labels = train_labels.requires_grad_(False).to(args.device)

            output = self.forward(train_data, dropout_value, L,
                                  L_max).to(args.device)
            loss = self.loss(output, train_labels, l2_regularization)
            loss_train = loss.data.item()
            if do_training:
                loss.backward()
                optimizer.step()

            total_loss += loss_train
            acc = self.accuracy(output.cpu().detach(),
                                train_labels.data.cpu().detach())
            total_acc += acc

        if do_training:
            # assumes the running step count and the options dict live on the instance
            self.global_step += len(samples)
            self.options['learning_rate'] = self.options['learning_rate'] * \
                pow(self.options['decay'], float(self.global_step // embedding_size))
            optimizer = self.update_learning_rate(optimizer,
                                                  self.options['learning_rate'])

        total_loss = total_loss / \
            len(samples.dataset) * self.options['batch_size']
        total_acc = total_acc / len(samples.dataset) * \
            self.options['batch_size']
        total_loss = torch.tensor(total_loss)
        return total_loss, total_acc
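A rough sketch of how do_train might be driven, assuming `net` is an instance of the model above, `train_loader` and `val_loader` are torch DataLoaders yielding (data, labels) pairs, and `perm`, `L`, `L_max` come from a prior coarsening step; all of these names are illustrative, not taken from the original code:

for epoch in range(20):
    train_loss, train_acc = net.do_train(train_loader, perm, L, L_max, do_training=True)
    val_loss, val_acc = net.do_train(val_loader, perm, L, L_max, do_training=False)
    print('epoch {:02d}  train loss {:.4f} acc {:.2f}  val loss {:.4f} acc {:.2f}'.format(
        epoch, train_loss.item(), train_acc, val_loss.item(), val_acc))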
Example #4
def first_coarsen(node_features, n):
    """
    Builds a chain graph over the tract's points, coarsens it, and returns the permuted vertices, the Laplacians, and the permutation
    Input:
    node_features - An array (n x 3) with the 3D coordinates of each point
    n - The number of points sampled on each tract
    """
    row = np.array(list(range(n - 1)) +
                   list(range(1, n)))  # row[i] connects to col[i]
    col = np.array(list(range(1, n)) + list(range(n - 1)))
    data = np.ones(2 * (n - 1))  # unit edge weights
    A = scipy.sparse.csr_matrix((data, (row, col)),
                                shape=(n, n)).astype(np.float32)
    coarsening_levels = 2
    L, perm = coarsen(A, coarsening_levels)
    vert = perm_data(node_features.transpose(), perm)

    return vert.transpose().astype('float16'), L, perm
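A small usage sketch for first_coarsen, assuming each tract has been resampled to n 3D points; the random array below simply stands in for real tract coordinates:

import numpy as np

n = 32
tract = np.random.rand(n, 3).astype(np.float32)   # placeholder (n x 3) coordinates
vert, L, perm = first_coarsen(tract, n)
print(vert.shape)   # (len(perm), 3): perm_data pads the n points with fake nodes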
Example #5
A = grid_graph(grid_side, number_edges, metric)  # create graph of Euclidean grid

# Compute coarsened graphs
coarsening_levels = 4

L, perm = coarsen(A, coarsening_levels)

# Compute max eigenvalue of graph Laplacians
lmax = []
for i in range(coarsening_levels):
    lmax.append(lmax_L(L[i]))
print('lmax: ' + str(lmax))

# Reindex nodes to satisfy a binary tree structure
train_data = perm_data(train_data, perm)
val_data = perm_data(val_data, perm)
test_data = perm_data(test_data, perm)

print(train_data.shape)
print(val_data.shape)
print(test_data.shape)

print('Execution time: {:.2f}s'.format(time.time() - t_start))
del perm


class my_sparse_mm(torch.autograd.Function):
    """
    Implementation of a new autograd function for sparse variables,
    called "my_sparse_mm", by subclassing torch.autograd.Function
Example #6
# loading of MNIST dataset

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets(dir_data, one_hot=False)

train_data = mnist.train.images.astype(np.float32)
val_data = mnist.validation.images.astype(np.float32)  # the first 5K training samples are used for validation
test_data = mnist.test.images.astype(np.float32)
train_labels = mnist.train.labels
val_labels = mnist.validation.labels
test_labels = mnist.test.labels

t_start = time.time()
train_data = coarsening.perm_data(train_data, perm)
val_data = coarsening.perm_data(val_data, perm)
test_data = coarsening.perm_data(test_data, perm)
print('Execution time: {:.2f}s'.format(time.time() - t_start))
del perm


# %%
class ChebNet:
    """
    The neural network model.
    """

    # Helper functions used for constructing the model
    def _weight_variable(self, shape, regularization=True):
        """Initializer for the weights"""
Example #7
def prepare_graphs():
    p = os.path.join(os.getcwd(), '../survivalnet/data/Brain_Integ.mat')
    D = sio.loadmat(p)
    T = np.asarray([t[0] for t in D['Survival']])
    O = 1 - np.asarray([c[0] for c in D['Censored']])
    X = D['Integ_X']  #[:,1855:]
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
    fold_size = int(10 * len(X) / 100)
    X_train, T_train, O_train = X[2 * fold_size:], T[2 * fold_size:], O[2 * fold_size:]
    X_test, T_test, O_test = X[:fold_size], T[:fold_size], O[:fold_size]
    X_val = X[fold_size:2 * fold_size]
    T_val = T[fold_size:2 * fold_size]
    O_val = O[fold_size:2 * fold_size]
    print_log('train and test shapes:' + str(X_train.shape) +
              str(X_test.shape))
    if not LOAD_A:
        start = time.time()
        dist, idx = graph.distance_scipy_spatial(X_train.T,
                                                 k=4,
                                                 metric='euclidean')
        print_log('graph constructed:' + str(dist.shape) + str(idx.shape) +
                  ' in ' + str(time.time() - start))
        A = graph.adjacency(dist, idx).astype(np.float32)
        d = X.shape[1]
        assert A.shape == (d, d)
        np.savez('A_sml',
                 data=A.data,
                 indices=A.indices,
                 indptr=A.indptr,
                 shape=A.shape)
        print('d = |V| = {}, k|V| < |E| = {}'.format(d, A.nnz))
        # plt.spy(A, markersize=2, color='black')
        # plt.savefig('tmp.png')
    else:
        start = time.time()
        loader = np.load('A.npz')
        A = csr_matrix((loader['data'], loader['indices'], loader['indptr']),
                       shape=loader['shape'])
        print_log('graph loaded:' + ' in ' + str(time.time() - start))
        print('adjacency matrix type and shape: ', A.__class__, A.shape)
    start = time.time()
    graphs, perm = coarsening.coarsen(A, levels=CL, self_connections=False)
    print_log('graph coarsened:' + ' in ' + str(time.time() - start))
    X_train = coarsening.perm_data(X_train, perm)
    X_val = coarsening.perm_data(X_val, perm)
    X_test = coarsening.perm_data(X_test, perm)
    print_log('train and test shapes:' + str(X_train.shape) +
              str(X_test.shape))
    L = [graph.laplacian(A, normalized=True) for A in graphs]
    #graph.plot_spectrum(L)

    n_train = len(X_train)
    params = dict()
    params['dir_name'] = 'demo'
    params['num_epochs'] = 2000
    params['batch_size'] = int(len(X_train) / 1.0)
    params['eval_frequency'] = 10

    # Building blocks.
    params['filter'] = 'chebyshev5'
    params['brelu'] = 'b1relu'
    params['pool'] = 'apool1'

    # Architecture.
    params['F'] = [8, 8, 8]  # Number of graph convolutional filters.
    params['K'] = [9, 9, 9]  # Polynomial orders.
    params['p'] = [2, 2, 2]  # Pooling sizes.
    params['M'] = [128, 1]  # Output dimensionality of fully connected layers.

    # Optimization.
    params['regularization'] = 0
    params['dropout'] = 1
    params['learning_rate'] = 1e-4
    params['decay_rate'] = 0.999
    params['momentum'] = 0
    params['decay_steps'] = n_train / params['batch_size']
    model = models.cgcnn(L, **params)
    accuracy, loss, t_step = model.cox_fit(X_train, T_train, O_train, X_val,
                                           T_val, O_val)
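As a rough sanity check on the architecture above (an illustrative sketch, not part of the original script): each entry of params['p'] pools the graph by that factor, so with p = [2, 2, 2] the permuted input dimension returned by coarsening.perm_data must be divisible by 2**3, which the fake-node padding guarantees as long as the number of coarsening levels CL is at least the number of pooling layers:

def nodes_per_level(d_padded, pool_sizes=(2, 2, 2)):
    """Number of graph nodes feeding each convolutional layer, given the padded input size."""
    sizes = [d_padded]
    for p in pool_sizes:
        assert sizes[-1] % p == 0, 'perm_data should have padded to a compatible size'
        sizes.append(sizes[-1] // p)
    return sizes

print(nodes_per_level(1024))   # e.g. [1024, 512, 256, 128]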
Example #8
def coarsen_again(node_features, perm):
    """No need to compute the same Laplacians for each graph"""
    vert = perm_data(node_features.transpose(), perm)

    return vert.transpose().astype('float16')
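Examples #4 and #8 are naturally used together: compute the Laplacians and the permutation once on the first tract, then reuse the permutation for every other tract. A minimal sketch, assuming a list of (n x 3) coordinate arrays; the random data is a placeholder:

import numpy as np

n = 32
tracts = [np.random.rand(n, 3).astype(np.float32) for _ in range(100)]   # placeholder tracts

first_vert, L, perm = first_coarsen(tracts[0], n)
all_verts = [first_vert] + [coarsen_again(t, perm) for t in tracts[1:]]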