Beispiel #1
0
def load_hcp_tcgn():
    """Load the HCP example data and prepare it for a TCGN model.

    Coarsens the first adjacency matrix into a multi-level graph
    hierarchy, builds one Laplacian per level, takes a fixed-size
    chronological train/test split of the time series, and permutes
    the node axis of the data to match the coarsening order.

    Returns:
        tuple: (laplacians, train_data, test_data, train_labels,
        test_labels).
    """
    time_series, labels, As = load_hcp_example()

    use_normalized = True
    n_levels = 4

    # Multi-level coarsening of the first adjacency matrix.
    graphs, perm = coarsening.coarsen(As[0],
                                      levels=n_levels,
                                      self_connections=False)

    # One (normalized) graph Laplacian per coarsening level.
    laplacians = [graph.laplacian(adj, normalized=use_normalized)
                  for adj in graphs]

    # Fixed split: first 5*512 samples train, next 5*512 samples test.
    idx_train = range(5 * 512)
    idx_test = range(len(idx_train), 10 * 512)

    train_data = perm_data_time(time_series[idx_train], perm)
    test_data = perm_data_time(time_series[idx_test], perm)
    train_labels = labels[idx_train]
    test_labels = labels[idx_test]

    return laplacians, train_data, test_data, train_labels, test_labels
Beispiel #2
0
def load_hcp_tcgn(device):
    """Load the HCP example data for a TCGN model using edge-index graphs.

    Coarsens the first adjacency matrix into a multi-level hierarchy,
    extracts each level's edge list as a COO index tensor, splits the
    time series 80/20 chronologically, and permutes the node axis of
    the data to match the coarsening order.

    Args:
        device: torch device the edge-index tensors are moved to.

    Returns:
        tuple: (graphs, coos, train_data, test_data, train_labels,
        test_labels), where `coos` holds one (2, nnz) long tensor per
        coarsening level.
    """
    time_series, labels, As = load_hcp_example()

    coarsening_levels = 4

    graphs, perm = coarsening.coarsen(As[0],
                                      levels=coarsening_levels,
                                      self_connections=False)

    # Convert each graph to COO once (the original comprehension called
    # .tocoo() twice per graph) and stack row/col into a (2, nnz) tensor.
    coos = []
    for g in graphs:
        coo = g.tocoo()
        coos.append(torch.tensor([coo.row, coo.col],
                                 dtype=torch.long).to(device))

    # 80/20 chronological train/test split.
    idx_train = range(int(0.8 * time_series.shape[0]))
    print('Size of train set: {}'.format(len(idx_train)))

    idx_test = range(len(idx_train), time_series.shape[0])
    print('Size of test set: {}'.format(len(idx_test)))

    train_data = time_series[idx_train]
    train_labels = labels[idx_train]
    test_data = time_series[idx_test]
    test_labels = labels[idx_test]

    # Permute the node axis so it lines up with the coarsened graphs.
    train_data = perm_data_time(train_data, perm)
    test_data = perm_data_time(test_data, perm)

    return graphs, coos, train_data, test_data, train_labels, test_labels
Beispiel #3
0
def load_hcp_tcgn(device, sparse=False):
    """Load the HCP example data for a TCGN model with rescaled Laplacians.

    Coarsens the first adjacency matrix into a multi-level hierarchy,
    builds one rescaled (lmax=2) Laplacian per level — dense or sparse
    torch tensors depending on `sparse` — splits the time series 80/20
    chronologically, and permutes the node axis of the data to match
    the coarsening order.

    The original code always materialized BOTH the dense and the sparse
    Laplacian lists and picked one via a hard-coded local flag; the flag
    is now a keyword parameter (default preserves the old behavior) and
    only the requested representation is built.

    Args:
        device: torch device the Laplacian tensors are moved to.
        sparse: if True, return torch sparse COO Laplacians instead of
            dense float tensors. Defaults to False (original behavior).

    Returns:
        tuple: (laplacian, train_data, test_data, train_labels,
        test_labels).
    """
    time_series, labels, As = load_hcp_example()

    normalized_laplacian = True
    coarsening_levels = 4

    graphs, perm = coarsening.coarsen(As[0],
                                      levels=coarsening_levels,
                                      self_connections=False)

    if sparse:
        # Sparse path: keep each rescaled Laplacian in COO form.
        laplacian = []
        for A in graphs:
            rescaled = graph.rescale_L(
                graph.laplacian(A, normalized=normalized_laplacian),
                lmax=2)
            coo = coo_matrix(rescaled)
            indices = torch.LongTensor(np.vstack((coo.row, coo.col)))
            values = torch.FloatTensor(coo.data)
            t = torch.sparse.FloatTensor(indices, values,
                                         torch.Size(coo.shape))
            laplacian.append(t.to(device))
    else:
        # Dense path (default): densify each rescaled Laplacian.
        laplacian = [
            torch.tensor(graph.rescale_L(graph.laplacian(
                A, normalized=normalized_laplacian).todense(),
                                         lmax=2),
                         dtype=torch.float).to(device) for A in graphs
        ]

    # 80/20 chronological train/test split.
    idx_train = range(int(0.8 * time_series.shape[0]))
    print('Size of train set: {}'.format(len(idx_train)))

    idx_test = range(len(idx_train), time_series.shape[0])
    print('Size of test set: {}'.format(len(idx_test)))

    train_data = time_series[idx_train]
    train_labels = labels[idx_train]
    test_data = time_series[idx_test]
    test_labels = labels[idx_test]

    # Permute the node axis so it lines up with the coarsened graphs.
    train_data = perm_data_time(train_data, perm)
    test_data = perm_data_time(test_data, perm)

    return laplacian, train_data, test_data, train_labels, test_labels
Beispiel #4
0
def load_hcp_tcgn(device):
    """Load the full HCP example data for a TCGN model (experimental variant).

    NOTE(review): as written, this variant discards the graph structure
    entirely — the graph list is a single random sparse matrix with
    density=0 (no edges) and the coarsening permutation of the data is
    commented out. This looks like an ablation/debugging setup; confirm
    the intent before reusing it.

    Args:
        device: torch device the COO edge-index tensors are moved to.

    Returns:
        tuple: (graphs, coos, train_data, test_data, train_labels,
        test_labels).
    """

    time_series, labels, As = load_hcp_example(full=True)

    # NOTE(review): normalized_laplacian and coarsening_levels are only
    # used inside the `shuffled` branch below (coarsening_levels) or not
    # at all (normalized_laplacian) in this variant.
    normalized_laplacian = True
    coarsening_levels = 4
    shuffled = False

    A = As[0]
    #A = arr.todense()

    if shuffled:
        # Shuffle the upper-triangular entries of A, rebuild a symmetric
        # matrix from them, and coarsen the shuffled graph.
        # NOTE(review): `graphs` computed here is overwritten by
        # `graphs = [W]` below and `perm` is never used — this branch's
        # result is effectively discarded as the code stands.
        B = A.toarray()
        B = list(B[np.triu_indices(A.shape[0])])
        random.shuffle(B)
        A = np.zeros((A.shape[0], A.shape[0]))
        indices = np.triu_indices(A.shape[0])
        A[indices] = B
        A = A + A.T - np.diag(A.diagonal())
        A = sp.csr_matrix(A)
        graphs, perm = coarsening.coarsen(A,
                                          levels=coarsening_levels,
                                          self_connections=False)
    #else:
    # W: random CSR matrix with density=0, i.e. it contains no edges.
    W = sp.random(As[0].shape[0],
                  As[0].shape[0],
                  density=0,
                  format='csr',
                  data_rvs=lambda s: np.random.uniform(0, 1, size=s))

    #graphs, perm = coarsening.coarsen(W, levels=coarsening_levels, self_connections=False)
    #graphs = [As[0]]
    graphs = [W]

    # Edge indices of each graph as a (2, nnz) long tensor on `device`
    # (empty here, since W has no nonzero entries).
    coos = [
        torch.tensor([graph.tocoo().row, graph.tocoo().col],
                     dtype=torch.long).to(device) for graph in graphs
    ]

    # 80/20 chronological train/test split.
    idx_train = range(int(0.8 * time_series.shape[0]))
    print('Size of train set: {}'.format(len(idx_train)))

    idx_test = range(len(idx_train), time_series.shape[0])
    print('Size of test set: {}'.format(len(idx_test)))

    train_data = time_series[idx_train]
    train_labels = labels[idx_train]
    test_data = time_series[idx_test]
    test_labels = labels[idx_test]

    # NOTE(review): the data is deliberately NOT permuted here,
    # consistent with replacing the coarsened hierarchy by W.
    #train_data = perm_data_time(train_data, perm)
    #test_data = perm_data_time(test_data, perm)

    return graphs, coos, train_data, test_data, train_labels, test_labels