def plot_points(colors):
    model.eval()
    with torch.no_grad():  # no gradients needed; also lets .cpu().numpy() work on the embeddings
        z = model.encode(data.x, data.train_pos_edge_index)
    z = TSNE(n_components=2).fit_transform(z.cpu().numpy())
    y = data.y.cpu().numpy()

    plt.figure(figsize=(8, 8))
    for i in range(dataset.num_classes):
        plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i])
    plt.axis('off')
    plt.show()
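Usage sketch (not part of the original snippet): this variant assumes a trained torch_geometric graph autoencoder in model, a data object whose edge split provides train_pos_edge_index, and a dataset exposing num_classes; with that setup in scope, it is simply called with one color per class.

colors = ['#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535', '#ffd700']
plot_points(colors)  # t-SNE scatter of the encoder embeddings, one color per class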
def plot_points(colors):
    model.eval()
    with torch.no_grad():  # embedding lookup only; keeps .cpu().numpy() from failing on grad tensors
        z = model(torch.arange(data.num_nodes, device=device))
    z = TSNE(n_components=2).fit_transform(z.cpu().numpy())
    y = data.y.cpu().numpy()

    plt.figure(figsize=(8, 8))
    for i in range(dataset.num_classes):
        plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i])
    plt.axis('off')
    plt.show()
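For the Node2Vec variant above, a minimal setup sketch (assumed, not shown in the example; data and the hyperparameters here are illustrative only) could look like this:

import torch
from torch_geometric.nn import Node2Vec

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Node2Vec(data.edge_index, embedding_dim=128, walk_length=20,
                 context_size=10, walks_per_node=10, sparse=True).to(device)
# train the embeddings with model.loader() / model.loss() before calling plot_points(colors)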
Example #3
def plot_points(colors):
    model.eval()
    start = time.time()
    batch = torch.arange(data.num_nodes, device=device)
    z = model(batch)
    print('Node2Vec execution time: {0}'.format(time.time() - start))

    # Now run the same embedding lookup through ONNX Runtime for comparison
    sess_options = onnxruntime.SessionOptions()
    # This will save the optimized graph to the directory specified in optimized_model_filepath
    sess_options.optimized_model_filepath = os.path.join(
        "./models/graphml", "node2vec_optimized_model_{}.onnx".format(device))
    ort_session = onnxruntime.InferenceSession("models/graphml/node2vec.onnx",
                                               sess_options)

    def to_numpy(tensor):
        # detach first if the tensor is still attached to the autograd graph
        if tensor.requires_grad:
            return tensor.detach().cpu().numpy()
        return tensor.cpu().numpy()

    # compute ONNX Runtime output prediction

    # get the outputs metadata as a list of :class:`onnxruntime.NodeArg`
    output_name = ort_session.get_outputs()[0].name

    # get the inputs metadata as a list of :class:`onnxruntime.NodeArg`
    input_name = ort_session.get_inputs()[0].name
    ort_inputs = {input_name: to_numpy(batch)}

    ort_session.set_providers(['CPUExecutionProvider'])

    start = time.time()
    ort_outs = ort_session.run([output_name], ort_inputs)
    print('Node2Vec (ONNX) execution time: {0}'.format(time.time() - start))

    z = TSNE(n_components=2).fit_transform(to_numpy(z))
    y = data.y.cpu().numpy()

    plt.figure(figsize=(8, 8))
    for i in range(dataset.num_classes):
        plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i])
    plt.axis('off')
    plt.show()
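Example #3 loads a pre-exported "models/graphml/node2vec.onnx" file; a hedged sketch of how that file might be produced (the tensor names and dynamic axis are assumptions, not taken from the example) is:

import torch

batch = torch.arange(data.num_nodes, device=device)
torch.onnx.export(model, batch, "models/graphml/node2vec.onnx",
                  input_names=['batch'], output_names=['embedding'],
                  dynamic_axes={'batch': {0: 'num_nodes'}})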
Example #4
                      lr=params.lr,
                      momentum=params.momentum)

criterion = nn.CrossEntropyLoss()

for epoch in range(params.epochs):
    t0 = time.time()
    print('Epoch: {}'.format(epoch))
    train.train(common_net, src_net, tgt_net, optimizer, criterion, epoch,
                src_train_dataloader, tgt_train_dataloader, train_hist)
    t1 = time.time() - t0
    print('Time: {:.4f}s'.format(t1))
    test.test(common_net, src_net, src_test_dataloader, tgt_test_dataloader,
              epoch, test_hist)

# run source and target images through the shared feature extractor (expanded to 3x28x28, on GPU)
src_features = common_net(
    Variable(src_imgs.expand(src_imgs.shape[0], 3, 28, 28).cuda()))
tgt_features = common_net(
    Variable(tgt_imgs.expand(tgt_imgs.shape[0], 3, 28, 28).cuda()))
src_features = src_features.cpu().data.numpy()
tgt_features = tgt_features.cpu().data.numpy()
src_features = TSNE(n_components=2).fit_transform(src_features)
tgt_features = TSNE(n_components=2).fit_transform(tgt_features)

utils.visulize_loss(train_hist)
utils.visualize_accuracy(test_hist)
plt.scatter(src_features[:, 0], src_features[:, 1], color='r')
plt.scatter(tgt_features[:, 0], tgt_features[:, 1], color='b')
plt.title('Adapted')
pylab.show()
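One caveat for the plot above: the two TSNE calls embed the source and target features into unrelated 2-D spaces, so the red and blue clouds are only loosely comparable. A common alternative (a sketch only; src_raw and tgt_raw are hypothetical names standing for the untransformed feature matrices from common_net) is a single joint fit:

import numpy as np

# Embed both feature sets jointly so they share one t-SNE space.
joint = TSNE(n_components=2).fit_transform(np.concatenate([src_raw, tgt_raw], axis=0))
plt.scatter(joint[:len(src_raw), 0], joint[:len(src_raw), 1], color='r', label='source')
plt.scatter(joint[len(src_raw):, 0], joint[len(src_raw):, 1], color='b', label='target')
plt.legend()
plt.title('Adapted (joint t-SNE)')
plt.show()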
Example #5
            subset.edge_index_oriIndexxed).long().to(device)
        loss = model.loss(edge_index, batch)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(hops_sample_loader)


# train for 499 epochs, printing the average batch loss each epoch
for epoch in range(1, 500):
    loss = train()
    print('Epoch: {:02d}, Loss: {:.4f}'.format(epoch, loss))

# compute the final node embeddings and project them to 2-D for plotting
model.eval()
with torch.no_grad():
    z = model(torch.arange(num_nodes, device=device))
    z = TSNE(n_components=2).fit_transform(z.cpu().numpy())
    y = data.node_class

# def plot_points(colors):
#     model.eval()
#     with torch.no_grad():
#         z = model(torch.arange(num_nodes, device=device))
#         z = TSNE(n_components=3).fit_transform(z.cpu().numpy())
#         y = data.node_class

colors = [
    '#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535', '#ffd700'
]
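# one color per class; the scatter loop below iterates over the 7 class labels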
# ax = Axes3D(plt.figure())
plt.figure(figsize=(8, 8))
for i in range(7):