Example #1
    def test_identity_sum_features(self):
        graph_cnn = gcn.GCN(2, 2, 2, 2, 2, [2], 1, act=F.relu)
        eye_weights(graph_cnn.msg.f)
        eye_weights(graph_cnn.msg.g)
        eye_weights(graph_cnn.h)
        eye_weights(graph_cnn.k1)
        eye_weights(graph_cnn.k2)
        eye_weights(graph_cnn.l)
        eye_weights(graph_cnn.q)
        node_feats = torch.FloatTensor([[1, 2], [-3, -4], [5, 6]])
        adj_mat = torch.FloatTensor([[0, 1, 1], [1, 0, 0], [1, 0, 0]])
        edges = [(0, 1), (0, 2), (1, 0), (2, 0)]
        edge_feats = torch.FloatTensor([[-1, 2], [3, -4], [-5, 6], [7, -8]])
        node_output, edge_output = graph_cnn(node_feats, adj_mat, edges,
                                             edge_feats)
        self.assertTrue(
            bool(
                torch.all(
                    torch.eq(node_output,
                             torch.FloatTensor([[6, 8], [1, 2], [6, 8]])))))
        self.assertTrue(
            bool(
                torch.all(
                    torch.eq(
                        edge_output,
                        torch.FloatTensor([[7, 12], [15, 16], [7, 16],
                                           [19, 16]])))))
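The eye_weights calls above refer to a helper that is not shown in this
listing. A minimal sketch of such a helper, assuming the sub-modules are built
from square nn.Linear layers, could look like the following (hypothetical,
for illustration only):

import torch
import torch.nn as nn

def eye_weights(module):
    # Hypothetical helper: make every Linear layer inside the module an
    # identity map (identity weight matrix, zero bias), so inputs pass
    # through unchanged and the expected outputs in the test above can be
    # worked out by hand.
    for m in module.modules():
        if isinstance(m, nn.Linear):
            with torch.no_grad():
                m.weight.copy_(torch.eye(m.out_features, m.in_features))
                if m.bias is not None:
                    m.bias.zero_()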
Example #2
    def test_zero_output(self):
        graph_cnn = gcn.GCN(3, 0, 0, 0, 16, [16], 5)
        node_feats = torch.FloatTensor(4, 3)
        node_feats.data.uniform_(-1, 1)
        adj_mat = torch.ones([4, 4])
        edges = [(0, 2), (0, 1), (1, 2), (2, 0)]
        edge_feats = None
        node_output, edge_output = graph_cnn(node_feats, adj_mat, edges,
                                             edge_feats)
        self.assertEqual(node_output.shape[1], 0)
        self.assertEqual(edge_output.shape[1], 0)
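The random node features above use the legacy torch.FloatTensor(4, 3)
constructor followed by an in-place uniform_ fill. An equivalent, more current
way to write the same initialization (a sketch, not part of the original test):

import torch

# allocate an uninitialized 4x3 float tensor and fill it in place with U(-1, 1)
node_feats = torch.empty(4, 3).uniform_(-1, 1)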
Example #3
    def test_edge_output(self):
        seed = 42
        n_hids = [16]
        n_iters = 500

        torch.manual_seed(seed)
        np.random.seed(seed)

        graph_cnn = gcn.GCN(2, 2, 0, 1, 2, n_hids, 1)
        graph_cnn.train()
        optimizer = torch.optim.Adam(graph_cnn.parameters(), lr=1e-2)

        losses = []
        for _ in range(n_iters):

            node_feats_np = np.random.rand(3, 2)
            adj_mat_np = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]])
            edges = [(0, 1), (0, 2), (1, 0), (2, 0)]

            node_feats = torch.FloatTensor(node_feats_np)
            adj_mat = torch.FloatTensor(adj_mat_np)

            node_output, edge_output = graph_cnn(node_feats, adj_mat, edges,
                                                 None)

            # the source (left) and destination (right) nodes contribute differently
            true_edge_output_np = np.array([
                2 * node_feats_np[0, :] + node_feats_np[1, :],
                2 * node_feats_np[0, :] + node_feats_np[2, :],
                2 * node_feats_np[1, :] + node_feats_np[0, :],
                2 * node_feats_np[2, :] + node_feats_np[0, :]
            ])

            true_edge_output_np = np.sum(true_edge_output_np,
                                         axis=1,
                                         keepdims=True)
            true_edge_output = torch.FloatTensor(true_edge_output_np)

            loss = torch.sum((true_edge_output - edge_output)**2)
            # store a plain float so the autograd graph is not kept alive
            losses.append(loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # the loss should start high and fall below 0.05 by the end of training
        self.assertGreater(losses[0], 0.5)
        self.assertLess(sum(losses[-10:]) / 10, 0.05)
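For reference, the regression target in this test weights the source node of
each directed edge twice as heavily as its destination node and then sums over
the feature dimension. A small standalone check of that formula on made-up
numbers (illustrative only, not part of the test):

import numpy as np

node_feats_np = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
# target for directed edge (0, 1): sum over features of 2 * x_src + x_dst
target_01 = np.sum(2 * node_feats_np[0, :] + node_feats_np[1, :])
print(target_01)  # 2*0.1 + 0.3 + 2*0.2 + 0.4 = 1.3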
Example #4
def main(args):
    gcn.utils.set_seed(args.seed)

    log.debug("Loading data from '%s'." % args.data)
    data = gcn.utils.load_pkl(args.data)
    G, vocab = data["graph"], data["vocab"]
    train_samples = data["train"]
    dev_samples = data["dev"]
    test_samples = data["test"]
    log.info("Loaded data.")

    A_norm = G.get_adj_norm(args.device)
    X = torch.eye(G.get_node_size())
    log.info("Built A norm.")

    log.debug("Building model...")
    model = gcn.GCN(X.size(-1), vocab["tag"].size(), A_norm,
                    args.hidden_dim).to(args.device)
    opt = gcn.Optim(args.learning_rate, args.max_grad_value, args.weight_decay)
    opt.set_parameters(model.parameters(), args.optimizer)

    model_file = "save/model.pt"

    for name, value in model.named_parameters():
        log.debug("name: {}\t grad: {}".format(name, value.requires_grad))
    nParams = sum([p.nelement() for p in model.parameters()])
    log.debug("number of parameters: %d" % nParams)

    coach = gcn.Coach(model, opt, X, train_samples, dev_samples, test_samples,
                      args)
    if not args.from_begin:
        ckpt = torch.load(model_file)
        coach.load_ckpt(ckpt)
        log.info("Loaded from checkpoint.")

    # Train.
    log.info("Start training...")
    ret = coach.train()

    # Save.
    checkpoint = {
        "best_acc": ret[0],
        "best_epoch": ret[1],
        "best_state": ret[2],
    }
    torch.save(checkpoint, model_file)
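main assumes an args namespace that carries, at minimum, the fields read above:
seed, data, device, hidden_dim, learning_rate, max_grad_value, weight_decay,
optimizer, and from_begin. One possible command-line parser that would supply
them (the flag names mirror those fields; the defaults and help strings are
illustrative assumptions, not taken from the original project):

import argparse

def parse_args():
    # hypothetical parser; every flag corresponds to an attribute main() reads
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, required=True,
                        help="path to the pickled graph/vocab/splits file")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--hidden_dim", type=int, default=16)
    parser.add_argument("--learning_rate", type=float, default=1e-2)
    parser.add_argument("--max_grad_value", type=float, default=5.0)
    parser.add_argument("--weight_decay", type=float, default=0.0)
    parser.add_argument("--optimizer", type=str, default="adam")
    parser.add_argument("--from_begin", action="store_true",
                        help="train from scratch instead of loading save/model.pt")
    return parser.parse_args()

if __name__ == "__main__":
    main(parse_args())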