def syn_task1(args, writer=None):
    """Generate the synthetic "syn1" graph and train a node classifier on it.

    Args:
        args: parsed program arguments (reads input_dim, hidden_dim,
            output_dim, num_gc_layers, bn, method, gpu, ...).
        writer: optional summary writer, forwarded to the trainer.
    """
    # data: synthetic graph with constant (all-ones) node features
    G, labels, name = gengraph.gen_syn1(
        feature_generator=featgen.ConstFeatureGen(
            np.ones(args.input_dim, dtype=float)))
    num_classes = max(labels) + 1

    # Both branches of the original conditional built the identical model;
    # only the log message differed, so the duplicated constructor call is
    # hoisted out of the if/else.
    if args.method == "att":
        print("Method: att")
    else:
        print("Method:", args.method)
    model = models.GcnEncoderNode(
        args.input_dim,
        args.hidden_dim,
        args.output_dim,
        num_classes,
        args.num_gc_layers,
        bn=args.bn,
        args=args,
    )
    if args.gpu:
        model = model.cuda()

    train_node_classifier(G, labels, model, args, writer=writer)
# --- Example #2 ---
def main(args):
    """Generate a synthetic dataset, train a dummy GNN on it, and save both
    the graph and the trained model weights to disk."""
    # Dataset name -> generator function (replaces the if/elif chain).
    generators = {
        'syn1': gen_syn1,
        'syn2': gen_syn2,
        'syn3': gen_syn3,
        'syn4': gen_syn4,
        'syn5': gen_syn5,
    }
    if args.dataset not in generators:
        raise NotImplementedError
    g, labels, name = generators[args.dataset]()

    # Convert the networkx graph to a DGL graph with labels and random features.
    graph = dgl.from_networkx(g)
    labels = th.tensor(labels, dtype=th.long)
    graph.ndata['label'] = labels
    graph.ndata['feat'] = th.randn(graph.number_of_nodes(), args.feat_dim)
    hid_dim = th.tensor(args.hidden_dim, dtype=th.long)
    label_dict = {'hid_dim': hid_dim}

    # save graph for later use
    save_graphs(filename='./' + args.dataset + '.bin', g_list=[graph], labels=label_dict)

    num_classes = max(graph.ndata['label']).item() + 1
    n_feats = graph.ndata['feat']

    # Model, loss, and optimizer.
    model = dummy_gnn_model(args.feat_dim, args.hidden_dim, num_classes)
    criterion = nn.CrossEntropyLoss()
    optimizer = th.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)

    # Full-batch training loop over all nodes.
    for epoch in range(args.epochs):
        model.train()

        logits = model(graph, n_feats)
        loss = criterion(logits, labels)
        acc = th.sum(logits.argmax(dim=1) == labels).item() / len(labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print('In Epoch: {:03d}; Acc: {:.4f}; Loss: {:.6f}'.format(epoch, acc, loss.item()))

    # Persist the trained weights next to the dataset.
    state = model.state_dict()
    model_path = os.path.join('./', 'dummy_model_{}.pth'.format(args.dataset))
    th.save(state, model_path)
# --- Example #3 ---
def syn_task1(args, writer=None):
    """Generate the synthetic "syn1" graph and train a GCN node classifier.

    Args:
        args: parsed program arguments (reads input_dim, hidden_dim,
            num_gc_layers, gpu, train_ratio, lr, num_epochs, ...).
        writer: optional summary writer. May be None (the default); scalar
            logging is skipped in that case. The original called
            writer.add_scalar unconditionally and crashed with
            AttributeError whenever the default was used.
    """
    # data
    print('Generating graph.')
    G, labels, name = gengraph.gen_syn1(
        feature_generator=featgen.ConstFeatureGen(
            np.ones(args.input_dim, dtype=float)))
    pyg_G = from_networkx(G)
    num_classes = max(labels) + 1
    labels = torch.LongTensor(labels)
    print('Done generating graph.')

    model = GCNNet(args.input_dim,
                   args.hidden_dim,
                   num_classes,
                   args.num_gc_layers,
                   args=args)

    if args.gpu:
        model = model.cuda()

    # Random train/test split over the node indices.
    train_ratio = args.train_ratio
    num_train = int(train_ratio * G.number_of_nodes())
    num_test = G.number_of_nodes() - num_train
    shuffle_indices = np.random.permutation(G.number_of_nodes())

    train_mask = num_train * [True] + num_test * [False]
    train_mask = torch.BoolTensor([train_mask[i] for i in shuffle_indices])
    test_mask = num_train * [False] + num_test * [True]
    test_mask = torch.BoolTensor([test_mask[i] for i in shuffle_indices])

    # Single-graph loader: one batch containing the whole graph.
    loader = torch_geometric.data.DataLoader([pyg_G], batch_size=1)
    opt = torch.optim.Adam(model.parameters(), lr=args.lr)
    for epoch in range(args.num_epochs):
        total_loss = 0
        model.train()
        for batch in loader:
            opt.zero_grad()
            pred = model(batch)

            # Loss is computed on the training nodes only.
            pred = pred[train_mask]
            label = labels[train_mask]
            loss = model.loss(pred, label)
            print('loss:', loss)
            loss.backward()
            opt.step()
            total_loss += loss.item()
        total_loss /= num_train
        if writer is not None:
            writer.add_scalar("loss", total_loss, epoch)

        if epoch % 10 == 0:
            test_acc = test(loader, model, args, labels, test_mask)
            print("Epoch {}. Loss: {:.4f}. Test accuracy: {:.4f}".format(
                epoch, total_loss, test_acc))
            if writer is not None:
                writer.add_scalar("test accuracy", test_acc, epoch)
# --- Example #4 ---
def syn_task1(args, writer=None):
    """Generate the "syn1" graph, train a GcnEncoderNode on it, return the model.

    Args:
        args: parsed program arguments; note that args.num_classes is updated
            in place before the model is built.
        writer: optional summary writer, forwarded to the trainer.

    Returns:
        The trained GcnEncoderNode model (for manipulations in ipynb).
    """
    print("\nStart with these parsed program arguments :\n", args)

    # np.ones(input_dim, dtype=float) = [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
    constant_feature = featureGen.ConstFeatureGen(
        np.ones(args.input_dim, dtype=float))
    print("Constant feature generator : ", constant_feature.val)

    # Create the BA graph with the "house" motifs
    G, labels, name = gengraph.gen_syn1(feature_generator=constant_feature)

    # No .of classes from [0-3] for BA graph with house motifs
    num_classes = max(labels) + 1
    # Update number of classes in argument for training (Out of bounds error)
    args.num_classes = num_classes

    # GcnEncoderNode model
    print("------------ GCNEncoderNode Model ------------")
    print("Input dimensions :", args.input_dim)
    print("Hidden dimensions :", args.hidden_dim)
    print("Output dimensions :", args.output_dim)
    print("Number of classes in args :", args.num_classes)
    print("Number of GCN layers :", args.num_gc_layers)
    print("Method : ", args.method)

    model = models.GcnEncoderNode(args.input_dim,
                                  args.hidden_dim,
                                  args.output_dim,
                                  args.num_classes,
                                  args.num_gc_layers,
                                  bn=args.bn,
                                  args=args)

    print("GcnEncoderNode model :\n", model)

    if args.gpu:
        model = model.cuda()

    train_node_classifier(G, labels, model, args, writer=writer)

    # Return model for manipulations in ipynb
    return model