Example #1
import numpy as np
import mxnet as mx
from mxnet import autograd, gluon

# `args`, `GCN` and `load_data` come from the surrounding project and are not shown
# here. The original excerpt starts at the `else:` branch, so the guard below is an
# assumed reconstruction of the usual CPU fallback.
if args.num_gpu < 0:
    ctx = mx.cpu()  # pylint: disable=invalid-name
else:
    ctx = mx.gpu(args.num_gpu)  # pylint: disable=invalid-name

# Set seed for random number generators in numpy and mxnet
np.random.seed(args.seed)
mx.random.seed(args.seed)

adj, features, labels, idx_train, idx_val, idx_test = load_data(ctx=ctx)  # pylint: disable=invalid-name

model = GCN(
    nfeat=features.shape[1],  # pylint: disable=invalid-name
    nhid=args.hidden,
    nclass=int(labels.max().asnumpy().item()) + 1,
    dropout=args.dropout)

model.collect_params().initialize(ctx=ctx)
trainer = gluon.Trainer(
    model.collect_params(),  # pylint: disable=invalid-name
    'adam',
    {
        'learning_rate': args.lr,
    })

# Note: Original implementation uses
# Negative Log Likelihood and not
# SoftmaxCrossEntropyLoss
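# (with log-softmax outputs the closer equivalent here would roughly be
# gluon.loss.SoftmaxCrossEntropyLoss(from_logits=True), since from_logits=True
# makes the loss treat its input as log-probabilities, i.e. plain NLL)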
loss = gluon.loss.SoftmaxCrossEntropyLoss()  # pylint: disable=invalid-name

accs = []  # pylint: disable=invalid-name

for epoch in range(args.epochs):
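    # The body of this loop is cut off in the original example. A minimal sketch of
    # one full-batch training step, assuming the model is called as
    # model(features, adj) (mirroring Example #2), might look like:
    with autograd.record():
        output = model(features, adj)
        train_loss = loss(output[idx_train], labels[idx_train])
    train_loss.backward()
    trainer.step(1)

    # Evaluate on the validation split and record the accuracy in `accs`
    output = model(features, adj)
    pred = output[idx_val].argmax(axis=1)
    accs.append((pred == labels[idx_val].astype(pred.dtype)).mean().asscalar())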
Example #2
# In the full script these imports live at module level; the fragment below starts
# inside a truncated loop, so names such as probability, t, k, train_dict, features,
# A_, idx, val_mask, y_val and ctx are defined in the part that is not shown.
import numpy as np
from mxnet import autograd, gluon, nd

        # Pseudo-label the t most confident nodes for the current class k and add
        # them to train_dict (node index -> label), skipping nodes already selected
        for i in np.argsort(probability).tolist()[0][::-1][:t]:
            if i in train_dict:
                continue
            train_dict[i] = k

    print('new dataset size: %s' % (len(train_dict)))

    new_train_index = sorted(train_dict.keys())
    new_train_label = [train_dict[i] for i in new_train_index]

    net = GCN()
    net.initialize(ctx=ctx)
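    # hybridize() lets Gluon compile the network into a static graph for faster
    # execution; this assumes GCN is implemented as a HybridBlock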
    net.hybridize()

    loss_function = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': 1e-3})

    for epoch in range(100):
        with autograd.record():
            output = net(features, A_)
            l = loss_function(output[new_train_index],
                              nd.array(new_train_label, ctx=ctx))
        l.backward()
        trainer.step(1)
        print('training loss: %.2f' % (l.mean().asnumpy()[0]))

        output = net(features, A_)
        l = loss_function(output[idx[val_mask]],
                          nd.argmax(y_val[idx[val_mask]], axis=1))
        print('validation loss %.2f' % (l.mean().asnumpy()[0]))
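    # After the loop, a test-set evaluation in the same style might look like the
    # sketch below, assuming y_test and test_mask arrays analogous to y_val/val_mask
    # exist in the truncated part of the script:
    output = net(features, A_)
    pred = nd.argmax(output[idx[test_mask]], axis=1)
    acc = (pred == nd.argmax(y_test[idx[test_mask]], axis=1)).mean().asscalar()
    print('test accuracy: %.2f' % acc)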