Example #1
def train_mcd_single(gnn, optimizer, logger, gen, n_classes, it):
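    """Train the LGNN on one on-the-fly SBM sample and return (loss, accuracy)."""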
    start = time.time()
    W, labels = gen.sample_otf_single(is_training=True,
                                      cuda=torch.cuda.is_available())
    labels = labels.type(dtype_l)

    print('Num of edges: ', np.sum(W))

    if (args.generative_model == 'SBM_multiclass') and (args.n_classes == 2):
        labels = (labels + 1) / 2

    WW, x, WW_lg, y, P = get_lg_inputs(W, args.J)

    if (torch.cuda.is_available()):
        # .cuda() is not in-place; reassign so the tensors actually move to GPU
        WW = WW.cuda()
        x = x.cuda()
        WW_lg = WW_lg.cuda()
        y = y.cuda()
        P = P.cuda()

    pred = gnn(WW.type(dtype), x.type(dtype), WW_lg.type(dtype), y.type(dtype),
               P.type(dtype))

    loss = compute_loss_multiclass(pred, labels, n_classes)
    gnn.zero_grad()
    loss.backward()
    nn.utils.clip_grad_norm_(gnn.parameters(), args.clip_grad_norm)
    optimizer.step()

    acc = compute_accuracy_multiclass(pred, labels, n_classes)

    elapsed = time.time() - start

    if (torch.cuda.is_available()):
        loss_value = float(loss.data.cpu().numpy())
    else:
        loss_value = float(loss.data.numpy())

    info = [
        'iter', 'avg loss', 'avg acc', 'edge_density', 'noise', 'model',
        'elapsed'
    ]
    out = [it, loss_value, acc, args.edge_density, args.noise, 'LGNN', elapsed]
    print(template1.format(*info))
    print(template2.format(*out))

    del WW
    del WW_lg
    del x
    del y
    del P

    return loss_value, acc
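
For context, here is a minimal sketch of how a driver loop might call train_mcd_single. It is only an illustration: the wrapper name train_mcd, the Adam optimizer, and the hyperparameters args.lr and args.num_examples_train are assumptions, not necessarily the project's actual API.

import torch.optim as optim

def train_mcd(gnn, logger, gen, n_classes):
    # Hypothetical driver: sample a fresh graph and take one optimizer step
    # per iteration, collecting the reported loss and accuracy.
    optimizer = optim.Adam(gnn.parameters(), lr=args.lr)
    losses, accs = [], []
    for it in range(args.num_examples_train):
        loss_value, acc = train_mcd_single(gnn, optimizer, logger, gen,
                                           n_classes, it)
        losses.append(loss_value)
        accs.append(acc)
    return losses, accs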
Example #2
def test_mcd_single(gnn, logger, gen, n_classes, it):
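    """Evaluate the LGNN on one freshly sampled test SBM graph; return (loss, accuracy)."""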

    start = time.time()
    W, labels = gen.sample_otf_single(is_training=False,
                                      cuda=torch.cuda.is_available())
    labels = labels.type(dtype_l)
    if (args.generative_model == 'SBM_multiclass') and (args.n_classes == 2):
        labels = (labels + 1)/2
    WW, x, WW_lg, y, P = get_lg_inputs(W, args.J)

    if (torch.cuda.is_available()):
        # .cuda() is not in-place; reassign so the tensors actually move to GPU
        WW = WW.cuda()
        x = x.cuda()
        WW_lg = WW_lg.cuda()
        y = y.cuda()
        P = P.cuda()

    pred_single = gnn(WW.type(dtype), x.type(dtype), WW_lg.type(dtype),
                      y.type(dtype), P.type(dtype))
    labels_single = labels

    loss_test = compute_loss_multiclass(pred_single, labels_single, n_classes)
    acc_test = compute_accuracy_multiclass(pred_single, labels_single, n_classes)

    elapsed = time.time() - start

    if (torch.cuda.is_available()):
        loss_value = float(loss_test.data.cpu().numpy())
    else:
        loss_value = float(loss_test.data.numpy())

    info = ['iter', 'avg loss', 'avg acc', 'edge_density',
            'noise', 'model', 'elapsed']
    out = [it, loss_value, acc_test, args.edge_density,
           args.noise, 'LGNN', elapsed]
    print(template1.format(*info))
    print(template2.format(*out))

    del WW
    del WW_lg
    del x
    del y
    del P

    return loss_value, acc_test
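
These functions rely on a few module-level globals that are not shown in the snippets: dtype, dtype_l, template1 and template2. A minimal sketch of plausible definitions follows; the column widths and float formats are assumptions and the real project may define them differently.

import torch

# Float/long tensor types, switched to their CUDA variants when a GPU is present.
if torch.cuda.is_available():
    dtype = torch.cuda.FloatTensor
    dtype_l = torch.cuda.LongTensor
else:
    dtype = torch.FloatTensor
    dtype_l = torch.LongTensor

# Header and value-row templates for the progress printouts above (widths assumed).
template1 = '{:<10} {:<12} {:<12} {:<14} {:<8} {:<8} {:<10}'
template2 = '{:<10} {:<12.6f} {:<12.6f} {:<14.4f} {:<8.3f} {:<8} {:<10.3f}'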
Example #3
def train_single(gnn, optimizer, gen, n_classes, it):
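    """Train the plain GNN on one on-the-fly SBM sample and return (loss, accuracy)."""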
    start = time.time()
    W, labels = gen.sample_otf_single(is_training=True,
                                      cuda=torch.cuda.is_available())
    labels = labels.type(dtype_l)

    if (args.generative_model == 'SBM_multiclass') and (args.n_classes == 2):
        labels = (labels + 1) / 2

    WW, x = get_gnn_inputs(W, args.J)

    if (torch.cuda.is_available()):
        # .cuda() is not in-place; reassign so the tensors actually move to GPU
        WW = WW.cuda()
        x = x.cuda()

    pred = gnn(WW.type(dtype), x.type(dtype))

    loss = compute_loss_multiclass(pred, labels, n_classes)
    gnn.zero_grad()
    loss.backward()
    nn.utils.clip_grad_norm_(gnn.parameters(), args.clip_grad_norm)
    optimizer.step()

    acc = compute_accuracy_multiclass(pred, labels, n_classes)

    elapsed = time.time() - start

    if (torch.cuda.is_available()):
        loss_value = float(loss.data.cpu().numpy())
    else:
        loss_value = float(loss.data.numpy())

    info = [
        'iter', 'avg loss', 'avg acc', 'edge_density', 'noise', 'model',
        'elapsed'
    ]
    out = [it, loss_value, acc, args.edge_density, args.noise, 'GNN', elapsed]
    print(template1.format(*info))
    print(template2.format(*out))

    del WW
    del x

    return loss_value, acc
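
The helper compute_loss_multiclass used throughout these examples is not shown. Since community labels in an SBM are only defined up to a relabeling of the classes, a natural choice is a permutation-invariant cross-entropy; the sketch below only illustrates that idea, assuming pred holds per-node logits of shape (batch, n_nodes, n_classes) and labels holds integer class ids. The project's actual helper may be implemented differently.

import itertools
import torch
import torch.nn.functional as F

def permutation_invariant_ce(pred, labels, n_classes):
    # Evaluate the cross-entropy under every permutation of the class ids and
    # keep the smallest value, so the loss does not depend on how the ground
    # truth happens to number the communities.
    flat_pred = pred.reshape(-1, n_classes)
    best = None
    for perm in itertools.permutations(range(n_classes)):
        mapping = torch.tensor(perm, device=labels.device)
        permuted = mapping[labels.long()].reshape(-1).to(flat_pred.device)
        loss = F.cross_entropy(flat_pred, permuted)
        best = loss if best is None else torch.min(best, loss)
    return best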
Example #4
def test_single(gnn, gen, n_classes, it):
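    """Evaluate the plain GNN on one freshly sampled test SBM graph; return (loss, accuracy)."""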

    start = time.time()
    W, labels = gen.sample_otf_single(is_training=False,
                                      cuda=torch.cuda.is_available())
    labels = labels.type(dtype_l)
    if (args.generative_model == 'SBM_multiclass') and (args.n_classes == 2):
        labels = (labels + 1) / 2
    WW, x = get_gnn_inputs(W, args.J)

    if (torch.cuda.is_available()):
        # .cuda() is not in-place; reassign so the tensors actually move to GPU
        WW = WW.cuda()
        x = x.cuda()

    pred_single = gnn(WW.type(dtype), x.type(dtype))
    labels_single = labels

    loss_test = compute_loss_multiclass(pred_single, labels_single, n_classes)
    acc_test = compute_accuracy_multiclass(pred_single, labels_single,
                                           n_classes)

    elapsed = time.time() - start

    if (torch.cuda.is_available()):
        loss_value = float(loss_test.data.cpu().numpy())
    else:
        loss_value = float(loss_test.data.numpy())

    info = [
        'iter', 'avg loss', 'avg acc', 'edge_density', 'noise', 'model',
        'elapsed'
    ]
    out = [
        it, loss_value, acc_test, args.edge_density, args.noise, 'GNN', elapsed
    ]
    print(template1.format(*info))
    print(template2.format(*out))

    del WW
    del x

    return loss_value, acc_test
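
As with training, evaluation would normally be averaged over many freshly sampled graphs. A hypothetical sketch of such a loop follows; the wrapper name test and the flag args.num_examples_test are assumptions made for illustration.

def test(gnn, gen, n_classes):
    # Hypothetical evaluation loop: average the single-graph loss and accuracy
    # over a number of independently sampled test graphs.
    losses, accs = [], []
    for it in range(args.num_examples_test):
        loss_value, acc = test_single(gnn, gen, n_classes, it)
        losses.append(loss_value)
        accs.append(acc)
    print('Average test loss: {:.5f}, average test acc: {:.5f}'.format(
        sum(losses) / len(losses), sum(accs) / len(accs)))
    return losses, accs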