Code example #1
def validation(model, features, support, val_label, val_mask):
    # switch to evaluation mode and skip gradient tracking for the forward pass
    model.eval()
    with torch.no_grad():
        output = model(features, support)
        loss_val = masked_loss(output, val_label, val_mask)
        acc_val = masked_acc(output, val_label, val_mask)
    print(f'[validation] loss = {loss_val.item():.5f} acc = {acc_val.item():.5f}')
    # restore training mode before returning to the training loop
    model.train()
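The `masked_loss` and `masked_acc` helpers are not defined in these snippets. A minimal sketch of what they typically look like in PyTorch GCN ports, assuming `output` holds per-node logits, `label` holds integer class indices, and `mask` is a 0/1 (or boolean) node mask; the projects' actual implementations may differ:

import torch
import torch.nn.functional as F

def masked_loss(output, label, mask):
    # per-node cross-entropy, averaged over the nodes selected by the mask
    loss = F.cross_entropy(output, label, reduction='none')
    mask = mask.float()
    mask = mask / mask.mean()        # rescale so only masked nodes carry weight
    return (loss * mask).mean()

def masked_acc(output, label, mask):
    # fraction of masked nodes whose argmax prediction matches the label
    correct = (output.argmax(dim=1) == label).float()
    mask = mask.float()
    mask = mask / mask.mean()
    return (correct * mask).mean()

Dividing the mask by its mean reproduces the weighting used in the original TensorFlow GCN code, where the average runs over all nodes but only masked nodes contribute.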
Code example #2
def train(model, features, support, train_label, train_mask, val_label,
          val_mask, test_label, test_mask):
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    model.train()

    for epoch in range(epochs):
        output = model(features, support)
        loss_train = masked_loss(output, train_label, train_mask)
        loss_train += model.l2_loss() * weight_decay
        acc_train = masked_acc(output, train_label, train_mask)

        # reset gradients every epoch so they do not accumulate across iterations
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()

        print(f'epoch: {epoch} loss = {loss_train.item():.5f} acc = {acc_train.item():.5f}')
        if (epoch + 1) % 10 == 0:
            validation(model, features, support, val_label, val_mask)

    test_model(model, features, support, test_label, test_mask)
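`model.l2_loss()` is also project-specific. In Kipf-style GCN implementations, weight decay is usually computed as the sum of squared weights of the first graph-convolution layer only. A minimal sketch of such a method, assuming the model stores its layers in `self.layers` (an illustrative attribute name, not necessarily the projects' own):

def l2_loss(self):
    # sum of squared parameters of the first GCN layer only, mirroring the
    # weight-decay convention of the original GCN implementation
    loss = None
    for p in self.layers[0].parameters():
        loss = p.pow(2).sum() if loss is None else loss + p.pow(2).sum()
    return loss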
Code example #3
# build the sparse adjacency ("support") tensor from the COO indices `i`,
# the values `v`, and the dense shape stored in `supports[2]`
support = torch.sparse.FloatTensor(i.t(), v, supports[2]).float().to(device)

print('x :', feature)
print('sp:', support)
num_features_nonzero = feature._nnz()   # number of non-zero entries in the sparse feature matrix
feat_dim = feature.shape[1]             # input feature dimension

net = GCN(feat_dim, num_classes, num_features_nonzero)
net.to(device)
optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)

net.train()
for epoch in range(args.epochs):
    # the model takes (features, support) as a tuple and returns a tuple;
    # the node logits are its first element
    out = net((feature, support))
    out = out[0]
    loss = masked_loss(out, train_label, train_mask)
    loss += args.weight_decay * net.l2_loss()

    acc = masked_acc(out, train_label, train_mask)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 10 == 0:
        print(epoch, loss.item(), acc.item())

net.eval()
out = net((feature, support))
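Example #3 uses `i`, `v`, and `supports` without showing how they are built. A hedged sketch of one common way to produce such inputs from an already normalized scipy sparse adjacency matrix; `sparse_to_torch_input` and `adj_norm` are illustrative names rather than the project's helpers, and on recent PyTorch versions `torch.sparse_coo_tensor` supersedes the legacy `torch.sparse.FloatTensor` constructor:

import numpy as np
import scipy.sparse as sp
import torch

def sparse_to_torch_input(mat):
    # convert a scipy sparse matrix into the (indices, values, shape) pieces
    # consumed by the snippet above
    mat = sp.coo_matrix(mat)
    indices = torch.from_numpy(np.vstack((mat.row, mat.col)).astype(np.int64)).t()  # [nnz, 2]
    values = torch.from_numpy(mat.data.astype(np.float32))
    return indices, values, torch.Size(mat.shape)

i, v, shape = sparse_to_torch_input(adj_norm)   # adj_norm: normalized adjacency (scipy sparse)
support = torch.sparse.FloatTensor(i.t(), v, shape).float().to(device)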
Code example #4
def test_model(model, features, support, test_label, test_mask):
    # evaluate on the held-out test split without tracking gradients
    model.eval()
    with torch.no_grad():
        output = model(features, support)
        loss_test = masked_loss(output, test_label, test_mask)
        acc_test = masked_acc(output, test_label, test_mask)
    print(f'[test] loss = {loss_test.item():.5f} acc = {acc_test.item():.5f}')
Code example #5
            # noise schedule: the perturbation strength decays quadratically to zero
            # over the first `noise_ratio` fraction of the optimization
            w = max(0.0, 1.0 - progress / noise_ratio)
            noise_strength = 0.05 * noise_sigma * w**2
            noise = noise_strength * torch.randn_like(latent)

            # forward G
            out = G(latent + noise if perturb_latent else latent)

            if "dusty" in cfg.model.gen.arch:
                inv_gen = utils.tanh_to_sigmoid(out["depth_orig"])
            else:
                inv_gen = utils.tanh_to_sigmoid(out["depth"])

            # loss
            loss = utils.masked_loss(inv_ref, inv_gen, mask_ref, args.distance)

            # per-sample gradients: the masked loss is left unreduced (one value per
            # sample), and backpropagating an all-ones vector is equivalent to
            # calling loss.sum().backward(), so each sample keeps its own gradient
            optim.zero_grad()
            loss.backward(gradient=torch.ones_like(loss))
            optim.step()
            scheduler.step()

        # post-processing
        out = utils.postprocess(out, lidar, tol=args.tol)
        points_gen = utils.flatten(out["points"])
        points_ref = utils.flatten(xyz_ref)
        depth_gen = lidar.revert_depth(inv_gen, norm=False)
        depth_ref = lidar.revert_depth(inv_ref, norm=False)

        # evaluation
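The `backward(gradient=torch.ones_like(loss))` call in this snippet works because, for a non-scalar loss, supplying an all-ones vector computes the same vector-Jacobian product as `.sum().backward()`, so each sample's latent receives the gradient of its own loss term. A minimal, self-contained illustration, independent of the project's `G`, `utils`, and `lidar` objects:

import torch

latent = torch.randn(4, 8, requires_grad=True)
per_sample_loss = (latent ** 2).mean(dim=1)     # shape [4], one value per sample

per_sample_loss.backward(gradient=torch.ones_like(per_sample_loss))
grad_a = latent.grad.clone()

latent.grad = None
(latent ** 2).mean(dim=1).sum().backward()      # recompute, then backprop the sum
grad_b = latent.grad

assert torch.allclose(grad_a, grad_b)           # identical gradients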
Code example #6
File: train_lstm_gcn.py  Project: ahmadguze/gcn
for epoch in range(args.epochs):
    random.shuffle(train_list)
    t1 = time.time()
    for file_name in train_list:
        # load one graph per file and convert it into the sparse feature/support
        # tensors and the masked-label tensors expected by the network
        adj, features, train_labels, weight_mask = load_single_graph4lstm_gcn(
            file_name)

        feature, support = convert_sparse_train_input1(adj, features)
        train_labels, weight_mask = convert_loss_input(train_labels,
                                                       weight_mask)

        out = net((feature, support))
        out = out[0]
        loss = masked_loss(out, train_labels, weight_mask)

        #loss = weighted_loss(out, train_labels, weight_mask)

        # print("cross entropy loss: {:.5f} ".format(loss.item()))
        loss += args.weight_decay * net.l2_loss()

        # acc = masked_acc(out, train_labels)
        acc = cal_accuracy(out, train_labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        t2 = time.time()
        if (epoch + 1) % 10 == 0:
            print("Epoch:", '%04d' % (epoch + 1),
                  "time: {:.5f}, loss: {:.5f}, acc: {:.5f}".format(
                      t2 - t1, loss.item(), acc.item()))