Code example #1
import torch
import torch.optim as optim
from torch.utils.data import DataLoader  # the original project may use torch_geometric's DataLoader instead


def run(config, train_dataset, val_dataset):
    # Use the GPU when one is available, otherwise fall back to the CPU
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = GCN(1, 64, 4, 0.01).to(device)
    print("Training on {}, batch_size is {}, lr is {}".format(
        device, config['batch_size'], config['lr']))
    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=config['lr'])

    train_loader = DataLoader(train_dataset,
                              batch_size=config['batch_size'],
                              shuffle=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=config['batch_size'],
                            shuffle=False)  # no need to shuffle validation data

    trainer = Trainer(model, train_loader, val_loader, criterion, optimizer,
                      config, device)
    train_acc, train_loss, val_acc, val_loss = trainer.train()
    return train_acc, train_loss, val_acc, val_loss
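A minimal sketch of how run might be invoked, assuming the two datasets are built elsewhere; the config keys are the ones the function reads above, and the values are illustrative:

config = {'batch_size': 32, 'lr': 1e-3}  # illustrative hyperparameters
train_acc, train_loss, val_acc, val_loss = run(config, train_dataset, val_dataset)
print('final validation loss: {}'.format(val_loss))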
Code example #2
# Build a DGL graph from the scipy sparse adjacency matrix
# (dgl.from_scipy replaces the deprecated DGLGraph.from_scipy_sparse_matrix)
G = dgl.from_scipy(adj)
if dataset == "nba":
    features = feature_norm(features)

#%%
# Binarize the sensitive attribute when one is specified
if sens_attr:
    sens[sens > 0] = 1
# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

if args.cuda:
    # Move the model and the tensors it consumes to the GPU
    model.cuda()
    features = features.cuda()
    # adj = adj.cuda()
    sens = sens.cuda()
    # idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
    idx_sens_train = idx_sens_train.cuda()

from sklearn.metrics import accuracy_score, roc_auc_score, f1_score
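The excerpt stops right after importing the metrics, so the loop that trains this sensitive-attribute estimator is not shown. A minimal sketch of one epoch under stated assumptions: the GCN's forward signature is (graph, features), sens holds binary labels, and every name beyond the excerpt is a placeholder:

criterion = torch.nn.BCEWithLogitsLoss()

model.train()
optimizer.zero_grad()
output = model(G, features)  # assumed forward signature: (graph, features)
# Train only on the nodes whose sensitive attribute is observed
loss = criterion(output[idx_sens_train],
                 sens[idx_sens_train].unsqueeze(1).float())
loss.backward()
optimizer.step()

# Evaluate the estimator on the test nodes
model.eval()
with torch.no_grad():
    prob = torch.sigmoid(model(G, features)).squeeze(1)
acc = accuracy_score(sens[idx_test].cpu().numpy(),
                     (prob[idx_test] > 0.5).cpu().numpy())
auc = roc_auc_score(sens[idx_test].cpu().numpy(),
                    prob[idx_test].cpu().numpy())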
Code example #3
File: CSL_Skeleton_GCN.py, Project: iliasprc/SLR-1
    # NOTE: the excerpt opens mid-statement; the loader construction below is
    # reconstructed, and the variable names are assumptions.
    val_loader = DataLoader(val_dataset,
                            batch_size=batch_size,
                            num_workers=4,
                            pin_memory=True)
    # Create model
    model = GCN(in_channels=in_channels,
                num_class=num_classes,
                graph_args={
                    'layout': 'ntu-rgb+d'
                },
                edge_importance_weighting=True).to(device)
    # Run the model on multiple GPUs when available
    if torch.cuda.device_count() > 1:
        logger.info("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    # Create loss criterion & optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Start training
    logger.info("Training Started".center(60, '#'))
    for epoch in range(epochs):
        # Train the model
        train_epoch(model, criterion, optimizer, train_loader, device, epoch,
                    logger, log_interval, writer)

        # Validate the model
        val_epoch(model, criterion, val_loader, device, epoch, logger, writer)

        # Save a checkpoint after each epoch (the excerpt is truncated here;
        # the filename pattern below is illustrative)
        torch.save(
            model.state_dict(),
            os.path.join(model_path, 'gcn_epoch{:03d}.pth'.format(epoch + 1)))
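One caveat about the checkpointing above: once the model is wrapped in nn.DataParallel, every key in model.state_dict() gains a "module." prefix, so the saved file will not load directly into an unwrapped model. A common pattern (not shown in the excerpt) is to save the underlying module instead:

# Unwrap nn.DataParallel before saving so the checkpoint loads cleanly later
state = (model.module.state_dict()
         if isinstance(model, nn.DataParallel) else model.state_dict())
torch.save(state, checkpoint_path)  # checkpoint_path is a placeholder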
Code example #4
class FairGNN(nn.Module):
    def __init__(self, nfeat, args):
        super(FairGNN, self).__init__()

        nhid = args.num_hidden
        dropout = args.dropout
        # GCN head that estimates the (partially observed) sensitive attribute
        self.estimator = GCN(nfeat, args.hidden, 1, dropout)
        # Backbone GNN producing the node representations
        self.GNN = get_model(nfeat, args)
        # Label classifier and adversary, both reading the GNN representations
        self.classifier = nn.Linear(nhid, 1)
        self.adv = nn.Linear(nhid, 1)

        G_params = list(self.GNN.parameters()) + list(
            self.classifier.parameters()) + list(self.estimator.parameters())
        self.optimizer_G = torch.optim.Adam(G_params,
                                            lr=args.lr,
                                            weight_decay=args.weight_decay)
        self.optimizer_A = torch.optim.Adam(self.adv.parameters(),
                                            lr=args.lr,
                                            weight_decay=args.weight_decay)

        self.args = args
        self.criterion = nn.BCEWithLogitsLoss()

        self.G_loss = 0
        self.A_loss = 0

    def forward(self, g, x):
        s = self.estimator(g, x)
        z = self.GNN(g, x)
        y = self.classifier(z)
        return y, s

    def optimize(self, g, x, labels, idx_train, sens, idx_sens_train):
        self.train()

        ### update the estimator, GNN and classifier (adversary frozen)
        self.adv.requires_grad_(False)
        self.optimizer_G.zero_grad()

        s = self.estimator(g, x)
        h = self.GNN(g, x)
        y = self.classifier(h)

        s_g = self.adv(h)

        # Estimated sensitive attribute; substitute the ground truth where known
        s_score = torch.sigmoid(s.detach())
        # s_score = (s_score > 0.5).float()
        s_score[idx_sens_train] = sens[idx_sens_train].unsqueeze(1).float()
        y_score = torch.sigmoid(y)
        # Absolute covariance between the estimated sensitive attribute and the
        # prediction, used as a fairness regularizer
        self.cov = torch.abs(
            torch.mean((s_score - torch.mean(s_score)) *
                       (y_score - torch.mean(y_score))))

        self.cls_loss = self.criterion(y[idx_train],
                                       labels[idx_train].unsqueeze(1).float())
        self.adv_loss = self.criterion(s_g, s_score)

        # Classification loss plus the covariance penalty, minus the adversary
        # loss (the generator is rewarded for fooling the adversary)
        self.G_loss = (self.cls_loss + self.args.alpha * self.cov -
                       self.args.beta * self.adv_loss)
        self.G_loss.backward()
        self.optimizer_G.step()

        ## update the adversary on representations detached from the generator
        self.adv.requires_grad_(True)
        self.optimizer_A.zero_grad()
        s_g = self.adv(h.detach())
        self.A_loss = self.criterion(s_g, s_score)
        self.A_loss.backward()
        self.optimizer_A.step()
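A minimal sketch of driving FairGNN during training, assuming a DGL graph g, node features, binary labels, and the index tensors used by optimize; the epoch count and logging interval are illustrative:

model = FairGNN(nfeat=features.shape[1], args=args)

for epoch in range(args.epochs):  # args.epochs is an assumption
    # One alternating update: estimator/GNN/classifier first, then the adversary
    model.optimize(g, features, labels, idx_train, sens, idx_sens_train)
    if epoch % 10 == 0:
        print('epoch {:3d} | cls {:.4f} | adv {:.4f} | cov {:.4f}'.format(
            epoch, model.cls_loss.item(), model.A_loss.item(),
            model.cov.item()))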
Code example #5
        # NOTE: the excerpt opens mid-statement inside a loop over dataset
        # splits; the leading arguments of the dataset constructor are
        # truncated, and `GraphData` is a placeholder name for the actual class.
        gdata = GraphData(...,  # leading arguments truncated in the excerpt
                          random_walk=random_walk,
                          n_graph_subsampling=0,
                          graph_node_subsampling=args.graph_node_subsampling,
                          graph_subsampling_rate=args.graph_subsampling_rate)

        # Build a PyTorch loader over the graph data; shuffle only the train split
        loader = torch.utils.data.DataLoader(gdata,
                                             batch_size=args.batch_size,
                                             shuffle=split.find('train') >= 0,
                                             num_workers=args.threads,
                                             drop_last=False)
        loaders.append(loader)

    # Total number of trainable parameters
    c = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('N trainable parameters:', c)
         
    # Optimizer over the trainable parameters only
    optimizer = optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
        betas=(0.5, 0.999))

    # Decay the learning rate by 10x at epochs 20 and 30
    scheduler = lr_scheduler.MultiStepLR(optimizer, [20, 30], gamma=0.1)

    # Train function (the excerpt is truncated shortly after this point)
    def train(train_loader):
        total_time_iter = 0
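The excerpt ends before the epoch loop, so it never shows the scheduler being advanced; with MultiStepLR, scheduler.step() must be called once per epoch for the decays at epochs 20 and 30 to take effect. A minimal sketch, assuming args.epochs and the loaders list built above (train-split loader first):

for epoch in range(args.epochs):  # args.epochs is an assumption
    train(loaders[0])    # train-split loader built above
    scheduler.step()     # advance the MultiStepLR schedule once per epoch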