Example 1
import torch

# Build a Trainer whose depth, layer widths, and learning rate are
# sampled from an Optuna trial. GAE and Trainer are project classes.
def get_trainer(trial, dataloader):
    n_layers = trial.suggest_categorical('n_layer', [2, 3, 4])
    hidden_dims = []
    for i in range(n_layers):
        hidden_dim = int(
            trial.suggest_loguniform('hidden_dim_{}'.format(i), 4, 256))
        hidden_dims.append(hidden_dim)
    model = GAE(39, hidden_dims)  # 39 = input feature dimension
    lr = trial.suggest_loguniform('lr', 1e-6, 1e-2)
    optim = torch.optim.Adam(model.parameters(), lr=lr)
    trainer = Trainer(model, optim, dataloader)
    return trainer
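
For context, here is a minimal sketch of how get_trainer could be driven by an Optuna study. The objective function, the dataloader variable, and the assumption that Trainer.train() returns a validation loss are hypothetical, not part of the original snippet:

import optuna

def objective(trial):
    # dataloader is assumed to be defined in the enclosing scope
    trainer = get_trainer(trial, dataloader)
    # assumption: Trainer.train() runs training and returns a
    # validation loss for Optuna to minimize
    return trainer.train()

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)
print(study.best_params)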
Example 2
import os

import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from dgl import DGLGraph
from tqdm import tqdm

# `args` (an argparse namespace), `load_data`, and `GAE` are assumed to
# come from the surrounding project.

def main():
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # TODO: train/test split
    # load and preprocess the dataset
    data = load_data(args)
    features = torch.FloatTensor(data.features)
    in_feats = features.shape[1]
    print(features.shape)
    model = GAE(in_feats, [32, 16])
    model.train()
    optim = torch.optim.Adam(model.parameters(), lr=1e-2)
    # weighted BCE on logits, matching the
    # (input, target, pos_weight) call in the loop below
    loss_function = F.binary_cross_entropy_with_logits

    g = DGLGraph(data.graph)
    g.ndata['h'] = features

    n_epochs = 500
    losses = []
    print('Training Start')
    for epoch in tqdm(range(n_epochs)):
        # reset node features each epoch (in case the forward pass
        # overwrites g.ndata['h'])
        g.ndata['h'] = features
        # symmetric normalization D^{-1/2}, zeroing isolated nodes
        degs = g.in_degrees().float()
        norm = torch.pow(degs, -0.5)
        norm[torch.isinf(norm)] = 0
        g.ndata['norm'] = norm.unsqueeze(1)
        # dense target adjacency; pos_weight re-balances the rare
        # positive entries (edges) against the many zeros
        adj = g.adjacency_matrix().to_dense()
        pos_weight = torch.Tensor(
            [float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()])

        adj_logits = model(g)
        loss = loss_function(adj_logits, adj, pos_weight=pos_weight)
        optim.zero_grad()
        loss.backward()
        optim.step()
        losses.append(loss.item())
        print('Epoch: {:03d} | Loss: {:.5f}'.format(epoch, loss.item()))

    plt.plot(losses)
    plt.xlabel('epoch')
    plt.ylabel('train loss')
    plt.grid()
    plt.show()
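
A note on the pos_weight term above: with N nodes the dense adjacency has N*N entries, of which only adj.sum() are edges, so each positive entry is up-weighted by (N*N - #edges) / #edges. A self-contained check on a made-up 3-node graph:

import torch
import torch.nn.functional as F

# Toy adjacency: 2 edges out of 9 entries, so pos_weight = (9 - 2) / 2 = 3.5
adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 0.],
                    [0., 0., 0.]])
pos_weight = (adj.numel() - adj.sum()) / adj.sum()
logits = torch.zeros(3, 3)  # untrained model: sigmoid(0) = 0.5 everywhere

loss = F.binary_cross_entropy_with_logits(logits, adj, pos_weight=pos_weight)
print(loss)  # ~1.078: the 2 positives now carry as much total loss as the 7 zeros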