import time

import torch

from train.metrics import binary_f1_score  # metric helper (module path assumed)


def train_epoch_dense(model, optimizer, device, data_loader, epoch,
                      batch_size):

    model.train()
    epoch_loss = 0
    epoch_train_f1 = 0
    # dense models process one graph per iteration, so gradients are
    # accumulated and the optimizer only steps every `batch_size` graphs
    optimizer.zero_grad()
    for iter, (x_no_edge_feat, x_with_edge_feat, labels,
               edge_list) in enumerate(data_loader):
        if x_no_edge_feat is not None:
            x_no_edge_feat = x_no_edge_feat.to(device)
        if x_with_edge_feat is not None:
            x_with_edge_feat = x_with_edge_feat.to(device)
        labels = labels.to(device)
        edge_list = edge_list[0].to(device), edge_list[1].to(device)

        scores = model(x_no_edge_feat, x_with_edge_feat, edge_list)
        loss = model.loss(scores, labels)
        loss.backward()

        if iter % batch_size == 0:  # step at each accumulation boundary
            optimizer.step()
            optimizer.zero_grad()

        epoch_loss += loss.detach().item()
        epoch_train_f1 += binary_f1_score(scores, labels)
    epoch_loss /= (iter + 1)
    epoch_train_f1 /= (iter + 1)

    return epoch_loss, epoch_train_f1, optimizer
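All of these loops lean on a binary_f1_score helper that is imported rather than defined here. A minimal sketch of what such a helper could look like in plain PyTorch, assuming scores are raw logits for the positive class (thresholded at 0) and labels are 0/1 tensors; both are assumptions, not taken from this code:

import torch

def binary_f1_score(scores, targets):
    # Hypothetical stand-in for the imported metric: binarize logits at 0,
    # then return the F1 score of the positive class as a Python float.
    preds = (scores.detach().view(-1) > 0).float()
    targets = targets.detach().view(-1).float()
    tp = (preds * targets).sum()
    precision = tp / preds.sum().clamp(min=1)
    recall = tp / targets.sum().clamp(min=1)
    f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)
    return f1.item()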
Example #2
def train_epoch(model, optimizer, device, data_loader, epoch):

    model.train()
    epoch_loss = 0
    epoch_train_f1 = 0
    for iter, (batch_graphs, batch_labels, batch_snorm_n,
               batch_snorm_e) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_labels = batch_labels.to(device)
        batch_snorm_e = batch_snorm_e.to(device)
        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
        optimizer.zero_grad()

        batch_scores = model(batch_graphs, batch_x, batch_e,
                             batch_snorm_n, batch_snorm_e)
        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        epoch_train_f1 += binary_f1_score(batch_scores, batch_labels)
    epoch_loss /= (iter + 1)
    epoch_train_f1 /= (iter + 1)

    return epoch_loss, epoch_train_f1, optimizer
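train_epoch expects each batch as a 4-tuple that already carries per-node and per-edge size normalizers. A sketch of a DGL collate function that could build that tuple, using 1/sqrt(|V|) per node and 1/sqrt(|E|) per edge as the normalizers (this particular scaling, and the per-graph label layout, are assumptions):

import torch
import dgl

def collate(samples):
    # Hypothetical collate for (graph, labels) pairs: batch the graphs and
    # build the tensors consumed as batch_snorm_n / batch_snorm_e above.
    graphs, labels = map(list, zip(*samples))
    snorm_n = torch.cat([
        torch.full((g.number_of_nodes(), 1), 1.0 / g.number_of_nodes()).sqrt()
        for g in graphs])
    snorm_e = torch.cat([
        torch.full((g.number_of_edges(), 1), 1.0 / g.number_of_edges()).sqrt()
        for g in graphs])
    # assumes each sample's labels is a 1-D tensor (e.g. one label per edge)
    return dgl.batch(graphs), torch.cat(labels), snorm_n, snorm_e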
Example #3
def evaluate_network_sparse(model, device, data_loader, epoch):

    model.eval()
    epoch_test_loss = 0
    epoch_test_f1 = 0
    with torch.no_grad():
        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)

            batch_scores = model(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            epoch_test_loss += loss.detach().item()
            epoch_test_f1 += binary_f1_score(batch_scores, batch_labels)
        epoch_test_loss /= (iter + 1)
        epoch_test_f1 /= (iter + 1)

    return epoch_test_loss, epoch_test_f1
Example #4
def train_epoch(model, optimizer, device, data_loader, epoch):

    model.train()
    epoch_loss = 0
    epoch_train_f1 = 0

    print(
        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' | ' +
        'Epoch [{:>2d}]: learning rate: [{:.10f}]'.format(
            epoch + 1, optimizer.param_groups[0]['lr']))
    for iter, (batch_graphs, batch_labels, batch_snorm_n,
               batch_snorm_e) in enumerate(data_loader):

        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_labels = batch_labels.to(device)
        batch_snorm_e = batch_snorm_e.to(device)
        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
        optimizer.zero_grad()
        batch_scores = model(batch_graphs, batch_x, batch_e,
                             batch_snorm_n, batch_snorm_e)
        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        epoch_train_f1 += binary_f1_score(batch_scores, batch_labels)

        if iter % 10 == 0:
            print(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' | ' +
                'Iter [{:>3d}]: Loss [{:.4f}]   Train F1 [{:.4f}]'.format(
                    iter, loss.item(), epoch_train_f1 / (iter + 1)))

    print(
        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ' | ' +
        'Iter [{:>3d}]: Loss [{:.4f}]   Train F1 [{:.4f}]'.format(
            iter, loss.item(), epoch_train_f1 / (iter + 1)))
    epoch_loss /= (iter + 1)
    epoch_train_f1 /= (iter + 1)
    return epoch_loss, epoch_train_f1, optimizer
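This variant returns the optimizer and logs its current learning rate at the top of every epoch, which suggests the caller adjusts the rate between epochs. One way to wire that up is a plateau scheduler stepped on the validation loss; the scheduler choice and all argument names below are assumptions, not taken from this code:

import torch

def fit(model, optimizer, device, train_loader, val_loader, num_epochs):
    # Hypothetical outer loop: halve the learning rate when validation loss
    # stalls, so the LR printed by train_epoch changes over the run.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=10)
    for epoch in range(num_epochs):
        _, _, optimizer = train_epoch(model, optimizer, device,
                                      train_loader, epoch)
        val_loss, val_f1 = evaluate_network_sparse(
            model, device, val_loader, epoch)
        scheduler.step(val_loss)  # updates optimizer.param_groups[0]['lr']
        print('Epoch [{:>2d}]: Val Loss [{:.4f}]   Val F1 [{:.4f}]'.format(
            epoch + 1, val_loss, val_f1))
    return model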
Example #5
def evaluate_network_dense(model, device, data_loader, epoch):
    
    model.eval()
    epoch_test_loss = 0
    epoch_test_f1 = 0
    with torch.no_grad():
        for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
            if x_no_edge_feat is not None:
                x_no_edge_feat = x_no_edge_feat.to(device)
            if x_with_edge_feat is not None:
                x_with_edge_feat = x_with_edge_feat.to(device)
            labels = labels.to(device)
            edge_list = edge_list[0].to(device), edge_list[1].to(device)

            scores = model(x_no_edge_feat, x_with_edge_feat, edge_list)
            loss = model.loss(scores, labels)
            epoch_test_loss += loss.detach().item()
            epoch_test_f1 += binary_f1_score(scores, labels)
        epoch_test_loss /= (iter + 1)
        epoch_test_f1 /= (iter + 1)
        
    return epoch_test_loss, epoch_test_f1
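The dense pair composes the same way, with the extra batch_size argument setting the gradient-accumulation window. A minimal driver sketch, assuming the caller supplies the model, optimizer, and loaders (every name here is hypothetical):

def run_dense(model, optimizer, device, train_loader, val_loader,
              num_epochs, batch_size):
    # Hypothetical driver for train_epoch_dense / evaluate_network_dense;
    # tracks the best validation F1 observed across epochs.
    best_val_f1 = 0.0
    for epoch in range(num_epochs):
        train_loss, train_f1, optimizer = train_epoch_dense(
            model, optimizer, device, train_loader, epoch, batch_size)
        val_loss, val_f1 = evaluate_network_dense(
            model, device, val_loader, epoch)
        best_val_f1 = max(best_val_f1, val_f1)
        print('Epoch [{:>2d}]: Train F1 [{:.4f}]   Val F1 [{:.4f}]'.format(
            epoch + 1, train_f1, val_f1))
    return best_val_f1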