def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
    """Train `model` for one epoch on dense (tensor-batched) inputs.

    Gradients are accumulated over `batch_size` loader iterations before each
    optimizer step, so the effective batch is batch_size * loader-batch.

    Returns:
        (mean epoch loss, mean epoch accuracy, optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    optimizer.zero_grad()
    for iter, (x_with_node_feat, labels) in enumerate(data_loader):
        x_with_node_feat = x_with_node_feat.to(device)
        labels = labels.to(device)

        scores = model.forward(x_with_node_feat)
        loss = model.loss(scores, labels)
        loss.backward()

        # BUG FIX: the original tested `iter % batch_size == 0`, which fires
        # on iter 0 (stepping after a single accumulated batch) and silently
        # discards gradients accumulated after the last multiple of
        # batch_size. Step after every full accumulation window instead, and
        # flush the remainder once the loop ends.
        if (iter + 1) % batch_size == 0:
            optimizer.step()
            optimizer.zero_grad()

        epoch_loss += loss.detach().item()
        epoch_train_acc += accuracy(scores, labels)

    if (iter + 1) % batch_size != 0:
        # leftover gradients from the final, partial accumulation window
        optimizer.step()
        optimizer.zero_grad()

    epoch_loss /= (iter + 1)
    epoch_train_acc /= (iter + 1)

    return epoch_loss, epoch_train_acc, optimizer
def train_epoch(model, optimizer, device, data_loader, epoch):
    """Run one training epoch over a DGL loader that also yields snorm terms.

    Returns:
        (mean epoch loss, mean epoch accuracy, optimizer), both metrics
        averaged over the number of batches.
    """
    model.train()
    running_loss = 0.0
    running_acc = 0.0
    n_batches = 0
    for batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e in data_loader:
        node_feat = batch_graphs.ndata['feat'].to(device)   # num_nodes x feat
        edge_feat = batch_graphs.edata['feat'].to(device)
        snorm_n = batch_snorm_n.to(device)                  # num_nodes x 1
        snorm_e = batch_snorm_e.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        batch_scores = model.forward(batch_graphs, node_feat, edge_feat,
                                     snorm_n, snorm_e)
        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.detach().item()
        running_acc += accuracy(batch_scores, batch_labels)
        n_batches += 1

    epoch_loss = running_loss / n_batches
    epoch_train_acc = running_acc / n_batches

    return epoch_loss, epoch_train_acc, optimizer
def train_epoch(model, optimizer, device, data_loader, epoch):
    """Run one training epoch with periodic progress printing.

    Loss is averaged over batches; accuracy is averaged over samples
    (accumulated via `nb_data`).

    Returns:
        (mean epoch loss, sample-averaged epoch accuracy, optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0
    print('Epoch [{}]: learning rate: [{:.6f}]'.format(
        epoch + 1, optimizer.param_groups[0]['lr']))
    for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_snorm_e = batch_snorm_e.to(device)
        batch_labels = batch_labels.to(device)
        batch_snorm_n = batch_snorm_n.to(device)  # num x 1

        optimizer.zero_grad()
        batch_scores = model.forward(batch_graphs, batch_x, batch_e,
                                     batch_snorm_n, batch_snorm_e)
        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.detach().item()
        epoch_train_acc += accuracy(batch_scores, batch_labels)
        nb_data += batch_labels.size(0)
        if iter % 20 == 0:
            # BUG FIX: the original formatted the raw `loss` tensor with
            # '{:.4f}', which raises TypeError on PyTorch versions whose
            # Tensor.__format__ rejects format specs; extract the Python
            # float explicitly.
            print('Iter [{}]: loss [{:.4f}] Train Acc [{:.4f}]'.format(
                iter, loss.detach().item(), epoch_train_acc / (nb_data)))

    epoch_loss /= (iter + 1)
    epoch_train_acc /= nb_data

    return epoch_loss, epoch_train_acc, optimizer
def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
    """Run one training epoch over a sparse (DGL batched-graph) loader.

    When the dataset carries Laplacian positional encodings ('pos_enc' in
    ndata), each dimension's sign is randomly flipped per batch, since
    eigenvectors are only defined up to sign.

    Returns:
        (mean epoch loss, mean epoch accuracy, optimizer)
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_labels = batch_labels.to(device)
        optimizer.zero_grad()

        # BUG FIX: narrowed from a bare `except:`, which also swallowed real
        # errors raised inside forward(). KeyError covers graphs without
        # 'pos_enc'; TypeError covers models whose forward() does not accept
        # the extra argument.
        try:
            batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
            sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
            sign_flip[sign_flip >= 0.5] = 1.0
            sign_flip[sign_flip < 0.5] = -1.0
            batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
        except (KeyError, TypeError):
            batch_scores = model.forward(batch_graphs, batch_x, batch_e)

        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.detach().item()
        epoch_train_acc += accuracy(batch_scores, batch_labels)

    epoch_loss /= (iter + 1)
    epoch_train_acc /= (iter + 1)

    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_sparse(model, device, data_loader, epoch):
    """Evaluate on a sparse (DGL batched-graph) loader, no gradients.

    Returns:
        (mean test loss, mean test accuracy), both averaged over batches.
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    with torch.no_grad():
        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
            batch_graphs = batch_graphs.to(device)
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)

            # BUG FIX: narrowed from a bare `except:`, which also swallowed
            # real errors raised inside forward(). KeyError covers graphs
            # without 'pos_enc'; TypeError covers models whose forward()
            # does not accept the extra argument.
            try:
                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
            except (KeyError, TypeError):
                batch_scores = model.forward(batch_graphs, batch_x, batch_e)

            loss = model.loss(batch_scores, batch_labels)
            epoch_test_loss += loss.detach().item()
            epoch_test_acc += accuracy(batch_scores, batch_labels)

        epoch_test_loss /= (iter + 1)
        epoch_test_acc /= (iter + 1)

    return epoch_test_loss, epoch_test_acc
def evaluate_network_sparse(model, device, data_loader, epoch):
    """Evaluate on a loader that also yields per-batch node/edge size tensors.

    Loss is averaged over batches; accuracy is averaged over samples.

    Returns:
        (mean test loss, sample-averaged test accuracy)
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    total_samples = 0
    n_batches = 0
    with torch.no_grad():
        for batch_graphs, batch_labels, batch_node_size, batch_edge_size in data_loader:
            node_feat = batch_graphs.ndata['feat'].to(device)
            edge_feat = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)

            batch_scores = model.forward(batch_graphs, node_feat, edge_feat,
                                         node_size=batch_node_size,
                                         edge_size=batch_edge_size)
            loss = model.loss(batch_scores, batch_labels)

            total_loss += loss.detach().item()
            total_acc += accuracy(batch_scores, batch_labels)
            total_samples += batch_labels.size(0)
            n_batches += 1

    epoch_test_loss = total_loss / n_batches
    epoch_test_acc = total_acc / total_samples

    return epoch_test_loss, epoch_test_acc
def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
    """Run one training epoch over sparse DGL batches.

    Loss is averaged over batches; accuracy is averaged over samples.

    Returns:
        (mean epoch loss, sample-averaged epoch accuracy, optimizer)
    """
    model.train()
    running_loss = 0.0
    running_acc = 0.0
    n_samples = 0
    n_batches = 0
    print()
    for batch_graphs, batch_labels in data_loader:
        node_feat = batch_graphs.ndata['feat'].to(device)  # num_nodes x feat
        edge_feat = batch_graphs.edata['feat'].to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        batch_scores = model.forward(batch_graphs, node_feat, edge_feat)
        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.detach().item()
        running_acc += accuracy(batch_scores, batch_labels)
        n_samples += batch_labels.size(0)
        n_batches += 1

    epoch_loss = running_loss / n_batches
    epoch_train_acc = running_acc / n_samples

    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network(model, device, data_loader, epoch):
    """Evaluate on a DGL loader with snorm terms, reporting loss, acc and F1.

    Returns:
        (mean test loss, mean test accuracy, mean test F1), each averaged
        over the number of batches.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    total_f1 = 0.0
    n_batches = 0
    with torch.no_grad():
        for batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e in data_loader:
            node_feat = batch_graphs.ndata['feat'].to(device)
            edge_feat = batch_graphs.edata['feat'].to(device)
            snorm_n = batch_snorm_n.to(device)
            snorm_e = batch_snorm_e.to(device)
            batch_labels = batch_labels.to(device)

            batch_scores = model.forward(batch_graphs, node_feat, edge_feat,
                                         snorm_n, snorm_e)
            loss = model.loss(batch_scores, batch_labels)

            total_loss += loss.detach().item()
            total_acc += accuracy(batch_scores, batch_labels)
            total_f1 += f1(batch_scores, batch_labels)
            n_batches += 1

    epoch_test_loss = total_loss / n_batches
    epoch_test_acc = total_acc / n_batches
    epoch_test_f1 = total_f1 / n_batches

    return epoch_test_loss, epoch_test_acc, epoch_test_f1
def evaluate_network_sparse(model, device, data_loader, epoch, framework='pyg'):
    """Evaluate a sparse-graph model under either the PyG or DGL pipeline.

    Args:
        framework: 'pyg' (PyTorch Geometric Batch objects) or 'dgl'
            ((graph, labels) tuples). Anything else raises ValueError.

    Returns:
        (mean test loss, mean test accuracy), both averaged over batches.

    Raises:
        ValueError: if `framework` is neither 'pyg' nor 'dgl' (the original
            silently returned None in that case).
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    if framework == 'pyg':
        with torch.no_grad():
            for iter, batch_graphs in enumerate(data_loader):
                batch_x = batch_graphs.x.to(device)  # num x feat
                edge_index = batch_graphs.edge_index.to(device)
                batch_e = batch_graphs.edge_attr.to(device)
                batch_labels = batch_graphs.y.long().to(device)
                # BUG FIX: narrowed from a bare `except:`, which also hid
                # real errors raised inside forward(). AttributeError covers
                # a batch without pos_enc; TypeError covers models whose
                # forward() does not accept it.
                try:
                    batch_pos_enc = batch_graphs.pos_enc.to(device)
                    batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
                except (AttributeError, TypeError):
                    batch_scores = model.forward(batch_x, edge_index, batch_e)
                loss = model.loss(batch_scores, batch_labels)
                epoch_test_loss += loss.detach().item()
                epoch_test_acc += accuracy(batch_scores, batch_labels)
            epoch_test_loss /= (iter + 1)
            epoch_test_acc /= (iter + 1)
        return epoch_test_loss, epoch_test_acc
    elif framework == 'dgl':
        with torch.no_grad():
            for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
                batch_x = batch_graphs.ndata['feat'].to(device)
                batch_e = batch_graphs.edata['feat'].to(device)
                batch_labels = batch_labels.to(device)
                # newer DGL versions require the graph itself on the device
                batch_graphs = batch_graphs.to(device)
                # BUG FIX: same narrowing as above; KeyError covers graphs
                # without a 'pos_enc' node feature.
                try:
                    batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                    batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
                except (KeyError, TypeError):
                    batch_scores = model.forward(batch_graphs, batch_x, batch_e)
                loss = model.loss(batch_scores, batch_labels)
                epoch_test_loss += loss.detach().item()
                epoch_test_acc += accuracy(batch_scores, batch_labels)
            epoch_test_loss /= (iter + 1)
            epoch_test_acc /= (iter + 1)
        return epoch_test_loss, epoch_test_acc
    raise ValueError("framework must be 'pyg' or 'dgl', got {!r}".format(framework))
def evaluate_network(model, graph, nfeat, efeat, norm_n, norm_e, mask, labels, epoch):
    """Full-graph (transductive) evaluation on the nodes selected by `mask`.

    Returns:
        (test loss, test accuracy) computed on the masked nodes only.
    """
    model.eval()
    with torch.no_grad():
        logits = model.forward(graph, nfeat, efeat, norm_n, norm_e)
        masked_logits = logits[mask]
        masked_labels = labels[mask]
        loss = model.loss(masked_logits, masked_labels)
        epoch_test_loss = loss.detach().item()
        epoch_test_acc = accuracy(masked_logits, masked_labels)
    return epoch_test_loss, epoch_test_acc
def train_epoch(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, train_mask, labels, epoch):
    """Single full-graph training step (transductive node classification).

    The whole graph is forwarded once; the loss and accuracy are computed
    only on the nodes selected by `train_mask`.

    Returns:
        (train loss, train accuracy, optimizer)
    """
    model.train()
    optimizer.zero_grad()

    logits = model(graph, nfeat, efeat, norm_n, norm_e)
    train_logits = logits[train_mask]
    train_labels = labels[train_mask]
    loss = model.loss(train_logits, train_labels)

    loss.backward()
    optimizer.step()

    epoch_loss = loss.detach().item()
    epoch_train_acc = accuracy(train_logits, train_labels)

    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network_dense(model, device, data_loader, epoch):
    """Evaluate a dense-tensor model without gradients.

    Returns:
        (mean test loss, mean test accuracy), both averaged over batches.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    n_batches = 0
    with torch.no_grad():
        for inputs, targets in data_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            preds = model.forward(inputs)
            loss = model.loss(preds, targets)

            total_loss += loss.detach().item()
            total_acc += accuracy(preds, targets)
            n_batches += 1

    epoch_test_loss = total_loss / n_batches
    epoch_test_acc = total_acc / n_batches

    return epoch_test_loss, epoch_test_acc
def evaluate_network_sparse(model, device, dataset, val_idx):
    """Full-graph transductive evaluation restricted to `val_idx` nodes.

    Returns:
        (validation loss, validation accuracy normalized by len(val_idx))
    """
    model.eval()
    with torch.no_grad():
        # NOTE(review): edge attributes are read from `dataset` itself while
        # features/labels/edges come from dataset.dataset[0] — preserved
        # exactly as in the original; confirm this asymmetry is intended.
        node_feat = dataset.dataset[0].x.to(device)
        edge_feat = dataset.edge_attr.to(device)
        all_labels = dataset.dataset[0].y.long().to(device)
        edge_index = dataset.dataset[0].edge_index.long().to(device)
        val_idx = val_idx.to(device)

        val_scores = model.forward(node_feat, edge_index, edge_feat)[val_idx]
        loss = model.loss(val_scores, all_labels[val_idx]).to(torch.float)

        epoch_test_loss = loss.detach().item()
        epoch_test_acc = accuracy(val_scores, all_labels[val_idx]) / val_idx.size(0)
    return epoch_test_loss, epoch_test_acc
def train_epoch_sparse(model, optimizer, device, dataset, train_idx):
    """Single full-graph transductive training step on `train_idx` nodes.

    Returns:
        (train loss, train accuracy normalized by len(train_idx), optimizer)
    """
    model.train()
    # NOTE(review): edge attributes are read from `dataset` itself while
    # features/labels/edges come from dataset.dataset[0] — preserved exactly
    # as in the original; confirm this asymmetry is intended.
    node_feat = dataset.dataset[0].x.to(device)
    edge_feat = dataset.edge_attr.to(device)
    all_labels = dataset.dataset[0].y.long().to(device)
    edge_index = dataset.dataset[0].edge_index.long().to(device)
    train_idx = train_idx.to(device)

    optimizer.zero_grad()
    train_scores = model.forward(node_feat, edge_index, edge_feat)[train_idx]
    loss = model.loss(train_scores, all_labels[train_idx]).to(torch.float)
    loss.backward()
    optimizer.step()

    epoch_loss = loss.detach().item()
    epoch_train_acc = accuracy(train_scores, all_labels[train_idx]) / train_idx.size(0)

    return epoch_loss, epoch_train_acc, optimizer
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, framework='pyg'):
    """Train for one epoch under either the PyG or the DGL data pipeline.

    Laplacian positional encodings, when present, get a random per-dimension
    sign flip each batch (eigenvectors are only defined up to sign).

    Args:
        framework: 'pyg' (PyTorch Geometric Batch objects) or 'dgl'
            ((graph, labels) tuples). Anything else raises ValueError.

    Returns:
        (mean epoch loss, mean epoch accuracy, optimizer)

    Raises:
        ValueError: if `framework` is neither 'pyg' nor 'dgl' (the original
            silently returned None in that case).
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    if framework == 'pyg':
        for iter, batch_graphs in enumerate(data_loader):
            batch_x = batch_graphs.x.to(device)  # num x feat
            edge_index = batch_graphs.edge_index.to(device)
            batch_e = batch_graphs.edge_attr.to(device)
            batch_labels = batch_graphs.y.long().to(device)
            optimizer.zero_grad()
            # BUG FIX: narrowed from a bare `except:`, which also hid real
            # errors raised inside forward(). AttributeError covers a batch
            # without pos_enc; TypeError covers models whose forward() does
            # not accept it.
            try:
                batch_pos_enc = batch_graphs.pos_enc.to(device)
                sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
                sign_flip[sign_flip >= 0.5] = 1.0
                sign_flip[sign_flip < 0.5] = -1.0
                batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
                batch_scores = model.forward(batch_x, edge_index, batch_e, batch_pos_enc)
            except (AttributeError, TypeError):
                batch_scores = model.forward(batch_x, edge_index, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_train_acc += accuracy(batch_scores, batch_labels)
        epoch_loss /= (iter + 1)
        epoch_train_acc /= (iter + 1)
        return epoch_loss, epoch_train_acc, optimizer
    elif framework == 'dgl':
        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)
            # newer DGL versions require the graph itself on the device
            batch_graphs = batch_graphs.to(device)
            optimizer.zero_grad()
            # BUG FIX: same narrowing as above; KeyError covers graphs
            # without a 'pos_enc' node feature.
            try:
                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
                sign_flip = torch.rand(batch_pos_enc.size(1)).to(device)
                sign_flip[sign_flip >= 0.5] = 1.0
                sign_flip[sign_flip < 0.5] = -1.0
                batch_pos_enc = batch_pos_enc * sign_flip.unsqueeze(0)
                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
            except (KeyError, TypeError):
                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.detach().item()
            epoch_train_acc += accuracy(batch_scores, batch_labels)
        epoch_loss /= (iter + 1)
        epoch_train_acc /= (iter + 1)
        return epoch_loss, epoch_train_acc, optimizer
    raise ValueError("framework must be 'pyg' or 'dgl', got {!r}".format(framework))