def train_model_time(dataset_str, depth=None, res_connection=False, n_runs=10):
    """Measure average per-iteration training and inference time of MutipleGCN.

    Loads *dataset_str*, builds a MutipleGCN of the given depth, then times
    ``n_runs`` training steps and ``n_runs`` forward-only evaluations,
    reporting the mean wall-clock time of each.

    Parameters
    ----------
    dataset_str : dataset name passed through to ``load_data``.
    depth : number of GCN units (``ngcu``) in the model.
    res_connection : whether the model uses residual connections.
    n_runs : iterations to average over (default 10, as before).

    Returns
    -------
    (training_time, inference_time) : mean seconds per iteration.
    """
    # Only the adjacency/features/labels are needed; every node index is
    # used for timing, so the dataset's own splits are ignored.
    A_norm, features, labels, _, _, _ = load_data(dataset_str)
    idx_train = torch.LongTensor(np.arange(len(features)))

    def train(model, idx_train):
        # One full optimization step; returns (loss, accuracy) on idx_train.
        model.train()
        output = model(features)
        loss_train = loss(output, labels, idx_train)
        acc_train = accuracy(output, labels, idx_train)
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()
        return loss_train.item(), acc_train.item()

    def evaluate(model, idx):
        # Forward pass only. no_grad() stops autograd from recording the
        # graph, so the inference timing is not skewed by gradient
        # bookkeeping (the original measured inference with autograd on).
        model.eval()
        with torch.no_grad():
            output = model(features)
            loss_ = loss(output, labels, idx)
            acc_ = accuracy(output, labels, idx)
        return loss_.item(), acc_.item()

    # `hidden`, `dropout`, `lr`, `weight_decay`, `device` are module-level
    # configuration globals (defined outside this chunk).
    model = MutipleGCN(adj=A_norm,
                       ngcu=depth,
                       nfeat=features.shape[1],
                       nhid=hidden,
                       nclass=labels.max().item() + 1,
                       dropout=dropout,
                       res_connection=res_connection).to(device=device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr,
                                 weight_decay=weight_decay)

    # Average over n_runs iterations for a stable estimate.
    t_start = time.time()
    for _ in range(n_runs):
        loss_train, acc_train = train(model, idx_train)
    training_time = (time.time() - t_start) / n_runs

    t_start = time.time()
    for _ in range(n_runs):
        loss_val, acc_val = evaluate(model, idx_train)
    inference_time = (time.time() - t_start) / n_runs

    return training_time, inference_time
# NOTE(review): fragment — the enclosing class header and the __init__
# signature are outside this chunk; the trailing train() is truncated.
        # Two-layer GCN: nfeat -> nhid -> nclass.
        self.gc1 = GCN(nfeat, nhid)
        self.gc2 = GCN(nhid, nclass)
        self.dropout = dropout  # dropout probability applied in forward()

    def forward(self, x, adj):
        """Two-layer GCN forward pass.

        Applies gc1 + ReLU, dropout (active only in training mode),
        then gc2, and returns per-node log-probabilities.
        """
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        return F.log_softmax(x, dim=1)


# Load data
data_set = 'cora'
#data_set = 'citeseer'
adj, features, labels, idx_train, idx_val, idx_test = load_data(data_set)

# Model and optimizer
model = GCN_Net(nfeat=features.shape[1],
                nhid=args.hidden,
                nclass=labels.max().item() + 1,
                dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)


def train(epoch):
    # NOTE(review): truncated here — the rest of train() lies outside
    # this chunk.
    t = time.time()
    model.train()
def train_model_depth(dataset_str, depth=None, res_connection=False):
    """5-fold cross-validated accuracy of a MutipleGCN of the given depth.

    For every fold a fresh model is trained for ``epochs`` epochs; after
    each epoch the train- and test-split accuracies (computed in eval mode)
    are appended to the result lists.

    Parameters
    ----------
    dataset_str : dataset name passed through to ``load_data``.
    depth : number of GCN units (``ngcu``) in the model.
    res_connection : whether the model uses residual connections.

    Returns
    -------
    (total_train_acc, total_test_acc) : per-epoch accuracies concatenated
    over all folds.
    """
    # The graph object and split indices returned by load_data are unused:
    # this routine builds its own folds with KFold below.
    _, A_norm, features, labels, _, _, _ = load_data(dataset_str)

    def train(idx_train):
        # One optimization step; returns (loss, accuracy) on idx_train.
        model.train()
        output = model(features)
        loss_train = loss(output, labels, idx_train)
        acc_train = accuracy(output, labels, idx_train)
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()
        return loss_train.item(), acc_train.item()

    def evaluate(idx):
        # Forward pass only; no_grad() skips autograd-graph construction
        # (pure speed/memory win — the original tracked gradients here).
        model.eval()
        with torch.no_grad():
            output = model(features)
            loss_ = loss(output, labels, idx)
            acc_ = accuracy(output, labels, idx)
        return loss_.item(), acc_.item()

    nfolds = 5
    kf = KFold(n_splits=nfolds)
    index_list = np.arange(len(features))
    total_train_acc = []
    total_test_acc = []
    # Removed dead early-stopping scaffolding from the original
    # (t_total / epoch_no_improvement / min_val_loss / t were assigned
    # but never read).
    for idx_train, idx_test in kf.split(index_list):
        # Fresh model and optimizer per fold so folds share no weights.
        # `hidden`, `dropout`, `lr`, `weight_decay`, `device`, `epochs`
        # are module-level configuration globals.
        model = MutipleGCN(adj=A_norm,
                           ngcu=depth,
                           nfeat=features.shape[1],
                           nhid=hidden,
                           nclass=labels.max().item() + 1,
                           dropout=dropout,
                           res_connection=res_connection).to(device=device)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr,
                                     weight_decay=weight_decay)
        idx_train = torch.LongTensor(idx_train)
        idx_test = torch.LongTensor(idx_test)

        for epoch in tqdm(range(epochs)):
            train(idx_train)
            # Re-evaluate the training split in eval mode (dropout off) so
            # the recorded train accuracy is comparable to test accuracy.
            loss_train, acc_train = evaluate(idx_train)
            loss_val, acc_val = evaluate(idx_test)
            total_train_acc.append(acc_train)
            total_test_acc.append(acc_val)

    return total_train_acc, total_test_acc