Example #1
 def score(self, epoch_counter):
     """
     Scoring on the test set.
     """
     print("\n\nModel evaluation.\n")
     start_time = time.time()
     self.model.eval()
     self.scores = []
     self.ground_truth = []
     for test_graph_pair in tqdm(self.testing_graphs):
         data = process_pair(test_graph_pair)
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         print("\n" + str(test_graph_pair) + "- " + "Similarity/Target: " +
               str(prediction) + " / " + str(target))
         self.scores.append(calculate_loss(prediction, target))
     print("--- %s seconds ---" % (time.time() - start_time))
     model_error = self.print_evaluation()
     print('\n\n >>>>>>>>>>>>>>>>>>\t' + str(model_error) + '\n')
     with open("./outputFiles/test/test_error_graph.txt",
               "a") as test_error_writer:
         test_error_writer.write(
             str(epoch_counter) + ',' + str(model_error) + '\n')
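All of these examples lean on the same small utility helpers (process_pair, calculate_normalized_ged, calculate_loss) that are not shown on this page. A minimal sketch in the SimGNN/funcGNN style, assuming each JSON pair file carries "labels_1", "labels_2" and "ged" keys and that the model predicts exp(-normalized GED), could look like this:

import json
import math

def process_pair(path):
    # read one graph-pair description from its JSON file
    with open(path) as source:
        return json.load(source)

def calculate_normalized_ged(data):
    # GED divided by the average node count of the two graphs
    return data["ged"] / (0.5 * (len(data["labels_1"]) + len(data["labels_2"])))

def calculate_loss(prediction, target):
    # squared error in negative-log space, i.e. on the normalized GED scale
    return (-math.log(prediction) - (-math.log(target))) ** 2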
Example #2
    def score(self):
        """
        Scoring on the test set.
        """
        print("\n\nModel evaluation.\n")
        self.model.eval()
        self.scores = []
        self.ground_truth = []
        preds = []
        truths = []
        for graph_pair in tqdm(self.testing_graphs):
            data = process_pair(graph_pair)
            self.ground_truth.append(calculate_normalized_ged(data))
            data = self.transfer_to_torch(data)
            target = data["target"]
            prediction = self.model(data)
            self.scores.append(calculate_loss(prediction, target))

            preds.append(0 if prediction.item() < 0.5 else 1)
            truths.append(int(data["target"].item()))
        self.print_evaluation()
        plot_confusion_matrix(np.array(truths),
                              np.array(preds),
                              np.array([0, 1]),
                              title='SimGNN confusion matrix')
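Example #2 calls plot_confusion_matrix, which is not defined here. One possible stand-in built on scikit-learn and matplotlib (both assumed to be available; the project's real helper may differ) is:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(y_true, y_pred, classes, title='Confusion matrix'):
    # compute the matrix and render it as an annotated heat map
    cm = confusion_matrix(y_true, y_pred, labels=classes)
    fig, ax = plt.subplots()
    image = ax.imshow(cm, cmap='Blues')
    ax.set_xticks(range(len(classes)))
    ax.set_xticklabels(classes)
    ax.set_yticks(range(len(classes)))
    ax.set_yticklabels(classes)
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    ax.set_title(title)
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, cm[i, j], ha='center', va='center')
    fig.colorbar(image)
    plt.show()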
Example #3
    def process_batch(self, batch):
        """
        Forward pass with a batch of data.

        :param batch: Batch of graph pair locations.
        :return loss: Loss on the batch.
        """
        self.optimizer.zero_grad()
        losses = 0
        for graph_pair in batch:
            # load the data for this graph pair from its JSON file
            data = process_pair(graph_pair)
            data = self.transfer_to_torch(data)
            target = data["target"]
            # forward pass
            '''
            after transfer_to_torch, data consists of four matrices
            (two edge matrices and two node-feature matrices)
            '''
            prediction = self.model(data)
            losses = losses + torch.nn.functional.mse_loss(
                data["target"], prediction)
        losses.backward(retain_graph=True)
        self.optimizer.step()
        '''
        losses.item()
        The average of the batch losses will give you an estimate of the “epoch loss” during training. 
        Since you are calculating the loss anyway, 
        you could just sum it and calculate the mean after the epoch finishes.
        '''
        loss = losses.item()
        return loss
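The closing comment suggests summing the per-batch losses and averaging them once the epoch ends. A sketch of the surrounding training loop (the epochs argument and the create_batches helper are assumptions, consistent with a SimGNN-style trainer) could be:

def fit(self):
    # run the configured number of epochs and report a running mean loss per pair
    for epoch in range(self.args.epochs):
        batches = self.create_batches()   # assumed helper: shuffle the pairs and split them into batches
        loss_sum = 0
        pairs_seen = 0
        for batch in batches:
            loss_sum += self.process_batch(batch)   # summed loss over this batch
            pairs_seen += len(batch)
            print("Epoch %d, mean loss per pair: %.5f" % (epoch, loss_sum / pairs_seen))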
Example #4
 def get_train_baseline_error(self):
     """
     Calculates the baseline error of the training data
     """
     self.train_ground_truth = []
     for graph_pair in tqdm(self.training_graphs):
         data = process_pair(graph_pair)
         self.train_ground_truth.append(calculate_normalized_ged(data))
     norm_ged_mean = np.mean(self.train_ground_truth)
     base_train_error = np.mean([(n - norm_ged_mean)**2
                                 for n in self.train_ground_truth])
     print("\nBaseline Training error: " + str(round(base_train_error, 5)))
Example #5
    def load_model_parallel(self, pairList):

        #print("Parallel Execution of funcGNN from pretrained model")
        #self.model = funcGNN(self.args, self.number_of_labels)
        #self.model.load_state_dict(torch.load('./model_state.pth'))
        #self.model.eval()
        data = process_pair(pairList)
        self.ground_truth.append(calculate_normalized_ged(data))
        data = self.transfer_to_torch(data)
        target = data["target"]
        prediction = self.model(data)
        #print("\n" + str(pairList) + "- " + "Similarity/Target: " + str(prediction) + " / " + str(target))
        self.scores.append(calculate_loss(prediction, target))
Example #6
 def score(self):
     """
     Scoring on the test set.
     """
     print("\n\nModel evaluation.\n")
     self.scores = []
     self.ground_truth = []
     for graph_pair in tqdm(self.testing_graphs):
         data = process_pair(graph_pair)
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         self.scores.append(calculate_loss(prediction, target))
     self.print_evaluation()
Example #7
 def initial_label_enumeration(self):
     """
     Collecting the unique node identifiers.
     """
     print("\nEnumerating unique labels.\n")
     self.training_graphs = glob.glob(self.args.training_graphs + "*.json")
     self.testing_graphs = glob.glob(self.args.testing_graphs + "*.json")
     graph_pairs = self.training_graphs + self.testing_graphs
     self.global_labels = set()
     for graph_pair in tqdm(graph_pairs):
         data = process_pair(graph_pair)
         self.global_labels = self.global_labels.union(set(data["labels_1"]))
         self.global_labels = self.global_labels.union(set(data["labels_2"]))
     self.global_labels = sorted(self.global_labels)
     self.global_labels = {val: index for index, val in enumerate(self.global_labels)}
     self.number_of_labels = len(self.global_labels)
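The global_labels dictionary built here maps each node label to a column index; it is typically used to one-hot encode node features when a pair is converted to tensors. A minimal sketch of that encoding step (the helper name encode_features is an illustration, not part of the original code) is:

import torch

def encode_features(labels, global_labels):
    # one row per node, with a 1.0 in the column of that node's global label index
    features = torch.zeros(len(labels), len(global_labels))
    for node, label in enumerate(labels):
        features[node, global_labels[label]] = 1.0
    return features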
Example #8
 def process_batch(self, batch):
     """
     Forward pass with a batch of data.
     :param batch: Batch of graph pair locations.
     :return loss: Loss on the batch.
     """
     self.optimizer.zero_grad()
     losses = 0
     for graph_pair in batch:
         data = process_pair(graph_pair)
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         losses = losses + torch.nn.functional.mse_loss(data["target"], prediction)
     losses.backward(retain_graph=True)
     self.optimizer.step()
     loss = losses.item()
     return loss
Example #9
 def load_model(self):
     print("\nSerial Execution of funcGNN from pretrained model")
     start_time = time.time()
     self.model = funcGNN(self.args, self.number_of_labels)
     self.model.load_state_dict(torch.load('./model_state.pth'))
     self.model.eval()
     self.scores = []
     self.ground_truth = []
     for test_graph_pair in tqdm(self.random_graphs):
         data = process_pair(test_graph_pair)
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         #print("\n" + str(test_graph_pair) + "- " + "Similarity/Target: " + str(prediction) + " / " + str(target))
         self.scores.append(calculate_loss(prediction, target))
     print("--- %s seconds ---" % (time.time() - start_time))
Example #10
 def score(self):
     """
     Scoring on the test set.
     """
     print("\n\nModel evaluation.\n")
     if path.isfile(self.args.saved_model):
         self.model = torch.load(self.args.saved_model)
         self.model.train(False)
         self.model.eval()
     self.scores = []
     self.ground_truth = []
     for graph_pair in tqdm(self.testing_graphs):
         data = process_pair(graph_pair)
         data_org = data.copy()
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         print(f'Test target: {data_org["ged"]} '
               f'{reverse_normalized_ged(-math.log(target), data_org)}, '
               f'prediction: {reverse_normalized_ged(-math.log(prediction), data_org)}')
         self.scores.append(calculate_loss(prediction, target))
     self.print_evaluation()
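Example #10 converts the model's similarity score back to a raw GED for printing. Assuming the target is stored as exp(-normalized GED), as in the other examples on this page, -math.log(prediction) recovers the normalized GED, and a reverse_normalized_ged helper would only need to undo the node-count normalization. One hedged sketch:

def reverse_normalized_ged(norm_ged, data):
    # multiply back by the average node count used when normalizing the GED
    return norm_ged * 0.5 * (len(data["labels_1"]) + len(data["labels_2"]))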