Example #1
    def test(self, test_data: List[GraphData], best_model_file: Optional[str] = None) -> Tuple[Loss, MicroF1]:
        print("##### Test Model #####")
        with torch.no_grad():
            if best_model_file:
                self._net.load_state_dict(torch.load(best_model_file))

            self._net.eval()
            results = []
            for graph in test_data:
                adjs = [sample_neighbors(graph.adj_coo_matrix, size) for size in [25, 10]]
                output = self._net(graph.features_vectors, *adjs)
                labels = graph.labels

                loss = self._loss_fn(output, labels)
                f1 = MicroF1.calc(output, labels)

                results.append((loss.item(), f1))

            avg_loss, avg_f1 = self._avg_results(results)

            result = Loss("Test Loss", avg_loss), MicroF1("Test F1", avg_f1)

            print(f"{result[0]}, {result[1]}")
            return result
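Example #1 (and most of the examples that follow) averages its per-batch results with a `_avg_results` helper that is not shown here. The sketch below is only an assumption about its contract, column-wise means over the collected (loss, f1) tuples; the actual helper may differ:

    def _avg_results(self, results: List[Tuple[float, ...]]) -> Tuple[float, ...]:
        # zip(*results) regroups the per-step tuples column by column, e.g.
        # [(loss_1, f1_1), (loss_2, f1_2)] -> (loss_1, loss_2), (f1_1, f1_2),
        # so each metric can be averaged independently.
        return tuple(sum(column) / len(column) for column in zip(*results))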
Example #2
    def _evaluate(self) -> Tuple[Loss, MicroF1]:
        with torch.no_grad():
            self._net.eval()

            results = []
            for batch_step in tqdm(self._validation_loader):
                batch_step = batch_step.to(self._current_device)
                output = self._net(
                    self._validation_data.features_vectors[
                        batch_step.batch_idxs].to(self._current_device),
                    *batch_step.sampled_adjs)
                output = output[batch_step.sampled_idxs]
                labels = self._validation_data.labels[
                    batch_step.target_idxs].to(self._current_device)

                loss = self.unsup_loss(output,
                                       self._validation_data.positive_pairs,
                                       self._validation_data.adj_coo_matrix)
                f1 = MicroF1.calc(output, labels)

                results.append((loss.item(), f1))

            avg_loss, avg_f1 = self._avg_results(results)

            return Loss("Validation Loss",
                        avg_loss), MicroF1("Validation F1", avg_f1)
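Example #2 evaluates with an unsupervised objective built from positive node pairs instead of labels. The `unsup_loss` it calls is not shown; the sketch below is only an assumed GraphSAGE-style variant that samples negatives uniformly and ignores the adjacency argument, which the real implementation presumably uses:

    def unsup_loss(self, output: torch.Tensor, positive_pairs: torch.Tensor,
                   adj_coo_matrix, num_negatives: int = 20) -> torch.Tensor:
        # `positive_pairs` is assumed to be a (num_pairs, 2) index tensor of
        # co-occurring nodes; negatives are drawn uniformly at random here.
        src = output[positive_pairs[:, 0]]
        pos = output[positive_pairs[:, 1]]
        neg_idx = torch.randint(0, output.size(0),
                                (positive_pairs.size(0), num_negatives))
        neg = output[neg_idx]
        pos_term = torch.log(torch.sigmoid((src * pos).sum(dim=-1)) + 1e-10)
        neg_term = torch.log(torch.sigmoid(-(src.unsqueeze(1) * neg).sum(dim=-1)) + 1e-10)
        return -(pos_term.mean() + neg_term.mean())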
Example #3
    def test(self, test_data: GraphData, best_model_file: Optional[str] = None) -> Tuple[Loss, MicroF1]:
        print("##### Test Model #####")
        loader = MiniBatchLoader(
            test_data.adj_coo_matrix, [25, 10], test_data.test_mask, batch_size=512, shuffle=False)
        with torch.no_grad():
            if best_model_file:
                self._net.load_state_dict(torch.load(best_model_file))

            self._net.eval()
            results = []
            for batch_step in tqdm(loader):
                batch_step = batch_step.to(self._current_device)
                output = self._net(
                    test_data.features_vectors[batch_step.batch_idxs].to(self._current_device), *batch_step.sampled_adjs)
                output = output[batch_step.sampled_idxs]
                labels = test_data.labels[batch_step.target_idxs].to(self._current_device)

                loss = self._loss_fn(output, labels)
                f1 = MicroF1.calc(output, labels)

                results.append((loss.item(), f1))

            avg_loss, avg_f1 = self._avg_results(results)

            result = Loss("Test Loss", avg_loss), MicroF1("Test F1", avg_f1)

            print(f"{result[0]}, {result[1]}")
            return result
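A hypothetical call site for the `test` methods above (`trainer`, `graph_data`, and the checkpoint filename are placeholders, not names taken from the examples): the best checkpoint written during training is restored before evaluation, and the returned pair can be unpacked directly.

    test_loss, test_f1 = trainer.test(graph_data, best_model_file="best_model.pt")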
Example #4
    def test(self,
             test_data: List[GraphData],
             best_model_file: Optional[str] = None) -> Tuple[Loss, MicroF1]:
        print("##### Test Model #####")
        with torch.no_grad():
            if best_model_file:
                self._net.load_state_dict(torch.load(best_model_file))

            self._net.eval()

            train_embeddings, train_labels = self._calc_embeddings(
                self._train_data)
            test_embeddings, test_labels = self._calc_embeddings(test_data)

            # loss="log" selects logistic regression; newer scikit-learn releases rename it to "log_loss".
            log = MultiOutputClassifier(SGDClassifier(loss="log"), n_jobs=10)
            log.fit(train_embeddings.detach().cpu(),
                    train_labels.detach().cpu())

            results = []
            for graph in test_data:
                output = self._run_net(graph)
                loss = self.unsup_loss(output, graph.positive_pairs,
                                       graph.adj_coo_matrix)
                results.append((loss.item(), ))

            avg_loss = list(self._avg_results(results))[0]
            f1 = MicroF1.calc(
                torch.from_numpy(log.predict(test_embeddings.detach().cpu())),
                test_labels.detach().cpu())

            result = Loss("Test Loss", avg_loss), MicroF1("Test F1", f1)

            print(f"{result[0]}, {result[1]}")
            return result
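Example #4 evaluates an unsupervised model by fitting a downstream classifier on node embeddings. Its `_calc_embeddings` helper is not shown; below is a minimal sketch of the assumed behaviour, reusing the `_run_net` call from the same example (the real helper may batch or move tensors differently):

    def _calc_embeddings(self, data: List[GraphData]) -> Tuple[torch.Tensor, torch.Tensor]:
        # Run the network over each graph and stack node embeddings and labels
        # into two flat tensors for the downstream classifier.
        embeddings = torch.cat([self._run_net(graph) for graph in data])
        labels = torch.cat([graph.labels for graph in data])
        return embeddings, labels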
Example #5
    def _evaluate(self) -> Tuple[Loss, MicroF1]:
        with torch.no_grad():
            self._net.eval()

            results = []
            for step in self._validation_data:
                output = self._net(step.features_vectors, step.adj_coo_matrix)
                results.append((
                    self._loss_fn(output, step.labels).item(),
                    #accuracy(output, step.labels),
                    MicroF1.calc(output, step.labels),
                ))

            avg_loss, avg_f1 = self._avg_results(results)
            return Loss("Validation Loss",
                        avg_loss), MicroF1("Validation F1", avg_f1)
Example #6
    def _evaluate(self) -> Tuple[Loss, MicroF1]:
        with torch.no_grad():
            self._net.eval()

            results = []
            for graph in self._validation_data:
                adjs = [sample_neighbors(graph.adj_coo_matrix, size) for size in [25, 10]]
                output = self._net(graph.features_vectors, *adjs)
                labels = graph.labels

                loss = self._loss_fn(output, labels)
                f1 = MicroF1.calc(output, labels)

                results.append((loss.item(), f1))

            avg_loss, avg_f1 = self._avg_results(results)

            return Loss("Validation Loss", avg_loss), MicroF1("Validation F1", avg_f1)
Example #7
    def _train(self) -> Tuple[Loss, MicroF1]:
        self._net.train()

        results = []
        for train_graph in tqdm(self._train_data):
            self._optim.zero_grad()
            adjs = [sample_neighbors(train_graph.adj_coo_matrix, size) for size in [25, 10]]
            output = self._net(train_graph.features_vectors, *adjs)
            labels = train_graph.labels

            loss = self._loss_fn(output, labels)
            f1 = MicroF1.calc(output, labels)
            loss.backward()
            torch.nn.utils.clip_grad_value_(self._net.parameters(), 5)
            self._optim.step()

            results.append((loss.item(), f1))

        avg_loss, avg_f1 = self._avg_results(results)
        return Loss("Train Loss", avg_loss), MicroF1("Train F1", avg_f1)
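Examples #1, #6, and #7 build two sampled adjacencies with fan-outs of 25 and 10 before each forward pass. The `sample_neighbors` helper is not shown; below is a rough sketch of one way such a helper could work on a SciPy COO adjacency matrix (the actual helper may return a different structure expected by `self._net`):

    import numpy as np
    import scipy.sparse as sp

    def sample_neighbors(adj_coo_matrix: sp.coo_matrix, size: int) -> sp.coo_matrix:
        # Keep at most `size` randomly chosen neighbors per node.
        adj_csr = adj_coo_matrix.tocsr()
        rows, cols = [], []
        for node in range(adj_csr.shape[0]):
            neighbors = adj_csr.indices[adj_csr.indptr[node]:adj_csr.indptr[node + 1]]
            if len(neighbors) > size:
                neighbors = np.random.choice(neighbors, size, replace=False)
            rows.extend([node] * len(neighbors))
            cols.extend(neighbors)
        return sp.coo_matrix((np.ones(len(rows)), (rows, cols)), shape=adj_csr.shape)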
Example #8
    def _train(self) -> Tuple[Loss, MicroF1]:
        self._net.train()

        results = []
        for batch_step in tqdm(self._train_loader):
            batch_step = batch_step.to(self._current_device)
            self._optim.zero_grad()
            output = self._net(
                self._train_data.features_vectors[batch_step.batch_idxs].to(self._current_device), *batch_step.sampled_adjs)
            output = output[batch_step.sampled_idxs]
            labels = self._train_data.labels[batch_step.target_idxs].to(self._current_device)

            loss = self._loss_fn(output, labels)
            f1 = MicroF1.calc(output, labels)
            loss.backward()
            self._optim.step()

            results.append((loss.item(), f1))

        avg_loss, avg_f1 = self._avg_results(results)
        return Loss("Train Loss", avg_loss), MicroF1("Train F1", avg_f1)
Example #9
    def test(self,
             test_data: List[GraphData],
             best_model_file: Optional[str] = None) -> Tuple[Loss, MicroF1]:
        print("##### Test Model #####")
        with torch.no_grad():
            if best_model_file:
                self._net.load_state_dict(torch.load(best_model_file))
            self._net.eval()

            results = []
            for step in test_data:
                output = self._net(step.features_vectors, step.adj_coo_matrix)
                results.append((
                    self._loss_fn(output, step.labels).item(),
                    MicroF1.calc(output, step.labels),
                ))

            avg_loss, avg_f1 = self._avg_results(results)
            result = Loss("Test Loss", avg_loss), MicroF1("Test F1", avg_f1)
            print(f"{result[0]}, {result[1]}")
            return result
Example #10
    def _train(self) -> Tuple[Loss, MicroF1]:
        self._net.train()

        results = []
        for batch in self._batch_train_data:
            self._optim.zero_grad()

            labels = torch.cat([data.labels for data in batch])
            output = torch.cat([
                self._net(data.features_vectors, data.adj_coo_matrix)
                for data in batch
            ])

            loss = self._loss_fn(output, labels)
            f1 = MicroF1.calc(output, labels)
            loss.backward()
            self._optim.step()

            results.append((loss.item(), f1))

        avg_loss, avg_f1 = self._avg_results(results)
        return Loss("Train Loss", avg_loss), MicroF1("Train F1", avg_f1)
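All of the examples wrap their averaged metrics in `Loss` and `MicroF1` objects and print them through an f-string, so only the constructor signature and the string formatting can be inferred from this section. A minimal sketch under those assumptions (the real `MicroF1` additionally provides the `MicroF1.calc(output, labels)` classmethod used throughout):

    class Loss:
        def __init__(self, name: str, value: float) -> None:
            self.name = name
            self.value = value

        def __str__(self) -> str:
            return f"{self.name}: {self.value:.4f}"

    class MicroF1(Loss):
        # Inherits the name/value formatting; the subclassing itself is an
        # assumption made only for this sketch.
        pass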