Example #1
class TestProcess(ActiveLearningTestMixin, ManagerTestCase):

    def setUp(self):
        super().setUp()
        self.c = ActiveLearningCore(self.relation, self.lbl_evs([None]*3))
        patcher = mock.patch.object(self.c, 'train_relation_classifier')
        self.mock_train_classifier = patcher.start()
        self.addCleanup(patcher.stop)

    def test_process_with_no_available_labels_does_nothing(self):
        self.c.process()
        self.assertFalse(self.mock_train_classifier.called)

    def test_process_with_not_both_labels_does_nothing(self):
        # by "both", we mean True and False
        self.c.add_answer(self.ev1, True)
        self.c.process()
        self.assertFalse(self.mock_train_classifier.called)
        self.c.add_answer(self.ev2, True)
        self.c.process()
        self.assertFalse(self.mock_train_classifier.called)
        self.c.add_answer(self.ev3, False)
        self.c.process()
        self.assertTrue(self.mock_train_classifier.called)

    def test_more_than_binary_labels_raises(self):
        self.c.add_answer(self.ev1, True)
        self.c.add_answer(self.ev2, False)
        self.c.add_answer(self.ev3, False)
        self.c.labeled_evidence[self.ev3] = 'weird thing'
        self.assertRaises(ValueError, self.c.process)
        self.assertFalse(self.mock_train_classifier.called)
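
These tests lean on helpers from ActiveLearningTestMixin that are not shown here. A minimal sketch of what the mixin presumably provides, with names inferred from usage above (the placeholders are assumptions, not the real fixtures):

class ActiveLearningTestMixin:

    def setUp(self):
        super().setUp()
        # The real fixtures build a Relation and three candidate evidences;
        # plain objects stand in for them in this hypothetical sketch.
        self.relation = object()
        self.ev1, self.ev2, self.ev3 = object(), object(), object()

    def lbl_evs(self, labels):
        # Pair each known evidence with a label (None means "unlabeled"),
        # producing the labeled_evidence mapping ActiveLearningCore expects.
        return dict(zip([self.ev1, self.ev2, self.ev3], labels))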
Example #3
    def __call__(self, config):
        if u"class_weight" in config[u"classifier_args"]:
            d = config[u"classifier_args"][u"class_weight"]
            assert "true" in d and "false" in d and len(d) == 2
            config[u"classifier_args"][u"class_weight"] = {
                True: d["true"],
                False: d["false"]
            }

        # Prepare data
        if self.data is None or self.relname != config["relation"]:
            relation = iepy.data.models.Relation.objects.get(
                name=config["relation"])
            c_evidences = CEM.candidates_for_relation(relation)
            self.data = CEM.labels_for(relation, c_evidences,
                                       CEM.conflict_resolution_newest_wins)
            self.data = [(x, label) for x, label in self.data.items()
                         if label is not None]
            self.relname = config["relation"]
        data = self.data
        testset = {x: label for x, label in data}
        candidate_evidences = {x: None for x, _ in data}
        if not data:
            raise NotEnoughData("There is no labeled data for training")
        oracle_answers = config["oracle_answers"]
        N = len(data)
        M = N - oracle_answers  # test set size
        if M / N < 0.1:  # less than 10% of the data left for testing
            raise NotEnoughData("There is not enough data for evaluation")

        result = {
            "train_size": oracle_answers,
            "test_size": M,
            "dataset_size": N,
            "start_time": time.time(),
        }

        # Interact with oracle
        alcore = ActiveLearningCore(config["relation"],
                                    candidate_evidences,
                                    extractor_config=config,
                                    performance_tradeoff=config["tradeoff"])
        alcore.start()
        for _ in range(oracle_answers):
            q = alcore.questions[0]
            alcore.add_answer(q, testset[q])
            del testset[q]  # Once given for training cannot be part of testset
            alcore.process()

        extractor = alcore.relation_classifier

        # Evaluate prediction
        predicted_dict = alcore.predict()
        test_evidences = list(testset)
        test_labels = [testset[x] for x in test_evidences]
        predicted_labels = [predicted_dict[x] for x in test_evidences]
        result.update(
            result_dict_from_predictions(test_evidences, test_labels,
                                         predicted_labels))

        # Evaluate ranking
        predicted_scores = extractor.decision_function(test_evidences)
        auroc = roc_auc_score(test_labels, predicted_scores)
        avgprec = average_precision_score(test_labels, predicted_scores)

        result.update({
            "auROC": auroc,
            "average_precision": avgprec,
        })
        return result
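
For reference, a hypothetical sketch of how this runner might be invoked; the config keys mirror the ones read in __call__ above, while Runner and the relation name are placeholders:

# Hypothetical usage sketch; only keys actually read by __call__ are set.
config = {
    u"relation": u"some_relation",                 # placeholder relation name
    u"classifier_args": {
        u"class_weight": {"true": 1, "false": 2},  # remapped to bool keys above
    },
    u"oracle_answers": 50,     # simulated human answers used for training
    u"tradeoff": None,         # forwarded as performance_tradeoff
}
runner = Runner()              # hypothetical class owning the __call__ above
result = runner(config)
print(result["auROC"], result["average_precision"])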
Example #4
    candidates = CandidateEvidenceManager.candidates_for_relation(relation)
    labeled_evidences = load_labeled_evidences(relation, candidates)
    iextractor = ActiveLearningCore(relation, labeled_evidences, extractor_config,
                                    performance_tradeoff=tuning_mode)
    iextractor.start()

    STOP = u'STOP'
    term = TerminalAdministration(relation,
                                  extra_options=[(STOP, u'Stop execution')])
    was_ever_trained = False
    while iextractor.questions:
        questions = list(iextractor.questions)  # copying the list
        term.update_candidate_evidences_to_label(questions)
        result = term()
        i = 0
        for c, label_value in load_labeled_evidences(relation, questions).items():
            if label_value is not None:
                iextractor.add_answer(c, label_value)
                i += 1
        print('Added %s new human labels to the extractor core' % i)
        iextractor.process()
        was_ever_trained = True
        if result == STOP:
            break

    if not was_ever_trained:
        # process() must run at least once before predictions can be requested
        iextractor.process()
    predictions = iextractor.predict()
    output.dump_output_loop(predictions)
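
Both this loop and the variant in Example #5 rely on a load_labeled_evidences helper that is not shown. Judging by the CEM.labels_for call in Example #3, it presumably resolves the newest human label per candidate; a sketch under that assumption:

# Hypothetical sketch: map each candidate to its current human label
# (True/False) or None, letting the newest label win on conflicts,
# mirroring the CEM.labels_for call seen in Example #3.
def load_labeled_evidences(relation, candidates):
    CEM = CandidateEvidenceManager
    return CEM.labels_for(relation, candidates,
                          CEM.conflict_resolution_newest_wins)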
Example #5
    labeled_evidences = load_labeled_evidences(relation, candidates)
    iextractor = ActiveLearningCore(relation, labeled_evidences,
                                    extractor_config)
    iextractor.start()

    STOP = u'STOP'
    term = TerminalAdministration(relation,
                                  extra_options=[(STOP, u'Stop execution ASAP')])

    while iextractor.questions:
        questions = list(iextractor.questions)  # copying the list
        term.update_candidate_evidences_to_label(questions)
        result = term()
        if result == STOP:
            break

        i = 0
        for c, label_value in load_labeled_evidences(relation,
                                                     questions).items():
            if label_value is not None:
                iextractor.add_answer(c, label_value)
                i += 1
        print('Added %s new human labels to the extractor core' % i)
        iextractor.process()

    predictions = iextractor.predict()
    print("Predictions:")
    for prediction, value in predictions.items():
        print("({} -- {})".format(prediction, value))
Example #6
    def test_every_question_answered_is_not_a_question_any_more(self):
        c = ActiveLearningCore(self.relation, self.lbl_evs([None]*3))
        c.add_answer(self.ev1, False)
        self.assertEqual(len(c.questions), 2)
        self.assertNotIn(self.ev1, c.questions)
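
A hypothetical sketch of the add_answer contract this test pins down (the real implementation lives in ActiveLearningCore; this only illustrates the observable behavior):

# Hypothetical: an answered evidence is recorded as labeled and is
# no longer offered as a question.
def add_answer(self, evidence, answer):
    assert answer in (True, False)
    self.labeled_evidence[evidence] = answer
    if evidence in self.questions:
        self.questions.remove(evidence)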