Example #1
File: trainer.py Project: zeta1999/delft
    def on_epoch_end(self, epoch, logs={}):
        y_pred = None
        y_true = None
        for i, (data, label) in enumerate(self.valid_batches):
            if i == self.valid_steps:
                break
            y_true_batch = label
            y_true_batch = np.argmax(y_true_batch, -1)
            sequence_lengths = data[-1]  # shape of (batch_size, 1)
            sequence_lengths = np.reshape(sequence_lengths, (-1, ))

            y_pred_batch = self.model.predict_on_batch(data)
            y_pred_batch = np.argmax(y_pred_batch, -1)

            y_pred_batch = [
                self.p.inverse_transform(y[:l])
                for y, l in zip(y_pred_batch, sequence_lengths)
            ]
            y_true_batch = [
                self.p.inverse_transform(y[:l])
                for y, l in zip(y_true_batch, sequence_lengths)
            ]

            if i == 0:
                y_pred = y_pred_batch
                y_true = y_true_batch
            else:
                y_pred = y_pred + y_pred_batch
                y_true = y_true + y_true_batch

        #for i in range(0,len(y_pred)):
        #    print("pred", y_pred[i])
        #    print("true", y_true[i])
        has_data = y_true is not None and y_pred is not None
        f1 = f1_score(y_true, y_pred) if has_data else 0.0
        print("\tf1 (micro): {:04.2f}".format(f1 * 100))

        if self.evaluation:
            self.accuracy = accuracy_score(y_true, y_pred) if has_data else 0.0
            self.precision = precision_score(y_true, y_pred) if has_data else 0.0
            self.recall = recall_score(y_true, y_pred) if has_data else 0.0
            self.report_as_map = compute_metrics(y_true, y_pred) if has_data else compute_metrics([], [])
            self.report = get_report(self.report_as_map, digits=4)
            print(self.report)

        # save eval
        logs['f1'] = f1
        self.f1 = f1
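
For reference, the metric calls above (f1_score, precision_score, recall_score) operate on lists of label sequences, one list of IOB tags per sentence, which is exactly the shape the loop builds into y_true and y_pred. Below is a minimal, self-contained sketch of that input shape using the seqeval package as a stand-in for the project's metric functions; it is illustrative and not part of the project source.

from seqeval.metrics import f1_score, precision_score, recall_score

# one list of IOB labels per sentence, mirroring what the loop above accumulates
y_true = [["B-PER", "I-PER", "O", "B-LOC"], ["O", "B-ORG"]]
y_pred = [["B-PER", "I-PER", "O", "O"], ["O", "B-ORG"]]

print("\tf1 (micro): {:04.2f}".format(f1_score(y_true, y_pred) * 100))
print("\tprecision: {:04.2f}".format(precision_score(y_true, y_pred) * 100))
print("\trecall: {:04.2f}".format(recall_score(y_true, y_pred) * 100))
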
Example #2
    def on_epoch_end(self, epoch: int, logs: dict = None):
        prediction_results = get_model_results(self.model,
                                               self.valid_batches,
                                               preprocessor=self.p)
        y_pred = prediction_results.y_pred
        y_true = prediction_results.y_true

        f1 = f1_score(y_true, y_pred)
        print("\tf1 (micro): {:04.2f}".format(f1 * 100))

        if self.evaluation:
            self.accuracy = accuracy_score(y_true, y_pred)
            self.precision = precision_score(y_true, y_pred)
            self.recall = recall_score(y_true, y_pred)
            self.report = classification_report(y_true, y_pred, digits=4)
            print(self.report)

        # save eval
        if logs:
            logs['f1'] = f1
        self.f1 = f1
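
The helper get_model_results used above is assumed to return an object exposing aligned y_pred and y_true lists of label sequences. The hypothetical container below only documents that assumed shape; it is not the project's actual return type.

from dataclasses import dataclass
from typing import List

@dataclass
class PredictionResults:
    # predicted and gold label sequences, one list of tags per sentence,
    # already realigned to the original tokenization
    y_pred: List[List[str]]
    y_true: List[List[str]]

results = PredictionResults(
    y_pred=[["B-PER", "I-PER", "O"]],
    y_true=[["B-PER", "I-PER", "O"]],
)
print(results.y_pred[0], results.y_true[0])
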
Example #3
    def eval_nfold(self, x_test, y_test, features=None):
        if self.models is not None:
            total_f1 = 0
            best_f1 = 0
            best_index = 0
            worst_f1 = 1
            worst_index = 0
            reports = []
            reports_as_map = []
            total_precision = 0
            total_recall = 0
            for i in range(self.model_config.fold_number):
                print('\n------------------------ fold ' + str(i) +
                      ' --------------------------------------')

                if 'bert' not in self.model_config.model_type.lower():
                    # Prepare test data(steps, generator)
                    test_generator = DataGenerator(
                        x_test,
                        y_test,
                        batch_size=self.model_config.batch_size,
                        preprocessor=self.p,
                        char_embed_size=self.model_config.char_embedding_size,
                        max_sequence_length=self.model_config.max_sequence_length,
                        embeddings=self.embeddings,
                        shuffle=False,
                        features=features)

                    # Build the evaluator and evaluate the model
                    scorer = Scorer(test_generator, self.p, evaluation=True)
                    scorer.model = self.models[i]
                    scorer.on_epoch_end(epoch=-1)
                    f1 = scorer.f1
                    precision = scorer.precision
                    recall = scorer.recall
                    reports.append(scorer.report)
                    reports_as_map.append(scorer.report_as_map)

                else:
                    # BERT architecture model
                    dir_path = 'data/models/sequenceLabelling/'
                    self.model_config = ModelConfig.load(
                        os.path.join(dir_path, self.model_config.model_name,
                                     self.config_file))
                    self.p = WordPreprocessor.load(
                        os.path.join(dir_path, self.model_config.model_name,
                                     self.preprocessor_file))
                    self.model = get_model(self.model_config,
                                           self.p,
                                           ntags=len(self.p.vocab_tag))
                    self.model.load_model(i)

                    y_pred = self.model.predict(x_test, fold_id=i)

                    nb_alignment_issues = 0
                    for j in range(len(y_test)):
                        if len(y_test[j]) != len(y_pred[j]):
                            nb_alignment_issues += 1
                            # The BERT tokenizer appears to introduce some additional tokens without a ## prefix,
                            # but this is normally handled when predicting.
                            # To be very conservative, the following ensures the number of tokens always
                            # matches, although it should never be needed in practice.
                            if len(y_test[j]) < len(y_pred[j]):
                                y_test[j] = y_test[j] + ["O"] * (
                                    len(y_pred[j]) - len(y_test[j]))
                            if len(y_test[j]) > len(y_pred[j]):
                                y_pred[j] = y_pred[j] + ["O"] * (
                                    len(y_test[j]) - len(y_pred[j]))

                    if nb_alignment_issues > 0:
                        print("number of alignment issues with test set:",
                              nb_alignment_issues)

                    f1 = f1_score(y_test, y_pred)
                    precision = precision_score(y_test, y_pred)
                    recall = recall_score(y_test, y_pred)

                    print("\tf1: {:04.2f}".format(f1 * 100))
                    print("\tprecision: {:04.2f}".format(precision * 100))
                    print("\trecall: {:04.2f}".format(recall * 100))

                    report, report_as_map = classification_report(y_test,
                                                                  y_pred,
                                                                  digits=4)
                    reports.append(report)
                    reports_as_map.append(report_as_map)

                if best_f1 < f1:
                    best_f1 = f1
                    best_index = i
                if worst_f1 > f1:
                    worst_f1 = f1
                    worst_index = i
                total_f1 += f1
                total_precision += precision
                total_recall += recall

            fold_average_evaluation = {'labels': {}, 'micro': {}, 'macro': {}}

            micro_f1 = total_f1 / self.model_config.fold_number
            micro_precision = total_precision / self.model_config.fold_number
            micro_recall = total_recall / self.model_config.fold_number

            micro_eval_block = {
                'f1': micro_f1,
                'precision': micro_precision,
                'recall': micro_recall
            }
            fold_average_evaluation['micro'] = micro_eval_block

            # field-level average over the n folds
            labels = []
            for label in sorted(self.p.vocab_tag):
                if label == 'O' or label == '<PAD>':
                    continue
                if label.startswith("B-") or label.startswith(
                        "S-") or label.startswith("I-") or label.startswith(
                            "E-"):
                    label = label[2:]

                if label in labels:
                    continue
                labels.append(label)

                sum_p = 0
                sum_r = 0
                sum_f1 = 0
                sum_support = 0
                for j in range(self.model_config.fold_number):
                    if label not in reports_as_map[j]['labels']:
                        continue
                    report_as_map = reports_as_map[j]['labels'][label]
                    sum_p += report_as_map["precision"]
                    sum_r += report_as_map["recall"]
                    sum_f1 += report_as_map["f1"]
                    sum_support += report_as_map["support"]

                avg_p = sum_p / self.model_config.fold_number
                avg_r = sum_r / self.model_config.fold_number
                avg_f1 = sum_f1 / self.model_config.fold_number
                avg_support = sum_support / self.model_config.fold_number
                # the averaged support is reported as an integer (floored)
                avg_support = math.floor(avg_support)

                block_label = {
                    'precision': avg_p,
                    'recall': avg_r,
                    'support': avg_support,
                    'f1': avg_f1
                }
                fold_average_evaluation['labels'][label] = block_label

            print(
                "----------------------------------------------------------------------"
            )
            print("\n** Worst ** model scores - run", str(worst_index))
            print(reports[worst_index])

            print("\n** Best ** model scores - run", str(best_index))
            print(reports[best_index])

            if 'bert' not in self.model_config.model_type.lower():
                self.model = self.models[best_index]
            else:
                # copy best BERT model fold_number
                best_model_dir = 'data/models/sequenceLabelling/' + self.model_config.model_name + str(
                    best_index)
                new_model_dir = 'data/models/sequenceLabelling/' + self.model_config.model_name
                # update new_model_dir if it already exists, keep its existing config content
                merge_folders(best_model_dir, new_model_dir)
                # clean other fold directory
                for i in range(self.model_config.fold_number):
                    shutil.rmtree('data/models/sequenceLabelling/' +
                                  self.model_config.model_name + str(i))

            print(
                "----------------------------------------------------------------------"
            )
            print("\nAverage over", self.model_config.fold_number, "folds")
            print(
                get_report(fold_average_evaluation,
                           digits=4,
                           include_avgs=['micro']))
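
The fold-averaging block above assumes each entry of reports_as_map carries a 'labels' dict keyed by entity type, with precision/recall/f1/support values per label. The standalone sketch below reproduces that averaging on toy data; the names and structure mirror the code above and are assumptions, not imports from the project.

import math

# toy per-fold reports mirroring the reports_as_map structure used above
reports_as_map = [
    {'labels': {'PER': {'precision': 0.90, 'recall': 0.80, 'f1': 0.85, 'support': 101}}},
    {'labels': {'PER': {'precision': 0.92, 'recall': 0.84, 'f1': 0.88, 'support': 100}}},
]
fold_number = len(reports_as_map)

average = {}
for label in {l for report in reports_as_map for l in report['labels']}:
    sums = {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'support': 0}
    for report in reports_as_map:
        for key in sums:
            sums[key] += report['labels'].get(label, {}).get(key, 0)
    average[label] = {key: value / fold_number for key, value in sums.items()}
    # the averaged support is reported as an integer (floored), as in eval_nfold
    average[label]['support'] = math.floor(average[label]['support'])

print(average)  # e.g. {'PER': {'precision': ~0.91, 'recall': ~0.82, 'f1': ~0.865, 'support': 100}}
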
Example #4
File: trainer.py Project: kermitt2/delft
    def on_epoch_end(self, epoch, logs={}):
        y_pred = None
        y_true = None
        for i, (data, label) in enumerate(self.valid_batches):
            if i == self.valid_steps:
                break
            y_true_batch = label

            if isinstance(self.valid_batches, DataGeneratorTransformers):
                y_true_batch = np.asarray(y_true_batch, dtype=object)

                # remove the vector of token offsets from the data: the model does not expect it,
                # but we need it to correctly restore the labels (which are produced according to
                # the wordpiece sub-tokenization, not the expected token segmentation)
                input_offsets = data[-1]
                data = data[:-1]

                y_pred_batch = self.model.predict_on_batch(data)

                if not self.use_crf:
                    y_pred_batch = np.argmax(y_pred_batch, -1)

                if self.use_chain_crf:
                    y_pred_batch = np.argmax(y_pred_batch, -1)

                # the results were produced by a model with a transformer layer, so a few things to do:
                # the labels are sparse (integers, not one-hot encoded), and we need to map the
                # wordpiece-level labels back to labels for the original tokens; for this we can
                # use the token offsets provided by the generator
                new_y_pred_batch = []
                new_y_true_batch = []
                for y_pred_text, y_true_text, offsets_text in zip(
                        y_pred_batch, y_true_batch, input_offsets):
                    new_y_pred_text = []
                    new_y_true_text = []
                    # this is the result per sequence, realign labels:
                    for q in range(len(offsets_text)):
                        if offsets_text[q][0] == 0 and offsets_text[q][1] == 0:
                            # special token
                            continue
                        if offsets_text[q][0] != 0:
                            # added sub-token
                            continue
                        new_y_pred_text.append(y_pred_text[q])
                        new_y_true_text.append(y_true_text[q])
                    new_y_pred_batch.append(new_y_pred_text)
                    new_y_true_batch.append(new_y_true_text)
                y_pred_batch = new_y_pred_batch
                y_true_batch = new_y_true_batch

                y_true_batch = [
                    self.p.inverse_transform(y) for y in y_true_batch
                ]
                y_pred_batch = [
                    self.p.inverse_transform(y) for y in y_pred_batch
                ]
            else:
                # no transformer layer here, so no wordpiece sub-tokenization to deal with
                y_pred_batch = self.model.predict_on_batch(data)

                if not self.use_crf:
                    # one hot encoded predictions
                    y_pred_batch = np.argmax(y_pred_batch, -1)

                if self.use_chain_crf:
                    # one hot encoded predictions and labels
                    y_pred_batch = np.argmax(y_pred_batch, -1)
                    y_true_batch = np.argmax(y_true_batch, -1)

                # the input lengths are also available: the "length_input" vector is always
                # the last element of the model inputs
                sequence_lengths = data[-1]
                # shape (batch_size, 1) -> (batch_size,)
                sequence_lengths = np.reshape(sequence_lengths, (-1, ))

                y_pred_batch = [
                    self.p.inverse_transform(y[:l])
                    for y, l in zip(y_pred_batch, sequence_lengths)
                ]
                y_true_batch = [
                    self.p.inverse_transform(y[:l])
                    for y, l in zip(y_true_batch, sequence_lengths)
                ]

            if i == 0:
                y_pred = y_pred_batch
                y_true = y_true_batch
            else:
                y_pred.extend(y_pred_batch)
                y_true.extend(y_true_batch)
        '''
        for i in range(0,len(y_pred)):
            print("pred", y_pred[i])
            print("true", y_true[i])
        '''
        has_data = y_true is not None and y_pred is not None
        f1 = f1_score(y_true, y_pred) if has_data else 0.0
        print("\tf1 (micro): {:04.2f}".format(f1 * 100))

        if self.evaluation:
            self.accuracy = accuracy_score(y_true, y_pred) if has_data else 0.0
            self.precision = precision_score(y_true, y_pred) if has_data else 0.0
            self.recall = recall_score(y_true, y_pred) if has_data else 0.0
            self.report_as_map = compute_metrics(y_true, y_pred) if has_data else compute_metrics([], [])
            self.report = get_report(self.report_as_map, digits=4)
            print(self.report)

        # save eval
        logs['f1'] = f1
        self.f1 = f1
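
The realignment loop in the transformer branch above keeps a label only for the first wordpiece of each original token: an offset pair of (0, 0) marks a special token, and a non-zero start offset marks a continuation sub-token. The snippet below is a standalone sketch of that filtering on made-up offsets; the data is illustrative and not taken from the project.

# offsets within each original token, as produced by a wordpiece tokenizer:
# (0, 0) = special token, start > 0 = continuation sub-token
offsets = [(0, 0), (0, 4), (0, 3), (3, 7), (0, 2), (0, 0)]
labels = ["X", "B-PER", "I-PER", "I-PER", "O", "X"]

realigned = [
    label
    for (start, end), label in zip(offsets, labels)
    if (start, end) != (0, 0) and start == 0  # drop specials and sub-tokens
]
print(realigned)  # ['B-PER', 'I-PER', 'O']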