Example #1
    async def predict(
        self, sources: SourcesContext
    ) -> AsyncIterator[Tuple[Record, Any, float]]:
        if not os.path.isfile(
            os.path.join(self.parent.config.output_dir, "tf_model.h5")
        ):
            raise ModelNotTrained("Train model before prediction.")
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.parent.config.output_dir
        )

        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForSequenceClassification.from_pretrained(
                self.parent.config.output_dir
            )
        trainer = TFTrainer(model=self.model, args=self.parent.config)
        async for record in sources.with_features(self.features):
            to_predict = record.features(self.features)
            eval_example = [
                InputExample(
                    0,
                    to_predict[self.features[0]],
                    None,
                    self.parent.config.label_list[0],
                )
            ]
            eval_features = glue_convert_examples_to_features(
                eval_example,
                self.tokenizer,
                self.parent.config.max_seq_length,
                self.parent.config.task_name,
                self.parent.config.label_list,
            )
            eval_dataset = await self.example_features_to_dataset(
                eval_features
            )

            all_prob = trainer.predict(eval_dataset).predictions
            max_prob_idx = all_prob.argmax(axis=-1)
            self.logger.debug(
                "Predicted probability of {} for {}: {}".format(
                    self.parent.config.predict.name, to_predict, all_prob[0],
                )
            )
            record.predicted(
                self.parent.config.predict.name,
                self.parent.config.label_list[max_prob_idx[0]],
                all_prob[0][max_prob_idx[0]],
            )
            yield record
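
This predict hook is an async generator that yields records lazily. For context, a minimal sketch of how it might be consumed (model_ctx and sources_ctx are hypothetical stand-ins for an opened DFFML model context and source context):

import asyncio

async def collect_predictions(model_ctx, sources_ctx):
    # Drain the async generator, gathering each predicted record.
    records = []
    async for record in model_ctx.predict(sources_ctx):
        records.append(record)
    return records

# records = asyncio.run(collect_predictions(model_ctx, sources_ctx))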
Example #2
File: ner_model.py Project: oliverob/dffml
    async def predict(
        self, sources: SourcesContext
    ) -> AsyncIterator[Tuple[Record, Any, float]]:
        if not os.path.isfile(
            os.path.join(self.parent.config.output_dir, "tf_model.h5")
        ):
            raise ModelNotTrained("Train model before prediction.")
        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForTokenClassification.from_pretrained(
                self.parent.config.output_dir,
                config=self.config,
                cache_dir=self.parent.config.cache_dir,
            )

        async for record in sources.with_features(
            [self.parent.config.words.name]
        ):
            sentence = record.features([self.parent.config.words.name])
            df = self.pd.DataFrame(sentence, index=[0])
            test_dataset = self.get_dataset(df, self.tokenizer, mode="test")
            trainer = TFTrainer(
                model=self.model,
                args=self.parent.config,
                train_dataset=None,
                eval_dataset=None,
                compute_metrics=self.compute_metrics,
            )
            predictions, label_ids, _ = trainer.predict(
                test_dataset.get_dataset()
            )
            preds_list, labels_list = self.align_predictions(
                predictions, label_ids
            )
            preds = [
                {word: preds_list[0][i]}
                for i, word in enumerate(
                    sentence[self.parent.config.words.name].split()
                )
            ]

            record.predicted(self.parent.config.predict.name, preds, "Nan")  # "Nan" is a placeholder confidence value
            yield record
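
For orientation, the preds list built above pairs each whitespace-separated word with its predicted tag. A tiny self-contained illustration of that shape (the sentence and tags are made up):

words = "George lives in Berlin".split()
tags = ["B-PER", "O", "O", "B-LOC"]  # hypothetical per-word model output
preds = [{word: tags[i]} for i, word in enumerate(words)]
print(preds)  # [{'George': 'B-PER'}, {'lives': 'O'}, {'in': 'O'}, {'Berlin': 'B-LOC'}]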
Example #3
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Token Classification task
    labels = get_labels(data_args.labels)
    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    # Get datasets
    train_dataset = (
        TFNerDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TFNerDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != -1:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)

        return {
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset.get_dataset() if train_dataset else None,
        eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info("  %s = %s", key, value)
                writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TFNerDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset.get_dataset())
        preds_list, labels_list = align_predictions(predictions, label_ids)
        report = classification_report(labels_list, preds_list)

        logger.info("\n%s", report)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")

        with open(output_test_results_file, "w") as writer:
            writer.write("%s\n" % report)

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")

        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                example_id = 0

                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)

                        if not preds_list[example_id]:
                            example_id += 1
                    elif preds_list[example_id]:
                        output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"

                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    return results
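
Because HfArgumentParser reads the process arguments, main() can also be driven programmatically. A minimal sketch (the model name and paths are placeholders):

import sys

sys.argv = [
    "run_tf_ner.py",
    "--model_name_or_path", "bert-base-cased",
    "--data_dir", "./data",
    "--labels", "./labels.txt",
    "--output_dir", "./out",
    "--max_seq_length", "128",
    "--do_train",
    "--do_eval",
]
# results = main()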
Example #4
training_args = TFTrainingArguments(  # opening line restored; the scraped snippet begins mid-call
    'E:\Projects\A_Idiom_detection_gihan\idiom_detection_nlp\models\\',  # output directory
    num_train_epochs=15,  # total number of training epochs
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=5,  # batch size for evaluation
    warmup_steps=500,  # number of warmup steps for learning rate scheduler
    weight_decay=0.01,  # strength of weight decay
    logging_dir=
    'E:\Projects\A_Idiom_detection_gihan\idiom_detection_nlp\models\\',  # directory for storing logs
    logging_steps=10,
    evaluation_strategy=EvaluationStrategy.NO,
)

with training_args.strategy.scope():
    model = TFDistilBertForTokenClassification.from_pretrained(
        'distilbert-base-cased', num_labels=2)

trainer = TFTrainer(
    model=model,  # the instantiated 🤗 Transformers model to be trained
    args=training_args,  # training arguments, defined above
    train_dataset=train_dataset,  # training dataset
    # eval_dataset=val_dataset             # evaluation dataset
)

trainer.train()

#predictions
prediction_results = trainer.predict(test_dataset=test_dataset)
trainer.save_model(
    "E:\Projects\A_Idiom_detection_gihan\idiom_detection_nlp\models\\")

compute_metrics(prediction_results, val_labels)
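
The final call assumes a two-argument compute_metrics(prediction_results, val_labels) defined elsewhere in the script. A minimal sketch of a compatible function (the -100 padding mask and the accuracy metric are assumptions):

import numpy as np

def compute_metrics(prediction_results, gold_labels):
    preds = prediction_results.predictions.argmax(-1)  # (batch, seq_len) predicted label ids
    labels = np.asarray(gold_labels)                   # gold labels, same shape
    mask = labels != -100                              # skip padded/special positions
    return {"accuracy": float((preds[mask] == labels[mask]).mean())}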
Example #5
with training_args.strategy.scope():
    model = TFDistilBertForTokenClassification.from_pretrained('distilbert-base-cased', num_labels=2)

trainer = TFTrainer(
    model=model,                         # the instantiated 🤗 Transformers model to be trained
    args=training_args,                  # training arguments, defined above
    train_dataset=train_dataset,         # training dataset
    # eval_dataset=val_dataset             # evaluation dataset
)

trainer.train()

# test_dataset = make_test_data([['this','is','a','moot','point'],['my','name','is','gihan']])
# a_dataset = make_test_data(val_texts[:2])
results1 = trainer.predict(test_dataset=test_dataset)
trainer.save_model("E:\Projects\A_Idiom_detection_gihan\idiom_detection_nlp\models\\")
print(results1)
print("predictions>>")
print(results1.predictions[:2])
print("labels>>")
print(results1.label_ids[:2])


print("lennnnnn")
print([len(results1.predictions)])
print("mannn")
def compute_metrics(pred):

    labels = val_labels
    preds = pred.predictions.argmax(-1)
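
The snippet is cut off inside compute_metrics. A plausible completion under the same assumptions as above (token-level accuracy with a -100 padding mask):

import numpy as np

def compute_metrics(pred):
    labels = np.asarray(val_labels)      # gold labels captured from the enclosing scope
    preds = pred.predictions.argmax(-1)  # predicted label ids
    mask = labels != -100                # ignore padded positions
    return {"accuracy": float((preds[mask] == labels[mask]).mean())}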
Example #6
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")

    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.DEBUG,
    )
    """
    logging.basicConfig(
	filename="/scratch/project_2001426/harttu/july-2020/transformers-ner/test.log",
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.DEBUG,
        filemode='w'
    )
    """
    logger.info("FROM run_tf_ner.py")
    
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Token Classification task
    labels = token_classification_task.get_labels(data_args.labels)
    print("LABELS:")
    print(labels)
    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
    print("LABEL_MAP:")
    print(label_map)
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    print("CONFIG")
    print(config)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=True,  # bool(".bin" in model_args.model_name_or_path)
            config=config,
            cache_dir=model_args.cache_dir,
        )
    print("SETTING DATASETS")
    # Get datasets
    train_dataset = (
        TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != -100:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)

        return {
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    """
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    """

    import tensorflow as tf

    initial_learning_rate = 0.001
    decay_steps = 10000
    print("INITIALIZING TRAINER")
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        #optimizers=(tf.keras.optimizers.Adam(
        #learning_rate=initial_learning_rate, beta_1=0.9, 
        #beta_2=0.999, epsilon=1e-07, amsgrad=False,
        #name='Adam'),tf.keras.optimizers.schedules.PolynomialDecay(
        #initial_learning_rate, decay_steps, 
        #end_learning_rate=0.0001, power=1.0,
        #cycle=False, name=None
        #)),
        args=training_args,
        train_dataset=train_dataset.get_dataset() if train_dataset else None,
        eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
        compute_metrics=compute_metrics,
    )
    print("TRAINING")
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    
    print("EVALUATING")
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info("  %s = %s", key, value)
                writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    print("PREDICTING")
    if training_args.do_predict:
        test_dataset = TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset.get_dataset())
        preds_list, labels_list = align_predictions(predictions, label_ids)
        report = classification_report(labels_list, preds_list)

        logger.info("\n%s", report)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")

        with open(output_test_results_file, "w") as writer:
            writer.write("%s\n" % report)

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")

        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                example_id = 0

                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)

                        if not preds_list[example_id]:
                            example_id += 1
                    elif preds_list[example_id]:
                        output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"

                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    return results
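
The commented-out optimizers argument above hints at overriding TFTrainer's default optimizer. A minimal sketch of that pattern, mirroring the values in the dead code (TFTrainer accepts an (optimizer, lr_schedule) tuple in the versions these examples target):

import tensorflow as tf

lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=0.001,
    decay_steps=10000,
    end_learning_rate=0.0001,
    power=1.0,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-07
)
# trainer = TFTrainer(..., optimizers=(optimizer, lr_schedule))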
Example #7
    def main(self):

        model_args = ModelArguments(model_name_or_path=self.model_name_or_path)
        data_args = DataTrainingArguments(data_dir=self.data_dir,
                                          labels='./labels.txt',
                                          max_seq_length=self.max_seq_length)
        training_args = TFTrainingArguments(
            output_dir=self.output_dir,
            do_eval=self.do_eval,
            do_predict=self.do_predict,
            do_train=self.do_train,
            per_device_train_batch_size=self.per_device_train_batch_size,
            save_steps=self.save_steps,
            seed=self.seed)

        if (os.path.exists(training_args.output_dir)
                and os.listdir(training_args.output_dir)
                and training_args.do_train
                and not training_args.overwrite_output_dir):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
            )

        module = import_module("tasks")

        try:
            token_classification_task_clazz = getattr(module, model_args.task_type)
            token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )

        # Setup logging
        logging.basicConfig(
            format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
            datefmt="%m/%d/%Y %H:%M:%S",
            level=logging.INFO,
        )
        logger.info(
            "n_replicas: %s, distributed training: %s, 16-bits training: %s",
            training_args.n_replicas,
            bool(training_args.n_replicas > 1),
            training_args.fp16,
        )
        logger.info("Training/evaluation parameters %s", training_args)

        # Prepare Token Classification task
        labels = token_classification_task.get_labels(data_args.labels)

        # JQ
        labels.append('PAD')  # append, not extend: extend('PAD') would add 'P', 'A', 'D' as three separate labels

        label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}

        # JQ
        label_map[9] = 'PAD'

        num_labels = len(labels)

        # Load pretrained model and tokenizer
        #
        # Distributed training:
        # The .from_pretrained methods guarantee that only one local process can concurrently
        # download model & vocab.

        # START HERE

        config = AutoConfig.from_pretrained(
            model_args.config_name
            if model_args.config_name else model_args.model_name_or_path,
            num_labels=num_labels,
            id2label=label_map,
            label2id={label: i for i, label in enumerate(labels)},
            cache_dir=model_args.cache_dir,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name
            if model_args.tokenizer_name else model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast,
        )

        with training_args.strategy.scope():
            model = TFAutoModelForTokenClassification.from_pretrained(
                model_args.model_name_or_path,
                from_pt=bool(".bin" in model_args.model_name_or_path),
                config=config,
                cache_dir=model_args.cache_dir,
            )

        # Get datasets
        train_dataset = (TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        ) if training_args.do_train else None)
        eval_dataset = (TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        ) if training_args.do_eval else None)

        def align_predictions(
                predictions: np.ndarray,
                label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
            preds = np.argmax(predictions, axis=2)
            batch_size, seq_len = preds.shape
            out_label_list = [[] for _ in range(batch_size)]
            preds_list = [[] for _ in range(batch_size)]

            for i in range(batch_size):
                for j in range(seq_len):
                    if label_ids[i, j] != -100:
                        out_label_list[i].append(label_map[label_ids[i][j]])
                        preds_list[i].append(label_map[preds[i][j]])

            return preds_list, out_label_list

        def compute_metrics(p: EvalPrediction) -> Dict:
            preds_list, out_label_list = align_predictions(
                p.predictions, p.label_ids)

            return {
                "precision": precision_score(out_label_list, preds_list),
                "recall": recall_score(out_label_list, preds_list),
                "f1": f1_score(out_label_list, preds_list),
            }

        # Initialize our Trainer
        trainer = TFTrainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset.get_dataset()
            if train_dataset else None,
            eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
            compute_metrics=compute_metrics,
        )

        # Training
        if training_args.do_train:
            trainer.train()
            trainer.save_model()
            tokenizer.save_pretrained(training_args.output_dir)

        # Evaluation
        results = {}
        if training_args.do_eval:
            logger.info("*** Evaluate ***")

            result = trainer.evaluate()
            output_eval_file = os.path.join(training_args.output_dir,
                                            "eval_results.txt")

            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")

                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

        # Predict
        if training_args.do_predict:
            test_dataset = TFTokenClassificationDataset(
                token_classification_task=token_classification_task,
                data_dir=data_args.data_dir,
                tokenizer=tokenizer,
                labels=labels,
                model_type=config.model_type,
                max_seq_length=data_args.max_seq_length,
                overwrite_cache=data_args.overwrite_cache,
                mode=Split.test,
            )

            predictions, label_ids, metrics = trainer.predict(
                test_dataset.get_dataset())
            preds_list, labels_list = align_predictions(predictions, label_ids)
            report = classification_report(labels_list, preds_list)

            logger.info("\n%s", report)

            output_test_results_file = os.path.join(training_args.output_dir,
                                                    "test_results.txt")

            with open(output_test_results_file, "w") as writer:
                writer.write("%s\n" % report)

            # Save predictions
            output_test_predictions_file = os.path.join(
                training_args.output_dir, "test_predictions.txt")

            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"),
                          "r") as f:
                    example_id = 0

                    for line in f:
                        if line.startswith(
                                "-DOCSTART-") or line == "" or line == "\n":
                            writer.write(line)

                            if not preds_list[example_id]:
                                example_id += 1
                        elif preds_list[example_id]:
                            output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"

                            writer.write(output_line)
                        else:
                            logger.warning(
                                "Maximum sequence length exceeded: No prediction for '%s'.",
                                line.split()[0])

        return results
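
The # JQ patch above adds a padding label to the label set; it depends on the append/extend distinction, demonstrated here with toy labels:

labels = ["O", "B-PER", "I-PER"]
labels_bad = labels + list("PAD")  # what labels.extend('PAD') would produce
print(labels_bad)                  # ['O', 'B-PER', 'I-PER', 'P', 'A', 'D']
labels.append("PAD")
print(labels)                      # ['O', 'B-PER', 'I-PER', 'PAD']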
Example #8
trainer = TFTrainer(  # opening line restored; the scraped snippet begins mid-call
    model=model,  # the instantiated 🤗 Transformers model to be trained
    args=training_args,  # training arguments, defined above
    train_dataset=train_dataset,  # training dataset
    # eval_dataset=val_dataset             # evaluation dataset
    # compute_metrics=compute_metrics,
)

trainer.train()

model.save_pretrained('./EPIE_idiom_model')
tokenizer.save_pretrained('./EPIE_idiom_model')
# preds_output = trainer.predict(emotions_encoded["validation"])
# print(preds_output.metrics)

#predictions
prediction_results = trainer.predict(test_dataset=test_dataset)
# print(prediction_results.label_ids)
# print(val_texts)
# print(val_tags)
trainer.save_model(
    "E:\Projects\A_Idiom_detection_gihan\idiom_detection_nlp\models\\epie_models\\"
)

compute_metrics(prediction_results, val_labels)

out_df = pd.DataFrame()
out_df['test_text'] = val_texts
out_df['test_ground_labels'] = val_labels
out_df['test_predictions'] = list(prediction_results.predictions.argmax(-1))  # predicted label ids (argmax over logits)

out_df.to_csv('pred_investigation.csv')
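
When inspecting the CSV it is often clearer to map label ids back to tag strings first. A minimal sketch (id2tag is hypothetical, mirroring the num_labels=2 setup used in these examples):

id2tag = {0: "O", 1: "B-IDIOM"}  # hypothetical mapping for the two-label setup
pred_ids = prediction_results.predictions.argmax(-1)
pred_tags = [[id2tag[int(i)] for i in row] for row in pred_ids]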