Example #1
File: ner_model.py Project: oliverob/dffml
    async def accuracy(self, sources: Sources):
        if not os.path.isfile(
            os.path.join(self.parent.config.output_dir, "tf_model.h5")
        ):
            raise ModelNotTrained("Train model before assessing for accuracy.")

        data_df = await self._preprocess_data(sources)
        eval_dataset = self.get_dataset(data_df, self.tokenizer, mode="eval")
        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForTokenClassification.from_pretrained(
                self.parent.config.output_dir,
                config=self.config,
                cache_dir=self.parent.config.cache_dir,
            )

        trainer = TFTrainer(
            model=self.model,
            args=self.parent.config,
            train_dataset=None,
            eval_dataset=eval_dataset.get_dataset(),
            compute_metrics=self.compute_metrics,
        )

        result = trainer.evaluate()
        return Accuracy(result["eval_f1"])
Example #2
    async def accuracy(self, sources: Sources):
        if not os.path.isfile(
                os.path.join(self.parent.config.output_dir, "tf_model.h5")):
            raise ModelNotTrained("Train model before assessing for accuracy.")

        config = self.parent.config._asdict()
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.parent.config.output_dir)
        eval_features = await self._preprocess_data(sources)
        eval_dataset = await self.example_features_to_dataset(eval_features)

        def compute_metrics(p: EvalPrediction) -> Dict:
            preds = self.np.argmax(p.predictions, axis=1)
            return classification_compute_metrics(preds, p.label_ids)

        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForSequenceClassification.from_pretrained(
                config["directory"])
        trainer = TFTrainer(
            model=self.model,
            args=self.parent.config,
            eval_dataset=eval_dataset,
            compute_metrics=compute_metrics,
        )
        result = trainer.evaluate()
        return Accuracy(result["eval_acc"])
Example #3
    async def predict(
        self, sources: SourcesContext
    ) -> AsyncIterator[Tuple[Record, Any, float]]:
        if not os.path.isfile(
            os.path.join(self.parent.config.output_dir, "tf_model.h5")
        ):
            raise ModelNotTrained("Train model before prediction.")
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.parent.config.output_dir
        )

        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForSequenceClassification.from_pretrained(
                self.parent.config.output_dir
            )
        trainer = TFTrainer(model=self.model, args=self.parent.config)
        async for record in sources.with_features(self.features):
            to_predict = record.features(self.features)
            eval_example = [
                InputExample(
                    0,
                    to_predict[self.features[0]],
                    None,
                    self.parent.config.label_list[0],
                )
            ]
            eval_features = glue_convert_examples_to_features(
                eval_example,
                self.tokenizer,
                self.parent.config.max_seq_length,
                self.parent.config.task_name,
                self.parent.config.label_list,
            )
            eval_dataset = await self.example_features_to_dataset(
                eval_features
            )

            all_prob = trainer.predict(eval_dataset).predictions
            max_prob_idx = all_prob.argmax(axis=-1)
            self.logger.debug(
                "Predicted probability of {} for {}: {}".format(
                    self.parent.config.predict.name, to_predict, all_prob[0],
                )
            )
            record.predicted(
                self.parent.config.predict.name,
                self.parent.config.label_list[max_prob_idx[0]],
                all_prob[0][max_prob_idx[0]],
            )
            yield record
Example #4
def pretrain_and_evaluate(args, model, eval_only):
    val_dataset = tf.data.TFRecordDataset(args.val_datapath)
    if eval_only:
        train_dataset = val_dataset
    else:
        logger.info(
            f'Loading and tokenizing training data is usually slow: {args.train_datapath}'
        )
        # tf.data.Dataset cannot be instantiated directly; read the TFRecord
        # file the same way as the validation set above.
        train_dataset = tf.data.TFRecordDataset(args.train_datapath)

    trainer = TFTrainer(model=model,
                        args=args,
                        train_dataset=train_dataset,
                        eval_dataset=val_dataset)

    eval_loss = trainer.evaluate()
    eval_loss = eval_loss['eval_loss']
    logger.info(f'Initial eval bpc: {eval_loss / math.log(2)}')

    if not eval_only:
        trainer.train()
        trainer.save_model()

        eval_loss = trainer.evaluate()
        eval_loss = eval_loss['eval_loss']
        logger.info(f'Eval bpc after pretraining: {eval_loss / math.log(2)}')
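The division by math.log(2) above converts the cross-entropy loss from nats to bits per character (bpc), since log2(x) = ln(x) / ln(2). A minimal sketch of the same arithmetic, with a hypothetical loss value:

import math

eval_loss_nats = 2.079  # hypothetical value, as returned by trainer.evaluate()
bpc = eval_loss_nats / math.log(2)  # ln(8) / ln(2) == 3 bits per character
print(f'bpc: {bpc:.2f}')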
Example #5
    def _hf_train(self):
        """Train the model using HuggingFace Trainer"""
        self._training_args = TFTrainingArguments(
            output_dir='./results',  # output directory
            num_train_epochs=3,  # total number of training epochs
            per_device_train_batch_size=self.batch_size,  # batch size per device during training
            per_device_eval_batch_size=self.batch_size,  # batch size for evaluation
            warmup_steps=500,  # number of warmup steps for learning rate scheduler
            weight_decay=0.01,  # strength of weight decay
            logging_dir='./logs',  # directory for storing logs
            logging_steps=10,
        )

        # with self._training_args.strategy.scope():
        #     self._model = TFDistilBertForSequenceClassification.from_pretrained(self.model_name)

        self._trainer = TFTrainer(
            model=self.model,
            args=self._training_args,  # training arguments
            train_dataset=self.X,  # training dataset
            eval_dataset=self.V  # evaluation dataset
        )
        self._trainer.train()
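The commented-out block above is not incidental: TFTrainer generally expects the model's variables to be created inside the training arguments' distribution strategy scope, as the other examples in this section do. A minimal sketch of that pattern (training_args and model_name stand in for the attributes used above):

from transformers import TFDistilBertForSequenceClassification

with training_args.strategy.scope():
    # Variables created inside the strategy scope are placed correctly for
    # (possibly distributed) training, which TFTrainer relies on.
    model = TFDistilBertForSequenceClassification.from_pretrained(model_name)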
Example #6
File: ner_model.py Project: oliverob/dffml
    async def predict(
        self, sources: SourcesContext
    ) -> AsyncIterator[Tuple[Record, Any, float]]:
        if not os.path.isfile(
            os.path.join(self.parent.config.output_dir, "tf_model.h5")
        ):
            raise ModelNotTrained("Train model before prediction.")
        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForTokenClassification.from_pretrained(
                self.parent.config.output_dir,
                config=self.config,
                cache_dir=self.parent.config.cache_dir,
            )

        async for record in sources.with_features(
            [self.parent.config.words.name]
        ):
            sentence = record.features([self.parent.config.words.name])
            df = self.pd.DataFrame(sentence, index=[0])
            test_dataset = self.get_dataset(df, self.tokenizer, mode="test")
            trainer = TFTrainer(
                model=self.model,
                args=self.parent.config,
                train_dataset=None,
                eval_dataset=None,
                compute_metrics=self.compute_metrics,
            )
            predictions, label_ids, _ = trainer.predict(
                test_dataset.get_dataset()
            )
            preds_list, labels_list = self.align_predictions(
                predictions, label_ids
            )
            preds = [
                {word: preds_list[0][i]}
                for i, word in enumerate(
                    sentence[self.parent.config.words.name].split()
                )
            ]

            record.predicted(self.parent.config.predict.name, preds, "Nan")
            yield record
Example #7
    async def train(self, sources: Sources):
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.parent.config.tokenizer_name
            if self.parent.config.tokenizer_name
            else self.parent.config.model_name_or_path,
            cache_dir=self.parent.config.cache_dir,
        )
        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForSequenceClassification.from_pretrained(
                self.parent.config.model_name_or_path,
                from_pt=self.parent.config.from_pt,
                config=self.config,
                cache_dir=self.parent.config.cache_dir,
            )
        train_features = await self._preprocess_data(sources)
        train_dataset = await self.example_features_to_dataset(train_features)
        trainer = TFTrainer(
            model=self.model,
            args=self.parent.config,
            train_dataset=train_dataset,
        )
        trainer.train()
        self.logger.info("Saving model to %s", self.parent.config.output_dir)
        trainer.save_model()
        self.tokenizer.save_pretrained(self.parent.config.output_dir)
Example #8
    async def train(self, sources: Sources):
        with self.parent.config.strategy.scope():
            self.model = TFAutoModelForTokenClassification.from_pretrained(
                self.parent.config.model_name_or_path,
                from_pt=self.parent.config.from_pt,
                config=self.config,
                cache_dir=self.parent.config.cache_dir,
            )

        data_df = await self._preprocess_data(sources)
        train_dataset = self.get_dataset(
            data_df,
            self.tokenizer,
            mode="train",
        )
        trainer = TFTrainer(
            model=self.model,
            args=self.parent.config,
            train_dataset=train_dataset.get_dataset(),
            eval_dataset=None,
            compute_metrics=self.compute_metrics,
        )
        trainer.train()
        trainer.save_model()
        self.tokenizer.save_pretrained(self.parent.config.output_dir)
Example #9
def finetune_with_tftrainer():
    """ Fine tune with TFTrainer but it's not working with some package error
        Found an issue from github of the same problem I have, follow up there:
        https://github.com/huggingface/transformers/issues/5151
    """
    tokenizer = AutoTokenizer.from_pretrained(os.getenv('MODEL_NAME'))

    # Get data for fine-tuning
    dataset, dataset_size, num_labels = build_dataset(tokenizer)

    config = AutoConfig.from_pretrained(os.getenv('MODEL_NAME'),
                                        num_labels=num_labels)

    training_args = TFTrainingArguments(output_dir=os.getenv('OUTPUT_DIR'),
                                        logging_dir=os.getenv('OUTPUT_DIR'),
                                        logging_first_step=True,
                                        logging_steps=1,
                                        overwrite_output_dir=True,
                                        do_train=True,
                                        do_eval=True,
                                        learning_rate=2e-5,
                                        debug=True)

    with training_args.strategy.scope():
        # model = TFAutoModel.from_pretrained(os.getenv('MODEL_NAME'), config=config, cache_dir=os.getenv('OUTPUT_DIR'))
        model = TCPMDistilBertClassification.from_pretrained(
            os.getenv('MODEL_NAME'), config=config)

    # shuffle and split the train/test sets manually
    dataset = dataset.shuffle(dataset_size)
    train_size = int(dataset_size * (4 / 5))
    # test_size = dataset_size - train_size  # unused, so commented out

    train_data = dataset.take(train_size)  #.batch(TRAIN_BATCH_SIZE)
    test_data = dataset.skip(train_size)  #.batch(TEST_BATCH_SIZE)

    trainer = TFTrainer(model=model,
                        args=training_args,
                        train_dataset=train_data,
                        eval_dataset=test_data,
                        compute_metrics=compute_metrics)

    print('Sample training data:')
    for idx, example in enumerate(train_data.take(2)):
        print(f'Input #{idx}:')
        pprint(example[0])
        print(f'Target #{idx}:')
        pprint(example[1])

    print('Sample test data:')
    for idx, example in enumerate(test_data.take(2)):
        print(f'Input #{idx}:')
        pprint(example[0])
        print(f'Output #{idx}:')
        pprint(example[1])

    print('TF Trainer args:')
    pprint(training_args)
    print('TCPM Model config:')
    pprint(config)
    print('Training start...')

    # Train the model
    trainer.train()
    trainer.save_model()
    tokenizer.save_pretrained(os.getenv('OUTPUT_DIR'))

    # Evaluate the model
    result = trainer.evaluate()
    pprint(result)
    with open(os.path.join(os.getenv('OUTPUT_DIR'), 'eval_results.json'),
              'w') as fwrite:
        json.dump(result, fwrite, indent=4)
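Whatever the precise error behind the linked issue, a frequent TFTrainer failure with hand-built tf.data pipelines is an unknown dataset cardinality; a later example in this section applies assert_cardinality for exactly this reason. A minimal sketch reusing the variables above (assuming build_dataset returned a dataset without a known length):

import tensorflow as tf

# take()/skip() propagate an unknown cardinality, and TFTrainer needs the
# dataset length; asserting it explicitly is the usual workaround.
train_data = train_data.apply(
    tf.data.experimental.assert_cardinality(train_size))
test_data = test_data.apply(
    tf.data.experimental.assert_cardinality(dataset_size - train_size))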
Example #10
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (os.path.exists(training_args.output_dir)
            and os.listdir(training_args.output_dir) and training_args.do_train
            and not training_args.overwrite_output_dir):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name
        if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name
        if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label
                  for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)

        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        trainer.log_metrics("eval", result)
        trainer.save_metrics("eval", result)
        results.update(result)

    return results
Example #11
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, GlueDataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    try:
        num_labels = glue_tasks_num_labels["mnli" if data_args.task_name == "mnli-mm" else data_args.task_name]
        output_mode = glue_output_modes[data_args.task_name]
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    # Get datasets
    train_dataset = (
        get_tfds(
            task_name=data_args.task_name,
            tokenizer=tokenizer,
            max_seq_length=data_args.max_seq_length,
            data_dir=data_args.data_dir,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        get_tfds(
            task_name=data_args.task_name,
            tokenizer=tokenizer,
            max_seq_length=data_args.max_seq_length,
            mode=Split.dev,
            data_dir=data_args.data_dir,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        if output_mode == "classification":
            preds = np.argmax(p.predictions, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(p.predictions)
        return glue_compute_metrics(data_args.task_name, preds, p.label_ids)

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info("  %s = %s", key, value)
                writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
Example #12
def run_bert_regression_trainer():
    """ Run bert single class classification(a.k.a regression) model."""
    print('START TRAINING FOR REGRESSION')
    log_dir = os.path.join(os.getenv('OUTPUT_DIR'), 'hf_trainer')

    tokenizer = AutoTokenizer.from_pretrained(os.getenv('MODEL_NAME'))
    config = AutoConfig.from_pretrained(os.getenv('MODEL_NAME'), num_labels=1)

    training_args = TFTrainingArguments(
        output_dir=log_dir,
        logging_dir=log_dir,
        logging_first_step=True,
        logging_steps=1,
        overwrite_output_dir=True,
        do_train=True,
        do_eval=True,
        learning_rate=2e-5,
        debug=True,
    )

    with training_args.strategy.scope():
        # model = TFDistilBertForSequenceClassification.from_pretrained(os.getenv('MODEL_NAME'), config=config)
        model = TCPMDistilBertRegression.from_pretrained(
            os.getenv('MODEL_NAME'), config=config)

    print('\nModel Config:')
    print(config)
    print('Tokenizer: ', tokenizer)
    print('Model: ', model)
    print('\nTFTrainingArguments:')
    print(training_args)

    tc = TopCoder()
    encoded_text = tc.get_bert_encoded_txt_features(tokenizer)
    metadata = tc.get_meta_data_features(encoded_tech=True, softmax_tech=True)
    target = tc.get_target()

    split = int((4 / 5) * len(target))
    dataset = tf.data.Dataset.from_tensor_slices(
        (dict(**encoded_text, meta_input=metadata), target))
    dataset = dataset.shuffle(len(target))
    train_ds, test_ds = dataset.take(split), dataset.skip(split)

    print('\nTrain dataset samples:')
    for el in train_ds.take(3):
        pprint(el)
    print('\nTest dataset samples:')
    for el in test_ds.take(3):
        pprint(el)

    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_ds,
        eval_dataset=test_ds,
        compute_metrics=compute_metrics,
    )

    trainer.train()
    trainer.save_model()
    tokenizer.save_pretrained(log_dir)

    result = trainer.evaluate()
    print('\nTraining eval:')
    pprint(result)
    with open(os.path.join(log_dir, 'eval_results.json'), 'w') as fwrite:
        json.dump(result, fwrite, indent=4)
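A note on why this works as regression: with num_labels=1 the model emits a single value, and the stock TF sequence-classification loss in Transformers scores it with mean squared error instead of cross-entropy. A minimal sketch of the same setup with a stock head ('distilbert-base-uncased' stands in for os.getenv('MODEL_NAME')):

from transformers import AutoConfig, TFAutoModelForSequenceClassification

# One output unit turns the classification head into a regressor.
config = AutoConfig.from_pretrained('distilbert-base-uncased', num_labels=1)
model = TFAutoModelForSequenceClassification.from_pretrained(
    'distilbert-base-uncased', config=config)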
Example #13
test_dataset = tf.data.Dataset.from_tensor_slices(
    (dict(test_encodings), test_labels))

print("Start training...")
from transformers import TFBertForSequenceClassification, TFTrainer, TFTrainingArguments

training_args = TFTrainingArguments(
    output_dir='./results',
    num_train_epochs=3,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=10,
    evaluation_strategy="steps",
    eval_steps=30,
)

with training_args.strategy.scope():
    model = TFBertForSequenceClassification.from_pretrained(
        model_str,
        num_labels=len(label_vocab),
        output_attentions=False,
        output_hidden_states=False)

trainer = TFTrainer(model=model,
                    args=training_args,
                    train_dataset=train_dataset,
                    eval_dataset=valid_dataset)
Example #14
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (os.path.exists(training_args.output_dir)
            and os.listdir(training_args.output_dir) and training_args.do_train
            and not training_args.overwrite_output_dir):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Question-Answering task
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name
        if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name
        if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForQuestionAnswering.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    # Get datasets
    if data_args.use_tfds:
        if data_args.version_2_with_negative:
            logger.warning(
                "tensorflow_datasets does not handle version 2 of SQuAD. Switching to version 1 automatically."
            )

        try:
            import tensorflow_datasets as tfds
        except ImportError:
            raise ImportError(
                "If no data_dir is specified, tensorflow_datasets needs to be installed."
            )

        tfds_examples = tfds.load("squad", data_dir=data_args.data_dir)
        train_examples = (SquadV1Processor().get_examples_from_dataset(
            tfds_examples, evaluate=False) if training_args.do_train else None)
        eval_examples = (SquadV1Processor().get_examples_from_dataset(
            tfds_examples, evaluate=True) if training_args.do_eval else None)
    else:
        processor = SquadV2Processor(
        ) if data_args.version_2_with_negative else SquadV1Processor()
        train_examples = processor.get_train_examples(
            data_args.data_dir) if training_args.do_train else None
        eval_examples = processor.get_dev_examples(
            data_args.data_dir) if training_args.do_eval else None

    train_dataset = (squad_convert_examples_to_features(
        examples=train_examples,
        tokenizer=tokenizer,
        max_seq_length=data_args.max_seq_length,
        doc_stride=data_args.doc_stride,
        max_query_length=data_args.max_query_length,
        is_training=True,
        return_dataset="tf",
    ) if training_args.do_train else None)

    train_dataset = train_dataset.apply(
        tf.data.experimental.assert_cardinality(len(train_examples)))

    eval_dataset = (squad_convert_examples_to_features(
        examples=eval_examples,
        tokenizer=tokenizer,
        max_seq_length=data_args.max_seq_length,
        doc_stride=data_args.doc_stride,
        max_query_length=data_args.max_query_length,
        is_training=False,
        return_dataset="tf",
    ) if training_args.do_eval else None)

    eval_dataset = eval_dataset.apply(
        tf.data.experimental.assert_cardinality(len(eval_examples)))

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
Example #15
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Token Classification task
    labels = get_labels(data_args.labels)
    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    # Get datasets
    train_dataset = (
        TFNerDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TFNerDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != -1:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)

        return {
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset.get_dataset() if train_dataset else None,
        eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info("  %s = %s", key, value)
                writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TFNerDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset.get_dataset())
        preds_list, labels_list = align_predictions(predictions, label_ids)
        report = classification_report(labels_list, preds_list)

        logger.info("\n%s", report)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")

        with open(output_test_results_file, "w") as writer:
            writer.write("%s\n" % report)

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")

        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                example_id = 0

                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)

                        if not preds_list[example_id]:
                            example_id += 1
                    elif preds_list[example_id]:
                        output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"

                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    return results
Example #16
training_args = TFTrainingArguments(
    output_dir='E:\\Projects\\A_Idiom_detection_gihan\\idiom_detection_nlp\\models\\',  # output directory
    num_train_epochs=15,  # total number of training epochs
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=5,  # batch size for evaluation
    warmup_steps=500,  # number of warmup steps for learning rate scheduler
    weight_decay=0.01,  # strength of weight decay
    logging_dir='E:\\Projects\\A_Idiom_detection_gihan\\idiom_detection_nlp\\models\\',  # directory for storing logs
    logging_steps=10,
    evaluation_strategy=EvaluationStrategy.NO,
)

with training_args.strategy.scope():
    model = TFDistilBertForTokenClassification.from_pretrained(
        'distilbert-base-cased', num_labels=2)

trainer = TFTrainer(
    model=model,  # the instantiated 🤗 Transformers model to be trained
    args=training_args,  # training arguments, defined above
    train_dataset=train_dataset,  # training dataset
    # eval_dataset=val_dataset             # evaluation dataset
)

trainer.train()

#predictions
prediction_results = trainer.predict(test_dataset=test_dataset)
trainer.save_model(
    'E:\\Projects\\A_Idiom_detection_gihan\\idiom_detection_nlp\\models\\')

compute_metrics(prediction_results, val_labels)
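compute_metrics is not defined in this snippet. A hedged sketch of what it could look like for token classification, given that trainer.predict returns a PredictionOutput whose .predictions are per-token logits (the -100 ignore-index is an assumption, not taken from the source):

import numpy as np

def compute_metrics(prediction_results, labels):
    # Hypothetical reconstruction: argmax the per-token logits, then score
    # only positions that carry a real label.
    preds = np.argmax(prediction_results.predictions, axis=2)
    labels = np.asarray(labels)
    mask = labels != -100  # assumed padding label convention
    return {'acc': float((preds[mask] == labels[mask]).mean())}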
Example #17
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")

    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.DEBUG,
    )
    """
    logging.basicConfig(
	filename="/scratch/project_2001426/harttu/july-2020/transformers-ner/test.log",
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.DEBUG,
        filemode='w'
    )
    """
    logger.info("FROM run_tf_ner.py")
    
    logger.info(
        "n_replicas: %s, distributed training: %s, 16-bits training: %s",
        training_args.n_replicas,
        bool(training_args.n_replicas > 1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Token Classification task
    labels = token_classification_task.get_labels(data_args.labels)
    print("LABELS:")
    print(labels)
    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
    print("LABEL_MAP:")
    print(label_map)
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    print("CONFIG")
    print(config)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=True,  # bool(".bin" in model_args.model_name_or_path)
            config=config,
            cache_dir=model_args.cache_dir,
        )
    print("SETTING DATASETS")
    # Get datasets
    train_dataset = (
        TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != -100:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)

        return {
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    """
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    """

    import tensorflow as tf

    initial_learning_rate = 0.001
    decay_steps = 10000
    print("INITIALIZING TRAINER")
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        #optimizers=(tf.keras.optimizers.Adam(
        #learning_rate=initial_learning_rate, beta_1=0.9, 
        #beta_2=0.999, epsilon=1e-07, amsgrad=False,
        #name='Adam'),tf.keras.optimizers.schedules.PolynomialDecay(
        #initial_learning_rate, decay_steps, 
        #end_learning_rate=0.0001, power=1.0,
        #cycle=False, name=None
        #)),
        args=training_args,
        train_dataset=train_dataset.get_dataset() if train_dataset else None,
        eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
        compute_metrics=compute_metrics,
    )
    print("TRAINING")
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    
    print("EVALUATING")
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info("  %s = %s", key, value)
                writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    print("PREDICTING")
    if training_args.do_predict:
        test_dataset = TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset.get_dataset())
        preds_list, labels_list = align_predictions(predictions, label_ids)
        report = classification_report(labels_list, preds_list)

        logger.info("\n%s", report)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")

        with open(output_test_results_file, "w") as writer:
            writer.write("%s\n" % report)

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")

        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                example_id = 0

                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        writer.write(line)

                        if not preds_list[example_id]:
                            example_id += 1
                    elif preds_list[example_id]:
                        output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"

                        writer.write(output_line)
                    else:
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    return results
Example #18
    def init_trainer(self, model, args, dataset):
        return TFTrainer(model=model, args=args, train_dataset=dataset)
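A hedged usage sketch for this helper; every name below (wrapper, model, train_dataset) is illustrative rather than taken from the source:

from transformers import TFTrainingArguments

args = TFTrainingArguments(output_dir='./results', num_train_epochs=1)
# model: a TF model built under args.strategy.scope();
# train_dataset: a tf.data.Dataset of (features, labels) with known cardinality
trainer = wrapper.init_trainer(model, args, train_dataset)
trainer.train()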
Example #19
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.warning(
        "device: %s, n_replicas: %s, 16-bits training: %s",
        training_args.device,
        training_args.n_replicas,
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForMultipleChoice.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    # Get datasets
    train_dataset = (
        TFMultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TFMultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset.get_dataset() if train_dataset else None,
        eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        trainer.log_metrics("eval", results)
        trainer.save_metrics("eval", results)

        results.update(result)

    return results
Example #20
    def train(self):
        import tensorflow_addons as tfa
        import tensorflow as tf

        self.scheduler = None
        # Encoding train_texts and val_texts

        # x-digit code -> unique number
        self.classes = utils.Bidict(
            {c: i
             for i, c in enumerate(self.df_train['cat1'].unique())})

        # Convert datasets to transformer format
        train_dataset = self._create_transformer_dataset(self.df_train)

        val_data = train_dataset
        if settings.VALID_PER_CLASS:
            val_data = self._create_transformer_dataset(self.df_valid)
        val_data = val_data.batch(settings.MINIBATCH)

        # Fine-tune the pre-trained model
        if self.train_with_tensorflow_directly:
            from transformers import TFAutoModelForSequenceClassification
            model = TFAutoModelForSequenceClassification.from_pretrained(
                self.pretrained_model_name,
                num_labels=len(self.classes),
                from_pt=True)

            # In AdamW weight decay value had no effect in our tests.
            # hard to find stable tuning, harder to beat vanilla Adam
            # optimizer = AdamW(
            #     learning_rate=0.00005,
            #     # 0.1 (default but might be too large), 0.01
            #     weight_decay=0.002
            # )
            if 1:
                learning_rate = settings.LEARNING_RATE

                if learning_rate is None:
                    if settings.FULL_TEXT:
                        # 5e-5 can fail on L1 e.g. Seed 7
                        # 1e-5 looks OK (TBC) on L1
                        # but not on L3 (no convergence)
                        # 5e-6 very slow
                        # Candidates: 5E-06 (40%), 7E-06 (49%)
                        # FOR L3
                        # learning_rate = 7E-06
                        # FOR L1
                        # learning_rate = 5e-6
                        # legal-small
                        ## learning_rate = 7e-6
                        # legal-base
                        learning_rate = 1e-5
                    else:
                        # learning_rate = 5e-5
                        learning_rate = 3e-5

                print(f'\n LR = {learning_rate:0.0E}')
                from transformers import AdamWeightDecay
                optimizer = AdamWeightDecay(
                    learning_rate=learning_rate,
                    # 0.1 (default but might be too large), 0.01
                    # weight_decay_rate=0.0
                )
            else:
                if settings.FULL_TEXT:
                    learning_rate = 1e-3
                else:
                    learning_rate = 7e-3

                print(f'\n LR = {learning_rate:0.0E}')
                optimizer = tfa.optimizers.SGDW(
                    learning_rate=learning_rate,
                    momentum=0.0,
                    weight_decay=0.00
                    # 0.1 (default but might be too large), 0.01
                    # weight_decay_rate=0.5
                )

            # optimizer = tf.keras.optimizers.MomentumOptimizer(
            #     learning_rate=self.learning_rate
            # )
            model.compile(optimizer=optimizer,
                          loss=model.compute_loss,
                          metrics=['accuracy'])

            callbacks = None

            self.scheduler = None
            if self.scheduler_class:
                self.scheduler = self.scheduler_class()
                callbacks = [self.scheduler]

                self.scheduler.pretrain(model, train_dataset)

            class_weight = None
            if settings.CLASS_WEIGHT:
                class_sizes = self.df_train.cat1.value_counts()
                class_sizes_max = class_sizes.max()
                class_weight = {
                    i: class_sizes_max / class_sizes[c]
                    for c, i in self.classes.items()
                }

            self.history = model.fit(
                train_dataset.batch(settings.MINIBATCH),
                epochs=self.max_epochs,
                batch_size=settings.MINIBATCH,
                validation_data=val_data,
                callbacks=callbacks,
                class_weight=class_weight,
            )
            self.model = model

            self.render_training()
        else:
            # TODO: fix this. Almost immediate but random results
            #  compared to TF method above.
            from transformers import (TFTrainer, TFTrainingArguments,
                                      TFAutoModelForSequenceClassification)
            # from transformers import TFAutoModel

            training_args = TFTrainingArguments(
                output_dir=self.results_path,  # output directory
                num_train_epochs=self.max_epochs,  # total number of training epochs
                per_device_train_batch_size=settings.MINIBATCH,  # batch size per device during training
                per_device_eval_batch_size=64,  # batch size for evaluation
                warmup_steps=500,  # number of warmup steps for learning rate scheduler
                weight_decay=0.01,  # strength of weight decay
                logging_dir=self.logs_path,  # directory for storing logs
            )

            print('h2')

            with training_args.strategy.scope():
                # trainer_model = TFSequenceClassification.from_pretrained(
                #     Transformers.pretrained_model, num_labels=len(self.classes)
                # )
                trainer_model = TFAutoModelForSequenceClassification.from_pretrained(
                    Transformers.pretrained_model_name,
                    num_labels=len(self.classes))

            print('h3')

            trainer = TFTrainer(
                model=trainer_model,
                args=training_args,
                train_dataset=train_dataset,
                eval_dataset=train_dataset,
            )

            print('h4')

            trainer.train()

            # print(trainer.evaluate())

            self.model = trainer_model
Example #21
    def main(self):

        model_args = ModelArguments(model_name_or_path=self.model_name_or_path)
        data_args = DataTrainingArguments(data_dir=self.data_dir,
                                          labels='./labels.txt',
                                          max_seq_length=self.max_seq_length)
        training_args = TFTrainingArguments(
            output_dir=self.output_dir,
            do_eval=self.do_eval,
            do_predict=self.do_predict,
            do_train=self.do_train,
            per_device_train_batch_size=self.per_device_train_batch_size,
            save_steps=self.save_steps,
            seed=self.seed)

        if (os.path.exists(training_args.output_dir)
                and os.listdir(training_args.output_dir)
                and training_args.do_train
                and not training_args.overwrite_output_dir):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
            )

        module = import_module("tasks")
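        # tasks.py is assumed to follow the HuggingFace token-classification
        # example: it defines TokenClassificationTask subclasses such as NER,
        # and model_args.task_type selects one of them by class name.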

        try:
            token_classification_task_clazz = getattr(module, model_args.task_type)
            token_classification_task: TokenClassificationTask = (
                token_classification_task_clazz())
        except AttributeError:
            raise ValueError(
                f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )

        # Setup logging
        logging.basicConfig(
            format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
            datefmt="%m/%d/%Y %H:%M:%S",
            level=logging.INFO,
        )
        logger.info(
            "n_replicas: %s, distributed training: %s, 16-bits training: %s",
            training_args.n_replicas,
            bool(training_args.n_replicas > 1),
            training_args.fp16,
        )
        logger.info("Training/evaluation parameters %s", training_args)

        # Prepare Token Classification task
        labels = token_classification_task.get_labels(data_args.labels)

        # JQ: append a padding label (note: extend('PAD') would add the
        # characters 'P', 'A', 'D' as three separate labels)
        labels.append('PAD')

        label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}

        # JQ: hard-coded on the assumption that the base label set has nine
        # entries, so 'PAD' lands at index 9; enumerate above already covers it
        label_map[9] = 'PAD'

        num_labels = len(labels)

        # Load pretrained model and tokenizer
        #
        # Distributed training:
        # The .from_pretrained methods guarantee that only one local process can concurrently
        # download model & vocab.

        # START HERE

        config = AutoConfig.from_pretrained(
            model_args.config_name
            if model_args.config_name else model_args.model_name_or_path,
            num_labels=num_labels,
            id2label=label_map,
            label2id={label: i for i, label in enumerate(labels)},
            cache_dir=model_args.cache_dir,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name
            if model_args.tokenizer_name else model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast,
        )

        with training_args.strategy.scope():
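            # from_pt loads PyTorch weights (a *.bin checkpoint) into the TF
            # model when the path points at one; TF weights load directly.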
            model = TFAutoModelForTokenClassification.from_pretrained(
                model_args.model_name_or_path,
                from_pt=bool(".bin" in model_args.model_name_or_path),
                config=config,
                cache_dir=model_args.cache_dir,
            )

        # Get datasets
        train_dataset = (TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        ) if training_args.do_train else None)
        eval_dataset = (TFTokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        ) if training_args.do_eval else None)

        def align_predictions(
            predictions: np.ndarray, label_ids: np.ndarray
        ) -> Tuple[List[List[str]], List[List[str]]]:
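            # Collapse logits to per-token label ids, then drop positions
            # masked with -100 (padding / special tokens) and map the rest
            # back to label strings, keeping predictions and gold labels
            # aligned per example.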
            preds = np.argmax(predictions, axis=2)
            batch_size, seq_len = preds.shape
            out_label_list = [[] for _ in range(batch_size)]
            preds_list = [[] for _ in range(batch_size)]

            for i in range(batch_size):
                for j in range(seq_len):
                    if label_ids[i, j] != -100:
                        out_label_list[i].append(label_map[label_ids[i][j]])
                        preds_list[i].append(label_map[preds[i][j]])

            return preds_list, out_label_list

        def compute_metrics(p: EvalPrediction) -> Dict:
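            # precision/recall/f1 are assumed to be seqeval's sequence-level
            # scorers, i.e. entity-level metrics over the aligned label lists.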
            preds_list, out_label_list = align_predictions(
                p.predictions, p.label_ids)

            return {
                "precision": precision_score(out_label_list, preds_list),
                "recall": recall_score(out_label_list, preds_list),
                "f1": f1_score(out_label_list, preds_list),
            }

        # Initialize our Trainer
        trainer = TFTrainer(
            model=model,
            args=training_args,
            train_dataset=train_dataset.get_dataset()
            if train_dataset else None,
            eval_dataset=eval_dataset.get_dataset() if eval_dataset else None,
            compute_metrics=compute_metrics,
        )

        # Training
        if training_args.do_train:
            trainer.train()
            trainer.save_model()
            tokenizer.save_pretrained(training_args.output_dir)

        # Evaluation
        results = {}
        if training_args.do_eval:
            logger.info("*** Evaluate ***")

            result = trainer.evaluate()
            output_eval_file = os.path.join(training_args.output_dir,
                                            "eval_results.txt")

            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")

                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

        # Predict
        if training_args.do_predict:
            test_dataset = TFTokenClassificationDataset(
                token_classification_task=token_classification_task,
                data_dir=data_args.data_dir,
                tokenizer=tokenizer,
                labels=labels,
                model_type=config.model_type,
                max_seq_length=data_args.max_seq_length,
                overwrite_cache=data_args.overwrite_cache,
                mode=Split.test,
            )

            predictions, label_ids, metrics = trainer.predict(
                test_dataset.get_dataset())
            preds_list, labels_list = align_predictions(predictions, label_ids)
            report = classification_report(labels_list, preds_list)

            logger.info("\n%s", report)

            output_test_results_file = os.path.join(training_args.output_dir,
                                                    "test_results.txt")

            with open(output_test_results_file, "w") as writer:
                writer.write("%s\n" % report)

            # Save predictions
            output_test_predictions_file = os.path.join(
                training_args.output_dir, "test_predictions.txt")

            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"),
                          "r") as f:
                    example_id = 0
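                    # Walk the CoNLL-style test file: blank lines and
                    # -DOCSTART- markers delimit examples; every token line
                    # receives the next predicted tag for its example.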

                    for line in f:
                        if (line.startswith("-DOCSTART-") or line == ""
                                or line == "\n"):
                            writer.write(line)

                            if not preds_list[example_id]:
                                example_id += 1
                        elif preds_list[example_id]:
                            output_line = (line.split()[0] + " " +
                                           preds_list[example_id].pop(0) + "\n")

                            writer.write(output_line)
                        else:
                            logger.warning(
                                "Maximum sequence length exceeded: No prediction for '%s'.",
                                line.split()[0])

        return results