Example #1
    @classmethod
    def from_text_vision_configs(cls, text_config: PretrainedConfig,
                                 vision_config: PretrainedConfig, **kwargs):
        r"""
        Instantiate a :class:`HybridCLIPConfig` (or a derived class) from text model configuration and
        vision model configuration.

        Returns:
            :class:`HybridCLIPConfig`: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(),
                   vision_config=vision_config.to_dict(),
                   **kwargs)
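
A minimal usage sketch, assuming `HybridCLIPConfig` (the class defining the method above) is in scope; the checkpoint names and the `projection_dim` kwarg are illustrative, not prescribed by the snippet:

from transformers import AutoConfig

# placeholder checkpoints; any text/vision config pair works
text_config = AutoConfig.from_pretrained("bert-base-uncased")
vision_config = AutoConfig.from_pretrained("google/vit-base-patch16-224")
# extra keyword arguments such as projection_dim are forwarded through **kwargs
config = HybridCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=512)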
Example #2
    @classmethod
    def from_layout_lm_bert_configs(cls, layout_lm_config: PretrainedConfig,
                                    bert_config: PretrainedConfig,
                                    **kwargs) -> PretrainedConfig:
        r"""
        Instantiate a combined configuration (or a derived class) from a LayoutLM model configuration and
        a BERT model configuration.
        """

        return cls(layout_lm=layout_lm_config.to_dict(),
                   bert=bert_config.to_dict(),
                   **kwargs)
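
Hypothetical usage, assuming the enclosing class (called `LayoutLMBertConfig` here for illustration) exposes the classmethod above; the checkpoint names are placeholders:

from transformers import AutoConfig

layout_lm_config = AutoConfig.from_pretrained("microsoft/layoutlm-base-uncased")
bert_config = AutoConfig.from_pretrained("bert-base-uncased")
# LayoutLMBertConfig is an assumed name for the class defining the method above
config = LayoutLMBertConfig.from_layout_lm_bert_configs(layout_lm_config, bert_config)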
Example #3
    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig,
                                     decoder_config: PretrainedConfig,
                                     **kwargs) -> PretrainedConfig:
        r"""
        Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
        decoder model configuration.
        Returns:
            [`EncoderDecoderConfig`]: An instance of a configuration object
        """
        logger.info(
            "Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config"
        )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(),
                   decoder=decoder_config.to_dict(),
                   **kwargs)
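
This matches `EncoderDecoderConfig.from_encoder_decoder_configs` from transformers, so it can be exercised directly; note that the decoder flags set above survive the round trip through `to_dict()`:

from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# the classmethod forced these flags on the decoder config before serializing it
assert config.decoder.is_decoder and config.decoder.add_cross_attention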
Example #4
    def __init__(
        self,
        embedding,
        rnn_hidden_size,
        rnn_num_layers,
        num_attn_heads,
        num_encoder_layers,
        num_decoder_layers,
        ffn_dim,
        dropout,
    ):

        # PreTrainedModel's __init__ requires a config object; a bare PretrainedConfig suffices here
        super().__init__(PretrainedConfig())
        self.embedding = embedding
        self.word_embed_dim = embedding.embedding_dim

        # model config
        self.model_config = {
            "rnn_hidden_size": rnn_hidden_size,
            "rnn_num_layers": rnn_num_layers,
            "num_attn_heads": num_attn_heads,
            "num_encoder_layers": num_encoder_layers,
            "num_decoder_layers": num_decoder_layers,
            "ffn_dim": ffn_dim,
            "dropout": dropout,
        }

        self.word_encoder = WordEncoder(
            hidden_size=rnn_hidden_size,
            embedding_size=self.word_embed_dim,
            embedding=embedding,
            num_layers=rnn_num_layers,
            dropout=dropout,
        )

        self.sentence_embed_dim = rnn_hidden_size

        self.config = BartConfig(
            d_model=self.sentence_embed_dim,
            encoder_attention_heads=num_attn_heads,
            decoder_attention_heads=num_attn_heads,
            attention_dropout=0.0,
            dropout=dropout,
            activation_dropout=0.0,
            encoder_ffn_dim=ffn_dim,
            decoder_ffn_dim=ffn_dim,
        )

        # stacks of BART-style transformer encoder/decoder layers
        self.encoder_layers = nn.ModuleList(
            [EncoderLayer(self.config) for _ in range(num_encoder_layers)])
        self.decoder_layers = nn.ModuleList(
            [DecoderLayer(self.config) for _ in range(num_decoder_layers)])

        # pointer-network head operating on the sentence embeddings
        self.pointer_head = PointerHead(self.sentence_embed_dim)

        self.apply(self._init_weights)
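
The enclosing class is not shown; a hypothetical instantiation, where the class name `SentenceOrderingModel` and every hyperparameter value are assumptions for illustration (note `rnn_hidden_size` must be divisible by `num_attn_heads`, since it becomes `d_model`):

import torch.nn as nn

# placeholder embedding table; the real one would be loaded from pretrained vectors
embedding = nn.Embedding(num_embeddings=30522, embedding_dim=128)
model = SentenceOrderingModel(  # assumed class name
    embedding=embedding,
    rnn_hidden_size=256,
    rnn_num_layers=2,
    num_attn_heads=8,
    num_encoder_layers=4,
    num_decoder_layers=4,
    ffn_dim=1024,
    dropout=0.1,
)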
Example #5
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add to config_common_kwargs above.
        self.assertListEqual(missing_keys, ["is_encoder_decoder", "_name_or_path", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in `test_configuration_common.config_common_kwargs`; "
                f"pick another value for them: {', '.join(keys_with_defaults)}."
            )
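
The invariant this test enforces: every key in `config_common_kwargs` must differ from the `PretrainedConfig` default, otherwise the test suite could not detect a config class that silently drops the kwarg. A toy illustration, where the two entries shown are assumed excerpts rather than the full dict:

from transformers import PretrainedConfig

base = PretrainedConfig()
# excerpt with assumed values; defaults are return_dict=True, output_hidden_states=False
config_common_kwargs = {"return_dict": False, "output_hidden_states": True}
assert all(getattr(base, key) != value for key, value in config_common_kwargs.items())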
Example #6
def main():

    # processors need to be updated
    processors = {
        'agnews-processor': AgNewsDataProcessor,
        'thcnews-processor': THCNewsDataProcessor
    }

    config: Config = Config.instance()

    if not config.do_train and not config.do_eval and not config.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_predict` must be True.")

    bert_config = PretrainedConfig.from_pretrained(config.pretrained_model_name)
    # Process the appropriate dataset depending on the task
    task_name = config.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    label_list = processor.get_labels()

    tokenizer = AutoTokenizer.from_pretrained(config.pretrained_model_name)

    train_examples = None
    num_train_steps = None
    num_warmup_steps = None

    if config.do_train:

        train_examples: List[InputExample] = processor.get_train_examples(config.data_dir)
        # TODO: complete the code
        train_dataset_loader = None
        num_train_steps = int(
            len(train_examples) / config.train_batch_size * config.epochs
        )
        num_warmup_steps = int(num_train_steps * config.warmup_proportion)
        
        model = create_model(config=config)
        training_arguments = TrainingArguments(
            output_dir=config.output_dir,
            overwrite_output_dir=True,
        )
        trainer = SequenceClassificationTrainer(
            model=model,
            args=training_arguments,
        )

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPUs

    if config.do_train:
        train_file = os.path.join(config.output_dir, "train.tf_record")
        file_based_convert_examples_to_features(
            train_examples, label_list, config.max_seq_length, tokenizer, train_file)
        # tf.logging.info("***** Running training *****")
        # tf.logging.info("  Num examples = %d", len(train_examples))
        # tf.logging.info("  Batch size = %d", config.train_batch_size)
        # tf.logging.info("  Num steps = %d", num_train_steps)
        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=config.max_seq_length,
            is_training=True,
            drop_remainder=True)
        # `estimator` is assumed to be a TF Estimator constructed elsewhere in the original script
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
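
The processor classes are not shown; a minimal sketch of the interface the script relies on, with method names inferred from the calls above and an assumed label set:

from typing import List
from transformers import InputExample

class AgNewsDataProcessor:
    def get_labels(self) -> List[str]:
        # AG News has four topic classes (assumed here)
        return ["World", "Sports", "Business", "Sci/Tech"]

    def get_train_examples(self, data_dir: str) -> List[InputExample]:
        # read files under data_dir and build InputExample objects; omitted
        raise NotImplementedError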
Example #7
    def __init__(self, config: DIETClassifierConfig):
        """
        Create DIETClassifier model

        :param config: config for model
        """
        if path.exists(config.model):
            try:
                json_config = json.load(
                    open(f"{config.model}/config.json", "r"))
            except Exception as ex:
                raise RuntimeError(
                    f"Cannot load configuration file from {config.model}: {ex}"
                )

            try:
                checkpoint = torch.load(f"{config.model}/pytorch_model.bin")
            except Exception as ex:
                raise RuntimeError(
                    f"Cannot load model from {config.model}: {ex}")

            pretrained_model = None
            config = PretrainedConfig.from_dict(json_config)
        else:
            pretrained_model = BertForTokenClassification.from_pretrained(
                config.model)
            checkpoint = None
            if config.intents is None or config.entities is None:
                raise ValueError(
                    "Using a pretrained model from transformers requires specifying entities and intents"
                )
            pretrained_model.config.update({
                "model": config.model,
                "entities": config.entities,
                "intents": config.intents
            })
            config = pretrained_model.config

        super().__init__(config)

        self.entities_list = ["O"] + config.entities
        self.num_entities = len(self.entities_list)
        self.intents_list = config.intents
        self.num_intents = len(self.intents_list)

        self.bert = BertModel(
            config, add_pooling_layer=False
        ) if not pretrained_model else pretrained_model.bert

        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.entities_classifier = nn.Linear(config.hidden_size,
                                             self.num_entities)
        self.intents_classifier = nn.Linear(config.hidden_size,
                                            self.num_intents)

        self.init_weights()

        if not pretrained_model:
            try:
                self.load_state_dict(checkpoint)
            except Exception as ex:
                raise RuntimeError(
                    f"Cannot load state dict from checkpoint: {ex}")
Example #8
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TrainingArguments))

    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    print("********")
    print(model_args)
    print("********")

    # note: model2 is never used below; training uses `model`, built from wav2vec_config.json
    model2 = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir)

    config = PretrainedConfig.from_json_file("wav2vec_config.json")
    model = Wav2Vec2ForCTC(config)

    processor = Wav2Vec2Processor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir)

    train_dataset = datasets.load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name)
    val_dataset = datasets.load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split="validation")

    wer_metric = datasets.load_metric("wer")

    def map_to_array(batch):
        speech_array, sampling_rate = sf.read(batch["file"])
        batch["speech"] = speech_array
        batch["sampling_rate"] = sampling_rate
        return batch

    train_dataset = train_dataset.map(map_to_array, remove_columns=["file"])
    val_dataset = val_dataset.map(map_to_array, remove_columns=["file"])

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        batch["input_values"] = processor(
            batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
        with processor.as_target_processor():
            batch["labels"] = processor(batch["text"]).input_ids
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    val_dataset = val_dataset.map(
        prepare_dataset,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    data_collator = DataCollatorCTCWithPadding(
        processor=processor, padding=True)

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        # replace -100 (the padding ignore-index) before decoding the labels
        pred.label_ids[pred.label_ids == -100] = 0

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        tokenizer=processor.feature_extractor,
    )

    trainer.train()
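
`DataCollatorCTCWithPadding` is defined elsewhere in the script; a sketch of what it plausibly does, modeled on the wav2vec2 fine-tuning examples of the same era (the class body below is an assumption, not the original):

from dataclasses import dataclass
from typing import Dict, List, Union
import torch
from transformers import Wav2Vec2Processor

@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # pad audio inputs and label ids separately, since they differ in length
        input_features = [{"input_values": f["input_values"]} for f in features]
        label_features = [{"input_ids": f["labels"]} for f in features]
        batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(label_features, padding=self.padding, return_tensors="pt")
        # CTC loss ignores index -100, so mask label padding with it
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch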