Ejemplo n.º 1
0
    def __call__(
        self,
        trainer: GradientDescentTrainer,
        metrics: Dict[str, Any],
        epoch: int,
        is_master: bool,
    ):
        """Log one randomly chosen validation example as 'source -> prediction'."""
        # Pull a single batch from the validation loader and decode predictions.
        sample_batch = next(iter(trainer._validation_data_loader))
        raw_outputs = trainer.batch_outputs(sample_batch, for_training=False)
        predictions = trainer.model.make_output_human_readable(raw_outputs)[
            'predicted_sentences'
        ]
        chosen = random.randrange(len(predictions))

        vocab = trainer.model.vocab
        # Special tokens that must not appear in the printed source sentence.
        skip = {START_SYMBOL, END_SYMBOL, vocab._padding_token}

        token_ids = sample_batch['source_tokens']['tokens']['tokens'][chosen]
        source_words = []
        for tid in token_ids:
            word = vocab.get_token_from_index(tid.item())
            if word not in skip:
                source_words.append(word)
        source_sentence = ' '.join(source_words)

        logger.info('{0} -> {1}'.format(source_sentence, predictions[chosen]))
Ejemplo n.º 2
0
    def __call__(
        self,
        trainer: GradientDescentTrainer,
        batch_inputs: List[List[TensorDict]],
        batch_outputs: List[Dict[str, Any]],
        epoch: int,
        batch_number: int,
        is_training: bool,
        is_master: bool,
    ) -> None:
        """Adversarial-training callback.

        After each training batch, build adversarial variants of every
        transaction in the batch and take an extra optimizer step on them.
        Does nothing during validation (``is_training`` is False).
        """
        # Guard clause instead of nesting the whole body under the condition.
        if not is_training:
            return

        # NOTE(review): device=-1 forces the attack onto CPU — confirm this is
        # intended when the model itself trains on GPU.
        attacker = Attacker(classifier=trainer.model,
                            reader=self.reader,
                            device=-1)
        vocab = trainer.model.vocab  # loop-invariant; look up once

        for batch in batch_inputs:
            # Build an adversarial instance for every element of the batch.
            instances = []
            for element in batch:
                data = TransactionsData.from_tensors(inputs=element, vocab=vocab)
                adv_data = attacker.attack(data)
                instances.append(self.reader.text_to_instance(**adv_data))

            adv_batch = Batch(instances)
            adv_batch.index_instances(vocab=vocab)
            adv_tensors = adv_batch.as_tensor_dict()

            # Use a fresh name: the original shadowed the `batch_outputs`
            # parameter with this local, which is bug-prone.
            adv_outputs = trainer.batch_outputs(adv_tensors, for_training=True)
            loss = adv_outputs.get("loss")
            if loss is None:
                # The model produced no loss for this batch — the original
                # would have crashed on `None.backward()`; skip instead.
                continue
            loss.backward()
            trainer.optimizer.step()
            trainer.optimizer.zero_grad()