Example #1
    def __init__(self,
                 vocab: Vocabulary,
                 sentence_embedder: TextFieldEmbedder,
                 action_embedding_dim: int,
                 encoder: Seq2SeqEncoder,
                 attention_function: SimilarityFunction,
                 beam_size: int,
                 max_num_finished_states: int,
                 max_decoding_steps: int,
                 dropout: float = 0.0,
                 normalize_beam_score_by_length: bool = False,
                 checklist_cost_weight: float = 0.6,
                 dynamic_cost_weight: Optional[Dict[str, Union[int, float]]] = None,
                 penalize_non_agenda_actions: bool = False,
                 initial_mml_model_file: Optional[str] = None) -> None:
        super(NlvrCoverageSemanticParser, self).__init__(vocab=vocab,
                                                         sentence_embedder=sentence_embedder,
                                                         action_embedding_dim=action_embedding_dim,
                                                         encoder=encoder,
                                                         dropout=dropout)
        self._agenda_coverage = Average()
        self._decoder_trainer: DecoderTrainer[Callable[[NlvrDecoderState], torch.Tensor]] = \
                ExpectedRiskMinimization(beam_size=beam_size,
                                         normalize_by_length=normalize_beam_score_by_length,
                                         max_decoding_steps=max_decoding_steps,
                                         max_num_finished_states=max_num_finished_states)
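        # ExpectedRiskMinimization searches with a beam during training and minimizes the
        # expected value of a cost function over the finished states it finds; the direct
        # parser in the next example instead trains with MaximumMarginalLikelihood.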

        # Instantiating an empty NlvrWorld just to get the set of terminal productions.
        self._terminal_productions = set(NlvrWorld([]).terminal_productions.values())
        self._decoder_step = NlvrDecoderStep(encoder_output_dim=self._encoder.get_output_dim(),
                                             action_embedding_dim=action_embedding_dim,
                                             attention_function=attention_function,
                                             dropout=dropout,
                                             use_coverage=True)
        self._checklist_cost_weight = checklist_cost_weight
        self._dynamic_cost_wait_epochs = None
        self._dynamic_cost_rate = None
        if dynamic_cost_weight:
            self._dynamic_cost_wait_epochs = dynamic_cost_weight["wait_num_epochs"]
            self._dynamic_cost_rate = dynamic_cost_weight["rate"]
        self._penalize_non_agenda_actions = penalize_non_agenda_actions
        self._last_epoch_in_forward: Optional[int] = None
        # TODO (pradeep): Checking whether file exists here to avoid raising an error when we've
        # copied a trained ERM model from a different machine and the original MML model that was
        # used to initialize it does not exist on the current machine. This may not be the best
        # solution for the problem.
        if initial_mml_model_file is not None:
            if os.path.isfile(initial_mml_model_file):
                archive = load_archive(initial_mml_model_file)
                self._initialize_weights_from_archive(archive)
            else:
                # A model file was passed, but it does not exist. This is expected when
                # you're using a trained ERM model to decode, but it may also happen if the
                # path to the file is simply incorrect. So we log a warning instead of failing.
                logger.warning("MML model file for initializing weights is passed, but does not exist."
                               " This is fine if you're just decoding.")
Example #2
    def __init__(self, vocab: Vocabulary, sentence_embedder: TextFieldEmbedder,
                 action_embedding_dim: int, encoder: Seq2SeqEncoder,
                 attention_function: SimilarityFunction,
                 decoder_beam_search: BeamSearch,
                 max_decoding_steps: int) -> None:
        super(NlvrDirectSemanticParser,
              self).__init__(vocab=vocab,
                             sentence_embedder=sentence_embedder,
                             action_embedding_dim=action_embedding_dim,
                             encoder=encoder)
        # Trains by maximizing the marginal likelihood of logical forms.
        self._decoder_trainer = MaximumMarginalLikelihood()
        self._decoder_step = NlvrDecoderStep(
            encoder_output_dim=self._encoder.get_output_dim(),
            action_embedding_dim=action_embedding_dim,
            attention_function=attention_function)
        self._decoder_beam_search = decoder_beam_search
        self._max_decoding_steps = max_decoding_steps
        # Index used to pad batched target action sequences.
        self._action_padding_index = -1
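The _action_padding_index of -1 set above is the value used to pad batched target action sequences to a common length. A minimal sketch of that convention in plain PyTorch; none of this code is from the parser itself:

import torch

# Pad variable-length action-index sequences with -1, matching the
# parser's _action_padding_index, and build a mask for the real entries.
action_sequences = [[3, 7, 2], [5, 1]]
max_len = max(len(seq) for seq in action_sequences)
padded = torch.tensor([seq + [-1] * (max_len - len(seq)) for seq in action_sequences])
mask = padded != -1  # padded positions are excluded from the loss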
Example #3
    def __init__(self,
                 vocab,
                 sentence_embedder,
                 action_embedding_dim,
                 encoder,
                 attention,
                 decoder_beam_search,
                 max_decoding_steps,
                 dropout=0.0):
        super(NlvrDirectSemanticParser,
              self).__init__(vocab=vocab,
                             sentence_embedder=sentence_embedder,
                             action_embedding_dim=action_embedding_dim,
                             encoder=encoder,
                             dropout=dropout)
        self._decoder_trainer = MaximumMarginalLikelihood()
        self._decoder_step = NlvrDecoderStep(
            encoder_output_dim=self._encoder.get_output_dim(),
            action_embedding_dim=action_embedding_dim,
            input_attention=attention,
            dropout=dropout)
        self._decoder_beam_search = decoder_beam_search
        self._max_decoding_steps = max_decoding_steps
        self._action_padding_index = -1
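Example #3 drops the type annotations, takes an Attention module directly (forwarded to NlvrDecoderStep as input_attention), and threads dropout through. As a rough sketch of how the encoder and attention arguments might be built, assuming the AllenNLP module paths PytorchSeq2SeqWrapper and DotProductAttention exist in the version in use; the dimensions are arbitrary:

from torch.nn import LSTM
from allennlp.modules.attention import DotProductAttention
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper

# A bidirectional LSTM encoder; get_output_dim() is 2 * hidden_size = 60 here,
# which is what the decoder step receives as encoder_output_dim.
encoder = PytorchSeq2SeqWrapper(
    LSTM(input_size=50, hidden_size=30, batch_first=True, bidirectional=True))
attention = DotProductAttention()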