Example #1
 @classmethod
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'WikiTablesMmlSemanticParser':
     question_embedder = TextFieldEmbedder.from_params(vocab, params.pop("question_embedder"))
     action_embedding_dim = params.pop_int("action_embedding_dim")
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     entity_encoder = Seq2VecEncoder.from_params(params.pop('entity_encoder'))
     max_decoding_steps = params.pop_int("max_decoding_steps")
     mixture_feedforward_params = params.pop('mixture_feedforward', None)
     if mixture_feedforward_params is not None:
         mixture_feedforward = FeedForward.from_params(mixture_feedforward_params)
     else:
         mixture_feedforward = None
     decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
     input_attention = Attention.from_params(params.pop("attention"))
     training_beam_size = params.pop_int('training_beam_size', None)
     use_neighbor_similarity_for_linking = params.pop_bool('use_neighbor_similarity_for_linking', False)
     dropout = params.pop_float('dropout', 0.0)
     num_linking_features = params.pop_int('num_linking_features', 10)
     tables_directory = params.pop('tables_directory', '/wikitables/')
     rule_namespace = params.pop('rule_namespace', 'rule_labels')
     params.assert_empty(cls.__name__)
     return cls(vocab,
                question_embedder=question_embedder,
                action_embedding_dim=action_embedding_dim,
                encoder=encoder,
                entity_encoder=entity_encoder,
                mixture_feedforward=mixture_feedforward,
                decoder_beam_search=decoder_beam_search,
                max_decoding_steps=max_decoding_steps,
                input_attention=input_attention,
                training_beam_size=training_beam_size,
                use_neighbor_similarity_for_linking=use_neighbor_similarity_for_linking,
                dropout=dropout,
                num_linking_features=num_linking_features,
                tables_directory=tables_directory,
                rule_namespace=rule_namespace)
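
For orientation, here is a minimal `Params` sketch this factory would accept, assuming a `Vocabulary` named `vocab` is already built. Each top-level key matches one of the `params.pop` calls above; the concrete component types and sizes (`lstm`, `boe`, the embedder spec, beam size) are illustrative assumptions, not values taken from the example.

 from allennlp.common import Params

 # Hypothetical config for WikiTablesMmlSemanticParser.from_params.
 # Keys mirror the pops above; component types/sizes are assumptions.
 params = Params({
     "question_embedder": {"tokens": {"type": "embedding", "embedding_dim": 100}},
     "action_embedding_dim": 100,
     "encoder": {"type": "lstm", "input_size": 100, "hidden_size": 100},
     "entity_encoder": {"type": "boe", "embedding_dim": 100},
     "max_decoding_steps": 40,
     "decoder_beam_search": {"beam_size": 10},
     "attention": {"similarity_function": {"type": "dot_product"}},
 })
 model = WikiTablesMmlSemanticParser.from_params(vocab, params)

All omitted keys (`dropout`, `num_linking_features`, and so on) fall back to the defaults supplied to `pop_*` above, so `assert_empty` still passes.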
Example #2

 @classmethod
 def from_params(cls, vocab: Vocabulary,
                 params: Params) -> 'WikiTablesErmSemanticParser':
     question_embedder = TextFieldEmbedder.from_params(
         vocab, params.pop("question_embedder"))
     action_embedding_dim = params.pop_int("action_embedding_dim")
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     entity_encoder = Seq2VecEncoder.from_params(
         params.pop('entity_encoder'))
     mixture_feedforward_params = params.pop('mixture_feedforward', None)
     if mixture_feedforward_params is not None:
         mixture_feedforward = FeedForward.from_params(
             mixture_feedforward_params)
     else:
         mixture_feedforward = None
     input_attention = Attention.from_params(params.pop("attention"))
     decoder_beam_size = params.pop_int("decoder_beam_size")
     decoder_num_finished_states = params.pop_int(
         "decoder_num_finished_states", None)
     max_decoding_steps = params.pop_int("max_decoding_steps")
     normalize_beam_score_by_length = params.pop_bool(
         "normalize_beam_score_by_length", False)
     use_neighbor_similarity_for_linking = params.pop_bool(
         "use_neighbor_similarity_for_linking", False)
     dropout = params.pop_float('dropout', 0.0)
     num_linking_features = params.pop_int('num_linking_features', 10)
     tables_directory = params.pop('tables_directory', '/wikitables/')
     rule_namespace = params.pop('rule_namespace', 'rule_labels')
     checklist_cost_weight = params.pop_float("checklist_cost_weight", 0.6)
     mml_model_file = params.pop('mml_model_file', None)
     params.assert_empty(cls.__name__)
     return cls(
         vocab,
         question_embedder=question_embedder,
         action_embedding_dim=action_embedding_dim,
         encoder=encoder,
         entity_encoder=entity_encoder,
         mixture_feedforward=mixture_feedforward,
         input_attention=input_attention,
         decoder_beam_size=decoder_beam_size,
         decoder_num_finished_states=decoder_num_finished_states,
         max_decoding_steps=max_decoding_steps,
         normalize_beam_score_by_length=normalize_beam_score_by_length,
         checklist_cost_weight=checklist_cost_weight,
          use_neighbor_similarity_for_linking=use_neighbor_similarity_for_linking,
         dropout=dropout,
         num_linking_features=num_linking_features,
         tables_directory=tables_directory,
         rule_namespace=rule_namespace,
         initial_mml_model_file=mml_model_file)
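
The ERM variant reads largely the same keys, but configures search inline through `decoder_beam_size` and `decoder_num_finished_states` instead of a nested `decoder_beam_search` block, and can warm-start from a trained MML model via `mml_model_file`. A sketch of just the keys that differ from the MML config above; the values and the path are illustrative:

 # ERM-specific keys; the embedder/encoder keys from the MML sketch carry
 # over, while the nested "decoder_beam_search" block is dropped.
 erm_specific = {
     "decoder_beam_size": 10,
     "decoder_num_finished_states": 100,
     "normalize_beam_score_by_length": False,
     "checklist_cost_weight": 0.6,
     "mml_model_file": "/path/to/mml_model.tar.gz",  # hypothetical path
 }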
Example #3
 @classmethod
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'NlvrDirectSemanticParser':
     sentence_embedder_params = params.pop("sentence_embedder")
     sentence_embedder = TextFieldEmbedder.from_params(
         vocab, sentence_embedder_params)
     action_embedding_dim = params.pop_int('action_embedding_dim')
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     dropout = params.pop_float('dropout', 0.0)
     input_attention = Attention.from_params(params.pop("attention"))
     decoder_beam_search = BeamSearch.from_params(
         params.pop("decoder_beam_search"))
     max_decoding_steps = params.pop_int("max_decoding_steps")
     params.assert_empty(cls.__name__)
     return cls(vocab,
                sentence_embedder=sentence_embedder,
                action_embedding_dim=action_embedding_dim,
                encoder=encoder,
                input_attention=input_attention,
                decoder_beam_search=decoder_beam_search,
                max_decoding_steps=max_decoding_steps,
                dropout=dropout)
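
The same configuration pattern applies to the NLVR parser; a minimal sketch, again with assumed component types and sizes:

 from allennlp.common import Params

 # Hypothetical config for NlvrDirectSemanticParser.from_params.
 params = Params({
     "sentence_embedder": {"tokens": {"type": "embedding", "embedding_dim": 50}},
     "action_embedding_dim": 50,
     "encoder": {"type": "lstm", "input_size": 50, "hidden_size": 50},
     "attention": {"similarity_function": {"type": "dot_product"}},
     "decoder_beam_search": {"beam_size": 10},
     "max_decoding_steps": 20,
 })
 parser = NlvrDirectSemanticParser.from_params(vocab, params)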
Example #4

 @classmethod
 def from_params(cls, vocab: Vocabulary,
                 params: Params) -> 'NlvrCoverageSemanticParser':
     sentence_embedder_params = params.pop("sentence_embedder")
     sentence_embedder = TextFieldEmbedder.from_params(
         vocab, sentence_embedder_params)
     action_embedding_dim = params.pop_int('action_embedding_dim')
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     dropout = params.pop_float('dropout', 0.0)
     input_attention = Attention.from_params(params.pop("attention"))
     beam_size = params.pop_int('beam_size')
     max_num_finished_states = params.pop_int('max_num_finished_states',
                                              None)
     normalize_beam_score_by_length = params.pop_bool(
         'normalize_beam_score_by_length', False)
     max_decoding_steps = params.pop_int("max_decoding_steps")
     checklist_cost_weight = params.pop_float("checklist_cost_weight", 0.6)
     dynamic_cost_weight = params.pop("dynamic_cost_weight", None)
     penalize_non_agenda_actions = params.pop_bool(
         "penalize_non_agenda_actions", False)
     initial_mml_model_file = params.pop("initial_mml_model_file", None)
     params.assert_empty(cls.__name__)
     return cls(
         vocab,
         sentence_embedder=sentence_embedder,
         action_embedding_dim=action_embedding_dim,
         encoder=encoder,
         input_attention=input_attention,
         beam_size=beam_size,
         max_num_finished_states=max_num_finished_states,
         dropout=dropout,
         max_decoding_steps=max_decoding_steps,
         normalize_beam_score_by_length=normalize_beam_score_by_length,
         checklist_cost_weight=checklist_cost_weight,
         dynamic_cost_weight=dynamic_cost_weight,
         penalize_non_agenda_actions=penalize_non_agenda_actions,
         initial_mml_model_file=initial_mml_model_file)
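
In practice these `Params` objects usually come from an experiment file rather than being built inline. A hypothetical end-to-end sketch, assuming an AllenNLP-style config with a top-level `model` block; the file path is illustrative:

 from allennlp.common import Params

 # Load an experiment config and hand its "model" block to from_params.
 params = Params.from_file("experiment.jsonnet")  # hypothetical path
 model_params = params.pop("model")
 model_params.pop("type")  # registry name, consumed before from_params
 parser = NlvrCoverageSemanticParser.from_params(vocab, model_params)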
Example #5

 def test_can_build_from_params(self):
     params = Params({'similarity_function': {'type': 'cosine'}, 'normalize': False})
     attention = Attention.from_params(params)
     # pylint: disable=protected-access
     assert attention._similarity_function.__class__.__name__ == 'CosineSimilarity'
     assert attention._normalize is False
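
This test pins down the contract the parsers above rely on: `Attention.from_params` reads a `similarity_function` sub-config plus an optional `normalize` flag. The same call in non-test code, with an assumed similarity type:

 from allennlp.common import Params

 params = Params({"similarity_function": {"type": "dot_product"}})
 attention = Attention.from_params(params)  # "normalize" omitted: class default applies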
Example #6
    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'MtGan':  # type: ignore
        # pylint: disable=arguments-differ
        vocab_namespace_A = params.pop("vocab_namespace_A", "vocab_A")
        vocab_namespace_B = params.pop("vocab_namespace_B", "vocab_B")

        num_classes_A = vocab.get_vocab_size(namespace=vocab_namespace_A)
        num_classes_B = vocab.get_vocab_size(namespace=vocab_namespace_B)

        params_generators = params.pop("generators")
        generators_type = params_generators.pop("type")
        if generators_type == "rnn2rnn":
            generators_embedding_dim = params_generators.pop("embedding_dim")
            embedding_A_generator = Embedding(num_embeddings=num_classes_A, embedding_dim=generators_embedding_dim)
            embedding_B_generator = Embedding(num_embeddings=num_classes_B, embedding_dim=generators_embedding_dim)

            params_encoder_generators = params_generators.pop("encoder")
            generator_A_to_B_encoder = Seq2SeqEncoder.from_params(params_encoder_generators.duplicate())
            generator_B_to_A_encoder = Seq2SeqEncoder.from_params(params_encoder_generators.duplicate())

            generator_attention_params = params_generators.pop("attention")
            attention_generator_A_to_B = Attention.from_params(generator_attention_params.duplicate())
            attention_generator_B_to_A = Attention.from_params(generator_attention_params.duplicate())

            generators_max_decoding_steps = params_generators.pop("max_decoding_steps")

            generator_A_to_B = Rnn2Rnn(vocab=vocab,
                                       source_embedding=embedding_A_generator,
                                       target_embedding=embedding_B_generator,
                                       encoder=generator_A_to_B_encoder,
                                       max_decoding_steps=generators_max_decoding_steps,
                                       target_namespace=vocab_namespace_B,
                                       attention=attention_generator_A_to_B)

            generator_B_to_A = Rnn2Rnn(vocab=vocab,
                                       source_embedding=embedding_B_generator,
                                       target_embedding=embedding_A_generator,
                                       encoder=generator_B_to_A_encoder,
                                       max_decoding_steps=generators_max_decoding_steps,
                                       target_namespace=vocab_namespace_A,
                                       attention=attention_generator_B_to_A)
        else:
            raise ConfigurationError(message="This generators model type is not supported")

        discriminators_params = params.pop("discriminators")
        discriminators_type = discriminators_params.pop("type")
        if discriminators_type == "seq2prob":
            params_encoder_discriminators = discriminators_params.pop("encoder")
            discriminator_A_encoder = Seq2VecEncoder.from_params(params_encoder_discriminators.duplicate())
            discriminator_B_encoder = Seq2VecEncoder.from_params(params_encoder_discriminators.duplicate())

            discriminators_embedding_dim = discriminators_params.pop("embedding_dim")
            embedding_A_discriminator = Embedding(num_classes_A, discriminators_embedding_dim)
            embedding_B_discriminator = Embedding(num_classes_B, discriminators_embedding_dim)

            discriminator_A = Seq2Prob(vocab=vocab, encoder=discriminator_A_encoder, embedding=embedding_A_discriminator)
            discriminator_B = Seq2Prob(vocab=vocab, encoder=discriminator_B_encoder, embedding=embedding_B_discriminator)
        else:
            raise ConfigurationError(message="This discriminators model type is not supported")

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   generator_A_to_B=generator_A_to_B,
                   generator_B_to_A=generator_B_to_A,
                   discriminator_A=discriminator_A,
                   discriminator_B=discriminator_B,
                   vocab_namespace_A=vocab_namespace_A,
                   vocab_namespace_B=vocab_namespace_B)
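
To tie the pieces together, here is a hypothetical `Params` layout this factory would accept. The nested `generators`/`discriminators` blocks match the pops above (note that `Rnn2Rnn` and `Seq2Prob` are project-local classes, not AllenNLP built-ins); encoder types and dimensions are assumptions:

    from allennlp.common import Params

    # Hypothetical config for MtGan.from_params; keys mirror the pops above.
    params = Params({
        "vocab_namespace_A": "vocab_A",
        "vocab_namespace_B": "vocab_B",
        "generators": {
            "type": "rnn2rnn",
            "embedding_dim": 128,
            "encoder": {"type": "lstm", "input_size": 128, "hidden_size": 128},
            "attention": {"similarity_function": {"type": "dot_product"}},
            "max_decoding_steps": 50,
        },
        "discriminators": {
            "type": "seq2prob",
            "encoder": {"type": "lstm", "input_size": 64, "hidden_size": 64},
            "embedding_dim": 64,
        },
    })
    model = MtGan.from_params(vocab, params)

Sharing one `embedding_dim` per block keeps the embedding outputs consistent with the encoder `input_size`, matching how the factory wires each `Embedding` into its encoder.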