Example #1
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
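
These `from_params` constructors are normally driven by a configuration file rather than called by hand. A minimal sketch of that flow, assuming the AllenNLP 0.x API in which `from_params` takes the vocabulary positionally (the file paths here are hypothetical):

from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models import Model

# Hypothetical paths: any serialized vocabulary and experiment config will do.
vocab = Vocabulary.from_files("vocabulary/")
config = Params.from_file("experiment.jsonnet")

# Model.from_params dispatches on the "type" key of the "model" block and
# lands in a concrete constructor such as DecomposableAttention above.
model = Model.from_params(vocab, config.pop("model"))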
Example #2
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

        feed_forward_params = params.pop("feedforward", None)
        if feed_forward_params is not None:
            feedforward_layer = FeedForward.from_params(feed_forward_params)
        else:
            feedforward_layer = None
        pos_tag_embedding_params = params.pop("pos_tag_embedding", None)
        if pos_tag_embedding_params is not None:
            pos_tag_embedding = Embedding.from_params(vocab, pos_tag_embedding_params)
        else:
            pos_tag_embedding = None
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        evalb_directory_path = params.pop("evalb_directory_path", None)
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   span_extractor=span_extractor,
                   encoder=encoder,
                   feedforward_layer=feedforward_layer,
                   pos_tag_embedding=pos_tag_embedding,
                   initializer=initializer,
                   regularizer=regularizer,
                   evalb_directory_path=evalb_directory_path)
Example #3
 def test_from_params_requires_batch_first(self):
     params = Params({
             "type": "lstm",
             "batch_first": False,
             })
     with pytest.raises(ConfigurationError):
         # pylint: disable=unused-variable
         encoder = Seq2SeqEncoder.from_params(params)
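
For contrast, a sketch of a config that constructs cleanly under the same assumed AllenNLP 0.x API: omitting "batch_first" avoids the ConfigurationError, because the wrapper enforces batch-first behavior itself.

from allennlp.common import Params
from allennlp.modules import Seq2SeqEncoder

# Leaving out "batch_first" lets construction succeed; the wrapper sets it
# to True internally, which is why passing False above is rejected.
params = Params({"type": "lstm", "input_size": 5, "hidden_size": 7})
encoder = Seq2SeqEncoder.from_params(params)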
Example #4
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'DecomposableAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(
                premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(
                hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        attend_feedforward = FeedForward.from_params(
            params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(
            params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(
            params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(
            params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #5
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        stacked_encoder = Seq2SeqEncoder.from_params(
            params.pop("stacked_encoder"))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   stacked_encoder=stacked_encoder)
Example #6
    def from_params(  # type: ignore
        cls, vocab: Vocabulary, params: Params
    ) -> "BiattentiveClassificationNetwork":

        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))
        params.assert_empty(cls.__name__)

        return cls(
            vocab=vocab,
            text_field_embedder=text_field_embedder,
            embedding_dropout=embedding_dropout,
            pre_encode_feedforward=pre_encode_feedforward,
            encoder=encoder,
            integrator=integrator,
            integrator_dropout=integrator_dropout,
            output_layer=output_layer,
            elmo=elmo,
            use_input_elmo=use_input_elmo,
            use_integrator_output_elmo=use_integrator_output_elmo,
            initializer=initializer,
            regularizer=regularizer,
        )
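
The `output_layer` branch above picks `FeedForward` when the config contains an `activations` key and falls back to `Maxout` otherwise. A sketch of the two config shapes that branch accepts (dimensions are placeholders; assumes the AllenNLP 0.x `FeedForward`/`Maxout` constructors):

from allennlp.common import Params

# "activations" present: the branch builds a FeedForward output layer.
feedforward_cfg = Params({"input_dim": 10, "num_layers": 1,
                          "hidden_dims": 3, "activations": "linear"})

# "activations" absent: the branch builds a Maxout output layer instead.
maxout_cfg = Params({"input_dim": 10, "num_layers": 1,
                     "output_dims": 3, "pool_sizes": 4})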
Example #7
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'EvidenceExtraction':
        embedder_params = params.pop("text_field_embedder")
        embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
        passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
        dropout = params.pop_float('dropout', 0.1)
        r = params.pop_float('r', 0.8)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   embedder=embedder,
                   question_encoder=question_encoder,
                   passage_encoder=passage_encoder,
                   r=r,
                   dropout=dropout,
                   initializer=initializer,
                   regularizer=regularizer)
Example #8
    def from_params(cls, vocab, params):
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        num_highway_layers = params.pop("num_highway_layers")
        phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
        dropout = params.pop('dropout', 0.2)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        mask_lstms = params.pop('mask_lstms', True)
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab, text_field_embedder=text_field_embedder,
                   num_highway_layers=num_highway_layers, phrase_layer=phrase_layer,
                   attention_similarity_function=similarity_function, modeling_layer=modeling_layer,
                   dropout=dropout, mask_lstms=mask_lstms,
                   initializer=initializer, regularizer=regularizer)
Example #9
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'BidirectionalAttentionFlow':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        num_highway_layers = params.pop("num_highway_layers")
        phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
        similarity_function = SimilarityFunction.from_params(
            params.pop("similarity_function"))
        modeling_layer = Seq2SeqEncoder.from_params(
            params.pop("modeling_layer"))
        span_end_encoder = Seq2SeqEncoder.from_params(
            params.pop("span_end_encoder"))
        dropout = params.pop('dropout', 0.2)

        # TODO: Remove the following when fully deprecated
        evaluation_json_file = params.pop('evaluation_json_file', None)
        if evaluation_json_file is not None:
            logger.warning(
                "the 'evaluation_json_file' model parameter is deprecated, please remove"
            )

        init_params = params.pop('initializer', None)
        reg_params = params.pop('regularizer', None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None else InitializerApplicator())
        regularizer = RegularizerApplicator.from_params(
            reg_params) if reg_params is not None else None

        mask_lstms = params.pop('mask_lstms', True)
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   num_highway_layers=num_highway_layers,
                   phrase_layer=phrase_layer,
                   attention_similarity_function=similarity_function,
                   modeling_layer=modeling_layer,
                   span_end_encoder=span_end_encoder,
                   dropout=dropout,
                   mask_lstms=mask_lstms,
                   initializer=initializer,
                   regularizer=regularizer)
Example #10
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'HierAttnNetworkClassifier':
        """Constructs class from ``Params`` and ``Vocabulary``
        """
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        encoder_word = Seq2SeqEncoder.from_params(params.pop("encoder_word"))
        encoder_sent = Seq2SeqEncoder.from_params(params.pop("encoder_sent"))
        attention_word_params = params.pop("attention_word")
        attention_type = attention_word_params.pop("type")
        attn_word = getattr(attention_module,
                            attention_type).from_params(attention_word_params)

        attention_sent_params = params.pop("attention_sent")
        attention_type = attention_sent_params.pop("type")
        attn_sent = getattr(attention_module,
                            attention_type).from_params(attention_sent_params)

        label_namespace = params.pop("label_namespace", "labels")

        dropout = params.pop("dropout", None)
        threshold = params.pop("threshold", 0.5)
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        label_indexer = params.pop("label_indexer", "LabelIndicesBiMap")

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder_word=encoder_word,
                   attn_word=attn_word,
                   attn_sent=attn_sent,
                   encoder_sent=encoder_sent,
                   thresh=threshold,
                   label_namespace=label_namespace,
                   dropout=dropout,
                   initializer=initializer,
                   regularizer=regularizer,
                   label_indexer=label_indexer)
Example #11
    def __init__(self,
                 vocab: Vocabulary,
                 params: Params,
                 regularizer: RegularizerApplicator = None):

        super(LayerCoref, self).__init__(vocab=vocab, regularizer=regularizer)

        # Base text Field Embedder
        text_field_embedder_params = params.pop("text_field_embedder")
        text_field_embedder = BasicTextFieldEmbedder.from_params(
            vocab=vocab, params=text_field_embedder_params)
        self._text_field_embedder = text_field_embedder

        ##############
        # Coref Stuffs
        ##############
        coref_params = params.pop("coref")

        # Encoder
        encoder_coref_params = coref_params.pop("encoder")
        encoder_coref = Seq2SeqEncoder.from_params(encoder_coref_params)
        self._encoder_coref = encoder_coref

        # Tagger: Coreference
        tagger_coref_params = coref_params.pop("tagger")
        eval_on_gold_mentions = tagger_coref_params.pop_bool(
            "eval_on_gold_mentions", False)
        init_params = tagger_coref_params.pop("initializer", None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None else InitializerApplicator())

        tagger_coref = CoreferenceCustom(
            vocab=vocab,
            text_field_embedder=self._text_field_embedder,
            context_layer=self._encoder_coref,
            mention_feedforward=FeedForward.from_params(
                tagger_coref_params.pop("mention_feedforward")),
            antecedent_feedforward=FeedForward.from_params(
                tagger_coref_params.pop("antecedent_feedforward")),
            feature_size=tagger_coref_params.pop_int("feature_size"),
            max_span_width=tagger_coref_params.pop_int("max_span_width"),
            spans_per_word=tagger_coref_params.pop_float("spans_per_word"),
            max_antecedents=tagger_coref_params.pop_int("max_antecedents"),
            lexical_dropout=tagger_coref_params.pop_float(
                "lexical_dropout", 0.2),
            initializer=initializer,
            regularizer=regularizer,
            eval_on_gold_mentions=eval_on_gold_mentions,
        )
        self._tagger_coref = tagger_coref
        if eval_on_gold_mentions:
            self._tagger_coref._eval_on_gold_mentions = True

        logger.info("Multi-Task Learning Model has been instantiated.")
Example #12
 def from_params(cls, vocab,
                 params: Params) -> 'WikiTablesErmSemanticParser':
     question_embedder = TextFieldEmbedder.from_params(
         vocab, params.pop("question_embedder"))
     action_embedding_dim = params.pop_int("action_embedding_dim")
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     entity_encoder = Seq2VecEncoder.from_params(
         params.pop('entity_encoder'))
     mixture_feedforward_type = params.pop('mixture_feedforward', None)
     if mixture_feedforward_type is not None:
         mixture_feedforward = FeedForward.from_params(
             mixture_feedforward_type)
     else:
         mixture_feedforward = None
     # If no attention function is specified, we should use no attention at
     # all, not attention with the default similarity function.
     attention_function_type = params.pop("attention_function", None)
     if attention_function_type is not None:
         attention_function = SimilarityFunction.from_params(
             attention_function_type)
     else:
         attention_function = None
     decoder_beam_size = params.pop_int("decoder_beam_size")
     max_decoding_steps = params.pop_int("max_decoding_steps")
     normalize_beam_score_by_length = params.pop(
         "normalize_beam_score_by_length", False)
     use_neighbor_similarity_for_linking = params.pop_bool(
         "use_neighbor_similarity_for_linking", False)
     dropout = params.pop_float('dropout', 0.0)
     num_linking_features = params.pop_int('num_linking_features', 10)
     tables_directory = params.pop('tables_directory', '/wikitables/')
     rule_namespace = params.pop('rule_namespace', 'rule_labels')
     checklist_cost_weight = params.pop_float("checklist_cost_weight", 0.6)
     mml_model_file = params.pop('mml_model_file', None)
     params.assert_empty(cls.__name__)
     return cls(
         vocab,
         question_embedder=question_embedder,
         action_embedding_dim=action_embedding_dim,
         encoder=encoder,
         entity_encoder=entity_encoder,
         mixture_feedforward=mixture_feedforward,
         attention_function=attention_function,
         decoder_beam_size=decoder_beam_size,
         max_decoding_steps=max_decoding_steps,
         normalize_beam_score_by_length=normalize_beam_score_by_length,
         checklist_cost_weight=checklist_cost_weight,
         use_neighbor_similarity_for_linking=use_neighbor_similarity_for_linking,
         dropout=dropout,
         num_linking_features=num_linking_features,
         tables_directory=tables_directory,
         rule_namespace=rule_namespace,
         initial_mml_model_file=mml_model_file)
Example #13
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> "CoreferenceResolver":
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        context_layer = Seq2SeqEncoder.from_params(params.pop("context_layer"))
        context_layer_back = params.pop("context_layer_back", None)
        if context_layer_back is not None:
            context_layer_back = Seq2SeqEncoder.from_params(context_layer_back)
        mention_feedforward = FeedForward.from_params(
            params.pop("mention_feedforward"))
        antecedent_feedforward = FeedForward.from_params(
            params.pop("antecedent_feedforward"))

        feature_size = params.pop_int("feature_size")
        max_span_width = params.pop_int("max_span_width")
        spans_per_word = params.pop_float("spans_per_word")
        max_antecedents = params.pop_int("max_antecedents")
        lexical_dropout = params.pop_float("lexical_dropout", 0.2)

        init_params = params.pop("initializer", None)
        reg_params = params.pop("regularizer", None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None else InitializerApplicator())
        regularizer = RegularizerApplicator.from_params(
            reg_params) if reg_params is not None else None

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   context_layer=context_layer,
                   context_layer_back=context_layer_back,
                   mention_feedforward=mention_feedforward,
                   antecedent_feedforward=antecedent_feedforward,
                   feature_size=feature_size,
                   max_span_width=max_span_width,
                   spans_per_word=spans_per_word,
                   max_antecedents=max_antecedents,
                   lexical_dropout=lexical_dropout,
                   initializer=initializer,
                   regularizer=regularizer)
Example #14
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'Tagger':
        token_representation_dim = params.pop_int("token_representation_dim")

        encoder = params.pop("encoder", None)
        if encoder is not None:
            encoder = Seq2SeqEncoder.from_params(encoder)
        decoder = params.pop("decoder", None)
        if decoder is not None and not isinstance(decoder, str):
            decoder = FeedForward.from_params(decoder)

        use_crf = params.pop_bool("use_crf", False)
        constrain_crf_decoding = params.pop_bool("constrain_crf_decoding", False)
        include_start_end_transitions = params.pop_bool("include_start_end_transitions", True)

        contextualizer = params.pop('contextualizer', None)
        if contextualizer:
            contextualizer = Contextualizer.from_params(contextualizer)
        calculate_per_label_f1 = params.pop_bool("calculate_per_label_f1", False)
        calculate_span_f1 = params.pop_bool("calculate_span_f1", False)
        calculate_perplexity = params.pop_bool("calculate_perplexity", False)
        loss_average = params.pop("loss_average", "batch")
        label_encoding = params.pop_choice("label_encoding", [None, "BIO", "BIOUL", "IOB1"],
                                           default_to_first_choice=True)

        pretrained_file = params.pop("pretrained_file", None)
        transfer_contextualizer_from_pretrained_file = params.pop_bool(
            "transfer_contextualizer_from_pretrained_file", False)
        transfer_encoder_from_pretrained_file = params.pop_bool(
            "transfer_encoder_from_pretrained_file", False)
        freeze_encoder = params.pop_bool("freeze_encoder", False)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   token_representation_dim=token_representation_dim,
                   encoder=encoder,
                   decoder=decoder,
                   use_crf=use_crf,
                   constrain_crf_decoding=constrain_crf_decoding,
                   include_start_end_transitions=include_start_end_transitions,
                   label_encoding=label_encoding,
                   contextualizer=contextualizer,
                   calculate_per_label_f1=calculate_per_label_f1,
                   calculate_span_f1=calculate_span_f1,
                   calculate_perplexity=calculate_perplexity,
                   loss_average=loss_average,
                   pretrained_file=pretrained_file,
                   transfer_contextualizer_from_pretrained_file=transfer_contextualizer_from_pretrained_file,
                   transfer_encoder_from_pretrained_file=transfer_encoder_from_pretrained_file,
                   freeze_encoder=freeze_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #15
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'AnswerSynthesis':
        embedder_params = params.pop("text_field_embedder")
        embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
        passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
        feed_forward = FeedForward.from_params(params.pop("feed_forward"))
        dropout = params.pop_float('dropout', 0.1)
        num_decoding_steps = params.pop_int("num_decoding_steps", 40)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   embedder=embedder,
                   question_encoder=question_encoder,
                   passage_encoder=passage_encoder,
                   feed_forward=feed_forward,
                   dropout=dropout,
                   num_decoding_steps=num_decoding_steps,
                   initializer=initializer,
                   regularizer=regularizer)
Example #16
    def from_params(cls, vocab, params):
        ''' Initialize from a Params object '''
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
        dropout = params.pop('dropout', 0.2)
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))

        mask_lstms = params.pop('mask_lstms', True)
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab, attention_similarity_function=similarity_function,
                   modeling_layer=modeling_layer, dropout=dropout,
                   mask_lstms=mask_lstms, initializer=initializer)
Example #17
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SemanticRoleLabeler':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
        binary_feature_dim = params.pop("binary_feature_dim")
        initializer = InitializerApplicator.from_params(params.pop("initializer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   stacked_encoder=stacked_encoder,
                   binary_feature_dim=binary_feature_dim,
                   initializer=initializer)
Example #18
    def from_params(
            cls, vocab: Vocabulary,
            params: Params) -> 'SemEvalClassifierAttention':  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool(
            "use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   initializer=initializer,
                   regularizer=regularizer)
Example #19
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecAccSRL':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        srl_model_archive = params.pop('srl_model_archive', None)
        if srl_model_archive is not None:
            logger.info("Loaded pretrained SRL model from {}".format(srl_model_archive))
            archive = load_archive(srl_model_archive)
            srl_model = archive.model
        else:
            srl_model = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop("initializer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   initializer=initializer,
                   srl_model=srl_model,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder)
Example #20
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdBCN':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        title_text_encoder = Seq2SeqEncoder.from_params(
            params.pop("title_text_encoder"))
        abstract_text_encoder = Seq2SeqEncoder.from_params(
            params.pop("abstract_text_encoder"))
        title_text_projection = FeedForward.from_params(
            params.pop("title_text_projection"))
        abstract_text_projection = FeedForward.from_params(
            params.pop("abstract_text_projection"))
        bi_attention_encoder = BiAttentionEncoder.from_params(
            params.pop("attention_encoder"))
        classifier_feedforward = params.pop("classifier_feedforward")
        if classifier_feedforward.pop('type') == 'feedforward':
            classifier_feedforward = FeedForward.from_params(
                classifier_feedforward)
        else:
            classifier_feedforward = Maxout.from_params(classifier_feedforward)
        use_positional_encoding = params.pop("use_positional_encoding", False)
        bce_pos_weight = params.pop_int("bce_pos_weight", 10)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   title_text_encoder=title_text_encoder,
                   abstract_text_encoder=abstract_text_encoder,
                   title_text_projection=title_text_projection,
                   abstract_text_projection=abstract_text_projection,
                   bi_attention_encoder=bi_attention_encoder,
                   classifier_feedforward=classifier_feedforward,
                   bce_pos_weight=bce_pos_weight,
                   use_positional_encoding=use_positional_encoding,
                   initializer=initializer,
                   regularizer=regularizer)
Example #21
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   elmo=elmo,
                   use_input_elmo=use_input_elmo,
                   use_integrator_output_elmo=use_integrator_output_elmo,
                   initializer=initializer,
                   regularizer=regularizer)
Example #22
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder=encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #23
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   stacked_encoder=stacked_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #24
    def from_params(cls, vocab, params):  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop(u"text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop(u"embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop(u"pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop(u"encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop(u"integrator"))
        integrator_dropout = params.pop(u"integrator_dropout")

        output_layer_params = params.pop(u"output_layer")
        if u"activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop(u"elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool(u"use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool(u"use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(params.pop(u'initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop(u'regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   elmo=elmo,
                   use_input_elmo=use_input_elmo,
                   use_integrator_output_elmo=use_integrator_output_elmo,
                   initializer=initializer,
                   regularizer=regularizer)
Example #25
def s2s_test(nsteps):
    params = Params(params={"type": "gru",
                            "input_size": 4,
                            "hidden_size": 20,
                            "num_layers": 3,
                            "bidirectional": True})
    gru_net = Seq2SeqEncoder.from_params(params=params)
    gru_net.cuda(device=0)

    with Profile("s2s"):
        for i in range(nsteps):
            s2s_func(max_length=450, seq_length=350, gru_net=gru_net)
    print(Profile.to_string())
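
`Profile` and `s2s_func` above are project-local helpers that are not shown. As a self-contained sketch, the same encoder can be exercised on a random batch directly (CPU is fine; the shapes are placeholders):

import torch
from allennlp.common import Params
from allennlp.modules import Seq2SeqEncoder

params = Params({"type": "gru", "input_size": 4, "hidden_size": 20,
                 "num_layers": 3, "bidirectional": True})
gru_net = Seq2SeqEncoder.from_params(params=params)

inputs = torch.randn(2, 350, 4)  # (batch, sequence_length, input_size)
output = gru_net(inputs, None)   # a mask of None treats every position as real
print(output.shape)              # (2, 350, 40): bidirectional doubles hidden_size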
Example #26
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SemanticRoleLabeler':
        """
        With an empty ``params`` argument, this will instantiate an SRL model with the same
        configuration as published in the "Deep Semantic Role Labeling: What Works and What's
        Next" paper, as long as you've set ``allennlp.common.constants.GLOVE_PATH`` to the
        location of your gzipped 100-dimensional GloVe vectors.

        If you want to change parameters, the keys in the ``params`` object must match the
        constructor arguments above.
        """
        default_embedder_params = {
            'tokens': {
                'type': 'embedding',
                'pretrained_file': GLOVE_PATH,
                'trainable': True
            }
        }

        embedder_params = params.pop("text_field_embedder",
                                     default_embedder_params)
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)

        default_lstm_params = {
            'type': 'alternating_lstm',
            'input_size': 101,  # Because of the verb_indicator feature.
            'hidden_size': 300,
            'num_layers': 8,
            'recurrent_dropout_probability': 0.1,
            'use_highway': True
        }
        encoder_params = params.pop("stacked_encoder", default_lstm_params)
        stacked_encoder = Seq2SeqEncoder.from_params(encoder_params)

        default_initializer_params = {
            'bias': {
                'type': 'normal',
                'std': 0.1
            },
            'default': 'orthogonal'
        }

        initializer_params = params.pop('initializer',
                                        default_initializer_params)
        initializer = InitializerApplicator.from_params(initializer_params)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   stacked_encoder=stacked_encoder,
                   initializer=initializer)
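
As the docstring above spells out, every `pop` falls back to a published default, so an empty `Params` reproduces the paper configuration. A sketch (assumes `GLOVE_PATH` is set and a `vocab` is already in scope):

from allennlp.common import Params

# Empty Params: each pop() above supplies its published default value.
model = SemanticRoleLabeler.from_params(vocab, Params({}))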
Example #27
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ModelMSMARCO':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        #num_highway_layers = params.pop_int("num_highway_layers")
        phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
        similarity_function = SimilarityFunction.from_params(
            params.pop("similarity_function"))
        residual_encoder = Seq2SeqEncoder.from_params(
            params.pop("residual_encoder"))
        span_start_encoder = Seq2SeqEncoder.from_params(
            params.pop("span_start_encoder"))
        span_end_encoder = Seq2SeqEncoder.from_params(
            params.pop("span_end_encoder"))
        #feed_forward = FeedForward.from_params(params.pop("feed_forward"))
        dropout = params.pop_float('dropout', 0.2)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        mask_lstms = params.pop_bool('mask_lstms', True)
        params.assert_empty(cls.__name__)
        return cls(
            vocab=vocab,
            text_field_embedder=text_field_embedder,
            #       num_highway_layers=num_highway_layers,
            phrase_layer=phrase_layer,
            attention_similarity_function=similarity_function,
            residual_encoder=residual_encoder,
            span_start_encoder=span_start_encoder,
            span_end_encoder=span_end_encoder,
            dropout=dropout,
            mask_lstms=mask_lstms,
            initializer=initializer,
            regularizer=regularizer)
Example #28
 def from_params(cls, vocab,
                 params: Params) -> 'WikiTablesMmlSemanticParser':
     question_embedder = TextFieldEmbedder.from_params(
         vocab, params.pop("question_embedder"))
     action_embedding_dim = params.pop_int("action_embedding_dim")
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     entity_encoder = Seq2VecEncoder.from_params(
         params.pop('entity_encoder'))
     max_decoding_steps = params.pop_int("max_decoding_steps")
     mixture_feedforward_type = params.pop('mixture_feedforward', None)
     if mixture_feedforward_type is not None:
         mixture_feedforward = FeedForward.from_params(
             mixture_feedforward_type)
     else:
         mixture_feedforward = None
     decoder_beam_search = BeamSearch.from_params(
         params.pop("decoder_beam_search"))
     # If no attention function is specified, we should use no attention at
     # all, not attention with the default similarity function.
     attention_function_type = params.pop("attention_function", None)
     if attention_function_type is not None:
         attention_function = SimilarityFunction.from_params(
             attention_function_type)
     else:
         attention_function = None
     training_beam_size = params.pop_int('training_beam_size', None)
     use_neighbor_similarity_for_linking = params.pop_bool(
         'use_neighbor_similarity_for_linking', False)
     dropout = params.pop_float('dropout', 0.0)
     num_linking_features = params.pop_int('num_linking_features', 10)
     tables_directory = params.pop('tables_directory', '/wikitables/')
     rule_namespace = params.pop('rule_namespace', 'rule_labels')
     params.assert_empty(cls.__name__)
     return cls(vocab,
                question_embedder=question_embedder,
                action_embedding_dim=action_embedding_dim,
                encoder=encoder,
                entity_encoder=entity_encoder,
                mixture_feedforward=mixture_feedforward,
                decoder_beam_search=decoder_beam_search,
                max_decoding_steps=max_decoding_steps,
                attention_function=attention_function,
                training_beam_size=training_beam_size,
                use_neighbor_similarity_for_linking=use_neighbor_similarity_for_linking,
                dropout=dropout,
                num_linking_features=num_linking_features,
                tables_directory=tables_directory,
                rule_namespace=rule_namespace)
Example #29
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'MetaphoreClassifier':
     embedder_params = params.pop("model_sentence_field_embedder")
     model_sentence_field_embedder = MetaphoreFieldEmbedder.from_params(embedder_params, vocab=vocab)
     input_encoder_dropout = Dropout(params.pop("input_encoder_dropout"))
     internal_sentence_encoder = Seq2SeqEncoder.from_params(params.pop("internal_sentence_encoder"))
     linear_attention_feedforward = FeedForward.from_params(params.pop("linear_attention_feedforward"))
     input_classifier_dropout = Dropout(params.pop("input_classifier_dropout"))
     linear_classifier_feedforward = FeedForward.from_params(params.pop("linear_classifier_feedforward"))
     return cls(vocab=vocab,
                model_sentence_field_embedder=model_sentence_field_embedder,
                input_encoder_dropout=input_encoder_dropout,
                internal_sentence_encoder=internal_sentence_encoder,
                linear_attention_feedforward=linear_attention_feedforward,
                input_classifier_dropout=input_classifier_dropout,
                linear_classifier_feedforward=linear_classifier_feedforward)
Example #30
    def from_params(cls, vocab, params):
        """ Initialize from a Params object """
        modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
        dropout = params.pop("dropout", 0.2)
        initializer = InitializerApplicator.from_params(params.pop("initializer", []))

        mask_lstms = params.pop("mask_lstms", True)
        params.assert_empty(cls.__name__)
        return cls(
            vocab=vocab,
            modeling_layer=modeling_layer,
            dropout=dropout,
            mask_lstms=mask_lstms,
            initializer=initializer,
        )
Example #31
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ProLocalModel':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        seq2seq_encoder_params = params.pop("seq2seq_encoder")
        seq2seq_encoder = Seq2SeqEncoder.from_params(seq2seq_encoder_params)

        initializer = InitializerApplicator.from_params(params.pop("initializer", []))

        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   seq2seq_encoder=seq2seq_encoder,
                   initializer=initializer)
Example #32
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ProGlobal':
        token_embedder_params = params.pop("text_field_embedder")
        pos_embedder_params = params.pop("pos_field_embedder")
        sent_pos_embedder_params = params.pop("sent_pos_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, token_embedder_params)
        pos_field_embedder = TextFieldEmbedder.from_params(
            vocab, pos_embedder_params)
        sent_pos_field_embedder = TextFieldEmbedder.from_params(
            vocab, sent_pos_embedder_params)

        modeling_layer = Seq2SeqEncoder.from_params(
            params.pop("modeling_layer"))
        span_end_encoder_before = Seq2SeqEncoder.from_params(
            params.pop("span_end_encoder_bef"))
        span_start_encoder_after = Seq2SeqEncoder.from_params(
            params.pop("span_start_encoder_aft"))
        span_end_encoder_after = Seq2SeqEncoder.from_params(
            params.pop("span_end_encoder_aft"))
        dropout = params.pop('dropout', 0.2)

        init_params = params.pop('initializer', None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None else InitializerApplicator())

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   pos_field_embedder=pos_field_embedder,
                   sent_pos_field_embedder=sent_pos_field_embedder,
                   modeling_layer=modeling_layer,
                   span_start_encoder_after=span_start_encoder_after,
                   span_end_encoder_before=span_end_encoder_before,
                   span_end_encoder_after=span_end_encoder_after,
                   dropout=dropout,
                   initializer=initializer)
Example #33
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextHierarchicalCoherenceAttentionClassifier':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        utterance_encoder = Seq2VecEncoder.from_params(params.pop("utterance_encoder"))
        context_encoder = Seq2SeqEncoder.from_params(params.pop("context_encoder"))

        response_encoder_params = params.pop("response_encoder", None)
        if response_encoder_params is not None:
            response_encoder = Seq2SeqEncoder.from_params(response_encoder_params)
        else:
            response_encoder = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        #similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
        final_classifier_feedforward = FeedForward.from_params(params.pop("final_classifier_feedforward"))

        initializer = InitializerApplicator.from_params(params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))

        matrix_attention = MatrixAttention.from_params(params.pop("similarity_function"))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   matrix_attention=matrix_attention,
                   compare_feedforward=compare_feedforward,
                   classifier_feedforward=classifier_feedforward,
                   final_classifier_feedforward=final_classifier_feedforward,
                   utterance_encoder=utterance_encoder,
                   context_encoder=context_encoder,
                   response_encoder=response_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #34
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
     embedder_params = params.pop("text_field_embedder")
     text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
     num_highway_layers = params.pop("num_highway_layers")
     phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
     similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
     modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
     span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
     initializer = InitializerApplicator.from_params(params.pop("initializer", []))
     dropout = params.pop('dropout', 0.2)
     evaluation_json_file = params.pop('evaluation_json_file', None)
     mask_lstms = params.pop('mask_lstms', True)
     params.assert_empty(cls.__name__)
     return cls(vocab=vocab,
                text_field_embedder=text_field_embedder,
                num_highway_layers=num_highway_layers,
                phrase_layer=phrase_layer,
                attention_similarity_function=similarity_function,
                modeling_layer=modeling_layer,
                span_end_encoder=span_end_encoder,
                initializer=initializer,
                dropout=dropout,
                mask_lstms=mask_lstms,
                evaluation_json_file=evaluation_json_file)
Example #35
def make_model2(params, vocab, glove_path):
    # parameters for original model are specified here:
    # https://github.com/allenai/allennlp/blob/master/training_config/semantic_role_labeler.jsonnet

    # do not change - these values are used in original model
    glove_size = 100
    binary_feature_dim = 100
    max_grad_norm = 1.0

    # encoder
    encoder_params = AllenParams(
        {'type': 'alternating_lstm',
         'input_size': glove_size + binary_feature_dim,
         'hidden_size': params.hidden_size,
         'num_layers': params.num_layers,
         'use_highway': True,
         'recurrent_dropout_probability': 0.1})
    encoder = Seq2SeqEncoder.from_params(encoder_params)

    # embedder
    embedder_params = AllenParams({
        "token_embedders": {
            "tokens": {
                "type": "embedding",
                "embedding_dim": glove_size,  # must match glove dimension
                "pretrained_file": str(glove_path),
                "trainable": True
            }
        }
    })
    text_field_embedder = TextFieldEmbedder.from_params(embedder_params, vocab=vocab)

    # initializer
    initializer_params = [
        ("tag_projection_layer.*weight",
         AllenParams({"type": "orthogonal"}))
    ]
    initializer = InitializerApplicator.from_params(initializer_params)

    # model
    model = Model2(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder=encoder,
                   initializer=initializer,
                   binary_feature_dim=binary_feature_dim)
    model.cuda()
    model.max_grad_norm = max_grad_norm
    return model
Example #36
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'AttentionClassifier':
        """Constructs class from ``Params`` and ``Vocabulary``
        """
        embedder_params = params.pop("text_field_embedder")
        embedder_type = embedder_params.get("type", None)
        if embedder_type is None:
            text_field_embedder = TextFieldEmbedder.from_params(
                vocab, embedder_params)
        else:
            embedder_type = embedder_params.pop("type")
            text_field_embedder = getattr(text_field_embedder_modules,
                                          embedder_type).from_params(
                                              vocab, embedder_params)

        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        attention_params = params.pop("attention_module")
        attention_type = attention_params.pop("type")
        key_dim = attention_params.get("key_emb_size")
        attention_module = getattr(
            attention_modules, attention_type).from_params(attention_params)
        label_namespace = params.pop("label_namespace", "labels")

        dropout = params.pop("dropout", None)
        entropy_scalar = params.pop("entropy_scalar", 0.)
        coverage_scalar = params.pop("coverage_scalar", 0.)
        threshold = params.pop("threshold", 0.5)
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        label_indexer = params.pop("label_indexer", "LabelIndicesBiMap")

        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder=encoder,
                   attention_module=attention_module,
                   key_dim=key_dim,
                   entropy_scalar=entropy_scalar,
                   coverage_scalar=coverage_scalar,
                   label_namespace=label_namespace,
                   dropout=dropout,
                   thresh=threshold,
                   initializer=initializer,
                   regularizer=regularizer,
                   label_indexer=label_indexer)
Example #37
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        label_namespace = params.pop("label_namespace", "labels")
        constraint_type = params.pop("constraint_type", None)
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder=encoder,
                   label_namespace=label_namespace,
                   constraint_type=constraint_type,
                   initializer=initializer,
                   regularizer=regularizer)
Example #38
 def test_from_params_builders_encoder_correctly(self):
     # We're just making sure parameters get passed through correctly here, and that the basic
     # API works.
     params = Params({
             "type": "lstm",
             "bidirectional": True,
             "num_layers": 3,
             "input_size": 5,
             "hidden_size": 7
             })
     encoder = Seq2SeqEncoder.from_params(params)
     # pylint: disable=protected-access
     assert encoder.__class__.__name__ == 'PytorchSeq2SeqWrapper'
     assert encoder._module.__class__.__name__ == 'LSTM'
     assert encoder._module.num_layers == 3
     assert encoder._module.input_size == 5
     assert encoder._module.hidden_size == 7
     assert encoder._module.bidirectional is True
     assert encoder._module.batch_first is True
Example #39
 def from_params(cls, vocab, params: Params) -> 'SimpleSeq2Seq':
     source_embedder_params = params.pop("source_embedder")
     source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     max_decoding_steps = params.pop("max_decoding_steps")
     target_namespace = params.pop("target_namespace", "tokens")
     # If no attention function is specified, we should use no attention at
     # all, not attention with the default similarity function.
     attention_function_type = params.pop("attention_function", None)
     if attention_function_type is not None:
         attention_function = SimilarityFunction.from_params(attention_function_type)
     else:
         attention_function = None
     scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
     params.assert_empty(cls.__name__)
     return cls(vocab,
                source_embedder=source_embedder,
                encoder=encoder,
                max_decoding_steps=max_decoding_steps,
                target_namespace=target_namespace,
                attention_function=attention_function,
                scheduled_sampling_ratio=scheduled_sampling_ratio)
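
The comment in this last example recurs throughout these snippets: a missing "attention_function" must mean no attention at all, not attention with a default similarity. A sketch of the two resulting configurations, assuming the 0.x SimilarityFunction registry:

from allennlp.common import Params
from allennlp.modules import SimilarityFunction

# Key present: build the named similarity function and enable attention.
attention_function = SimilarityFunction.from_params(Params({"type": "dot_product"}))

# Key absent: params.pop("attention_function", None) yields None, and the
# model skips attention entirely rather than silently using a default.
attention_function = None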