Example #1
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
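For reference, the FeedForward sub-blocks popped above ('attend_feedforward', 'compare_feedforward', 'aggregate_feedforward') are built from Params of the shape below. A minimal sketch, assuming allennlp 0.x; every dimension is illustrative, not taken from any real config:

from allennlp.common import Params
from allennlp.modules import FeedForward

# Hypothetical sub-config of the kind consumed by params.pop('attend_feedforward')
attend_params = Params({
    "input_dim": 300,
    "num_layers": 2,
    "hidden_dims": 200,
    "activations": "relu",
    "dropout": 0.2,
})
attend_feedforward = FeedForward.from_params(attend_params)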
Example #2
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   initializer=initializer,
                   regularizer=regularizer)
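The "output_layer" branch above dispatches on the presence of an "activations" key: with it, the block is parsed as a FeedForward; without it, as a Maxout. A sketch of the two shapes, assuming allennlp 0.x's Maxout keys (input_dim, num_layers, output_dims, pool_sizes); all dimensions are illustrative:

# Parsed as FeedForward ("activations" present):
output_layer_ff = Params({"input_dim": 1200, "num_layers": 1,
                          "hidden_dims": 3, "activations": "linear"})
# Parsed as Maxout (no "activations" key):
output_layer_maxout = Params({"input_dim": 1200, "num_layers": 1,
                              "output_dims": 3, "pool_sizes": 4})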
Example #3
    def test_can_construct_from_params(self):
        params = Params({
            "input_dim": 2,
            "hidden_dims": 3,
            "activations": "relu",
            "num_layers": 2
        })
        feedforward = FeedForward.from_params(params)
        assert len(feedforward._activations) == 2
        assert all(isinstance(a, torch.nn.ReLU) for a in feedforward._activations)
        assert len(feedforward._linear_layers) == 2
        assert all(l.weight.size(0) == 3 for l in feedforward._linear_layers)

        params = Params({
            "input_dim": 2,
            "hidden_dims": [3, 4, 5],
            "activations": ["relu", "relu", "linear"],
            "dropout": 0.2,
            "num_layers": 3,
        })
        feedforward = FeedForward.from_params(params)
        assert len(feedforward._activations) == 3
        assert isinstance(feedforward._activations[0], torch.nn.ReLU)
        assert isinstance(feedforward._activations[1], torch.nn.ReLU)
        # It's hard to check that the last activation is the lambda function we use for `linear`,
        # so this is good enough.
        assert not isinstance(feedforward._activations[2], torch.nn.ReLU)

        assert len(feedforward._linear_layers) == 3
        assert feedforward._linear_layers[0].weight.size(0) == 3
        assert feedforward._linear_layers[1].weight.size(0) == 4
        assert feedforward._linear_layers[2].weight.size(0) == 5

        assert len(feedforward._dropout) == 3
        assert all(d.p == 0.2 for d in feedforward._dropout)
Example #4
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextCoherenceAttentionClassifier':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        context_encoder_params = params.pop("context_encoder", None)
        if context_encoder_params is not None:
            context_encoder = Seq2SeqEncoder.from_params(context_encoder_params)
        else:
            context_encoder = None

        response_encoder_params = params.pop("response_encoder", None)
        if response_encoder_params is not None:
            response_encoder = Seq2SeqEncoder.from_params(response_encoder_params)
        else:
            response_encoder = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))

        initializer = InitializerApplicator.from_params(params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   classifier_feedforward=classifier_feedforward,
                   context_encoder=context_encoder,
                   response_encoder=response_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #5
    def test_can_construct_from_params(self):
        params = Params({
            'input_dim': 2,
            'hidden_dims': 3,
            'activations': 'relu',
            'num_layers': 2,
        })
        feedforward = FeedForward.from_params(params)
        assert len(feedforward._activations) == 2
        assert all(isinstance(a, torch.nn.ReLU) for a in feedforward._activations)
        assert len(feedforward._linear_layers) == 2
        assert all(l.weight.size(0) == 3 for l in feedforward._linear_layers)

        params = Params({
            'input_dim': 2,
            'hidden_dims': [3, 4, 5],
            'activations': ['relu', 'relu', 'linear'],
            'dropout': .2,
            'num_layers': 3,
        })
        feedforward = FeedForward.from_params(params)
        assert len(feedforward._activations) == 3
        assert isinstance(feedforward._activations[0], torch.nn.ReLU)
        assert isinstance(feedforward._activations[1], torch.nn.ReLU)
        # It's hard to check that the last activation is the lambda function we use for `linear`,
        # so this is good enough.
        assert not isinstance(feedforward._activations[2], torch.nn.ReLU)

        assert len(feedforward._linear_layers) == 3
        assert feedforward._linear_layers[0].weight.size(0) == 3
        assert feedforward._linear_layers[1].weight.size(0) == 4
        assert feedforward._linear_layers[2].weight.size(0) == 5

        assert len(feedforward._dropout) == 3
        assert all(d.p == 0.2 for d in feedforward._dropout)
Example #6
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ESIM':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)

        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        similarity_function = SimilarityFunction.from_params(
            params.pop("similarity_function"))
        projection_feedforward = FeedForward.from_params(
            params.pop('projection_feedforward'))
        inference_encoder = Seq2SeqEncoder.from_params(
            params.pop("inference_encoder"))
        output_feedforward = FeedForward.from_params(
            params.pop('output_feedforward'))
        output_logit = FeedForward.from_params(params.pop('output_logit'))
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        dropout = params.pop("dropout", 0)

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder=encoder,
                   similarity_function=similarity_function,
                   projection_feedforward=projection_feedforward,
                   inference_encoder=inference_encoder,
                   output_feedforward=output_feedforward,
                   output_logit=output_logit,
                   initializer=initializer,
                   dropout=dropout,
                   regularizer=regularizer)
Example #7
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'OntoEmmaNN':
        name_embedder = TextFieldEmbedder.from_params(
            vocab, params.pop("name_embedder"))
        definition_embedder = TextFieldEmbedder.from_params(
            vocab, params.pop("definition_embedder"))
        name_encoder = Seq2VecEncoder.from_params(params.pop("name_encoder"))
        definition_encoder = Seq2VecEncoder.from_params(
            params.pop("definition_encoder"))
        siamese_feedforward = FeedForward.from_params(
            params.pop("siamese_feedforward"))
        decision_feedforward = FeedForward.from_params(
            params.pop("decision_feedforward"))

        init_params = params.pop('initializer', None)
        reg_params = params.pop('regularizer', None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None else InitializerApplicator())
        regularizer = RegularizerApplicator.from_params(
            reg_params) if reg_params is not None else None

        return cls(vocab=vocab,
                   name_embedder=name_embedder,
                   definition_embedder=definition_embedder,
                   name_encoder=name_encoder,
                   definition_encoder=definition_encoder,
                   siamese_feedforward=siamese_feedforward,
                   decision_feedforward=decision_feedforward,
                   initializer=initializer,
                   regularizer=regularizer)
Example #8
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SarcasmClassifier':
        bert_model_name = params.pop("bert_model_name")
        quote_response_encoder = Seq2VecEncoder.from_params(
            params.pop("quote_response_encoder"))
        classifier_feedforward = FeedForward.from_params(
            params.pop("classifier_feedforward"))
        classifier_feedforward_2 = FeedForward.from_params(
            params.pop("classifier_feedforward_2"))

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics",
                                                   False)

        # predict_mode = params.pop_bool("predict_mode", False)
        # print(f"pred mode: {predict_mode}")

        return cls(vocab=vocab,
                   bert_model_name=bert_model_name,
                   quote_response_encoder=quote_response_encoder,
                   classifier_feedforward=classifier_feedforward,
                   classifier_feedforward_2=classifier_feedforward_2,
                   initializer=initializer,
                   regularizer=regularizer,
                   report_auxiliary_metrics=report_auxiliary_metrics)
Example #9
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'TreeAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        attention_similarity = SimilarityFunction.from_params(params.pop('attention_similarity'))
        phrase_probability = FeedForward.from_params(params.pop('phrase_probability'))
        edge_probability = FeedForward.from_params(params.pop('edge_probability'))

        edge_embedding = Embedding.from_params(vocab, params.pop('edge_embedding'))
        use_encoding_for_node = params.pop('use_encoding_for_node')
        ignore_edges = params.pop('ignore_edges', False)

        init_params = params.pop('initializer', None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None
                       else InitializerApplicator())

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   phrase_probability=phrase_probability,
                   edge_probability=edge_probability,
                   premise_encoder=premise_encoder,
                   edge_embedding=edge_embedding,
                   use_encoding_for_node=use_encoding_for_node,
                   attention_similarity=attention_similarity,
                   ignore_edges=ignore_edges,
                   initializer=initializer)
Example #10
    def from_params(cls, vocab: Vocabulary, params: Params) -> "CoreferenceResolver":
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        context_layer = Seq2SeqEncoder.from_params(params.pop("context_layer"))
        mention_feedforward = FeedForward.from_params(params.pop("mention_feedforward"))
        antecedent_feedforward = FeedForward.from_params(params.pop("antecedent_feedforward"))

        feature_size = params.pop_int("feature_size")
        max_span_width = params.pop_int("max_span_width")
        spans_per_word = params.pop_float("spans_per_word")
        max_antecedents = params.pop_int("max_antecedents")
        lexical_dropout = params.pop_float("lexical_dropout", 0.2)

        init_params = params.pop("initializer", None)
        reg_params = params.pop("regularizer", None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None
                       else InitializerApplicator())
        regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   context_layer=context_layer,
                   mention_feedforward=mention_feedforward,
                   antecedent_feedforward=antecedent_feedforward,
                   feature_size=feature_size,
                   max_span_width=max_span_width,
                   spans_per_word=spans_per_word,
                   max_antecedents=max_antecedents,
                   lexical_dropout=lexical_dropout,
                   initializer=initializer,
                   regularizer=regularizer)
Example #11
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #12
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SarcasmClassifier':
        embedder_params1 = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(embedder_params1,
                                                            vocab=vocab)
        quote_response_encoder = Seq2VecEncoder.from_params(
            params.pop("quote_response_encoder"))
        classifier_feedforward = FeedForward.from_params(
            params.pop("classifier_feedforward"))
        classifier_feedforward_2 = FeedForward.from_params(
            params.pop("classifier_feedforward_2"))

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics",
                                                   False)

        predict_mode = params.pop_bool("predict_mode", False)
        # print(f"pred mode: {predict_mode}")

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   quote_response_encoder=quote_response_encoder,
                   classifier_feedforward=classifier_feedforward,
                   classifier_feedforward_2=classifier_feedforward_2,
                   initializer=initializer,
                   regularizer=regularizer,
                   report_auxiliary_metrics=report_auxiliary_metrics,
                   predict_mode=predict_mode)
Example #13
    def from_params(cls, params: Params, vocab: Vocabulary) -> 'CMVPredictor':

        response_embedder_params = params.pop("response_embedder")
        response_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=response_embedder_params)

        response_word_attention_params = params.pop("response_word_attention")
        response_word_attention = Seq2VecEncoder.from_params(
            params=response_word_attention_params)

        response_encoder_params = params.pop("response_encoder")
        response_encoder = Seq2SeqEncoder.from_params(
            params=response_encoder_params)

        response_sentence_attention_params = params.pop(
            "response_sentence_attention")
        response_sentence_attention = Seq2VecEncoder.from_params(
            params=response_sentence_attention_params)

        op_embedder_params = params.pop("op_embedder", None)
        op_embedder = None
        if op_embedder_params is not None:
            op_embedder = TextFieldEmbedder.from_params(
                vocab=vocab, params=op_embedder_params)

        op_word_attention_params = params.pop("op_word_attention", None)
        op_word_attention = None
        if op_word_attention_params is not None:
            op_word_attention = Seq2VecEncoder.from_params(
                params=op_word_attention_params)

        op_encoder_params = params.pop("op_encoder", None)
        op_encoder = None
        if op_encoder_params is not None:
            op_encoder = Seq2SeqEncoder.from_params(params=op_encoder_params)

        output_feedforward = FeedForward.from_params(
            params=params.pop('output_feedforward'))

        feature_feedforward = None
        feature_feedforward_params = params.pop('feature_feedforward', None)
        if feature_feedforward_params is not None:
            feature_feedforward = FeedForward.from_params(
                params=feature_feedforward_params)

        dropout = params.pop("dropout", 0)

        initializer = InitializerApplicator.from_params(
            params=params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params=params.pop('regularizer', []))

        params.assert_empty(cls.__name__)

        return cls(vocab, response_embedder, response_word_attention,
                   response_encoder, response_sentence_attention,
                   output_feedforward, op_embedder, op_word_attention,
                   op_encoder, dropout, initializer, regularizer,
                   feature_feedforward)
Example #14
    def __init__(self,
                 vocab: Vocabulary,
                 params: Params,
                 regularizer: RegularizerApplicator = None):

        super(LayerCoref, self).__init__(vocab=vocab, regularizer=regularizer)

        # Base text Field Embedder
        text_field_embedder_params = params.pop("text_field_embedder")
        text_field_embedder = BasicTextFieldEmbedder.from_params(
            vocab=vocab, params=text_field_embedder_params)
        self._text_field_embedder = text_field_embedder

        ##############
        # Coref stuff
        ##############
        coref_params = params.pop("coref")

        # Encoder
        encoder_coref_params = coref_params.pop("encoder")
        encoder_coref = Seq2SeqEncoder.from_params(encoder_coref_params)
        self._encoder_coref = encoder_coref

        # Tagger: Coreference
        tagger_coref_params = coref_params.pop("tagger")
        eval_on_gold_mentions = tagger_coref_params.pop_bool(
            "eval_on_gold_mentions", False)
        init_params = tagger_coref_params.pop("initializer", None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None else InitializerApplicator())

        tagger_coref = CoreferenceCustom(
            vocab=vocab,
            text_field_embedder=self._text_field_embedder,
            context_layer=self._encoder_coref,
            mention_feedforward=FeedForward.from_params(
                tagger_coref_params.pop("mention_feedforward")),
            antecedent_feedforward=FeedForward.from_params(
                tagger_coref_params.pop("antecedent_feedforward")),
            feature_size=tagger_coref_params.pop_int("feature_size"),
            max_span_width=tagger_coref_params.pop_int("max_span_width"),
            spans_per_word=tagger_coref_params.pop_float("spans_per_word"),
            max_antecedents=tagger_coref_params.pop_int("max_antecedents"),
            lexical_dropout=tagger_coref_params.pop_float(
                "lexical_dropout", 0.2),
            initializer=initializer,
            regularizer=regularizer,
            eval_on_gold_mentions=eval_on_gold_mentions,
        )
        self._tagger_coref = tagger_coref
        if eval_on_gold_mentions:
            self._tagger_coref._eval_on_gold_mentions = True

        logger.info("Multi-Task Learning Model has been instantiated.")
Example #15
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'MetaphoreClassifier':
        embedder_params = params.pop("model_sentence_field_embedder")
        model_sentence_field_embedder = MetaphoreFieldEmbedder.from_params(embedder_params, vocab=vocab)
        input_encoder_dropout = Dropout(params.pop("input_encoder_dropout"))
        internal_sentence_encoder = Seq2SeqEncoder.from_params(params.pop("internal_sentence_encoder"))
        linear_attention_feedforward = FeedForward.from_params(params.pop("linear_attention_feedforward"))
        input_classifier_dropout = Dropout(params.pop("input_classifier_dropout"))
        linear_classifier_feedforward = FeedForward.from_params(params.pop("linear_classifier_feedforward"))
        return cls(vocab=vocab,
                   model_sentence_field_embedder=model_sentence_field_embedder,
                   input_encoder_dropout=input_encoder_dropout,
                   internal_sentence_encoder=internal_sentence_encoder,
                   linear_attention_feedforward=linear_attention_feedforward,
                   input_classifier_dropout=input_classifier_dropout,
                   linear_classifier_feedforward=linear_classifier_feedforward)
Example #16
    def from_params(  # type: ignore
            cls, vocab: Vocabulary,
            params: Params) -> "BiattentiveClassificationNetwork":

        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(
            params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool(
            "use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(
            params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(
            params.pop("regularizer", []))
        params.assert_empty(cls.__name__)

        return cls(
            vocab=vocab,
            text_field_embedder=text_field_embedder,
            embedding_dropout=embedding_dropout,
            pre_encode_feedforward=pre_encode_feedforward,
            encoder=encoder,
            integrator=integrator,
            integrator_dropout=integrator_dropout,
            output_layer=output_layer,
            elmo=elmo,
            use_input_elmo=use_input_elmo,
            use_integrator_output_elmo=use_integrator_output_elmo,
            initializer=initializer,
            regularizer=regularizer,
        )
Example #17
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

        feed_forward_params = params.pop("feedforward", None)
        if feed_forward_params is not None:
            feedforward_layer = FeedForward.from_params(feed_forward_params)
        else:
            feedforward_layer = None
        pos_tag_embedding_params = params.pop("pos_tag_embedding", None)
        if pos_tag_embedding_params is not None:
            pos_tag_embedding = Embedding.from_params(vocab, pos_tag_embedding_params)
        else:
            pos_tag_embedding = None
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        evalb_directory_path = params.pop("evalb_directory_path", None)
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   span_extractor=span_extractor,
                   encoder=encoder,
                   feedforward_layer=feedforward_layer,
                   pos_tag_embedding=pos_tag_embedding,
                   initializer=initializer,
                   regularizer=regularizer,
                   evalb_directory_path=evalb_directory_path)
Example #18
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdRNN':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        abstract_text_encoder = Seq2SeqEncoder.from_params(params.pop("abstract_text_encoder"))
        attention_encoder = params.pop("attention_encoder")
        attention_type = attention_encoder.pop('type')
        if attention_type == 'linear_attention':
            attention_encoder = AttentionEncoder.from_params(attention_encoder)
        elif attention_type == 'self_attention':
            attention_encoder = SelfAttentionEncoder.from_params(attention_encoder)
        elif attention_type == 'multi_head':
            attention_encoder = MultiHeadAttentionEncoder.from_params(attention_encoder)
        else:
            attention_encoder = Pooling.from_params(attention_encoder)
        classifier_feedforward = params.pop("classifier_feedforward")
        if classifier_feedforward.pop('type') == 'feedforward':
            classifier_feedforward = FeedForward.from_params(classifier_feedforward)
        else:
            classifier_feedforward = Maxout.from_params(classifier_feedforward)
        use_positional_encoding = params.pop("use_positional_encoding", False)
        bce_pos_weight = params.pop_int("bce_pos_weight", 10)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   abstract_text_encoder=abstract_text_encoder,
                   attention_encoder=attention_encoder,
                   classifier_feedforward=classifier_feedforward,
                   bce_pos_weight=bce_pos_weight,
                   use_positional_encoding=use_positional_encoding,
                   initializer=initializer,
                   regularizer=regularizer)
Example #19
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'EtdTransformer':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        abstract_text_encoder = Seq2SeqEncoder.from_params(
            params.pop("abstract_text_encoder"))
        attention_encoder = AttentionEncoder.from_params(
            params.pop("attention_encoder"))
        classifier_feedforward = FeedForward.from_params(
            params.pop("classifier_feedforward"))
        use_positional_encoding = params.pop("use_positional_encoding", False)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   abstract_text_encoder=abstract_text_encoder,
                   attention_encoder=attention_encoder,
                   classifier_feedforward=classifier_feedforward,
                   use_positional_encoding=use_positional_encoding,
                   initializer=initializer,
                   regularizer=regularizer)
Example #20
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SemiCrfSemanticRoleLabeler':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        stacked_encoder = Seq2SeqEncoder.from_params(
            params.pop("stacked_encoder"))
        span_feedforward = FeedForward.from_params(
            params.pop("span_feedforward"))
        binary_feature_dim = params.pop("binary_feature_dim")
        max_span_width = params.pop("max_span_width")
        binary_feature_size = params.pop("feature_size")
        distance_feature_size = params.pop("distance_feature_size", 5)
        fast_mode = params.pop("fast_mode", True)
        loss_type = params.pop("loss_type", "logloss")
        label_namespace = params.pop("label_namespace", "labels")
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   stacked_encoder=stacked_encoder,
                   binary_feature_dim=binary_feature_dim,
                   span_feedforward=span_feedforward,
                   max_span_width=max_span_width,
                   binary_feature_size=binary_feature_size,
                   distance_feature_size=distance_feature_size,
                   label_namespace=label_namespace,
                   loss_type=loss_type,
                   fast_mode=fast_mode,
                   initializer=initializer,
                   regularizer=regularizer)
Example #21
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'HBMP':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)

        encoder_params = params.pop("encoder")
        rnn1 = Seq2SeqEncoder.from_params(encoder_params.duplicate())
        rnn2 = Seq2SeqEncoder.from_params(encoder_params.duplicate())
        rnn3 = Seq2SeqEncoder.from_params(encoder_params.duplicate())

        aggregated_params = params.pop('aggregate_feedforward', None)
        aggregate_feedforward = FeedForward.from_params(
            aggregated_params) if aggregated_params else None

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   rnn1=rnn1,
                   rnn2=rnn2,
                   rnn3=rnn3,
                   aggregate_feedforward=aggregate_feedforward,
                   initializer=initializer,
                   regularizer=regularizer)
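Note the .duplicate() calls above: from_params consumes (pops) keys from the Params it receives, so passing the same "encoder" block to all three constructors would fail after the first call. A minimal sketch of the idea, assuming allennlp 0.x and a purely illustrative LSTM config:

encoder_params = Params({"type": "lstm", "input_size": 300, "hidden_size": 600})
rnn1 = Seq2SeqEncoder.from_params(encoder_params.duplicate())  # deep copy; original stays intact
rnn2 = Seq2SeqEncoder.from_params(encoder_params.duplicate())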
Example #22
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdHANLinAtt':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        word_encoder = Seq2SeqEncoder.from_params(params.pop("word_encoder"))
        sentence_encoder = Seq2SeqEncoder.from_params(
            params.pop("sentence_encoder"))
        classifier_feedforward = params.pop("classifier_feedforward")
        if classifier_feedforward.pop('type') == 'feedforward':
            classifier_feedforward = FeedForward.from_params(
                classifier_feedforward)
        else:
            classifier_feedforward = Maxout.from_params(classifier_feedforward)
        use_positional_encoding = params.pop("use_positional_encoding", False)
        bce_pos_weight = params.pop_int("bce_pos_weight", 10)
        attended_text_dropout = params.pop_float("attended_text_dropout", 0.0)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   word_encoder=word_encoder,
                   sentence_encoder=sentence_encoder,
                   classifier_feedforward=classifier_feedforward,
                   attended_text_dropout=attended_text_dropout,
                   bce_pos_weight=bce_pos_weight,
                   use_positional_encoding=use_positional_encoding,
                   initializer=initializer,
                   regularizer=regularizer)
Example #23
    def from_params(cls, params: Params) -> 'MemoryAttention':
        attention = IntraAttention.from_params(params.pop('attention'))
        memory_feedforward = FeedForward.from_params(
            params.pop('memory_feedforward'))
        n_hops = params.pop('n_hops', 3)

        return cls(attention, memory_feedforward, n_hops)
Example #24
 def from_params(cls, vocab, params: Params) -> 'WikiTablesSemanticParser':
     question_embedder = TextFieldEmbedder.from_params(vocab, params.pop("question_embedder"))
     action_embedding_dim = params.pop_int("action_embedding_dim")
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     entity_encoder = Seq2VecEncoder.from_params(params.pop('entity_encoder'))
     max_decoding_steps = params.pop_int("max_decoding_steps")
     mixture_feedforward_type = params.pop('mixture_feedforward', None)
     if mixture_feedforward_type is not None:
         mixture_feedforward = FeedForward.from_params(mixture_feedforward_type)
     else:
         mixture_feedforward = None
     decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
     # If no attention function is specified, we should not use attention at all,
     # rather than fall back to attention with the default similarity function.
     attention_function_type = params.pop("attention_function", None)
     if attention_function_type is not None:
         attention_function = SimilarityFunction.from_params(attention_function_type)
     else:
         attention_function = None
     dropout = params.pop_float('dropout', 0.0)
     num_linking_features = params.pop_int('num_linking_features', 8)
     rule_namespace = params.pop('rule_namespace', 'rule_labels')
     params.assert_empty(cls.__name__)
     return cls(vocab,
                question_embedder=question_embedder,
                action_embedding_dim=action_embedding_dim,
                encoder=encoder,
                entity_encoder=entity_encoder,
                mixture_feedforward=mixture_feedforward,
                decoder_beam_search=decoder_beam_search,
                max_decoding_steps=max_decoding_steps,
                attention_function=attention_function,
                dropout=dropout,
                num_linking_features=num_linking_features,
                rule_namespace=rule_namespace)
Example #25
 def from_params(cls, vocab, params: Params) -> 'WikiTablesMmlSemanticParser':
     question_embedder = TextFieldEmbedder.from_params(vocab, params.pop("question_embedder"))
     action_embedding_dim = params.pop_int("action_embedding_dim")
     encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
     entity_encoder = Seq2VecEncoder.from_params(params.pop('entity_encoder'))
     max_decoding_steps = params.pop_int("max_decoding_steps")
     mixture_feedforward_type = params.pop('mixture_feedforward', None)
     if mixture_feedforward_type is not None:
         mixture_feedforward = FeedForward.from_params(mixture_feedforward_type)
     else:
         mixture_feedforward = None
     decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
     input_attention = Attention.from_params(params.pop("attention"))
     training_beam_size = params.pop_int('training_beam_size', None)
     use_neighbor_similarity_for_linking = params.pop_bool('use_neighbor_similarity_for_linking', False)
     dropout = params.pop_float('dropout', 0.0)
     num_linking_features = params.pop_int('num_linking_features', 10)
     tables_directory = params.pop('tables_directory', '/wikitables/')
     rule_namespace = params.pop('rule_namespace', 'rule_labels')
     params.assert_empty(cls.__name__)
     return cls(vocab,
                question_embedder=question_embedder,
                action_embedding_dim=action_embedding_dim,
                encoder=encoder,
                entity_encoder=entity_encoder,
                mixture_feedforward=mixture_feedforward,
                decoder_beam_search=decoder_beam_search,
                max_decoding_steps=max_decoding_steps,
                input_attention=input_attention,
                training_beam_size=training_beam_size,
                use_neighbor_similarity_for_linking=use_neighbor_similarity_for_linking,
                dropout=dropout,
                num_linking_features=num_linking_features,
                tables_directory=tables_directory,
                rule_namespace=rule_namespace)
Example #26
    def from_params(
            cls, vocab: Vocabulary, params: Params
    ) -> 'DialogueContextHierarchicalCoherenceClassifier':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        utterance_encoder = Seq2VecEncoder.from_params(
            params.pop("utterance_encoder"))
        context_encoder = Seq2VecEncoder.from_params(
            params.pop("context_encoder"))
        response_encoder = Seq2VecEncoder.from_params(
            params.pop("response_encoder"))
        classifier_feedforward = FeedForward.from_params(
            params.pop("classifier_feedforward"))

        initializer = InitializerApplicator.from_params(
            params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(
            params.pop("regularizer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   utterance_encoder=utterance_encoder,
                   context_encoder=context_encoder,
                   response_encoder=response_encoder,
                   classifier_feedforward=classifier_feedforward,
                   initializer=initializer,
                   regularizer=regularizer)
Example #27
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SelectiveRegressor':
        token_representation_dim = params.pop_int("token_representation_dim")

        encoder = params.pop("encoder", None)
        if encoder is not None:
            encoder = Seq2SeqEncoder.from_params(encoder)
        decoder = params.pop("decoder", None)
        if decoder is not None and not isinstance(decoder, str):
            decoder = FeedForward.from_params(decoder)
        contextualizer = params.pop('contextualizer', None)
        if contextualizer:
            contextualizer = Contextualizer.from_params(contextualizer)

        pretrained_file = params.pop("pretrained_file", None)
        transfer_contextualizer_from_pretrained_file = params.pop_bool(
            "transfer_contextualizer_from_pretrained_file", False)
        transfer_encoder_from_pretrained_file = params.pop_bool(
            "transfer_encoder_from_pretrained_file", False)
        freeze_encoder = params.pop_bool("freeze_encoder", False)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   token_representation_dim=token_representation_dim,
                   encoder=encoder,
                   decoder=decoder,
                   contextualizer=contextualizer,
                   pretrained_file=pretrained_file,
                   transfer_contextualizer_from_pretrained_file=transfer_contextualizer_from_pretrained_file,
                   transfer_encoder_from_pretrained_file=transfer_encoder_from_pretrained_file,
                   freeze_encoder=freeze_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Example #28
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'AnswerSynthesis':
        embedder_params = params.pop("text_field_embedder")
        embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        question_encoder = Seq2SeqEncoder.from_params(
            params.pop("question_encoder"))
        passage_encoder = Seq2SeqEncoder.from_params(
            params.pop("passage_encoder"))
        feed_forward = FeedForward.from_params(params.pop("feed_forward"))
        dropout = params.pop_float('dropout', 0.1)
        num_decoding_steps = params.pop_int("num_decoding_steps", 40)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        return cls(vocab=vocab,
                   embedder=embedder,
                   question_encoder=question_encoder,
                   passage_encoder=passage_encoder,
                   feed_forward=feed_forward,
                   dropout=dropout,
                   num_decoding_steps=num_decoding_steps,
                   initializer=initializer,
                   regularizer=regularizer)
Example #29
    def test_textual_representation_contains_activations(self):
        params = Params({
            "input_dim": 2,
            "hidden_dims": 3,
            "activations": ["linear", "relu", "swish"],
            "num_layers": 3,
        })
        feedforward = FeedForward.from_params(params)
        expected_text_representation = inspect.cleandoc("""
            FeedForward(
              (_activations): ModuleList(
                (0): Linear()
                (1): ReLU()
                (2): Swish()
              )
              (_linear_layers): ModuleList(
                (0): Linear(in_features=2, out_features=3, bias=True)
                (1): Linear(in_features=3, out_features=3, bias=True)
                (2): Linear(in_features=3, out_features=3, bias=True)
              )
              (_dropout): ModuleList(
                (0): Dropout(p=0.0, inplace=False)
                (1): Dropout(p=0.0, inplace=False)
                (2): Dropout(p=0.0, inplace=False)
              )
            )
            """)
        actual_text_representation = str(feedforward)

        assert actual_text_representation == expected_text_representation
Example #30
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SpanConstituencyParser':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)
        span_extractor = SpanExtractor.from_params(
            params.pop("span_extractor"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        feed_forward_params = params.pop("feedforward", None)
        if feed_forward_params is not None:
            feedforward_layer = FeedForward.from_params(feed_forward_params)
        else:
            feedforward_layer = None
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        evalb_directory_path = params.pop("evalb_directory_path", None)
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   span_extractor=span_extractor,
                   encoder=encoder,
                   feedforward_layer=feedforward_layer,
                   initializer=initializer,
                   regularizer=regularizer,
                   evalb_directory_path=evalb_directory_path)
Example #31
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DeIsTe':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)

        inter_attention = MatrixAttention.from_params(
            params.pop("inter_attention"))
        param_dyn_encoder = Seq2VecEncoder.from_params(
            params.pop("param_dyn_encoder"))

        pos_embedder = TokenEmbedder.from_params(
            vocab=None, params=params.pop("pos_embedder"))
        pos_attn_encoder = Seq2VecEncoder.from_params(
            params.pop("pos_attn_encoder"))

        output_feedforward_params = params.pop('output_feedforward', None)
        output_feedforward = FeedForward.from_params(
            output_feedforward_params) if output_feedforward_params else None

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   inter_attention=inter_attention,
                   param_dyn_encoder=param_dyn_encoder,
                   pos_embedder=pos_embedder,
                   pos_attn_encoder=pos_attn_encoder,
                   output_feedforward=output_feedforward,
                   initializer=initializer,
                   regularizer=regularizer)
Example #32
 def from_params(cls, params: Params):
     feedforward = FeedForward.from_params(params.pop('feedforward'))
     output_dim = params.pop_int('output_dim')
     layer_norm = params.pop('layer_norm', True)
     params.assert_empty(cls.__name__)
     return cls(feedforward=feedforward,
                output_dim=output_dim,
                layer_norm=layer_norm)
Example #33
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   elmo=elmo,
                   use_input_elmo=use_input_elmo,
                   use_integrator_output_elmo=use_integrator_output_elmo,
                   initializer=initializer,
                   regularizer=regularizer)
Example #34
    def test_forward_gives_correct_output(self):
        params = Params({
                'input_dim': 2,
                'hidden_dims': 3,
                'activations': 'relu',
                'num_layers': 2
                })
        feedforward = FeedForward.from_params(params)

        constant_init = lambda tensor: torch.nn.init.constant_(tensor, 1.)
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(feedforward)

        input_tensor = torch.FloatTensor([[-3, 1]])
        output = feedforward(input_tensor).data.numpy()
        assert output.shape == (1, 3)
        # This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
        # which then gets a bias added in the second layer to be [1, 1, 1].
        assert_almost_equal(output, [[1, 1, 1]])
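Hand check of the comment above: after constant_init, every weight and bias is 1, so each
unit in layer 1 computes (-3)*1 + 1*1 + 1 = -1, giving ReLU([-1, -1, -1]) = [0, 0, 0];
each unit in layer 2 then computes 0 + 0 + 0 + 1 = 1, giving [1, 1, 1] as asserted.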