Example #1
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
        """Construct a ``DecomposableAttention`` model from a ``Params`` configuration."""
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, params.pop("text_field_embedder"))

        def _optional_seq2seq(key):
            # Both encoders are optional: a missing key means no encoding step.
            encoder_params = params.pop(key, None)
            if encoder_params is None:
                return None
            return Seq2SeqEncoder.from_params(encoder_params)

        premise_encoder = _optional_seq2seq("premise_encoder")
        hypothesis_encoder = _optional_seq2seq("hypothesis_encoder")

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop("initializer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   initializer=initializer,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder)
Example #2
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SemanticRoleLabeler':
        """Construct a ``SemanticRoleLabeler`` from a ``Params`` configuration."""
        embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
        encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
        verb_feature_dim = params.pop("binary_feature_dim")
        init_applicator = InitializerApplicator.from_params(params.pop("initializer", []))

        return cls(vocab=vocab,
                   text_field_embedder=embedder,
                   stacked_encoder=encoder,
                   binary_feature_dim=verb_feature_dim,
                   initializer=init_applicator)
Example #3
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SemanticRoleLabeler':
        """
        With an empty ``params`` argument, this will instantiate a SRL model with the same
        configuration as published in the "Deep Semantic Role Labeling - What works and what's
        next" paper, as long as you've set ``allennlp.common.constants.GLOVE_PATH`` to the
        location of your gzipped 100-dimensional glove vectors.

        If you want to change parameters, the keys in the ``params`` object must match the
        constructor arguments above.
        """
        # Each default below reproduces the published configuration and is
        # used only when the corresponding key is absent from ``params``.
        glove_embedder_defaults = {
            'tokens': {
                'type': 'embedding',
                'pretrained_file': GLOVE_PATH,
                'trainable': True
            }
        }
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, params.pop("text_field_embedder", glove_embedder_defaults))

        alternating_lstm_defaults = {
            'type': 'alternating_lstm',
            'input_size': 101,  # 100-dim embeddings plus the verb_indicator feature.
            'hidden_size': 300,
            'num_layers': 8,
            'recurrent_dropout_probability': 0.1,
            'use_highway': True
        }
        stacked_encoder = Seq2SeqEncoder.from_params(
            params.pop("stacked_encoder", alternating_lstm_defaults))

        initializer_defaults = {
            'bias': {
                'type': 'normal',
                'std': 0.1
            },
            'default': 'orthogonal'
        }
        initializer = InitializerApplicator.from_params(
            params.pop('initializer', initializer_defaults))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   stacked_encoder=stacked_encoder,
                   initializer=initializer)
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SentenceClassifier':
        """Construct a ``SentenceClassifier`` from a ``Params`` configuration."""
        embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
        encoder = Seq2VecEncoder.from_params(params.pop("question_encoder"))
        init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
        reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=embedder,
                   question_encoder=encoder,
                   initializer=init_applicator,
                   regularizer=reg_applicator)
Example #5
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ToxicModel':
        """Construct a ``ToxicModel`` from a ``Params`` configuration."""
        embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
        sentence_encoder = Seq2VecEncoder.from_params(params.pop("encoder"))
        classifier = FeedForward.from_params(params.pop("classifier_feedforward"))
        init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
        reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=embedder,
                   encoder=sentence_encoder,
                   classifier_feedforward=classifier,
                   initializer=init_applicator,
                   regularizer=reg_applicator)
Example #6
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecAccSRL':
        """
        Construct a ``DecAccSRL`` model from a ``Params`` configuration.

        Optional keys: ``premise_encoder`` and ``hypothesis_encoder``
        (Seq2Seq encoders, ``None`` when absent) and ``srl_model_archive``
        (path to a pretrained SRL model archive; ``None`` skips loading).
        """
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        srl_model_archive = params.pop('srl_model_archive', None)
        if srl_model_archive is not None:
            archive = load_archive(srl_model_archive)
            srl_model = archive.model
            # Log only after the archive has actually been loaded: the
            # original emitted "Loaded ..." before load_archive() could fail.
            # Lazy %-style args avoid formatting when INFO is disabled.
            logger.info("Loaded pretrained SRL model from %s", srl_model_archive)
        else:
            srl_model = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop("initializer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   initializer=initializer,
                   srl_model=srl_model,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder)
Example #7
File: sent_rep.py — project: luheng/allennlp
    def from_params(cls, vocab: Vocabulary,
                    params: Params) -> 'SentenceRepresentationModel':
        """
        Construct a ``SentenceRepresentationModel`` from a ``Params`` configuration.

        ``sentence_encoder`` is optional: when the key is absent, no encoder
        is applied. ``srl_model`` is deliberately always ``None`` here —
        pretrained-SRL loading was removed from this model — but the
        constructor still accepts the keyword, so it is passed through.
        """
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab, embedder_params)

        sentence_encoder_params = params.pop("sentence_encoder", None)
        if sentence_encoder_params is not None:
            sentence_encoder = Seq2SeqEncoder.from_params(
                sentence_encoder_params)
        else:
            sentence_encoder = None

        # Pretrained-SRL archive loading is disabled for this model; the
        # previous commented-out implementation has been removed.
        srl_model = None

        aggregate_feedforward = FeedForward.from_params(
            params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(
            params.pop("initializer", []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   aggregate_feedforward=aggregate_feedforward,
                   initializer=initializer,
                   srl_model=srl_model,
                   sentence_encoder=sentence_encoder)