from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.maxout import Maxout
from allennlp.nn import InitializerApplicator, RegularizerApplicator
# AttentionEncoder, SelfAttentionEncoder, MultiHeadAttentionEncoder,
# BiAttentionEncoder and Pooling are project-local modules, assumed to be
# importable from this package.


@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdHANLinAtt':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab,
                                                        params=embedder_params)
    # Hierarchical encoding: words within each sentence, then sentences
    # within the document.
    word_encoder = Seq2SeqEncoder.from_params(params.pop("word_encoder"))
    sentence_encoder = Seq2SeqEncoder.from_params(params.pop("sentence_encoder"))
    # The classifier head is either a plain FeedForward or a Maxout network,
    # selected by the 'type' key (popped before the sub-config is forwarded).
    classifier_feedforward = params.pop("classifier_feedforward")
    if classifier_feedforward.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_feedforward)
    else:
        classifier_feedforward = Maxout.from_params(classifier_feedforward)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    attended_text_dropout = params.pop_float("attended_text_dropout", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               word_encoder=word_encoder,
               sentence_encoder=sentence_encoder,
               classifier_feedforward=classifier_feedforward,
               attended_text_dropout=attended_text_dropout,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
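# The classifier-head dispatch above accepts either head; 'type' is popped
# before the remaining keys are handed to FeedForward.from_params or
# Maxout.from_params. A hedged sketch of the two sub-configs (the dimensions
# below are hypothetical placeholders, not taken from the project's configs):
#
#     "classifier_feedforward": {
#         "type": "feedforward",
#         "input_dim": 400, "num_layers": 1,
#         "hidden_dims": [39], "activations": ["linear"]
#     }
#
#     "classifier_feedforward": {
#         "type": "maxout",          # any value other than 'feedforward'
#         "input_dim": 400, "num_layers": 1,
#         "output_dims": [39], "pool_sizes": [4]
#     }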
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdDebugModel':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab,
                                                        params=embedder_params)
    abstract_text_encoder = Seq2SeqEncoder.from_params(
        params.pop("abstract_text_encoder"))
    attention_encoder = AttentionEncoder.from_params(
        params.pop("attention_encoder"))
    # Same feedforward/maxout classifier-head dispatch as above.
    classifier_feedforward = params.pop("classifier_feedforward")
    if classifier_feedforward.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_feedforward)
    else:
        classifier_feedforward = Maxout.from_params(classifier_feedforward)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               classifier_feedforward=classifier_feedforward,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdRNN':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab,
                                                        params=embedder_params)
    abstract_text_encoder = Seq2SeqEncoder.from_params(
        params.pop("abstract_text_encoder"))
    # The 'type' key selects which attention module aggregates the encoder
    # states; any unrecognised type falls through to Pooling.
    attention_encoder = params.pop("attention_encoder")
    attention_type = attention_encoder.pop('type')
    if attention_type == 'linear_attention':
        attention_encoder = AttentionEncoder.from_params(attention_encoder)
    elif attention_type == 'self_attention':
        attention_encoder = SelfAttentionEncoder.from_params(attention_encoder)
    elif attention_type == 'multi_head':
        attention_encoder = MultiHeadAttentionEncoder.from_params(attention_encoder)
    else:
        attention_encoder = Pooling.from_params(attention_encoder)
    classifier_feedforward = params.pop("classifier_feedforward")
    if classifier_feedforward.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_feedforward)
    else:
        classifier_feedforward = Maxout.from_params(classifier_feedforward)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               classifier_feedforward=classifier_feedforward,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
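# Note on the dispatch above: the 'type' key of the attention_encoder block
# routes the remaining keys to the matching module, and any unrecognised
# value falls through to Pooling.from_params. Sketch of the routing (the
# fallback type string is a hypothetical placeholder):
#
#     {"type": "linear_attention", ...}  -> AttentionEncoder
#     {"type": "self_attention", ...}    -> SelfAttentionEncoder
#     {"type": "multi_head", ...}        -> MultiHeadAttentionEncoder
#     {"type": "max_pooling", ...}       -> Pooling (fallback)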
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdBCN':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab,
                                                        params=embedder_params)
    # Separate encoders and projections for the title and abstract streams,
    # combined by a biattention module.
    title_text_encoder = Seq2SeqEncoder.from_params(
        params.pop("title_text_encoder"))
    abstract_text_encoder = Seq2SeqEncoder.from_params(
        params.pop("abstract_text_encoder"))
    title_text_projection = FeedForward.from_params(
        params.pop("title_text_projection"))
    abstract_text_projection = FeedForward.from_params(
        params.pop("abstract_text_projection"))
    bi_attention_encoder = BiAttentionEncoder.from_params(
        params.pop("attention_encoder"))
    classifier_feedforward = params.pop("classifier_feedforward")
    if classifier_feedforward.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_feedforward)
    else:
        classifier_feedforward = Maxout.from_params(classifier_feedforward)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               title_text_encoder=title_text_encoder,
               abstract_text_encoder=abstract_text_encoder,
               title_text_projection=title_text_projection,
               abstract_text_projection=abstract_text_projection,
               bi_attention_encoder=bi_attention_encoder,
               classifier_feedforward=classifier_feedforward,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
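# A minimal end-to-end sketch of how one of these factories is driven,
# assuming AllenNLP 0.x-style Params. The embedding/hidden sizes and the
# attention_encoder sub-config are hypothetical placeholders; in practice
# the vocabulary comes from the dataset and the config from a training
# JSON file.
def _example_from_params_usage() -> 'EtdRNN':
    vocab = Vocabulary()  # placeholder; normally Vocabulary.from_instances(...)
    config = Params({
        "text_field_embedder": {
            "tokens": {"type": "embedding", "embedding_dim": 100}
        },
        "abstract_text_encoder": {
            "type": "lstm", "input_size": 100,
            "hidden_size": 200, "bidirectional": True
        },
        # Keys inside this block depend on the project-local AttentionEncoder.
        "attention_encoder": {"type": "linear_attention"},
        "classifier_feedforward": {
            "type": "feedforward", "input_dim": 400, "num_layers": 1,
            "hidden_dims": [39], "activations": ["linear"]
        },
        "bce_pos_weight": 10
    })
    return EtdRNN.from_params(vocab=vocab, params=config)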