def from_params(cls, params: Params) -> 'Seq2Seq2VecEncoder':
    seq2seq_encoder_params = params.pop("seq2seq_encoder")
    seq2vec_encoder_params = params.pop("seq2vec_encoder")
    seq2seq_encoder = Seq2SeqEncoder.from_params(seq2seq_encoder_params)
    seq2vec_encoder = Seq2VecEncoder.from_params(seq2vec_encoder_params)
    return cls(seq2seq_encoder=seq2seq_encoder, seq2vec_encoder=seq2vec_encoder)
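# A minimal usage sketch, not part of the source: it assumes the class above is
# in scope, the AllenNLP 0.x Params API, and registered encoder types ("lstm"
# via the PyTorch seq2seq wrapper, "boe" for bag-of-embeddings). The two keys
# simply mirror the two pops above.
from allennlp.common import Params

example_params = Params({
    "seq2seq_encoder": {"type": "lstm", "input_size": 100, "hidden_size": 100},
    "seq2vec_encoder": {"type": "boe", "embedding_dim": 100},
})
encoder = Seq2Seq2VecEncoder.from_params(example_params)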
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':
    embedding_params: Params = params.pop("embedding")
    # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
    # that to be "token_bpe" by default.
    embedding_params.setdefault("vocab_namespace", "token_bpe")
    embedding = Embedding.from_params(vocab, embedding_params)
    encoder_params: Params = params.pop("encoder")
    encoder = Seq2VecEncoder.from_params(encoder_params)
    dropout = params.pop("dropout", 0.0)
    params.assert_empty(cls.__name__)
    return cls(embedding, encoder, dropout)
def from_params(cls, vocab, params):  # type: ignore
    # pylint: disable=arguments-differ
    embedding_params = params.pop(u"embedding")
    # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
    # that to be "token_characters" by default.
    embedding_params.setdefault(u"vocab_namespace", u"token_characters")
    embedding = Embedding.from_params(vocab, embedding_params)
    encoder_params = params.pop(u"encoder")
    encoder = Seq2VecEncoder.from_params(encoder_params)
    dropout = params.pop_float(u"dropout", 0.0)
    params.assert_empty(cls.__name__)
    return cls(embedding, encoder, dropout)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':  # type: ignore
    # pylint: disable=arguments-differ
    embedding_params: Params = params.pop("embedding")
    # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
    # that to be "token_characters" by default.
    embedding_params.setdefault("vocab_namespace", "token_characters")
    embedding = Embedding.from_params(vocab, embedding_params)
    encoder_params: Params = params.pop("encoder")
    encoder = Seq2VecEncoder.from_params(encoder_params)
    dropout = params.pop_float("dropout", 0.0)
    params.assert_empty(cls.__name__)
    return cls(embedding, encoder, dropout)
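# Usage sketch for the TokenCharactersEncoder variants above (an assumption,
# not from the source): it uses the AllenNLP 0.x API with the registered "cnn"
# Seq2VecEncoder, and the vocabulary is assumed to be populated with a
# "token_characters" namespace by a character-level token indexer elsewhere.
from allennlp.common import Params
from allennlp.data import Vocabulary

vocab = Vocabulary()  # in practice, built from a dataset read with a character indexer
char_params = Params({
    "embedding": {"embedding_dim": 16},
    "encoder": {"type": "cnn", "embedding_dim": 16, "num_filters": 32,
                "ngram_filter_sizes": [3]},
    "dropout": 0.2,
})
char_encoder = TokenCharactersEncoder.from_params(vocab, char_params)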
def from_params(cls, vocab: Vocabulary, params: Params) -> 'GlyphEmbeddingWrapper':
    # glyph_config
    glyph_config = GlyphEmbeddingConfig()
    glyph_config.output_size = params.pop_int("output_size", 300)
    glyph_config.use_highway = True
    glyph_config.dropout = params.pop_float("dropout", 0.0)
    glyph_config.font_channels = params.pop_int("font_channels", 8)
    glyph_config.glyph_embsize = params.pop_int("glyph_embsize", 256)
    glyph_config.use_batch_norm = params.pop_bool("use_batch_norm", False)
    # encoder_config
    encoder_params: Params = params.pop("encoder")
    encoder = Seq2VecEncoder.from_params(encoder_params)
    params.assert_empty(cls.__name__)
    return cls(vocab, glyph_config, encoder)
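# Hypothetical config for the glyph variant above. GlyphEmbeddingConfig and
# GlyphEmbeddingWrapper are project-specific classes, so the keys below simply
# mirror the pops in from_params rather than a documented API; values are
# placeholders.
glyph_params = Params({
    "output_size": 300,
    "dropout": 0.1,
    "font_channels": 8,
    "glyph_embsize": 256,
    "use_batch_norm": True,
    "encoder": {"type": "cnn", "embedding_dim": 256, "num_filters": 100,
                "ngram_filter_sizes": [3]},
})
glyph_embedder = GlyphEmbeddingWrapper.from_params(vocab, glyph_params)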
def from_params(  # type: ignore
        cls, vocab: Vocabulary, params: Params) -> "TokenCharactersEncoder":
    embedding_params: Params = params.pop("embedding")
    # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
    # that to be "token_characters" by default. If num_embeddings is present, set the default
    # namespace to None so that an extend_vocab call doesn't misinterpret that some namespace
    # was originally used.
    default_namespace = (None if embedding_params.get("num_embeddings", None)
                         else "token_characters")
    embedding_params.setdefault("vocab_namespace", default_namespace)
    embedding = Embedding.from_params(vocab, embedding_params)
    encoder_params: Params = params.pop("encoder")
    encoder = Seq2VecEncoder.from_params(encoder_params)
    dropout = params.pop_float("dropout", 0.0)
    params.assert_empty(cls.__name__)
    return cls(embedding, encoder, dropout)
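# Sketch of the num_embeddings branch above (assumed behavior, values are
# placeholders): passing num_embeddings explicitly leaves vocab_namespace as
# None, so a later vocabulary extension will not try to resize this embedding
# based on a namespace it never actually used.
fixed_params = Params({
    "embedding": {"num_embeddings": 262, "embedding_dim": 16},
    "encoder": {"type": "cnn", "embedding_dim": 16, "num_filters": 32},
})
fixed_encoder = TokenCharactersEncoder.from_params(vocab, fixed_params)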