def test_multi_model_load_missing_dims():
    model = chain(Maxout(5, 10, nP=2), Maxout(2, 3)).initialize()
    b = model.layers[0].get_param("b")
    b += 1
    b = model.layers[1].get_param("b")
    b += 2
    data = model.to_bytes()
    model2 = chain(Maxout(5, nP=None), Maxout(nP=None))
    model2 = model2.from_bytes(data)
    assert model2.layers[0].get_param("b")[0, 0] == 1
    assert model2.layers[1].get_param("b")[0, 0] == 2

def test_multi_model_roundtrip_bytes():
    model = chain(Maxout(5, 10, nP=2), Maxout(2, 3)).initialize()
    b = model.layers[0].get_param("b")
    b += 1
    b = model.layers[1].get_param("b")
    b += 2
    data = model.to_bytes()
    b = model.layers[0].get_param("b")
    b -= 1
    b = model.layers[1].get_param("b")
    b -= 2
    model = model.from_bytes(data)
    assert model.layers[0].get_param("b")[0, 0] == 1
    assert model.layers[1].get_param("b")[0, 0] == 2

def test_serialize_model_shims_roundtrip_bytes():
    fwd = lambda model, X, is_train: (X, lambda dY: dY)
    test_shim = SerializableShim(None)
    shim_model = Model("shimmodel", fwd, shims=[test_shim])
    model = chain(Linear(2, 3), shim_model, Maxout(2, 3))
    model.initialize()
    assert model.layers[1].shims[0].value == "shimdata"
    model_bytes = model.to_bytes()
    with pytest.raises(ValueError):
        Linear(2, 3).from_bytes(model_bytes)
    test_shim = SerializableShim(None)
    shim_model = Model("shimmodel", fwd, shims=[test_shim])
    new_model = chain(Linear(2, 3), shim_model, Maxout(2, 3)).from_bytes(model_bytes)
    assert new_model.layers[1].shims[0].value == "shimdata from bytes"

def build_text_classifier_v2(
    tok2vec: Model[List[Doc], List[Floats2d]],
    linear_model: Model[List[Doc], Floats2d],
    nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
    exclusive_classes = not linear_model.attrs["multi_label"]
    with Model.define_operators({">>": chain, "|": concatenate}):
        width = tok2vec.maybe_get_dim("nO")
        attention_layer = ParametricAttention(width)  # TODO: benchmark performance difference of this layer
        maxout_layer = Maxout(nO=width, nI=width)
        norm_layer = LayerNorm(nI=width)
        cnn_model = (
            tok2vec
            >> list2ragged()
            >> attention_layer
            >> reduce_sum()
            >> residual(maxout_layer >> norm_layer >> Dropout(0.0))
        )
        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = Linear(nO=nO, nI=nO_double) >> Logistic()
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
    if model.has_dim("nO") is not False:
        model.set_dim("nO", nO)
    model.set_ref("output_layer", linear_model.get_ref("output_layer"))
    model.set_ref("attention_layer", attention_layer)
    model.set_ref("maxout_layer", maxout_layer)
    model.set_ref("norm_layer", norm_layer)
    model.attrs["multi_label"] = not exclusive_classes
    model.init = init_ensemble_textcat
    return model

def MaxoutWindowEncoder_v1(
    width: int, window_size: int, maxout_pieces: int, depth: int
) -> Model[Floats2d, Floats2d]:
    """Encode context using convolutions with maxout activation, layer
    normalization and residual connections.

    width (int): The input and output width. These are required to be the same,
        to allow residual connections. This value will be determined by the width
        of the inputs. Recommended values are between 64 and 300.
    window_size (int): The number of words to concatenate around each token
        to construct the convolution. Recommended value is 1.
    maxout_pieces (int): The number of maxout pieces to use. Recommended values
        are 2 or 3.
    depth (int): The number of convolutional layers. Recommended value is 4.
    """
    cnn = chain(
        expand_window(window_size=window_size),
        Maxout(
            nO=width,
            nI=width * ((window_size * 2) + 1),
            nP=maxout_pieces,
            dropout=0.0,
            normalize=True,
        ),
    )
    model = clone(residual(cnn), depth)
    model.set_dim("nO", width)
    model.attrs["receptive_field"] = window_size * depth
    return model

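# A minimal usage sketch, not part of the source: assuming thinc and numpy are
# installed, the encoder maps a (n_tokens, width) float array to an array of the
# same shape, so the input width must already equal the `width` argument. The
# sizes below are illustrative only.
import numpy

encode = MaxoutWindowEncoder_v1(width=64, window_size=1, maxout_pieces=3, depth=4)
X = numpy.zeros((10, 64), dtype="f")
encode.initialize(X=X)
Y = encode.predict(X)
assert Y.shape == (10, 64)
assert encode.attrs["receptive_field"] == 4  # window_size * depth
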
def build_mean_max_reducer(hidden_size: int) -> Model[Ragged, Floats2d]:
    """Reduce sequences by concatenating their first, last, mean and max pooled
    vectors, and then combine the concatenated vectors with a hidden layer.
    """
    return chain(
        concatenate(reduce_last(), reduce_first(), reduce_mean(), reduce_max()),
        Maxout(nO=hidden_size, normalize=True, dropout=0.0),
    )

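# A hedged sketch, not from the source: the reducer turns a Ragged batch of
# token vectors into one row per sequence, with `hidden_size` columns after the
# Maxout layer. The data shapes and sizes here are illustrative assumptions.
import numpy
from thinc.api import Ragged

reducer = build_mean_max_reducer(hidden_size=32)
data = numpy.random.uniform(size=(7, 16)).astype("f")  # 7 tokens, width 16
lengths = numpy.array([3, 4], dtype="i")               # two sequences
X = Ragged(data, lengths)
reducer.initialize(X=X)
Y = reducer.predict(X)
assert Y.shape == (2, 32)  # one pooled vector per sequence
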
def build_nel_encoder(tok2vec: Model, nO: Optional[int] = None) -> Model:
    with Model.define_operators({">>": chain, "**": clone}):
        token_width = tok2vec.get_dim("nO")
        output_layer = Linear(nO=nO, nI=token_width)
        model = (
            tok2vec
            >> list2ragged()
            >> reduce_mean()
            >> residual(Maxout(nO=token_width, nI=token_width, nP=2, dropout=0.0))
            >> output_layer
        )
        model.set_ref("output_layer", output_layer)
        model.set_ref("tok2vec", tok2vec)
    return model

def test_simple_model_roundtrip_bytes():
    model = Maxout(5, 10, nP=2).initialize()
    b = model.get_param("b")
    b += 1
    data = model.to_bytes()
    b = model.get_param("b")
    b -= 1
    model = model.from_bytes(data)
    assert model.get_param("b")[0, 0] == 1

def build_cloze_characters_multi_task_model(
    vocab: "Vocab", tok2vec: Model, maxout_pieces: int, hidden_size: int, nr_char: int
) -> Model:
    output_layer = chain(
        list2array(),
        Maxout(nO=hidden_size, nP=maxout_pieces),
        LayerNorm(nI=hidden_size),
        MultiSoftmax([256] * nr_char, nI=hidden_size),
    )
    model = build_masked_language_model(vocab, chain(tok2vec, output_layer))
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", output_layer)
    return model

def build_nel_encoder(
    tok2vec: Model, nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
    with Model.define_operators({">>": chain, "&": tuplify}):
        token_width = tok2vec.maybe_get_dim("nO")
        output_layer = Linear(nO=nO, nI=token_width)
        model = (
            ((tok2vec >> list2ragged()) & build_span_maker())
            >> extract_spans()
            >> reduce_mean()
            >> residual(Maxout(nO=token_width, nI=token_width, nP=2, dropout=0.0))
            >> output_layer
        )
        model.set_ref("output_layer", output_layer)
        model.set_ref("tok2vec", tok2vec)
        # flag to show this isn't legacy
        model.attrs["include_span_maker"] = True
    return model

def build_cloze_multi_task_model(
    vocab: "Vocab", tok2vec: Model, maxout_pieces: int, hidden_size: int
) -> Model:
    nO = vocab.vectors.data.shape[1]
    output_layer = chain(
        list2array(),
        Maxout(
            nO=hidden_size,
            nI=tok2vec.get_dim("nO"),
            nP=maxout_pieces,
            normalize=True,
            dropout=0.0,
        ),
        Linear(nO=nO, nI=hidden_size, init_W=zero_init),
    )
    model = chain(tok2vec, output_layer)
    model = build_masked_language_model(vocab, model)
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", output_layer)
    return model

def build_multi_task_model(
    tok2vec: Model,
    maxout_pieces: int,
    token_vector_width: int,
    nO: Optional[int] = None,
) -> Model:
    softmax = Softmax(nO=nO, nI=token_vector_width * 2)
    model = chain(
        tok2vec,
        Maxout(
            nO=token_vector_width * 2,
            nI=token_vector_width,
            nP=maxout_pieces,
            dropout=0.0,
        ),
        LayerNorm(token_vector_width * 2),
        softmax,
    )
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", softmax)
    return model

def CharacterEmbed(
    width: int,
    rows: int,
    nM: int,
    nC: int,
    include_static_vectors: bool,
    feature: Union[int, str] = "LOWER",
) -> Model[List[Doc], List[Floats2d]]:
    """Construct an embedded representation based on character embeddings, using
    a feed-forward network. A fixed number of UTF-8 byte characters are used for
    each word, taken from the beginning and end of the word equally. Padding is
    used in the centre for words that are too short.

    For instance, let's say nC=4, and the word is "jumping". The characters used
    will be "jung" (two from the start, two from the end). If we had nC=8, the
    characters would be "jumpping": 4 from the start, 4 from the end. This ensures
    that the final character is always in the last position, instead of being in
    an arbitrary position depending on the word length.

    The characters are embedded in an embedding table with a given number of rows,
    and the vectors concatenated. A hash-embedded vector of the LOWER of the word
    is also concatenated on, and the result is then passed through a feed-forward
    network to construct a single vector to represent the information.

    feature (int or str): An attribute to embed, to concatenate with the characters.
    width (int): The width of the output vector and the feature embedding.
    rows (int): The number of rows in the LOWER hash embedding table.
    nM (int): The dimensionality of the character embeddings. Recommended values
        are between 16 and 64.
    nC (int): The number of UTF-8 bytes to embed per word. Recommended values
        are between 3 and 8, although it may depend on the length of words in
        the language.
    include_static_vectors (bool): Whether to also use static word vectors.
        Requires a vectors table to be loaded in the Doc objects' vocab.
    """
    feature = intify_attr(feature)
    if feature is None:
        raise ValueError(Errors.E911.format(feat=feature))
    char_embed = chain(
        _character_embed.CharacterEmbed(nM=nM, nC=nC),
        cast(Model[List[Floats2d], Ragged], list2ragged()),
    )
    feature_extractor: Model[List[Doc], Ragged] = chain(
        FeatureExtractor([feature]),
        cast(Model[List[Ints2d], Ragged], list2ragged()),
        with_array(HashEmbed(nO=width, nV=rows, column=0, seed=5)),  # type: ignore
    )
    max_out: Model[Ragged, Ragged]
    if include_static_vectors:
        max_out = with_array(
            Maxout(width, nM * nC + (2 * width), nP=3, normalize=True, dropout=0.0)  # type: ignore
        )
        model = chain(
            concatenate(
                char_embed,
                feature_extractor,
                StaticVectors(width, dropout=0.0),
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    else:
        max_out = with_array(
            Maxout(width, nM * nC + width, nP=3, normalize=True, dropout=0.0)  # type: ignore
        )
        model = chain(
            concatenate(
                char_embed,
                feature_extractor,
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    return model

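# A hedged illustration of the character-selection rule described in the
# docstring above; this is a reading of that rule in plain Python strings, not
# the library's byte-level implementation, and `_select_chars` is a hypothetical
# helper introduced only for this sketch.
def _select_chars(word: str, nC: int, pad: str = " ") -> str:
    # Take nC//2 characters from the start and nC//2 from the end, padding in
    # the centre when the word cannot supply both halves.
    half = nC // 2
    start, end = word[:half], word[-half:]
    return start + pad * (nC - len(start) - len(end)) + end

assert _select_chars("jumping", 4) == "jung"
assert _select_chars("jumping", 8) == "jumpping"
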
def MultiHashEmbed(
    width: int,
    attrs: List[Union[str, int]],
    rows: List[int],
    include_static_vectors: bool,
) -> Model[List[Doc], List[Floats2d]]:
    """Construct an embedding layer that separately embeds a number of lexical
    attributes using hash embedding, concatenates the results, and passes it
    through a feed-forward subnetwork to build a mixed representation.

    The features used can be configured with the 'attrs' argument. The suggested
    attributes are NORM, PREFIX, SUFFIX and SHAPE. This lets the model take into
    account some subword information, without constructing a fully character-based
    representation. If pretrained vectors are available, they can be included in
    the representation as well, and the vectors table will be kept static (i.e.
    it's not updated).

    The `width` parameter specifies the output width of the layer and the widths
    of all embedding tables. If static vectors are included, a learned linear
    layer is used to map the vectors to the specified width before concatenating
    it with the other embedding outputs. A single Maxout layer is then used to
    reduce the concatenated vectors to the final width.

    The `rows` parameter controls the number of rows used by the `HashEmbed`
    tables. The HashEmbed layer needs surprisingly few rows, due to its use of
    the hashing trick. Generally between 2000 and 10000 rows is sufficient, even
    for very large vocabularies. A number of rows must be specified for each
    table, so the `rows` list must be of the same length as the `attrs` parameter.

    width (int): The output width. Also used as the width of the embedding tables.
        Recommended values are between 64 and 300.
    attrs (list of attr IDs): The token attributes to embed. A separate embedding
        table will be constructed for each attribute.
    rows (List[int]): The number of rows in the embedding tables. Must have the
        same length as attrs.
    include_static_vectors (bool): Whether to also use static word vectors.
        Requires a vectors table to be loaded in the Doc objects' vocab.
    """
    if len(rows) != len(attrs):
        raise ValueError(f"Mismatched lengths: {len(rows)} vs {len(attrs)}")
    seed = 7

    def make_hash_embed(index):
        nonlocal seed
        seed += 1
        return HashEmbed(width, rows[index], column=index, seed=seed, dropout=0.0)

    embeddings = [make_hash_embed(i) for i in range(len(attrs))]
    concat_size = width * (len(embeddings) + include_static_vectors)
    max_out: Model[Ragged, Ragged] = with_array(
        Maxout(width, concat_size, nP=3, dropout=0.0, normalize=True)  # type: ignore
    )
    if include_static_vectors:
        feature_extractor: Model[List[Doc], Ragged] = chain(
            FeatureExtractor(attrs),
            cast(Model[List[Ints2d], Ragged], list2ragged()),
            with_array(concatenate(*embeddings)),
        )
        model = chain(
            concatenate(
                feature_extractor,
                StaticVectors(width, dropout=0.0),
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    else:
        model = chain(
            FeatureExtractor(list(attrs)),
            cast(Model[List[Ints2d], Ragged], list2ragged()),
            with_array(concatenate(*embeddings)),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    return model

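# A hedged usage sketch, not from the source: constructing the layer with the
# attributes suggested in the docstring. The row counts are illustrative values
# within the 2000-10000 range mentioned above, and spaCy is assumed to be
# installed for the Doc example.
from spacy.tokens import Doc
from spacy.vocab import Vocab

embed = MultiHashEmbed(
    width=96,
    attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
    rows=[5000, 2500, 2500, 2500],  # one row count per attribute
    include_static_vectors=False,
)
doc = Doc(Vocab(), words=["An", "illustrative", "sentence"])
embed.initialize(X=[doc])
vectors = embed.predict([doc])
assert vectors[0].shape == (3, 96)  # one width-sized vector per token
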
def TextCatEnsemble_v1(
    width: int,
    embed_size: int,
    pretrained_vectors: Optional[bool],
    exclusive_classes: bool,
    ngram_size: int,
    window_size: int,
    conv_depth: int,
    dropout: Optional[float],
    nO: Optional[int] = None,
) -> Model:
    # Don't document this yet, I'm not sure it's right.
    cols = [ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID]
    with Model.define_operators({">>": chain, "|": concatenate, "**": clone}):
        lower = HashEmbed(
            nO=width, nV=embed_size, column=cols.index(LOWER), dropout=dropout, seed=10
        )
        prefix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(PREFIX),
            dropout=dropout,
            seed=11,
        )
        suffix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SUFFIX),
            dropout=dropout,
            seed=12,
        )
        shape = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SHAPE),
            dropout=dropout,
            seed=13,
        )
        width_nI = sum(layer.get_dim("nO") for layer in [lower, prefix, suffix, shape])
        trained_vectors = FeatureExtractor(cols) >> with_array(
            uniqued(
                (lower | prefix | suffix | shape)
                >> Maxout(nO=width, nI=width_nI, normalize=True),
                column=cols.index(ORTH),
            )
        )
        if pretrained_vectors:
            static_vectors = StaticVectors(width)
            vector_layer = trained_vectors | static_vectors
            vectors_width = width * 2
        else:
            vector_layer = trained_vectors
            vectors_width = width
        tok2vec = vector_layer >> with_array(
            Maxout(width, vectors_width, normalize=True)
            >> residual(
                expand_window(window_size=window_size)
                >> Maxout(nO=width, nI=width * ((window_size * 2) + 1), normalize=True)
            )
            ** conv_depth,
            pad=conv_depth,
        )
        cnn_model = (
            tok2vec
            >> list2ragged()
            >> ParametricAttention(width)
            >> reduce_sum()
            >> residual(Maxout(nO=width, nI=width))
            >> Linear(nO=nO, nI=width)
            >> Dropout(0.0)
        )
        linear_model = build_bow_text_classifier(
            nO=nO,
            ngram_size=ngram_size,
            exclusive_classes=exclusive_classes,
            no_output_layer=False,
        )
        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = Linear(nO=nO, nI=nO_double) >> Dropout(0.0) >> Logistic()
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
    if model.has_dim("nO") is not False:
        model.set_dim("nO", nO)
    model.set_ref("output_layer", linear_model.get_ref("output_layer"))
    model.attrs["multi_label"] = not exclusive_classes
    return model