Example no. 1
from thinc.api import (Model, chain, strings2arrays, with_array, HashEmbed,
                       expand_window, Relu, Softmax)

def cnn_tagger(width: int, vector_width: int, nr_classes: int = 17):
    # Hash-embed each token, widen the context window by one token on each
    # side, then two Relu layers and a Softmax over the tag classes.
    with Model.define_operators({">>": chain}):
        model = strings2arrays() >> with_array(
            HashEmbed(nO=width, nV=vector_width, column=0)
            >> expand_window(window_size=1)
            >> Relu(nO=width, nI=width * 3)
            >> Relu(nO=width, nI=width)
            >> Softmax(nO=nr_classes, nI=width))
    return model
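A minimal usage sketch (the widths and the toy sentences below are arbitrary
example values, not taken from the original snippet):

model = cnn_tagger(width=32, vector_width=16)
model.initialize()
# strings2arrays hashes each token string, so the input is a batch of token lists.
scores = model.predict([["the", "cat", "sat"], ["dogs", "bark"]])
assert scores[0].shape == (3, 17)  # one row of class scores per token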
Example no. 2
# Nested helper: `width`, `rows`, and `seed` come from an enclosing function;
# `nonlocal seed` gives each embedding table its own distinct hash seed.
def make_hash_embed(index):
    nonlocal seed
    seed += 1
    return HashEmbed(width, rows[index], column=index, seed=seed, dropout=0.0)
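For context, a sketch of the kind of enclosing constructor such a helper is
typically defined in (the surrounding function, its argument names, and the
concatenate wiring are assumptions for illustration, not part of the snippet):

from thinc.api import HashEmbed, concatenate

def make_multi_embed(width, rows, seed=7):
    # Each call bumps the shared seed, so every attribute column gets its own
    # embedding table, seeded differently from the others.
    def make_hash_embed(index):
        nonlocal seed
        seed += 1
        return HashEmbed(width, rows[index], column=index, seed=seed, dropout=0.0)

    return concatenate(*(make_hash_embed(i) for i in range(len(rows))))

embed = make_multi_embed(96, [5000, 2500, 2500])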
Example no. 3
import numpy
from thinc.api import HashEmbed

def test_seed_changes_bucket():
    # Two tables that differ only in their seed should map the same key to
    # different buckets, so the looked-up vectors should differ.
    model1 = HashEmbed(64, 1000, seed=2).initialize()
    model2 = HashEmbed(64, 1000, seed=1).initialize()
    arr = numpy.ones((1,), dtype="uint64")
    vector1 = model1.predict(arr)
    vector2 = model2.predict(arr)
    assert vector1.sum() != vector2.sum()
Example no. 4
from thinc.api import (Model, chain, strings2arrays, with_array, HashEmbed,
                       expand_window, Relu, Softmax)

def create_embed_relu_relu_softmax(depth, width, vector_length):
    # Same pipeline as Example no. 1, with positional arguments throughout.
    with Model.define_operators({">>": chain}):
        model = strings2arrays() >> with_array(
            HashEmbed(width, vector_length) >> expand_window(window_size=1)
            >> Relu(width, width * 3) >> Relu(width, width) >> Softmax(17, width))
    return model
Example no. 5
from typing import List, Union, cast

from thinc.api import (Model, chain, concatenate, list2ragged, ragged2list,
                       with_array, HashEmbed, Maxout)
from thinc.types import Floats2d, Ints2d, Ragged

from spacy.attrs import intify_attr
from spacy.errors import Errors
from spacy.ml import _character_embed
from spacy.ml.featureextractor import FeatureExtractor
from spacy.ml.staticvectors import StaticVectors
from spacy.tokens import Doc

def CharacterEmbed(
    width: int,
    rows: int,
    nM: int,
    nC: int,
    include_static_vectors: bool,
    feature: Union[int, str] = "LOWER",
) -> Model[List[Doc], List[Floats2d]]:
    """Construct an embedded representation based on character embeddings, using
    a feed-forward network. A fixed number of UTF-8 byte characters are used for
    each word, taken from the beginning and end of the word equally. Padding is
    used in the centre for words that are too short.

    For instance, let's say nC=4, and the word is "jumping". The characters
    used will be jung (two from the start, two from the end). If we had nC=8,
    the characters would be "jumpping": 4 from the start, 4 from the end. This
    ensures that the final character is always in the last position, instead
    of being in an arbitrary position depending on the word length.

    The characters are embedded in an embedding table with a given number of rows,
    and the vectors concatenated. A hash-embedded vector of the `feature` attribute
    (LOWER by default) is also concatenated on, and the result is then passed
    through a feed-forward
    network to construct a single vector to represent the information.

    feature (int or str): An attribute to embed, to concatenate with the characters.
    width (int): The width of the output vector and the feature embedding.
    rows (int): The number of rows in the LOWER hash embedding table.
    nM (int): The dimensionality of the character embeddings. Recommended values
        are between 16 and 64.
    nC (int): The number of UTF-8 bytes to embed per word. Recommended values
        are between 3 and 8, although it may depend on the length of words in the
        language.
    include_static_vectors (bool): Whether to also use static word vectors.
        Requires a vectors table to be loaded in the Doc objects' vocab.
    """
    feature = intify_attr(feature)
    if feature is None:
        raise ValueError(Errors.E911.format(feat=feature))
    char_embed = chain(
        _character_embed.CharacterEmbed(nM=nM, nC=nC),
        cast(Model[List[Floats2d], Ragged], list2ragged()),
    )
    feature_extractor: Model[List[Doc], Ragged] = chain(
        FeatureExtractor([feature]),
        cast(Model[List[Ints2d], Ragged], list2ragged()),
        with_array(HashEmbed(nO=width, nV=rows, column=0,
                             seed=5)),  # type: ignore
    )
    max_out: Model[Ragged, Ragged]
    if include_static_vectors:
        max_out = with_array(
            Maxout(width,
                   nM * nC + (2 * width),
                   nP=3,
                   normalize=True,
                   dropout=0.0)  # type: ignore
        )
        model = chain(
            concatenate(
                char_embed,
                feature_extractor,
                StaticVectors(width, dropout=0.0),
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    else:
        max_out = with_array(
            Maxout(width, nM * nC + width, nP=3, normalize=True,
                   dropout=0.0)  # type: ignore
        )
        model = chain(
            concatenate(
                char_embed,
                feature_extractor,
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    return model
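A minimal construction sketch, assuming the imports above; the sizes follow the
recommended ranges in the docstring and are otherwise arbitrary example values:

embed = CharacterEmbed(
    width=128,                     # output width and feature-embedding width
    rows=7000,                     # rows in the LOWER hash embedding table
    nM=64,                         # per-character embedding size
    nC=8,                          # characters per word (4 from each end)
    include_static_vectors=False,  # no vectors table required
)
# The model maps a batch of Doc objects to one (n_tokens, 128) array per Doc;
# initialize and predict it with real Docs, e.g. from spacy.blank("en").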
Example no. 6
from thinc.api import HashEmbed

def test_init():
    # Initializing allocates the embedding table "E" with nV rows of nO dims.
    model = HashEmbed(64, 1000).initialize()
    assert model.get_dim("nV") == 1000
    assert model.get_dim("nO") == 64
    assert model.get_param("E").shape == (1000, 64)
Example no. 7
from typing import Optional

from thinc.api import (Model, chain, clone, concatenate, with_array, uniqued,
                       HashEmbed, Maxout, Softmax, Linear, Dropout, Logistic,
                       expand_window, residual, list2ragged,
                       ParametricAttention, reduce_sum)

from spacy.attrs import ID, LOWER, ORTH, PREFIX, SHAPE, SUFFIX
from spacy.ml.featureextractor import FeatureExtractor
from spacy.ml.models.textcat import build_bow_text_classifier
from spacy.ml.staticvectors import StaticVectors

def TextCatEnsemble_v1(
    width: int,
    embed_size: int,
    pretrained_vectors: Optional[bool],
    exclusive_classes: bool,
    ngram_size: int,
    window_size: int,
    conv_depth: int,
    dropout: Optional[float],
    nO: Optional[int] = None,
) -> Model:
    # Don't document this yet, I'm not sure it's right.
    cols = [ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID]
    with Model.define_operators({">>": chain, "|": concatenate, "**": clone}):
        lower = HashEmbed(
            nO=width,
            nV=embed_size,
            column=cols.index(LOWER),
            dropout=dropout,
            seed=10,
        )
        prefix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(PREFIX),
            dropout=dropout,
            seed=11,
        )
        suffix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SUFFIX),
            dropout=dropout,
            seed=12,
        )
        shape = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SHAPE),
            dropout=dropout,
            seed=13,
        )
        width_nI = sum(
            layer.get_dim("nO") for layer in [lower, prefix, suffix, shape])
        trained_vectors = FeatureExtractor(cols) >> with_array(
            uniqued(
                (lower | prefix | suffix | shape) >> Maxout(
                    nO=width, nI=width_nI, normalize=True),
                column=cols.index(ORTH),
            ))
        if pretrained_vectors:
            static_vectors = StaticVectors(width)
            vector_layer = trained_vectors | static_vectors
            vectors_width = width * 2
        else:
            vector_layer = trained_vectors
            vectors_width = width
        tok2vec = vector_layer >> with_array(
            Maxout(width, vectors_width, normalize=True)
            >> residual(
                expand_window(window_size=window_size)
                >> Maxout(nO=width, nI=width * ((window_size * 2) + 1), normalize=True)
            ) ** conv_depth,
            pad=conv_depth,
        )
        cnn_model = (
            tok2vec
            >> list2ragged()
            >> ParametricAttention(width)
            >> reduce_sum()
            >> residual(Maxout(nO=width, nI=width))
            >> Linear(nO=nO, nI=width)
            >> Dropout(0.0)
        )

        linear_model = build_bow_text_classifier(
            nO=nO,
            ngram_size=ngram_size,
            exclusive_classes=exclusive_classes,
            no_output_layer=False,
        )
        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = (
                Linear(nO=nO, nI=nO_double) >> Dropout(0.0) >> Logistic()
            )
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
    if model.has_dim("nO") is not False:
        model.set_dim("nO", nO)
    model.set_ref("output_layer", linear_model.get_ref("output_layer"))
    model.attrs["multi_label"] = not exclusive_classes
    return model
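A minimal construction sketch, assuming the imports above; the widths and the
three-class, exclusive setup are arbitrary example values, and real training
would still require initializing the model on sample Doc objects:

textcat = TextCatEnsemble_v1(
    width=64,
    embed_size=2000,
    pretrained_vectors=None,   # no static vectors table loaded
    exclusive_classes=True,    # mutually exclusive labels -> Softmax output
    ngram_size=1,
    window_size=1,
    conv_depth=2,
    dropout=None,
    nO=3,                      # three output classes
)
assert textcat.attrs["multi_label"] is False
tok2vec = textcat.get_ref("tok2vec")  # the CNN tok2vec sub-network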