Example 1
def build_text_classifier_v2(
    tok2vec: Model[List[Doc], List[Floats2d]],
    linear_model: Model[List[Doc], Floats2d],
    nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
    exclusive_classes = not linear_model.attrs["multi_label"]
    with Model.define_operators({">>": chain, "|": concatenate}):
        width = tok2vec.maybe_get_dim("nO")
        # TODO: benchmark performance difference of this layer
        attention_layer = ParametricAttention(width)
        maxout_layer = Maxout(nO=width, nI=width)
        norm_layer = LayerNorm(nI=width)
        cnn_model = (
            tok2vec >> list2ragged() >> attention_layer >> reduce_sum() >>
            residual(maxout_layer >> norm_layer >> Dropout(0.0)))

        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = Linear(nO=nO, nI=nO_double) >> Logistic()
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
    # Only pin the output dimension when it is known and still settable.
    if model.has_dim("nO") is not False and nO is not None:
        model.set_dim("nO", nO)
    model.set_ref("output_layer", linear_model.get_ref("output_layer"))
    model.set_ref("attention_layer", attention_layer)
    model.set_ref("maxout_layer", maxout_layer)
    model.set_ref("norm_layer", norm_layer)
    model.attrs["multi_label"] = not exclusive_classes

    model.init = init_ensemble_textcat
    return model
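
A minimal usage sketch (not part of the original source): the ensemble combines a tok2vec component with a bag-of-words linear model such as the one built by build_bow_text_classifier (see the call site in Example 11). Here `tok2vec` is assumed to be built elsewhere, e.g. from a registered architecture such as "spacy.Tok2Vec.v2"; it must map List[Doc] -> List[Floats2d].

from spacy.ml.models.textcat import build_bow_text_classifier

# Hypothetical wiring with 3 labels; `tok2vec` is assumed to exist.
linear_model = build_bow_text_classifier(
    exclusive_classes=True, ngram_size=1, no_output_layer=False, nO=3
)
model = build_text_classifier_v2(tok2vec, linear_model, nO=3)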
Example 2
def build_spancat_model(
    tok2vec: Model[List[Doc], List[Floats2d]],
    reducer: Model[Ragged, Floats2d],
    scorer: Model[Floats2d, Floats2d],
) -> Model[Tuple[List[Doc], Ragged], Floats2d]:
    """Build a span categorizer model, given a token-to-vector model, a
    reducer model to map the sequence of vectors for each span down to a single
    vector, and a scorer model to map the vectors to probabilities.

    tok2vec (Model[List[Doc], List[Floats2d]]): The tok2vec model.
    reducer (Model[Ragged, Floats2d]): The reducer model.
    scorer (Model[Floats2d, Floats2d]): The scorer model.
    """
    model = chain(
        cast(
            Model[Tuple[List[Doc], Ragged], Tuple[Ragged, Ragged]],
            with_getitem(
                0,
                chain(tok2vec,
                      cast(Model[List[Floats2d], Ragged], list2ragged()))),
        ),
        extract_spans(),
        reducer,
        scorer,
    )
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("reducer", reducer)
    model.set_ref("scorer", scorer)
    return model
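
A usage sketch (illustrative, not from the original source): spaCy's span categorizer pairs a reducer over the span's token vectors with a Linear >> Logistic scorer. The 4 labels below are hypothetical and `tok2vec` is assumed to be built elsewhere.

from thinc.api import Linear, Logistic, chain, reduce_mean

model = build_spancat_model(
    tok2vec,
    reducer=reduce_mean(),                    # Ragged -> Floats2d, one vector per span
    scorer=chain(Linear(nO=4), Logistic()),   # 4 hypothetical labels
)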
Example 3
def test_validation_complex():
    good_model = chain(list2ragged(), reduce_sum(), Relu(12, dropout=0.5), Relu(1))
    X = [good_model.ops.xp.zeros((4, 75), dtype="f")]
    Y = good_model.ops.xp.zeros((1,), dtype="f")
    good_model.initialize(X, Y)
    good_model.predict(X)

    bad_model = chain(
        list2ragged(),
        reduce_sum(),
        Relu(12, dropout=0.5),
        # ERROR: Why can't I attach a Relu to an attention layer?
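        # (Expected failure: ParametricAttention takes a Ragged input, but
        # reduce_sum() has already collapsed the data to a plain Floats2d
        # array, so Thinc's shape validation rejects the chain.)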
        ParametricAttention(12),
        Relu(1),
    )
    with pytest.raises(DataValidationError):
        bad_model.initialize(X, Y)
Example 4
def build_nel_encoder(tok2vec: Model, nO: Optional[int] = None) -> Model:
    with Model.define_operators({">>": chain, "**": clone}):
        token_width = tok2vec.get_dim("nO")
        output_layer = Linear(nO=nO, nI=token_width)
        model = (tok2vec >> list2ragged() >> reduce_mean() >> residual(
            Maxout(nO=token_width, nI=token_width, nP=2, dropout=0.0)) >>
                 output_layer)
        model.set_ref("output_layer", output_layer)
        model.set_ref("tok2vec", tok2vec)
    return model
Example 5
def build_text_classifier_lowdata(
        width: int,
        dropout: Optional[float],
        nO: Optional[int] = None) -> Model[List[Doc], Floats2d]:
    # Don't document this yet, I'm not sure it's right.
    # Note, before v.3, this was the default if setting "low_data" and "pretrained_dims"
    with Model.define_operators({">>": chain, "**": clone}):
        model = (StaticVectors(width) >> list2ragged() >>
                 ParametricAttention(width) >> reduce_sum() >> residual(
                     Relu(width, width))**2 >> Linear(nO, width))
        if dropout:
            model = model >> Dropout(dropout)
        model = model >> Logistic()
    return model
Example 6
def build_nel_encoder(tok2vec: Model,
                      nO: Optional[int] = None) -> Model[List[Doc], Floats2d]:
    with Model.define_operators({">>": chain, "&": tuplify}):
        token_width = tok2vec.maybe_get_dim("nO")
        output_layer = Linear(nO=nO, nI=token_width)
        model = (((tok2vec >> list2ragged()) & build_span_maker()) >>
                 extract_spans() >> reduce_mean() >> residual(
                     Maxout(nO=token_width, nI=token_width, nP=2,
                            dropout=0.0)) >> output_layer)
        model.set_ref("output_layer", output_layer)
        model.set_ref("tok2vec", tok2vec)
    # flag to show this isn't legacy
    model.attrs["include_span_maker"] = True
    return model
Example 7
def build_simple_cnn_text_classifier(
        tok2vec: Model,
        exclusive_classes: bool,
        nO: Optional[int] = None) -> Model[List[Doc], Floats2d]:
    """
    Build a simple CNN text classifier, given a token-to-vector model as input.
    If exclusive_classes=True, a softmax non-linearity is applied, so that the
    outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
    is applied instead, so that outputs are in the range [0, 1].
    """
    fill_defaults = {"b": 0, "W": 0}
    with Model.define_operators({">>": chain}):
        cnn = tok2vec >> list2ragged() >> reduce_mean()
        nI = tok2vec.maybe_get_dim("nO")
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nI)
            fill_defaults["b"] = NEG_VALUE
            resizable_layer: Model = resizable(
                output_layer,
                resize_layer=partial(resize_linear_weighted,
                                     fill_defaults=fill_defaults),
            )
            model = cnn >> resizable_layer
        else:
            output_layer = Linear(nO=nO, nI=nI)
            resizable_layer = resizable(
                output_layer,
                resize_layer=partial(resize_linear_weighted,
                                     fill_defaults=fill_defaults),
            )
            model = cnn >> resizable_layer >> Logistic()
        model.set_ref("output_layer", output_layer)
        model.attrs["resize_output"] = partial(
            resize_and_set_ref,
            resizable_layer=resizable_layer,
        )
    model.set_ref("tok2vec", tok2vec)
    if nO is not None:
        model.set_dim("nO", nO)
    model.attrs["multi_label"] = not exclusive_classes
    return model
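
A quick numeric illustration (not from the original source) of the docstring's claim about the two output non-linearities:

import numpy as np

scores = np.array([2.0, 0.5, -1.0])
softmax = np.exp(scores) / np.exp(scores).sum()  # outputs sum to 1
logistic = 1.0 / (1.0 + np.exp(-scores))         # each output in (0, 1)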
Example 8
def build_simple_cnn_text_classifier(
    tok2vec: Model, exclusive_classes: bool, nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
    """
    Build a simple CNN text classifier, given a token-to-vector model as input.
    If exclusive_classes=True, a softmax non-linearity is applied, so that the
    outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
    is applied instead, so that outputs are in the range [0, 1].
    """
    with Model.define_operators({">>": chain}):
        cnn = tok2vec >> list2ragged() >> reduce_mean()
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=tok2vec.maybe_get_dim("nO"))
            model = cnn >> output_layer
            model.set_ref("output_layer", output_layer)
        else:
            linear_layer = Linear(nO=nO, nI=tok2vec.maybe_get_dim("nO"))
            model = cnn >> linear_layer >> Logistic()
            model.set_ref("output_layer", linear_layer)
    model.set_ref("tok2vec", tok2vec)
    model.set_dim("nO", nO)
    model.attrs["multi_label"] = not exclusive_classes
    return model
Example 9
def CharacterEmbed(
    width: int,
    rows: int,
    nM: int,
    nC: int,
    include_static_vectors: bool,
    feature: Union[int, str] = "LOWER",
) -> Model[List[Doc], List[Floats2d]]:
    """Construct an embedded representation based on character embeddings, using
    a feed-forward network. A fixed number of UTF-8 byte characters are used for
    each word, taken from the beginning and end of the word equally. Padding is
    used in the centre for words that are too short.

    For instance, let's say nC=4, and the word is "jumping". The characters
    used will be "jung" (two from the start, two from the end). If we had nC=8,
    the characters would be "jumpping": 4 from the start, 4 from the end. This
    ensures that the final character is always in the last position, instead
    of being in an arbitrary position depending on the word length.

    The characters are embedded in an embedding table with a given number of
    rows, and the vectors are concatenated. A hash-embedded vector of the LOWER
    attribute of the word is also concatenated, and the result is then passed
    through a feed-forward network to construct a single vector representing
    the information.

    feature (int or str): An attribute to embed, to concatenate with the characters.
    width (int): The width of the output vector and the feature embedding.
    rows (int): The number of rows in the LOWER hash embedding table.
    nM (int): The dimensionality of the character embeddings. Recommended values
        are between 16 and 64.
    nC (int): The number of UTF-8 bytes to embed per word. Recommended values
        are between 3 and 8, although it may depend on the length of words in the
        language.
    include_static_vectors (bool): Whether to also use static word vectors.
        Requires a vectors table to be loaded in the Doc objects' vocab.
    """
    feature = intify_attr(feature)
    if feature is None:
        raise ValueError(Errors.E911.format(feat=feature))
    char_embed = chain(
        _character_embed.CharacterEmbed(nM=nM, nC=nC),
        cast(Model[List[Floats2d], Ragged], list2ragged()),
    )
    feature_extractor: Model[List[Doc], Ragged] = chain(
        FeatureExtractor([feature]),
        cast(Model[List[Ints2d], Ragged], list2ragged()),
        with_array(HashEmbed(nO=width, nV=rows, column=0,
                             seed=5)),  # type: ignore
    )
    max_out: Model[Ragged, Ragged]
    if include_static_vectors:
        max_out = with_array(
            Maxout(width,
                   nM * nC + (2 * width),
                   nP=3,
                   normalize=True,
                   dropout=0.0)  # type: ignore
        )
        model = chain(
            concatenate(
                char_embed,
                feature_extractor,
                StaticVectors(width, dropout=0.0),
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    else:
        max_out = with_array(
            Maxout(width, nM * nC + width, nP=3, normalize=True,
                   dropout=0.0)  # type: ignore
        )
        model = chain(
            concatenate(
                char_embed,
                feature_extractor,
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    return model
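
A simplified illustration (not the actual implementation) of the start/end character selection described in the docstring:

word = "jumping"
for nC in (4, 8):
    print(word[: nC // 2] + word[-(nC // 2):])  # -> "jung", then "jumpping"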
Example 10
def MultiHashEmbed(
    width: int,
    attrs: List[Union[str, int]],
    rows: List[int],
    include_static_vectors: bool,
) -> Model[List[Doc], List[Floats2d]]:
    """Construct an embedding layer that separately embeds a number of lexical
    attributes using hash embedding, concatenates the results, and passes it
    through a feed-forward subnetwork to build a mixed representation.

    The features used can be configured with the 'attrs' argument. The suggested
    attributes are NORM, PREFIX, SUFFIX and SHAPE. This lets the model take into
    account some subword information, without constructing a fully character-based
    representation. If pretrained vectors are available, they can be included in
    the representation as well; in that case the vectors table is kept static
    (i.e. it is not updated).

    The `width` parameter specifies the output width of the layer and the widths
    of all embedding tables. If static vectors are included, a learned linear
    layer is used to map the vectors to the specified width before concatenating
    it with the other embedding outputs. A single Maxout layer is then used to
    reduce the concatenated vectors to the final width.

    The `rows` parameter controls the number of rows used by the `HashEmbed`
    tables. The HashEmbed layer needs surprisingly few rows, due to its use of
    the hashing trick. Generally between 2000 and 10000 rows is sufficient,
    even for very large vocabularies. A number of rows must be specified for each
    table, so the `rows` list must be of the same length as the `attrs` parameter.

    width (int): The output width. Also used as the width of the embedding tables.
        Recommended values are between 64 and 300.
    attrs (list of attr IDs): The token attributes to embed. A separate
        embedding table will be constructed for each attribute.
    rows (List[int]): The number of rows in the embedding tables. Must have the
        same length as attrs.
    include_static_vectors (bool): Whether to also use static word vectors.
        Requires a vectors table to be loaded in the Doc objects' vocab.
    """
    if len(rows) != len(attrs):
        raise ValueError(f"Mismatched lengths: {len(rows)} vs {len(attrs)}")
    seed = 7

    def make_hash_embed(index):
        nonlocal seed
        seed += 1
        return HashEmbed(width,
                         rows[index],
                         column=index,
                         seed=seed,
                         dropout=0.0)

    embeddings = [make_hash_embed(i) for i in range(len(attrs))]
    concat_size = width * (len(embeddings) + include_static_vectors)
    max_out: Model[Ragged, Ragged] = with_array(
        Maxout(width, concat_size, nP=3, dropout=0.0,
               normalize=True)  # type: ignore
    )
    if include_static_vectors:
        feature_extractor: Model[List[Doc], Ragged] = chain(
            FeatureExtractor(attrs),
            cast(Model[List[Ints2d], Ragged], list2ragged()),
            with_array(concatenate(*embeddings)),
        )
        model = chain(
            concatenate(
                feature_extractor,
                StaticVectors(width, dropout=0.0),
            ),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    else:
        model = chain(
            FeatureExtractor(list(attrs)),
            cast(Model[List[Ints2d], Ragged], list2ragged()),
            with_array(concatenate(*embeddings)),
            max_out,
            cast(Model[Ragged, List[Floats2d]], ragged2list()),
        )
    return model
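
A usage sketch with illustrative values (the exact row counts are assumptions; the docstring only recommends somewhere between 2000 and 10000 rows per table):

embed = MultiHashEmbed(
    width=96,
    attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
    rows=[5000, 2500, 2500, 2500],  # one row count per attribute
    include_static_vectors=False,
)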
Example 11
def TextCatEnsemble_v1(
    width: int,
    embed_size: int,
    pretrained_vectors: Optional[bool],
    exclusive_classes: bool,
    ngram_size: int,
    window_size: int,
    conv_depth: int,
    dropout: Optional[float],
    nO: Optional[int] = None,
) -> Model:
    # Don't document this yet, I'm not sure it's right.
    cols = [ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID]
    with Model.define_operators({">>": chain, "|": concatenate, "**": clone}):
        lower = HashEmbed(nO=width,
                          nV=embed_size,
                          column=cols.index(LOWER),
                          dropout=dropout,
                          seed=10)
        prefix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(PREFIX),
            dropout=dropout,
            seed=11,
        )
        suffix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SUFFIX),
            dropout=dropout,
            seed=12,
        )
        shape = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SHAPE),
            dropout=dropout,
            seed=13,
        )
        width_nI = sum(
            layer.get_dim("nO") for layer in [lower, prefix, suffix, shape])
        trained_vectors = FeatureExtractor(cols) >> with_array(
            uniqued(
                (lower | prefix | suffix | shape) >> Maxout(
                    nO=width, nI=width_nI, normalize=True),
                column=cols.index(ORTH),
            ))
        if pretrained_vectors:
            static_vectors = StaticVectors(width)
            vector_layer = trained_vectors | static_vectors
            vectors_width = width * 2
        else:
            vector_layer = trained_vectors
            vectors_width = width
        tok2vec = vector_layer >> with_array(
            Maxout(width, vectors_width, normalize=True) >>
            residual((expand_window(window_size=window_size) >> Maxout(
                nO=width, nI=width *
                ((window_size * 2) + 1), normalize=True)))**conv_depth,
            pad=conv_depth,
        )
        cnn_model = (tok2vec >> list2ragged() >> ParametricAttention(width) >>
                     reduce_sum() >> residual(Maxout(nO=width, nI=width)) >>
                     Linear(nO=nO, nI=width) >> Dropout(0.0))

        linear_model = build_bow_text_classifier(
            nO=nO,
            ngram_size=ngram_size,
            exclusive_classes=exclusive_classes,
            no_output_layer=False,
        )
        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = Linear(nO=nO,
                                  nI=nO_double) >> Dropout(0.0) >> Logistic()
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
    if model.has_dim("nO") is not False and nO is not None:
        model.set_dim("nO", nO)
    model.set_ref("output_layer", linear_model.get_ref("output_layer"))
    model.attrs["multi_label"] = not exclusive_classes
    return model