import pytest
from spacy.vocab import Vocab
from spacy.language import Language
from whatlies.language import SpacyLanguage
from whatlies.transformers import Umap, Pca, Noise, AddRandom, Tsne, OpenTsne


# Build an embedding set from a small vocabulary shipped with the tests: wrap
# the vocab in a blank English spaCy pipeline and fetch a vector for every word.
vocab = Vocab().from_disk("tests/custom_test_vocab/")
words = list(vocab.strings)
lang = SpacyLanguage(nlp=Language(vocab=vocab, meta={"lang": "en"}))
emb = lang[words]

transformers = [
    Umap(2),
    Umap(3),
    Pca(2),
    Pca(3),
    Noise(0.1),
    Noise(0.01),
    AddRandom(n=4),
    AddRandom(n=1),
    lambda d: d | (d["man"] - d["woman"]),
    Tsne(2, n_iter=250),
    Tsne(3, n_iter=250),
    OpenTsne(2, n_iter=100),
]
extra_sizes = [2, 3, 2, 3, 0, 0, 4, 1, 0, 2, 3, 2]
tfm_ids = [_.__class__.__name__ for _ in transformers]


@pytest.mark.parametrize(
    "transformer,extra_size", list(zip(transformers, extra_sizes)), ids=tfm_ids
)
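# The body of this parametrized test is truncated in the source; the function
# below is an assumed sketch. It only applies each transformer through
# EmbeddingSet.transform and checks that the result is still a usable set; the
# original assertions (presumably involving `extra_size`) are not shown.
def test_transform_returns_usable_set(transformer, extra_size):
    emb_new = emb.transform(transformer)
    # None of the transformers above removes words from the set, so the
    # transformed set should be at least as large as the original.
    assert len(emb_new) >= len(emb)
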
Example #2
        "Range of ngrams", min_value=1, max_value=5, step=1, value=(2, 3)
    )

reduction_method = st.sidebar.selectbox("Reduction Method", ("Umap", "Pca"))
if reduction_method == "Umap":
    n_neighbors = st.sidebar.slider(
        "Number of UMAP neighbors", min_value=1, max_value=100, value=15, step=1
    )
    min_dist = st.sidebar.slider(
        "Minimum Distance for UMAP",
        min_value=0.01,
        max_value=0.99,
        value=0.8,
        step=0.01,
    )
    reduction = Umap(2, n_neighbors=n_neighbors, min_dist=min_dist)
else:
    reduction = Pca(2)
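# Whichever reducer is chosen above gets applied to the embedding set further
# on (e.g. `embset.transform(reduction)`) to produce 2D coordinates for the chart.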

st.markdown("# Simple Text Clustering")
st.markdown(
    "Let's say you've gotten a lot of feedback from clients on different channels. "
    "You might like to distill the main topics and get an overview. "
    "It might even inspire some intents for a virtual assistant!"
)
st.markdown(
    "This app will help you discover those topics. It will attempt to cluster whatever "
    "text you give it: the chart tries to clump similar texts together so you can "
    "explore the underlying patterns."
)
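# Assumed placeholders: `method`, `n_svd` and `texts` are defined in an earlier
# part of the app that is not included in this snippet. The minimal stand-ins
# below let the excerpt run on its own; the option labels are taken from the
# branches further down, while the slider bounds and text-area input are guesses.
method = st.sidebar.selectbox("Method", ("CountVector SVD", "Lite Sentence Encoding"))
n_svd = st.sidebar.slider("Number of SVD components", min_value=2, max_value=100, value=10, step=1)
texts = [t.strip() for t in st.text_area("Texts to cluster, one per line").split("\n") if t.strip()]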

if method == "CountVector SVD":
    # Count-vectorize the texts over the chosen ngram range and reduce the
    # vectors to `n_svd` dimensions.
    lang = CountVectorLanguage(n_svd, ngram_range=(min_ngram, max_ngram))
    embset = lang[texts]
if method == "Lite Sentence Encoding":