Example #1
 def cope_lookups():
     lookups = Lookups()
     lookups.add_table("lemma_lookup", {"cope": "cope", "coped": "cope"})
     lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
     lookups.add_table("lemma_exc", {"verb": {"coping": ("cope", )}})
     lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
     return lookups
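
A minimal usage sketch for the fixture above, assuming the spaCy v2.x rule-based Lemmatizer that several of the later examples use; the expected values follow directly from the tables defined in cope_lookups().

from spacy.lemmatizer import Lemmatizer

lemmatizer = Lemmatizer(cope_lookups())
assert lemmatizer("coping", "verb") == ["cope"]   # resolved via the lemma_exc / lemma_rules tables
assert lemmatizer.lookup("coped") == "cope"       # direct hit in the lemma_lookup table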
Example #2
    def __init__(
        self,
        vocab: Vocab,
        name: str = "morphologizer",
        *,
        overwrite_lemma: bool = False,
    ) -> None:
        super().__init__()

        self.name = name
        self.vocab = vocab
        self.voikko = libvoikko.Voikko("fi")
        self.lookups = Lookups()
        self.overwrite_lemma = overwrite_lemma
        self.aux_labels = [vocab.strings.add(x) for x in ["aux", "aux:pass"]]
        self.cop_labels = [vocab.strings.add(x) for x in ["cop", "cop:own"]]
        self.nsubj_labels = [
            vocab.strings.add(x) for x in ["nsubj", "nsubj:cop"]
        ]
        self.ccomp_labels = [
            vocab.strings.add(x)
            for x in ["csubj", "csubj:cop", "xcomp", "xcomp:ds"]
        ]
        self.relcl_labels = [
            vocab.strings.add(x) for x in ["acl:relcl", "ccomp"]
        ]
        self.foreign_tag = vocab.strings.add('Foreign')
Example #3
    def concept_sets(self, value):
        """
        Sets concepts_sets and the attributes derived from it.

        Args:
            value (list of list of str): A list of lists of strings; each string being a concept,
                each set in the larger list corresponding to a document which has the tags seen in the set.
        """
        self._concept_sets = value
        LOG.debug("Extracting raw keywords as concepts.")
        all_concepts = [
            concept
            for concept_set in tqdm(self._concept_sets)
            for concept in concept_set
            if concept.strip() != ""
        ]
        raw_concepts = set(all_concepts)

        LOG.debug("Lemmatizing {} raw concepts.".format(len(raw_concepts)))
        concepts = [c.lower() for c in raw_concepts]

        self.raw2lemma = {rc: c for rc, c in zip(raw_concepts, concepts)}
        lookups = Lookups()
        lookups.add_table("lemma_lookup", self.raw2lemma)
        self.lemmatizer = Lemmatizer(lookups)
        self.lemma2raw = {v: k for k, v in self.raw2lemma.items()}
        lemma_concepts = [
            self.lemmatizer(concept, "NOUN")[0] for concept in all_concepts
        ]
        self.concepts_frequencies = Counter(lemma_concepts)
        self.concepts = set(lemma_concepts)
        self._fit_concept_indices()
Example #4
def create_lookups_from_json_reader(path: Path) -> Lookups:
    lookups = Lookups()
    for p in path.glob("*.json"):
        table_name = p.stem
        data = srsly.read_json(p)
        lookups.add_table(table_name, data)
    return lookups
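
A short sketch of how the helper above might be used; the directory and file names are illustrative only, and each table is named after the stem of the JSON file it was read from.

from pathlib import Path

lookups = create_lookups_from_json_reader(Path("lookups_data"))  # e.g. lemma_lookup.json, lemma_exc.json
print(sorted(lookups.tables))    # one table per JSON file found
lookups.to_disk("lookups_out")   # persists all tables (as lookups.bin by default)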
Example #5
def test_lookups_api():
    table_name = "test"
    data = {"foo": "bar", "hello": "world"}
    lookups = Lookups()
    lookups.add_table(table_name, data)
    assert len(lookups) == 1
    assert table_name in lookups
    assert lookups.has_table(table_name)
    table = lookups.get_table(table_name)
    assert table.name == table_name
    assert len(table) == 2
    assert table["hello"] == "world"
    table["a"] = "b"
    assert table["a"] == "b"
    table = lookups.get_table(table_name)
    assert len(table) == 3
    with pytest.raises(KeyError):
        lookups.get_table("xyz")
    with pytest.raises(ValueError):
        lookups.add_table(table_name)
    table = lookups.remove_table(table_name)
    assert table.name == table_name
    assert len(lookups) == 0
    assert table_name not in lookups
    with pytest.raises(KeyError):
        lookups.get_table(table_name)
Example #6
def lemmatizer():
    lookups = Lookups()
    lookups.add_table("lemma_lookup", {
        "dogs": "dog",
        "boxen": "box",
        "mice": "mouse"
    })
    return Lemmatizer(lookups)
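
A brief check of the fixture above; Lemmatizer.lookup() reads the lemma_lookup table directly and, as the later examples suggest, falls back to the input string for words that are not in the table.

lemm = lemmatizer()   # call the fixture function directly
assert lemm.lookup("dogs") == "dog"
assert lemm.lookup("mice") == "mouse"
assert lemm.lookup("cats") == "cats"   # not in the table, so the input is returned unchanged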
Example #7
 def initialize(
         self,
         get_examples: Callable[[], Iterable[Example]],
         *,
         nlp: Language = None
 ) -> None:
     lookups = Lookups()
     self._lookups = lookups.from_disk(path=self.source)
Example #8
def test_lookups_to_from_bytes():
    lookups = Lookups()
    lookups.add_table("table1", {"foo": "bar", "hello": "world"})
    lookups.add_table("table2", {"a": 1, "b": 2, "c": 3})
    lookups_bytes = lookups.to_bytes()
    new_lookups = Lookups()
    new_lookups.from_bytes(lookups_bytes)
    assert len(new_lookups) == 2
    assert "table1" in new_lookups
    assert "table2" in new_lookups
    table1 = new_lookups.get_table("table1")
    assert len(table1) == 2
    assert table1["foo"] == "bar"
    table2 = new_lookups.get_table("table2")
    assert len(table2) == 3
    assert table2["b"] == 2
    assert new_lookups.to_bytes() == lookups_bytes
Example #9
def test_lookups_to_from_disk():
    lookups = Lookups()
    lookups.add_table("table1", {"foo": "bar", "hello": "world"})
    lookups.add_table("table2", {"a": 1, "b": 2, "c": 3})
    with make_tempdir() as tmpdir:
        lookups.to_disk(tmpdir)
        new_lookups = Lookups()
        new_lookups.from_disk(tmpdir)
    assert len(new_lookups) == 2
    assert "table1" in new_lookups
    assert "table2" in new_lookups
    table1 = new_lookups.get_table("table1")
    assert len(table1) == 2
    assert table1["foo"] == "bar"
    table2 = new_lookups.get_table("table2")
    assert len(table2) == 3
    assert table2["b"] == 2
Example #10
def test_issue595():
    """Test lemmatization of base forms"""
    words = ["Do", "n't", "feed", "the", "dog"]
    lookups = Lookups()
    lookups.add_table("lemma_rules", {"verb": [["ed", "e"]]})
    lookups.add_table("lemma_index", {"verb": {}})
    lookups.add_table("lemma_exc", {"verb": {}})
    vocab = Vocab()
    doc = Doc(vocab, words=words)
    doc[2].tag_ = "VB"
    assert doc[2].text == "feed"
    assert doc[2].lemma_ == "feed"
Example #11
def test_issue1387():
    tag_map = {"VBG": {POS: VERB, VerbForm_part: True}}
    lookups = Lookups()
    lookups.add_table("lemma_index", {"verb": ("cope", "cop")})
    lookups.add_table("lemma_exc", {"verb": {"coping": ("cope", )}})
    lookups.add_table("lemma_rules", {"verb": [["ing", ""]]})
    lemmatizer = Lemmatizer(lookups)
    vocab = Vocab(lemmatizer=lemmatizer, tag_map=tag_map)
    doc = Doc(vocab, words=["coping"])
    doc[0].tag_ = "VBG"
    assert doc[0].text == "coping"
    assert doc[0].lemma_ == "cope"
Example #12
 def __init__(self):
     self.entities = []
     self.columns = []
     self.relationships = []
     self.synonyms_col = []
     self.synonyms_tab = []
     self.entity_graph = []
     self.loaded_entities = []
     self.config = Configuration()
     self.conn = pyodbc.connect(self.config.get_sql_connection_string())
     lookups = Lookups()
     self.lemmatizer = Lemmatizer(lookups)
     self.load_db_model()
Example #13
def test_ner_warns_no_lookups():
    nlp = Language()
    nlp.vocab.lookups = Lookups()
    assert not len(nlp.vocab.lookups)
    ner = nlp.create_pipe("ner")
    nlp.add_pipe(ner)
    with pytest.warns(UserWarning):
        nlp.begin_training()
    nlp.vocab.lookups.add_table("lexeme_norm")
    nlp.vocab.lookups.get_table("lexeme_norm")["a"] = "A"
    with pytest.warns(None) as record:
        nlp.begin_training()
        assert not record.list
Example #14
def test_tagger_warns_no_lemma_lookups():
    nlp = Language()
    nlp.vocab.lookups = Lookups()
    assert not len(nlp.vocab.lookups)
    tagger = nlp.create_pipe("tagger")
    with pytest.warns(UserWarning):
        tagger.begin_training()
    nlp.add_pipe(tagger)
    with pytest.warns(UserWarning):
        nlp.begin_training()
    nlp.vocab.lookups.add_table("lemma_lookup")
    with pytest.warns(None) as record:
        nlp.begin_training()
        assert not record.list
Example #15
def test_issue595():
    """Test lemmatization of base forms"""
    words = ["Do", "n't", "feed", "the", "dog"]
    tag_map = {"VB": {POS: VERB, VerbForm_inf: True}}
    lookups = Lookups()
    lookups.add_table("lemma_rules", {"verb": [["ed", "e"]]})
    lookups.add_table("lemma_index", {"verb": {}})
    lookups.add_table("lemma_exc", {"verb": {}})
    lemmatizer = Lemmatizer(lookups)
    vocab = Vocab(lemmatizer=lemmatizer, tag_map=tag_map)
    doc = Doc(vocab, words=words)
    doc[2].tag_ = "VB"
    assert doc[2].text == "feed"
    assert doc[2].lemma_ == "feed"
Example #16
def test_ner_warns_no_lookups(caplog):
    nlp = English()
    assert nlp.lang in util.LEXEME_NORM_LANGS
    nlp.vocab.lookups = Lookups()
    assert not len(nlp.vocab.lookups)
    nlp.add_pipe("ner")
    with caplog.at_level(logging.DEBUG):
        nlp.initialize()
        assert "W033" in caplog.text
    caplog.clear()
    nlp.vocab.lookups.add_table("lexeme_norm")
    nlp.vocab.lookups.get_table("lexeme_norm")["a"] = "A"
    with caplog.at_level(logging.DEBUG):
        nlp.initialize()
        assert "W033" not in caplog.text
Example #17
def test_lemmatizer_init(nlp):
    lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    assert isinstance(lemmatizer.lookups, Lookups)
    assert not lemmatizer.lookups.tables
    assert lemmatizer.mode == "lookup"
    with pytest.raises(ValueError):
        nlp("test")
    nlp.initialize()
    assert lemmatizer.lookups.tables
    assert nlp("cope")[0].lemma_ == "cope"
    assert nlp("coped")[0].lemma_ == "cope"
    # replace any tables from spacy-lookups-data
    lemmatizer.lookups = Lookups()
    # lookup with no tables sets text as lemma
    assert nlp("cope")[0].lemma_ == "cope"
    assert nlp("coped")[0].lemma_ == "coped"
    nlp.remove_pipe("lemmatizer")
    lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    with pytest.raises(ValueError):
        # Can't initialize without required tables
        lemmatizer.initialize(lookups=Lookups())
    lookups = Lookups()
    lookups.add_table("lemma_lookup", {})
    lemmatizer.initialize(lookups=lookups)
Example #18
 def make_lookups_bin(self,
                      lookup_name_pattern='lemma_lookup_{}',
                      filename_pattern='it_lemma_lookup_{}.json'):
     lookups = Lookups()
     lookup_keys = list(self.tag_map.keys())
     for lookup_pos in lookup_keys:
         lookup_name = lookup_name_pattern.format(lookup_pos.lower())
         filename = filename_pattern.format(lookup_pos.lower())
         with open(os.path.join(self.out_path, filename)) as json_file:
             lookup_dict = json.load(json_file)
         lookups.add_table(lookup_name, lookup_dict)
     with open(os.path.join(self.out_path,
                            'it_lemma_lookup.json')) as json_file:
         lookup_dict = json.load(json_file)
     lookups.add_table('lemma_lookup', lookup_dict)
     lookups.to_disk(self.out_path, 'lookups.bin')
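
A sketch of how the lookups.bin written above might be read back, assuming the same output directory; Lookups.from_disk() returns the populated Lookups object and, as in the other examples, looks for lookups.bin inside the given path.

from spacy.lookups import Lookups

loaded = Lookups().from_disk("out_path")   # illustrative path: the directory used as out_path above
assert loaded.has_table("lemma_lookup")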
Example #19
    def lemmatize(self, tokens, toke=False):

        lookups = Lookups()
        lookups.add_table('lemma_index', lemma_index)
        lookups.add_table('lemma_exc', lemma_exc)
        lookups.add_table('lemma_rules', lemma_rules)
        lemmatizer = Lemmatizer(lookups)

        lemmas = []
        for t in tokens:
            # Lemmatizer.__call__ returns a list of candidate lemmas; keep the first one.
            lemmas.append(lemmatizer(t.text, t.tag_)[0])

        if toke:
            return lemmas

        return " ".join(lemmas)
Example #20
def test_lemmatizer_without_is_base_form_implementation():
    # Norwegian example from #5658
    lookups = Lookups()
    lookups.add_table("lemma_rules", {"noun": []})
    lookups.add_table("lemma_index", {"noun": {}})
    lookups.add_table("lemma_exc",
                      {"noun": {
                          "formuesskatten": ["formuesskatt"]
                      }})

    lemmatizer = Lemmatizer(lookups, is_base_form=None)
    assert lemmatizer("Formuesskatten", "noun", {
        'Definite': 'def',
        'Gender': 'masc',
        'Number': 'sing'
    }) == ["formuesskatt"]
Example #21
def test_lookups_api():
    table_name = "test"
    data = {"foo": "bar", "hello": "world"}
    lookups = Lookups()
    lookups.add_table(table_name, data)
    assert table_name in lookups
    assert lookups.has_table(table_name)
    table = lookups.get_table(table_name)
    assert table.name == table_name
    assert len(table) == 2
    assert table.get("hello") == "world"
    table.set("a", "b")
    assert table.get("a") == "b"
    table = lookups.get_table(table_name)
    assert len(table) == 3
    with pytest.raises(KeyError):
        lookups.get_table("xyz")
Example #22
    def from_jsons(
        self, in_indices, in_raw2lemma
    ):  # a little strange because it does not fill in all attributes
        """
        Load index and raw2lemma dictionaries into empty ConceptExtractor

        Args:
            in_indices ():
            in_raw2lemma ():
        """
        with open(in_indices, "r") as f0:
            self.concept_index_mapping = json.load(f0)
        with open(in_raw2lemma, "r") as f0:
            self.raw2lemma = json.load(f0)
        lookups = Lookups()
        lookups.add_table("lemma_lookup", self.raw2lemma)
        self.lemmatizer = Lemmatizer(lookups)
        self.lemma2raw = {v: k for k, v in self.raw2lemma.items()}
        self.concepts = self.concept_index_mapping.keys()
        tmp_frequencies = {
            concept: len(index) for concept, index in self.concept_index_mapping.items()
        }
        self.concepts_frequencies = Counter(tmp_frequencies)
Example #23
def lemmatize():
    """Build a minimal rule-based lemmatizer with a single noun suffix rule."""
    lookups = Lookups()
    lookups.add_table("lemma_rules", {"noun": [["s", ""]]})
    lemmatizer = Lemmatizer(lookups)
    return lemmatizer
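
A minimal sketch of how the fixture above might behave; some spaCy v2.x releases also expect lemma_index and lemma_exc tables to exist, so the sketch registers empty ones next to the rule table.

lookups = Lookups()
lookups.add_table("lemma_rules", {"noun": [["s", ""]]})
lookups.add_table("lemma_index", {"noun": {}})
lookups.add_table("lemma_exc", {"noun": {}})
lemmatizer = Lemmatizer(lookups)
assert lemmatizer("dogs", "noun") == ["dog"]   # the ["s", ""] rule strips the plural suffix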
Example #24
 def from_disk(self, path, exclude=tuple()) -> "LookupLemmatizer":
     path: Path = ensure_path(path)
     lookups = Lookups()
     self._lookups = lookups.from_disk(path=path)
     return self
Example #25
def create_lemmatizer():
    lookups = Lookups()
    with open("lookups/fi_lemma_exc.json") as f:
        lookups.add_table("lemma_exc", json.load(f))
    return FinnishLemmatizer(lookups)
Example #26
def P(T):
    import pandas as pd
    import emoji  # used to check whether a character is an emoji
    from collections import Counter

    # remove the formatting of the source field
    T['source'] = T['source'].str.lower()
    T['source'] = T['source'].str.findall('>([^<]+?)<').apply(
        lambda x: x[0] if len(x) >= 1 else '')

    #import location dictionary and generate country
    T['location'] = [
        T.loc[k, 'place']['country_code']
        if not pd.isnull(T.loc[k, 'place']) else i['location']
        for k, i in enumerate(T['user'])
    ]

    Trans = pd.read_csv(
        '/Users/livi/Documents/2020 Fall/data mining/Proposal/Tweepy related files/transloc.csv',
        index_col=0)
    Trans['googlemap'] = Trans['googlemap'].apply(eval)
    Trans.set_index('UserInfo', inplace=True)
    locdict = Trans.T.to_dict('records')
    locdict = locdict[0]
    kys = list(locdict.keys())
    for k in kys:
        if locdict[k] is None:
            del locdict[k]
        elif len(locdict[k]) != 0:
            if 'address_components' in locdict[k][0]:
                for ii in locdict[k][0]['address_components']:
                    if 'country' in ii['types']:
                        locdict[k] = ii['long_name']
                    else:
                        del locdict[k]
            elif len(locdict[k]) > 1:
                if 'address_components' in locdict[k][1]:
                    for ii in locdict[k][1]['address_components']:
                        if 'country' in ii['types']:
                            locdict[k] = ii['long_name']
                        else:
                            del locdict[k]
            else:
                del locdict[k]
        else:
            del locdict[k]

    ## Generate the column
    l = []
    for i in T['location']:
        try:
            l.append(locdict[i])
        except:
            l.append(float('nan'))
    T['CountryCode'] = l
    print('Finish Generate Country Code')

    #Generate Extended tweets and SDGs
    for i in range(len(T)):
        quote = None
        comment = None
        #prepare quote part
        if not pd.isnull(T.loc[i, 'quoted_status']):
            try:
                quote = T.loc[i,
                              'quoted_status']['extended_tweet']['full_text']
            except:
                quote = T.loc[i, 'quoted_status']['text']
                #print('no extended_tweet for quote',i)
        #prepare comment part
        if pd.isnull(T.loc[i, 'extended_tweet']):
            if pd.isnull(T.loc[i, 'retweeted_status']):
                try:
                    comment = T.loc[i, 'text']
                except:
                    print('no text', i)
            else:
                try:
                    comment = T.loc[
                        i, 'retweeted_status']['extended_tweet']['full_text']
                except:
                    comment = T.loc[i, 'retweeted_status']['text']
                    #print('no extended_tweet for retweeted status',i)
        else:
            try:
                comment = T.loc[i, 'extended_tweet']['full_text']
            except:
                print('no extended_tweet', i)
        #combine quote and comments
        if pd.isnull(quote):
            T.loc[i, 'extended_tweet'] = comment
        else:
            T.loc[i, 'extended_tweet'] = '\"' + comment + ' \" ' + quote
    ## remove some useless information
    T['extended_tweet'] = T['extended_tweet'].str.replace(r"http\S+", "", regex=True)
    #T['extended_tweet'] = T['extended_tweet'].str.replace(r"@\S+", "", regex=True)
    T['extended_tweet'] = T['extended_tweet'].str.replace("&amp", "")
    print('Finish Generate Extended Tweets')

    T = T.reset_index(drop=True)
    T['extended_tweet'] = T['extended_tweet'].str.lower()
    T['SDG'] = T['extended_tweet'].str.upper()
    T['SDG'] = T['SDG'].str.findall(r'(SDG\d+)')
    print('Finish Generate SDGs')

    # Generate User Information and hashtags
    T['id'] = [i['id'] for i in T['user']]
    #T['name']=[i['name']for i in T['user']]
    T['screen_name'] = [i['screen_name'] for i in T['user']]
    T['url'] = [i['url'] for i in T['user']]
    T['friends_count'] = T['user'].apply(lambda x: x['friends_count'])
    T['followers_count'] = T['user'].apply(lambda x: x['followers_count'])
    T['hashtags'] = T['extended_tweet'].str.findall(r'#\S+')
    print('Finish Generate UserInfo and Hashtags')

    # Prepare lemmatized analysis and tokenized extended tweets
    def char_is_emoji(character):
        return character in emoji.UNICODE_EMOJI  # is this single character an emoji?

    def text_has_emoji(text):
        for character in text:
            if character in emoji.UNICODE_EMOJI:
                return True
        return False  # no emoji found in the text

    def deEmojify(inputString):
        return inputString.encode('ascii', 'ignore').decode('ascii')

    T['extended_tweet'] = T['extended_tweet'].apply(lambda x: deEmojify(x))

    import spacy
    from spacy.lemmatizer import Lemmatizer
    from spacy.lookups import Lookups
    sp = spacy.load('en')
    lookups = Lookups()
    lemm = Lemmatizer(lookups)

    def lemma_function(text):
        dummy = []
        #this is just a test to see if it works
        for word in sp(text):
            dummy.append(word.lemma_)
        return ' '.join(dummy)

    T['extended_tweet_lemmatized'] = T['extended_tweet'].apply(
        lambda x: lemma_function(x))
    T['extended_tweet_lemmatized'] = T['extended_tweet_lemmatized'].apply(
        lambda x: x.replace('-PRON-', ''))
    print('Finish deemoji and lemmatization')

    # Generate Sentiment Scores
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
    analyser = SentimentIntensityAnalyzer()

    def sentiment_analyzer_scores(sentence):
        score = analyser.polarity_scores(sentence)
        print("{:-<40} {}".format(sentence, str(score)))

    T['neg'] = T['extended_tweet_lemmatized'].apply(
        lambda x: analyser.polarity_scores(x)['neg'])
    T['neu'] = T['extended_tweet_lemmatized'].apply(
        lambda x: analyser.polarity_scores(x)['neu'])
    T['pos'] = T['extended_tweet_lemmatized'].apply(
        lambda x: analyser.polarity_scores(x)['pos'])
    T['compound'] = T['extended_tweet_lemmatized'].apply(
        lambda x: analyser.polarity_scores(x)['compound'])
    print('Finish Generate Sentiment Score')
    return T
    docx = ""
    returnedSearch = HeadingSearch(filename)
    #If we didn't find anything with the heading search, just use the whole document.
    if returnedSearch is False:
        document1 = docx2txt.process(filename)
        docx = nlp(document1)
    #Otherwise, send the heading search result through nlp
    else:
        docx = nlp(returnedSearch)

    word_frequencies = {}  # how many times each lemma occurs in the document
    words = []  # every distinct lemma, stored in the order it was first seen

    #spacy lemmatizer to get root words
    lookups = Lookups()
    lemmatizer = Lemmatizer(lookups)

    for word in docx:  # go through every word in the document
        if word.text not in stopwords:  # skip stop words
            lemma = lemmatizer.lookup(word.text)
            if lemma not in word_frequencies:  # first time we see this lemma
                word_frequencies[lemma] = 1
                words.append(lemma)  # remember it
            else:
                word_frequencies[lemma] += 1  # already seen, so increment its count
#Sort through the array by bubble sort
Example #28
def morphology():
    lemmatizer = Lemmatizer(Lookups())
    return Morphology(StringStore(), {}, lemmatizer)