Example #1
    def __init__(self,
                 nlp,
                 merge_spans=True,
                 lookup={},
                 pattern_id='EMOJI',
                 attrs=('has_emoji', 'is_emoji', 'emoji_desc', 'emoji')):
        """Initialise the pipeline component.

        nlp (Language): The shared nlp object. Used to initialise the matcher
            with the shared `Vocab`, and create `Doc` match patterns.
        attrs (tuple): Attributes to set on the ._ property. Defaults to
            ('has_emoji', 'is_emoji', 'emoji_desc', 'emoji').
        pattern_id (unicode): ID of match pattern, defaults to 'EMOJI'. Can be
            changed to avoid ID clashes.
        merge_spans (bool): Merge spans containing multi-character emoji. Will
            only merge combined emoji resulting in one icon, not sequences.
        lookup (dict): Optional lookup table that maps emoji unicode strings
            to custom descriptions, e.g. translations or other annotations.
        RETURNS (callable): A spaCy pipeline component.
        """
        self._has_emoji, self._is_emoji, self._emoji_desc, self._emoji = attrs
        self.merge_spans = merge_spans
        self.lookup = lookup
        self.matcher = PhraseMatcher(nlp.vocab)
        emoji_patterns = [nlp(emoji) for emoji in EMOJI.keys()]
        self.matcher.add(pattern_id, None, *emoji_patterns)
        # Add attributes
        Doc.set_extension(self._has_emoji, getter=self.has_emoji)
        Doc.set_extension(self._emoji, getter=self.iter_emoji)
        Span.set_extension(self._has_emoji, getter=self.has_emoji)
        Span.set_extension(self._emoji, getter=self.iter_emoji)
        Token.set_extension(self._is_emoji, default=False)
        Token.set_extension(self._emoji_desc, getter=self.get_emoji_desc)
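
    # The getters referenced above are not part of this snippet. A minimal
    # sketch of what they might look like, assuming the component's __call__
    # flags matched tokens via the is_emoji extension (hypothetical code):
    def has_emoji(self, tokens):
        # True if any token in the Doc/Span was matched as an emoji
        return any(token._.get(self._is_emoji) for token in tokens)

    def iter_emoji(self, tokens):
        # (text, index, description) for every emoji token in the Doc/Span
        return [(t.text, i, t._.get(self._emoji_desc))
                for i, t in enumerate(tokens) if t._.get(self._is_emoji)]

    def get_emoji_desc(self, token):
        # Prefer the custom lookup table, fall back to the shared EMOJI table
        return self.lookup.get(token.text, EMOJI.get(token.text))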
Example #3
    def __init__(self, nlp, label='GPE'):
        """Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the shared vocab, get the label ID and
        generate Doc objects as phrase match patterns.
        """
        # Make request once on initialisation and store the data
        r = requests.get('https://restcountries.eu/rest/v2/all')
        r.raise_for_status()  # make sure requests raises an error if it fails
        countries = r.json()

        # Convert API response to dict keyed by country name for easy lookup
        # This could also be extended using the alternative and foreign language
        # names provided by the API
        self.countries = {c['name']: c for c in countries}
        self.label = nlp.vocab.strings[label]  # get entity label ID

        # Set up the PhraseMatcher with Doc patterns for each country name
        patterns = [nlp(c) for c in self.countries.keys()]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add('COUNTRIES', None, *patterns)

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        # If no default value is set, it defaults to None.
        Token.set_extension('is_country', default=False)
        Token.set_extension('country_capital')
        Token.set_extension('country_latlng')
        Token.set_extension('country_flag')

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_country == True.
        Doc.set_extension('has_country', getter=self.has_country)
        Span.set_extension('has_country', getter=self.has_country)
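
    # Hedged sketch (not in the original snippet) of the has_country getter
    # registered above, assuming __call__ sets token._.is_country on matches:
    def has_country(self, tokens):
        return any(token._.get('is_country') for token in tokens)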
Example #4
    def add_cat(self, spacy_cat):
        self.nlp.add_pipe(spacy_cat, name='cat', last=True)

        # Add custom fields needed for this usecase
        # Doc.set_extension('ents', default=None, force=True)
        Doc.set_extension('tags', default=[], force=True)
        Doc.set_extension('tags_as_df', method=_tags_as_dataframe)
Example #5
    def handle_extension_functions_for_token_02(self):
        # Register the Doc property extension 'has_number' with the getter get_has_number
        Doc.set_extension('has_number', getter=self.__get_has_number__)

        # Process the text and check the custom has_number attribute
        doc = self.nlp("The museum closed for five years in 2012.")
        print('has_number:', doc._.has_number)
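
    # The __get_has_number__ getter is assumed but not shown; a hedged guess
    # at its body using spaCy's built-in like_num flag (hypothetical code):
    def __get_has_number__(self, doc):
        return any(token.like_num for token in doc)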
Example #6
 def __init__(self, model, word_vocab, char_vocab, chunk_vocab, batch_size: int = 32):
     self.model = model
     self.bs = batch_size
     self.word_vocab = word_vocab
     self.char_vocab = char_vocab
     self.chunk_vocab = chunk_vocab
     Doc.set_extension('noun_phrases', default=[], force=True)
Example #7
    def __init__(self, hunspell_object=None):
        self.stopwords_list = [
            'the',
            'a',
            'an',
            'are',
            'on',
            'to',
            'at',
            'every',
            'this'
        ]

        if hunspell_object is None:
            hunspell_object = get_hunspell_default()

        self.hobj = hunspell_object

        # We need another spaCy model because we want to tag the document with
        # corrected spelling mistakes
        # Todo: maybe always do that at the beginning and just save the
        #       spelling mistakes
        self.nlp = spacy.load('en_core_web_lg')

        if not Doc.has_extension("lstFilteredDepParseCorpus"):
            Doc.set_extension("lstFilteredDepParseCorpus", default=[])
Example #8
def main():
    # Load the small English model
    nlp = spacy.load("en_core_web_sm")

    # Add the components first in the pipeline and print the pipe names
    nlp.add_pipe(length_component, first=True)
    print(nlp.pipe_names)

    nlp.add_pipe(preprocessor, first=True)
    print(nlp.pipe_names)

    # Register extensions on the Doc, Token and Span classes
    # (this must happen before the attributes are accessed)
    Doc.set_extension('title', default=None)
    Token.set_extension('is_color', default=False)
    Span.set_extension('has_color', default=False)

    # Process a text
    doc = nlp("This is a sentence.")

    # Add custom metadata on the instances, not on the classes
    doc._.title = 'My document'
    doc[3]._.is_color = True
    doc[0:2]._.has_color = False

    # End program
    print('Done.')
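
# length_component and preprocessor are assumed to be defined elsewhere.
# A minimal sketch of such a stateless component in the spaCy 2.x style used
# above, where add_pipe takes the function itself (hypothetical code):
def length_component(doc):
    print(f"This document is {len(doc)} tokens long.")
    return doc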
Example #9
    def __init__(self,
                 nlp: Language,
                 matcher: Matcher,
                 component_name: str,
                 attribute_name: str,
                 merge_on_match: bool = True):
        """
        Constructor of MatcherComponent object

        Args:
            nlp (Language): Shared spaCy Language object
            matcher (Matcher): Matcher holding the patterns to apply
            component_name (str): Unique name of this component
            attribute_name (str): Label to add for the custom attribute
            merge_on_match (bool): Optional merging on match, defaults to True
        """
        self.nlp = nlp
        self.matcher = matcher
        self.name = component_name
        self.is_attribute = f"is_{attribute_name}"
        self.has_attribute = f"has_{attribute_name}"
        self.merge_on_match = merge_on_match

        # Register attribute on the Token
        Token.set_extension(self.is_attribute, default=False, force=True)
        # Register attributes on Doc and Span via a getter function
        Doc.set_extension(self.has_attribute,
                          getter=self.get_attribute,
                          force=True)
        Span.set_extension(self.has_attribute,
                           getter=self.get_attribute,
                           force=True)
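
    # The get_attribute getter is referenced but not shown. A sketch, assuming
    # __call__ flags matched tokens with the is_<attribute> extension above:
    def get_attribute(self, tokens):
        return any(token._.get(self.is_attribute) for token in tokens)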
Example #10
    def __init__(self, nlp: Language):
        """Initialise components"""

        extensions = [
            "_n_sentences",
            "_n_tokens",
            "_n_syllables",
            "token_length",
            "sentence_length",
            "syllables",
            "counts",
        ]
        ext_funs = [
            n_sentences,
            n_tokens,
            n_syllables,
            self.token_length,
            self.sentence_length,
            self.syllables,
            self.counts,
        ]
        for ext, fun in zip(extensions, ext_funs):
            if ext not in ["_n_sentences", "sentence_length", "syllables"]:
                if not Span.has_extension(ext):
                    Span.set_extension(ext, getter=fun)
            if not Doc.has_extension(ext):
                Doc.set_extension(ext, getter=fun)

        if not Doc.has_extension("_filtered_tokens"):
            Doc.set_extension("_filtered_tokens", default=[])
        if not Span.has_extension("_filtered_tokens"):
            Span.set_extension("_filtered_tokens", getter=filtered_tokens)
Example #11
 def __init__(self, nlp) -> None:
     Doc.set_extension("compound_cases", default=[], force=True)
     self.matcher = Matcher(nlp.vocab)
     common_pattern = [{
         "ent_type": "CASENAME"
     }, {
         "ent_type": "CITATION",
         "OP": "+"
     }]
     possessive_pattern = [
         {
             "ent_type": "CASENAME"
         },
         {
             "lower": "case"
         },
         {
             "ent_type": "CITATION"
         },
     ]
     self.matcher.add("compound_case", None, common_pattern,
                      possessive_pattern)
     self.global_matcher = Matcher(nlp.vocab)
     merge_ents = nlp.create_pipe("merge_entities")
     nlp.add_pipe(merge_ents)
Example #12
    def add_meta_cat(self, meta_cat, name):
        self.nlp.add_pipe(meta_cat, name=name, last=True)

        # Only the meta_anns field is needed; it will be a dictionary
        # of {category_name: value, ...}
        # Span.set_extension('meta_anns', default=None, force=True)
        Doc.set_extension('meta_anns', default=None, force=True)
Example #13
    def __init__(self, nlp) -> None:
        Doc.set_extension("abbreviations", default=[], force=True)
        Span.set_extension("long_form", default=None, force=True)

        self.matcher = Matcher(nlp.vocab)
        self.matcher.add("parenthesis", None, [{'ORTH': '('}, {'OP': '+'}, {'ORTH': ')'}])
        self.global_matcher = Matcher(nlp.vocab)
Example #14
    def __init__(self, nlp):
        """
        Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the configured lines (config.ini) and
        generate Doc objects as phrase match patterns.

        :param nlp: spaCy nlp instance
        """
        self.label = nlp.vocab.strings['EVENT']  # get entity label ID
        self.matcher = Matcher(nlp.vocab)
        self.matcher.add('METRO_DELAYS', None, [{'LOWER': 'retrasos'}], [{'LOWER': 'retraso'}],
                         [{'LOWER': 'frequencia'}],
                         [{'LOWER': 'minutos'}, {'LOWER': 'de'}, {'LOWER': 'espera'}],
                         [{'LOWER': 'min'}, {'LOWER': 'de'}, {'LOWER': 'espera'}],
                         [{'LOWER': 'minutos'}, {'LOWER': 'de'}, {'LOWER': 'retraso'}],
                         [{'LOWER': 'min'}, {'LOWER': 'de'}, {'LOWER': 'retraso'}],
                         [{'LOWER': 'minutos'}, {'LOWER': 'esperando'}],
                         [{'LOWER': 'tiempo'}, {'LOWER': 'de'}, {'LOWER': 'espera'}],
                         [{'LOWER': 'tiempos'}, {'LOWER': 'de'}, {'LOWER': 'espera'}],
                         [{'LOWER': 'frecuencia'}, {'LOWER': 'de'}, {'LOWER': 'paso'}],
                         [{'LOWER': 'frecuencias'}, {'LOWER': 'de'}, {'LOWER': 'paso'}],
                         [{'LOWER': 'frecuencias'}, {'LOWER': 'de'}, {'LOWER': 'trenes'}])

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        Token.set_extension('is_metro_delay', default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_element_matched == True.
        Doc.set_extension('has_metro_delay', getter=self.has_metro_delay)
        Span.set_extension('has_metro_delay', getter=self.has_metro_delay)
Example #15
    def __init__(self, nlp):
        """
        Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the configured lines (config.ini) and
        generate Doc objects as phrase match patterns.

        :param nlp: spaCy nlp instance
        """
        self.label = nlp.vocab.strings['EVENT']  # get entity label ID
        self.matcher = Matcher(nlp.vocab)
        self.matcher.add('METRO_SOLUTIONS', None, [{'LOWER': 'circulación'}, {'LOWER': 'normalizada'}],
                         [{'LOWER': 'circulacion'}, {'LOWER': 'normalizada'}],
                         [{'LOWER': 'servicio'}, {'LOWER': 'normalizado'}],
                         [{'LOWER': 'normalizado'}, {'LOWER': 'el'}, {'LOWER': 'servicio'}],
                         [{'LOWER': 'restablecido'}, {'LOWER': 'el'}, {'LOWER': 'servicio'}],
                         [{'LOWER': 'ya'}, {'LOWER': 'efectúan'}, {'LOWER': 'parada'}],
                         [{'LOWER': 'ya'}, {'LOWER': 'efectuan'}, {'LOWER': 'parada'}])

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        Token.set_extension('is_metro_solution', default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_element_matched == True.
        Doc.set_extension('has_metro_solution', getter=self.has_metro_solution)
        Span.set_extension('has_metro_solution', getter=self.has_metro_solution)
Example #16
 def __init__(self, attrs=None):
     if attrs is None:
         self.attrs = (
             'n_sents',
             'n_words',
             'n_chars',
             'n_syllables',
             'n_unique_words',
             'n_long_words',
             'n_monosyllable_words',
             'n_polysyllable_words',
             'flesch_kincaid_grade_level',
             'flesch_reading_ease',
             'smog_index',
             'gunning_fog_index',
             'coleman_liau_index',
             'automated_readability_index',
             'lix',
             'gulpease_index',
             'wiener_sachtextformel',
         )
     elif isinstance(attrs, compat.string_types):
         self.attrs = (attrs, )
     else:
         self.attrs = tuple(attrs)
     for attr in self.attrs:
         SpacyDoc.set_extension(attr, default=None, force=True)
         LOGGER.debug('"%s" custom attribute added to `spacy.tokens.Doc`')
Example #17
    def __init__(self, nlp, terminology, label='Match', function=None):  # the constructor of our Matcher

        self.matcher = Matcher(nlp.vocab)  # create a Matcher object
        for topic, patterns in terminology.items():  # load the terms of the mini ontology
            for term in patterns:
                self.matcher.add(topic, function, term)
        Doc.set_extension('rule_match', default=False, force=True)  # create an extension on the Doc object so we can
Example #18
 def __init__(self, attrs=None):
     if attrs is None:
         self.attrs = (
             "n_sents",
             "n_words",
             "n_chars",
             "n_syllables",
             "n_unique_words",
             "n_long_words",
             "n_monosyllable_words",
             "n_polysyllable_words",
             "flesch_kincaid_grade_level",
             "flesch_reading_ease",
             "smog_index",
             "gunning_fog_index",
             "coleman_liau_index",
             "automated_readability_index",
             "lix",
             "gulpease_index",
             "wiener_sachtextformel",
         )
     elif isinstance(attrs, (str, bytes)):
         self.attrs = (attrs,)
     else:
         self.attrs = tuple(attrs)
     for attr in self.attrs:
         # TODO: see if there's a better way to handle this
         # that doesn't involve clobbering existing property extensions
         Doc.set_extension(attr, default=None, force=True)
         LOGGER.debug('"%s" custom attribute added to `spacy.tokens.Doc`')
Example #19
 def __init__(self, nlp) -> None:
     Doc.set_extension("concepts", default={}, force=True)
     self.ruler = EntityRuler(nlp)
     self.ruler.add_patterns(CONCEPT_PATTERNS)
     pipes = nlp.pipe_names
     if "EntityRuler" not in pipes:
         nlp.add_pipe(self.ruler, last=True)
Example #20
 def __init__(self, attrs: Optional[Union[str, Collection[str]]] = None):
     self._set_attrs(attrs)
     for attr in self.attrs:
         # TODO: see if there's a better way to handle this
         # that doesn't involve clobbering existing property extensions
         Doc.set_extension(attr, default=None, force=True)
         LOGGER.debug('"%s" custom attribute added to `spacy.tokens.Doc`')
Example #21
    def __init__(self, nlp, label="PRODUCT"):
        labels = []
        APP_ROOT = os.path.dirname(os.path.abspath(__file__))

        with open(os.path.join(APP_ROOT, 'labels.json')) as f:
            labels = json.loads(f.read())

        self.labels = {c["name"].lower(): c for c in labels}
        self.label = nlp.vocab.strings[label]  # get entity label ID

        patterns = [nlp(c) for c in self.labels.keys()]
        self.matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
        self.matcher.add("PRODUCTS", None, *patterns)

        # Register attribute on the Token. We'll be overwriting this based on the matches
        Token.set_extension("is_product", default=False, force=True)
        Token.set_extension("type", default=False, force=True)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_product == True.
        Doc.set_extension("has_product", getter=self.has_product, force=True)
        Span.set_extension("has_product", getter=self.has_product, force=True)
Example #22
    def __init__(self, nlp, companies=tuple(), label='PERSON'):
        """Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the shared vocab, get the label ID and
        generate Doc objects as phrase match patterns.
        """
        self.label = nlp.vocab.strings[label]  # get entity label ID

        # Set up the PhraseMatcher – it can now take Doc objects as patterns,
        # so even if the list of companies is long, it's very efficient
        patterns = [nlp(org) for org in companies]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add('DANISH_NAMES', None, *patterns)

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        Token.set_extension('is_danish_name', default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_tech_org == True.
        Doc.set_extension('has_danish_name', getter=self.has_tech_org)
        Span.set_extension('has_danish_name', getter=self.has_tech_org)
Example #23
    def __init__(self, nlp, lei, label="LEI"):
        """Initialise the pipeline component. The shared nlp instance is used
        to initialise the ruler with the shared vocab, generate Doc objects as
        phrase match patterns.
        """
        # Make request once on initialisation and store the data
        self.label = label

        # Set up the EntityRuler
        patterns = [{
            "label":
            self.label,
            "pattern": [{
                "LOWER": r.lower()
            } for r in result["LegalName"].split()]
        } for result in lei["records"]]
        self.ruler = EntityRuler(nlp, overwrite_ents=True)
        self.ruler.add_patterns(patterns=patterns)

        # Register attribute on the Token with default value.
        # I will overwrite this based on the matches.
        Token.set_extension("is_lei", default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_lei == True.
        Doc.set_extension("has_lei", getter=self.has_lei)
        Span.set_extension("has_lei", getter=self.has_lei)
Example #24
    def _set_spacy_extensions(self):
        def synset_getter(token):

            if not Disambiguator._wn:
                from nltk.corpus import wordnet as wn
                Disambiguator._wn = wn

            else:
                wn = Disambiguator._wn

            offset = token._.offset
            if offset:
                return wn.synset_from_pos_and_offset(offset[-1],
                                                     int(offset[3:-1]))
            else:
                return None

        def bnid_getter(token):
            return babelnet_map.get(token._.offset)

        from spacy.tokens import Doc, Token
        Doc.set_extension('lang', default='en')

        Token.set_extension('lemma_preset_', default=None)
        Token.set_extension('pos_preset_', default=None)

        Token.set_extension('lemma_preset_else_spacy',
                            getter=lambda t: t._.lemma_preset_ or t.lemma_)
        Token.set_extension('pos_preset_else_spacy',
                            getter=lambda t: t._.pos_preset_ or t.pos_)

        Token.set_extension('offset', default=None)
        Token.set_extension('synset', getter=synset_getter)
        Token.set_extension('bnid', getter=bnid_getter)
        Token.set_extension('disambiguator_internals', default=None)
Example #25
    def __call__(
        self,
        doc: Doc,
    ) -> Doc:
        """
Set the extension attributes on a `spaCy` [`Doc`](https://spacy.io/api/doc)
document to create a *pipeline component* for `TextRank` as
a stateful component, invoked when the document gets processed.

See: <https://spacy.io/usage/processing-pipelines#pipelines>

    doc:
a document container, providing the annotations produced by earlier stages of the `spaCy` pipeline
        """
        Doc.set_extension("textrank", force=True, default=None)
        Doc.set_extension("phrases", force=True, default=[])

        doc._.textrank = BaseTextRank(
            doc,
            edge_weight=self.edge_weight,
            pos_kept=self.pos_kept,
            token_lookback=self.token_lookback,
            scrubber=self.scrubber,
            stopwords=self.stopwords,
        )

        doc._.phrases = doc._.textrank.calc_textrank()
        return doc
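
# Hedged usage sketch, assuming an nlp pipeline that includes this component;
# the text/rank fields on the phrase objects follow pyTextRank's usual API:
doc = nlp("Compatibility of systems of linear constraints over natural numbers.")
for phrase in doc._.phrases[:10]:
    print(phrase.text, phrase.rank)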
Example #26
    def __init__(self,
                 *,
                 from_pretrained: Dict[LANG_ISO_639_1, str],
                 attr_name: str = 'bert_repr',
                 max_seq_len: int = 512,
                 pooling_strategy: str = 'REDUCE_MEAN',
                 set_extension: bool = True,
                 force_extension: bool = True):
        """
        Use with spacy pipeline for getting BERT (PyTorch) language-specific
        tensor representations when multiple languages are present in the
        dataset and the languages are pre-known.

        Use after applying spacy_langdetect.LanguageDetector() (in pipeline)
        # https://spacy.io/universe/project/spacy-langdetect

        Keyword arguments only!

        Params
        ------
        from_pretrained: Dict[LANG_ISO_639_1, str]
            Mapping between two-letter language codes to path to model
            directory or HuggingFace transformers pre-trained Bert weights.

        attr_name: str (default='bert_repr')
            Same as in BertInference.

        max_seq_len: int (default=512)
            Same as in BertInference.

        pooling_strategy: str (default='REDUCE_MEAN')
            Same as in BertInference.

        set_extension: bool (default=True)
            Same as in BertInference.

        force_extension: bool (default=True)
            Same as in BertInference.
        """
        self.from_pretrained = from_pretrained
        self.attr_name = attr_name
        self.max_seq_len = max_seq_len
        self.pooling_strategy = pooling_strategy
        self.set_extension = set_extension
        self.force_extension = force_extension

        if set_extension:
            Doc.set_extension(attr_name,
                              getter=self.__call__,
                              force=force_extension)
            Span.set_extension(attr_name,
                               getter=self.__call__,
                               force=force_extension)
            Token.set_extension(attr_name,
                                getter=self.__call__,
                                force=force_extension)
        else:
            Doc.set_extension(attr_name, default=None, force=force_extension)
            Span.set_extension(attr_name, default=None, force=force_extension)
            Token.set_extension(attr_name, default=None, force=force_extension)
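
# Because the getter is wired to __call__, the representation is computed
# lazily on attribute access. Hypothetical usage with the default attr_name:
doc = nlp("A multilingual example sentence.")
doc_vector = doc._.bert_repr
span_vector = doc[0:3]._.bert_repr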
Example #27
    def __init__(self, commoditiesIdNm={}, commoditiesIdCode={}):
        
#        self.countriesIdNm = countriesIdNm if len(countriesIdNm) else None
        self.commoditiesIdNm = commoditiesIdNm if len(commoditiesIdNm) else None
        self.commoditiesIdCode = commoditiesIdCode if len(commoditiesIdCode) else None

        ########## stop words
        self.commStopWords = {    
            0: reCompile("partner|independ|executive|director|trade|negotiator"\
                +"|role|auditor|declar|represent|manager|offer|engage|long|time"\
                +"|underwriter|party|integrated|business|sponsor|joint|role"\
                +"|co-lead|agents|assignment|arranger|plaintiff|lender|engineer"\
                +"|player|dealer|private|rich|corporate|communications|email"\
                +"|advisor|arranger|regulation|estimator|consultant|up", reIGNORECASE )
                }

        ########## Commodities
        if self.commoditiesIdNm is not None:
            comPat = ''    
            for commid, names in self.commoditiesIdNm.items():
                namespat = r'\W|\W'.join(names)
                comPat = comPat + f"|(?P<com{commid}>\\W{namespat}\\W)" 
            comPat = comPat.strip('|')
            
            comCodePat = ''    
            for commid, commcode in self.commoditiesIdCode.items():
                comCodePat = comCodePat + f"|(?P<com{commid}>\\W{commcode}\\W)" 
            comCodePat = comCodePat.strip('|')   
    
            self.comCodePatCompiled = reCompile(comCodePat)
            self.comPatCompiled = reCompile(comPat, reIGNORECASE)
            Span.set_extension('commodities', getter=self.Commodities, force=True)
        Doc.set_extension('commodities', default=[], force=True)
Example #28
    def __init__(self,
                 nlp: Language,
                 name: str = "hyponym_detector",
                 extended: bool = False):

        self.nlp = nlp

        self.patterns = BASE_PATTERNS
        if extended:
            self.patterns.extend(EXTENDED_PATTERNS)

        self.matcher = Matcher(self.nlp.vocab)

        Doc.set_extension("hearst_patterns", default=[], force=True)

        self.first = set()
        self.last = set()

        # add patterns to matcher
        for pattern in self.patterns:
            self.matcher.add(pattern["label"], [pattern["pattern"]])

            # gather list of predicates where the hypernym appears first
            if pattern["position"] == "first":
                self.first.add(pattern["label"])

            # gather list of predicates where the hypernym appears last
            if pattern["position"] == "last":
                self.last.add(pattern["label"])
Example #29
    def __init__(self, nlp):
        """
        Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the configured lines (config.ini) and
        generate Doc objects as phrase match patterns.

        :param nlp: spaCy nlp instance
        """
        self.label = nlp.vocab.strings['EVENT']  # get entity label ID
        self.matcher = Matcher(nlp.vocab)
        self.matcher.add('METRO_STRIKES', None, [{'LOWER': 'huelga'}], [{'LOWER': 'paros'}],
                         [{'LOWER': 'servicios'}, {'LOWER': 'minimos'}],
                         [{'LOWER': 'servicios'}, {'LOWER': 'mínimos'}],
                         [{'LOWER': 'paros'}, {'LOWER': 'convocados'}, {'LOWER': 'para'}, {'LOWER': 'hoy'}],
                         [{'LOWER': 'paros'}, {'LOWER': 'convocados'}, {'LOWER': 'para'}, {'LOWER': 'mañana'}],
                         [{'LOWER': 'paros'}, {'LOWER': 'convocados'}, {'LOWER': 'para'}, {'LOWER': 'el'},
                          {'LOWER': 'dia'}, {'IS_DIGIT': True}],
                         [{'LOWER': 'paros'}, {'LOWER': 'convocados'}, {'LOWER': 'para'}, {'LOWER': 'el'},
                          {'LOWER': 'día'}, {'IS_DIGIT': True}],
                         [{'LOWER': 'paros'}, {'LOWER': 'convocados'}])

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        Token.set_extension('is_metro_strike', default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_element_matched == True.
        Doc.set_extension('has_metro_strike', getter=self.has_metro_strike)
        Span.set_extension('has_metro_strike', getter=self.has_metro_strike)
Example #30
    def __init__(self, nlp):
        """
        Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the configured stations (config.ini) and
        generate Doc objects as phrase match patterns.

        :param nlp: spaCy nlp instance
        """
        self.label = nlp.vocab.strings['FACILITY']  # get entity label ID

        # Set up the PhraseMatcher – it can now take Doc objects as patterns,
        # so even if the list of companies is long, it's very efficient
        metro_stations = config['keywords']['stations'].split(',')
        metro_stations += config['keywords']['stations_lw'].split(',')
        patterns = [nlp(org) for org in metro_stations]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add('METRO_STATIONS', None, *patterns)

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        Token.set_extension('is_metro_station', default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_element_matched == True.
        Doc.set_extension('has_metro_station', getter=self.has_metro_station)
        Span.set_extension('has_metro_station', getter=self.has_metro_station)
Example #31
    def __init__(self,
                 nlp,
                 func: Callable[..., pd.Series] = combo_basic,
                 force: bool = True,
                 *args,
                 **kwargs) -> None:
        """
        This is for initializing the TermExtractionPipeline.
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.__name__ = self.func.__name__
        self.matcher = Matcher(nlp.vocab)
        Doc.set_extension(self.__name__, default=None, force=force)
        self.term_counter = None

        def add_to_counter(matcher, doc, i, matches) -> None:
            match_id, start, end = matches[i]
            candidate = str(doc[start:end])
            if (TermExtraction.word_length(candidate) <=
                    TermExtraction.config["MAX_WORD_LENGTH"]):
                self.term_counter[candidate] += 1

        for i, pattern in enumerate(TermExtraction.patterns):
            self.matcher.add("term{}".format(i), add_to_counter, pattern)
Example #32
def test_doc_to_json_underscore(doc):
    Doc.set_extension("json_test1", default=False)
    Doc.set_extension("json_test2", default=False)
    doc._.json_test1 = "hello world"
    doc._.json_test2 = [1, 2, 3]
    json_doc = doc.to_json(underscore=["json_test1", "json_test2"])
    assert "_" in json_doc
    assert json_doc["_"]["json_test1"] == "hello world"
    assert json_doc["_"]["json_test2"] == [1, 2, 3]
Example #33
def test_underscore_dir(en_vocab):
    """Test that dir() correctly returns extension attributes. This enables
    things like tab-completion for the attributes in doc._."""
    Doc.set_extension("test_dir", default=None)
    doc = Doc(en_vocab, words=["hello", "world"])
    assert "_" in dir(doc)
    assert "test_dir" in dir(doc._)
    assert "test_dir" not in dir(doc[0]._)
    assert "test_dir" not in dir(doc[0:2]._)
Example #34
def test_underscore_docstring(en_vocab):
    """Test that docstrings are available for extension methods, even though
    they're partials."""

    def test_method(doc, arg1=1, arg2=2):
        """I am a docstring"""
        return (arg1, arg2)

    Doc.set_extension("test_docstrings", method=test_method)
    doc = Doc(en_vocab, words=["hello", "world"])
    assert test_method.__doc__ == "I am a docstring"
    assert doc._.test_docstrings.__doc__.rsplit(". ")[-1] == "I am a docstring"
Example #35
def test_underscore_mutable_defaults_list(en_vocab):
    """Test that mutable default arguments are handled correctly (see #2581)."""
    Doc.set_extension("mutable", default=[])
    doc1 = Doc(en_vocab, words=["one"])
    doc2 = Doc(en_vocab, words=["two"])
    doc1._.mutable.append("foo")
    assert len(doc1._.mutable) == 1
    assert doc1._.mutable[0] == "foo"
    assert len(doc2._.mutable) == 0
    doc1._.mutable = ["bar", "baz"]
    doc1._.mutable.append("foo")
    assert len(doc1._.mutable) == 3
    assert len(doc2._.mutable) == 0
Example #36
def main(output_dir=None):
    nlp = English()  # start off with blank English class

    Doc.set_extension('overlap', method=overlap_tokens)
    doc1 = nlp(u"Peach emoji is where it has always been.")
    doc2 = nlp(u"Peach is the superior emoji.")
    print("Text 1:", doc1.text)
    print("Text 2:", doc2.text)
    print("Overlapping tokens:", doc1._.overlap(doc2))

    Doc.set_extension('to_html', method=to_html)
    doc = nlp(u"This is a sentence about Apple.")
    # add entity manually for demo purposes, to make it work without a model
    doc.ents = [Span(doc, 5, 6, label=nlp.vocab.strings['ORG'])]
    print("Text:", doc.text)
    doc._.to_html(output=output_dir, style='ent')
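
# overlap_tokens and to_html are assumed to be defined elsewhere in the script.
# A minimal sketch of the overlap_tokens method registered above (hypothetical
# implementation, not part of the original example):
def overlap_tokens(doc, other_doc):
    """Get the tokens from doc whose text also occurs in other_doc."""
    other_texts = [token.text for token in other_doc]
    return [token for token in doc if token.text in other_texts]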
Example #37
    def __init__(self, nlp, companies=tuple(), label='ORG'):
        """Initialise the pipeline component. The shared nlp instance is used
        to initialise the matcher with the shared vocab, get the label ID and
        generate Doc objects as phrase match patterns.
        """
        self.label = nlp.vocab.strings[label]  # get entity label ID

        # Set up the PhraseMatcher – it can now take Doc objects as patterns,
        # so even if the list of companies is long, it's very efficient
        patterns = [nlp(org) for org in companies]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add('TECH_ORGS', None, *patterns)

        # Register attribute on the Token. We'll be overwriting this based on
        # the matches, so we're only setting a default value, not a getter.
        Token.set_extension('is_tech_org', default=False)

        # Register attributes on Doc and Span via a getter that checks if one of
        # the contained tokens is set to is_tech_org == True.
        Doc.set_extension('has_tech_org', getter=self.has_tech_org)
        Span.set_extension('has_tech_org', getter=self.has_tech_org)
Example #38
def doc_w_attrs(en_tokenizer):
    Doc.set_extension("_test_attr", default=False)
    Doc.set_extension("_test_prop", getter=lambda doc: len(doc.text))
    Doc.set_extension(
        "_test_method", method=lambda doc, arg: "{}{}".format(len(doc.text), arg)
    )
    doc = en_tokenizer("This is a test.")
    doc._._test_attr = "test"
    return doc
Example #39
def test_underscore_accepts_valid(valid_kwargs):
    valid_kwargs["force"] = True
    Doc.set_extension("test", **valid_kwargs)
Example #40
def test_underscore_raises_for_invalid(invalid_kwargs):
    invalid_kwargs["force"] = True
    with pytest.raises(ValueError):
        Doc.set_extension("test", **invalid_kwargs)
Example #41
def test_doc_to_json_underscore_error_serialize(doc):
    """Test that Doc.to_json() raises an error if a custom attribute value
    isn't JSON-serializable."""
    Doc.set_extension("json_test4", method=lambda doc: doc.text)
    with pytest.raises(ValueError):
        doc.to_json(underscore=["json_test4"])