def annotate(lang: Language = Language(),
             model: Model = Model("[treetagger.model]"),
             tt_binary: Binary = Binary("[treetagger.binary]"),
             out_upos: Output = Output("<token>:treetagger.upos", cls="token:upos",
                                       description="Part-of-speeches in UD"),
             out_pos: Output = Output("<token>:treetagger.pos", cls="token:pos",
                                      description="Part-of-speeches from TreeTagger"),
             out_baseform: Output = Output("<token>:treetagger.baseform",
                                           description="Baseforms from TreeTagger"),
             word: Annotation = Annotation("<token:word>"),
             sentence: Annotation = Annotation("<sentence>"),
             encoding: str = util.UTF8):
    """POS/MSD tag and lemmatize using TreeTagger."""
    sentences, _orphans = sentence.get_children(word)
    word_annotation = list(word.read())
    stdin = SENT_SEP.join(TOK_SEP.join(word_annotation[token_index] for token_index in sent)
                          for sent in sentences)
    args = ["-token", "-lemma", "-no-unknown", "-eos-tag", "<eos>", model.path]

    stdout, stderr = util.system.call_binary(tt_binary, args, stdin, encoding=encoding)
    log.debug("Message from TreeTagger:\n%s", stderr)

    # Write pos and upos annotations.
    out_upos_annotation = word.create_empty_attribute()
    out_pos_annotation = word.create_empty_attribute()
    for sent, tagged_sent in zip(sentences, stdout.strip().split(SENT_SEP)):
        for token_id, tagged_token in zip(sent, tagged_sent.strip().split(TOK_SEP)):
            tag = tagged_token.strip().split(TAG_SEP)[TAG_COLUMN]
            out_pos_annotation[token_id] = tag
            out_upos_annotation[token_id] = util.tagsets.pos_to_upos(tag, lang, TAG_SETS.get(lang))
    out_pos.write(out_pos_annotation)
    out_upos.write(out_upos_annotation)

    # Write lemma annotations.
    out_lemma_annotation = word.create_empty_attribute()
    for sent, tagged_sent in zip(sentences, stdout.strip().split(SENT_SEP)):
        for token_id, tagged_token in zip(sent, tagged_sent.strip().split(TOK_SEP)):
            lem = tagged_token.strip().split(TAG_SEP)[LEM_COLUMN]
            out_lemma_annotation[token_id] = lem
    out_baseform.write(out_lemma_annotation)
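# The TreeTagger wrapper above relies on a handful of module-level constants that are outside
# this excerpt. A minimal sketch of plausible definitions follows (values modelled on Sparv's
# treetagger module; treat the exact strings and column indices as assumptions to verify
# against the real module, not as the authoritative configuration).
SENT_SEP = "\n<eos>\n"   # sentence separator sent to and read back from TreeTagger ("-eos-tag <eos>")
TOK_SEP = "\n"           # one token per line
TAG_SEP = "\t"           # TreeTagger prints "word<TAB>tag<TAB>lemma"
TAG_COLUMN = 1           # index of the POS tag in each output line
LEM_COLUMN = 2           # index of the lemma in each output line
# TAG_SETS (a mapping from language code to tagset name) is also assumed to exist in the module.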
def msdtag(out: Output = Output("<token>:hunpos.msd", cls="token:msd",
                                description="Part-of-speeches with morphological descriptions"),
           word: Annotation = Annotation("<token:word>"),
           sentence: Annotation = Annotation("<sentence>"),
           binary: Binary = Binary("[hunpos.binary]"),
           model: Model = Model("[hunpos.model]"),
           morphtable: Optional[Model] = Model("[hunpos.morphtable]"),
           patterns: Optional[Model] = Model("[hunpos.patterns]"),
           tag_mapping=None,
           encoding: str = util.UTF8):
    """POS/MSD tag using the Hunpos tagger."""
    if isinstance(tag_mapping, str) and tag_mapping:
        tag_mapping = util.tagsets.mappings[tag_mapping]
    elif tag_mapping is None or tag_mapping == "":
        tag_mapping = {}

    pattern_list = []
    if patterns:
        with open(patterns.path, encoding="utf-8") as pat:
            for line in pat:
                if line.strip() and not line.startswith("#"):
                    name, pattern, tags = line.strip().split("\t", 2)
                    pattern_list.append((name, re.compile("^%s$" % pattern), tags))

    def replace_word(w):
        """Replace word with alias if word matches a regex pattern."""
        for p in pattern_list:
            if re.match(p[1], w):
                return "[[%s]]" % p[0]
        return w

    sentences, _orphans = sentence.get_children(word)
    token_word = list(word.read())
    stdin = SENT_SEP.join(TOK_SEP.join(replace_word(token_word[token_index]) for token_index in sent)
                          for sent in sentences)

    args = [model.path]
    if morphtable:
        args.extend(["-m", morphtable.path])

    stdout, _ = util.system.call_binary(binary, args, stdin, encoding=encoding)

    out_annotation = word.create_empty_attribute()
    for sent, tagged_sent in zip(sentences, stdout.strip().split(SENT_SEP)):
        for token_index, tagged_token in zip(sent, tagged_sent.strip().split(TOK_SEP)):
            tag = tagged_token.strip().split(TAG_SEP)[TAG_COLUMN]
            tag = tag_mapping.get(tag, tag)
            out_annotation[token_index] = tag

    out.write(out_annotation)
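# Illustration of the pattern file format parsed above ("name<TAB>regex<TAB>tags" lines,
# "#" marks comments) and of the aliasing applied before tagging: tokens matching a pattern
# are sent to Hunpos as "[[name]]". Everything below (file contents, pattern names, tags) is
# an invented example for illustration only, not part of the Sparv configuration.
import re

example_lines = [
    "# name\tpattern\ttags",
    "url\thttps?://\\S+\tNN.NEU.SIN.IND.NOM",
]
example_patterns = []
for line in example_lines:
    if line.strip() and not line.startswith("#"):
        name, pattern, tags = line.strip().split("\t", 2)
        example_patterns.append((name, re.compile("^%s$" % pattern), tags))

def alias(word):
    """Return the "[[name]]" alias for a word matching a pattern, otherwise the word itself."""
    for name, regex, _tags in example_patterns:
        if regex.match(word):
            return "[[%s]]" % name
    return word

assert alias("https://example.com") == "[[url]]"
assert alias("hello") == "hello"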
def contextual(out: Output = Output("{chunk}:geo.geo_context",
                                    description="Geographical places with coordinates"),
               chunk: Annotation = Annotation("{chunk}"),
               context: Annotation = Annotation("[geo.context_chunk]"),
               ne_type: Annotation = Annotation("swener.ne:swener.type"),
               ne_subtype: Annotation = Annotation("swener.ne:swener.subtype"),
               ne_name: Annotation = Annotation("swener.ne:swener.name"),
               model: Model = Model("[geo.model]"),
               method: str = "populous",
               language: list = []):
    """Annotate chunks with location data, based on locations contained within the text.

    context = text chunk to use for disambiguating places (when applicable).
    chunk = text chunk to which the annotation will be added.
    """
    model = load_model(model, language=language)

    ne_type_annotation = list(ne_type.read())
    ne_subtype_annotation = list(ne_subtype.read())
    ne_name_annotation = list(ne_name.read())

    children_context_chunk, _orphans = context.get_children(chunk)
    children_chunk_ne, _orphans = chunk.get_children(ne_type)

    out_annotation = chunk.create_empty_attribute()

    for chunks in children_context_chunk:
        all_locations = []  # TODO: Maybe not needed for anything?
        context_locations = []
        chunk_locations = defaultdict(list)

        for ch in chunks:
            for n in children_chunk_ne[ch]:
                if ne_type_annotation[n] == "LOC" and "PPL" in ne_subtype_annotation[n]:
                    location_text = ne_name_annotation[n].replace("\n", " ").replace("  ", " ")
                    location_data = model.get(location_text.lower())
                    if location_data:
                        all_locations.append((location_text, list(location_data)))
                        context_locations.append((location_text, list(location_data)))
                        chunk_locations[ch].append((location_text, list(location_data)))
                    else:
                        pass
                        # log.info("No location found for %s" % ne_name_annotation[n].replace("%", "%%"))

        chunk_locations = most_populous(chunk_locations)

        for c in chunks:
            out_annotation[c] = _format_location(chunk_locations.get(c, ()))

    out.write(out_annotation)
def process_output(word: Annotation, out: Output, stdout, in_sentences, saldo_annotation,
                   prob_format, default_prob):
    """Parse WSD output and write annotation."""
    out_annotation = word.create_empty_attribute()

    # Split output into sentences
    out_sentences = stdout.strip()
    out_sentences = out_sentences.split("\t".join(["_", "_", "_", "_", SENT_SEP, "_", "_"]))
    out_sentences = [i for i in out_sentences if i]

    # Split output into tokens
    for out_sent, in_sent in zip(out_sentences, in_sentences):
        out_tokens = [t for t in out_sent.split("\n") if t]
        for (out_tok, in_tok) in zip(out_tokens, in_sent):
            out_prob = out_tok.split("\t")[6]
            out_prob = [i for i in out_prob.split("|") if i != "_"]
            out_meanings = [i for i in out_tok.split("\t")[5].split("|") if i != "_"]
            saldo = [i for i in saldo_annotation[in_tok].strip(util.AFFIX).split(util.DELIM) if i]

            new_saldo = []
            if out_prob:
                for meaning in saldo:
                    if meaning in out_meanings:
                        i = out_meanings.index(meaning)
                        new_saldo.append((meaning, float(out_prob[i])))
                    else:
                        new_saldo.append((meaning, default_prob))
            else:
                new_saldo = [(meaning, default_prob) for meaning in saldo]

            # Sort by probability
            new_saldo.sort(key=lambda x: (-x[1], x[0]))
            # Format probability according to prob_format
            new_saldo = [saldo + prob_format % prob if prob_format else saldo
                         for saldo, prob in new_saldo]
            out_annotation[in_tok] = util.cwbset(new_saldo)

    out.write(out_annotation)
def metadata(out: Output = Output("{chunk}:geo.geo_metadata",
                                  description="Geographical places with coordinates"),
             chunk: Annotation = Annotation("{chunk}"),
             source: Annotation = Annotation("[geo.metadata_source]"),
             model: Model = Model("[geo.model]"),
             method: str = "populous",
             language: list = []):
    """Get location data based on metadata containing location names."""
    geomodel = load_model(model, language=language)

    same_target_source = chunk.split()[0] == source.split()[0]
    chunk_annotation = list(chunk.read())
    source_annotation = list(source.read())

    # If location source and target chunk are not the same, we need
    # to find the parent/child relations between them.
    if not same_target_source:
        target_source_parents = list(source.get_parents(chunk))

    chunk_locations = {}

    for i, _ in enumerate(chunk_annotation):
        if same_target_source:
            location_source = source_annotation[i]
        else:
            location_source = source_annotation[target_source_parents[i]] \
                if target_source_parents[i] is not None else None

        if location_source:
            location_data = geomodel.get(location_source.strip().lower())
            if location_data:
                chunk_locations[i] = [(location_source, list(location_data))]
        else:
            chunk_locations[i] = []

    chunk_locations = most_populous(chunk_locations)

    out_annotation = chunk.create_empty_attribute()
    for c in chunk_locations:
        out_annotation[c] = _format_location(chunk_locations.get(c, ()))

    out.write(out_annotation)
def _read_chunks_and_write_new_ordering(out: Output, chunk: Annotation, order, prefix="",
                                        zfill=False, start=START_DEFAULT):
    """Common function called by other numbering functions."""
    new_order = defaultdict(list)

    in_annotation = list(chunk.read())

    for i, val in enumerate(in_annotation):
        val = order(i, val)
        new_order[val].append(i)

    out_annotation = chunk.create_empty_attribute()

    nr_digits = len(str(len(new_order) - 1 + start))
    for nr, key in enumerate(sorted(new_order), start):
        for index in new_order[key]:
            out_annotation[index] = "{prefix}{nr:0{length}d}".format(prefix=prefix,
                                                                     length=nr_digits if zfill else 0,
                                                                     nr=nr)

    out.write(out_annotation)
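# A standalone sketch of the numbering scheme used above: chunks are grouped by the key returned
# by `order`, the groups are sorted, and every chunk in a group receives the group's (optionally
# zero-padded) running number. The helper below mirrors that logic on a plain list of keys, purely
# for illustration; it is not part of the module.
from collections import defaultdict

def number_by_key(keys, prefix="", zfill=True, start=1):
    groups = defaultdict(list)
    for i, key in enumerate(keys):
        groups[key].append(i)
    nr_digits = len(str(len(groups) - 1 + start))
    numbering = [None] * len(keys)
    for nr, key in enumerate(sorted(groups), start):
        for index in groups[key]:
            numbering[index] = "{prefix}{nr:0{length}d}".format(
                prefix=prefix, length=nr_digits if zfill else 0, nr=nr)
    return numbering

# Items sharing a key share a number; numbers follow the sorted order of the keys.
assert number_by_key(["b", "a", "b"], prefix="s") == ["s2", "s1", "s2"]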
def text_headtail(text: Text = Text(),
                  chunk: Annotation = Annotation("<token>"),
                  out_head: Output = Output("<token>:misc.head"),
                  out_tail: Output = Output("<token>:misc.tail")):
    """Extract "head" and "tail" whitespace characters for tokens."""
    def escape(t):
        """Escape whitespace characters."""
        return t.replace(" ", "\\s").replace("\n", "\\n").replace("\t", "\\t")

    out_head_annotation = chunk.create_empty_attribute()
    out_tail_annotation = chunk.create_empty_attribute()
    head_text = None

    corpus_text = text.read()
    chunk = list(chunk.read())

    for i, span in enumerate(chunk):
        if head_text:
            out_head_annotation[i] = escape(head_text)
            head_text = None

        if i < len(chunk) - 1:
            tail_start = span[1][0]
            tail_end = chunk[i + 1][0][0]
            tail_text = corpus_text[tail_start:tail_end]

            try:
                n_pos = tail_text.rindex("\n")
            except ValueError:
                n_pos = None
            if n_pos is not None and n_pos + 1 < len(tail_text):
                head_text = tail_text[n_pos + 1:]
                tail_text = tail_text[:n_pos + 1]

            if tail_text:
                out_tail_annotation[i] = escape(tail_text)

    out_head.write(out_head_annotation)
    out_tail.write(out_tail_annotation)
def annotate(maltjar: Binary = Binary("[malt.jar]"),
             model: Model = Model("[malt.model]"),
             out_dephead: Output = Output("<token>:malt.dephead", cls="token:dephead",
                                          description="Positions of the dependency heads"),
             out_dephead_ref: Output = Output("<token>:malt.dephead_ref", cls="token:dephead_ref",
                                              description="Sentence-relative positions of the dependency heads"),
             out_deprel: Output = Output("<token>:malt.deprel", cls="token:deprel",
                                         description="Dependency relations to the head"),
             word: Annotation = Annotation("<token:word>"),
             pos: Annotation = Annotation("<token:pos>"),
             msd: Annotation = Annotation("<token:msd>"),
             ref: Annotation = Annotation("<token>:misc.number_rel_<sentence>"),
             sentence: Annotation = Annotation("<sentence>"),
             token: Annotation = Annotation("<token>"),
             encoding: str = util.UTF8,
             process_dict=None):
    """Run the Malt parser, either in an already started process defined in process_dict,
    or in a newly started process (default).

    The process_dict argument should never be set from the command line.
    """
    if process_dict is None:
        process = maltstart(maltjar, model, encoding)
    else:
        process = process_dict["process"]
        # If the process seems dead, spawn a new one
        if process.stdin.closed or process.stdout.closed or process.poll():
            util.system.kill_process(process)
            process = maltstart(maltjar, model, encoding, send_empty_sentence=True)
            process_dict["process"] = process

    sentences, orphans = sentence.get_children(token)
    sentences.append(orphans)

    word_annotation = list(word.read())
    pos_annotation = list(pos.read())
    msd_annotation = list(msd.read())
    ref_annotation = list(ref.read())

    def conll_token(nr, token_index):
        form = word_annotation[token_index]
        lemma = UNDEF
        pos = cpos = pos_annotation[token_index]
        feats = re.sub(r"[ ,.]", "|", msd_annotation[token_index]).replace("+", "/")
        return TAG_SEP.join((str(nr), form, lemma, cpos, pos, feats))

    stdin = SENT_SEP.join(TOK_SEP.join(conll_token(n + 1, token_index)
                                       for n, token_index in enumerate(sent))
                          for sent in sentences)

    if encoding:
        stdin = stdin.encode(encoding)

    keep_process = len(stdin) < RESTART_THRESHOLD_LENGTH and process_dict is not None
    log.info("Stdin length: %s, keep process: %s", len(stdin), keep_process)

    if process_dict is not None:
        process_dict["restart"] = not keep_process

    if keep_process:
        # Chatting with malt: send a SENT_SEP and read the correct number of lines
        stdin_fd, stdout_fd = process.stdin, process.stdout
        stdin_fd.write(stdin + SENT_SEP.encode(util.UTF8))
        stdin_fd.flush()

        malt_sentences = []
        for sent in sentences:
            malt_sent = []
            for _ in sent:
                line = stdout_fd.readline()
                if encoding:
                    line = line.decode(encoding)
                malt_sent.append(line)
            line = stdout_fd.readline()
            assert line == b"\n"
            malt_sentences.append(malt_sent)
    else:
        # Otherwise use communicate which buffers properly
        stdout, _ = process.communicate(stdin)
        if encoding:
            stdout = stdout.decode(encoding)
        malt_sentences = (malt_sent.split(TOK_SEP) for malt_sent in stdout.split(SENT_SEP))

    out_dephead_annotation = word.create_empty_attribute()
    out_dephead_ref_annotation = out_dephead_annotation.copy()
    out_deprel_annotation = out_dephead_annotation.copy()
    for (sent, malt_sent) in zip(sentences, malt_sentences):
        for (token_index, malt_tok) in zip(sent, malt_sent):
            cols = [(None if col == UNDEF else col) for col in malt_tok.split(TAG_SEP)]
            out_deprel_annotation[token_index] = cols[DEPREL_COLUMN]
            head = int(cols[HEAD_COLUMN])
            out_dephead_annotation[token_index] = str(sent[head - 1]) if head else "-"
            out_dephead_ref_annotation[token_index] = str(ref_annotation[sent[head - 1]]) if head else ""

    out_dephead.write(out_dephead_annotation)
    out_dephead_ref.write(out_dephead_ref_annotation)
    out_deprel.write(out_deprel_annotation)
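# The Malt wrapper above depends on module-level constants that are outside this excerpt. The
# sketch below shows one plausible set of definitions, inferred from the CoNLL-X-style rows that
# conll_token() produces; the exact values (in particular RESTART_THRESHOLD_LENGTH) are
# assumptions and should be checked against the real malt module.
SENT_SEP = "\n\n"                  # blank line between sentences
TOK_SEP = "\n"                     # one token per line
TAG_SEP = "\t"                     # tab-separated CoNLL columns
UNDEF = "_"                        # CoNLL placeholder for missing values
HEAD_COLUMN = 6                    # 0-based index of HEAD in MaltParser's output rows
DEPREL_COLUMN = 7                  # 0-based index of DEPREL
RESTART_THRESHOLD_LENGTH = 64000   # restart the long-running process for very large inputs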
def annotate_words(out: Output, model: Model, saldoids: Annotation, pos: Annotation, annotate,
                   pos_limit: List[str], class_set=None, disambiguate=True, connect_ids=False,
                   delimiter=util.DELIM, affix=util.AFFIX, scoresep=util.SCORESEP, lexicon=None):
    """Annotate words with blingbring classes (rogetID).

    - out: resulting annotation.
    - model: pickled lexicon with saldoIDs as keys.
    - saldoids, pos: existing annotations with saldoIDs/parts of speech.
    - annotate: annotation function, returns an iterable containing annotations
      for one token ID. (annotate_bring() or annotate_swefn())
    - pos_limit: parts of speech that will be annotated.
      Set to None to annotate all pos.
    - class_set: output Bring classes or Roget IDs ("bring", "roget_head",
      "roget_subsection", "roget_section" or "roget_class").
      Set to None when not annotating blingbring.
    - disambiguate: use WSD and use only the most likely saldo ID.
    - connect_ids: for sweFN: paste saldo ID after each sweFN ID.
    - delimiter: delimiter character to put between ambiguous results.
    - affix: optional character to put before and after results to mark a set.
    - lexicon: this argument cannot be set from the command line, but is used
      in the catapult. This argument must be last.
    """
    if not lexicon:
        lexicon = util.PickledLexicon(model.path)
    # Otherwise use pre-loaded lexicon (from catapult)

    sense = saldoids.read()
    token_pos = list(pos.read())
    out_annotation = pos.create_empty_attribute()

    # Check if the saldo IDs are ranked (= word senses have been disambiguated)
    wsd = saldoids.split()[1].split(".")[0] == "wsd"

    for token_index, token_sense in enumerate(sense):

        # Check if the part of speech of this token is allowed
        if not pos_ok(token_pos, token_index, pos_limit):
            saldo_ids = None
            out_annotation[token_index] = affix
            continue

        if wsd and util.SCORESEP in token_sense:
            ranked_saldo = token_sense.strip(util.AFFIX).split(util.DELIM) \
                if token_sense != util.AFFIX else None
            saldo_tuples = [(i.split(util.SCORESEP)[0], i.split(util.SCORESEP)[1])
                            for i in ranked_saldo]

            if not disambiguate:
                saldo_ids = [i[0] for i in saldo_tuples]

            # Only take the most likely analysis into account.
            # Handle wsd with equal probability for several words
            else:
                saldo_ids = [saldo_tuples[0]]
                del saldo_tuples[0]
                while saldo_tuples and (saldo_tuples[0][1] == saldo_ids[0][1]):
                    saldo_ids = [saldo_tuples[0]]
                    del saldo_tuples[0]
                saldo_ids = [i[0] for i in saldo_ids]

        else:  # No WSD
            saldo_ids = token_sense.strip(util.AFFIX).split(util.DELIM) \
                if token_sense != util.AFFIX else None

        result = annotate(saldo_ids, lexicon, connect_ids, scoresep)
        out_annotation[token_index] = util.cwbset(result, delimiter, affix) if result else affix

    out.write(out_annotation)
def annotate(token: Annotation = Annotation("<token>"),
             word: Annotation = Annotation("<token:word>"),
             sentence: Annotation = Annotation("<sentence>"),
             reference: Annotation = Annotation("<token>:misc.number_rel_<sentence>"),
             out_sense: Output = Output("<token>:saldo.sense", cls="token:sense",
                                        description="SALDO identifier"),
             out_lemgram: Output = Output("<token>:saldo.lemgram", description="SALDO lemgram"),
             out_baseform: Output = Output("<token>:saldo.baseform", cls="token:baseform",
                                           description="Baseform from SALDO"),
             models: List[Model] = [Model("[saldo.model]")],
             msd: Optional[Annotation] = Annotation("<token:msd>"),
             delimiter: str = util.DELIM,
             affix: str = util.AFFIX,
             precision: str = Config("saldo.precision"),
             precision_filter: str = "max",
             min_precision: float = 0.66,
             skip_multiword: bool = False,
             allow_multiword_overlap: bool = False,
             word_separator: str = "",
             lexicons=None):
    """Use the SALDO lexicon model (and optionally other, older lexicons) to annotate pos-tagged words.

    - token, word, msd, sentence, reference: existing annotations
    - out_baseform, out_lemgram, out_sense: resulting annotations to be written
    - models: a list of pickled lexica, typically the SALDO model (saldo.pickle)
      and optional lexicons for older Swedish.
    - delimiter: delimiter character to put between ambiguous results
    - affix: an optional character to put before and after results
    - precision: a format string for how to print the precision for each annotation, e.g. ":%.3f"
      (use an empty string for no precision)
    - precision_filter: an optional filter, currently the following values are supported:
        max: only use the annotations that are most probable
        first: only use the most probable annotation (or one of the most probable if more than one)
        none: use all annotations
    - min_precision: only use annotations with a probability score higher than this
    - skip_multiword: set to True to disable multi-word annotations
    - allow_multiword_overlap: by default we do some cleanup among overlapping multi-word
      annotations. By setting this to True, all overlaps will be allowed.
    - word_separator: an optional character used to split the values of "word" into several
      word variations
    - lexicons: this argument cannot be set from the command line, but is used in the catapult.
      This argument must be last.
    """
    # Allow use of multiple lexicons
    models_list = [(m.path.stem, m) for m in models]

    if not lexicons:
        lexicon_list = [(name, SaldoLexicon(lex.path)) for name, lex in models_list]
    # Use pre-loaded lexicons (from catapult)
    else:
        lexicon_list = []
        for name, _lex in models_list:
            assert lexicons.get(name, None) is not None, "Lexicon %s not found!" % name
            lexicon_list.append((name, lexicons[name]))

    # Maximum number of gaps in multi-word units.
    # TODO: Set to 0 for hist-mode? Since many (most?) multi-word units in the old lexicons
    # are inseparable (half öre etc.)
    max_gaps = 1

    # Combine annotation names in the SALDO lexicon with out annotations
    annotations = []
    if out_baseform:
        annotations.append((out_baseform, "gf"))
    if out_lemgram:
        annotations.append((out_lemgram, "lem"))
    if out_sense:
        annotations.append((out_sense, "saldo"))

    if skip_multiword:
        log.info("Skipping multi word annotations")

    min_precision = float(min_precision)

    # If min_precision is 0, skip almost all part-of-speech checking (verb multi-word expressions
    # still won't be allowed to span over other verbs)
    skip_pos_check = (min_precision == 0.0)

    word_annotation = list(word.read())
    ref_annotation = list(reference.read())
    if msd:
        msd_annotation = list(msd.read())

    sentences, orphans = sentence.get_children(token)
    sentences.append(orphans)

    out_annotation = word.create_empty_attribute()

    for sent in sentences:
        incomplete_multis = []  # [{annotation, words, [ref], is_particle, lastwordWasGap, numberofgaps}]
        complete_multis = []    # ([ref], annotation)
        sentence_tokens = {}

        for token_index in sent:
            theword = word_annotation[token_index]
            ref = ref_annotation[token_index]
            msdtag = msd_annotation[token_index] if msd else ""

            annotation_info = {}
            sentence_tokens[ref] = {"token_index": token_index, "annotations": annotation_info}

            # Support for multiple values of word
            if word_separator:
                thewords = [w for w in theword.split(word_separator) if w]
            else:
                thewords = [theword]

            # First use MSD tags to find the most probable single word annotations
            ann_tags_words = find_single_word(thewords, lexicon_list, msdtag, precision,
                                              min_precision, precision_filter, annotation_info)

            # Find multi-word expressions
            if not skip_multiword:
                find_multiword_expressions(incomplete_multis, complete_multis, thewords, ref, msdtag,
                                           max_gaps, ann_tags_words, msd_annotation, sent,
                                           skip_pos_check)

            # Loop to next token

        if not allow_multiword_overlap:
            # Check that we don't have any unwanted overlaps
            remove_unwanted_overlaps(complete_multis)

        # Then save the rest of the multi-word expressions in sentence_tokens
        save_multiwords(complete_multis, sentence_tokens)

        for tok in list(sentence_tokens.values()):
            out_annotation[tok["token_index"]] = _join_annotation(tok["annotations"], delimiter, affix)

        # Loop to next sentence

    for out_annotation_obj, annotation_name in annotations:
        out_annotation_obj.write([v.get(annotation_name, delimiter) for v in out_annotation])
def _formatter(in_from: Annotation, in_to: Optional[Annotation], out_from: Output, out_to: Output,
               informat: str, outformat: str, splitter: str, regex: str):
    """Take existing dates/times and input formats and convert them to the specified output format."""
    def get_smallest_unit(informat):
        smallest_unit = 0  # No date

        if "%y" not in informat and "%Y" not in informat:
            pass
        elif "%b" not in informat and "%B" not in informat and "%m" not in informat:
            smallest_unit = 1  # year
        elif "%d" not in informat:
            smallest_unit = 2  # month
        elif "%H" not in informat and "%I" not in informat:
            smallest_unit = 3  # day
        elif "%M" not in informat:
            smallest_unit = 4  # hour
        elif "%S" not in informat:
            smallest_unit = 5  # minute
        else:
            smallest_unit = 6  # second

        return smallest_unit

    def get_date_length(informat):
        parts = informat.split("%")
        length = len(parts[0])  # First value is either blank or not part of the date

        lengths = {"Y": 4, "3Y": 3, "y": 2, "m": 2, "b": None, "B": None, "d": 2,
                   "H": None, "I": None, "M": 2, "S": 2}

        for part in parts[1:]:
            add = lengths.get(part[0], None)
            if add:
                length += add + len(part[1:])
            else:
                return None

        return length

    if not in_to:
        in_to = in_from

    informat = informat.split("|")
    outformat = outformat.split("|")

    assert len(outformat) == 1 or (len(outformat) == len(informat)), \
        "The number of out-formats must be equal to one or to the number of in-formats."

    ifrom = list(in_from.read())
    ofrom = in_from.create_empty_attribute()

    for index, val in enumerate(ifrom):
        val = val.strip()
        if not val:
            ofrom[index] = None
            continue

        tries = 0
        for inf in informat:
            if splitter and splitter in inf:
                values = re.findall("%[YybBmdHMS]", inf)
                if len(set(values)) < len(values):
                    vals = val.split(splitter)
                    inf = inf.split(splitter)
            else:
                vals = [val]
                inf = [inf]

            if regex:
                temp = []
                for v in vals:
                    matches = re.search(regex, v)
                    if matches:
                        temp.append([x for x in matches.groups() if x][0])
                if not temp:
                    # If the regex doesn't match, treat as no date
                    ofrom[index] = None
                    continue
                vals = temp

            tries += 1
            try:
                fromdates = []
                for i, v in enumerate(vals):
                    if "%3Y" in inf[i]:
                        datelen = get_date_length(inf[i])
                        if datelen and not datelen == len(v):
                            raise ValueError
                        inf[i] = inf[i].replace("%3Y", "%Y")
                        v = "0" + v
                    if "%0m" in inf[i] or "%0d" in inf[i]:
                        inf[i] = inf[i].replace("%0m", "%m").replace("%0d", "%d")
                        datelen = get_date_length(inf[i])
                        if datelen and not datelen == len(v):
                            raise ValueError
                    fromdates.append(datetime.datetime.strptime(v, inf[i]))
                if len(fromdates) == 1 or out_to:
                    ofrom[index] = fromdates[0].strftime(outformat[0] if len(outformat) == 1
                                                         else outformat[tries - 1])
                else:
                    outstrings = [fromdate.strftime(outformat[0] if len(outformat) == 1
                                                    else outformat[tries - 1])
                                  for fromdate in fromdates]
                    ofrom[index] = outstrings[0] + splitter + outstrings[1]
                break
            except ValueError:
                if tries == len(informat):
                    log.error("Could not parse: %s", str(vals))
                    raise
                continue

    out_from.write(ofrom)
    del ofrom

    if out_to:
        ito = list(in_to.read())
        oto = in_to.create_empty_attribute()

        for index, val in enumerate(ito):
            if not val:
                oto[index] = None
                continue

            tries = 0
            for inf in informat:
                if splitter and splitter in inf:
                    values = re.findall("%[YybBmdHMS]", inf)
                    if len(set(values)) < len(values):
                        vals = val.split(splitter)
                        inf = inf.split(splitter)
                else:
                    vals = [val]
                    inf = [inf]

                if regex:
                    temp = []
                    for v in vals:
                        matches = re.search(regex, v)
                        if matches:
                            temp.append([x for x in matches.groups() if x][0])
                    if not temp:
                        # If the regex doesn't match, treat as no date
                        oto[index] = None
                        continue
                    vals = temp

                tries += 1
                try:
                    todates = []
                    for i, v in enumerate(vals):
                        if "%3Y" in inf[i]:
                            datelen = get_date_length(inf[i])
                            if datelen and not datelen == len(v):
                                raise ValueError
                            inf[i] = inf[i].replace("%3Y", "%Y")
                            v = "0" + v
                        if "%0m" in inf[i] or "%0d" in inf[i]:
                            inf[i] = inf[i].replace("%0m", "%m").replace("%0d", "%d")
                            datelen = get_date_length(inf[i])
                            if datelen and not datelen == len(v):
                                raise ValueError
                        todates.append(datetime.datetime.strptime(v, inf[i]))

                    smallest_unit = get_smallest_unit(inf[0])
                    if smallest_unit == 1:
                        add = relativedelta(years=1)
                    elif smallest_unit == 2:
                        add = relativedelta(months=1)
                    elif smallest_unit == 3:
                        add = relativedelta(days=1)
                    elif smallest_unit == 4:
                        add = relativedelta(hours=1)
                    elif smallest_unit == 5:
                        add = relativedelta(minutes=1)
                    elif smallest_unit == 6:
                        add = relativedelta(seconds=1)

                    todates = [todate + add - relativedelta(seconds=1) for todate in todates]
                    oto[index] = todates[-1].strftime(outformat[0] if len(outformat) == 1
                                                      else outformat[tries - 1])
                    break
                except ValueError:
                    if tries == len(informat):
                        log.error("Could not parse: %s", str(vals))
                        raise
                    continue

        out_to.write(oto)
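# A small standalone illustration of the conversion convention implemented above: an in-format is
# parsed with strptime, re-serialized with strftime, and "to" dates are pushed to the end of their
# smallest given unit (here a year-only date becomes the last second of that year). This mirrors
# the _formatter logic but does not call it; the format strings and values are example input.
import datetime
from dateutil.relativedelta import relativedelta

fromdate = datetime.datetime.strptime("20240131", "%Y%m%d")
assert fromdate.strftime("%Y-%m-%d %H:%M:%S") == "2024-01-31 00:00:00"

todate = datetime.datetime.strptime("2024", "%Y")
todate = todate + relativedelta(years=1) - relativedelta(seconds=1)
assert todate.strftime("%Y-%m-%d %H:%M:%S") == "2024-12-31 23:59:59"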
def annotate_text(out: Output, lexical_classes_token: Annotation, text: Annotation, token: Annotation,
                  saldoids, cutoff, types, delimiter, affix, freq_model, decimals):
    """Annotate text chunks with lexical classes.

    - out: resulting annotation file.
    - lexical_classes_token: existing annotation with lexical classes on token level.
    - text, token: existing annotations for the text-IDs and the tokens.
    - saldoids: existing annotation with saldoIDs, needed when types=True.
    - cutoff: value for limiting the resulting bring classes.
      The result will contain all words with the top x frequencies.
      Words with frequency = 1 will be removed from the result.
    - types: if True, count every class only once per saldo ID occurrence.
    - delimiter: delimiter character to put between ambiguous results.
    - affix: optional character to put before and after results to mark a set.
    - freq_model: pickled file with reference frequencies.
    - decimals: number of decimals to keep in output.
    """
    cutoff = int(cutoff)
    text_children, _orphans = text.get_children(token, preserve_parent_annotation_order=True)
    classes = list(lexical_classes_token.read())
    sense = list(saldoids.read()) if types else None

    if freq_model:
        freq_model = util.PickledLexicon(freq_model.path)

    out_annotation = text.create_empty_attribute()

    for text_index, words in enumerate(text_children):
        seen_types = set()
        class_freqs = defaultdict(int)

        for token_index in words:
            # Count only sense types
            if types:
                senses = str(sorted([s.split(util.SCORESEP)[0] for s in
                                     sense[token_index].strip(util.AFFIX).split(util.DELIM)]))
                if senses in seen_types:
                    continue
                else:
                    seen_types.add(senses)

            rogwords = classes[token_index].strip(util.AFFIX).split(util.DELIM) \
                if classes[token_index] != util.AFFIX else []
            for w in rogwords:
                class_freqs[w] += 1

        if freq_model:
            for c in class_freqs:
                # Relative frequency
                rel = class_freqs[c] / len(words)
                # Calculate class dominance
                ref_freq = freq_model.lookup(c.replace("_", " "), 0)
                if not ref_freq:
                    log.error("Class '%s' is missing" % c)
                class_freqs[c] = (rel / ref_freq)

        # Sort words according to frequency/dominance
        ordered_words = sorted(class_freqs.items(), key=lambda x: x[1], reverse=True)
        if freq_model:
            # Remove words with dominance < 1
            ordered_words = [w for w in ordered_words if w[1] >= 1]
        else:
            # Remove words with frequency 1
            ordered_words = [w for w in ordered_words if w[1] > 1]

        if len(ordered_words) > cutoff:
            cutoff_freq = ordered_words[cutoff - 1][1]
            ordered_words = [w for w in ordered_words if w[1] >= cutoff_freq]

        # Join words and frequencies/dominances
        ordered_words = [util.SCORESEP.join([word, str(round(freq, decimals))])
                         for word, freq in ordered_words]
        out_annotation[text_index] = util.cwbset(ordered_words, delimiter, affix) if ordered_words else affix

    out.write(out_annotation)