def lemgram_sql(corpus: Corpus = Corpus(),
                docs: AllDocuments = AllDocuments(),
                out: Export = Export("korp_lemgram_index/lemgram_index.sql"),
                lemgram: AnnotationAllDocs = AnnotationAllDocs("<token>:saldo.lemgram")):
    """Create lemgram index SQL file."""
    corpus = corpus.upper()
    result = defaultdict(int)

    for doc in docs:
        for lg in lemgram.read(doc):
            for value in lg.split("|"):
                if value and ":" not in value:
                    result[value] += 1

    mysql = MySQL(output=out)
    mysql.create_table(MYSQL_TABLE, drop=False, **MYSQL_INDEX)
    mysql.delete_rows(MYSQL_TABLE, {"corpus": corpus})
    mysql.set_names()

    rows = []
    for lemgram_value, freq in result.items():
        rows.append({"lemgram": lemgram_value, "corpus": corpus, "freq": freq})

    log.info("Creating SQL")
    mysql.add_row(MYSQL_TABLE, rows)

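# Illustration of the counting step above (hypothetical annotation values): in
# Sparv's format a set-valued annotation is |-delimited, so the split yields
# empty strings at the edges, and entries containing ":" (presumably compound
# analyses with probability suffixes) are skipped before counting:
#
#     >>> from collections import defaultdict
#     >>> result = defaultdict(int)
#     >>> for value in "|gå..vb.1|gå..nn.1|".split("|"):
#     ...     if value and ":" not in value:
#     ...         result[value] += 1
#     >>> sorted(result.items())
#     [('gå..nn.1', 1), ('gå..vb.1', 1)]
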
def install_original(corpus: Corpus = Corpus(),
                     xmlfile: ExportInput = ExportInput("[xml_export.filename_compressed]"),
                     out: OutputCommonData = OutputCommonData("xml_export.install_export_pretty_marker"),
                     export_path: str = Config("xml_export.export_original_path"),
                     host: str = Config("xml_export.export_original_host")):
    """Copy the compressed, combined, unscrambled XML to a remote host."""
    xml_utils.install_compressed_xml(corpus, xmlfile, out, export_path, host)

def encode_scrambled(corpus: Corpus = Corpus(),
                     annotations: ExportAnnotations = ExportAnnotations("cwb.annotations", is_input=False),
                     source_annotations: SourceAnnotations = SourceAnnotations("cwb.source_annotations"),
                     docs: AllDocuments = AllDocuments(),
                     words: AnnotationAllDocs = AnnotationAllDocs("[export.word]"),
                     vrtfiles: ExportInput = ExportInput("vrt_scrambled/{doc}.vrt", all_docs=True),
                     out: Export = Export("[cwb.corpus_registry]/[metadata.id]", absolute_path=True),
                     out_marker: Export = Export("[cwb.cwb_datadir]/[metadata.id]/.scrambled_marker",
                                                 absolute_path=True),
                     token: AnnotationAllDocs = AnnotationAllDocs("<token>"),
                     bin_path: Config = Config("cwb.bin_path"),
                     encoding: str = Config("cwb.encoding"),
                     datadir: str = Config("cwb.cwb_datadir"),
                     registry: str = Config("cwb.corpus_registry"),
                     remove_namespaces: bool = Config("export.remove_module_namespaces", False),
                     sparv_namespace: str = Config("export.sparv_namespace"),
                     source_namespace: str = Config("export.source_namespace"),
                     skip_compression: Optional[bool] = Config("cwb.skip_compression"),
                     skip_validation: Optional[bool] = Config("cwb.skip_validation")):
    """Do CWB encoding with VRT files in scrambled order."""
    cwb_encode(corpus, annotations, source_annotations, docs, words, vrtfiles, out, out_marker,
               token.name, bin_path, encoding, datadir, registry, remove_namespaces, sparv_namespace,
               source_namespace, skip_compression, skip_validation)

def timespan_sql_no_dateinfo(corpus: Corpus = Corpus(),
                             out: Export = Export("korp_timespan/timespan.sql"),
                             docs: AllDocuments = AllDocuments(),
                             token: AnnotationAllDocs = AnnotationAllDocs("<token>")):
    """Create timespan SQL data for use in Korp."""
    corpus_name = corpus.upper()
    token_count = 0

    for doc in docs:
        tokens = token.read_spans(doc)
        token_count += len(list(tokens))

    rows_date = [{
        "corpus": corpus_name,
        "datefrom": "0" * 8,
        "dateto": "0" * 8,
        "tokens": token_count
    }]
    rows_datetime = [{
        "corpus": corpus_name,
        "datefrom": "0" * 14,
        "dateto": "0" * 14,
        "tokens": token_count
    }]

    create_sql(corpus_name, out, rows_date, rows_datetime)

def timespan_sql_with_dateinfo(corpus: Corpus = Corpus(),
                               out: Export = Export("korp_timespan/timespan.sql"),
                               docs: AllDocuments = AllDocuments(),
                               token: AnnotationAllDocs = AnnotationAllDocs("<token>"),
                               datefrom: AnnotationAllDocs = AnnotationAllDocs("<text>:dateformat.datefrom"),
                               dateto: AnnotationAllDocs = AnnotationAllDocs("<text>:dateformat.dateto"),
                               timefrom: AnnotationAllDocs = AnnotationAllDocs("<text>:dateformat.timefrom"),
                               timeto: AnnotationAllDocs = AnnotationAllDocs("<text>:dateformat.timeto")):
    """Create timespan SQL data for use in Korp."""
    corpus_name = corpus.upper()
    datespans = defaultdict(int)
    datetimespans = defaultdict(int)

    for doc in docs:
        text_tokens, orphans = Annotation(datefrom.name, doc=doc).get_children(token)
        if orphans:
            datespans[("0" * 8, "0" * 8)] += len(orphans)
            datetimespans[("0" * 14, "0" * 14)] += len(orphans)
        dateinfo = datefrom.read_attributes(doc, (datefrom, dateto, timefrom, timeto))
        for text in text_tokens:
            d = next(dateinfo)
            datespans[(d[0].zfill(8), d[1].zfill(8))] += len(text)
            datetimespans[(d[0].zfill(8) + d[2].zfill(6), d[1].zfill(8) + d[3].zfill(6))] += len(text)

    rows_date = []
    rows_datetime = []
    for span in datespans:
        rows_date.append({
            "corpus": corpus_name,
            "datefrom": span[0],
            "dateto": span[1],
            "tokens": datespans[span]
        })
    for span in datetimespans:
        rows_datetime.append({
            "corpus": corpus_name,
            "datefrom": span[0],
            "dateto": span[1],
            "tokens": datetimespans[span]
        })

    create_sql(corpus_name, out, rows_date, rows_datetime)

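# Worked example of the zero-padding above (hypothetical values): date keys are
# padded to 8 digits (YYYYMMDD) and datetime keys to 14 (YYYYMMDDhhmmss), so a
# year-800 date still groups and sorts correctly:
#
#     >>> "8000101".zfill(8)
#     '08000101'
#     >>> "8000101".zfill(8) + "1200".zfill(6)
#     '08000101001200'
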
def freq_list_simple(corpus: Corpus = Corpus(),
                     docs: AllDocuments = AllDocuments(),
                     word: AnnotationAllDocs = AnnotationAllDocs("<token:word>"),
                     pos: AnnotationAllDocs = AnnotationAllDocs("<token:pos>"),
                     baseform: AnnotationAllDocs = AnnotationAllDocs("<token:baseform>"),
                     out: Export = Export("frequency_list/stats_[metadata.id].csv"),
                     delimiter: str = Config("stats_export.delimiter"),
                     cutoff: int = Config("stats_export.cutoff")):
    """Create a word frequency list for a corpus without sense, lemgram and complemgram annotations."""
    freq_dict = defaultdict(int)

    for doc in docs:
        simple_tokens = word.read_attributes(doc, [word, pos, baseform])

        # Add empty annotations for sense, lemgram and complemgram
        tokens = []
        for w, p, b in simple_tokens:
            tokens.append((w, p, b, "|", "|", "|"))
        update_freqs(tokens, freq_dict)

    write_csv(out, freq_dict, delimiter, cutoff)

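# Note on the "|" padding above: Sparv writes set-valued annotations as
# |-delimited strings where an empty set is "|", so ("|", "|", "|") marks the
# absent sense, lemgram and complemgram values, keeping the tuple shape
# identical to the one produced by freq_list below.
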
def freq_list(corpus: Corpus = Corpus(),
              docs: AllDocuments = AllDocuments(),
              word: AnnotationAllDocs = AnnotationAllDocs("<token:word>"),
              msd: AnnotationAllDocs = AnnotationAllDocs("<token:msd>"),
              baseform: AnnotationAllDocs = AnnotationAllDocs("<token:baseform>"),
              sense: AnnotationAllDocs = AnnotationAllDocs("<token:sense>"),
              lemgram: AnnotationAllDocs = AnnotationAllDocs("<token>:saldo.lemgram"),
              complemgram: AnnotationAllDocs = AnnotationAllDocs("<token>:saldo.complemgram"),
              out: Export = Export("frequency_list/stats_[metadata.id].csv"),
              delimiter: str = Config("stats_export.delimiter"),
              cutoff: int = Config("stats_export.cutoff"),
              include_all_compounds: bool = Config("stats_export.include_all_compounds")):
    """Create a word frequency list for the entire corpus.

    Args:
        corpus (str, optional): The corpus ID. Defaults to Corpus().
        docs (list, optional): The documents belonging to this corpus. Defaults to AllDocuments().
        word (str, optional): Word annotations. Defaults to AnnotationAllDocs("<token:word>").
        msd (str, optional): MSD annotations. Defaults to AnnotationAllDocs("<token:msd>").
        baseform (str, optional): Baseform annotations. Defaults to AnnotationAllDocs("<token:baseform>").
        sense (str, optional): Sense annotations. Defaults to AnnotationAllDocs("<token:sense>").
        lemgram (str, optional): Lemgram annotations. Defaults to AnnotationAllDocs("<token>:saldo.lemgram").
        complemgram (str, optional): Compound lemgram annotations.
            Defaults to AnnotationAllDocs("<token>:saldo.complemgram").
        out (str, optional): The output word frequency file.
            Defaults to Export("frequency_list/stats_[metadata.id].csv").
        delimiter (str, optional): Column delimiter to use in the CSV.
            Defaults to Config("stats_export.delimiter").
        cutoff (int, optional): The minimum frequency a word must have in order to be included in the
            result. Defaults to Config("stats_export.cutoff").
        include_all_compounds (bool, optional): Whether to include compound analyses for every word or
            just for the words that are lacking a sense annotation.
            Defaults to Config("stats_export.include_all_compounds").
    """
    freq_dict = defaultdict(int)

    for doc in docs:
        tokens = word.read_attributes(doc, [word, msd, baseform, sense, lemgram, complemgram])
        update_freqs(tokens, freq_dict, include_all_compounds)

    write_csv(out, freq_dict, delimiter, cutoff)

def combined(corpus: Corpus = Corpus(),
             out: Export = Export("[xml_export.filename_combined]"),
             docs: AllDocuments = AllDocuments(),
             xml_input: ExportInput = ExportInput("xml_pretty/[xml_export.filename]", all_docs=True)):
    """Combine XML export files into a single XML file."""
    xml_utils.combine(corpus, out, docs, xml_input)

def json_export(out: Export = Export("sbx_metadata/[metadata.id].json"),
                corpus_id: Corpus = Corpus(),
                lang: Language = Language(),
                metadata: dict = Config("metadata"),
                sentences: AnnotationCommonData = AnnotationCommonData("misc.<sentence>_count"),
                tokens: AnnotationCommonData = AnnotationCommonData("misc.<token>_count"),
                korp_protected: bool = Config("korp.protected"),
                korp_mode: str = Config("korp.mode"),
                md_trainingdata: bool = Config("sbx_metadata.trainingdata"),
                md_xml_export: str = Config("sbx_metadata.xml_export"),
                md_stats_export: bool = Config("sbx_metadata.stats_export"),
                md_korp: bool = Config("sbx_metadata.korp"),
                md_downloads: list = Config("sbx_metadata.downloads"),
                md_interface: list = Config("sbx_metadata.interface"),
                md_contact: dict = Config("sbx_metadata.contact_info")):
    """Export corpus metadata to JSON format."""
    md_obj = {}
    md_obj["id"] = corpus_id
    md_obj["type"] = "corpus"
    md_obj["trainingdata"] = md_trainingdata

    # Set language info
    md_obj["lang"] = [{
        "code": lang,
        "name_en": languages.get(part3=lang).name if lang in languages.part3 else lang,
        "name_sv": Language.get(lang).display_name("swe"),
    }]

    # Set name and description
    md_obj["name_en"] = metadata.get("name", {}).get("eng")
    md_obj["name_sv"] = metadata.get("name", {}).get("swe")
    md_obj["description_en"] = metadata.get("description", {}).get("eng")
    md_obj["description_sv"] = metadata.get("description", {}).get("swe")

    # Set downloads
    downloads = []
    downloads.append(metadata_utils.make_standard_xml_export(md_xml_export, corpus_id))
    downloads.append(metadata_utils.make_standard_stats_export(md_stats_export, corpus_id))
    downloads.append(metadata_utils.make_metashare(corpus_id))
    downloads.extend(md_downloads)
    md_obj["downloads"] = [d for d in downloads if d]

    # Set interface
    interface = []
    interface.append(metadata_utils.make_korp(md_korp, corpus_id, korp_mode))
    interface.extend(md_interface)
    md_obj["interface"] = [d for d in interface if d]

    # Set contact info
    if md_contact == "sbx-default":
        md_obj["contact_info"] = metadata_utils.SBX_DEFAULT_CONTACT
    else:
        md_obj["contact_info"] = md_contact

    # Set size
    md_obj["size"] = {"tokens": tokens.read(), "sentences": sentences.read()}

    # Write JSON to file
    os.makedirs(os.path.dirname(out), exist_ok=True)
    json_str = json.dumps(md_obj, ensure_ascii=False, indent=4)
    with open(out, "w") as f:
        f.write(json_str)
    logger.info("Exported: %s", out)

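# Shape of the resulting JSON file (hypothetical, abbreviated values):
#
#     {
#         "id": "mycorpus",
#         "type": "corpus",
#         "trainingdata": false,
#         "lang": [{"code": "swe", "name_en": "Swedish", "name_sv": "svenska"}],
#         "name_en": "My corpus",
#         ...
#         "downloads": [...],
#         "interface": [...],
#         "size": {"tokens": "12345", "sentences": "678"}
#     }
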
def info_date(corpus: Corpus = Corpus(),
              out_datefirst: OutputCommonData = OutputCommonData("cwb.datefirst"),
              out_datelast: OutputCommonData = OutputCommonData("cwb.datelast"),
              corpus_data_file: ExportInput = ExportInput("[cwb.corpus_registry]/[metadata.id]"),
              datefrom: AnnotationAllDocs = AnnotationAllDocs("[dateformat.out_annotation]:dateformat.datefrom"),
              dateto: AnnotationAllDocs = AnnotationAllDocs("[dateformat.out_annotation]:dateformat.dateto"),
              timefrom: AnnotationAllDocs = AnnotationAllDocs("[dateformat.out_annotation]:dateformat.timefrom"),
              timeto: AnnotationAllDocs = AnnotationAllDocs("[dateformat.out_annotation]:dateformat.timeto"),
              remove_namespaces: bool = Config("export.remove_module_namespaces", False),
              cwb_bin_path: Config = Config("cwb.bin_path", ""),
              registry: str = Config("cwb.corpus_registry")):
    """Create datefirst and datelast files (needed for the .info file)."""
    def fix_name(name: str):
        """Remove invalid characters from annotation names and optionally remove namespaces."""
        if remove_namespaces:
            prefix, part, suffix = name.partition(":")
            suffix = suffix.split(".")[-1]
            name = prefix + part + suffix
        return name.replace(":", "_")

    def _parse_cwb_output(output):
        lines = output.decode("utf8").split("\n")
        values = ["%s %s" % (line.split("\t")[1], line.split("\t")[2]) for line in lines if line.split("\t")[-1]]
        # Fix dates with fewer than 8 digits (e.g. 8000101 --> 08000101), needed by strptime
        values = ["%s %s" % (v.split()[0].zfill(8), v.split()[1]) for v in values]
        # Convert to dates and sort, then convert to a human-readable format
        values = sorted(datetime.strptime(v, "%Y%m%d %H%M%S") for v in values)
        return [v.strftime("%Y-%m-%d %H:%M:%S") for v in values]

    # Get date and time annotation names
    datefrom_name = fix_name(datefrom.name)
    timefrom_name = fix_name(timefrom.name)
    dateto_name = fix_name(dateto.name)
    timeto_name = fix_name(timeto.name)

    # Get datefirst and write to file
    datefirst_args = ["-r", registry, "-q", corpus, datefrom_name, timefrom_name]
    datefirst_out, _ = util.system.call_binary(os.path.join(cwb_bin_path, "cwb-scan-corpus"), datefirst_args)
    datefirst = _parse_cwb_output(datefirst_out)[0]
    out_datefirst.write(str(datefirst))

    # Get datelast and write to file
    datelast_args = ["-r", registry, "-q", corpus, dateto_name, timeto_name]
    datelast_out, _ = util.system.call_binary(os.path.join(cwb_bin_path, "cwb-scan-corpus"), datelast_args)
    datelast = _parse_cwb_output(datelast_out)[-1]
    out_datelast.write(str(datelast))

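# Sketch of the cwb-scan-corpus output consumed by _parse_cwb_output above
# (hypothetical values): one tab-separated row per attribute-value combination,
# with the frequency in the first column and the date and time values after it,
# e.g.
#
#     42\t18000101\t120000
#
# which is zero-padded, parsed with strptime, sorted, and rendered as
# "1800-01-01 12:00:00"; index [0] then gives the earliest timestamp and
# [-1] the latest.
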
def relations_sql(corpus: Corpus = Corpus(),
                  out: Export = Export("korp_wordpicture/relations.sql"),
                  relations: AnnotationDataAllDocs = AnnotationDataAllDocs("korp.relations"),
                  docs: Optional[AllDocuments] = AllDocuments(),
                  doclist: str = "",
                  split: bool = False):
    """Calculate statistics of the dependencies and save to SQL files.

    - corpus is the corpus name.
    - out is the name of the SQL file which will contain the resulting SQL statements.
    - relations is the name of the relations annotation.
    - docs is a list of documents.
    - doclist can be used instead of docs, and should be a file containing the names of the docs,
      one per row.
    - split set to True leads to SQL commands being split into several parts, requiring less memory
      during creation, but installing the data will take much longer.
    """
    db_table = MYSQL_TABLE + "_" + corpus.upper()

    # Relations that will be grouped together
    rel_grouping = {
        "OO": "OBJ",
        "IO": "OBJ",
        "RA": "ADV",
        "TA": "ADV",
        "OA": "ADV"
    }

    MAX_SENTENCES = 5000

    index = 0
    string_index = -1
    strings = {}  # Maps (string, pos, ...) tuples to integer IDs (the string table)
    freq_index = {}
    sentence_count = defaultdict(int)
    doc_count = 0

    assert docs or doclist, "Missing source"

    if doclist:
        with open(doclist) as insource:
            docs = [line.strip() for line in insource]

    if len(docs) == 1:
        split = False

    for doc in docs:
        doc_count += 1
        sentences = {}

        if doc_count == 1 or split:
            freq = {}  # Frequency of (head, rel, dep)
            rel_count = defaultdict(int)  # Frequency of (rel)
            head_rel_count = defaultdict(int)  # Frequency of (head, rel)
            dep_rel_count = defaultdict(int)  # Frequency of (dep, rel)

        relations_data = relations.read(doc)

        for triple in relations_data.splitlines():
            head, headpos, rel, dep, deppos, extra, sid, refh, refd, bfhead, bfdep, wfhead, wfdep = \
                triple.split("\t")
            bfhead, bfdep, wfhead, wfdep = int(bfhead), int(bfdep), int(wfhead), int(wfdep)

            if (head, headpos) not in strings:
                string_index += 1
            head = strings.setdefault((head, headpos), string_index)

            if (dep, deppos, extra) not in strings:
                string_index += 1
            dep = strings.setdefault((dep, deppos, extra), string_index)

            rel = rel_grouping.get(rel, rel)

            if (head, rel, dep) in freq_index:
                this_index = freq_index[(head, rel, dep)]
            else:
                this_index = index
                freq_index[(head, rel, dep)] = this_index
                index += 1

            # Value layout: [index, freq, [bfhead, bfdep, wfhead, wfdep]]
            freq.setdefault(head, {}).setdefault(rel, {}).setdefault(dep, [this_index, 0, [0, 0, 0, 0]])
            freq[head][rel][dep][1] += 1  # Frequency

            if sentence_count[this_index] < MAX_SENTENCES:
                sentences.setdefault(this_index, set())
                sentences[this_index].add((sid, refh, refd))  # Sentence ID and "ref" for both head and dep
                sentence_count[this_index] += 1

            freq[head][rel][dep][2][0] = freq[head][rel][dep][2][0] or bfhead
            freq[head][rel][dep][2][1] = freq[head][rel][dep][2][1] or bfdep
            freq[head][rel][dep][2][2] = freq[head][rel][dep][2][2] or wfhead
            freq[head][rel][dep][2][3] = freq[head][rel][dep][2][3] or wfdep

            if bfhead and bfdep:
                rel_count[rel] += 1
            if (bfhead and bfdep) or wfhead:
                head_rel_count[(head, rel)] += 1
            if (bfhead and bfdep) or wfdep:
                dep_rel_count[(dep, rel)] += 1

        # If not the last file
        if not doc_count == len(docs):
            if split:
                # Don't print string table until the last file
                _write_sql({}, sentences, freq, rel_count, head_rel_count, dep_rel_count, out, db_table,
                           split, first=(doc_count == 1))
            else:
                # Only save sentences data, save the rest for the last file
                _write_sql({}, sentences, {}, {}, {}, {}, out, db_table, split, first=(doc_count == 1))

    # Create the final file, including the string table
    _write_sql(strings, sentences, freq, rel_count, head_rel_count, dep_rel_count, out, db_table, split,
               first=(doc_count == 1), last=True)
    log.info("Done creating SQL files")

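# Example of the relation grouping above (hypothetical values): "IO" and "OO"
# are both counted as "OBJ", and "RA"/"TA"/"OA" all collapse into "ADV", so
# Korp's Word Picture treats each group as one relation type; unknown labels
# pass through unchanged:
#
#     >>> rel_grouping = {"OO": "OBJ", "IO": "OBJ", "RA": "ADV", "TA": "ADV", "OA": "ADV"}
#     >>> rel_grouping.get("IO", "IO")
#     'OBJ'
#     >>> rel_grouping.get("SS", "SS")
#     'SS'
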
def metashare(out: Export = Export("sbx_metadata/[metadata.id].xml"),
              template: Model = Model("sbx_metadata/sbx-metashare-template.xml"),
              corpus_id: Corpus = Corpus(),
              lang: Language = Language(),
              metadata: dict = Config("metadata"),
              sentences: AnnotationCommonData = AnnotationCommonData("misc.<sentence>_count"),
              tokens: AnnotationCommonData = AnnotationCommonData("misc.<token>_count"),
              annotations: ExportAnnotations = ExportAnnotations("xml_export.annotations", is_input=False),
              korp_protected: bool = Config("korp.protected"),
              korp_mode: str = Config("korp.mode"),
              # md_linguality: str = Config("sbx_metadata.linguality"),
              md_script: str = Config("sbx_metadata.script"),
              md_xml_export: str = Config("sbx_metadata.xml_export"),
              md_stats_export: bool = Config("sbx_metadata.stats_export"),
              md_korp: bool = Config("sbx_metadata.korp"),
              md_downloads: list = Config("sbx_metadata.downloads"),
              md_interface: list = Config("sbx_metadata.interface"),
              md_contact: dict = Config("sbx_metadata.contact_info")):
    """Export corpus metadata to META-SHARE format."""
    # Parse template and handle the META-SHARE namespace
    xml = etree.parse(template.path).getroot()
    etree.register_namespace("", META_SHARE_URL)
    ns = META_SHARE_NAMESPACE

    # Set identification info
    identificationInfo = xml.find(ns + "identificationInfo")
    for i in identificationInfo.findall(ns + "resourceShortName"):
        i.text = corpus_id
    identificationInfo.find(ns + "identifier").text = corpus_id
    _set_texts(identificationInfo.findall(ns + "resourceName"), metadata.get("name", {}))
    _set_texts(identificationInfo.findall(ns + "description"), metadata.get("description", {}))

    # Set metadata creation date in metadataInfo
    xml.find(".//" + ns + "metadataCreationDate").text = str(time.strftime("%Y-%m-%d"))

    # Set availability
    if korp_protected:
        xml.find(".//" + ns + "availability").text = "available-restrictedUse"
    else:
        xml.find(".//" + ns + "availability").text = "available-unrestrictedUse"

    # Set licenceInfos
    distInfo = xml.find(".//" + ns + "distributionInfo")
    _set_licence_info([metadata_utils.make_standard_xml_export(md_xml_export, corpus_id)], distInfo)
    _set_licence_info([metadata_utils.make_standard_stats_export(md_stats_export, corpus_id)], distInfo)
    _set_licence_info([metadata_utils.make_korp(md_korp, corpus_id, korp_mode)], distInfo, download=False)
    _set_licence_info([metadata_utils.make_metashare(corpus_id)], distInfo)
    # Add non-standard licenceInfos
    _set_licence_info(md_downloads, distInfo)
    _set_licence_info(md_interface, distInfo, download=False)

    # Set contactPerson
    _set_contact_info(md_contact, xml.find(".//" + ns + "contactPerson"))

    # Set samplesLocation
    xml.find(".//" + ns + "samplesLocation").text = f"{SBX_SAMPLES_LOCATION}{corpus_id}"

    # Set lingualityType
    xml.find(".//" + ns + "lingualityType").text = "monolingual"

    # Set languageInfo (languageId, languageName, languageScript)
    xml.find(".//" + ns + "languageId").text = lang
    xml.find(".//" + ns + "languageName").text = languages.get(part3=lang).name if lang in languages.part3 else lang
    xml.find(".//" + ns + "languageScript").text = md_script

    # Set sizeInfo
    sizeInfos = xml.findall(".//" + ns + "sizeInfo")
    sizeInfos[0].find(ns + "size").text = tokens.read()
    sizeInfos[1].find(ns + "size").text = sentences.read()

    # Set annotationInfo
    corpusTextInfo = xml.find(".//" + ns + "corpusTextInfo")
    _set_annotation_info(annotations, corpusTextInfo)

    # Write XML to file
    os.makedirs(os.path.dirname(out), exist_ok=True)
    etree.ElementTree(xml).write(out, encoding="unicode", method="xml", xml_declaration=True)
    logger.info("Exported: %s", out)