def pretty(doc: Document = Document(),
           docid: AnnotationData = AnnotationData("<docid>"),
           out: Export = Export("xml_pretty/[xml_export.filename]"),
           token: Annotation = Annotation("<token>"),
           word: Annotation = Annotation("[export.word]"),
           annotations: ExportAnnotations = ExportAnnotations("xml_export.annotations"),
           source_annotations: SourceAnnotations = SourceAnnotations("xml_export.source_annotations"),
           header_annotations: SourceAnnotations = SourceAnnotations("xml_export.header_annotations"),
           remove_namespaces: bool = Config("export.remove_module_namespaces", False),
           sparv_namespace: str = Config("export.sparv_namespace"),
           source_namespace: str = Config("export.source_namespace"),
           include_empty_attributes: bool = Config("xml_export.include_empty_attributes")):
    """Export annotations to pretty XML in export_dir.

    Args:
        doc: Name of the original document.
        docid: Annotation with document IDs.
        out: Path and filename pattern for the resulting file.
        token: Annotation containing the tokens.
        word: Annotation containing the token strings.
        annotations: List of elements:attributes (annotations) to include.
        source_annotations: List of elements:attributes from the original document
            to be kept. If not specified, everything will be kept.
        header_annotations: List of header elements from the original document to
            include in the export. If not specified, all headers will be kept.
        remove_namespaces: Whether to remove module "namespaces" from element and
            attribute names. Disabled by default.
        sparv_namespace: The namespace to be added to all Sparv annotations.
        source_namespace: The namespace to be added to all annotations present in
            the source.
        include_empty_attributes: Whether to include attributes even when they are
            empty. Disabled by default.
    """
    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)
    token_name = token.name

    # Read words and document ID
    word_annotation = list(word.read())
    docid_annotation = docid.read()

    # Get annotation spans, annotations list etc.
    annotation_list, _, export_names = util.get_annotation_names(
        annotations, source_annotations, doc=doc, token_name=token_name,
        remove_namespaces=remove_namespaces, sparv_namespace=sparv_namespace,
        source_namespace=source_namespace)
    h_annotations, h_export_names = util.get_header_names(header_annotations, doc=doc)
    export_names.update(h_export_names)
    span_positions, annotation_dict = util.gather_annotations(
        annotation_list, export_names, h_annotations, doc=doc, split_overlaps=True)
    xmlstr = xml_utils.make_pretty_xml(
        span_positions, annotation_dict, export_names, token_name, word_annotation,
        docid_annotation, include_empty_attributes, sparv_namespace)

    # Write XML to file
    with open(out, mode="w") as outfile:
        outfile.write(xmlstr)
    log.info("Exported: %s", out)
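# A minimal sketch of the element nesting this export produces, built with only
# the standard library (xml_utils.make_pretty_xml itself is not shown in this
# excerpt). Element and attribute names below are made-up examples.
from xml.etree import ElementTree as etree

def _pretty_xml_sketch() -> str:
    root = etree.Element("text")
    sent = etree.SubElement(root, "sentence", id="s1")
    for word, pos in [("Hello", "UH"), ("world", "NN")]:
        tok = etree.SubElement(sent, "token", pos=pos)
        tok.text = word
    etree.indent(root)  # indent in place (available from Python 3.9)
    return etree.tostring(root, encoding="unicode")

# _pretty_xml_sketch() returns:
# <text>
#   <sentence id="s1">
#     <token pos="UH">Hello</token>
#     <token pos="NN">world</token>
#   </sentence>
# </text>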
def vrt_scrambled(doc: Document = Document(),
                  out: Export = Export("vrt_scrambled/{doc}.vrt"),
                  chunk: Annotation = Annotation("[cwb.scramble_on]"),
                  chunk_order: Annotation = Annotation("[cwb.scramble_on]:misc.number_random"),
                  token: Annotation = Annotation("<token>"),
                  word: Annotation = Annotation("[export.word]"),
                  annotations: ExportAnnotations = ExportAnnotations("cwb.annotations"),
                  source_annotations: SourceAnnotations = SourceAnnotations("cwb.source_annotations"),
                  remove_namespaces: bool = Config("export.remove_module_namespaces", False),
                  sparv_namespace: str = Config("export.sparv_namespace"),
                  source_namespace: str = Config("export.source_namespace")):
    """Export annotations to VRT in scrambled order."""
    # Get annotation spans, annotations list etc.
    annotation_list, token_attributes, export_names = util.get_annotation_names(
        annotations, source_annotations, doc=doc, token_name=token.name,
        remove_namespaces=remove_namespaces, sparv_namespace=sparv_namespace,
        source_namespace=source_namespace)
    if chunk not in annotation_list:
        raise util.SparvErrorMessage(
            "The annotation used for scrambling ({}) needs to be included in the output.".format(chunk))
    span_positions, annotation_dict = util.gather_annotations(
        annotation_list, export_names, doc=doc, split_overlaps=True)

    # Read words and chunk order
    word_annotation = list(word.read())
    chunk_order_data = list(chunk_order.read())

    # Reorder chunks and open/close tags in correct order
    new_span_positions = util.scramble_spans(span_positions, chunk.name, chunk_order_data)

    # Make VRT format
    vrt_data = create_vrt(new_span_positions, token.name, word_annotation,
                          token_attributes, annotation_dict, export_names)

    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)

    # Write result to file
    with open(out, "w") as f:
        f.write(vrt_data)
    log.info("Exported: %s", out)
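# The scrambling itself boils down to sorting chunks by a precomputed random
# number annotation. A self-contained toy version of that idea (names are
# illustrative; the real work happens in util.scramble_spans):
def _scramble_sketch(chunks, chunk_order):
    """Return chunks reordered by their random-number annotation values."""
    return [chunk for _, chunk in sorted(zip(chunk_order, chunks))]

# _scramble_sketch(["chunk A", "chunk B", "chunk C"], ["2", "3", "1"])
# -> ["chunk C", "chunk A", "chunk B"]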
def vrt(doc: Document = Document(),
        out: Export = Export("vrt/{doc}.vrt"),
        token: Annotation = Annotation("<token>"),
        word: Annotation = Annotation("[export.word]"),
        annotations: ExportAnnotations = ExportAnnotations("cwb.annotations"),
        source_annotations: SourceAnnotations = SourceAnnotations("cwb.source_annotations"),
        remove_namespaces: bool = Config("export.remove_module_namespaces", False),
        sparv_namespace: str = Config("export.sparv_namespace"),
        source_namespace: str = Config("export.source_namespace")):
    """Export annotations to VRT.

    - annotations: list of elements:attributes (annotations) to include.
    - source_annotations: list of elements:attributes from the original document
      to be kept. If not specified, everything will be kept.
    """
    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)

    # Read words
    word_annotation = list(word.read())

    # Get annotation spans, annotations list etc.
    annotation_list, token_attributes, export_names = util.get_annotation_names(
        annotations, source_annotations, doc=doc, token_name=token.name,
        remove_namespaces=remove_namespaces, sparv_namespace=sparv_namespace,
        source_namespace=source_namespace)
    span_positions, annotation_dict = util.gather_annotations(annotation_list, export_names, doc=doc)
    vrt_data = create_vrt(span_positions, token.name, word_annotation,
                          token_attributes, annotation_dict, export_names)

    # Write result to file
    with open(out, "w") as f:
        f.write(vrt_data)
    log.info("Exported: %s", out)
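# For orientation, VRT (the CWB input format) mixes XML-style structural tags
# with one token per line, the token attributes separated by tabs in the order
# given by the annotations list. A hand-written example of the output shape:
_VRT_EXAMPLE = (
    "<text>\n"
    "<sentence>\n"
    "Hello\tUH\n"   # word <TAB> pos
    "world\tNN\n"
    "</sentence>\n"
    "</text>"
)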
def preserved_format(doc: Document = Document(),
                     text: Text = Text(),
                     docid: AnnotationData = AnnotationData("<docid>"),
                     out: Export = Export("xml_preserved_format/[xml_export.filename_formatted]"),
                     annotations: ExportAnnotations = ExportAnnotations("xml_export.annotations"),
                     source_annotations: SourceAnnotations = SourceAnnotations("xml_export.source_annotations"),
                     header_annotations: SourceAnnotations = SourceAnnotations("xml_export.header_annotations"),
                     remove_namespaces: bool = Config("export.remove_module_namespaces", False),
                     sparv_namespace: str = Config("export.sparv_namespace"),
                     source_namespace: str = Config("export.source_namespace"),
                     include_empty_attributes: bool = Config("xml_export.include_empty_attributes")):
    """Export annotations to XML in export_dir, keeping the whitespace and indentation of the original file.

    Args:
        doc: Name of the original document.
        text: The corpus text.
        docid: Annotation with document IDs.
        out: Path and filename pattern for the resulting file.
        annotations: List of elements:attributes (annotations) to include.
        source_annotations: List of elements:attributes from the original document
            to be kept. If not specified, everything will be kept.
        header_annotations: List of header elements from the original document to
            include in the export. If not specified, all headers will be kept.
        remove_namespaces: Whether to remove module "namespaces" from element and
            attribute names. Disabled by default.
        sparv_namespace: The namespace to be added to all Sparv annotations.
        source_namespace: The namespace to be added to all annotations present in
            the source.
        include_empty_attributes: Whether to include attributes even when they are
            empty. Disabled by default.
    """
    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)

    # Read corpus text and document ID
    corpus_text = text.read()
    docid = docid.read()

    # Get annotation spans, annotations list etc.
    annotation_list, _, export_names = util.get_annotation_names(
        annotations, source_annotations, doc=doc, remove_namespaces=remove_namespaces,
        sparv_namespace=sparv_namespace, source_namespace=source_namespace)
    h_annotations, h_export_names = util.get_header_names(header_annotations, doc=doc)
    export_names.update(h_export_names)
    span_positions, annotation_dict = util.gather_annotations(
        annotation_list, export_names, h_annotations, doc=doc, flatten=False, split_overlaps=True)
    sorted_positions = [(pos, span[0], span[1]) for pos, spans in sorted(span_positions.items())
                        for span in spans]

    # Root tag sanity check
    if not xml_utils.valid_root(sorted_positions[0], sorted_positions[-1]):
        raise util.SparvErrorMessage(
            "Root tag is missing! "
            "If you have manually specified which elements to include, "
            "make sure to include an element that encloses all other included elements and "
            "text content.")

    # Create root node
    root_span = sorted_positions[0][2]
    root_span.set_node()
    node_stack = []
    last_pos = 0  # Keeps track of the position of the processed text

    for x, (_pos, instruction, span) in enumerate(sorted_positions):
        # Open node: create a child node under the top stack node
        if instruction == "open":
            # Set tail for previous node if necessary
            if last_pos < span.start:
                # Get last closing node in this position
                _, tail_span = [i for i in span_positions[last_pos] if i[0] == "close"][-1]
                tail_span.node.tail = corpus_text[last_pos:span.start]
                last_pos = span.start

            # Handle headers
            if span.is_header:
                header = annotation_dict[span.name][util.HEADER_CONTENTS][span.index]
                header_xml = etree.fromstring(header)
                header_xml.tag = span.export  # Rename element if needed
                span.node = header_xml
                node_stack[-1].node.append(header_xml)
            else:
                if node_stack:  # Don't create root node, it already exists
                    span.set_node(parent_node=node_stack[-1].node)

                xml_utils.add_attrs(span.node, span.name, annotation_dict, export_names,
                                    span.index, include_empty_attributes)
                if span.overlap_id:
                    if sparv_namespace:
                        span.node.set(f"{sparv_namespace}.{util.OVERLAP_ATTR}",
                                      f"{docid}-{span.overlap_id}")
                    else:
                        span.node.set(f"{util.SPARV_DEFAULT_NAMESPACE}.{util.OVERLAP_ATTR}",
                                      f"{docid}-{span.overlap_id}")
                node_stack.append(span)

                # Set text if there should be any between this node and the next one
                next_item = sorted_positions[x + 1]
                if next_item[1] == "open" and next_item[2].start > span.start:
                    span.node.text = corpus_text[last_pos:next_item[2].start]
                    last_pos = next_item[2].start

        # Close node
        else:
            if span.is_header:
                continue
            if last_pos < span.end:
                # Set node text if necessary
                if span.start == last_pos:
                    span.node.text = corpus_text[last_pos:span.end]
                # Set tail for previous node if necessary
                else:
                    # Get last closing node in this position
                    _, tail_span = [i for i in span_positions[last_pos] if i[0] == "close"][-1]
                    tail_span.node.tail = corpus_text[last_pos:span.end]
                last_pos = span.end

            # Make sure the closing node matches the top stack node
            assert span == node_stack[-1], "Overlapping elements found: {}".format(node_stack[-2:])
            # Pop stack and move on to the next span
            node_stack.pop()

    # Write XML to file
    etree.ElementTree(root_span.node).write(out, encoding="unicode", method="xml", xml_declaration=True)
    log.info("Exported: %s", out)
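# This export relies on ElementTree's text/tail model: text between an opening
# tag and its first child goes into .text, while text that follows a closing
# tag is stored in the *preceding* node's .tail. A minimal standalone demo:
from xml.etree import ElementTree as etree

def _text_tail_demo() -> str:
    root = etree.Element("text")
    root.text = "before "
    child = etree.SubElement(root, "hi")
    child.text = "marked"
    child.tail = " after"
    return etree.tostring(root, encoding="unicode")

# _text_tail_demo() == "<text>before <hi>marked</hi> after</text>"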
def csv(doc: Document = Document(),
        out: Export = Export("csv/{doc}.csv"),
        token: Annotation = Annotation("<token>"),
        word: Annotation = Annotation("[export.word]"),
        sentence: Annotation = Annotation("<sentence>"),
        annotations: ExportAnnotations = ExportAnnotations("csv_export.annotations"),
        source_annotations: SourceAnnotations = SourceAnnotations("csv_export.source_annotations"),
        remove_namespaces: bool = Config("export.remove_module_namespaces", False),
        sparv_namespace: str = Config("export.sparv_namespace"),
        source_namespace: str = Config("export.source_namespace"),
        delimiter: str = Config("csv_export.delimiter")):
    """Export annotations to CSV format."""
    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)
    token_name = token.name

    # Read words
    word_annotation = list(word.read())

    # Get annotation spans, annotations list etc.
    annotation_list, token_attributes, export_names = util.get_annotation_names(
        annotations, source_annotations, doc=doc, token_name=token_name,
        remove_namespaces=remove_namespaces, sparv_namespace=sparv_namespace,
        source_namespace=source_namespace)
    span_positions, annotation_dict = util.gather_annotations(annotation_list, export_names, doc=doc)

    # Make CSV header
    csv_data = [_make_header(token_name, token_attributes, export_names, delimiter)]

    # Go through span_positions and add to csv_data, line by line
    for _pos, instruction, span in span_positions:
        if instruction == "open":
            # Create token line
            if span.name == token_name:
                csv_data.append(_make_token_line(word_annotation[span.index], token_name,
                                                 token_attributes, annotation_dict,
                                                 span.index, delimiter))
            # Create line with structural annotation
            else:
                attrs = _make_attrs(span.name, annotation_dict, export_names, span.index)
                for attr in attrs:
                    csv_data.append(f"# {attr}")
                if not attrs:
                    csv_data.append(f"# {span.export}")
        # Insert a blank line after each closing sentence
        elif span.name == sentence.name and instruction == "close":
            csv_data.append("")

    # Write result to file
    with open(out, "w") as f:
        f.write("\n".join(csv_data))
    logger.info("Exported: %s", out)
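# Illustrative sketch of the resulting file (hypothetical values; the actual
# columns depend on the configured annotations and delimiter): a header line,
# "#"-prefixed lines for structural annotations, one delimited line per token,
# and a blank line after each sentence.
def _csv_sketch(delimiter: str = "\t") -> str:
    lines = [
        delimiter.join(["token", "pos"]),   # header
        "# sentence:id = s1",               # structural annotation line
        delimiter.join(["Hello", "UH"]),    # token lines
        delimiter.join(["world", "NN"]),
        "",                                 # blank line closing the sentence
    ]
    return "\n".join(lines)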
def conllu(doc: Document = Document(),
           out: Export = Export("conll/{doc}.conllu"),
           token: Annotation = Annotation("<token>"),
           sentence: Annotation = Annotation("<sentence>"),
           sentence_id: Annotation = Annotation("[conll_export.conll_fields.sentid]"),
           source_annotations: SourceAnnotations = SourceAnnotations("conll_export.source_annotations"),
           id_ref: Optional[Annotation] = Annotation("[conll_export.conll_fields.id]"),
           form: Optional[Annotation] = Annotation("[export.word]"),
           lemma: Optional[Annotation] = Annotation("[conll_export.conll_fields.lemma]"),
           upos: Optional[Annotation] = Annotation("[conll_export.conll_fields.upos]"),
           xpos: Optional[Annotation] = Annotation("[conll_export.conll_fields.xpos]"),
           feats: Optional[Annotation] = Annotation("[conll_export.conll_fields.feats]"),
           head: Optional[Annotation] = Annotation("[conll_export.conll_fields.head]"),
           deprel: Optional[Annotation] = Annotation("[conll_export.conll_fields.deprel]"),
           deps: Optional[Annotation] = Annotation("[conll_export.conll_fields.deps]"),
           misc: Optional[Annotation] = Annotation("[conll_export.conll_fields.misc]")):
    """Export annotations to CoNLL-U format."""
    # CoNLL-U specification: https://universaldependencies.org/format.html
    # ID: Word index, an integer starting at 1 for each new sentence; may be a range
    #     for multiword tokens; may be a decimal number for empty nodes (decimal
    #     numbers can be lower than 1 but must be greater than 0).
    # FORM: Word form or punctuation symbol.
    # LEMMA: Lemma or stem of word form.
    # UPOS: Universal part-of-speech tag.
    # XPOS: Language-specific part-of-speech tag; underscore if not available.
    # FEATS: List of morphological features from the universal feature inventory or
    #     from a defined language-specific extension; underscore if not available.
    # HEAD: Head of the current word, which is either a value of ID or zero (0).
    # DEPREL: Universal dependency relation to the HEAD (root iff HEAD = 0) or a
    #     defined language-specific subtype of one.
    # DEPS: Enhanced dependency graph in the form of a list of head-deprel pairs.
    # MISC: Any other annotation.
    conll_fields = [id_ref, form, lemma, upos, xpos, feats, head, deprel, deps, misc]
    conll_fields = [f if isinstance(f, Annotation) else Annotation() for f in conll_fields]

    # Create export dir
    os.makedirs(os.path.dirname(out), exist_ok=True)
    token_name = token.name

    # Get annotation spans, annotations list etc.
    # TODO: Add structural annotations from 'annotations'? This is a bit annoying though,
    # because then we'd have to take annotations as a requirement, which results in Sparv
    # having to run all annotations, even the ones we don't want to use here.
    annotations = [sentence, sentence_id, token] + conll_fields
    annotations = [(annot, None) for annot in annotations]
    annotation_list, _, export_names = util.get_annotation_names(
        annotations, source_annotations, remove_namespaces=True, doc=doc, token_name=token_name)
    span_positions, annotation_dict = util.gather_annotations(annotation_list, export_names, doc=doc)

    csv_data = ["# global.columns = ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC"]

    # Go through span_positions and add to csv_data, line by line
    for _pos, instruction, span in span_positions:
        if instruction == "open":
            # Create token line
            if span.name == token_name:
                csv_data.append(_make_conll_token_line(conll_fields, token_name, annotation_dict, span.index))
            # Create line with structural annotation
            else:
                attrs = _make_attrs(span.name, annotation_dict, export_names, span.index)
                for attr in attrs:
                    csv_data.append(f"# {attr}")
                if not attrs:
                    csv_data.append(f"# {span.export}")
        # Insert a blank line after each closing sentence
        elif span.name == sentence.name and instruction == "close":
            csv_data.append("")

    # Insert an extra blank line to make the CoNLL-U validator happy
    csv_data.append("")

    # Write result to file
    with open(out, "w") as f:
        f.write("\n".join(csv_data))
    logger.info("Exported: %s", out)
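# For reference, a CoNLL-U sentence block in this shape has ten tab-separated
# fields per token, "_" for unavailable values, and a blank line terminating
# the sentence (the token values below are illustrative):
_CONLLU_EXAMPLE = "\n".join([
    "# global.columns = ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC",
    "# sent_id = s1",
    "1\tHello\thello\tINTJ\t_\t_\t0\troot\t_\t_",
    "2\tworld\tworld\tNOUN\t_\t_\t1\tvocative\t_\t_",
    "",
])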