def get_parameters_code(event_fields: dict, parameter_text: str, qm_class: etree._Element, qm_documentation: etree._Element):
    """Populate a QM class element with attribute nodes parsed from parameter text.

    :param event_fields: mapping updated in place for parameters recognised
        as event fields
    :param parameter_text: newline-separated text with parameters data
    :param qm_class: xml element that receives one <attribute> per parameter
    :param qm_documentation: xml element that receives unparsable lines
    :return: None
    """
    rejected = []
    for raw_line in parameter_text.split("\n"):
        entry = raw_line.strip()
        if is_parameter_event_field(entry):
            update_event_fields(entry, event_fields)
        name, type_, visibility, comment = get_parameters_data(entry)
        if not name:
            rejected.append(entry)
            continue
        attribute_node = etree.SubElement(
            qm_class,
            "attribute",
            name=name,
            type=type_,
            visibility=visibility,
            properties="0x00",
        )
        # NOTE(review): this rebinds the qm_documentation parameter, so the
        # final assignment below targets the documentation node of the last
        # successfully parsed attribute (if any), not necessarily the element
        # passed in by the caller — confirm this is intended.
        qm_documentation = etree.SubElement(attribute_node, "documentation")
        qm_documentation.text = comment
    qm_documentation.text = "".join("\n" + entry for entry in rejected)
def _handle_text(cls, node: etree._Element, do_handle_tail_instead=False):
    """Split node.text (or node.tail) into word nodes and splice them into the tree.

    Text goes at the start of the node itself; tail content is inserted into
    the node's parent immediately after the node.
    """
    if do_handle_tail_instead:
        source = node.tail
        if not source or not source.strip():
            return
        text = source
        node.tail = ''
        target = node.getparent()
        offset = target.index(node) + 1
    else:
        source = node.text
        if not source or not source.strip():
            return
        text = source.strip()
        node.text = ''
        target = node
        offset = 0

    word_nodes = cls._str_2_word_nodes(text)

    # The word nodes all inherit the class of the insertion parent; this is
    # consumed later during post-processing.
    parent_class = target.attrib.get('class', '')
    for word_node in word_nodes:
        word_node.attrib[cls.PARENT_CLASS_ATTRIB_NAME] = parent_class

    # Insert the newly created word nodes at the computed position.
    for position, word_node in enumerate(word_nodes, start=offset):
        target.insert(position, word_node)
def _handle_fill(elem: etree._Element, val: Union[str, list]):
    """Write pt:fill substitution values into elem.text.

    A single value with no format tokens replaces the text outright;
    otherwise each FILL_TOKEN occurrence is filled positionally.
    Raises PTEvalError when the value count and token count disagree.
    """
    values = val if isinstance(val, list) else [val]
    existing = elem.text if elem.text is not None else ""
    token_count = existing.count(FILL_TOKEN)

    if existing and token_count:
        if token_count != len(values):
            _only = " only" if token_count < len(values) else ""
            _s = "s" if len(values) > 1 else ""
            raise PTEvalError(
                f"{PT_NS['prefix']}:fill yielded {len(values)} substitution{_s} but"
                f" the element's text{_only} contains {token_count} format tokens",
                elem,
            )
        elem.text = elem.text.format(*values)
    else:
        if len(values) != 1:
            _issue = "is empty" if not existing else "contains no format tokens"
            raise PTEvalError(
                f"{PT_NS['prefix']}:fill yielded {len(values)} substitutions but the"
                f" element's text {_issue}",
                elem,
            )
        # TODO: log weak warning if existing text? e.g.:
        # if len(text.strip()):
        #     print(f"overwriting {text} with {val[0]}")
        elem.text = values[0]
def render(
    self,
    node: etree._Element,
    value: typing.Union[list, dict, "CompoundValue"],
    xsd_type: "ComplexType" = None,
    render_path=None,
) -> None:
    """Serialize value via self.xmlvalue and store it as the node's text.

    An explicit xsd_type override is not supported by this renderer.
    """
    assert xsd_type is None
    rendered = self.xmlvalue(value)
    node.text = rendered
def add_italics(parent_element: etree._Element, title: str) -> None:
    """Append *title* to *parent_element*, wrapping italic runs in <emph> elements.

    Underscores delimit italic spans: ``"a_b_c"`` renders as text ``"a"``,
    an italic ``"b"``, then tail text ``"c"``.

    :param parent_element: element whose text/children receive the title
    :param title: title text, with ``_..._`` marking italic spans
    :raises ValueError: if the number of ``_`` markers is odd (unbalanced)
    """
    if "_" not in title:
        parent_element.text = title
        return
    # BUG FIX: the original used `not title.count("_") // 2`, which only
    # rejects a single underscore; odd counts >= 3 (also unbalanced) slipped
    # through. Parity is the correct test.
    if title.count("_") % 2:
        raise ValueError(
            f"Unbalanced amount of italics indicators '_' in {title}")
    title_split = title.split("_")
    parent_element.text = title_split[0]
    # Segments at even indices of this enumeration (odd positions in
    # title_split) are italic; the segment after each becomes the <emph> tail.
    # BUG FIX: the original used `not index // 2`, which wrapped index 1 (a
    # non-italic tail segment) and skipped every index >= 2.
    for index, string in enumerate(title_split[1:-1]):
        if index % 2 == 0:
            emph = etree.Element("emph", {"render": "italic"})
            emph.text = string
            emph.tail = title_split[index + 2]
            parent_element.append(emph)
def render(
    self,
    node: etree._Element,
    value: typing.Union[list, dict, CompoundValue],
    xsd_type: "ComplexType" = None,
    render_path=None,
) -> None:
    """Render value into node.text, emitting an xsi:nil marker for Nil values.

    An explicit xsd_type override is not supported by this renderer.
    """
    assert xsd_type is None
    if value is Nil:
        # Nil sentinel: mark the element as nil instead of writing text.
        node.set(xsi_ns("nil"), "true")
    else:
        node.text = self.xmlvalue(value)
def append_string(elem: ET._Element, string: Optional[str]) -> ET._Element:
    """Append text to an element, honouring mixed content.

    When the element has children, the text is appended to the last child's
    tail; otherwise it is appended to the element's own text. A None string
    is a no-op. Returns elem to allow chaining.
    """
    if string is None:
        return elem
    last_child = elem[-1] if len(elem) else None
    if last_child is not None:
        # Mixed content: trailing text lives on the last child's tail.
        last_child.tail = string if last_child.tail is None else last_child.tail + string
    else:
        elem.text = string if elem.text is None else elem.text + string
    return elem
def _manage_blur_image_link(self, item: etree._Element, description: etree._Element):
    """Rewrite image links in description (and item) to blurred thumbnail URLs."""
    template = "%s/thumbnails?url=%s&blur=true"
    imgs: list = xpath(description, ".//img")
    if imgs:
        # Structured content: rewrite each <img> src attribute directly.
        for img in imgs:
            original_src = cast(str, img.attrib["src"])
            img.attrib["src"] = template % (
                self.serving_url_prefix, quote_plus(original_src))
    else:
        # Raw text content: patch every src="..." occurrence in place.
        # NOTE(review): assumes description.text is a non-None string when no
        # <img> elements are found — confirm against callers.
        for src in re.findall('src="([^"]*)"', cast(str, description.text)):
            description.text = description.text.replace(
                src, template % (self.serving_url_prefix, quote_plus(src)))
    self.replace_img_links(
        item, self.serving_url_prefix + "/thumbnails?url=%s&blur=true")
def render(
    self,
    node: etree._Element,
    value: typing.Union[list, dict, CompoundValue],
    xsd_type: "ComplexType" = None,
    render_path=None,
) -> None:
    """Render value into node, dispatching on the value's runtime type.

    AnyObject wrappers delegate to their wrapped xsd type (or mark nil);
    CompoundValue delegates to its xsd element; anything else is serialized
    via self.xmlvalue into node.text.
    """
    assert xsd_type is None
    if isinstance(value, AnyObject):
        wrapped_type = value.xsd_type
        if wrapped_type is None:
            node.set(xsi_ns("nil"), "true")
            return
        wrapped_type.render(node, value.value, None, render_path)
        node.set(xsi_ns("type"), wrapped_type.qname)
        return
    if isinstance(value, CompoundValue):
        xsd_elm = value._xsd_elm
        xsd_elm.render(node, value, render_path)
        node.set(xsi_ns("type"), xsd_elm.qname)
        return
    node.text = self.xmlvalue(value)
def set_url_from_link(self, link: etree._Element, url: str):
    """Store the URL as the link element's text content."""
    link.text = url
def set_url_from_link(self, link: etree._Element, url: str):
    """Point the link element at url: set both its href attribute and its text."""
    link.set("href", url)
    link.text = url
def _process_elem(self, parent_state: PTState, t_elem: etree._Element) -> None:
    """Recursively process one template element against its source group.

    Handles, in order: skipping comments, multi-source subtree duplication,
    source fetching with pt:multi expansion, pt:required enforcement or
    deferral, recursion into children, and pt:fill evaluation or deferral.
    Finally strips the element's pt:* attributes via the state object.
    """
    # Comment nodes carry no template semantics; leave them untouched.
    if isinstance(t_elem, etree._Comment):
        return
    self._ext.set_elem_context(t_elem)
    qname = etree.QName(t_elem.tag)
    state = PTState(parent_state, t_elem)
    if state["reorder"]:
        self._reorder.append(state)
    # duplicate subtree for each source
    if len(state["sources"].secondary):
        # prevent triggering this processing branch on sibling passes
        del t_elem.attrib[self._pt_clark("sources")]
        # We temporarily detach the t_elem subtree and insert each elem subtree at
        # the original location of t_elem before populating, which ensures that
        # resolved paths are always in the form /path/to/elem[1]/child, which will
        # match corresponding source elements (e.g. /path/to/elem/child) in the
        # multi source fetch scenario. Caveat: downstream deferred pt:fill or
        # pt:required will be evaluated in the context of their element's final
        # path (e.g. /path/to/elem[3]/child).
        #
        # Inserting and populating the subtrees in reverse order ensures that their
        # final document order for multi source fetches is aligned with the order of
        # the source_map sources.
        parent = t_elem.getparent()
        idx = parent.index(t_elem)
        parent.remove(t_elem)
        for source in reversed(
            (state["sources"].primary, *state["sources"].secondary)
        ):
            # The primary source reuses the original subtree; every secondary
            # source gets its own deep copy.
            elem = (
                t_elem if source is state["sources"].primary else deepcopy(t_elem)
            )
            state["sources"] = SourceGroup(source)
            parent.insert(idx, elem)
            self._process_elem(state, elem)
        return
    if state["fetch"]:
        # Resolve this element's path in the template and look up the
        # corresponding element(s) in the primary source document.
        path = self.label.getelementpath(t_elem)
        s_elems = state["sources"].primary.findall(path)
        if len(s_elems) > 1:
            if state["multi"] is not True and len(s_elems) != state["multi"]:
                raise PTFetchError(
                    f"{len(s_elems)} source elements found but pt:multi is set to"
                    f" expect {int(state['multi'])}",
                    t_elem,
                )
            # cast False to 0 for readability
            self._process_multi_branch(t_elem, parent_state, len(s_elems) - 1)
            return
        elif not len(s_elems):
            # No match in the source: either a hard error (pt:required) or a
            # silent removal of the template element.
            if state["required"]:
                url = state["sources"].primary.docinfo.URL
                source_file = (
                    Path(url).name if url is not None else "<unresolved filename>"
                )
                raise PTFetchError(
                    f"{qname.localname} could not be located at path {path} in"
                    f" source {state.exp['sources']} from {source_file}",
                    # FIXME: .exp is None in descendants where source is inherited...
                    t_elem,
                )
            t_elem.getparent().remove(t_elem)
            return
        elif not len(t_elem):  # len(s_elems) == 1:
            # Leaf template element with exactly one source match: copy the
            # source element's attributes and text onto it.
            t_elem.attrib.update(s_elems[0].attrib)
            t_elem.text = s_elems[0].text
    else:
        # Non-fetch element: pt:multi > 1 still expands the branch.
        if isinstance(state["multi"], int) and state["multi"] > 1:
            self._process_multi_branch(t_elem, parent_state, state["multi"] - 1)
            return
        # non-fetch required condition; should be evaluated at export
        if state.exp["required"] is not None:
            self._deferred_reqs.append(state)
    if len(t_elem):
        # Recurse into children; fill only applies to childless elements.
        for child_elem in t_elem.getchildren():
            self._process_elem(state, child_elem)
    elif state.exp["fill"]:
        if state["defer"]:
            self._deferred_fills.append(state)
        else:
            self._handle_fill(state.t_elem, state.eval_deferred("fill"))
    state.remove_elem_pt_attrs()
def _add_kobo_spans_to_node(
    self, node: etree._Element, name: str
) -> etree._Element:
    """Recursively rewrite *node* so its text runs become numbered kobo spans.

    Text and child tails are converted via self._append_kobo_spans_from_text,
    incrementing self.paragraph_counter[name] for each converted run. Returns
    the node itself, or a wrapping <span> for SPECIAL_TAGS elements.
    """
    # process node only if it is not a comment or a processing instruction
    if (
        node is None
        or isinstance(node, etree._Comment)
        or isinstance(node, etree._ProcessingInstruction)
    ):
        if node is not None:
            # Drop the tail so it is not re-serialized alongside the skipped node.
            node.tail = None
        self.log.debug(f"[{name}] Skipping comment/ProcessingInstruction node")
        return node

    # Special case some tags. The regex strips an optional {namespace} prefix
    # from the tag to compare the local name against the tag lists.
    special_tag_match = re.search(r"^(?:\{[^\}]+\})?(\w+)$", node.tag)
    if special_tag_match:
        # Skipped tags are just flat out skipped
        if special_tag_match.group(1) in SKIPPED_TAGS:
            self.log.debug(f"[{name}] Skipping '{special_tag_match.group(1)}' tag")
            return node

        # Special tags get wrapped in a span and their children are ignored
        if special_tag_match.group(1) in SPECIAL_TAGS:
            self.log.debug(
                f"[{name}] Wrapping '{special_tag_match.group(1)}' tag and "
                + "ignoring children"
            )
            span = etree.Element(
                f"{{{XHTML_NAMESPACE}}}span",
                attrib={
                    "id": f"kobo.{self.paragraph_counter[name]}.1",
                    "class": "koboSpan",
                },
            )
            span.append(node)
            return span

    # save node content for later
    node_text = node.text
    node_children = deepcopy(node.getchildren())
    node_attrs = {}
    for key in list(node.keys()):
        node_attrs[key] = node.get(key)

    # reset current node, to start from scratch
    node.clear()

    # restore node attributes
    for key in node_attrs:
        node.set(key, node_attrs[key])

    # the node text is converted to spans
    if node_text is not None:
        if not self._append_kobo_spans_from_text(node, node_text, name):
            # didn't add spans, restore text
            node.text = node_text
        else:
            self.paragraph_counter[name] += 1

    # re-add the node children (deep copies; the originals were discarded by
    # node.clear() above)
    for child in node_children:
        # save child tail for later
        child_tail = child.tail
        child.tail = None
        node.append(self._add_kobo_spans_to_node(child, name))
        # the child tail is converted to spans
        if child_tail is not None:
            if not self._append_kobo_spans_from_text(node, child_tail, name):
                # didn't add spans, restore tail on last child
                node[-1].tail = child_tail
            else:
                self.paragraph_counter[name] += 1
    return node