border: 1px solid; padding: 0 10px; } ''' html = '''<html> <body> <a href="#1" style="color: green;">link</a> <p><a href="#2">coming: <b>b</b>link</a></p> </body> </html>''' from lxml import etree from lxml.cssselect import CSSSelector # noqa document = etree.HTML(html) e = etree.Element('pre', {'class': 'cssutils'}) e.text = css document.find('body').append(e) sheet = cssutils.parseString(css) view = {} specificities = {} # temporarily needed # TODO: filter rules simpler?, add @media rules = (rule for rule in sheet.cssRules if rule.type == rule.STYLE_RULE) for rule in rules: for selector in rule.selectorList: cssselector = CSSSelector(selector.selectorText) elements = cssselector.evaluate(document) for element in elements:
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
                    context=None, toolbar=False, submenu=False):
    """Build a dynamic wizard form for the configured mass-editing fields.

    When the context carries a ``mass_editing_object`` id, the standard
    view returned by ``super()`` is replaced by one generated on the fly:
    for every field configured on the ``mass.object`` record, a companion
    ``selection__<field>`` chooser (Set / Remove, plus Add for m2m) and
    the field itself are injected into the form, and matching entries are
    added to the returned ``fields`` dict.

    :return: the usual ``fields_view_get`` result dict; ``arch`` and
        ``fields`` are overwritten when mass-editing data is present.
    """
    result = super(MassEditingWizard, self).fields_view_get(
        cr, uid, view_id, view_type, context, toolbar, submenu)
    if context.get('mass_editing_object'):
        mass_object = self.pool['mass.object']
        editing_data = mass_object.browse(
            cr, uid, context.get('mass_editing_object'), context)
        all_fields = {}
        # Root of the generated form (old 7.0-style view architecture).
        xml_form = etree.Element('form', {
            'string': tools.ustr(editing_data.name),
            'version': '7.0'
        })
        xml_group = etree.SubElement(xml_form, 'group', {'colspan': '4'})
        etree.SubElement(xml_group, 'label', {
            'string': '',
            'colspan': '2'
        })
        xml_group = etree.SubElement(xml_form, 'group', {
            'colspan': '4',
            'col': '4'
        })
        # Field metadata of the model actually being mass-edited.
        model_obj = self.pool[context.get('active_model')]
        field_info = model_obj.fields_get(cr, uid, [], context)
        for field in editing_data.field_ids:
            if field.ttype == "many2many":
                all_fields[field.name] = field_info[field.name]
                # m2m gets an extra 'Add' action besides Set/Remove.
                all_fields["selection__" + field.name] = {
                    'type': 'selection',
                    'string': field_info[field.name]['string'],
                    'selection': [('set', 'Set'),
                                  ('remove_m2m', 'Remove'),
                                  ('add', 'Add')]
                }
                # NOTE(review): xml_group is rebound here, so subsequent
                # fields nest inside this m2m group — confirm intended.
                xml_group = etree.SubElement(xml_group, 'group',
                                             {'colspan': '4'})
                etree.SubElement(xml_group, 'separator', {
                    'string': field_info[field.name]['string'],
                    'colspan': '2'
                })
                etree.SubElement(
                    xml_group, 'field', {
                        'name': "selection__" + field.name,
                        'colspan': '2',
                        'nolabel': '1'
                    })
                etree.SubElement(
                    xml_group, 'field', {
                        'name': field.name,
                        'colspan': '4',
                        'nolabel': '1',
                        'attrs': ("{'invisible':[('selection__" +
                                  field.name + "','=','remove_m2m')]}")
                    })
            elif field.ttype == "one2many":
                all_fields["selection__" + field.name] = {
                    'type': 'selection',
                    'string': field_info[field.name]['string'],
                    'selection': [('set', 'Set'), ('remove', 'Remove')]
                }
                all_fields[field.name] = {
                    'type': field.ttype,
                    'string': field.field_description,
                    'relation': field.relation
                }
                etree.SubElement(xml_group, 'field', {
                    'name': "selection__" + field.name,
                    'colspan': '2'
                })
                etree.SubElement(
                    xml_group, 'field', {
                        'name': field.name,
                        'colspan': '4',
                        'nolabel': '1',
                        'attrs': ("{'invisible':[('selection__" +
                                  field.name + "','=','remove_o2m')]}")
                    })
            elif field.ttype == "many2one":
                all_fields["selection__" + field.name] = {
                    'type': 'selection',
                    'string': field_info[field.name]['string'],
                    'selection': [('set', 'Set'), ('remove', 'Remove')]
                }
                all_fields[field.name] = {
                    'type': field.ttype,
                    'string': field.field_description,
                    'relation': field.relation
                }
                etree.SubElement(xml_group, 'field', {
                    'name': "selection__" + field.name,
                    'colspan': '2'
                })
                etree.SubElement(
                    xml_group, 'field', {
                        'name': field.name,
                        'nolabel': '1',
                        'colspan': '2',
                        'attrs': ("{'invisible':[('selection__" +
                                  field.name + "','=','remove')]}")
                    })
            elif field.ttype == "char":
                all_fields["selection__" + field.name] = {
                    'type': 'selection',
                    'string': field_info[field.name]['string'],
                    'selection': [('set', 'Set'), ('remove', 'Remove')]
                }
                all_fields[field.name] = {
                    'type': field.ttype,
                    'string': field.field_description,
                    # Fall back to 256 when no size configured.
                    'size': field.size or 256
                }
                etree.SubElement(xml_group, 'field', {
                    'name': "selection__" + field.name,
                    'colspan': '2',
                })
                etree.SubElement(
                    xml_group, 'field', {
                        'name': field.name,
                        'nolabel': '1',
                        'attrs': ("{'invisible':[('selection__" +
                                  field.name + "','=','remove')]}"),
                        'colspan': '2'
                    })
            elif field.ttype == 'selection':
                all_fields["selection__" + field.name] = {
                    'type': 'selection',
                    'string': field_info[field.name]['string'],
                    'selection': [('set', 'Set'), ('remove', 'Remove')]
                }
                etree.SubElement(xml_group, 'field', {
                    'name': "selection__" + field.name,
                    'colspan': '2'
                })
                etree.SubElement(
                    xml_group, 'field', {
                        'name': field.name,
                        'nolabel': '1',
                        'colspan': '2',
                        'attrs': ("{'invisible':[('selection__" +
                                  field.name + "','=','remove')]}")
                    })
                all_fields[field.name] = {
                    'type': field.ttype,
                    'string': field.field_description,
                    'selection': field_info[field.name]['selection']
                }
            else:
                # Any other type (text, date, numeric, ...).
                all_fields[field.name] = {
                    'type': field.ttype,
                    'string': field.field_description
                }
                all_fields["selection__" + field.name] = {
                    'type': 'selection',
                    'string': field_info[field.name]['string'],
                    'selection': [('set', 'Set'), ('remove', 'Remove')]
                }
                if field.ttype == 'text':
                    # Text fields get their own wide group + separator.
                    xml_group = etree.SubElement(xml_group, 'group',
                                                 {'colspan': '6'})
                    etree.SubElement(
                        xml_group, 'separator', {
                            'string': all_fields[field.name]['string'],
                            'colspan': '2'
                        })
                    etree.SubElement(
                        xml_group, 'field', {
                            'name': "selection__" + field.name,
                            'colspan': '2',
                            'nolabel': '1'
                        })
                    etree.SubElement(
                        xml_group, 'field', {
                            'name': field.name,
                            'colspan': '4',
                            'nolabel': '1',
                            'attrs': ("{'invisible':[('selection__" +
                                      field.name + "','=','remove')]}")
                        })
                else:
                    # NOTE(review): redundant re-assignment of the
                    # selection entry (already set above) — kept as-is.
                    all_fields["selection__" + field.name] = {
                        'type': 'selection',
                        'string': field_info[field.name]['string'],
                        'selection': [('set', 'Set'), ('remove', 'Remove')]
                    }
                    etree.SubElement(xml_group, 'field', {
                        'name': "selection__" + field.name,
                        'colspan': '2',
                    })
                    etree.SubElement(
                        xml_group, 'field', {
                            'name': field.name,
                            'nolabel': '1',
                            'attrs': ("{'invisible':[('selection__" +
                                      field.name + "','=','remove')]}"),
                            'colspan': '2',
                        })
        etree.SubElement(xml_form, 'separator', {
            'string': '',
            'colspan': '4'
        })
        # Footer with the Apply / Close buttons.
        xml_group3 = etree.SubElement(xml_form, 'footer', {})
        etree.SubElement(
            xml_group3, 'button', {
                'string': 'Apply',
                'icon': "gtk-execute",
                'type': 'object',
                'name': "action_apply",
                'class': "oe_highlight"
            })
        etree.SubElement(xml_group3, 'button', {
            'string': 'Close',
            'icon': "gtk-close",
            'special': 'cancel'
        })
        root = xml_form.getroottree()
        result['arch'] = etree.tostring(root)
        result['fields'] = all_fields
    return result
def encode_xml(self):
    """Serialize this status object into an XML element tree.

    Builds a root element named by ``LITE_POOL_ONLINE_STATUS_ELEMENT``
    and attaches one child per field (worker, hashrate, coins), each
    carrying the corresponding attribute value as its text.

    :return: the root ``Element`` of the freshly built tree.
    """
    root = etree.Element(self.LITE_POOL_ONLINE_STATUS_ELEMENT)
    children = (
        (self.WORKER_ELEMENT, self.worker),
        (self.HASHRATE_ELEMENT, self.hashrate),
        (self.COINS_ELEMENT, self.coins),
    )
    for tag, value in children:
        etree.SubElement(root, tag).text = value
    return root
def importRecords(self, node):
    """Import a ``<records />`` node into the configuration registry.

    The node's ``interface`` attribute names the schema interface to
    register; optional ``prefix`` overrides the record prefix, and
    ``remove`` (or the deprecated ``delete``) purges records instead of
    creating them. Child ``<omit />`` / ``<value />`` nodes control
    which fields are skipped or explicitly set.
    """
    # May raise ImportError if interface can't be found or KeyError if
    # attribute is missing.
    interfaceName = node.attrib.get('interface', None)
    if interfaceName is None:
        raise KeyError(
            u"A <records /> node must have an 'interface' attribute."
        )
    # Surfaces the offending node name in error tracebacks.
    __traceback_info__ = "records name: " + interfaceName
    prefix = node.attrib.get(
        'prefix', None  # None means use interface.__identifier__
    )
    if node.attrib.get('delete') is not None:
        self.logger.warning(
            u"The 'delete' attribute of <record /> nodes is deprecated, "
            u"it should be replaced with 'remove'."
        )
    # 'remove' wins over the legacy 'delete' spelling when both appear.
    remove = node.attrib.get('remove', node.attrib.get('delete', 'false'))
    remove = remove.lower() == 'true'
    # May raise ImportError
    interface = resolve(interfaceName)
    omit = []
    values = []  # Fields that should have their value set as they don't exist yet
    for child in node:
        if not isinstance(child.tag, str):
            # Skip comments / processing instructions.
            continue
        elif child.tag.lower() == 'omit':
            if child.text:
                omit.append(six.text_type(child.text))
        elif child.tag.lower() == 'value':
            values.append(child)
    if remove and values:
        raise ValueError(
            "A <records /> node with 'remove=\"true\"' must not contain "
            "<value /> nodes."
        )
    elif remove:
        # Removal: synthesize a purge <value /> for every field of the
        # interface that was not explicitly omitted.
        for f in getFieldNames(interface):
            if f in omit:
                continue
            child = etree.Element('value', key=f, purge='True')
            values.append(child)
    # May raise TypeError
    self.context.registerInterface(
        interface, omit=tuple(omit), prefix=prefix
    )
    if not values and not remove:
        # Skip out if there are no value records to handle
        return
    # The prefix we ended up needs to be found
    if prefix is None:
        prefix = interface.__identifier__
    for value in values:
        # Wrap each value in a synthetic <record /> and delegate to the
        # single-record importer.
        field = etree.Element(
            "record",
            interface=interface.__identifier__,
            field=value.attrib["key"],
            prefix=prefix,
            remove=repr(remove).lower()
        )
        field.append(value)
        self.importRecord(field)
def _prepare_html(self, html):
    '''Divide and recreate the header/footer html by merging all found in html.
    The bodies are extracted and added to a list. Then, extract the
    specific_paperformat_args. The idea is to put all headers/footers
    together. Then, we will use a javascript trick (see minimal_layout
    template) to set the right header/footer during the processing of
    wkhtmltopdf. This allows the computation of multiple reports in a single
    call to wkhtmltopdf.

    :param html: The html rendered by render_qweb_html.
    :type: bodies: list of string representing each one a html body.
    :type header: string representing the html header.
    :type footer: string representing the html footer.
    :type specific_paperformat_args: dictionary of prioritized paperformat
        values.
    :return: bodies, res_ids, header, footer, specific_paperformat_args
    '''
    IrConfig = self.env['ir.config_parameter'].sudo()
    # 'report.url' overrides the regular base url for report rendering.
    base_url = IrConfig.get_param('report.url') or \
        IrConfig.get_param('web.base.url')

    # Return empty dictionary if 'web.minimal_layout' not found.
    layout = self.env.ref('web.minimal_layout', False)
    if not layout:
        return {}
    layout = self.env['ir.ui.view'].browse(
        self.env['ir.ui.view'].get_view_id('web.minimal_layout'))

    root = lxml.html.fromstring(html)
    # XPath matching divs carrying the given class as a whole word.
    match_klass = \
        "//div[contains(concat(' ', normalize-space(@class), ' '), ' {} ')]"

    header_node = etree.Element('div', id='minimal_layout_report_headers')
    footer_node = etree.Element('div', id='minimal_layout_report_footers')
    bodies = []
    res_ids = []

    body_parent = root.xpath('//main')[0]
    # Retrieve headers: detach them from the document and collect them
    # under a single container node.
    for node in root.xpath(match_klass.format('header')):
        body_parent = node.getparent()
        node.getparent().remove(node)
        header_node.append(node)

    # Retrieve footers
    for node in root.xpath(match_klass.format('footer')):
        body_parent = node.getparent()
        node.getparent().remove(node)
        footer_node.append(node)

    # Retrieve bodies: each <article> becomes one rendered body.
    for node in root.xpath(match_klass.format('article')):
        layout_with_lang = layout
        # set context language to body language
        if node.get('data-oe-lang'):
            layout_with_lang = layout_with_lang.with_context(
                lang=node.get('data-oe-lang'))
        body = layout_with_lang.render(dict(
            subst=False, body=lxml.html.tostring(node), base_url=base_url))
        bodies.append(body)
        # Track the record id behind each body (None when it belongs to
        # another model).
        if node.get('data-oe-model') == self.model:
            res_ids.append(int(node.get('data-oe-id', 0)))
        else:
            res_ids.append(None)

    if not bodies:
        # No <article> markers: treat whatever remains under the last
        # seen parent as a single body.
        body = bytearray().join(
            [lxml.html.tostring(c) for c in body_parent.getchildren()])
        bodies.append(body)

    # Get paperformat arguments set in the root html tag. They are
    # prioritized over paperformat-record arguments.
    specific_paperformat_args = {}
    for attribute in root.items():
        if attribute[0].startswith('data-report-'):
            specific_paperformat_args[attribute[0]] = attribute[1]

    header = layout.render(dict(
        subst=True, body=lxml.html.tostring(header_node), base_url=base_url))
    footer = layout.render(dict(
        subst=True, body=lxml.html.tostring(footer_node), base_url=base_url))

    return bodies, res_ids, header, footer, specific_paperformat_args
def generate_payment_file(self):
    """Creates the Bank Specific Credit Transfer file.

    Builds the PAIN XML document for the 'chile_credit_transfer' payment
    method (group header + document root) and hands it to
    ``finalize_sepa_file_creation``. Other payment methods are delegated
    to ``super()``.

    :raises UserError: when the configured PAIN flavor is missing or
        unsupported.
    """
    self.ensure_one()
    if self.payment_method_id.code != 'chile_credit_transfer':
        # Not ours: let another payment-method implementation handle it.
        return super(AccountPaymentOrder, self).generate_payment_file()
    pain_flavor = self.payment_method_id.pain_version
    # We use pain_flavor.startswith('pain.BBB.name.VVV')
    # to support bank-specific extensions. "BBB" is the Bank code
    # provided by SBIF, "name" is a short identifier and "VVV" is the
    # file format version since some banks have more than one or
    # will change formats for some customers
    if not pain_flavor:
        raise UserError(
            _("Bank File version '%s' is not supported.") % pain_flavor)
    if pain_flavor.startswith('pain.001.chile.001'):
        bic_xml_tag = 'BIC'
        name_maxsize = 70
        root_xml_tag = '001.chile.001'
    elif pain_flavor.startswith('pain.016.bci.001'):
        bic_xml_tag = 'BIC'
        name_maxsize = 70
        root_xml_tag = '016.bci.001'
    else:
        raise UserError(
            _("PAIN version '%s' is not supported.") % pain_flavor)
    xsd_file = self.payment_method_id.get_xsd_file_path()
    # Arguments threaded through all the generate_* helper calls.
    gen_args = {
        'bic_xml_tag': bic_xml_tag,
        'name_maxsize': name_maxsize,
        'convert_to_ascii': self.payment_method_id.convert_to_ascii,
        'payment_method': 'TRF',
        'file_prefix': 'sct_',
        'pain_flavor': pain_flavor,
        'pain_xsd_file': xsd_file,
    }
    nsmap = self.generate_pain_nsmap()
    attrib = self.generate_pain_attrib()
    xml_root = etree.Element('Document', nsmap=nsmap, attrib=attrib)
    pain_root = etree.SubElement(xml_root, root_xml_tag)
    # A. Group header
    group_header, nb_of_transactions_a, control_sum_a = \
        self.generate_group_header_block(pain_root, gen_args)
    transactions_count_a = 0
    amount_control_sum_a = 0.0
    lines_per_group = {}
    # key = (requested_date, priority, local_instrument, categ_purpose)
    # values = list of lines as object
    for line in self.bank_line_ids:
        priority = line.priority
        local_instrument = line.local_instrument
        categ_purpose = line.category_purpose
        # The field line.date is the requested payment date
        # taking into account the 'date_prefered' setting
        # cf account_banking_payment_export/models/account_payment.py
        # in the inherit of action_open()
        key = (line.date, priority, local_instrument, categ_purpose)
        if key in lines_per_group:
            lines_per_group[key].append(line)
        else:
            lines_per_group[key] = [line]
    # NOTE(review): the whole per-group payment-info / transaction
    # generation below is commented out, so only the group header is
    # emitted and the A-level counters stay at zero — confirm this is
    # intentional before shipping.
    # for (requested_date, priority, local_instrument, categ_purpose),\
    #         lines in list(lines_per_group.items()):
    #     # B. Payment info
    #     requested_date = fields.Date.to_string(requested_date)
    #     payment_info, nb_of_transactions_b, control_sum_b = \
    #         self.generate_start_payment_info_block(
    #             pain_root,
    #             "self.name + '-' "
    #             "+ requested_date.replace('-', '') + '-' + priority + "
    #             "'-' + local_instrument + '-' + category_purpose",
    #             priority, local_instrument, categ_purpose,
    #             False, requested_date, {
    #                 'self': self,
    #                 'priority': priority,
    #                 'requested_date': requested_date,
    #                 'local_instrument': local_instrument or 'NOinstr',
    #                 'category_purpose': categ_purpose or 'NOcateg',
    #             }, gen_args)
    #     self.generate_party_block(
    #         payment_info, 'Dbtr', 'B',
    #         self.company_partner_bank_id, gen_args)
    #     charge_bearer = etree.SubElement(payment_info, 'ChrgBr')
    #     if self.sepa:
    #         charge_bearer_text = 'SLEV'
    #     else:
    #         charge_bearer_text = self.charge_bearer
    #     charge_bearer.text = charge_bearer_text
    #     transactions_count_b = 0
    #     amount_control_sum_b = 0.0
    #     for line in lines:
    #         transactions_count_a += 1
    #         transactions_count_b += 1
    #         # C. Credit Transfer Transaction Info
    #         credit_transfer_transaction_info = etree.SubElement(
    #             payment_info, 'CdtTrfTxInf')
    #         payment_identification = etree.SubElement(
    #             credit_transfer_transaction_info, 'PmtId')
    #         instruction_identification = etree.SubElement(
    #             payment_identification, 'InstrId')
    #         instruction_identification.text = self._prepare_field(
    #             'Instruction Identification', 'line.name',
    #             {'line': line}, 35, gen_args=gen_args)
    #         end2end_identification = etree.SubElement(
    #             payment_identification, 'EndToEndId')
    #         end2end_identification.text = self._prepare_field(
    #             'End to End Identification', 'line.name',
    #             {'line': line}, 35, gen_args=gen_args)
    #         currency_name = self._prepare_field(
    #             'Currency Code', 'line.currency_id.name',
    #             {'line': line}, 3, gen_args=gen_args)
    #         amount = etree.SubElement(
    #             credit_transfer_transaction_info, 'Amt')
    #         instructed_amount = etree.SubElement(
    #             amount, 'InstdAmt', Ccy=currency_name)
    #         instructed_amount.text = '%.2f' % line.amount_currency
    #         amount_control_sum_a += line.amount_currency
    #         amount_control_sum_b += line.amount_currency
    #         if not line.partner_bank_id:
    #             raise UserError(
    #                 _("Bank account is missing on the bank payment line "
    #                   "of partner '%s' (reference '%s').")
    #                 % (line.partner_id.name, line.name))
    #         self.generate_party_block(
    #             credit_transfer_transaction_info, 'Cdtr',
    #             'C', line.partner_bank_id, gen_args, line)
    #         if line.purpose:
    #             purpose = etree.SubElement(
    #                 credit_transfer_transaction_info, 'Purp')
    #             etree.SubElement(purpose, 'Cd').text = line.purpose
    #         self.generate_remittance_info_block(
    #             credit_transfer_transaction_info, line, gen_args)
    #     if not pain_flavor.startswith('pain.001.001.02'):
    #         nb_of_transactions_b.text = str(transactions_count_b)
    #         control_sum_b.text = '%.2f' % amount_control_sum_b
    # if not pain_flavor.startswith('pain.001.001.02'):
    #     nb_of_transactions_a.text = str(transactions_count_a)
    #     control_sum_a.text = '%.2f' % amount_control_sum_a
    # else:
    #     nb_of_transactions_a.text = str(transactions_count_a)
    #     control_sum_a.text = '%.2f' % amount_control_sum_a
    return self.finalize_sepa_file_creation(xml_root, gen_args)
import os

# Directories for XSL stylesheets, XML data and page templates, all
# resolved relative to this module's location.
XSL_root = os.path.normpath(os.path.join(os.path.dirname(__file__), 'xsl'))
XML_root = os.path.normpath(os.path.join(os.path.dirname(__file__), 'xml'))
template_root = os.path.normpath(
    os.path.join(os.path.dirname(__file__), 'templates'))
XSL_cache_limit = 1
XML_cache_step = 1


def post(self, data, cb):
    """Default postprocessor: forward *data* to *cb* unchanged.

    :param self: owning object; only its ``log`` attribute is used here.
    :param data: payload to hand over.
    :param cb: callback invoked with the (unmodified) data.
    """
    # Fixed typo in the log message ('posprocessor' -> 'postprocessor').
    self.log.debug('postprocessor called')
    cb(data)

postprocessor = post

from lxml import etree

# NOTE(review): 'last version' looks like a placeholder value — confirm
# where/whether it is substituted with a real version number.
version = [etree.Element('app-version', number='last version')]
def _tag_template(self, el):
    # This helper transforms a <template> element into a <record> and
    # forwards it to the regular record loader.
    """Convert a ``<template>`` data node into an ``ir.ui.view`` record.

    The element is rewritten in place (tag renamed to ``t`` or ``data``,
    attributes popped) and wrapped into a synthetic ``<record>`` whose
    ``arch`` field carries the template body; the record is then imported
    via ``_tag_record``.
    """
    tpl_id = el.get('id', el.get('t-name'))
    full_tpl_id = tpl_id
    if '.' not in full_tpl_id:
        full_tpl_id = '%s.%s' % (self.module, tpl_id)
    # set the full template name for qweb <module>.<id>
    if not el.get('inherit_id'):
        el.set('t-name', full_tpl_id)
        el.tag = 't'
    else:
        # Inheriting templates keep a neutral wrapper tag.
        el.tag = 'data'
    el.attrib.pop('id', None)

    # Theme modules store their views under a dedicated model.
    if self.module.startswith('theme_'):
        model = 'theme.ir.ui.view'
    else:
        model = 'ir.ui.view'

    record_attrs = {
        'id': tpl_id,
        'model': model,
    }
    # Forward loader directives from the template to the record.
    for att in ['forcecreate', 'context']:
        if att in el.attrib:
            record_attrs[att] = el.attrib.pop(att)

    Field = builder.E.field
    name = el.get('name', tpl_id)

    record = etree.Element('record', attrib=record_attrs)
    record.append(Field(name, name='name'))
    record.append(Field(full_tpl_id, name='key'))
    record.append(Field("qweb", name='type'))
    if 'priority' in el.attrib:
        record.append(Field(el.get('priority'), name='priority'))
    if 'inherit_id' in el.attrib:
        record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
    if 'website_id' in el.attrib:
        record.append(Field(name='website_id', ref=el.get('website_id')))
    if 'key' in el.attrib:
        # An explicitly provided key overrides the computed one.
        record.append(Field(el.get('key'), name='key'))
    if el.get('active') in ("True", "False"):
        # Only set 'active' on creation, or when explicitly not updating.
        view_id = self.id_get(tpl_id, raise_if_not_found=False)
        if self.mode != "update" or not view_id:
            record.append(Field(name='active', eval=el.get('active')))
    if el.get('customize_show') in ("True", "False"):
        record.append(
            Field(name='customize_show', eval=el.get('customize_show')))
    groups = el.attrib.pop('groups', None)
    if groups:
        grp_lst = [("ref('%s')" % x) for x in groups.split(',')]
        record.append(
            Field(name="groups_id",
                  eval="[(6, 0, [" + ', '.join(grp_lst) + "])]"))
    if el.get('primary') == 'True':
        # Pseudo clone mode, we'll set the t-name to the full canonical
        # xmlid
        el.append(
            builder.E.xpath(
                builder.E.attribute(full_tpl_id, name='t-name'),
                expr=".",
                position="attributes",
            ))
        record.append(Field('primary', name='mode'))
    # inject complete <template> element (after changing node name) into
    # the ``arch`` field
    record.append(Field(el, name="arch", type="xml"))

    return self._tag_record(record)
def main():
    """Convert a GAFF parameter .dat file plus SMARTS definitions into an
    XML force-field file.

    Usage: ``python gaff2xml.py <smarts_defs> <gaff_dat_file> <out_xml>``.
    Relies on module-level helpers (``determine_element``,
    ``convert_sigma``, ``convert_epsilon``, ``convert_bondlength``, ...)
    for unit conversion.
    """
    if len(sys.argv) != 4:
        print(
            'usage: python gaff2xml.py [path_to_smarts_defs] [path_to_gaff_dat_file] [path_to_output_xml_file]'
        )
        exit(1)
    else:
        smarts_def_path = sys.argv[1]
        gaff_parm_path = sys.argv[2]
        xml_out_path = sys.argv[3]
    # Hard-coded line ranges of each section of the gaff .dat file —
    # NOTE(review): these assume one specific GAFF release; confirm they
    # match the input file's layout.
    start_mass = 1
    end_mass = 84
    start_lj = 7119
    end_lj = 7202
    start_bond = 86
    end_bond = 1014
    start_angle = 1015
    end_angle = 6330
    start_torsion = 6331
    end_torsion = 7075
    start_improper = 7076
    end_improper = 7114
    data = []
    with open(gaff_parm_path) as f:
        for line in f:
            data.append(line.strip())

    # Create dict with smarts definitions
    smarts = {}
    smarts_tree = ET.parse(smarts_def_path)
    defs = smarts_tree.getroot()
    for atype in defs.findall('Definition'):
        name = atype.get('name')
        iclass = atype.get('class')
        idef = atype.get('def')
        over = atype.get('overrides')
        desc = atype.get('desc')
        doi = atype.get('doi')
        smarts.update({name: [iclass, idef, over, desc, doi]})

    # Extract gaff parameters
    mass_parms = {
        x.split()[0]: x.split()[1]
        for x in data[start_mass:end_mass]
    }
    lj_parms = {x.split()[0]: (x.split()[1:3]) for x in data[start_lj:end_lj]}
    bond_parms = [x for x in data[start_bond:end_bond]]
    angle_parms = [x for x in data[start_angle:end_angle]]
    torsion_parms = [x for x in data[start_torsion:end_torsion]]
    improper_parms = [x for x in data[start_improper:end_improper]]

    # Create our force field
    root = ET.Element("ForceField")
    atomtypes = ET.SubElement(root, 'AtomTypes')
    nonbonded = ET.SubElement(root, 'NonbondedForce')
    nonbonded.set('coulomb14scale', '0.833333333')
    nonbonded.set('lj14scale', '0.5')
    for atype in smarts.keys():
        atomtype = ET.SubElement(atomtypes, 'Type')
        nb_force = ET.SubElement(nonbonded, 'Atom')
        atomtype.set('name', atype)
        iclass = smarts[atype][0]
        atomtype.set('class', iclass)
        atomtype.set('element', determine_element(mass_parms[iclass]))
        atomtype.set('mass', mass_parms[iclass])
        if smarts[atype][1] is not None:
            atomtype.set('def', smarts[atype][1])
        if smarts[atype][2] is not None:
            atomtype.set('overrides', smarts[atype][2])
        atomtype.set('desc', smarts[atype][3])
        atomtype.set('doi', smarts[atype][4])
        nb_force.set('type', atype)
        nb_force.set('charge', '0.0')
        nb_force.set('sigma', convert_sigma(lj_parms[iclass][0]))
        nb_force.set('epsilon', convert_epsilon(lj_parms[iclass][1]))

    # Bonds — fixed-width columns: classes in [0:5], parameters in [5:24].
    bond_forces = ET.SubElement(root, 'HarmonicBondForce')
    for bond in bond_parms:
        bond_force = ET.SubElement(bond_forces, 'Bond')
        classes = re.split('\s+-|-|\s+', bond[0:5])
        parms = bond[5:24].split()
        bond_force.set('class1', classes[0])
        bond_force.set('class2', classes[1])
        bond_force.set('length', convert_bondlength(parms[1]))
        bond_force.set('k', convert_bondk(parms[0]))

    # Angles
    angle_forces = ET.SubElement(root, 'HarmonicAngleForce')
    for angle in angle_parms:
        angle_force = ET.SubElement(angle_forces, 'Angle')
        classes = re.split('\s+-|-|\s+', angle[0:8])
        parms = angle[8:30].split()
        angle_force.set('class1', classes[0])
        angle_force.set('class2', classes[1])
        angle_force.set('class3', classes[2])
        angle_force.set('angle', convert_theta(parms[1]))
        angle_force.set('k', convert_anglek(parms[0]))

    # Proper dihedrals
    torsion_forces = ET.SubElement(root, 'PeriodicTorsionForce')
    continue_reading = False
    for torsion in torsion_parms:
        # Amber leap convention...if periodicity < 0 it means
        # there will follow additional torsional terms for the same set
        # of atoms.  See http://ambermd.org/FileFormats.php#parm.dat
        classes = re.split('\s+-|-|\s+', torsion[0:11])
        parms = torsion[11:54].split()
        if continue_reading == False:
            # New torsion: open an element; 'X' is a wildcard class,
            # serialized as an empty string.
            torsion_force = ET.SubElement(torsion_forces, 'Proper')
            torsion_ctr = 1
            if classes[0].upper() == 'X':
                torsion_force.set('class1', '')
            else:
                torsion_force.set('class1', classes[0])
            if classes[1].upper() == 'X':
                torsion_force.set('class2', '')
            else:
                torsion_force.set('class2', classes[1])
            if classes[2].upper() == 'X':
                torsion_force.set('class3', '')
            else:
                torsion_force.set('class3', classes[2])
            if classes[3].upper() == 'X':
                torsion_force.set('class4', '')
            else:
                torsion_force.set('class4', classes[3])
        else:
            # Continuation line: add another periodicity/k/phase term.
            torsion_ctr += 1
        if float(parms[3]) < 0.0:
            continue_reading = True
        else:
            continue_reading = False
        name = 'periodicity' + str(torsion_ctr)
        torsion_force.set(name, str(int(abs(float(parms[3])))))
        name = 'k' + str(torsion_ctr)
        torsion_force.set(name, convert_torsionk(parms[1], parms[0]))
        name = 'phase' + str(torsion_ctr)
        torsion_force.set(name, convert_theta(parms[2]))

    # Improper dihedrals — note the class reordering (third atom is the
    # central one in Amber improper notation).
    continue_reading = False
    for torsion in improper_parms:
        classes = re.split('\s+-|-|\s+', torsion[0:11])
        parms = torsion[11:54].split()
        if continue_reading == False:
            torsion_force = ET.SubElement(torsion_forces, 'Improper')
            torsion_ctr = 1
            if classes[2].upper() == 'X':
                torsion_force.set('class1', '')
            else:
                torsion_force.set('class1', classes[2])
            if classes[0].upper() == 'X':
                torsion_force.set('class2', '')
            else:
                torsion_force.set('class2', classes[0])
            if classes[1].upper() == 'X':
                torsion_force.set('class3', '')
            else:
                torsion_force.set('class3', classes[1])
            if classes[3].upper() == 'X':
                torsion_force.set('class4', '')
            else:
                torsion_force.set('class4', classes[3])
        else:
            torsion_ctr += 1
        if float(parms[2]) < 0.0:
            continue_reading = True
        else:
            continue_reading = False
        name = 'periodicity' + str(torsion_ctr)
        torsion_force.set(name, str(int(abs(float(parms[2])))))
        name = 'k' + str(torsion_ctr)
        torsion_force.set(name, convert_improperk(parms[0]))
        name = 'phase' + str(torsion_ctr)
        torsion_force.set(name, convert_theta(parms[1]))

    # Write XML with SMARTS defs
    ET.ElementTree(root).write(xml_out_path, pretty_print=True)
def test_reject_t_tag(self): field = etree.Element('t', {'t-field': u'company.name'}) with self.assertRaisesRegexp( AssertionError, r'^t-field can not be used on a t element'): self.engine.render_node(field, self.context({'company': None}))
def gen_script(username, payload):
    """ Generate Netconf / Restconf RPC """
    # Strip the wrapping <metadata> markers before parsing.
    payload = payload.replace('<metadata>', '')
    payload = payload.replace('</metadata>', '')
    _, device, fmt, lock, rpc = Adapter.parse_request(payload)
    if fmt == 'xpath' and rpc == '':
        # No RPC supplied: derive one from the xpath request.
        rpc = Adapter.gen_rpc(username, payload)
    if rpc is None:
        logging.error('gen_script: Invalid RPC Generated')
        return None
    parser = NetconfParser(rpc)
    op = parser.get_operation()
    data = ET.tostring(parser.get_data(), pretty_print=True)
    datastore = parser.get_datastore()
    # setup template args
    args = dict()
    args['data'] = data.strip()
    args['datastore'] = datastore
    args['host'] = device.get('host', '')
    args['port'] = device.get('port', '830')  # 830 = default NETCONF port
    args['user'] = device.get('user', '')
    args['passwd'] = device.get('passwd', '')
    args['platform'] = device.get('platform', '')
    # Substitute visible placeholders for missing connection details so
    # the generated script is still readable/editable.
    if not args['host']:
        args['host'] = '<address>'
    if not args['user']:
        args['user'] = '******'
    if not args['passwd']:
        args['passwd'] = '<password>'
    if not args['platform']:
        args['platform'] = 'csr'
    # Pick the ncclient call matching the RPC operation.
    if op == 'get':
        args['nccall'] = 'm.get(payload).xml'
    elif op == 'get-config':
        args[
            'nccall'] = "m.get_config(source='%s', filter=payload).xml" % datastore
    elif op == 'edit-config':
        e_opt = parser.get_error_option()
        if e_opt is None or e_opt == '':
            args[
                'nccall'] = "m.edit_config(target='%s', config=payload).xml" % datastore
        else:
            args[
                'nccall'] = "m.edit_config(target='%s', error_option='%s', config=payload).xml" % (
                    datastore, e_opt)
        # Only edit-config honours the lock flag.
        args['lock'] = lock
    else:
        # Unknown operation: send the raw RPC through dispatch().
        args['nccall'] = "m.dispatch(ET.fromstring(payload)).xml"
    # generate script
    rendered = render_to_string('pyscript.py', args)
    # Wrap the rendered source in CDATA so it survives XML transport.
    script = ET.Element('script')
    script.text = ET.CDATA(rendered)
    return script
def test_reject_crummy_tags(self): field = etree.Element('td', {'t-field': u'company.name'}) with self.assertRaisesRegexp(AssertionError, r'^RTE widgets do not work correctly'): self.engine.render_node(field, self.context({'company': None}))
urls = list() for key, value in read_dictionary.items(): urls.append(key) for k,v in value.items(): for k in v: urls.append(k) #Delete duplicates. print (len(urls)) urls = f7(urls) print(len(urls)) #print (urls) #Translate to xml. #We can add speak context later root = ET.Element('AIMind') rroot = ET.SubElement(root, 'Root') rroot.attrib["id"] = "0" features = ET.SubElement(root, 'Features') #Fill in the features for url in urls: feature = ET.SubElement(features, 'Feature') feature.attrib["data"] = url feature.attrib["zh-data"] = "" feature.attrib["id"] = str(urls.index(url)+1) feature.attrib["uri"] = "" neighbors = ET.SubElement(feature, 'neighbors') #Find the neighbor in the dict. if url in read_dictionary.keys():
def get_page(self):
    """Render the page: select the simple stylesheet and emit an <ok> marker."""
    self.set_xsl('simple.xsl')
    marker = etree.Element('ok')
    self.doc.put(marker)
def index(self, exts_dict):
    """Serialize a collection of extension dicts into an <extensions> document.

    :param exts_dict: mapping whose 'extensions' key holds a list of
        per-extension dictionaries.
    :return: the serialized form produced by ``self._to_xml``.
    """
    root = etree.Element('extensions', nsmap=self.nsmap)
    for ext_data in exts_dict['extensions']:
        ext_node = etree.SubElement(root, 'extension')
        self._populate_ext(ext_node, ext_data)
    return self._to_xml(root)
def getContent(self):
    """Render this HDF5 file for the databrowse front-end.

    Behaviour depends on ``self._content_mode``:

    * ``"full"`` — return an XML element describing the file (stat info,
      MIME type, owner, permissions) with the embedded ``h5dump -x -H``
      structure dump.
    * ``"raw"`` with ``getimage=true`` and an ``hdfloc`` form field —
      return a PNG plot (1-D) or image (2-D) of the referenced dataset,
      served from a cache that is populated on first request.
    * ``"raw"`` otherwise — stream the file itself as a download.

    Returns ``None`` when not called from databrowse.
    """
    if self._caller != "databrowse":
        return None
    else:
        if self._content_mode == "full":
            try:
                st = os.stat(self._fullpath)
            except IOError:
                return "Failed To Get File Information: %s" % (self._fullpath)
            else:
                file_size = st[ST_SIZE]
                file_mtime = time.asctime(time.localtime(st[ST_MTIME]))
                file_ctime = time.asctime(time.localtime(st[ST_CTIME]))
                file_atime = time.asctime(time.localtime(st[ST_ATIME]))
                # libmagic-based MIME detection.
                magicstore = magic.open(magic.MAGIC_MIME)
                magicstore.load()
                contenttype = magicstore.file(self._fullpath)
                extension = os.path.splitext(self._fullpath)[1][1:]
                icon = self._handler_support.GetIcon(contenttype, extension)
                downlink = self.getURL(self._relpath, content_mode="raw",
                                       download="true")
                xmlroot = etree.Element(
                    '{%s}dbhdf' % self._namespace_uri, nsmap=self.nsmap,
                    name=os.path.basename(self._relpath),
                    resurl=self._web_support.resurl,
                    downlink=downlink, icon=icon)
                xmlchild = etree.SubElement(xmlroot, "filename",
                                            nsmap=self.nsmap)
                xmlchild.text = os.path.basename(self._fullpath)
                xmlchild = etree.SubElement(xmlroot, "path", nsmap=self.nsmap)
                xmlchild.text = os.path.dirname(self._fullpath)
                xmlchild = etree.SubElement(xmlroot, "size", nsmap=self.nsmap)
                xmlchild.text = self.ConvertUserFriendlySize(file_size)
                xmlchild = etree.SubElement(xmlroot, "mtime", nsmap=self.nsmap)
                xmlchild.text = file_mtime
                xmlchild = etree.SubElement(xmlroot, "ctime", nsmap=self.nsmap)
                xmlchild.text = file_ctime
                xmlchild = etree.SubElement(xmlroot, "atime", nsmap=self.nsmap)
                xmlchild.text = file_atime
                # Content Type
                xmlchild = etree.SubElement(xmlroot, "contenttype",
                                            nsmap=self.nsmap)
                xmlchild.text = contenttype
                # File Permissions
                xmlchild = etree.SubElement(xmlroot, "permissions",
                                            nsmap=self.nsmap)
                xmlchild.text = self.ConvertUserFriendlyPermissions(st[ST_MODE])
                # User and Group
                username = pwd.getpwuid(st[ST_UID])[0]
                groupname = grp.getgrgid(st[ST_GID])[0]
                xmlchild = etree.SubElement(xmlroot, "owner",
                                            nsmap=self.nsmap)
                xmlchild.text = "%s:%s" % (username, groupname)
                # Contents of File
                # NOTE(review): this handle is opened but never read or
                # closed (the commented line below was the original use).
                f = open(self._fullpath)
                xmlchild = etree.SubElement(xmlroot, "contents",
                                            nsmap=self.nsmap)
                # Embed the structure-only XML dump from h5dump, rewriting
                # the namespace to the DTD form expected downstream.
                output, error = subprocess.Popen(
                    ['/usr/bin/h5dump', '-x', '-H', self._fullpath],
                    stdout=subprocess.PIPE).communicate()
                output = output.replace(
                    'xmlns:hdf5="http://hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd"',
                    'xmlns:hdf5="http://hdfgroup.org/DTDs/HDF5-File"')
                xmlchild.append(etree.XML(output))
                #xmlchild.text = f.read()
                return xmlroot
        elif self._content_mode == "raw" and self._web_support.req.form['getimage'].value == "true" and 'hdfloc' in self._web_support.req.form:
            # Serve a rendered plot of one dataset inside the HDF5 file;
            # the cache tag encodes the dataset path.
            hdfpath = self._web_support.req.form['hdfloc'].value
            tagname = base64.urlsafe_b64encode(hdfpath)
            ext = 'png'
            if self.CacheFileExists(tagname, extension=ext):
                # Cache hit: stream the previously rendered PNG.
                size = os.path.getsize(
                    self.getCacheFileName(tagname, extension=ext))
                f = self.getCacheFileHandler('rb', tagname, extension=ext)
                self._web_support.req.response_headers['Content-Type'] = 'image/png'
                self._web_support.req.response_headers['Content-Length'] = str(size)
                self._web_support.req.start_response(
                    self._web_support.req.status,
                    self._web_support.req.response_headers.items())
                self._web_support.req.output_done = True
                if 'wsgi.file_wrapper' in self._web_support.req.environ:
                    return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)
                else:
                    return iter(lambda: f.read(1024))
            else:
                # Cache miss: render the dataset with pylab, then stream.
                print(self._fullpath)
                f = h5py.File(self._fullpath, 'r')
                data = f.get(self._web_support.req.form['hdfloc'].value)
                if len(data.value.shape) == 1:
                    # 1-D dataset: line plot.
                    pylab.figure()
                    pylab.plot(data.value)
                    imgf = self.getCacheFileHandler('w', tagname, 'png')
                    pylab.savefig(imgf)
                    imgf.close()
                    pylab.clf()
                elif len(data.value.shape) == 2:
                    # 2-D dataset: intensity image.
                    pylab.figure()
                    pylab.imshow(data.value, origin='lower')
                    imgf = self.getCacheFileHandler('w', tagname, 'png')
                    pylab.savefig(imgf)
                    imgf.close()
                    pylab.clf()
                f.close()
                size = os.path.getsize(
                    self.getCacheFileName(tagname, extension=ext))
                f = self.getCacheFileHandler('rb', tagname, extension=ext)
                self._web_support.req.response_headers['Content-Type'] = 'image/png'
                self._web_support.req.response_headers['Content-Length'] = str(size)
                self._web_support.req.start_response(
                    self._web_support.req.status,
                    self._web_support.req.response_headers.items())
                self._web_support.req.output_done = True
                if 'wsgi.file_wrapper' in self._web_support.req.environ:
                    return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)
                else:
                    return iter(lambda: f.read(1024))
        elif self._content_mode == "raw":
            # Plain download of the file itself.
            size = os.path.getsize(self._fullpath)
            magicstore = magic.open(magic.MAGIC_MIME)
            magicstore.load()
            contenttype = magicstore.file(self._fullpath)
            f = open(self._fullpath, "rb")
            self._web_support.req.response_headers['Content-Type'] = contenttype
            self._web_support.req.response_headers['Content-Length'] = str(size)
            self._web_support.req.response_headers['Content-Disposition'] = \
                "attachment; filename=" + os.path.basename(self._fullpath)
            self._web_support.req.start_response(
                self._web_support.req.status,
                self._web_support.req.response_headers.items())
            self._web_support.req.output_done = True
            if 'wsgi.file_wrapper' in self._web_support.req.environ:
                return self._web_support.req.environ['wsgi.file_wrapper'](f, 1024)
            else:
                return iter(lambda: f.read(1024))
        else:
            raise self.RendererException("Invalid Content Mode")
    pass
def src2nrml(sz, hik10_df, depth_list, out_file):
    """
    Write subduction-zone contour sources to an NRML source-model file.

    INPUT:
        sz         -- subduction-zone name prefix used in source names
        hik10_df   -- DataFrame; one column per source, rows indexed by
                      the entries of depth_list plus 'mag' / 'occurRate'
        depth_list -- ordered contour depths (shallow to deep)
        out_file   -- path of the NRML file to create
    OUTPUT: nrml source model file (characteristicFaultSource entries)
    """
    # Some XML definitions
    NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'
    GML_NAMESPACE = 'http://www.opengis.net/gml'
    SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE}
    gml_ns = SERIALIZE_NS_MAP['gml']

    # Head matter
    root = etree.Element(_tag='nrml',
                         nsmap={'gml': 'http://www.opengis.net/gml'})
    root.set('xmlns', 'http://openquake.org/xmlns/nrml/0.4')
    root.append(etree.Comment('%s' % sz + ' sz sources from contour files'))

    # Define Source Model Name
    sMod = etree.SubElement(root, "sourceModel")
    sMod.set('name', 'Seismic Source Model')

    j = 0  # source id counter
    i = 0  # column counter into hik10_df
    for col in hik10_df:
        # Flag which depth contours actually hold data for this column.
        notempty = [True] * len(depth_list)
        for ix in range(0, len(depth_list)):
            if not hik10_df.loc[depth_list[ix]][i]:
                notempty[ix] = False
                # last populated contour before the first empty one
                if ix != 0 and notempty[ix - 1]:
                    bottom_ix = ix - 1
        if True not in notempty:
            # No populated contour at all: skip this column.  (Previously
            # execution fell through and reused top_ix/bottom_ix from the
            # preceding column -- or raised NameError on the first one.)
            print("PROBLEM - need to skip & throw exception eventually")
            i += 1
            continue
        top_ix = notempty.index(True)
        if False not in notempty:
            # All contours populated: the bottom edge is the deepest
            # contour.  (Was len(depth_list), which indexed one past the
            # end of depth_list and raised IndexError below.)
            bottom_ix = len(depth_list) - 1
        if top_ix == bottom_ix:
            print(hik10_df.columns.values[i])

        # Define Fault Source Type
        fS = etree.SubElement(sMod, "characteristicFaultSource")
        fS.set('id', '%s' % str(j))
        j += 1
        fS.set('name', '%s' % sz + str(hik10_df.columns.values[i]))
        fS.set('tectonicRegion', '%s' % 'Subduction Interface')

        # Set MFD and rates
        # FIX ME - currently hardcoded to be pure characteristic
        MFD = etree.SubElement(fS, 'incrementalMFD')
        MFD.set('minMag', '%s' % hik10_df.loc['mag'][i])
        MFD.set('binWidth', '0.1')
        # Set occurrence rates
        rates = etree.SubElement(MFD, 'occurRates')
        rates.text = '%s' % str(hik10_df.loc['occurRate'][i])

        # Set rake (Taken from fault sense in NSHM fault file)
        rake = etree.SubElement(fS, 'rake')
        rake.text = '%s' % 90

        # Fault geometry: complex fault built from top, intermediate and
        # bottom edges, each a gml posList of lon/lat/depth triples.
        surf = etree.SubElement(fS, 'surface')
        cfg = etree.SubElement(surf, 'complexFaultGeometry')
        # top edge
        cfte = etree.SubElement(cfg, 'faultTopEdge')
        gmlLS = etree.SubElement(cfte, '{%s}LineString' % gml_ns)
        gmlPos = etree.SubElement(gmlLS, '{%s}posList' % gml_ns)
        topCoords = hik10_df.loc[depth_list[top_ix]][i]
        top_cont = insertDepths(topCoords, depth_list[top_ix])
        gmlPos.text = ' '.join([str("%.3f" % x) for x in top_cont])
        # intermediate edges
        for d in depth_list[top_ix + 1:bottom_ix]:
            cfie = etree.SubElement(cfg, 'intermediateEdge')
            gmlLS = etree.SubElement(cfie, '{%s}LineString' % gml_ns)
            gmlPos = etree.SubElement(gmlLS, '{%s}posList' % gml_ns)
            intCoords = hik10_df.loc[d][i]
            int_cont = insertDepths(intCoords, d)
            gmlPos.text = ' '.join([str("%.3f" % x) for x in int_cont])
        # bottom edge
        cfbe = etree.SubElement(cfg, 'faultBottomEdge')
        gmlLS = etree.SubElement(cfbe, '{%s}LineString' % gml_ns)
        gmlPos = etree.SubElement(gmlLS, '{%s}posList' % gml_ns)
        bottomCoords = hik10_df.loc[depth_list[bottom_ix]][i]
        bot_cont = insertDepths(bottomCoords, depth_list[bottom_ix])
        gmlPos.text = ' '.join([str("%.3f" % x) for x in bot_cont])
        i += 1

    # Form Tree and Write to XML (with-block closes the file even if the
    # serializer raises)
    root_tree = etree.ElementTree(root)
    with open(out_file, 'wb') as nrml_file:
        root_tree.write(nrml_file, encoding="utf-8", xml_declaration=True,
                        pretty_print=True)
# Write the contents of the student.xls workbook from problem 0014 into
# student.xml -- here: dump the Emme node records of emme_nodes1.xls as XML.
import xlrd
from lxml import etree

root = etree.Element('network')
root.set('name', 'Network')
tree = etree.ElementTree(root)
nodes_el = etree.Element('nodes')
root.append(nodes_el)

workbook = xlrd.open_workbook("emme_nodes1.xls")
sheet = workbook.sheet_by_index(0)
# Skip the header row; emit one <node id x y> element per data row.
for row_idx in range(1, sheet.nrows):
    row_vals = sheet.row_values(row_idx)
    node = etree.SubElement(nodes_el, 'node')
    node.set('id', str(int(row_vals[0])))
    node.set('x', str(row_vals[1]))
    node.set('y', str(row_vals[2]))
print(etree.tostring(root, pretty_print=True))
def main():
    """
    Creates an XMF (XDMF) file with support for multiple variables
    (that all share the same rectilinear grid).

    Reads the saved-time index file, then emits one <Grid> block per
    time step referencing the HDF5 grid and variable files.
    """
    # get the user configuration (parse_command_line is defined elsewhere
    # in this module; args is presumably an argparse.Namespace -- confirm)
    args = parse_command_line()
    print('User-configuration:\n{}'.format(args))
    # read file containing index and corresponding
    # time at which solution was saved
    with open(args.times_file, 'r') as infile:
        time_values = numpy.genfromtxt(infile, dtype=None)
    if time_values.size == 1:
        # a single-row file comes back 0-d; wrap so iteration below works
        time_values = numpy.array([time_values])
    # start xmf tree
    xdmf = ET.Element('Xdmf', Version='2.2')
    info = ET.SubElement(xdmf, 'Information', Name='MetaData',
                         Value='ID-23454')
    domain = ET.SubElement(xdmf, 'Domain')
    grid_time_series = ET.SubElement(domain, 'Grid', Name='TimeSeries',
                                     GridType='Collection',
                                     CollectionType='Temporal')
    # define type of field from the grid dimensionality (2-D or 3-D)
    if len(args.grid_size) == 2:
        topology_type = '2DRectMesh'
        geometry_type = 'VXVY'
        components = ('x', 'y')
    elif len(args.grid_size) == 3:
        topology_type = '3DRectMesh'
        geometry_type = 'VXVYVZ'
        components = ('x', 'y', 'z')
    # XDMF wants dimensions slowest-varying first, hence the reversal
    number_of_elements = ' '.join(str(n) for n in args.grid_size[::-1])
    precision = '8'
    # create an xmf block for each time-step saved; each row of
    # time_values holds (save index, time value)
    for it, time_value in time_values:
        grid = ET.SubElement(grid_time_series, 'Grid', Name='Grid',
                             GridType='Uniform')
        time = ET.SubElement(grid, 'Time', Value=str(time_value))
        topology = ET.SubElement(grid, 'Topology',
                                 TopologyType=topology_type,
                                 NumberOfElements=number_of_elements)
        geometry = ET.SubElement(grid, 'Geometry',
                                 GeometryType=geometry_type)
        # loop over the grid directions (for code-reuse purpose)
        for d, n in zip(components, args.grid_size):
            dataitem = ET.SubElement(geometry, 'DataItem',
                                     Dimensions=str(n), NumberType='Float',
                                     Precision=precision, Format='HDF')
            # "<file>:/<dataset>" is the XDMF HDF reference syntax
            dataitem.text = '{}:/{}'.format(
                os.path.join(args.directory, args.grid_file), d)
        # create a block for each variable to insert
        for variable in args.variables:
            attribute = ET.SubElement(grid, 'Attribute', Name=variable,
                                      AttributeType='Scalar', Center='Node')
            dataitem = ET.SubElement(attribute, 'DataItem',
                                     Dimensions=number_of_elements,
                                     NumberType='Float',
                                     Precision=precision, Format='HDF')
            # per-step files live in zero-padded 7-digit subdirectories
            variable_file_path = os.path.join(args.directory,
                                              '{:0>7}'.format(it),
                                              '{}.h5'.format(variable))
            dataitem.text = '{}:/{}'.format(variable_file_path, variable)
    # write the xmf file (pretty_print implies ET is lxml.etree)
    print('\nWriting XMF file: {} ...'.format(args.outfile)),
    tree = ET.ElementTree(xdmf)
    tree.write(args.outfile, pretty_print=True, xml_declaration=True)
    print('done')
def _create_envelope(self):
    """Build the outer SOAP Envelope element in the SOAP-ENV namespace,
    carrying the shared namespace map NS_MAP."""
    envelope_tag = '{%s}Envelope' % NS_SOAP_ENV
    return ElementTree.Element(envelope_tag, nsmap=NS_MAP)
def make_plot(date, upload=False, mark_end=False):
    """Render one day of fan-controller temperature logs as an SVG chart.

    Builds the SVG with lxml.etree and either uploads it via lftp
    (upload=True) or writes it into the local 'graphs' directory.

    Parameters:
        date: date-like object; only .year, .month and .day are read.
        upload: if True, write to a temp dir and push the file via FTP,
            renaming it into place on the server.
        mark_end: if True, draw a dashed orange marker after the last
            valid sample of data1.

    Relies on module-level names: read_log, plot, intervals, w, h,
    wplus, hplus, np, etree, tempfile, os, subprocess, shutil.
    """
    print("Make plot for {}.".format(date))
    year = date.year
    month = date.month
    day = date.day
    # read_log returns two sample arrays plus the observed temperature
    # range and the x-intervals during which the fan was running
    data1, data2, minT, maxT, fanIntervals = read_log(year, month, day)
    minTf = minT  # keep the float originals before rounding
    maxTf = maxT
    minT = int(np.floor(minT))
    maxT = int(np.ceil(maxT))
    spanT = maxT - minT
    # snap the plotted span up to the first allowed size in `intervals`
    for dt in intervals:
        if dt > spanT:
            spanT = dt
            break
    # center the chosen span on the observed temperature range
    minT = min(minT, int(np.round((minTf + maxTf - spanT) * .5)))
    maxT = minT + spanT
    # NOTE(review): these RGB triples appear unused below (curve colors
    # are passed to plot() by name) -- possibly leftovers; confirm.
    T1color = np.array([0, 0, 255], dtype=np.uint8)
    tau1color = np.array([0, 127, 0], dtype=np.uint8)
    T2color = np.array([255, 0, 0], dtype=np.uint8)
    tau2color = np.array([255, 0, 255], dtype=np.uint8)
    tempdirname = None
    try:
        # root <svg> element with default + xlink namespaces
        svg = etree.Element('svg',
                            nsmap={
                                None: 'http://www.w3.org/2000/svg',
                                'xlink': 'http://www.w3.org/1999/xlink'
                            },
                            width="{}px".format(wplus),
                            height="{}px".format(hplus),
                            viewBox="0 0 {} {}".format(wplus, hplus),
                            version="1.1")
        style = etree.SubElement(svg, 'style', type="text/css")
        style.text = etree.CDATA('''\
*{fill:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:round;}\
line{stroke:black;}\
polyline{stroke-linecap:round;}\
text,tspan{stroke:none;fill:black;font-family:sans-serif;font-size:13px;}\
g.ylabel text{dominant-baseline:mathematical;text-anchor:end;}\
rect{fill:rgb(180,180,180)}\
.thin line{stroke-width:.1px}\
line.thicker{stroke-width:.25px}''')
        defs = etree.SubElement(svg, 'defs')
        SE = etree.SubElement  # shorthand used throughout
        # reusable axis tick marks
        SE(defs, 'line', id="htick", x1="0", y1="0", x2="0", y2="10")
        SE(defs, 'line', id="vtick", x1="0", y1="0", x2="10", y2="0")
        # white page background
        SE(svg, 'rect', width=str(wplus), height=str(hplus),
           style="fill:white")
        # header line: date plus color legend
        text = SE(svg, 'text', y="13")
        text.text = 'Date: {year:04}-{month:02}-{day:02} '.format(
            year=year, month=month, day=day)
        tspan = SE(text, 'tspan', dx="2em")
        tspan.text = 'Legend:'
        tspan.tail = ' '
        tspan = SE(text, 'tspan', dx=".5em", style="fill:blue")
        tspan.text = u'■'
        tspan.tail = ' Temperature indoors '
        tspan = SE(text, 'tspan', dx="1em", style="fill:green")
        tspan.text = u'■'
        tspan.tail = ' Dew point indoors '
        tspan = SE(text, 'tspan', dx="1em", style="fill:red")
        tspan.text = u'■'
        tspan.tail = ' Temperature outdoors '
        tspan = SE(text, 'tspan', dx="1em", style="fill:magenta")
        tspan.text = u'■'
        tspan.tail = ' Dew point outdoors'
        tspan = SE(text, 'tspan', dx="1em", style="fill:rgb(180,180,180)")
        tspan.text = u'■'
        tspan.tail = ' Fan is on'
        text = SE(svg, 'text', x=str(wplus), y='13',
                  style="text-anchor:end")
        text.text = u'Temperature/dew point in °C'
        text = SE(svg, 'text', x="0", y=str(h + 72))
        text.text = 'Time in hours'
        # main plot group, offset from the page border
        g1 = SE(svg, 'g', transform="translate(44,30)")
        # grey background rectangles while the fan was on
        for x1, x2 in fanIntervals:
            SE(g1, 'rect', x=str(x1), y='.5', width=str(x2 - x1 + 1),
               height=str(h))
        g2 = SE(g1, 'g', transform="translate(.5,.5)")
        # x axis (time) with one tick and label per hour
        g3 = SE(g2, 'g', transform="translate(0,{})".format(h))
        SE(g3, 'line', x1="0", y1="0", x2=str(w), y2="0")
        for x in range(0, w + 1, w // 24):
            use = SE(g3, 'use', x=str(x))
            use.set('{http://www.w3.org/1999/xlink}href', "#htick")
        g4 = SE(g3, 'g', transform="translate(0,24)",
                style="text-anchor:middle")
        for i, x in enumerate(range(0, w + 1, w // 24)):
            text = SE(g4, 'text', x=str(x))
            text.text = str(i % 24)
        # left y axis with per-degree ticks and labels
        SE(g2, 'line', x1="0", y1="0", x2="0", y2=str(h))
        g9 = SE(g2, 'g', transform="translate(-10,0)")
        for T in range(minT, maxT + 1, 1):
            y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
            use = SE(g9, 'use', y=y)
            use.set('{http://www.w3.org/1999/xlink}href', "#vtick")
        g10 = SE(g9, 'g', transform="translate(-5,0)")
        g10.set('class', "ylabel")
        for T in range(minT, maxT + 1, 1):
            y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
            text = SE(g10, 'text', y=y)
            # U+2212 minus sign for negative labels
            text.text = ('' if T >= 0 else u'−') + str(abs(T))
        # right y axis (mirror of the left one)
        g5 = SE(g2, 'g', transform="translate({},0)".format(w))
        SE(g5, 'line', x1="0", y1="0", x2="0", y2=str(h))
        g6 = SE(g5, 'g', x="0")
        for T in range(minT, maxT + 1, 1):
            y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
            use = SE(g6, 'use', y=y)
            use.set('{http://www.w3.org/1999/xlink}href', "#vtick")
        g7 = SE(g6, 'g', transform="translate(40,0)")
        g7.set('class', "ylabel")
        for T in range(minT, maxT + 1, 1):
            y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
            text = SE(g7, 'text', y=y)
            text.text = ('' if T >= 0 else u'−') + str(abs(T))
        # thin horizontal grid lines, thicker every 5 degrees
        g8 = SE(g2, 'g')
        g8.set('class', "thin")
        for T in range(minT, maxT + 1):
            y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
            l = SE(g8, 'line', x1="0", y1=y, x2=str(w), y2=y)
            if T % 5 == 0:
                l.attrib['class'] = 'thicker'
        if mark_end:
            # find the last valid sample: NaN != NaN, so the self-equality
            # test fails exactly on NaN entries
            l = 0
            for ii in reversed(range(len(data1))):
                if data1[ii, 0] == data1[ii, 0]:
                    l = ii + 1
                    break
            SE(g2, 'line', x1=str(l), y1="0", x2=str(l), y2=str(h - .5),
               style="stroke-dasharray:8; stroke:orange")
        # the four data curves
        plot(SE, g2, data1[:, 0], maxT, minT, 'blue')
        plot(SE, g2, data1[:, 1], maxT, minT, 'green')
        plot(SE, g2, data2[:, 0], maxT, minT, 'red')
        plot(SE, g2, data2[:, 1], maxT, minT, 'magenta')
        ET = etree.ElementTree(svg)
        filename = 'fancontrol_{year:04}-{month:02}-{day:02}.svg'.format(
            year=year, month=month, day=day)
        if upload:
            # write to a temp dir, push with lftp, then rename into place
            # on the server so readers never see a partial file
            tempdirname = tempfile.mkdtemp()
            tempfilename = 'fancontrol.svg.tmp'
            tempfilepath = os.path.join(tempdirname, tempfilename)
            ET.write(tempfilepath, pretty_print=False)
            print('Upload')
            retval = subprocess.call(
                '/usr/bin/lftp -c "open ftp.kundencontroller.de; '
                'cd www/data/fangraphs; '
                'put {}; '
                'mv {} {}"'.format(tempfilepath, tempfilename, filename),
                shell=True)
            print('Return value: {}'.format(retval))
            if retval != 0:
                raise RuntimeError('Upload failed')
        else:
            dirname = 'graphs'
            filepath = os.path.join(dirname, filename)
            ET.write(filepath, pretty_print=False)
    except:
        # logged and re-raised, so failures still propagate
        print('Error!')
        raise
    finally:
        if tempdirname is not None:
            shutil.rmtree(tempdirname)
            print('Removed temp dir')
    # Tail of normalize_whitespace (its def line precedes this chunk):
    # collapse runs of whitespace into single spaces.
    x = re.sub(r'\s+', ' ', x)
    return x


def codelist_item_todict(codelist_item, default_lang='', lang='en'):
    """Convert a <codelist-item> element into a flat {tag: text} dict.

    All children are kept except that 'name'/'description' variants are
    filtered to the requested *lang* (an untagged variant counts when
    *lang* equals the codelist's *default_lang*).  A 'public-database'
    attribute is coerced to a bool under the same key.
    """
    out = dict([
        (child.tag, normalize_whitespace(child.text))
        for child in codelist_item
        # non-name/description children always pass; translated children
        # only when their xml:lang matches (or is absent in default lang)
        if child.tag not in ['name', 'description']
        or child.attrib.get(xml_lang) == lang
        or (child.attrib.get(xml_lang) == None and lang == default_lang)
    ])
    if 'public-database' in codelist_item.attrib:
        out['public-database'] = True if codelist_item.attrib['public-database'] in ['1','true'] else False
    return out


def utf8_encode_dict(d):
    """UTF-8 encode every key and value of *d* (Python 2 helper:
    relies on the `unicode` builtin).

    Keys/values that are neither str nor unicode map to None.
    """
    def enc(a):
        if type(a) == str or type(a) == unicode:
            return a.encode('utf8')
        else:
            return None
    return dict( (enc(k), enc(v)) for k, v in d.items() )


# Build per-language output directories and parse every clv2 codelist.
codelists = ET.Element('codelists')
codelists_list = []
for language in languages:
    try:
        os.makedirs(os.path.join(OUTPUTDIR,'json',language))
        os.makedirs(os.path.join(OUTPUTDIR,'csv',language))
    except OSError:
        # directories may already exist
        pass
    for fname in os.listdir(os.path.join('out','clv2','xml')):
        codelist = ET.parse(os.path.join('out','clv2','xml',fname))
        attrib = codelist.getroot().attrib
        # the file name (sans .xml) must agree with the codelist name
        assert attrib['name'] == fname.replace('.xml','')
        default_lang = codelist.getroot().attrib.get(xml_lang)
        codelist_dicts = map(partial(codelist_item_todict,
                                     default_lang=default_lang,
                                     lang=language),
                             codelist.getroot().find('codelist-items').findall('codelist-item'))
def search(filename1, filename2, input_key, output_name):
    """Keyword search against a book catalog using an inverted index.

    Parameters:
        filename1: path of the catalog XML (/catalog/book[@id]/...)
        filename2: path of the index XML (/Root/index[keyword]/books/...)
        input_key: raw query string; split on whitespace/punctuation
        output_name: path of the XML results file to write

    Multiple keywords are ANDed: only (book id, attribute) pairs present
    under every keyword are emitted.  A single keyword emits every book
    it indexes.  Raises ValueError if the query has no usable tokens.
    NOTE(review): f1/f2 are never closed -- consider `with` blocks.
    """
    # tokenize the query, dropping empty fragments
    keyword = re.split(r"\s|[!\"#$%&()*+,\-./:;<=>?@\[\]\\^_`{|}~]",
                       input_key)
    keywords = []
    for i in keyword:
        if i != '':
            keywords.append(i)
    if len(keywords) < 1:
        raise ValueError('The format of input is wrong!')
    else:
        results_xml = etree.Element('results')
        f1 = open(filename1)
        tree1 = etree.parse(f1)
        f2 = open(filename2)
        tree2 = etree.parse(f2)
        # find common
        if len(keywords) > 1:
            # dic1: keyword -> {book id -> [attribute names]}
            dic1 = {}
            for element in keywords:
                element = element.lower()
                dic = {}
                for i in tree2.xpath(
                        '/Root/index[keyword="{}"]/books/book/id/text()'.
                        format(element)):
                    dic[i] = []
                    attribute_name = tree2.xpath(
                        '/Root/index[keyword="{}"]/books/book[id="{}"]/attribute/text()'
                        .format(element, i))
                    for e in attribute_name:
                        dic[i].append(e)
                dic1[element] = dic
            # flatten to "id attribute" strings; a pair supported by every
            # keyword appears exactly len(dic1) times
            pair_list = []
            for k, v in dic1.items():
                for k1, v1 in v.items():
                    for i in v1:
                        pair_list.append(str(k1) + ' ' + str(i))
            evidence = set(
                [x for x in pair_list if pair_list.count(x) == len(dic1)])
            if len(evidence) != 0:
                # else:
                for i in evidence:
                    pair = i.split()
                    book = etree.SubElement(results_xml, 'book',
                                            id='{}'.format(pair[0]))
                    # pull the matching attribute's text from the catalog
                    attribute_content = tree1.xpath(
                        '/catalog/book[@id="{}"]/{}/text()'.format(
                            pair[0], pair[1]))
                    etree.SubElement(book, '{}'.format(
                        pair[1])).text = attribute_content[0]
        else:
            # single keyword: emit every indexed (book, attribute) pair
            dic = {}
            for i in tree2.xpath(
                    '/Root/index[keyword="{}"]/books/book/id/text()'.format(
                        keywords[0].lower())):
                dic[i] = []
                attribute_name = tree2.xpath(
                    '/Root/index[keyword="{}"]/books/book[id="{}"]/attribute/text()'
                    .format(keywords[0].lower(), i))
                for e in attribute_name:
                    dic[i].append(e)
            for k, v in dic.items():
                if len(v) > 1:
                    book = etree.SubElement(results_xml, 'book',
                                            id='{}'.format(k))
                    for i in v:
                        #book=etree.SubElement(results_xml,'book',id='{}'.format(k))
                        attribute_content = tree1.xpath(
                            '/catalog/book[@id="{}"]/{}/text()'.format(k, i))
                        etree.SubElement(
                            book, '{}'.format(i)).text = attribute_content[0]
                else:
                    book = etree.SubElement(results_xml, 'book',
                                            id='{}'.format(k))
                    attribute_content = tree1.xpath(
                        '/catalog/book[@id="{}"]/{}/text()'.format(k, v[0]))
                    etree.SubElement(book, '{}'.format(
                        v[0])).text = attribute_content[0]
    tree = etree.ElementTree(results_xml)
    tree.write(output_name, encoding='UTF-8', xml_declaration=True,
               pretty_print=True)
for index, value in enumerate(timecodesList): if index == indexToKeep: # See note above for this variable act = value, timecodesList[ index + 1] # create a tuple of needed timecodes for each act finalTimecodeList.append(act) indexToKeep += 4 if matID.startswith( 'M' ): # if matID starts with M, it's a movie. Just use the title, not completeTitle for shows completeTitle = title else: matID = 'M' + matID # if ID doesn't start with M, add it # create XML tree using lxml root = etree.Element("Pharos") pharosChild_1 = etree.SubElement(root, "ImportMetaData") etree.SubElement(pharosChild_1, "Note").text = "Dublist for Hallmark" pharosChild_2 = etree.SubElement(root, "Material") etree.SubElement(pharosChild_2, "MatId").text = matID etree.SubElement(pharosChild_2, "Duration").text = duration etree.SubElement(pharosChild_2, "Title").text = completeTitle etree.SubElement(pharosChild_2, "MaterialType").text = "Program" etree.SubElement(pharosChild_2, "AspectRatio").text = "16:9" etree.SubElement(pharosChild_2, "FrameRate").text = "DF30" for actIndex in range( 0, len(finalTimecodeList)): # generate all the Marker tags. for timeCodeIndex in range(0, 2):
def run(_prxdoc, _step, _xmldoc, _element, prx_sheetspec_doc, prx_sheetspec,
        prx_outfilenamexpath_doc=None, prx_outfilenamexpath=None,
        prx_outfilename_str=None, linktag="prx:spreadsheet"):
    """Generate an .ods spreadsheet by running the module's XSLT
    `transformation` against a sheet specification.

    The output file name comes from exactly one of prx_outfilename_str
    or prx_outfilenamexpath (an XPath evaluated against _xmldoc); the
    result is written next to _xmldoc and returned as {linktag: href}.

    Raises ValueError if both naming parameters are given, or if the
    XPath-derived name lacks the required .ods extension.
    """
    # document database handed to the XSLT extension functions
    docdb = {}
    docdb[_prxdoc.get_filehref()] = _prxdoc
    docdb[_xmldoc.get_filehref()] = _xmldoc
    #sheetspec=prx_sheetspec_xmltree.get_xmldoc()
    docdb[prx_sheetspec_doc.get_filehref(
    )] = prx_sheetspec_doc  # don't wrap it in another xmldoc, so context lookups work properly
    stylesheet = etree.XML(transformation)
    prx_lookupfcn = prx_lookupfcn_ext(docdb, prx_sheetspec, _xmldoc,
                                      _element)  # .getroot())
    # Monkeypatch in nsmap from sheetspec into
    # all xsl:elements of stylesheet with a select attribute that contains
    # dyn:evaluate.  lxml cannot change an element's nsmap in place, so
    # each such element is rebuilt and swapped into its parent.
    els_to_patch = stylesheet.xpath(
        "//xsl:*[contains(@select,'dyn:evaluate')]",
        namespaces={"xsl": "http://www.w3.org/1999/XSL/Transform"})
    for el in els_to_patch:
        parent = el.getparent()
        index = parent.index(el)
        parent.remove(el)
        # New element, with desired nsmap and copying all attributes
        newel = etree.Element(el.tag, nsmap=prx_sheetspec.nsmap,
                              attrib=el.attrib)
        # Move element's children
        newel[:] = el[:]
        newel.text = el.text
        newel.tail = el.tail
        parent.insert(index, newel)
        pass
    # stylesheetdoc=etree.ElementTree(stylesheet)
    # stylesheetdoc.write("/tmp/foo.xsl")
    transform = etree.XSLT(stylesheet, extensions=prx_lookupfcn.extensions)
    ods = transform(
        etree.XML("<dummy/>")
    )  # Stylesheet calls sheetspec() function to get actual sheetspec. This avoids cutting sheespec out of its source document.
    # result=etree.tostring(ods)
    resultdoc = xmldoc.xmldoc.frometree(ods,
                                        contexthref=_xmldoc.getcontexthref())
    # evaluate prx_outfilename_str or prx_outfilenamexpath
    if prx_outfilename_str is None:
        namespaces = copy.deepcopy(prx_outfilenamexpath.nsmap)
        if None in namespaces:
            del namespaces[None]  # nsmap param cannot have None
            pass
        prx_outfilename = _xmldoc.xpathcontext(
            _element,
            prx_outfilenamexpath_doc.getattr(prx_outfilenamexpath, "select"),
            namespaces=namespaces)
        if not prx_outfilename.endswith(".ods"):
            raise ValueError("Output spreadsheet requires .ods extension")
        pass
    else:
        if prx_outfilenamexpath is not None:
            raise ValueError(
                "Both prx_outfilenamexpath and prx_outfilename specified (only one at a time is permitted)"
            )
        prx_outfilename = prx_outfilename_str
        pass
    ## put in dest dir if present
    #dest=_xmldoc.xpathsingle("dc:summary/dc:dest",default=None,namespaces={"dc": "http://limatix.org/datacollect"} )
    #if dest is None:
    # Put in same directory as _xmldoc
    outdirhref = _xmldoc.getcontexthref().leafless()
    # pass
    #else:
    # outdirhref=dc_value.hrefvalue.fromxml(_xmldoc,dest).leafless()
    # pass
    prx_outfilehref = dc_value.hrefvalue(quote(prx_outfilename),
                                         contexthref=outdirhref)
    # ods spreadsheet context is a "content.xml" file inside the .ods
    # file interpreted as a directory
    odscontext = dc_value.hrefvalue(quote(prx_outfilename) + "/",
                                    contexthref=outdirhref)
    resultdoc.setcontexthref(
        dc_value.hrefvalue("content.xml",
                           contexthref=odscontext))  # fix up xlink hrefs
    write_output(prx_outfilehref.getpath(), resultdoc.tostring())
    return {linktag: prx_outfilehref}
def init_element(self, tag):
    """Build and return a new, empty XML element named *tag*."""
    fresh = etree.Element(tag)
    return fresh
def encode_xml(self):
    """Encode this object's online status as an empty XML element whose
    tag is taken from the ONLINE_STATUS_ELEMENT constant."""
    status_tag = self.ONLINE_STATUS_ELEMENT
    return etree.Element(status_tag)
def show(self, ext_dict):
    """Serialize one extension description to XML.

    *ext_dict* must hold the payload under the 'extension' key; the
    populated <extension> element is rendered via self._to_xml.
    """
    root = etree.Element('extension', nsmap=self.nsmap)
    payload = ext_dict['extension']
    self._populate_ext(root, payload)
    return self._to_xml(root)
huge_tree=self.__huge_tree)
        self.__xslt_doc = etree.parse(io.BytesIO(self.__xslt), self.__parser)
        self.__transform = etree.XSLT(self.__xslt_doc)
        # run the stored XSLT over the RPC reply text and re-parse the
        # transformed output with the same parser
        self.__root = etree.fromstring(str(
            self.__transform(
                etree.parse(StringIO(str(rpc_reply)), parser=self.__parser))),
            parser=self.__parser)
        return self.__root


def parent_ns(node):
    """Return the namespace URI bound to *node*'s prefix, or None for
    unprefixed (default-namespace or namespace-less) nodes."""
    if node.prefix:
        return node.nsmap[node.prefix]
    return None


# Element-construction helpers wrapping qualify().
# NOTE(review): the attrs={} mutable defaults are shared between calls;
# harmless as long as neither caller nor etree mutates them -- consider
# None sentinels if that ever changes.
new_ele_nsmap = lambda tag, nsmap, attrs={}, **extra: etree.Element(
    qualify(tag), attrs, nsmap, **extra)
new_ele = lambda tag, attrs={}, **extra: etree.Element(qualify(tag), attrs, **
                                                       extra)
new_ele_ns = lambda tag, ns, attrs={}, **extra: etree.Element(
    qualify(tag, ns), attrs, **extra)
# sub_ele qualifies the child tag with the parent's namespace
sub_ele = lambda parent, tag, attrs={}, **extra: etree.SubElement(
    parent, qualify(tag, parent_ns(parent)), attrs, **extra)
sub_ele_ns = lambda parent, tag, ns, attrs={}, **extra: etree.SubElement(
    parent, qualify(tag, ns), attrs, **extra)
def collect_entries(self):
    """Parse dictionary entries out of an ODF document tree.

    Walks every text:p paragraph; a paragraph is an entry iff it
    contains a "dash" span (style color #6666ff).  Sibling spans are
    classified purely by their character-style color:
      lemma #ff3333, locality #9900ff, word-label #00ccff,
      example #ff00cc, inflection #cc9900, reference #ffcc00,
      meaning #ff6600, q-span #00cc00, italics via fo:font-style.
    Everything before the dash becomes the entry head, everything after
    it the content.  Populates self.entries and self.additional_p.
    """
    all_p = self.tree.xpath(u'//text:p', namespaces=self.namespaces)
    print len(all_p)
    for p in all_p:
        # the dash span marks the head/content boundary of an entry
        dashes = p.xpath(u'./text:span[@text:style-name = /office:document-content/office:automatic-styles/'
                         u'style:style[style:text-properties/@fo:color="#6666ff"]/@style:name]',
                         namespaces=self.namespaces)
        if dashes:
            entry = Entry()
            entry.type = 'word'
            siblings = dashes[0].xpath(u'(preceding-sibling::* | following-sibling::*)',
                                       namespaces=self.namespaces)
            preceding_siblings = dashes[0].xpath(u'preceding-sibling::*',
                                                 namespaces=self.namespaces)
            following_siblings = dashes[0].xpath(u'following-sibling::*',
                                                 namespaces=self.namespaces)
            # collect spans of each category by style color
            lemmata = []
            for sibling in siblings:
                lemmata += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                         u'/office:document-content/'
                                         u'office:automatic-styles/style:style[style:text-properties/'
                                         u'@fo:color="#ff3333"]/@style:name]',
                                         namespaces=self.namespaces)
            localities = []
            for sibling in siblings:
                localities += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                            u'/office:document-content/'
                                            u'office:automatic-styles/style:style[style:text-properties/'
                                            u'@fo:color="#9900ff"]/@style:name]',
                                            namespaces=self.namespaces)
            labels = []
            for sibling in siblings:
                labels += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                        u'/office:document-content/'
                                        u'office:automatic-styles/style:style[style:text-properties/'
                                        u'@fo:color="#00ccff"]/@style:name]',
                                        namespaces=self.namespaces)
            examples = []
            for sibling in siblings:
                examples += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                          u'/office:document-content/'
                                          u'office:automatic-styles/style:style[style:text-properties/'
                                          u'@fo:color="#ff00cc"]/@style:name]',
                                          namespaces=self.namespaces)
            inflections = []
            for sibling in siblings:
                inflections += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                             u'/office:document-content/'
                                             u'office:automatic-styles/style:style[style:text-properties/'
                                             u'@fo:color="#cc9900"]/@style:name]',
                                             namespaces=self.namespaces)
            references = []
            for sibling in siblings:
                references += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                            u'/office:document-content/'
                                            u'office:automatic-styles/style:style[style:text-properties/'
                                            u'@fo:color="#ffcc00"]/@style:name]',
                                            namespaces=self.namespaces)
            italics = []
            for sibling in siblings:
                italics += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                         u'/office:document-content/'
                                         u'office:automatic-styles/style:style[style:text-properties/'
                                         u'@fo:font-style="italic"]/@style:name]',
                                         namespaces=self.namespaces)
            qs = []
            for sibling in siblings:
                qs += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                    u'/office:document-content/'
                                    u'office:automatic-styles/style:style[style:text-properties/'
                                    u'@fo:color="#00cc00"]/@style:name]',
                                    namespaces=self.namespaces)
            # everything before dash is head elements
            head = []
            for sibling in preceding_siblings:
                head += sibling.xpath(u'descendant-or-self::text:span[text()]',
                                      namespaces=self.namespaces)
            if head:
                entry.head = Head()
                for item in head:
                    element = Element()
                    entry.head.elements.append(element)
                    element.text = item.text
                    # tag by category; first match wins, default 'def'
                    if item in lemmata:
                        element.tag = u'lemma'
                    elif item in localities:
                        element.tag = u'locality'
                    elif item in labels:
                        element.tag = u'word-label'
                    elif item in inflections:
                        element.tag = u'inflection'
                    elif item in references:
                        element.tag = u'reference'
                    elif item in italics:
                        element.tag = u'italic'
                    else:
                        element.tag = u'def'
            # everything after dash is content elements
            content = []
            for sibling in following_siblings:
                content += sibling.xpath(u'descendant-or-self::text:span[text()]',
                                         namespaces=self.namespaces)
            meanings = []
            for sibling in following_siblings:
                meanings += sibling.xpath(u'descendant-or-self::text:span[@text:style-name = '
                                          u'/office:document-content/'
                                          u'office:automatic-styles/style:style[style:text-properties/'
                                          u'@fo:color="#ff6600"]/@style:name]',
                                          namespaces=self.namespaces)
            if content:
                entry.content = Content()
                # 'several' = content opens with a meaning span, i.e. the
                # entry has multiple numbered meanings
                if content[0] in meanings:
                    entry.content.several = True
                if not entry.content.several:
                    # NOTE(review): meaning_on() presumably starts a new
                    # meaning bucket in Content -- confirm in its class
                    entry.content.meaning_on()
                for item in content:
                    item.set('in_q', 'not_in_q')
                for i, item in enumerate(content):
                    if item in meanings and entry.content.several:
                        entry.content.meaning_on()
                    else:
                        if entry.content.meanings:
                            element = Element()
                            if item in qs:
                                # q-span: collect the run of spans up to
                                # the next q/meaning into a separate
                                # paragraph object, cross-referenced back
                                # to this entry's first lemma
                                new_p = P()
                                for xml_element in content[i+1::]:
                                    if xml_element in qs or xml_element in meanings:
                                        break
                                    xml_element.set('in_q', 'in_q')
                                    new_p.items.append(xml_element)
                                new_p.lemmata += lemmata
                                new_p.localities += localities
                                new_p.labels += labels
                                new_p.examples += examples
                                new_p.dashes += dashes
                                new_p.inflections += inflections
                                new_p.references += references
                                new_p.italics += italics
                                new_p.meanings += meanings
                                ref = etree.Element('ref')
                                ref.text = u'См.'
                                new_p.references.append(ref)
                                new_p.items.append(ref)
                                lemma = etree.Element('lemma')
                                lemma.text = new_p.lemmata[0].text
                                new_p.lemmata.append(lemma)
                                new_p.items.append(lemma)
                                self.additional_p.append(new_p)
                            else:
                                # spans already swallowed by a q run are
                                # skipped here
                                if item.get('in_q') != 'in_q':
                                    entry.content.meanings[-1].elements.append(element)
                                    element.text = item.text
                                    if item in lemmata:
                                        element.tag = u'lemma'
                                    elif item in localities:
                                        element.tag = u'locality'
                                    elif item in labels:
                                        element.tag = u'word-label'
                                    elif item in examples:
                                        element.tag = u'example'
                                    elif item in dashes:
                                        element.tag = u'dash'
                                    elif item in inflections:
                                        element.tag = u'inflection'
                                    elif item in references:
                                        element.tag = u'reference'
                                    elif item in italics:
                                        element.tag = u'italic'
                                    else:
                                        element.tag = u'def'
            self.entries.append(entry)