def test_xml_valid(self):
    """backends.junit.JUnitBackend.write_test(): (once) produces valid xml"""
    # Skip the test entirely when lxml is not importable.
    utils.module_check('lxml')
    validator = etree.XMLSchema(file=JUNIT_SCHEMA)
    with open(self.test_file, 'r') as result_file:
        document = etree.parse(result_file)
        nt.ok_(validator.validate(document), msg='xml is not valid')
def get_xsd(name):
    """Load and compile the XML Schema ``wms_xsds/<name>``.

    :param name: file name of the .xsd inside the ``wms_xsds`` directory
    :return: a compiled ``etree.XMLSchema`` validator

    Fix: the original opened the file and never closed it; the ``with``
    block guarantees the handle is released once parsing is done.
    """
    with open("wms_xsds/" + name) as xsd_f:
        schema_doc = etree.parse(xsd_f)
    return etree.XMLSchema(schema_doc)
def __init__(self, xsd, xml, hostname="fgcz-i-202"):
    """
    Fetch the BioBeamer configuration, validate it against its schema and
    load the parameters of the <host> entry matching *hostname*.

    :param xsd: BioBeamer.xsd (URL of the schema)
    :param xml: BioBeamer.xml (URL of the configuration document)
    :param hostname: name attribute of the <host> element to load
    :return:
    """
    self.logger = create_logger()
    self.parameters = {}
    xml_url = xml  # keep the original URL for the error message below
    # read config files from url
    try:
        # NOTE(review): urllib.urlopen is the Python 2 API — confirm runtime
        f = urllib.urlopen(xml)
        xml = f.read()
        f = urllib.urlopen(xsd)
        xsd = f.read()
    except:
        self.logger.error("can not fetch xml or xsd information")
        raise
    schema = etree.XMLSchema(etree.XML(xsd))
    try:
        # schema-aware parser: parsing fails when the document is invalid
        parser = etree.XMLParser(remove_blank_text=True, schema=schema)
        xml_bio_beamer = etree.fromstring(xml, parser)
    except:
        self.logger.error(
            "config xml '{0}' can not be parsed.".format(xml))
        raise
    found_host_config = False
    # init para dictionary
    for i in xml_bio_beamer:
        # only <host> elements carrying a name attribute are considered
        if i.tag == 'host' and 'name' in i.attrib.keys():
            pass
        else:
            continue
        if i.attrib['name'] == hostname:
            for k in i.attrib.keys():
                if k == 'source_path' or k == 'target_path':
                    # normalize path separators for the local OS
                    self.parameters[k] = os.path.normpath(i.attrib[k])
                elif k == 'pattern':
                    self.parameters[k] = i.attrib[k]
                    try:
                        self.regex = re.compile(self.parameters['pattern'])
                    except:
                        self.logger.error("re.compile pattern failed.")
                        raise
                elif k == 'simulate':
                    # boolean attribute: anything but "false" counts as True
                    if i.attrib[k] == "false":
                        self.parameters['simulate'] = False
                    else:
                        self.parameters['simulate'] = True
                else:
                    # remaining attributes: prefer int, fall back to string
                    try:
                        self.parameters[k] = int(i.attrib[k])
                    except ValueError:
                        self.parameters[k] = i.attrib[k]
            found_host_config = True
    if found_host_config is False:
        msg = "no host configuration could be found in '{0}'.".format(
            xml_url)
        print(msg)
        self.logger.error(msg)
        sys.exit(1)
import re from shutil import copyfile import subprocess import datetime from copy import deepcopy import logging from lxml import etree as ET from utilities import parse_xlsx_file from utilities import fix_permissions from utilities import setup_logging from utilities import group_by_simple_cpd MODS_DEF = ET.parse('schema/mods-3-6.xsd') MODS_SCHEMA = ET.XMLSchema(MODS_DEF) def main(xlsx_file): alias = os.path.splitext(os.path.split(xlsx_file)[-1])[0] remove_previous_mods(alias) mappings, metadata, xsls = parse_xlsx_file(xlsx_file) simples, compounds = group_by_simple_cpd(metadata) for item_metadata in simples: output_path = os.path.join('output', f"{alias}_simples", 'original_format') os.makedirs(output_path, exist_ok=True) try: output_file = f"{os.path.splitext(item_metadata['FileName'])[0]}.xml" except TypeError: logging.fatal(
def is_valid(xml, schema_path='../util/mtmt1.xsd'):
    """Validate *xml* (an etree document) against the XSD at *schema_path*.

    Prints the schema error log on failure and returns the boolean result.
    """
    validator = etree.XMLSchema(etree.parse(schema_path))
    result = validator.validate(xml)
    if not result:
        print(validator.error_log)
    return result
def check_facturx_xsd(facturx_xml, flavor='autodetect', facturx_level='autodetect'):
    """
    Validate the XML file against the XSD
    :param facturx_xml: the Factur-X XML
    :type facturx_xml: string, file or etree object
    :param flavor: possible values: 'factur-x', 'zugferd' or 'autodetect'
    :type flavor: string
    :param facturx_level: the level of the Factur-X XML file. Default value
    is 'autodetect'. The only advantage to specifiy a particular value instead
    of using the autodetection is for a small perf improvement.
    Possible values: minimum, basicwl, basic, en16931, extended.
    :return: True if the XML is valid against the XSD
    raise an error if it is not valid against the XSD
    """
    logger.debug('check_facturx_xsd with factur-x lib %s', __version__)
    # --- argument checks ---------------------------------------------------
    # NOTE(review): `unicode` and `file` below are Python 2 built-ins —
    # this function targets Python 2 (or compatibility aliases defined
    # elsewhere in the module); confirm before porting.
    if not facturx_xml:
        raise ValueError('Missing facturx_xml argument')
    if not isinstance(flavor, (str, unicode)):
        raise ValueError('Wrong type for flavor argument')
    if not isinstance(facturx_level, (type(None), str, unicode)):
        raise ValueError('Wrong type for facturx_level argument')
    facturx_xml_etree = None
    # Normalise the input to a byte string; keep the parsed tree when the
    # caller already handed us an etree element.
    if isinstance(facturx_xml, (str, bytes)):
        xml_string = facturx_xml
    elif isinstance(facturx_xml, unicode):
        xml_string = facturx_xml.encode('utf8')
    elif isinstance(facturx_xml, type(etree.Element('pouet'))):
        facturx_xml_etree = facturx_xml
        xml_string = etree.tostring(
            facturx_xml, pretty_print=True, encoding='UTF-8',
            xml_declaration=True)
    elif isinstance(facturx_xml, file):
        facturx_xml.seek(0)
        xml_string = facturx_xml.read()
        facturx_xml.close()
    if flavor not in ('factur-x', 'facturx', 'zugferd'):  # autodetect
        if facturx_xml_etree is None:
            try:
                facturx_xml_etree = etree.fromstring(xml_string)
            except Exception as e:
                raise Exception("The XML syntax is invalid: %s."
                                % unicode(e))
        flavor = get_facturx_flavor(facturx_xml_etree)
    if flavor in ('factur-x', 'facturx'):
        # autodetect the level unless the caller supplied a known one
        if facturx_level not in FACTURX_LEVEL2xsd:
            if facturx_xml_etree is None:
                try:
                    facturx_xml_etree = etree.fromstring(xml_string)
                except Exception as e:
                    raise Exception("The XML syntax is invalid: %s."
                                    % unicode(e))
            facturx_level = get_facturx_level(facturx_xml_etree)
        if facturx_level not in FACTURX_LEVEL2xsd:
            raise ValueError(
                "Wrong level '%s' for Factur-X invoice." % facturx_level)
        xsd_filename = FACTURX_LEVEL2xsd[facturx_level]
        xsd_file = resource_filename(__name__, 'xsd/%s' % xsd_filename)
    elif flavor == 'zugferd':
        xsd_file = resource_filename(__name__, 'xsd/zugferd/ZUGFeRD1p0.xsd')
    # NOTE(review): if flavor is none of the above, xsd_file is unbound here
    # and the next line raises NameError — confirm whether that can happen.
    logger.debug('Using XSD file %s', xsd_file)
    # NOTE(review): the handle from open() is never closed explicitly
    xsd_etree_obj = etree.parse(open(xsd_file))
    official_schema = etree.XMLSchema(xsd_etree_obj)
    try:
        t = etree.parse(BytesIO(xml_string))
        official_schema.assertValid(t)
        logger.info('Factur-X XML file successfully validated against XSD')
    except Exception as e:
        # if the validation of the XSD fails, we arrive here
        logger.error(
            "The XML file is invalid against the XML Schema Definition")
        logger.error('XSD Error: %s', e)
        raise Exception(
            "The %s XML file is not valid against the official "
            "XML Schema Definition. "
            "Here is the error, which may give you an idea on the "
            "cause of the problem: %s." % (flavor.capitalize(), unicode(e)))
    return True
xml, xmlns="http://espa.cr.usgs.gov/v1.0", xmlns_xsi="http://www.w3.org/2001/XMLSchema-instance", schema_uri= "http://espa.cr.usgs.gov/static/schema/espa_internal_metadata_v1_0.xsd") f.close() # This method does not validate the schema f = open('exported_2.xml', 'w') ns_def = metadata_api.build_ns_def( xmlns='http://espa.cr.usgs.gov/v1.0', xmlns_xsi='http://www.w3.org/2001/XMLSchema-instance', schema_uri= 'http://espa.cr.usgs.gov/static/schema/espa_internal_metadata_v1_0.xsd') xml.export(f, 0, namespacedef_=ns_def) f.close() # LXML - Validation Example try: f = open('../../../htdocs/schema/espa_internal_metadata_v1_0.xsd') schema_root = etree.parse(f) f.close() schema = etree.XMLSchema(schema_root) tree = etree.parse('exported_1.xml') schema.assertValid(tree) except Exception, e: print "lxml Validation Error: %s" % e print str(e)
def test_multiple_extension():
    """A chain of complexContent extensions across three imported schemas
    must resolve so attributes of the innermost base type remain usable."""
    doc_a = etree.fromstring("""
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            xmlns:tns="http://tests.python-zeep.org/a"
            targetNamespace="http://tests.python-zeep.org/a"
            xmlns:b="http://tests.python-zeep.org/b"
            elementFormDefault="qualified">
          <xs:import schemaLocation="http://tests.python-zeep.org/b.xsd"
              namespace="http://tests.python-zeep.org/b"/>
          <xs:complexType name="type_a">
            <xs:complexContent>
              <xs:extension base="b:type_b"/>
            </xs:complexContent>
          </xs:complexType>
          <xs:element name="typetje" type="tns:type_a"/>
        </xs:schema>
    """.strip())
    doc_b = etree.fromstring("""
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            xmlns:tns="http://tests.python-zeep.org/b"
            targetNamespace="http://tests.python-zeep.org/b"
            xmlns:c="http://tests.python-zeep.org/c"
            elementFormDefault="qualified">
          <xs:import schemaLocation="http://tests.python-zeep.org/c.xsd"
              namespace="http://tests.python-zeep.org/c"/>
          <xs:complexType name="type_b">
            <xs:complexContent>
              <xs:extension base="c:type_c"/>
            </xs:complexContent>
          </xs:complexType>
        </xs:schema>
    """.strip())
    doc_c = etree.fromstring("""
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            xmlns:tns="http://tests.python-zeep.org/c"
            targetNamespace="http://tests.python-zeep.org/c"
            elementFormDefault="qualified">
          <xs:complexType name="type_c">
            <xs:complexContent>
              <xs:extension base="tns:type_d"/>
            </xs:complexContent>
          </xs:complexType>
          <xs:complexType name="type_d">
            <xs:attribute name="wat" type="xs:string" />
          </xs:complexType>
        </xs:schema>
    """.strip())

    # sanity check: lxml itself accepts the innermost schema
    etree.XMLSchema(doc_c)

    transport = DummyTransport()
    transport.bind("http://tests.python-zeep.org/b.xsd", doc_b)
    transport.bind("http://tests.python-zeep.org/c.xsd", doc_c)

    schema = xsd.Schema(doc_a, transport=transport)
    type_a = schema.get_type("ns0:type_a")
    type_a(wat="x")
def test_global_element_and_type():
    """Global elements/attributes referenced across imported schemas resolve,
    and types/elements from a transitively imported schema are reachable.

    Fix: the original fetched ``type_a`` twice with two identical
    ``schema.get_type`` calls (copy-paste duplicate) and reused the name
    ``elm`` for a type; the duplicate is removed and the type gets its own
    name.
    """
    node_a = etree.fromstring("""
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            xmlns:tns="http://tests.python-zeep.org/a"
            targetNamespace="http://tests.python-zeep.org/a"
            xmlns:b="http://tests.python-zeep.org/b"
            elementFormDefault="qualified">
          <xs:import schemaLocation="http://tests.python-zeep.org/b.xsd"
              namespace="http://tests.python-zeep.org/b"/>
          <xs:complexType name="refs">
            <xs:sequence>
              <xs:element ref="b:ref_elm"/>
            </xs:sequence>
            <xs:attribute ref="b:ref_attr"/>
          </xs:complexType>
        </xs:schema>
    """.strip())
    node_b = etree.fromstring("""
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            xmlns:tns="http://tests.python-zeep.org/b"
            targetNamespace="http://tests.python-zeep.org/b"
            xmlns:c="http://tests.python-zeep.org/c"
            elementFormDefault="qualified">
          <xs:import schemaLocation="http://tests.python-zeep.org/c.xsd"
              namespace="http://tests.python-zeep.org/c"/>
          <xs:element name="ref_elm" type="xs:string"/>
          <xs:attribute name="ref_attr" type="xs:string"/>
        </xs:schema>
    """.strip())
    node_c = etree.fromstring("""
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            xmlns:tns="http://tests.python-zeep.org/c"
            targetNamespace="http://tests.python-zeep.org/c"
            elementFormDefault="qualified">
          <xs:complexType name="type_a">
            <xs:sequence>
              <xs:element name="item_a" type="xs:string"/>
            </xs:sequence>
          </xs:complexType>
          <xs:element name="item" type="xs:string"/>
        </xs:schema>
    """.strip())

    # sanity check: lxml itself accepts schema C
    etree.XMLSchema(node_c)

    transport = DummyTransport()
    transport.bind("http://tests.python-zeep.org/b.xsd", node_b)
    transport.bind("http://tests.python-zeep.org/c.xsd", node_c)

    schema = xsd.Schema(node_a, transport=transport)
    type_a = schema.get_type("{http://tests.python-zeep.org/c}type_a")
    type_a(item_a="x")
    elm = schema.get_element("{http://tests.python-zeep.org/c}item")
    elm("x")
    refs_type = schema.get_type("{http://tests.python-zeep.org/a}refs")
    refs_type(ref_elm="foo", ref_attr="bar")
def assert_is_invalid(self, xml_string, soapfish_schema):
    """Assert that *xml_string* does NOT validate against the XSD generated
    from *soapfish_schema*."""
    document = etree.fromstring(xml_string)
    validator = etree.XMLSchema(generate_xsd(soapfish_schema))
    self.assertIs(
        validator.validate(document), False,
        msg='XML should fail to validate: %r' % xml_string)
"""Routines for handling etrees representing VOEvent packets.""" from __future__ import absolute_import from __future__ import unicode_literals import copy import collections import pytz from lxml import objectify, etree from six import string_types import voeventparse.definitions voevent_v2_0_schema = etree.XMLSchema( etree.fromstring(voeventparse.definitions.v2_0_schema_str)) from ._version import get_versions __version__ = get_versions()['version'] def Voevent(stream, stream_id, role): """Create a new VOEvent element tree, with specified IVORN and role. Args: stream (str): used to construct the IVORN like so:: ivorn = 'ivo://' + stream + '#' + stream_id (N.B. ``stream_id`` is converted to string if required.)
def assert_is_valid(self, xml_string, soapfish_schema):
    """Assert that *xml_string* validates against the XSD generated from
    *soapfish_schema*."""
    document = etree.fromstring(xml_string)
    validator = etree.XMLSchema(generate_xsd(soapfish_schema))
    self.assertIs(
        validator.validate(document), True,
        msg='XML did not validate: %r' % xml_string)
from tyko import pbcore, data_provider, schema PBCORE_XSD_URL = "https://raw.githubusercontent.com/PBCore-AV-Metadata/PBCore_2.1/master/pbcore-2.1.xsd" if os.path.exists("pbcore-2.1.xsd"): with open("pbcore-2.1.xsd", "r") as f: PBCORE_XSD = f.read() else: with urllib.request.urlopen(PBCORE_XSD_URL) as f: assert f.code == 200 PBCORE_XSD = str(f.read(), encoding="utf8") with open("pbcore-2.1.xsd", "w") as wf: wf.write(PBCORE_XSD) assert PBCORE_XSD is not None xsd = etree.XML(PBCORE_XSD) PBCORE_SCHEMA = etree.XMLSchema(xsd) def test_pbcore_fail_invalid_id(): db = sqlalchemy.create_engine("sqlite:///:memory:") empty_data_provider = data_provider.DataProvider(db) with pytest.raises(tyko.exceptions.DataError): pbcore.create_pbcore_from_object(object_id=1, data_provider=empty_data_provider) def test_pbcore_valid_id(tmpdir): app = flask.Flask(__name__, template_folder="../tyko/" "templates") app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:" db = SQLAlchemy(app)
def readXSD(self):
    """Open a scheme file to validate the to-be-read file.

    Parses ``self.fileXSD`` and returns the compiled validator.
    """
    return etree.XMLSchema(etree.parse(self.fileXSD))
def load_definition(dir):
    """Load and verify a service definition from *dir*/.iii.

    Validates the definition XML against the iii2 schema, checks the files
    on disk against the recorded hashes, and returns the service metadata
    as a list, or False on any failure.
    """
    iii_dir = dir + '/.iii'
    iii_xml_dir = dir + '/.iii/iii.xml'
    # NOTE(review): no '/' separator here (unlike iii_dir above) — this
    # resolves to e.g. 'foodebug/iii2.xsd'; confirm the intended path.
    iii_schema_dir = dir + 'debug/iii2.xsd'
    try:
        #try opening definition and verify against schema
        with open(iii_schema_dir, 'r') as schema_file:
            schema_raw = et.XML(schema_file.read())
        schema_f = et.XMLSchema(schema_raw)
        parser = et.XMLParser(schema=schema_f, remove_comments=True,
                              remove_blank_text=True)
        # NOTE(review): this opens iii_dir (the directory path), not
        # iii_xml_dir which is computed above but never used — verify.
        with open(iii_dir, 'r') as service_def:
            iii_root = et.XML(service_def.read(), parser)
        #get vars
        service = iii_root[0]
        static_files = []
        var_files = []
        for child in service:
            if child.tag == 'desc':
                description = child.text
                name = child.get('name')
            elif child.tag == 'address':
                address = child.text
            elif child.tag == 'data':
                # hash of the serialized <data> subtree, compared against
                # the recorded version attribute further below
                version_hash = Cryptographer.generate_hash(
                    et.tostring(child).decode('utf8'))
                for schild in child:
                    if schild.tag == 'files':
                        xml_files = schild
                    elif schild.tag == 'dependencies':
                        dependencies = []
                        for source in schild:
                            depend = [
                                source.get('type'),
                                source.get('name'),
                                source.text
                            ]
                            dependencies.append(depend)
                    elif schild.tag == 'tags':
                        tags = []
                        for tag in schild:
                            if tag.tag == 'application':
                                service_type = [tag.tag, tag.get('os')]
                                tags.append(service_type)
                            elif tag.tag == 'resource':
                                service_type = [tag.tag]
                                tags.append(service_type)
                            elif tag.tag == 'DELETE':
                                delete = True
        #add files to lists
        for file in xml_files:
            if file.get('type') == 'static':
                s_file = [file.get('rdir'), file.text]
                static_files.append(s_file)
            elif file.get('type') == 'variable':
                v_file = file.get('rdir')
                var_files.append(v_file)
        #verify file hashes and matches
        # NOTE(review): backslash separators make this Windows-specific
        root_directory = glob.glob(iii_dir + '\\**\\*', recursive=True)
        file_list = []
        for file in root_directory:
            new_path = os.path.relpath(os.path.realpath(file))
            file_list.append(new_path)
        for file in static_files:
            # NOTE(review): here `file` is a [rdir, hash] list, so
            # os.path.isfile(file) raises TypeError (silently swallowed by
            # the bare except below) — confirm intended behavior.
            if file[0] in file_list:
                if os.path.isfile(file):
                    if file[1] == Cryptographer.generate_hash(
                            message=None, filepath=file):
                        file_list.remove(file)
                if os.path.isdir(file):
                    file_list.remove(file)
        var_dirs = []
        for file in var_files:
            if file in file_list:
                if os.path.isfile(file):
                    file_list.remove(file)
                if os.path.isdir(file):
                    var_dirs.append(file)
                    file_list.remove(file)
        #check/ignore any files in variable directory
        # NOTE(review): removing from file_list while iterating it skips
        # elements — entries after a removed one may not be checked.
        for file in file_list:
            for var_dir in var_dirs:
                var_dir_path = Path(var_dir)
                dir_path = Path(file)
                if var_dir_path in dir_path.parents:
                    file_list.remove(file)
        if len(file_list) != 0:
            print(
                '[ERROR] Files in service do not match service definition')
            return False
        #verify version hash
        if version_hash != service.get('version'):
            print('[ERROR] Definition version does not match data hash')
            return False
        counter = service.get('counter')
        #return service data in a list
        #dir, version, count, name, description, address, dependencies, tags [OS, type, etc], delete command
        return [
            dir, version_hash, counter, name, description, address,
            dependencies, tags, delete
        ]
    except:
        # NOTE(review): bare except hides all errors (including the
        # TypeError noted above) behind one generic message.
        print(
            '[ERROR] Service defnition failed to parse (are .iii files ok?)'
        )
        return False
def test_include_no_parent_default_namespace():
    """xs:include of a schema with no targetNamespace into one that has a
    targetNamespace (but no default namespace) must resolve types."""
    # Root schema: default namespace set, imports the "tns" schema.
    schema_root = """
        <?xml version="1.0"?>
        <xs:schema xmlns="http://tests.python-zeep.org/rootns"
            xmlns:tns="http://tests.python-zeep.org/tns"
            xmlns:xs="http://www.w3.org/2001/XMLSchema"
            targetNamespace="http://tests.python-zeep.org/rootns"
            elementFormDefault="qualified">
          <xs:import namespace="http://tests.python-zeep.org/tns"
              schemaLocation="http://tests.python-zeep.org/tns.xsd"/>
          <xs:element name="root">
            <xs:complexType>
              <xs:sequence>
                <xs:element name="container" type="tns:containerType" />
              </xs:sequence>
            </xs:complexType>
          </xs:element>
        </xs:schema>
    """.strip()
    # no default namespace, but targetNamespace
    schema_tns = """
        <?xml version="1.0"?>
        <xs:schema xmlns:tns="http://tests.python-zeep.org/tns"
            targetNamespace="http://tests.python-zeep.org/tns"
            xmlns:xs="http://www.w3.org/2001/XMLSchema"
            elementFormDefault="qualified">
          <xs:include schemaLocation="http://tests.python-zeep.org/include.xsd" />
        </xs:schema>
    """.strip()
    # no default namespace and no targetNamespace
    schema_include = """
        <?xml version="1.0"?>
        <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
            elementFormDefault="qualified">
          <xs:complexType name="containerType">
            <xs:sequence>
              <xs:element name="item" type="itemType" />
            </xs:sequence>
          </xs:complexType>
          <xs:complexType name="itemType">
            <xs:sequence>
              <xs:element name="intVal" type="xs:int" />
              <xs:element name="boolVal" type="xs:boolean" />
            </xs:sequence>
          </xs:complexType>
        </xs:schema>
    """.strip()

    class IncludeSchemaResolver(etree.Resolver):
        # Serves tns.xsd / include.xsd from the in-memory strings above so
        # lxml can compile the root schema without network access.
        def resolve(self, url, id, context):
            if url == "http://tests.python-zeep.org/tns.xsd":
                return self.resolve_string(schema_tns, context)
            elif url == "http://tests.python-zeep.org/include.xsd":
                return self.resolve_string(schema_include, context)

    parser = etree.XMLParser()
    parser.resolvers.add(IncludeSchemaResolver())
    schema = etree.XMLSchema(etree.fromstring(schema_root, parser=parser))

    xml = """
        <?xml version="1.0"?>
        <root xmlns="http://tests.python-zeep.org/rootns">
          <container xmlns:tns="http://tests.python-zeep.org/tns">
            <tns:item>
              <tns:intVal>42</tns:intVal>
              <tns:boolVal>true</tns:boolVal>
            </tns:item>
          </container>
        </root>
    """.strip()
    xml = etree.fromstring(xml)
    schema.assertValid(xml)  # schema is ok for lxml

    # Rebind the string names to parsed trees and feed them to zeep,
    # which must accept the same construction.
    schema_root = etree.fromstring(schema_root)
    schema_tns = etree.fromstring(schema_tns)
    schema_include = etree.fromstring(schema_include)

    transport = DummyTransport()
    transport.bind("http://tests.python-zeep.org/tns.xsd", schema_tns)
    transport.bind("http://tests.python-zeep.org/include.xsd", schema_include)
    xsd.Schema(schema_root, transport=transport)
def validates_against_xml_schema(xml_instance_path, schema_path):
    """Return True when the document at *xml_instance_path* is valid per the
    XSD at *schema_path*."""
    document = etree.parse(xml_instance_path)
    validator = etree.XMLSchema(etree.parse(schema_path))
    return validator.validate(document)
.. _IOF XML v3.0: http://orienteering.org/resources/it/data-standard-3-0/ """ from collections import defaultdict import logging from pkg_resources import resource_stream import iso8601 from lxml import etree from . import model from .tools import camelcase_to_snakecase _logger = logging.getLogger(__name__) _schema = etree.XMLSchema( etree.parse(resource_stream('holper.resources.IOF', 'IOF_3.0.xsd'))) _NS = 'http://www.orienteering.org/datastandard/3.0' def detect(input_file): try: document = etree.parse(input_file) except etree.ParseError: return False return _schema.validate(document) def read(input_file): parser = etree.XMLParser(remove_comments=True, remove_pis=True, collect_ids=False)
async def setup_forms(context, company):
    """Load every built-in form definition XML into sys_form_defns.

    Each form file is validated against schemas/form.xsd while parsing,
    de-escaped (the files use `, <<, >>, && in place of ", <, >, &),
    and saved via the sys_form_defns db object.

    Fixes: the form XML file handle was opened without ever being closed —
    now read inside a ``with`` block; the long run of identical await
    statements is driven by a name tuple (same forms, same order).
    """
    schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas')
    parser = etree.XMLParser(
        schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')),
        attribute_defaults=True, remove_comments=True, remove_blank_text=True)
    form_path = os.path.join(os.path.dirname(__main__.__file__), 'init', 'forms')
    form_defn = await db.objects.get_db_object(context, company, 'sys_form_defns')
    db_table = await db.objects.get_db_object(context, company, 'db_tables')

    async def setup_form(form_name):
        # read the form definition, closing the handle when done
        with open('{}/{}.xml'.format(form_path, form_name)) as xml_file:
            xml = xml_file.read()
        await form_defn.init()
        await form_defn.setval('form_name', form_name)
        # await form_defn.setval('title', title)
        # undo the escaping used so the files remain valid XML sources
        xml = xml.replace('`', '"').replace('<<', '<').replace(
            '>>', '>').replace('&&', '&')
        form_xml = etree.fromstring(xml, parser=parser)
        await form_defn.setval('title', form_xml.get('title'))
        await form_defn.setval('form_xml', form_xml)
        await form_defn.save()

    form_names = (
        'setup_grid', 'grid_lookup', 'tree_lookup', 'login_form',
        'chg_pwd_form', 'setup_form', 'setup_form_dbobj',
        'setup_form_memobj', 'setup_form_ioparams', 'setup_form_inline',
        'setup_form_body', 'setup_form_toolbar', 'setup_form_methods',
        'setup_form_buttonrow', 'setup_process', 'setup_proc_dbobj',
        'setup_proc_memobj', 'setup_proc_ioparams', 'foreign_key',
        'choices', 'dbcols_setup', 'setup_company', 'setup_cursor',
        'setup_menu', 'setup_user', 'setup_table', 'setup_table_dbcols',
        'actions', 'checks', 'hooks', 'updates', 'setup_bpmn',
        'setup_roles', 'users_roles', 'select_dates',
        'select_balance_date', 'select_date_range',
        )
    for form_name in form_names:
        await setup_form(form_name)
import codecs
from lxml import etree
import os

from django.utils.translation import ugettext as _
from six import text_type

from wirecloud.commons.utils.template.base import ObsoleteFormatError, parse_contacts_info, TemplateParseException
from wirecloud.commons.utils.translation import get_trans_index
from wirecloud.platform.wiring.utils import get_behaviour_skeleton, get_wiring_skeleton, parse_wiring_old_version

# Compile the macdescription XML Schema once at import time; XMLSCHEMA is the
# validator used for incoming template documents.
XMLSCHEMA_FILE = codecs.open(os.path.join(os.path.dirname(__file__), '../schemas/xml_schema.xsd'), 'rb')
XMLSCHEMA_DOC = etree.parse(XMLSCHEMA_FILE)
XMLSCHEMA_FILE.close()
XMLSCHEMA = etree.XMLSchema(XMLSCHEMA_DOC)

# Namespace of the current template format and of the legacy formats
WIRECLOUD_TEMPLATE_NS = 'http://wirecloud.conwet.fi.upm.es/ns/macdescription/1'
OLD_TEMPLATE_NAMESPACES = ('http://wirecloud.conwet.fi.upm.es/ns/template#', 'http://morfeo-project.org/2007/Template')

# XPath expressions for template fields; the 't' prefix is presumably bound
# to the template namespace where these are evaluated — not visible here.
RESOURCE_DESCRIPTION_XPATH = 't:details'
DISPLAY_NAME_XPATH = 't:title'
DESCRIPTION_XPATH = 't:description'
LONG_DESCRIPTION_XPATH = 't:longdescription'
AUTHORS_XPATH = 't:authors'
CONTRIBUTORS_XPATH = 't:contributors'
IMAGE_URI_XPATH = 't:image'
IPHONE_IMAGE_URI_XPATH = 't:smartphoneimage'
MAIL_XPATH = 't:email'
HOMEPAGE_XPATH = 't:homepage'
DOC_URI_XPATH = 't:doc'
def setup_module(module):
    """Test-module hook: switch to this file's directory and compile the
    XLIFF 1.1 schema into the module-level ``schema`` global."""
    global schema
    os.chdir(path.dirname(__file__))
    schema = etree.XMLSchema(etree.parse('xliff-core-1.1.xsd'))
def parseXMLSchema(strSchemaPath):
    """Parse an xsd schema file and return the compiled validator."""
    return etree.XMLSchema(etree.parse(strSchemaPath))
def act_export_ats(self, cr, uid, ids, context): inv_obj = self.pool.get('account.invoice') elect_obj = self.pool.get('fact.elect.docs') wiz = self.browse(cr, uid, ids)[0] period_id = wiz.period_id.id ruc = wiz.company_id.partner_id.ced_ruc if not ruc: raise osv.except_osv('Datos incompletos', 'No ha ingresado RUC para la compañía') ats = etree.Element('iva') etree.SubElement(ats, 'TipoIDInformante').text = 'R' etree.SubElement(ats, 'IdInformante').text = str(ruc) razon = self.elimina_tildes(wiz.company_id.name.replace('.', '')) social = re.search('(.*?)(([a-zA-Z]\.){2,})(.*)', razon) if social: replacement = ''.join(social.group(2).split('.')) razon = social.group(1) + replacement + social.group(4) etree.SubElement(ats, 'razonSocial').text = razon period = self.pool.get('account.period').browse(cr, uid, [period_id])[0] etree.SubElement(ats, 'Anio').text = time.strftime( '%Y', time.strptime(period.date_start, '%Y-%m-%d')) etree.SubElement(ats, 'Mes').text = time.strftime( '%m', time.strptime(period.date_start, '%Y-%m-%d')) estabRuc = '001' #pos = self.pool.get('pos.config').browse(cr,uid,uid) #if pos: # estabRuc = pos.local_id # ptoemi = True # if not (estabRuc and ptoemi): # raise osv.except_osv(_('UserError'), _('No se ha definido el establecimiento')) # elif estabRuc == '000': # raise osv.except_osv(_('UserError'), _('Establecimiento no puede ser 000')) #else: # raise osv.except_osv(_('UserError'), _('No se ha definido el establecimiento')) etree.SubElement(ats, 'numEstabRuc').text = estabRuc ventas_ids = inv_obj.search(cr, uid, [('state', 'in', ['open', 'paid']), ('period_id', '=', period_id), ('type', '=', 'out_invoice'), ('company_id', '=', wiz.company_id.id)]) sub_ventas = 0.0 for i in inv_obj.browse(cr, uid, ventas_ids): #sub_ventas += i.amount_untaxed #sub_ventas += (i.amount_tax + i.amount_vat + i.amount_vat_cero) sub_ventas += (i.amount_vat + i.amount_vat_cero) # total de notas de credito sub_ndc = 0.0 ndc_ids = inv_obj.search(cr, uid, [('state', 'in', 
['open', 'paid']), ('period_id', '=', period_id), ('type', '=', 'out_refund'), ('company_id', '=', wiz.company_id.id)]) for i in inv_obj.browse(cr, uid, ndc_ids): sub_ndc += (i.amount_tax + i.amount_vat) if sub_ventas > 0.00: sub_ventas = sub_ventas - sub_ndc total_ventas = '%.2f' % sub_ventas etree.SubElement(ats, 'totalVentas').text = total_ventas etree.SubElement(ats, 'codigoOperativo').text = 'IVA' compras = etree.Element('compras') '''Facturas de Compra con retenciones ''' inv_ids = inv_obj.search( cr, uid, [('state', 'in', ['open', 'paid']), ('period_id', '=', period_id), ('type', 'in', ['in_invoice', 'liq_purchase', 'in_refund']), ('company_id', '=', wiz.company_id.id)]) for inv in inv_obj.browse(cr, uid, inv_ids): if inv.auth_inv_id: #print inv.auth_inv_id.id detallecompras = etree.Element('detalleCompras') #if inv.sustento_id.code == '00': # raise osv.except_osv(_('UserError'), _('Codigo de sustento no puede ser 00')) if inv.type == 'liq_purchase': etree.SubElement(detallecompras, 'codSustento').text = '02' else: etree.SubElement(detallecompras, 'codSustento').text = inv.sustento_id.code if not inv.partner_id.parent_id: if not inv.partner_id.ced_ruc: raise osv.except_osv( 'Datos incompletos', 'No ha ingresado Ced/RUC de %s' % inv.partner_id.name) etree.SubElement(detallecompras, 'tpIdProv').text = tpIdProv[ inv.partner_id.type_ced_ruc] etree.SubElement(detallecompras, 'idProv').text = inv.partner_id.ced_ruc #etree.SubElement(detallecompras, 'razonSocial').text = self.elimina_tildes(inv.partner_id.name) else: etree.SubElement(detallecompras, 'tpIdProv').text = tpIdProv[ inv.partner_id.parent_id.type_ced_ruc] etree.SubElement( detallecompras, 'idProv').text = inv.partner_id.parent_id.ced_ruc #etree.SubElement(detallecompras, 'razonSocial').text = self.elimina_tildes(inv.partner_id.parent_id.name) if inv.auth_inv_id: tcomp = inv.auth_inv_id.type_id.code else: tcomp = '03' etree.SubElement(detallecompras, 'tipoComprobante').text = tcomp if not 
inv.partner_id.parent_id: etree.SubElement(detallecompras, 'tipoProv').text = tpProv[ inv.partner_id.property_account_position.name] else: etree.SubElement(detallecompras, 'tipoProv').text = tpProv[ inv.partner_id.parent_id.property_account_position. name] if inv.type == 'liq_purchase': if not inv.partner_id.parent_id: etree.SubElement( detallecompras, 'denoProv').text = self.elimina_tildes( inv.partner_id.name.replace('.', '')) else: etree.SubElement( detallecompras, 'denoProv').text = self.elimina_tildes( inv.partner_id.parent_id.name.replace('.', '')) if inv.partner_id.parte_relacion: etree.SubElement(detallecompras, 'parteRel').text = 'SI' else: etree.SubElement(detallecompras, 'parteRel').text = 'NO' etree.SubElement(detallecompras, 'fechaRegistro').text = self.convertir_fecha( inv.date_invoice) if inv.type in ('in_invoice', 'in_refund'): se = inv.auth_inv_id.serie_entidad pe = inv.auth_inv_id.serie_emision sec = '%09d' % int(inv.reference) auth = inv.auth_inv_id.name elif inv.type == 'liq_purchase': se = inv.journal_id.auth_id.serie_entidad pe = inv.journal_id.auth_id.serie_emision sec = inv.number[8:] auth = inv.journal_id.auth_id.name etree.SubElement(detallecompras, 'establecimiento').text = se etree.SubElement(detallecompras, 'puntoEmision').text = pe etree.SubElement(detallecompras, 'secuencial').text = sec etree.SubElement(detallecompras, 'fechaEmision').text = self.convertir_fecha( inv.date_invoice) #aumentar en la autorizacion si es o no electronica. 
#Para VENTA (cliente) va la autorizacion del SRI o electronica #validar 10 para dada por el SRI o 37 si es electronica etree.SubElement(detallecompras, 'autorizacion').text = auth etree.SubElement( detallecompras, 'baseNoGraIva' ).text = inv.amount_novat == 0 and '0.00' or '%.2f' % inv.amount_novat etree.SubElement( detallecompras, 'baseImponible').text = '%.2f' % inv.amount_vat_cero #imp_vat = inv.amount_vat_cero + inv.amount_vat etree.SubElement(detallecompras, 'baseImpGrav').text = '%.2f' % inv.amount_vat #etree.SubElement(detallecompras, 'baseImpGrav').text = '%.2f' %imp_vat etree.SubElement(detallecompras, 'baseImpExe').text = '0.00' etree.SubElement(detallecompras, 'montoIce').text = '%.2f' % inv.amount_ice if inv.reference == '10699': print inv.payment_ids # if inv.date_invoice >= '2016-06-01': # etree.SubElement(detallecompras, 'montoIva').text = '%.2f' %(inv.amount_vat * 0.14) # else: # etree.SubElement(detallecompras, 'montoIva').text = '%.2f' %(inv.amount_vat * 0.12) etree.SubElement(detallecompras, 'montoIva').text = '%.2f' % inv.amount_tax etree.SubElement(detallecompras, 'valRetBien10').text = '0.00' etree.SubElement(detallecompras, 'valRetServ20').text = '0.00' if inv.reference == '277': print inv.amount_tax == abs( inv.taxed_ret_vatsrv) and '%.2f' % abs( inv.taxed_ret_vatsrv) or '0.00' etree.SubElement( detallecompras, 'valorRetBienes').text = '%.2f' % abs(inv.taxed_ret_vatb) etree.SubElement(detallecompras, 'valRetServ50').text = '0.00' if inv.amount_tax > abs(inv.taxed_ret_vatsrv): etree.SubElement(detallecompras, 'valorRetServicios').text = '%.2f' % abs( inv.taxed_ret_vatsrv) else: etree.SubElement(detallecompras, 'valorRetServicios').text = '0.00' if inv.amount_tax == abs(inv.taxed_ret_vatsrv): etree.SubElement(detallecompras, 'valRetServ100').text = '%.2f' % abs( inv.taxed_ret_vatsrv) else: etree.SubElement(detallecompras, 'valRetServ100').text = '0.00' # etree.SubElement(detallecompras, 'valorRetServicios').text = aux_ret_srv = 
inv.amount_tax==inv.taxed_ret_vatsrv and '%.2f' %abs(inv.taxed_ret_vatsrv) or '0.00' #'%.2f' % abs(inv.taxed_ret_vatsrv) # etree.SubElement(detallecompras, 'valRetServ100').text = aux_ret_100 = inv.amount_tax==abs(inv.taxed_ret_vatsrv) and '%.2f' %abs(inv.taxed_ret_vatsrv) or '0.00' etree.SubElement(detallecompras, 'totbasesImpReemb').text = '0.00' pago_exterior = etree.Element('pagoExterior') if inv.pago_ext: etree.SubElement(pago_exterior, 'pagoLocExt').text = '02' etree.SubElement(pago_exterior, 'tipoRegi').text = inv.tipo_RegFis #etree.SubElement(pago_exterior, 'paisEfecPago').text = inv.pais.code2 or '000' # if inv.doble_tributo: # etree.SubElement(pago_exterior, 'aplicConvDobTrib').text = 'SI' # else: # etree.SubElement(pago_exterior, 'aplicConvDobTrib').text = 'NO' # if inv.ext_retencion: # etree.SubElement(pago_exterior, 'pagExtSujRetNorLeg').text = 'SI' # elif inv.doble_tributo: # etree.SubElement(pago_exterior, 'pagExtSujRetNorLeg').text = 'NA' # else: # etree.SubElement(pago_exterior, 'pagExtSujRetNorLeg').text = 'NO' # if inv.pago_RegFis: # etree.SubElement(pago_exterior, 'pagoRegFis').text = 'SI' #else: # etree.SubElement(pago_exterior, 'pagoRegFis').text = 'NO' # # Camio para la versi'on del 31 de agosto de 2016 # if inv.tipo_RegFis == '01': etree.SubElement( pago_exterior, 'paisEfecPagoGen').text = inv.pais_RegGen.code2 elif inv.tipo_RegFis == '02': etree.SubElement( pago_exterior, 'paisEfecPagoParFis').text = inv.pais_ParFis.code2 elif inv.tipo_RegFis == '03': etree.SubElement(pago_exterior, 'denopagoRegFis').text = inv.deno_pago etree.SubElement( pago_exterior, 'paisEfecPago').text = inv.pais.code2 or '000' if inv.doble_tributo: etree.SubElement(pago_exterior, 'aplicConvDobTrib').text = 'SI' etree.SubElement(pago_exterior, 'pagExtSujRetNorLeg').text = 'NA' else: etree.SubElement(pago_exterior, 'aplicConvDobTrib').text = 'NO' if inv.ext_retencion: etree.SubElement(pago_exterior, 'pagExtSujRetNorLeg').text = 'SI' else: etree.SubElement(pago_exterior, 
'pagExtSujRetNorLeg').text = 'NO' else: etree.SubElement(pago_exterior, 'pagoLocExt').text = '01' etree.SubElement(pago_exterior, 'paisEfecPago').text = 'NA' etree.SubElement(pago_exterior, 'aplicConvDobTrib').text = 'NA' etree.SubElement(pago_exterior, 'pagExtSujRetNorLeg').text = 'NA' detallecompras.append(pago_exterior) if inv.reference == '6877': print inv.payment_ids if (inv.amount_novat + inv.amount_tax + inv.amount_vat_cero + inv.amount_vat) >= 1000: #if inv.amount_pay > 1000: if inv.payment_ids: #raise osv.except_osv('Datos incompletos', 'Las facturas con montos de bases imponibles e impuestos superiores a 1000 requieren la forma de pago') #else: #agregar campo CODIGO ATS en diario (efectivo, banco-cheques) #validar que el diario sea efectivo, banco-cheques #que el numero no sea 00 #valores catalogo tabla 16 #etree.SubElement(forma_pago, 'formaPago').text = formaPago[i.journal_id.name] forma_pago = etree.Element('formasDePago') for i in inv.payment_ids: if not i.journal_id.payment_method: raise osv.except_osv( 'Datos incompletos', 'No ha definido forma de pago del diario %s' % i.journal_id.name) etree.SubElement( forma_pago, 'formaPago').text = i.journal_id.payment_method detallecompras.append(forma_pago) else: if inv.type != 'in_refund': forma_pago = etree.Element('formasDePago') if inv.pago_ext: etree.SubElement(forma_pago, 'formaPago').text = '09' else: etree.SubElement(forma_pago, 'formaPago').text = '02' detallecompras.append(forma_pago) #else: # forma_pago = etree.Element('formasDePago') # etree.SubElement(forma_pago, 'formaPago').text = '01' # detallecompras.append(forma_pago) air = etree.Element('air') if inv.retention_ir: if inv.reference == '277': print inv data_air = self.process_lines(cr, uid, inv.tax_line) for j, tax in data_air.items(): if tax['codRetAir'] in ('604'): continue detalleAir = etree.Element('detalleAir') etree.SubElement(detalleAir, 'codRetAir').text = tax['codRetAir'] etree.SubElement( detalleAir, 'baseImpAir').text = '%.2f' % 
tax['baseImpAir'] etree.SubElement(detalleAir, 'porcentajeAir' ).text = '%.2f' % tax['porcentajeAir'] etree.SubElement( detalleAir, 'valRetAir').text = '%.2f' % tax['valRetAir'] air.append(detalleAir) detallecompras.append(air) flag = False if inv.retention_ir or inv.retention_vat: flag = True if inv.retention_id: etree.SubElement( detallecompras, 'estabRetencion1' ).text = flag and inv.journal_id.auth_ret_id.serie_entidad or '000' etree.SubElement( detallecompras, 'ptoEmiRetencion1' ).text = flag and inv.journal_id.auth_ret_id.serie_emision or '000' etree.SubElement( detallecompras, 'secRetencion1' ).text = flag and inv.retention_id.number[6:] or '%09d' % 0 etree.SubElement( detallecompras, 'autRetencion1' ).text = flag and inv.journal_id.auth_ret_id.name or '%010d' % 0 etree.SubElement( detallecompras, 'fechaEmiRet1').text = flag and self.convertir_fecha( inv.retention_id.date) or '00/00/0000' if inv.type == 'in_refund': etree.SubElement(detallecompras, 'docModificado').text = '01' etree.SubElement(detallecompras, 'estabModificado' ).text = inv.supplier_invoice_number[:3] etree.SubElement(detallecompras, 'ptoEmiModificado' ).text = inv.supplier_invoice_number[4:7] etree.SubElement( detallecompras, 'secModificado').text = inv.supplier_invoice_number[8:] ndd_ids = inv_obj.search( cr, uid, [ ('state', 'in', ['open', 'paid']), #('period_id','=',period_id), ('type', 'in', ['in_invoice']), ('supplier_invoice_number', '=', inv.supplier_invoice_number), ('company_id', '=', wiz.company_id.id) ]) if ndd_ids: for ndd in inv_obj.browse(cr, uid, ndd_ids): etree.SubElement( detallecompras, 'autModificado').text = ndd.auth_inv_id.name compras.append(detallecompras) ats.append(compras) """VENTAS DECLARADAS""" ventas = etree.Element('ventas') inv_ids = inv_obj.search( cr, uid, [('state', 'in', ['open', 'paid']), ('period_id', '=', period_id), ('type', 'in', ['out_invoice', 'out_refund']), ('company_id', '=', wiz.company_id.id)]) pdata = {} base_imponible = 0.0 compensa_ventas = 
0.0 for inv in inv_obj.browse(cr, uid, inv_ids): if not inv.partner_id.parent_id: partner_id = inv.partner_id.id switch = 0 if inv.partner_id.ced_ruc == '1707961346001': print '' else: if inv.partner_id.parent_id.ced_ruc == '1707961346001': print '' partner_id = inv.partner_id.parent_id.id switch = 1 if not pdata or not pdata.get(partner_id, False): if inv.partner_id.parte_relacion: parte_rel = 'SI' else: parte_rel = 'NO' if switch == 0: partner_data = { inv.partner_id.id: { 'tpIdCliente': inv.partner_id.type_ced_ruc, 'idCliente': inv.partner_id.ced_ruc, 'parteRelVtas': parte_rel, 'numeroComprobantes': 0, 'basenoGraIva': 0, 'baseImponible': 0, 'baseImpGrav': 0, 'montoIva': 0, 'montoIce': 0, 'valorRetRenta': 0, 'valorRetIva': 0, 'compensacion': 0 } } else: partner_data = { inv.partner_id.parent_id.id: { 'tpIdCliente': inv.partner_id.parent_id.type_ced_ruc, 'idCliente': inv.partner_id.parent_id.ced_ruc, 'parteRelVtas': parte_rel, 'numeroComprobantes': 0, 'basenoGraIva': 0, 'baseImponible': 0, 'baseImpGrav': 0, 'montoIva': 0, 'montoIce': 0, 'valorRetRenta': 0, 'valorRetIva': 0, 'compensacion': 0 } } pdata.update(partner_data) if inv.type == 'out_refund': pdata[partner_id]['tipoComprobante'] = '04' else: pdata[partner_id]['tipoComprobante'] = '18' pdata[partner_id]['numeroComprobantes'] += 1 if not elect_obj: pdata[partner_id]['tipoEmision'] = 'F' else: invid = elect_obj.search(cr, uid, [('invoice_id', '=', inv.id)]) if invid: pdata[partner_id]['tipoEmision'] = 'E' else: pdata[partner_id]['tipoEmision'] = 'F' pdata[partner_id]['basenoGraIva'] += inv.amount_novat #base_imponible = inv.amount_vat / 0.12 base_imponible = inv.amount_vat #pdata[partner_id]['baseImponible'] += inv.amount_tax pdata[partner_id]['baseImponible'] += inv.amount_vat_cero pdata[partner_id]['baseImpGrav'] += base_imponible pdata[partner_id]['montoIva'] += inv.amount_tax self.__logger.info('pdata[partner_id][montoIce] %s - %s' % (partner_id, pdata[partner_id]['montoIce'])) 
pdata[partner_id]['montoIce'] += inv.amount_ice data_compensa = self.process_lines(cr, uid, inv.tax_line) if inv.retention_ir: data_air = self.process_lines(cr, uid, inv.tax_line) for j, tax in data_air.items(): if tax['codRetAir'] not in ('604'): pdata[partner_id]['valorRetRenta'] += tax['valRetAir'] else: compensa_ventas += tax['valRetAir'] pdata[partner_id]['valorRetIva'] += abs(inv.taxed_ret_vatb) + abs( inv.taxed_ret_vatsrv) pdata[partner_id]['reference'] = inv.reference base_imponible = 0.0 for k, v in pdata.items(): detalleVentas = etree.Element('detalleVentas') etree.SubElement( detalleVentas, 'tpIdCliente').text = tpIdCliente[v['tpIdCliente']] etree.SubElement(detalleVentas, 'idCliente').text = v['idCliente'] etree.SubElement(detalleVentas, 'parteRelVtas').text = v['parteRelVtas'] etree.SubElement(detalleVentas, 'tipoComprobante').text = v['tipoComprobante'] etree.SubElement(detalleVentas, 'tipoEmision').text = v['tipoEmision'] etree.SubElement(detalleVentas, 'numeroComprobantes').text = str( v['numeroComprobantes']) etree.SubElement(detalleVentas, 'baseNoGraIva').text = '%.2f' % v['basenoGraIva'] etree.SubElement( detalleVentas, 'baseImponible').text = '%.2f' % v['baseImponible'] #base_imponible = v['montoIva'] / 0.12 etree.SubElement(detalleVentas, 'baseImpGrav').text = '%.2f' % v['baseImpGrav'] #etree.SubElement(detalleVentas, 'baseImpGrav').text = '%.2f' % base_imponible etree.SubElement(detalleVentas, 'montoIva').text = '%.2f' % v['montoIva'] if data_compensa.items(): compensacion = etree.Element('compensaciones') for j, compensa in data_compensa.items(): compe = etree.Element('compensacion') if compensa['codRetAir'] == '604': etree.SubElement(compe, 'tipoCompe').text = '01' else: etree.SubElement(compe, 'tipoCompe').text = '02' etree.SubElement( compe, 'monto').text = '%.2f' % compensa['valRetAir'] compensacion.append(compe) detalleVentas.append(compensacion) etree.SubElement(detalleVentas, 'montoIce').text = '%.2f' % v['montoIce'] 
etree.SubElement(detalleVentas, 'valorRetIva').text = '%.2f' % v['valorRetIva'] etree.SubElement( detalleVentas, 'valorRetRenta').text = '%.2f' % v['valorRetRenta'] if inv.payment_ids: forma_pago = etree.Element('formasDePago') for i in inv.payment_ids: if not i.journal_id.payment_method: raise osv.except_osv( 'Datos incompletos', 'No ha definido forma de pago del diario %s' % i.journal_id.name) etree.SubElement( forma_pago, 'formaPago').text = i.journal_id.payment_method detalleVentas.append(forma_pago) else: if inv.type != 'in_refund': forma_pago = etree.Element('formasDePago') if inv.pago_ext: etree.SubElement(forma_pago, 'formaPago').text = '09' else: etree.SubElement(forma_pago, 'formaPago').text = '02' detalleVentas.append(forma_pago) detalleVentas.append(forma_pago) ventas.append(detalleVentas) ats.append(ventas) ventas_establecimiento = etree.Element('ventasEstablecimiento') ventas_est = etree.Element('ventaEst') #Cambiar al campo de la configuracion de la CO. etree.SubElement(ventas_est, 'codEstab').text = estabRuc etree.SubElement(ventas_est, 'ventasEstab').text = total_ventas etree.SubElement(ventas_est, 'ivaComp').text = '%.2f' % compensa_ventas ventas_establecimiento.append(ventas_est) ats.append(ventas_establecimiento) """Documentos Anulados""" anulados = etree.Element('anulados') inv_ids = inv_obj.search(cr, uid, [('state', '=', 'cancel'), ('period_id', '=', period_id), ('type', '=', 'out_invoice'), ('company_id', '=', wiz.company_id.id)]) for inv in inv_obj.browse(cr, uid, inv_ids): detalleAnulados = etree.Element('detalleAnulados') etree.SubElement( detalleAnulados, 'tipoComprobante').text = inv.journal_id.auth_id.type_id.code etree.SubElement( detalleAnulados, 'establecimiento').text = inv.journal_id.auth_id.serie_entidad etree.SubElement( detalleAnulados, 'puntoEmision').text = inv.journal_id.auth_id.serie_emision etree.SubElement(detalleAnulados, 'secuencialInicio').text = str( int(inv.number[8:])) etree.SubElement(detalleAnulados, 
'secuencialFin').text = str(int(inv.number[8:])) etree.SubElement(detalleAnulados, 'autorizacion').text = inv.journal_id.auth_id.name anulados.append(detalleAnulados) liq_ids = inv_obj.search(cr, uid, [('state', '=', 'cancel'), ('period_id', '=', period_id), ('type', '=', 'liq_purchase'), ('company_id', '=', wiz.company_id.id)]) for inv in inv_obj.browse(cr, uid, liq_ids): detalleAnulados = etree.Element('detalleAnulados') etree.SubElement( detalleAnulados, 'tipoComprobante').text = inv.journal_id.auth_id.type_id.code etree.SubElement( detalleAnulados, 'establecimiento').text = inv.journal_id.auth_id.serie_entidad etree.SubElement( detalleAnulados, 'puntoEmision').text = inv.journal_id.auth_id.serie_emision etree.SubElement(detalleAnulados, 'secuencialInicio').text = str( int(inv.number[8:])) etree.SubElement(detalleAnulados, 'secuencialFin').text = str(int(inv.number[8:])) etree.SubElement(detalleAnulados, 'autorizacion').text = inv.journal_id.auth_id.name anulados.append(detalleAnulados) retention_obj = self.pool.get('account.retention') ret_ids = retention_obj.search( cr, uid, [('state', '=', 'cancel'), ('in_type', '=', 'ret_out_invoice'), ('date', '>=', wiz.period_id.date_start), ('date', '<=', wiz.period_id.date_stop)]) for ret in retention_obj.browse(cr, uid, ret_ids): detalleAnulados = etree.Element('detalleAnulados') etree.SubElement(detalleAnulados, 'tipoComprobante').text = ret.auth_id.type_id.code etree.SubElement( detalleAnulados, 'establecimiento').text = ret.auth_id.serie_entidad etree.SubElement(detalleAnulados, 'puntoEmision').text = ret.auth_id.serie_emision etree.SubElement(detalleAnulados, 'secuencialInicio').text = str( int(ret.number[8:])) etree.SubElement(detalleAnulados, 'secuencialFin').text = str(int(ret.number[8:])) etree.SubElement(detalleAnulados, 'autorizacion').text = ret.auth_id.name anulados.append(detalleAnulados) ats.append(anulados) file_path = os.path.join(os.path.dirname(__file__), 'XSD/at.xsd') schema_file = open(file_path) 
file_ats = etree.tostring(ats, pretty_print=True, encoding='iso-8859-1') #validata schema xmlschema_doc = etree.parse(schema_file) xmlschema = etree.XMLSchema(xmlschema_doc) if not wiz.no_validate: try: xmlschema.assertValid(ats) except DocumentInvalid as e: raise osv.except_osv( 'Error de Datos', """El sistema generó el XML pero los datos no pasan la validación XSD del SRI. \nLos errores mas comunes son:\n* RUC,Cédula o Pasaporte contiene caracteres no válidos.\n* Números de documentos están duplicados.\n\nEl siguiente error contiene el identificador o número de documento en conflicto:\n\n %s""" % str(e)) buf = StringIO.StringIO() buf.write(file_ats) out = base64.encodestring(buf.getvalue()) buf.close() name = "%s%s%s.xml" % ( "AT", time.strftime('%m', time.strptime(period.date_start, '%Y-%m-%d')), time.strftime('%Y', time.strptime(period.date_start, '%Y-%m-%d'))) self.write(cr, uid, ids, { 'state': 'export', 'data': out, 'name': name }) return { 'view_type': 'form', 'view_mode': 'form', 'res_model': 'wizard.ats', 'target': 'new', 'res_id': ids[0], 'type': 'ir.actions.act_window', 'context': "{ 'active_model': 'wizard.ats', 'active_id': %s }" % (ids[0]) }
from quantityfield import ureg from copy import deepcopy try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from lxml import etree, objectify import probablepeople as pp import usaddress as usadd _log = logging.getLogger(__name__) here = os.path.dirname(os.path.abspath(__file__)) hpxml_parser = objectify.makeparser(schema=etree.XMLSchema( etree.parse(os.path.join(here, 'schemas', 'HPXML.xsd')))) class HPXMLError(Exception): pass class HPXML(object): NS = 'http://hpxmlonline.com/2014/6' HPXML_STRUCT = { 'address_line_1': { 'path': 'h:Site/h:Address/h:Address1', }, 'address_line_2': { 'path': 'h:Site/h:Address/h:Address2',
xsd_path2 = '/home/lsy/LTBPCommonTypes.xsd'
xml_path = '/home/lsy/Desktop/NDE001.xml'


def validate(xmlparser, xmlfilename):
    """Parse *xmlfilename* with the schema-aware *xmlparser*.

    Returns True when the document parses (and therefore validates)
    cleanly, False when parsing or validation fails.
    """
    try:
        with open(xmlfilename, 'r') as f:
            etree.fromstring(f.read(), xmlparser)
        return True
    # BUG FIX: a parser built with XMLParser(schema=...) reports a
    # validation failure as etree.XMLSyntaxError; etree.XMLSchemaError
    # is raised only while *compiling* a schema, so the original
    # except clause let validation failures propagate instead of
    # returning False.
    except (etree.XMLSyntaxError, etree.XMLSchemaError):
        return False


# Script body: compile the first schema and validate the sample document.
# NOTE(review): xsd_path1 is defined earlier in this file, outside this view.
with open(xsd_path1, 'r') as xsd_file:  # 'with' fixes the leaked file handles
    schema_doc = etree.parse(StringIO.StringIO(xsd_file.read()))
schema = etree.XMLSchema(schema_doc)
with open(xml_path, 'r') as xml_file:
    doc = etree.parse(StringIO.StringIO(xml_file.read()))
# assertValid() raises DocumentInvalid on failure, returns None on success.
print(schema.assertValid(doc))

# setBridgeInfo("10","testing", "01")
# insert_inspector("Siyuan", "Li", "infratek")
# insert_inspector("Meng", "Xiao", "infratek")
# Test = setTestInfo("2017-10-13", 20, 20, 2.2, "warren county")
# GetOutPut("/home/lsy/Major Data Sets/George Washington Bus Bridge (Unit 3)/Region 01/ER/ER.ER",
#           ["/home/lsy/Major Data Sets/George Washington Bus Bridge (Unit 3)/Region 01/AA/AA_0",
#            "/home/lsy/Major Data Sets/George Washington Bus Bridge (Unit 3)/Region 01/AA/AA_1"], "/home/lsy/Desktop", "test")
def parse_xml_string(xml_data_raw):
    """Validate, signature-check and persist an incoming node XML document.

    Returns True when the document passes schema validation, signature
    verification and the service-counter check; False otherwise.
    All elements must carry xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    in the root for the signature verification to work.
    """
    # Read the xml schema and create a schema object.
    with open('index-schema.xsd', 'r') as schema_file:
        schema_raw = et.XML(schema_file.read())
    schema = et.XMLSchema(schema_raw)
    # Wrap the payload in a <root> element so the schema can validate it.
    root = et.Element("root")
    root.append(et.XML(xml_data_raw))
    xml_data = et.tostring(root)
    parser = et.XMLParser(schema=schema)
    try:
        root = et.fromstring(xml_data, parser)
    # Narrowed from a bare except: a schema-enabled parser reports
    # validation failures as XMLSyntaxError.
    except et.XMLSyntaxError:
        print('Schema validation failed!')
        return False
    # Re-parse the raw payload so 'root' is the node element itself.
    root = et.fromstring(xml_data_raw)
    # Check iii data version.
    if root.attrib['iiiVersion'] != '1':
        print('Node is not from this version and not supported')
        return False
    node_type = root.tag
    services = []
    for child in root:
        if child.tag == 'address':
            address = child.text
        elif child.tag == 'sign':
            signature = child.text
            salt = child.attrib['salt']
        elif child.tag == 'services':
            data = et.tostring(child).decode('utf8')
            for service in child:
                services.append(service)
    try:
        crypto = Cryptographer(address, True)
        if crypto.verify_data(data, signature, salt):
            print('Signature verified successfully!')
        else:
            print('Signature failed to verify')
            return False
    # Narrowed from a bare except so Ctrl-C/SystemExit are not swallowed;
    # still catches NameError when address/sign/services elements are
    # missing, preserving the original best-effort behaviour.
    except Exception:
        print('Invalid key / key is corrupt')
        return False
    services_copy = services.copy()
    # Verify each service counter is the latest we have on record.
    check = XMLIndex.get_data(node_type, address)[0]
    if check is not None:
        for child in check:
            if child.tag == 'services':
                for check_service in child:
                    check_version = check_service.attrib['version']
                    check_counter = int(check_service.attrib['counter'])
                    # Iterate a snapshot: removing from the list being
                    # iterated skips elements (original bug).
                    for service in list(services_copy):
                        # BUG FIX: the original read the version from
                        # check_service, making this comparison always
                        # true, so any service with a newer counter was
                        # accepted regardless of its version.
                        version = service.attrib['version']
                        counter = int(service.attrib['counter'])
                        if check_version == version:
                            if counter > check_counter:
                                services_copy.remove(service)
        if len(services_copy) != 0:
            print('Service counters do not match!')
            return False
    # Parse any special tags (DELETE drops the service from the tree).
    for service in services:
        for child in service:
            if child.tag == 'tags':
                for tag in child:
                    if tag.tag == 'DELETE':
                        service.getparent().remove(service)
    # Send data to write method and socket server.
    # NOTE(review): at module level '__write_xml' is not name-mangled;
    # this only resolves if XMLIndex really exposes an attribute with
    # that literal name — confirm against XMLIndex's definition.
    XMLIndex.__write_xml(
        et.tostring(root).decode('utf8'), address, node_type)
    return True
def parsePackage(cntlr, filesource, metadataFile, fileBase, errors=None):
    """Parse a taxonomy-package metadata file (or an OASIS catalog).

    Returns a dict with package metadata ('name', 'version', ...), URL
    'remappings' and 'entryPoints'. Validation problems are logged via
    cntlr.addToLog and their message codes appended to *errors*.

    BUG FIX: the original default ``errors=[]`` was a shared mutable
    default, so error codes accumulated across successive calls that
    omitted the argument. Callers that pass their own list are
    unaffected.
    """
    if errors is None:
        errors = []
    global ArchiveFileIOError
    if ArchiveFileIOError is None:
        from arelle.FileSource import ArchiveFileIOError
    unNamedCounter = 1
    # Namespaces of the taxonomy-package drafts/RECs this parser accepts.
    txmyPkgNSes = ("http://www.corefiling.com/xbrl/taxonomypackage/v1",
                   "http://xbrl.org/PWD/2014-01-15/taxonomy-package",
                   "http://xbrl.org/PWD/2015-01-14/taxonomy-package",
                   "http://xbrl.org/PR/2015-12-09/taxonomy-package",
                   "http://xbrl.org/2016/taxonomy-package",
                   "http://xbrl.org/WGWD/YYYY-MM-DD/taxonomy-package")
    catalogNSes = ("urn:oasis:names:tc:entity:xmlns:xml:catalog", )
    pkg = {}
    currentLang = Locale.getLanguageCode()
    _file = filesource.file(metadataFile)[
        0]  # URL in zip, plain file in file system or web
    parser = lxmlResolvingParser(cntlr)
    try:
        tree = etree.parse(_file, parser=parser)
        # schema validate tp xml
        xsdTree = etree.parse(TP_XSD, parser=parser)
        etree.XMLSchema(xsdTree).assertValid(tree)
    except (etree.XMLSyntaxError, etree.DocumentInvalid) as err:
        cntlr.addToLog(_("Taxonomy package file syntax error %(error)s"),
                       messageArgs={"error": str(err)},
                       messageCode="tpe:invalidMetaDataFile",
                       file=os.path.basename(metadataFile),
                       level=logging.ERROR)
        errors.append("tpe:invalidMetaDataFile")
        return pkg
    root = tree.getroot()
    # Namespace of the root element decides package vs. catalog handling.
    ns = root.tag.partition("}")[0][1:]
    nsPrefix = "{{{}}}".format(ns)
    if ns in txmyPkgNSes:  # package file
        # Simple single-valued metadata elements (first occurrence wins).
        for eltName in ("identifier", "version", "license", "publisher",
                        "publisherURL", "publisherCountry",
                        "publicationDate"):
            pkg[eltName] = ''
            for m in root.iterchildren(tag=nsPrefix + eltName):
                if eltName == "license":
                    pkg[eltName] = m.get("name")
                else:
                    pkg[eltName] = (m.text or "").strip()
                break  # take first entry if several
        # Multi-lingual elements: pick the value whose xml:lang is
        # closest to the current UI language, falling back to English.
        for eltName in ("name", "description"):
            closest = ''
            closestLen = 0
            for m in root.iterchildren(tag=nsPrefix + eltName):
                s = (m.text or "").strip()
                eltLang = xmlLang(m)
                l = langCloseness(eltLang, currentLang)
                if l > closestLen:
                    closestLen = l
                    closest = s
                elif closestLen == 0 and eltLang.startswith("en"):
                    closest = s  # pick english if nothing better
            if not closest and eltName == "name":
                # assign default name when none in taxonomy package
                closest = os.path.splitext(os.path.basename(
                    filesource.baseurl))[0]
            pkg[eltName] = closest
        for eltName in ("supersededTaxonomyPackages", "versioningReports"):
            pkg[eltName] = []
        for m in root.iterchildren(tag=nsPrefix +
                                   "supersededTaxonomyPackages"):
            pkg['supersededTaxonomyPackages'] = [
                r.text.strip()
                for r in m.iterchildren(tag=nsPrefix + "taxonomyPackageRef")
            ]
        for m in root.iterchildren(tag=nsPrefix + "versioningReports"):
            pkg['versioningReports'] = [
                r.get("href")
                for r in m.iterchildren(tag=nsPrefix + "versioningReport")
            ]
        # check for duplicate multi-lingual elements (among children of nodes)
        langElts = defaultdict(list)
        for n in root.iter(tag=nsPrefix + "*"):
            for eltName in ("name", "description", "publisher"):
                langElts.clear()
                for m in n.iterchildren(tag=nsPrefix + eltName):
                    langElts[xmlLang(m)].append(m)
                for lang, elts in langElts.items():
                    if not lang:
                        cntlr.addToLog(
                            _("Multi-lingual element %(element)s has no in-scope xml:lang attribute"
                              ),
                            messageArgs={"element": eltName},
                            messageCode="tpe:missingLanguageAttribute",
                            refs=[{
                                "href": os.path.basename(metadataFile),
                                "sourceLine": m.sourceline
                            } for m in elts],
                            level=logging.ERROR)
                        errors.append("tpe:missingLanguageAttribute")
                    elif len(elts) > 1:
                        cntlr.addToLog(
                            _("Multi-lingual element %(element)s has multiple (%(count)s) in-scope xml:lang %(lang)s elements"
                              ),
                            messageArgs={
                                "element": eltName,
                                "lang": lang,
                                "count": len(elts)
                            },
                            messageCode="tpe:duplicateLanguagesForElement",
                            refs=[{
                                "href": os.path.basename(metadataFile),
                                "sourceLine": m.sourceline
                            } for m in elts],
                            level=logging.ERROR)
                        errors.append("tpe:duplicateLanguagesForElement")
        del langElts  # dereference
    else:  # oasis catalog, use dirname as the package name
        # metadataFile may be a File object (with name) or string filename
        fileName = getattr(
            metadataFile,
            'fileName',  # for FileSource named objects
            getattr(
                metadataFile,
                'name',  # for io.file named objects
                metadataFile))  # for string
        pkg["name"] = os.path.basename(os.path.dirname(fileName))
        pkg["description"] = "oasis catalog"
        pkg["version"] = "(none)"
    remappings = {}
    rewriteTree = tree
    catalogFile = metadataFile
    # Newer package versions keep URL rewrites in a sibling catalog.xml.
    if ns in ("http://xbrl.org/PWD/2015-01-14/taxonomy-package",
              "http://xbrl.org/PR/2015-12-09/taxonomy-package",
              "http://xbrl.org/WGWD/YYYY-MM-DD/taxonomy-package",
              "http://xbrl.org/2016/taxonomy-package",
              "http://xbrl.org/REC/2016-04-19/taxonomy-package"):
        catalogFile = metadataFile.replace('taxonomyPackage.xml',
                                           'catalog.xml')
        try:
            rewriteTree = etree.parse(filesource.file(catalogFile)[0],
                                      parser=parser)
            # schema validate tp xml
            xsdTree = etree.parse(CAT_XSD, parser=parser)
            etree.XMLSchema(xsdTree).assertValid(rewriteTree)
        except (etree.XMLSyntaxError, etree.DocumentInvalid) as err:
            cntlr.addToLog(_("Catalog file syntax error %(error)s"),
                           messageArgs={"error": str(err)},
                           messageCode="tpe:invalidCatalogFile",
                           file=os.path.basename(metadataFile),
                           level=logging.ERROR)
            errors.append("tpe:invalidCatalogFile")
        except ArchiveFileIOError:
            pass  # catalog.xml is optional; missing file is not an error
    # Collect URL prefix remappings from both package-style <remapping>
    # elements and OASIS catalog rewrite entries.
    for tag, prefixAttr, replaceAttr in (
        (nsPrefix + "remapping", "prefix", "replaceWith"),  # taxonomy package
        ("{urn:oasis:names:tc:entity:xmlns:xml:catalog}rewriteSystem",
         "systemIdStartString", "rewritePrefix"),
        ("{urn:oasis:names:tc:entity:xmlns:xml:catalog}rewriteURI",
         "uriStartString", "rewritePrefix")):  # oasis catalog
        for m in rewriteTree.iter(tag=tag):
            prefixValue = m.get(prefixAttr)
            replaceValue = m.get(replaceAttr)
            if prefixValue and replaceValue is not None:
                if prefixValue not in remappings:
                    base = baseForElement(m)
                    if base:
                        replaceValue = os.path.join(base, replaceValue)
                    if replaceValue:  # neither None nor ''
                        if not isAbsolute(replaceValue):
                            if not os.path.isabs(replaceValue):
                                replaceValue = fileBase + replaceValue
                            replaceValue = replaceValue.replace("/", os.sep)
                    _normedValue = cntlr.webCache.normalizeUrl(replaceValue)
                    if replaceValue.endswith(
                            os.sep) and not _normedValue.endswith(os.sep):
                        _normedValue += os.sep
                    remappings[prefixValue] = _normedValue
                else:
                    cntlr.addToLog(
                        _("Package catalog duplicate rewrite start string %(rewriteStartString)s"
                          ),
                        messageArgs={"rewriteStartString": prefixValue},
                        messageCode="tpe:multipleRewriteURIsForStartString",
                        file=os.path.basename(catalogFile),
                        level=logging.ERROR)
                    errors.append("tpe:multipleRewriteURIsForStartString")
    pkg["remappings"] = remappings
    entryPoints = defaultdict(list)
    pkg["entryPoints"] = entryPoints
    for entryPointSpec in tree.iter(tag=nsPrefix + "entryPoint"):
        name = None
        closestLen = 0
        # find closest match name node given xml:lang match to current
        # language or no xml:lang
        for nameNode in entryPointSpec.iter(tag=nsPrefix + "name"):
            s = (nameNode.text or "").strip()
            nameLang = xmlLang(nameNode)
            l = langCloseness(nameLang, currentLang)
            if l > closestLen:
                closestLen = l
                name = s
            elif closestLen == 0 and nameLang.startswith("en"):
                name = s  # pick english if nothing better
        if not name:
            name = _("<unnamed {0}>").format(unNamedCounter)
            unNamedCounter += 1
        epDocCount = 0
        for epDoc in entryPointSpec.iterchildren(nsPrefix +
                                                 "entryPointDocument"):
            epUrl = epDoc.get('href')
            base = epDoc.get('{http://www.w3.org/XML/1998/namespace}base'
                             )  # cope with xml:base
            if base:
                resolvedUrl = urljoin(base, epUrl)
            else:
                resolvedUrl = epUrl
            epDocCount += 1
            # perform prefix remappings (longest matching prefix wins)
            remappedUrl = resolvedUrl
            longestPrefix = 0
            for mapFrom, mapTo in remappings.items():
                if remappedUrl.startswith(mapFrom):
                    prefixLength = len(mapFrom)
                    if prefixLength > longestPrefix:
                        _remappedUrl = remappedUrl[prefixLength:]
                        if not (_remappedUrl[0] in (os.sep, '/')
                                or mapTo[-1] in (os.sep, '/')):
                            _remappedUrl = mapTo + os.sep + _remappedUrl
                        else:
                            _remappedUrl = mapTo + _remappedUrl
                        longestPrefix = prefixLength
            if longestPrefix:
                remappedUrl = _remappedUrl.replace(
                    os.sep, "/")  # always used as FileSource select
            # find closest language description
            closest = ''
            closestLen = 0
            for m in entryPointSpec.iterchildren(tag=nsPrefix +
                                                 "description"):
                s = (m.text or "").strip()
                eltLang = xmlLang(m)
                l = langCloseness(eltLang, currentLang)
                if l > closestLen:
                    closestLen = l
                    closest = s
                elif closestLen == 0 and eltLang.startswith("en"):
                    closest = s  # pick english if nothing better
            if not closest and name:
                # assign default name when none in taxonomy package
                closest = name
            entryPoints[name].append((remappedUrl, resolvedUrl, closest))
    return pkg
def generate_new_definition(name, desc, dir, type, addr):
    """Create a service-definition XML for *dir* and write it to dir/.iii.

    Builds a <def>/<service> document containing the service description,
    address, a hash for every file under *dir*, and a version hash, then
    schema-checks it and writes it as dir/.iii/iii.xml.

    NOTE(review): parameters 'dir' and 'type' shadow builtins, but they
    are part of the public interface and are kept unchanged.
    """
    # create xml
    root = et.Element('def')
    # xml data
    service = et.SubElement(root, 'service')
    description = et.SubElement(service, 'desc')
    description.text = desc
    description.set('name', name)
    address = et.SubElement(service, 'address')
    address.text = addr
    data = et.SubElement(service, 'data')
    files = et.SubElement(data, 'files')
    # setup default files list
    # BUG FIX: the original pattern dir + '\\**\\*' hard-coded the
    # Windows separator, so on POSIX the recursive glob matched nothing;
    # os.path.join builds the correct pattern on every platform.
    root_directory = glob.glob(os.path.join(dir, '**', '*'), recursive=True)
    file_list = []
    for file in root_directory:
        new_path = os.path.relpath(os.path.realpath(file))
        file_list.append(new_path)
    for file in file_list:
        if os.path.isfile(file):
            file_el = et.SubElement(files, 'file')
            file_el.set('rdir', os.path.relpath(file, start=dir))
            file_el.set('type', 'static')
            file_hash = Cryptographer.generate_hash(message=None,
                                                    filepath=file)
            file_el.text = file_hash
        else:
            # Directories are recorded with a placeholder hash of '0'.
            file_el = et.SubElement(files, 'file')
            file_el.set('rdir', os.path.relpath(file, start=dir))
            file_el.set('type', 'static')
            file_el.text = '0'
    depend = et.SubElement(data, 'dependencies')
    tags = et.SubElement(data, 'tags')
    if type == 'resource':
        resource = et.SubElement(tags, 'resource')
    else:
        application = et.SubElement(tags, 'application')
        application.set('os', type)
    # Version is a hash of the serialized <data> subtree.
    version_hash = Cryptographer.generate_hash(
        et.tostring(data).decode('utf8'))
    service.set('version', version_hash)
    service.set('counter', '1')
    # create files
    folder = os.path.join(dir, '.iii')
    os.mkdir(folder)  # intentionally fails if .iii already exists
    copyfile('debug/iii2.xsd', os.path.join(folder, 'iii.xsd'))
    # change and replace iii.xsd with iii2.xsd later
    with open(os.path.join(dir, '.iii', 'iii.xsd'), 'r') as schema_file:
        schema_raw = et.XML(schema_file.read())
    schema = et.XMLSchema(schema_raw)
    parser = et.XMLParser(schema=schema)
    try:
        root_check = et.fromstring(
            et.tostring(root).decode('utf8'), parser)
    # Narrowed from a bare except: a schema-enabled parser reports a
    # validation failure as XMLSyntaxError. The original behaviour of
    # printing and still writing the file is preserved.
    except et.XMLSyntaxError:
        print('Schema validation failed!')
    tree = et.ElementTree(root)
    tree.write(os.path.join(dir, '.iii', 'iii.xml'), pretty_print=True)
def validate(xml_filename_or_content, xsd_filename=None,
             application_schema_ns=None, ogc_schemas_location=None,
             inspire_schemas_location=None):
    """Validate XML content (or a schema document) against its schema(s).

    :param xml_filename_or_content: filename of an XML document, or the
        raw XML text itself when it starts with '<'.
    :param xsd_filename: explicit application schema file; when None the
        sibling '.xsd' of the XML file is used.
    :param application_schema_ns: namespace URI of the application schema.
    :param ogc_schemas_location: local substitute for OGC schema URLs.
    :param inspire_schemas_location: local substitute for INSPIRE schema
        URLs.
    :return: True when the document validates, False otherwise (parse and
        validation errors are printed, not raised).
    """
    try:
        if xml_filename_or_content.startswith('<'):
            doc = etree.XML(xml_filename_or_content)
        else:
            doc = etree.XML(
                ingest_file_and_strip_mime(xml_filename_or_content))
    except etree.Error as e:
        print(str(e))
        return False

    # Special case: the document is itself a schema.  Rewrite its import
    # locations and check that it compiles as an XMLSchema.
    if doc.tag == '{http://www.w3.org/2001/XMLSchema}schema':
        for child in doc:
            if child.tag == '{http://www.w3.org/2001/XMLSchema}import':
                location = child.get('schemaLocation')
                location = substitute_ogc_schemas_location(
                    location, ogc_schemas_location)
                location = substitute_inspire_schemas_location(
                    location, inspire_schemas_location)
                child.set('schemaLocation', location)
        try:
            etree.XMLSchema(etree.XML(etree.tostring(doc)))
            return True
        except etree.Error as e:
            print(str(e))
            return False

    schema_locations = doc.get(
        "{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
    if schema_locations is None:
        print('No schemaLocation found')
        return False

    # Our stripped GetFeature documents have an empty timeStamp; put a
    # fake value there instead so validation does not trip on it.
    if doc.get('timeStamp') == '':
        doc.set('timeStamp', '1970-01-01T00:00:00Z')

    # schemaLocation alternates namespace / location pairs.
    locations = schema_locations.split()

    # Build a wrapper schema that imports every referenced schema.
    schema_def = etree.Element(
        "schema",
        attrib={
            "elementFormDefault": "qualified",
            "version": "1.0.0",
        },
        nsmap={None: "http://www.w3.org/2001/XMLSchema"})

    tempfiles = []
    import_dict = {}
    ret = False
    try:
        # Special case for the main application schema: materialize a
        # sanitized copy on disk and also import the schemas it imports.
        for ns, location in zip(locations[::2], locations[1::2]):
            if ns != application_schema_ns:
                continue
            if xsd_filename is not None:
                location = xsd_filename
            else:
                location = os.path.splitext(
                    xml_filename_or_content)[0] + '.xsd'
            # Remove mime-type header line if found to generate a
            # valid .xsd
            sanitized_content = ingest_file_and_strip_mime(location)
            location = '/tmp/tmpschema%d.xsd' % len(tempfiles)
            # 'with' replaces the previous unclosed-on-error open/close.
            with open(location, 'wb') as f:
                f.write(sanitized_content)
            tempfiles.append(location)
            xsd = etree.XML(sanitized_content)
            for child in xsd:
                if child.tag == '{http://www.w3.org/2001/XMLSchema}import':
                    sub_ns = child.get('namespace')
                    sub_location = child.get('schemaLocation')
                    sub_location = substitute_ogc_schemas_location(
                        sub_location, ogc_schemas_location)
                    sub_location = substitute_inspire_schemas_location(
                        sub_location, inspire_schemas_location)
                    etree.SubElement(schema_def, "import", attrib={
                        "namespace": sub_ns,
                        "schemaLocation": sub_location
                    })
                    import_dict[sub_ns] = sub_location
            etree.SubElement(schema_def, "import", attrib={
                "namespace": ns,
                "schemaLocation": location
            })
            import_dict[ns] = location

        # Add each remaining schemaLocation as an import.
        for ns, location in zip(locations[::2], locations[1::2]):
            if ns == application_schema_ns:
                continue
            location = substitute_ogc_schemas_location(
                location, ogc_schemas_location)
            location = substitute_inspire_schemas_location(
                location, inspire_schemas_location)
            if ns not in import_dict:
                etree.SubElement(schema_def, "import", attrib={
                    "namespace": ns,
                    "schemaLocation": location
                })
                import_dict[ns] = location

        # TODO: ugly workaround. But otherwise, the doc is not recognized
        # as schema
        # print(etree.tostring(schema_def))
        schema = etree.XMLSchema(etree.XML(etree.tostring(schema_def)))
        try:
            schema.assertValid(doc)
            ret = True
        except etree.Error as e:
            print(str(e))
            ret = False
    finally:
        # Previously the temp files leaked when schema construction
        # raised; always clean them up.
        for filename in tempfiles:
            os.remove(filename)
    return ret
def write_gpx(gpxFile, df, row_date="2016-01-01",
              notes="Exported by rowingdata"):
    """Write the rowing DataFrame *df* to *gpxFile* as a GPX track.

    Afterwards, attempts to download the GPX 1.1 schema and validate the
    written file (best effort; failures are printed, never raised).

    :param gpxFile: destination path for the GPX file.
    :param df: DataFrame with the rowingdata column layout
        ('TimeStamp (sec)', 'cum_dist', ' HRCur (bpm)', ...).
    :param row_date: date used when timestamps are relative (pre-2000).
    :param notes: free-text note; defaulted when None (currently not
        written into the output).
    :return: 1 on completion.
    """
    if notes is None:  # was '== None'
        notes = "Exported by rowingdata"

    # Summary statistics for the lap header.  HR values are clamped to a
    # minimum of 1 so downstream consumers never see a zero.
    totalseconds = int(df['TimeStamp (sec)'].max() -
                       df['TimeStamp (sec)'].min())
    totalmeters = int(df['cum_dist'].max())
    avghr = int(df[' HRCur (bpm)'].mean())
    if avghr == 0:
        avghr = 1
    maxhr = int(df[' HRCur (bpm)'].max())
    if maxhr == 0:
        maxhr = 1
    avgspm = int(df[' Cadence (stokes/min)'].mean())

    seconds = df['TimeStamp (sec)'].values
    distancemeters = df['cum_dist'].values
    heartrate = df[' HRCur (bpm)'].values.astype(int)
    cadence = np.round(df[' Cadence (stokes/min)'].values).astype(int)
    nr_rows = len(seconds)

    # GPS columns are optional; default to zeros when absent.
    try:
        lat = df[' latitude'].values
    except KeyError:
        lat = np.zeros(nr_rows)
    try:
        lon = df[' longitude'].values
    except KeyError:
        lon = np.zeros(nr_rows)

    haspower = 1
    try:
        power = df[' Power (watts)'].values
    except KeyError:
        haspower = 0

    # Timestamps before 2000-01-01 are treated as relative and shifted
    # onto row_date.
    tt = ps.parse("2000-01-01")
    # timezero=time.mktime(tt.timetuple())
    timezero = arrow.get(tt).timestamp
    if seconds[0] < timezero:
        # print("Taking Row_Date ",row_date)
        dateobj = ps.parse(row_date)
        # unixtimes=seconds+time.mktime(dateobj.timetuple())
        unixtimes = seconds + arrow.get(dateobj).timestamp
    else:
        # Previously 'unixtimes' was unbound on this path, causing a
        # NameError below; absolute timestamps are used as-is.
        unixtimes = seconds
    datetimestring = row_date

    # 'with' replaces the previous manual open/close pairs.
    with open(gpxFile, 'w') as f:
        lap_begin(f, datetimestring, totalmeters, avghr, maxhr, avgspm,
                  totalseconds)
        ts = datetime.datetime.fromtimestamp(unixtimes[0]).isoformat()
        f.write(
            '<time>{ts}</time></metadata><trk>'
            '<name>Export by rowingdata</name><trkseg>'.format(ts=ts))

        for i in range(nr_rows):
            f.write(' <trkpt lat="{lat}" lon="{lon}">\n'.format(
                lat=lat[i], lon=lon[i]))
            # s=datetime.datetime.fromtimestamp(unixtimes[i]).isoformat()
            f.write(' <time>{s}</time>\n'.format(
                s=arrow.get(unixtimes[i]).isoformat()))
            f.write(' </trkpt>\n')
        f.write('</trkseg>')
        f.write('</trk>')
        f.write('</gpx>')

    with open(gpxFile, 'r') as readback:
        some_xml_string = readback.read()

    # Best-effort validation: fetch the official GPX 1.1 schema and run
    # the file through it.  Any failure only prints a message.
    try:
        xsd_file = six.moves.urllib.request.urlopen(
            "http://www.topografix.com/GPX/1/1/gpx.xsd")
        with open('gpx.xsd', 'w') as output:
            if pythonversion <= 2:
                output.write(xsd_file.read().replace('\n', ''))
            else:
                output.write(
                    xsd_file.read().decode('utf-8').replace('\n', ''))
        xsd_filename = "gpx.xsd"
        # Run some tests
        try:
            tree = objectify.parse(gpxFile)
            try:
                schema = etree.XMLSchema(file=xsd_filename)
                parser = objectify.makeparser(schema=schema)
                objectify.fromstring(some_xml_string, parser)
                # print("YEAH!, your xml file has validated")
            except XMLSyntaxError:
                print("Oh NO!, your xml file does not validate")
        except Exception:  # was a bare 'except:'
            print("Oh No!, your xml file does not validate")
    except six.moves.urllib.error.URLError:
        print("cannot download GPX schema")
        print("your GPX file is unvalidated. Good luck")
    return 1