def create_rml(self, cr, xml, uid, context=None):
    """Apply the report's XSL stylesheet (if any) to the given XML.

    Resolves ``<import href="...">`` references relative to the
    stylesheet location, translates translatable text nodes (nodes
    carrying a ``t`` attribute, and their descendants) into the
    environment language, then runs the XSLT transform and returns the
    resulting XML string.

    :param cr: database cursor
    :param xml: source XML string to transform
    :param uid: user id
    :param context: optional context dict
    :return: the transformed XML string (or ``xml`` unchanged when the
        report has no XSL stylesheet)
    """
    # Reports without a template fall back to the internal header.
    if self.tmpl=='' and not self.internal_header:
        self.internal_header=True
    env = odoo.api.Environment(cr, uid, context or {})
    Translation = env['ir.translation']

    # In some case we might not use xsl ...
    if not self.xsl:
        return xml

    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        for import_child in stylesheet.findall('./import'):
            if 'href' in import_child.attrib:
                # Rewrite relative hrefs to resolved paths so the XSLT
                # processor can find the imported stylesheets.
                imp_file = import_child.get('href')
                _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                import_child.set('href', urllib.quote(str(imp_file)))
                # NOTE(review): with pathinfo=True file_open returns
                # (fileobj, path); `imp_file` here is the *path* string,
                # so this close() looks wrong and the discarded file
                # object appears to be leaked — confirm.
                imp_file.close()
    finally:
        stylesheet_file.close()

    #TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        translate_aux(doc, lang, False)

    def translate_aux(doc, lang, t):
        # Recursively translate text/tail of nodes: a "t" attribute on a
        # node enables translation for that node and its whole subtree.
        for node in doc:
            t = t or node.get("t")
            if t:
                text = None
                tail = None
                # Normalize whitespace before looking up the source term.
                if node.text:
                    text = node.text.strip().replace('\n',' ')
                if node.tail:
                    tail = node.tail.strip().replace('\n',' ')
                if text:
                    text1 = Translation._get_source(self.name2, 'xsl', lang, text)
                    if text1:
                        node.text = node.text.replace(text, text1)
                if tail:
                    tail1 = Translation._get_source(self.name2, 'xsl', lang, tail)
                    if tail1:
                        node.tail = node.tail.replace(tail, tail1)
            translate_aux(node, lang, t)

    if env.lang:
        translate(stylesheet.iter(), env.lang)

    transform = etree.XSLT(stylesheet)
    xml = etree.tostring(
        transform(etree.fromstring(xml)))

    return xml
def load_information_from_description_file(module, mod_path=None):
    """ Load and return the manifest (descriptor) data of a module.

    :param module: The name of the module (sale, purchase, ...)
    :param mod_path: Physical path of the module; if not provided, it is
        looked up from the module name.
    :return: dict of manifest values with defaults applied, or an empty
        dict when no manifest file is found.
    """
    if not mod_path:
        mod_path = get_module_path(module, downloaded=True)
    manifest_file = module_manifest(mod_path)
    if manifest_file:
        # default values for descriptor
        info = {
            'application': False,
            'author': 'Odoo S.A.',
            'auto_install': False,
            'category': 'Uncategorized',
            'depends': [],
            'description': '',
            'icon': get_module_icon(module),
            'installable': True,
            'license': 'LGPL-3',
            'post_load': None,
            'version': '1.0',
            'web': False,
            'website': 'https://www.odoo.com',
            'sequence': 100,
            'summary': '',
        }
        # Pair each of these keys with a fresh empty list:
        # iter(list, None) calls list() forever (list() never returns None).
        info.update(itertools.izip(
            'depends data demo test init_xml update_xml demo_xml'.split(),
            iter(list, None)))

        f = tools.file_open(manifest_file)
        try:
            # The manifest is a Python literal (dict); literal_eval keeps
            # this safe against arbitrary code execution.
            info.update(ast.literal_eval(f.read()))
        finally:
            f.close()

        # Fall back to a README file for the description when the
        # manifest does not provide one.
        if not info.get('description'):
            readme_path = [opj(mod_path, x) for x in README
                           if os.path.isfile(opj(mod_path, x))]
            if readme_path:
                readme_text = tools.file_open(readme_path[0]).read()
                info['description'] = readme_text

        if 'active' in info:
            # 'active' has been renamed 'auto_install'
            info['auto_install'] = info['active']

        info['version'] = adapt_version(info['version'])
        return info

    _logger.debug('module %s: no manifest file found %s', module, MANIFEST_NAMES)
    return {}
def _get_icon_image(self):
    """Compute the base64-encoded icon image of each module record.

    Leaves ``icon_image`` empty when the module ships no icon file.
    """
    for record in self:
        record.icon_image = ''
        icon_path = modules.get_module_resource(
            record.name, 'static', 'description', 'icon.png')
        if not icon_path:
            continue
        with tools.file_open(icon_path, 'rb') as icon_fh:
            record.icon_image = icon_fh.read().encode('base64')
def test_private_convert_pdf(self, get_pdf, update_hold):
    """Check that creating a communication job for a correspondence
    letter attaches exactly one "Supporter Letter" document.

    ``get_pdf`` and ``update_hold`` are mocks injected by the test
    framework (patched report rendering and hold update).
    """
    update_hold.return_value = True
    # Use a bundled PDF as the fake rendered report output.
    f_path = 'addons/partner_communication_switzerland/static/src/test.pdf'
    with file_open(f_path) as pdf_file:
        get_pdf.return_value = pdf_file.read()
    child = self.create_child(self.ref(11))
    sponsorship = self.create_contract(
        {
            'partner_id': self.michel.id,
            'group_id': self.sp_group.id,
            'child_id': child.id,
        },
        [{'amount': 50.0}]
    )
    self.validate_sponsorship(sponsorship)
    default_template = self.env.ref('sbc_compassion.default_template')
    correspondence_data = {
        'template_id': default_template.id,
        'original_text': 'my text',
        'sponsorship_id': sponsorship.id,
        'base64string': 'base64data'
    }
    letter = self.env['correspondence'].create(correspondence_data)
    config = self.env.ref(
        'partner_communication_switzerland.child_letter_config')
    job = self.env['partner.communication.job'].create({
        'partner_id': self.michel.id,
        'object_ids': letter.ids,
        'config_id': config.id
    })
    # The generated letter must be attached exactly once and carry the
    # expected name prefix.
    self.assertEqual(len(job.attachment_ids), 1)
    self.assertRegexpMatches(job.attachment_ids[0].name,
                             r'^Supporter Letter')
def gtc(self):
    """Return the Muskathlon general terms & conditions HTML for the
    current environment language.

    :return: HTML content as a string
    """
    # Context manager fixes a resource leak: the original closed the
    # file only after read() succeeded, leaking the handle on error.
    with file_open(
        'muskathlon/static/src/html/muskathlon_gtc_{}.html'
        .format(self.env.lang)
    ) as html_file:
        return html_file.read()
def parse(self, filename, ids, model, context=None):
    """Parse the XML template file into memory.

    Sets ``self.dom`` to the parsed template and ``self.doc`` to a fresh
    root element of the same tag, then walks the tree via
    ``parse_tree``.
    """
    template_fh = tools.file_open(filename)
    try:
        content = template_fh.read()
        self.dom = etree.XML(content)
        self.doc = etree.Element(self.dom.tag)
        self.parse_tree(ids, model, context)
    finally:
        template_fh.close()
def _report_content(self, name): data = self[name + "_content_data"] if not data and self[name]: try: with tools.file_open(self[name], mode="rb") as fp: data = fp.read() except Exception: data = False return data
def _get_header(self): try: header_file = tools.file_open(os.path.join("base", "report", "corporate_rml_header.rml")) try: return header_file.read() finally: header_file.close() except: return self._header_a4
def load_templates(self, **kwargs):
    """Return the raw QWeb template sources needed by the livechat
    widget.

    :return: list of template file contents (bytes), in declaration
        order
    """
    # NOTE: the unused ``base_url`` lookup of the original was removed.
    templates = [
        'mail/static/src/xml/abstract_thread_window.xml',
        'mail/static/src/xml/discuss.xml',
        'mail/static/src/xml/thread.xml',
        'im_livechat/static/src/xml/im_livechat.xml',
    ]
    contents = []
    for tmpl in templates:
        # Context manager fixes the file-handle leak of the original
        # one-liner (handles were never closed).
        with tools.file_open(tmpl, 'rb') as tmpl_file:
            contents.append(tmpl_file.read())
    return contents
def read_image(self, path):
    """Return the base64-encoded image located by *path*.

    *path* has the form ``"module,relative/path"``. Returns ``False``
    when *path* is empty or the resource cannot be located.
    """
    if not path:
        return False
    parts = path.split(',')
    resource_path = get_module_resource(parts[0], parts[1])
    if not resource_path:
        return False
    with tools.file_open(resource_path, 'rb') as handle:
        return base64.encodestring(handle.read())
def relaxng(view_type):
    """ Return a validator for the given view type, or None. """
    # Serve cached validators (including cached failures stored as None).
    if view_type in _relaxng_cache:
        return _relaxng_cache[view_type]
    rng_path = os.path.join('base', 'rng', '%s_view.rng' % view_type)
    with tools.file_open(rng_path) as frng:
        try:
            relaxng_doc = etree.parse(frng)
            validator = etree.RelaxNG(relaxng_doc)
        except Exception:
            _logger.exception('Failed to load RelaxNG XML schema for views validation')
            validator = None
        _relaxng_cache[view_type] = validator
    return _relaxng_cache[view_type]
def _get_icon_image(self):
    """Compute ``icon_image``: the base64 content of the module icon.

    Uses the icon path declared on the record when set, otherwise the
    default icon for the module name.
    """
    for record in self:
        record.icon_image = ''
        if record.icon:
            # record.icon looks like '/module/static/...': element 0 is
            # empty, element 1 the module, the rest the relative path.
            parts = record.icon.split('/')
            resource = modules.get_module_resource(parts[1], *parts[2:])
        else:
            resource = modules.get_module_icon(record.name)
        if not resource:
            continue
        with tools.file_open(resource, 'rb') as handle:
            record.icon_image = handle.read().encode('base64')
def get_demo_images(self):
    """Return the module's demo images.

    :return: list of ``(filename, base64-content)`` tuples; images that
        cannot be read are skipped.
    """
    self.ensure_one()
    # Bug fix: when demo_images is empty/False the original evaluated
    # ``for image_name in False`` and raised TypeError.
    if not self.demo_images:
        return []
    demo_images = self.demo_images.split(',')
    res = []
    mod_path = get_module_resource(self.name)
    for image_name in demo_images:
        full_name = os.path.join(mod_path, image_name)
        try:
            with tools.file_open(full_name, 'rb') as image_file:
                res.append((image_name, image_file.read().encode('base64')))
        except Exception:
            # Best effort: skip unreadable images. Narrowed from a bare
            # ``except`` so KeyboardInterrupt/SystemExit propagate.
            pass
    return res
def test_co_1272(self, get_pdf, update_hold):
    """ Test bug fix where mandate validation should not create a new
    dossier communication for the sponsor.

    ``get_pdf`` and ``update_hold`` are mocks injected by the test
    framework.
    """
    update_hold.return_value = True
    # Use a bundled PDF as the fake rendered report output.
    f_path = 'addons/partner_communication_switzerland/static/src/test.pdf'
    with file_open(f_path) as pdf_file:
        get_pdf.return_value = pdf_file.read()
    # Creation of the sponsorship contract
    child = self.create_child(self.ref(11))
    sponsorship = self.create_contract(
        {
            'partner_id': self.michel.id,
            'group_id': self.sp_group.id,
            'child_id': child.id,
        },
        [{'amount': 50.0}]
    )
    # Check the sponsorship is in waiting mandate state
    self.validate_sponsorship(sponsorship)
    self.assertEqual(sponsorship.state, 'mandate')
    # Check the communication is generated after sponsorship validation
    new_dossier = self.env.ref(
        'partner_communication_switzerland.planned_dossier')
    partner_communications = self.env['partner.communication.job'].search([
        ('partner_id', '=', self.michel.id),
        ('state', '=', 'pending'),
        ('config_id', '=', new_dossier.id)
    ])
    self.assertTrue(partner_communications)
    # Remove the new dossier communication
    partner_communications.unlink()
    # Validate a mandate to make the sponsorship in waiting state.
    self.mandates[0].validate()
    self.assertEqual(sponsorship.state, 'waiting')
    # Check that no new communication is generated
    partner_communications = self.env['partner.communication.job'].search([
        ('partner_id', '=', self.michel.id),
        ('state', '=', 'pending'),
        ('config_id', '=', new_dossier.id)
    ])
    self.assertFalse(partner_communications)
def create(self, cr, uid, ids, data, context=None):
    """Render the report for *ids* and return ``(content, format)``.

    Looks up the matching ``ir.actions.report.xml`` record (or builds an
    ad-hoc stand-in from the template file when none exists), then
    dispatches to the renderer matching the report type. Returns
    ``(False, False)`` when rendering produced nothing.
    """
    context = dict(context or {})
    if self.internal_header:
        context.update(internal_header=self.internal_header)
    context.update(bin_raw=True)
    env = odoo.api.Environment(cr, uid, context)
    # Make sure fonts are registered before rendering.
    env['res.font'].sudo().font_scan(lazy=True)
    ir_obj = env['ir.actions.report.xml']
    report_xml = ir_obj.search([('report_name', '=', self.name[7:])], limit=1)
    if not report_xml:
        # No registered report action: build a minimal duck-typed
        # stand-in object from the raw template file.
        title = ''
        report_file = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = report_file.read()
            report_type= data.get('report_type', 'pdf')
            class a(object):
                # Simple attribute bag mimicking an ir.actions.report.xml
                # record for the renderers below.
                def __init__(self, *args, **argv):
                    for key,arg in argv.items():
                        setattr(self, key, arg)
            report_xml = a(title=title, report_type=report_type, report_rml_content=rml, name=title, attachment=False, header=self.header)
        finally:
            report_file.close()
    # We add an attribute on the ir.actions.report.xml instance.
    # This attribute 'use_global_header' will be used by
    # the create_single_XXX function of the report engine.
    # This change has been done to avoid a big change of the API.
    setattr(report_xml, 'use_global_header', self.header if report_xml.header else False)
    # Dispatch on report type.
    report_type = report_xml.report_type
    if report_type in ['sxw','odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf','raw','txt','html']:
        fnct = self.create_source_pdf
    elif report_type=='html2html':
        fnct = self.create_source_html2html
    elif report_type=='mako2html':
        fnct = self.create_source_mako2html
    else:
        raise NotImplementedError(_('Unknown report type: %s') % report_type)
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return False, False
    return fnct_ret
def load_script(path, module_name):
    """Load and return a Python module from *path* under *module_name*.

    Python 2 only: relies on the ``file`` builtin, ``os.tmpfile`` and
    the ``imp`` module.
    """
    fp, fname = tools.file_open(path, pathinfo=True)
    fp2 = None

    if not isinstance(fp, file):    # pylint: disable=file-builtin
        # imp.load_source need a real file object, so we create
        # one from the file-like object we get from file_open
        fp2 = os.tmpfile()
        fp2.write(fp.read())
        fp2.seek(0)

    try:
        return imp.load_source(module_name, fname, fp2 or fp)
    finally:
        # Close both the original handle and the temporary copy.
        if fp:
            fp.close()
        if fp2:
            fp2.close()
def _get_desc(self):
    """Compute ``description_html`` for each module.

    Prefers the module's ``static/description/index.html`` (with
    relative ``src`` URLs rewritten to absolute module URLs); falls back
    to rendering the reStructuredText ``description`` field to HTML.
    Both variants are sanitized before being stored.
    """
    for module in self:
        path = modules.get_module_resource(module.name, 'static/description/index.html')
        if path:
            with tools.file_open(path, 'rb') as desc_file:
                doc = desc_file.read()
                html = lxml.html.document_fromstring(doc)
                for element, attribute, link, pos in html.iterlinks():
                    # Rewrite relative asset links (no scheme, not
                    # already pointing into static/) to the module's
                    # description folder.
                    if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
                        element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
                module.description_html = tools.html_sanitize(lxml.html.tostring(html))
        else:
            # Render the RST description field instead.
            overrides = {
                'embed_stylesheet': False,
                'doctitle_xform': False,
                'output_encoding': 'unicode',
                'xml_declaration': False,
            }
            output = publish_string(source=module.description or '', settings_overrides=overrides, writer=MyWriter())
            module.description_html = tools.html_sanitize(output)
def _get_default_favicon(self, original=False):
    """Return the default favicon, base64-encoded.

    When *original* is falsy, the black 'O' of the stock icon is
    recolored with a random color so each company gets a visually
    distinct favicon.
    """
    img_path = get_resource_path('web', 'static/src/img/favicon.ico')
    with tools.file_open(img_path, 'rb') as f:
        if original:
            return base64.b64encode(f.read())
        # Modify the source image to change the color of the 'O'.
        # This could seem overkill to modify the pixels 1 by 1, but
        # Pillow doesn't provide an easy way to do it, and this
        # is acceptable for a 16x16 image.
        color = tuple(randrange(32, 224, 24) for _ in range(3))
        source = Image.open(f)
        recolored = Image.new('RGBA', source.size)
        width, height = source.size
        for row in range(height):
            for col in range(width):
                pixel = source.getpixel((col, row))
                if pixel[0] == pixel[1] == pixel[2] == 0:
                    # Black background pixel -> fully transparent.
                    recolored.putpixel((col, row), (0, 0, 0, 0))
                else:
                    # Keep the original alpha, swap in the random color.
                    recolored.putpixel((col, row), color + (pixel[3],))
        buf = io.BytesIO()
        recolored.save(buf, format="ICO")
        return base64.b64encode(buf.getvalue())
def _create_communication(self, get_pdf, update_hold):
    """Create and validate a sponsorship for Michel, then return the
    pending 'planned dossier' communication jobs it generated.

    ``get_pdf`` and ``update_hold`` are mocks supplied by the caller
    (patched report rendering and hold update).
    """
    update_hold.return_value = True
    # Use a bundled PDF as the fake rendered report output.
    f_path = 'addons/partner_communication_switzerland/static/src/test.pdf'
    with file_open(f_path) as pdf_file:
        get_pdf.return_value = pdf_file.read()
    child = self.create_child(self.ref(11))
    sponsorship = self.create_contract(
        {
            'partner_id': self.michel.id,
            'group_id': self.sp_group.id,
            'child_id': child.id,
        },
        [{'amount': 50.0}]
    )
    self.validate_sponsorship(sponsorship)
    new_dossier = self.env.ref(
        'partner_communication_switzerland.planned_dossier')
    return self.env['partner.communication.job'].search([
        ('partner_id', '=', self.michel.id),
        ('state', '=', 'pending'),
        ('config_id', '=', new_dossier.id)
    ])
def _default_favicon(self):
    """Return the stock web favicon, base64-encoded."""
    favicon_path = get_resource_path('web', 'static/src/img/favicon.ico')
    with tools.file_open(favicon_path, 'rb') as favicon:
        content = favicon.read()
    return base64.b64encode(content)
def get_value_icon_lazy_load(self):
    """Load the default lazy-load spinner image onto the record when
    lazy loading is disabled.
    """
    # Idiomatic falsy test replaces the ``== False`` comparison.
    if not self.is_lazy_load:
        img_path = get_resource_path('theme_clarico_vega', 'static/src/img/Lazyload.gif')
        with tools.file_open(img_path, 'rb') as f:
            self.lazy_load_image = base64.b64encode(f.read())
def execute(self):
    """Apply the Romanian localization configuration wizard.

    For each optional l10n_ro module that is installed and whose
    corresponding wizard flag is set, loads the matching seed data
    (SIRUTA geography, non-deductible VAT links, asset category chart,
    bank statement operation templates, period-closing templates) for
    the wizard's company.
    """
    res = super(RomaniaConfigSettings, self).execute()
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
    # Load SIRUTA datas if field is checked
    wiz = self[0]
    if wiz.siruta_update:
        # First check if module is installed
        installed = self.env['ir.module.module'].search([
            ('name', '=', 'l10n_ro_siruta'),
            ('state', '=', 'installed')
        ])
        if installed:
            path = data_dir + '/l10n_ro_siruta/'
            files = [
                'res.country.zone.csv',
                'res.country.state.csv',
                'res.country.commune.csv',
                'res.country.city.csv'
            ]
            for file1 in files:
                with tools.file_open(path + file1) as fp:
                    tools.convert_csv_import(self._cr, 'l10n_ro_config',
                                             file1, fp.read(), {},
                                             mode="init", noupdate=True)
    account_obj = self.env['account.account']
    #-------------
    # Load Undeductible VAT Configuration
    installed = self.env['ir.module.module'].search([
        ('name', '=', 'l10n_ro_invoice_line_not_deductible'),
        ('state', '=', 'installed')
    ])
    if installed:
        tax_names = ('TVA deductibil 5%', 'TVA deductibil 9%',
                     'TVA deductibil 19%', 'TVA deductibil 20%',
                     'TVA deductibil 24%')
        taxes = self.env['account.tax'].search([
            ('company_id', '=', self.company_id.id),
            ('name', 'in', tax_names)])
        cols = [col[0] for col in self.env['account.tax']._columns.items()]
        if 'not_deductible_tax_id' in cols and taxes:
            # Link each deductible tax with its collected counterpart
            # ('deductibil' -> 'colectat') when not already linked.
            for tax in taxes:
                if not tax.not_deductible_tax_id:
                    not_deduct_tax = self.env['account.tax'].search([
                        ('company_id', '=', self.company_id.id),
                        ('name', 'ilike',
                         tax.name.replace('deductibil', 'colectat'))
                    ])
                    if not_deduct_tax:
                        tax.not_deductible_tax_id = not_deduct_tax[0].id
    # Load Chart of Asset Category
    installed = self.env['ir.module.module'].search([
        ('name', '=', 'l10n_ro_asset'),
        ('state', '=', 'installed')
    ])
    if installed:
        categ_obj = self.env['account.asset.category']
        wiz = self[0]
        if wiz.asset_category_chart_installed:
            asset_categ = categ_obj.search([
                ('name', '=', 'Catalog Mijloace Fixe'),
                ('company_id', '=', wiz.company_id.id)
            ])
            if not asset_categ:
                journal_obj = self.env['account.journal']
                journal_id = journal_obj.search([
                    ('code', '=', 'AMORT'),
                    ('company_id', '=', wiz.company_id.id)])
                # Search for Amortization Journal on company, if doesn't
                # exist create it.
                if not journal_id:
                    default_account_id = account_obj.search([
                        ('code', '=', '681100'),
                        ('company_id', '=', wiz.company_id.id)
                    ])
                    if default_account_id:
                        journal_id = journal_obj.create({
                            "name": 'Jurnal amortizare',
                            "code": 'AMORT',
                            "type": 'general',
                            "user_id": self.env.user.id,
                            "default_credit_account_id": default_account_id[0].id,
                            "default_debit_account_id": default_account_id[0].id,
                            "company_id": wiz.company_id.id
                        })
                journal_id = journal_id[0].id
                # Search for inventory sequence for fixed asset, if doesn't
                # exist create it
                inv_sequence_id = self.env['ir.sequence'].search([
                    ('name', '=', 'Inventar Mijloace Fixe'),
                    ('company_id', '=', wiz.company_id.id)
                ])
                if not inv_sequence_id:
                    inv_sequence_id = self.env['ir.sequence'].create({
                        "name": 'Inventar Mijloace Fixe',
                        "padding": 6,
                        "implementation": 'no_gap',
                        "number_next": 1,
                        "number_increment": 1,
                        "prefix": 'INV/',
                        "company_id": wiz.company_id.id
                    })
                inv_sequence_id = inv_sequence_id[0].id
                f = open(os.path.join(data_dir, 'categoriiactive.csv'), 'rb')
                try:
                    categorii = csv.DictReader(f)
                    # id,parent_id,code,name,type,asset_type,method_number_min,method_number,account_asset_id,account_depreciation_id,account_expense_id,account_income_id,method,method_time,method_period
                    for row in categorii:
                        categ = categ_obj.search([
                            ('code', '=', row['code']),
                            ('company_id', '=', wiz.company_id.id)
                        ])
                        if not categ:
                            # Resolve the parent category by code.
                            if row['parent_code']:
                                parent_category_id = categ_obj.search([
                                    ('code', '=', row['parent_code']),
                                    ('company_id', '=', wiz.company_id.id)
                                ])
                                if parent_category_id:
                                    parent_category_id = parent_category_id[0].id
                                else:
                                    parent_category_id = False
                            else:
                                parent_category_id = False
                            if row['type'] == 'normal':
                                # 'normal' rows carry full accounting
                                # configuration: resolve the accounts by
                                # code for this company.
                                account_asset_id = account_obj.search([
                                    ('code', '=', row['account_asset_id']),
                                    ('company_id', '=', wiz.company_id.id)
                                ])
                                account_depreciation_id = account_obj.search(
                                    [('code', '=', row['account_depreciation_id']),
                                     ('company_id', '=', wiz.company_id.id)
                                     ])
                                account_expense_id = account_obj.search([
                                    ('code', '=', row['account_expense_id']),
                                    ('company_id', '=', wiz.company_id.id)
                                ])
                                account_income_id = account_obj.search([
                                    ('code', '=', row['account_income_id']),
                                    ('company_id', '=', wiz.company_id.id)
                                ])
                                categ = categ_obj.create({
                                    'parent_id': parent_category_id,
                                    'code': row['code'],
                                    'name': row['name'],
                                    'type': row['type'],
                                    'asset_type': row['asset_type'],
                                    'method_number_min': row['method_number_min'],
                                    'method_number': row['method_number'],
                                    'sequence_id': row['asset_type'] == 'fixed' and inv_sequence_id or False,
                                    'account_asset_id': account_asset_id and account_asset_id[0].id or False,
                                    'account_depreciation_id': account_depreciation_id and account_depreciation_id[0].id or False,
                                    'account_expense_depreciation_id': account_expense_id and account_expense_id[0].id or False,
                                    'account_income_id': account_income_id and account_income_id[0].id or False,
                                    'method': row['method'],
                                    'method_time': row['method_time'],
                                    'method_period': row['method_period']
                                })
                            else:
                                # View/grouping categories need no accounts.
                                categ = categ_obj.create({
                                    'parent_id': parent_category_id,
                                    'code': row['code'],
                                    'name': row['name'],
                                    'type': row['type'],
                                    'asset_type': row['asset_type'],
                                })
                finally:
                    f.close()
    # Load Bank Statement Operation Templates
    installed = self.env['ir.module.module'].search([
        ('name', '=', 'l10n_ro_account_bank_statement'),
        ('state', '=', 'installed')
    ])
    if installed:
        statement_obj = self.env['account.statement.operation.template']
        wiz = self[0]
        if wiz.bank_statement_template_installed:
            statements = statement_obj.search([
                ('company_id', '=', wiz.company_id.id)])
            if not statements:
                f = open(
                    os.path.join(
                        data_dir,
                        'account_statement_operation_template.csv'), 'rb')
                try:
                    operations = csv.DictReader(f)
                    for row in operations:
                        account_id = account_obj.search([
                            ('code', '=', row['account_id']),
                            ('company_id', '=', wiz.company_id.id)
                        ])
                        if account_id:
                            statement_obj.create({
                                'label': row['label'],
                                'name': row['name'],
                                'account_id': account_id[0].id,
                                'amount_type': row['amount_type'],
                                'amount': row['amount'],
                                'company_id': wiz.company_id.id,
                            })
                finally:
                    f.close()
    # Load Account Period Templates
    installed = self.env['ir.module.module'].search([
        ('name', '=', 'l10n_ro_account_period_close'),
        ('state', '=', 'installed')
    ])
    if installed:
        closing_obj = self.env['account.period.closing']
        wiz = self[0]
        if wiz.account_period_close_template_installed:
            closings = closing_obj.search([
                ('company_id', '=', wiz.company_id.id)])
            if not closings:
                f = open(
                    os.path.join(data_dir,
                                 'account_period_close_templates.csv'), 'rb')
                try:
                    operations = csv.DictReader(f)
                    for row in operations:
                        debit_account_id = account_obj.search([
                            ('code', '=', row['debit_account_id']),
                            ('company_id', '=', wiz.company_id.id)
                        ])
                        credit_account_id = account_obj.search([
                            ('code', '=', row['credit_account_id']),
                            ('company_id', '=', wiz.company_id.id)
                        ])
                        # Resolve the comma-separated account code list
                        # to account ids for this company.
                        new_accounts = []
                        if row['account_ids']:
                            accounts = row['account_ids'].split(",")
                            for account in accounts:
                                comp_account = account_obj.search([
                                    ('code', '=', account),
                                    ('company_id', '=', wiz.company_id.id)
                                ])
                                if comp_account:
                                    new_accounts.append(comp_account[0].id)
                        if debit_account_id and credit_account_id:
                            template = closing_obj.create({
                                'name': row['name'],
                                'debit_account_id': debit_account_id[0].id,
                                'credit_account_id': credit_account_id[0].id,
                                'type': row['type'],
                                'account_ids': [(6, 0, new_accounts)],
                                'company_id': wiz.company_id.id,
                            })
                            if row['type'] in ('income', 'expense'):
                                template._onchange_type()
                finally:
                    f.close()
    return res
def _get_facturae_schema_file(self, move):
    """Open and return the Facturae XSD schema file matching the
    Facturae version of *move*."""
    version = move.get_facturae_version()
    schema_name = "Facturaev%s.xsd" % version
    return tools.file_open(schema_name, subdir="addons/l10n_es_facturae/data")
def test_xml_node(self):
    """Validates the XML node of the third party complement

    Validates that the XML node ``<terceros:PorCuentadeTerceros>`` is
    included, and that its content is generated correctly. This test covers
    all three possible cases of products sold on behalf of third parties:

    1. The product is imported and sold first hand
    2. The product is made from other products (parts or components).
       This also covers the case when one of its parts is imported and
       sold first hand.
    3. The product is a lease
    """
    invoice = self.create_invoice()
    # Case 1: the product is imported and sold first hand
    imported_product = self.env.ref('product.product_product_24')
    imported_product.write({
        'l10n_mx_edi_code_sat_id':
            self.ref('l10n_mx_edi.prod_code_sat_43201401'),
    })
    line = self.create_invoice_line_to_3rd(invoice, imported_product)
    line.l10n_mx_edi_customs_number = '15 48 3009 0001234'
    line.l10n_mx_edi_customs_date = '2015-01-01'
    line.l10n_mx_edi_customs_name = "Mexico City's customs"
    # Case 2: the product is made from other products
    # There's a BoM for the default product, it doesn't need to be created
    self.create_invoice_line_to_3rd(invoice, self.product)
    self.product.bom_ids.bom_line_ids[0].write({
        'l10n_mx_edi_customs_number': '15 48 3009 0001234',
        'l10n_mx_edi_customs_date': '2015-01-01',
        'l10n_mx_edi_customs_name': "Mexico City's customs",
    })
    # Case 3: the product is a lease
    lease_product = self.env.ref('product.service_cost_01')
    lease_product.write({
        'name': 'House Lease',
        'l10n_mx_edi_code_sat_id':
            self.ref('l10n_mx_edi.prod_code_sat_80131501'),
        'l10n_mx_edi_property_tax': 'CP1234',
    })
    self.create_invoice_line_to_3rd(invoice, lease_product)
    invoice.action_invoice_open()
    self.assertEqual(invoice.l10n_mx_edi_pac_status, "signed",
                     invoice.message_ids.mapped('body'))
    xml = objectify.fromstring(base64.b64decode(invoice.l10n_mx_edi_cfdi))
    self.assertEqual(len(xml.Conceptos.Concepto), 4,
                     "There should be exactly four nodes 'Concepto'")
    # Retrieve nodes <PorCuentadeTerceros> from all concepts
    node0 = xml.Conceptos.Concepto[0].find(
        'cfdi:ComplementoConcepto/terceros:PorCuentadeTerceros',
        namespaces=self.namespaces)
    node1 = xml.Conceptos.Concepto[1].find(
        'cfdi:ComplementoConcepto/terceros:PorCuentadeTerceros',
        namespaces=self.namespaces)
    node2 = xml.Conceptos.Concepto[2].find(
        'cfdi:ComplementoConcepto/terceros:PorCuentadeTerceros',
        namespaces=self.namespaces)
    node3 = xml.Conceptos.Concepto[3].find(
        'cfdi:ComplementoConcepto/terceros:PorCuentadeTerceros',
        namespaces=self.namespaces)
    # All but the first node should be present
    error_msg = ("Node <terceros:PorCuentadeTerceros> should%sbe present "
                 "for concept #%s")
    self.assertIsNone(node0, error_msg % (' not ', '1'))
    self.assertIsNotNone(node1, error_msg % (' ', '2'))
    self.assertIsNotNone(node2, error_msg % (' ', '3'))
    # Bug fix: the original asserted node2 twice and never checked node3.
    self.assertIsNotNone(node3, error_msg % (' ', '4'))
    xmlpath = os.path.join(os.path.dirname(__file__), 'expected_nodes.xml')
    with tools.file_open(xmlpath, mode='rb') as xmlfile:
        xml_expected = objectify.fromstring(xmlfile.read())
    nodes_expected = xml_expected.findall(
        'terceros:PorCuentadeTerceros', namespaces=self.namespaces)
    self.assertEqualXML(node1, nodes_expected[0])
    self.assertEqualXML(node2, nodes_expected[1])
    self.assertEqualXML(node3, nodes_expected[2])
def migrate_module(self, pkg, stage):
    """Run the migration scripts of module *pkg* for the given *stage*.

    *stage* is one of ``'pre'``, ``'post'`` or ``'end'``. Scripts whose
    version lies in ``(installed_version, current_version]`` are loaded
    and their ``migrate(cr, installed_version)`` function is executed.
    Python 2 only: relies on the ``file`` builtin, ``os.tmpfile`` and
    ``imp``.
    """
    assert stage in ('pre', 'post', 'end')
    stageformat = {
        'pre': '[>%s]',
        'post': '[%s>]',
        'end': '[$%s]',
    }
    state = pkg.state if stage in ('pre', 'post') else getattr(
        pkg, 'load_state', None)
    # Only migrate modules being updated/upgraded; skip fresh installs.
    if not (hasattr(pkg, 'update') or state == 'to upgrade') or state == 'to install':
        return

    def convert_version(version):
        # Prefix short module versions with the server major version so
        # they compare correctly.
        if version.count('.') >= 2:
            return version  # the version number already contains the server version
        return "%s.%s" % (release.major_version, version)

    def _get_migration_versions(pkg):
        # All distinct versions that have at least one migration file,
        # sorted in version order.
        versions = list(
            set(ver for lv in self.migrations[pkg.name].values()
                for ver, lf in lv.items() if lf))
        versions.sort(key=lambda k: parse_version(convert_version(k)))
        return versions

    def _get_migration_files(pkg, version, stage):
        """ return a list of migration script files """
        m = self.migrations[pkg.name]
        lst = []
        mapping = {
            'module': opj(pkg.name, 'migrations'),
            'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
        }
        for x in mapping.keys():
            if version in m.get(x):
                for f in m[x][version]:
                    # Only keep scripts for the requested stage
                    # ('pre-', 'post-' or 'end-' prefix).
                    if not f.startswith(stage + '-'):
                        continue
                    lst.append(opj(mapping[x], version, f))
        lst.sort()
        return lst

    installed_version = getattr(pkg, 'load_version', pkg.installed_version) or ''
    parsed_installed_version = parse_version(installed_version)
    current_version = parse_version(convert_version(pkg.data['version']))
    versions = _get_migration_versions(pkg)
    for version in versions:
        # Run scripts strictly newer than the installed version, up to
        # and including the target version.
        if parsed_installed_version < parse_version(
                convert_version(version)) <= current_version:
            strfmt = {
                'addon': pkg.name,
                'stage': stage,
                'version': stageformat[stage] % version,
            }
            for pyfile in _get_migration_files(pkg, version, stage):
                name, ext = os.path.splitext(os.path.basename(pyfile))
                if ext.lower() != '.py':
                    continue
                mod = fp = fp2 = None
                try:
                    fp, fname = tools.file_open(pyfile, pathinfo=True)
                    if not isinstance(fp, file):
                        # imp.load_source need a real file object, so we create
                        # one from the file-like object we get from file_open
                        fp2 = os.tmpfile()
                        fp2.write(fp.read())
                        fp2.seek(0)
                    try:
                        mod = imp.load_source(name, fname, fp2 or fp)
                        _logger.info(
                            'module %(addon)s: Running migration %(version)s %(name)s' % dict(strfmt, name=mod.__name__))
                        migrate = mod.migrate
                    except ImportError:
                        _logger.exception(
                            'module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % dict(strfmt, file=pyfile))
                        raise
                    except AttributeError:
                        _logger.error(
                            'module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
                    else:
                        migrate(self.cr, installed_version)
                finally:
                    # Close both handles and drop the loaded module.
                    if fp:
                        fp.close()
                    if fp2:
                        fp2.close()
                    if mod:
                        del mod
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
    """Render an SXW/ODT report and return ``(binary, mime_type)``.

    Unzips the stored template, evaluates ``content.xml`` and
    ``meta.xml`` through the RML parser, optionally injects the
    corporate header into ``styles.xml``, and re-zips everything into
    the final document. ``mime_type`` is ``'sxw'`` or ``'odt'``.
    Python 2 only: relies on ``unicode``, ``StringIO`` and
    ``dict.iteritems``.
    """
    context = dict(context or {})
    context['parents'] = sxw_parents
    report_type = report_xml.report_type
    binary_report_content = report_xml.report_sxw_content
    if isinstance(report_xml.report_sxw_content, unicode):
        # if binary content was passed as unicode, we must
        # re-encode it as a 8-bit string using the pass-through
        # 'latin1' encoding, to restore the original byte values.
        binary_report_content = report_xml.report_sxw_content.encode("latin1")

    sxw_io = StringIO.StringIO(binary_report_content)
    sxw_z = zipfile.ZipFile(sxw_io, mode='r')
    rml = sxw_z.read('content.xml')
    meta = sxw_z.read('meta.xml')
    # The zip's mimetype entry tells SXW and ODT apart.
    mime_type = sxw_z.read('mimetype')
    if mime_type == 'application/vnd.sun.xml.writer':
        mime_type = 'sxw'
    else :
        mime_type = 'odt'
    sxw_z.close()

    rml_parser = self.parser(cr, uid, self.name2, context=context)
    rml_parser.parents = sxw_parents
    rml_parser.tag = sxw_tag
    objs = self.getObjects(cr, uid, ids, context)
    rml_parser.set_context(objs, data, ids, mime_type)

    # Fill the "Info 3"/"Info 4" user-defined metadata fields with the
    # record id and model.
    rml_dom_meta = node = etree.XML(meta)
    elements = node.findall(rml_parser.localcontext['name_space']["meta"]+"user-defined")
    for pe in elements:
        if pe.get(rml_parser.localcontext['name_space']["meta"]+"name"):
            if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 3":
                pe[0].text=data['id']
            if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 4":
                pe[0].text=data['model']
    meta = etree.tostring(rml_dom_meta, encoding='utf-8',
                          xml_declaration=True)

    rml_dom = etree.XML(rml)
    elements = []
    key1 = rml_parser.localcontext['name_space']["text"]+"p"
    key2 = rml_parser.localcontext['name_space']["text"]+"drop-down"
    # Collect all text paragraphs; drop-down placeholders inside them
    # are flattened into the paragraph text below.
    for n in rml_dom.iterdescendants():
        if n.tag == key1:
            elements.append(n)

    if mime_type == 'odt':
        for pe in elements:
            e = pe.findall(key2)
            for de in e:
                pp = de.getparent()
                if de.text or de.tail:
                    pe.text = de.text or de.tail
                for cnd in de:
                    if cnd.text or cnd.tail:
                        if pe.text:
                            pe.text += cnd.text or cnd.tail
                        else:
                            pe.text = cnd.text or cnd.tail
                pp.remove(de)
    else:
        for pe in elements:
            e = pe.findall(key2)
            for de in e:
                pp = de.getparent()
                if de.text or de.tail:
                    pe.text = de.text or de.tail
                for cnd in de:
                    # Only keep values that are report expressions
                    # (starting with '[[').
                    text = cnd.get("{http://openoffice.org/2000/text}value", False)
                    if text:
                        if pe.text and text.startswith('[['):
                            pe.text += text
                        elif text.startswith('[['):
                            pe.text = text
                        if de.getparent():
                            pp.remove(de)

    rml_dom = self.preprocess_rml(rml_dom, mime_type)
    create_doc = self.generators[mime_type]
    odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
                         encoding='utf-8', xml_declaration=True)
    sxw_contents = {'content.xml': odt, 'meta.xml': meta}

    if report_xml.use_global_header:
        #Add corporate header/footer
        rml_file = tools.file_open(os.path.join('base', 'report', 'corporate_%s_header.xml' % report_type))
        try:
            rml = rml_file.read()
            rml_parser = self.parser(cr, uid, self.name2, context=context)
            rml_parser.parents = sxw_parents
            rml_parser.tag = sxw_tag
            objs = self.getObjects(cr, uid, ids, context)
            rml_parser.set_context(objs, data, ids, report_xml.report_type)
            rml_dom = self.preprocess_rml(etree.XML(rml), report_type)
            create_doc = self.generators[report_type]
            odt = create_doc(rml_dom, rml_parser.localcontext)
            if report_xml.use_global_header:
                rml_parser._add_header(odt)
            odt = etree.tostring(odt, encoding='utf-8',
                                 xml_declaration=True)
            sxw_contents['styles.xml'] = odt
        finally:
            rml_file.close()

    #created empty zip writing sxw contents to avoid duplication
    sxw_out = StringIO.StringIO()
    sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w')
    sxw_template_zip = zipfile.ZipFile (sxw_io, 'r')
    # Copy everything from the template except the entries we rebuilt.
    for item in sxw_template_zip.infolist():
        if item.filename not in sxw_contents:
            buffer = sxw_template_zip.read(item.filename)
            sxw_out_zip.writestr(item.filename, buffer)
    for item_filename, buffer in sxw_contents.iteritems():
        sxw_out_zip.writestr(item_filename, buffer)
    sxw_template_zip.close()
    sxw_out_zip.close()
    final_op = sxw_out.getvalue()
    sxw_io.close()
    sxw_out.close()
    return final_op, mime_type
def _get_shape_svg(self, module, *segments):
    """Return the text of an SVG shape resource of *module*.

    :raises werkzeug.exceptions.NotFound: when the resource does not
        exist.
    """
    shape_path = get_resource_path(module, 'static', *segments)
    if shape_path:
        with tools.file_open(shape_path, 'r', filter_ext=('.svg', )) as svg:
            return svg.read()
    raise werkzeug.exceptions.NotFound()
def export_icon_to_png(
        self, icon, color='#000', bg=None, size=100, alpha=255,
        font='/web/static/src/libs/fontawesome/fonts/fontawesome-webfont.ttf',
        width=None, height=None):
    """ This method converts an unicode character to an image (using Font
        Awesome font by default) and is used only for mass mailing because
        custom fonts are not supported in mail.
        :param icon : decimal encoding of unicode character
        :param color : RGB code of the color
        :param bg : RGB code of the background color
        :param size : Pixels in integer
        :param alpha : transparency of the image from 0 to 255
        :param font : font path
        :param width : Pixels in integer
        :param height : Pixels in integer

        :returns PNG image converted from given font
    """
    # width takes precedence over size; then both dimensions default to size
    size = max(width, height, 1) if width else size
    width = width or size
    height = height or size
    # Make sure we have at least size=1, and cap at 512 px per side
    width = max(1, min(width, 512))
    height = max(1, min(height, 512))
    # Initialize font (path is resolved relative to the addons tree, so the
    # leading slash must be stripped before file_open)
    if font.startswith('/'):
        font = font[1:]
    # NOTE(review): the file handle passed to truetype() is never closed
    # explicitly — relies on GC.
    font_obj = ImageFont.truetype(file_open(font, 'rb'), height)

    # if received character is not a number, keep old behaviour (icon is character)
    icon = chr(int(icon)) if icon.isdigit() else icon

    # Background standardization: drop the alpha channel from an rgba() spec
    if bg is not None and bg.startswith('rgba'):
        bg = bg.replace('rgba', 'rgb')
        bg = ','.join(bg.split(',')[:-1]) + ')'

    # Determine the dimensions of the icon by drawing it on a throwaway image
    # NOTE(review): ImageDraw.textsize() is deprecated/removed in recent
    # Pillow versions (use textbbox/textlength) — confirm Pillow pin.
    image = Image.new("RGBA", (width, height), color)
    draw = ImageDraw.Draw(image)
    boxw, boxh = draw.textsize(icon, font=font_obj)
    draw.text((0, 0), icon, font=font_obj)
    # getbbox() gives the glyph's actual extent, used to center/offset below
    left, top, right, bottom = image.getbbox()

    # Create an alpha mask containing only the glyph
    imagemask = Image.new("L", (boxw, boxh), 0)
    drawmask = ImageDraw.Draw(imagemask)
    drawmask.text((-left, -top), icon, font=font_obj, fill=255)

    # Create a solid color image and apply the mask
    if color.startswith('rgba'):
        color = color.replace('rgba', 'rgb')
        color = ','.join(color.split(',')[:-1]) + ')'
    iconimage = Image.new("RGBA", (boxw, boxh), color)
    iconimage.putalpha(imagemask)

    # Create output image: glyph pasted on the requested (or transparent) bg
    outimage = Image.new("RGBA", (boxw, height), bg or (0, 0, 0, 0))
    outimage.paste(iconimage, (left, top), iconimage)

    # output image as an HTTP response with long-lived caching headers
    output = io.BytesIO()
    outimage.save(output, format="PNG")
    response = werkzeug.wrappers.Response()
    response.mimetype = 'image/png'
    response.data = output.getvalue()
    response.headers['Cache-Control'] = 'public, max-age=604800'
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, POST'
    response.headers['Connection'] = 'close'
    response.headers['Date'] = time.strftime("%a, %d-%b-%Y %T GMT", time.gmtime())
    response.headers['Expires'] = time.strftime(
        "%a, %d-%b-%Y %T GMT", time.gmtime(time.time() + 604800 * 60))
    return response
def test_import_facturx_invoice(self):
    """Import each bundled ZUGFeRD/Factur-X sample file and check the
    resulting supplier invoice (number, partner, dates, amounts).

    Each dict value below lists the expected field values for one sample;
    entries commented out are samples the importer cannot handle yet (the
    reason is given next to each one).
    """
    sample_files = {
        # BASIC
        'ZUGFeRD_1p0_BASIC_Einfach.pdf': {
            'invoice_number': '471102',
            'amount_untaxed': 198.0,
            'amount_total': 235.62,
            'date_invoice': '2013-03-05',
            'partner_xmlid': 'lieferant',
        },
        # Cannot handle BASIC with allowancecharge != 0 and multi-taxes
        # 'ZUGFeRD_1p0_BASIC_Rechnungskorrektur.pdf': {
        #     'type': 'in_refund',
        #     'invoice_number': 'RK21012345',
        #     'amount_untaxed': 7.67,
        #     'amount_total': 8.79,
        #     'date_invoice': '2013-09-16',
        #     'partner_xmlid': 'lieferant',
        #     },
        # COMFORT
        'ZUGFeRD_1p0_COMFORT_Einfach.pdf': {
            'invoice_number': '471102',
            'amount_untaxed': 473.0,
            'amount_total': 529.87,
            'date_invoice': '2013-03-05',
            'date_due': '2013-04-04',
            'partner_xmlid': 'lieferant',
        },
        # same sample, but the embedded XML alone instead of the PDF
        'ZUGFeRD_1p0_COMFORT_Einfach.pdf-ZUGFeRD-invoice.xml': {
            'invoice_number': '471102',
            'amount_untaxed': 473.0,
            'amount_total': 529.87,
            'date_invoice': '2013-03-05',
            'partner_xmlid': 'lieferant',
        },
        'ZUGFeRD_1p0_COMFORT_Haftpflichtversicherung_'
        'Versicherungssteuer.pdf': {
            'invoice_number': '01.234.567.8-2014-1',
            'amount_untaxed': 50.00,
            'amount_total': 59.50,
            'date_invoice': '2014-01-24',
            # stupid sample files: due date is before invoice date !
            'date_due': '2013-12-06',
            'partner_xmlid': 'mvm_musterhafter',
        },
        'ZUGFeRD_1p0_COMFORT_Kraftfahrversicherung_'
        'Bruttopreise.pdf': {
            'invoice_number': '00.123.456.7-2014-1',
            'amount_untaxed': 184.87,
            'amount_total': 220.0,
            'date_invoice': '2014-03-11',
            'date_due': '2014-04-01',
            'partner_xmlid': 'mvm_musterhafter',
        },
        # Disabled due to a bug in the XML
        # Contains Charge + allowance
        # 'ZUGFeRD_1p0_COMFORT_Rabatte.pdf': {
        #     'invoice_number': '471102',
        #     'amount_untaxed': 193.77,
        #     There is a bug in the total amount of the last line
        #     (55.46 ; right value is 20 x 2.7700 = 55.40)
        #     'amount_total': 215.14,
        #     'date_invoice': '2013-06-05',
        #     'partner_xmlid': 'lieferant',
        #     },
        # has AllowanceTotalAmount
        'ZUGFeRD_1p0_COMFORT_Rechnungskorrektur.pdf': {
            'type': 'in_refund',
            'invoice_number': 'RK21012345',
            'date_invoice': '2013-09-16',
            'amount_untaxed': 7.67,
            'amount_total': 8.79,
            'partner_xmlid': 'lieferant',
        },
        'ZUGFeRD_1p0_COMFORT_Sachversicherung_berechneter_'
        'Steuersatz.pdf': {
            'invoice_number': '00.123.456.7-2014-1',
            'amount_untaxed': 1000.00,
            'amount_total': 1163.40,
            'date_invoice': '2014-04-18',
            'date_due': '2014-05-21',
            'partner_xmlid': 'mvm_musterhafter',
        },
        'ZUGFeRD_1p0_COMFORT_SEPA_Prenotification.pdf': {
            'invoice_number': '471102',
            'amount_untaxed': 473.00,
            'amount_total': 529.87,
            'date_invoice': '2014-03-05',
            'date_due': '2014-03-20',
            'partner_xmlid': 'lieferant',
        },
        # EXTENDED
        # has AllowanceTotalAmount
        # 'ZUGFeRD_1p0_EXTENDED_Kostenrechnung.pdf': {
        #     'invoice_number': 'KR87654321012',
        #     'amount_untaxed': 1056.05,
        #     'amount_total': 1256.70,
        #     'date_invoice': '2013-10-06',
        #     'partner_xmlid': 'musterlieferant',
        #     },
        # disable for a malformed date "20139102"
        'ZUGFeRD_1p0_EXTENDED_Rechnungskorrektur.pdf': {
            'type': 'in_refund',
            'invoice_number': 'RK21012345',
            'amount_untaxed': 7.67,
            'amount_total': 8.79,
            'date_invoice': '2013-09-16',
            'partner_xmlid': 'musterlieferant',
        },
        'ZUGFeRD_1p0_EXTENDED_Warenrechnung.pdf': {
            'invoice_number': 'R87654321012345',
            'amount_untaxed': 448.99,
            'amount_total': 518.99,
            'date_invoice': '2013-08-06',
            'partner_xmlid': 'musterlieferant',
        },
        'Factur-X_MINIMUM-1.xml': {
            'invoice_number': 'INV-1242',
            'amount_untaxed': 100.00,
            'amount_total': 120.00,
            'date_invoice': '2017-08-09',
            'partner_xmlid': 'test_noline',
        },
        'Factur-X_BASIC-1.xml': {
            'invoice_number': 'FA12421242',
            'amount_untaxed': 100.00,
            'amount_total': 120.00,
            'date_invoice': '2017-08-09',
            'partner_xmlid': 'test_line',
        },
    }
    aio = self.env['account.invoice']
    # rounding of EUR is used as the comparison precision for amounts
    cur_prec = self.env.ref('base.EUR').rounding
    # We need precision of product price at 4
    # in order to import ZUGFeRD_1p0_EXTENDED_Kostenrechnung.pdf
    price_precision = self.env.ref('product.decimal_price')
    price_precision.digits = 4
    for (inv_file, res_dict) in sample_files.items():
        f = file_open(
            'account_invoice_import_factur-x/tests/files/' + inv_file, 'rb')
        pdf_file = f.read()
        f.close()
        wiz = self.env['account.invoice.import'].create({
            'invoice_file': base64.b64encode(pdf_file),
            'invoice_filename': inv_file,
        })
        wiz.import_invoice()
        # the freshly imported invoice is located by its supplier reference
        invoices = aio.search([
            ('state', '=', 'draft'),
            ('type', 'in', ('in_invoice', 'in_refund')),
            ('reference', '=', res_dict['invoice_number'])
        ])
        self.assertEqual(len(invoices), 1)
        inv = invoices[0]
        self.assertEqual(inv.type, res_dict.get('type', 'in_invoice'))
        self.assertEqual(inv.date_invoice, res_dict['date_invoice'])
        if res_dict.get('date_due'):
            self.assertEqual(inv.date_due, res_dict['date_due'])
        self.assertEqual(inv.partner_id, self.env.ref(
            'account_invoice_import_factur-x.' + res_dict['partner_xmlid']))
        # float_compare() returns 0 on equality, hence assertFalse
        self.assertFalse(float_compare(
            inv.amount_untaxed, res_dict['amount_untaxed'],
            precision_rounding=cur_prec))
        self.assertFalse(float_compare(
            inv.amount_total, res_dict['amount_total'],
            precision_rounding=cur_prec))
        # Delete because several sample invoices have the same number
        invoices.unlink()
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
    """Render one SXW/ODT report: parse the stored template zip, substitute
    metadata and drop-down placeholders, optionally add the corporate
    header, and return the rebuilt document.

    :param report_xml: ir.actions.report.xml record holding the template
    :return: tuple ``(document_bytes, mime_type)`` where mime_type is
        'sxw' or 'odt'
    """
    context = dict(context or {})
    context['parents'] = sxw_parents
    report_type = report_xml.report_type
    binary_report_content = report_xml.report_sxw_content
    if isinstance(report_xml.report_sxw_content, unicode):
        # if binary content was passed as unicode, we must
        # re-encode it as a 8-bit string using the pass-through
        # 'latin1' encoding, to restore the original byte values.
        binary_report_content = report_xml.report_sxw_content.encode(
            "latin1")

    # the template is an OpenDocument/SXW zip container
    sxw_io = StringIO.StringIO(binary_report_content)
    sxw_z = zipfile.ZipFile(sxw_io, mode='r')
    rml = sxw_z.read('content.xml')
    meta = sxw_z.read('meta.xml')
    mime_type = sxw_z.read('mimetype')
    if mime_type == 'application/vnd.sun.xml.writer':
        mime_type = 'sxw'
    else:
        mime_type = 'odt'
    sxw_z.close()

    rml_parser = self.parser(cr, uid, self.name2, context=context)
    rml_parser.parents = sxw_parents
    rml_parser.tag = sxw_tag
    objs = self.getObjects(cr, uid, ids, context)
    rml_parser.set_context(objs, data, ids, mime_type)

    # inject record id/model into the 'Info 3'/'Info 4' user-defined
    # metadata fields of meta.xml
    rml_dom_meta = node = etree.XML(meta)
    elements = node.findall(rml_parser.localcontext['name_space']["meta"] + "user-defined")
    for pe in elements:
        if pe.get(rml_parser.localcontext['name_space']["meta"] + "name"):
            if pe.get(rml_parser.localcontext['name_space']["meta"] + "name") == "Info 3":
                pe[0].text = data['id']
            if pe.get(rml_parser.localcontext['name_space']["meta"] + "name") == "Info 4":
                pe[0].text = data['model']
    meta = etree.tostring(
        rml_dom_meta, encoding='utf-8', xml_declaration=True)

    # collect all text:p paragraphs; drop-down placeholders inside them
    # will be flattened into plain text below
    rml_dom = etree.XML(rml)
    elements = []
    key1 = rml_parser.localcontext['name_space']["text"] + "p"
    key2 = rml_parser.localcontext['name_space']["text"] + "drop-down"
    for n in rml_dom.iterdescendants():
        if n.tag == key1:
            elements.append(n)

    if mime_type == 'odt':
        # ODT: inline the drop-down's own text/tail and its children's
        for pe in elements:
            e = pe.findall(key2)
            for de in e:
                pp = de.getparent()
                if de.text or de.tail:
                    pe.text = de.text or de.tail
                for cnd in de:
                    if cnd.text or cnd.tail:
                        if pe.text:
                            pe.text += cnd.text or cnd.tail
                        else:
                            pe.text = cnd.text or cnd.tail
                pp.remove(de)
    else:
        # SXW: placeholder expressions live in the OpenOffice 'value'
        # attribute; only '[[...]]' expressions are kept
        for pe in elements:
            e = pe.findall(key2)
            for de in e:
                pp = de.getparent()
                if de.text or de.tail:
                    pe.text = de.text or de.tail
                for cnd in de:
                    text = cnd.get(
                        "{http://openoffice.org/2000/text}value", False)
                    if text:
                        if pe.text and text.startswith('[['):
                            pe.text += text
                        elif text.startswith('[['):
                            pe.text = text
                        # NOTE(review): removal nested under ``if text`` —
                        # reconstructed from the collapsed source; confirm
                        # against upstream report_sxw.py
                        if de.getparent():
                            pp.remove(de)

    rml_dom = self.preprocess_rml(rml_dom, mime_type)
    create_doc = self.generators[mime_type]
    odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
                         encoding='utf-8', xml_declaration=True)
    sxw_contents = {'content.xml': odt, 'meta.xml': meta}

    if report_xml.use_global_header:
        #Add corporate header/footer
        rml_file = tools.file_open(
            os.path.join('base', 'report',
                         'corporate_%s_header.xml' % report_type))
        try:
            rml = rml_file.read()
            rml_parser = self.parser(cr, uid, self.name2, context=context)
            rml_parser.parents = sxw_parents
            rml_parser.tag = sxw_tag
            objs = self.getObjects(cr, uid, ids, context)
            rml_parser.set_context(objs, data, ids,
                                   report_xml.report_type)
            rml_dom = self.preprocess_rml(etree.XML(rml), report_type)
            create_doc = self.generators[report_type]
            odt = create_doc(rml_dom, rml_parser.localcontext)
            if report_xml.use_global_header:
                rml_parser._add_header(odt)
            odt = etree.tostring(odt, encoding='utf-8',
                                 xml_declaration=True)
            sxw_contents['styles.xml'] = odt
        finally:
            rml_file.close()

    #created empty zip writing sxw contents to avoid duplication
    sxw_out = StringIO.StringIO()
    sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w')
    sxw_template_zip = zipfile.ZipFile(sxw_io, 'r')
    # copy every untouched member from the template zip ...
    for item in sxw_template_zip.infolist():
        if item.filename not in sxw_contents:
            buffer = sxw_template_zip.read(item.filename)
            sxw_out_zip.writestr(item.filename, buffer)
    # ... then write the regenerated members
    for item_filename, buffer in sxw_contents.iteritems():
        sxw_out_zip.writestr(item_filename, buffer)
    sxw_template_zip.close()
    sxw_out_zip.close()
    final_op = sxw_out.getvalue()
    sxw_io.close()
    sxw_out.close()
    return final_op, mime_type
def migrate_module(self, pkg, stage):
    """Run the migration scripts of *pkg* applicable to the given *stage*.

    :param pkg: graph node of the module being loaded/upgraded
    :param stage: one of 'pre', 'post' or 'end'
    """
    assert stage in ('pre', 'post', 'end')
    stageformat = {
        'pre': '[>%s]',
        'post': '[%s>]',
        'end': '[$%s]',
    }
    # for 'end' scripts the module has already been loaded, so the original
    # state is only available through load_state
    state = pkg.state if stage in ('pre', 'post') else getattr(
        pkg, 'load_state', None)

    # In openupgrade, also run migration scripts upon installation.
    # We want to always pass in pre and post migration files and use a new
    # argument in the migrate decorator (explained in the docstring)
    # to decide if we want to do something if a new module is installed
    # during the migration.
    if not (hasattr(pkg, 'update') or state in ('to upgrade', 'to install')):
        return

    def convert_version(version):
        # prefix with the server series unless the version number already
        # contains it (at least x.y.z)
        if version.count('.') >= 2:
            return version
        return "%s.%s" % (release.major_version, version)

    def _get_migration_versions(pkg):
        """Return every version that ships at least one migration file, sorted."""
        versions = list(
            set(ver for lv in self.migrations[pkg.name].values()
                for ver, lf in lv.items() if lf))
        versions.sort(key=lambda k: parse_version(convert_version(k)))
        return versions

    def _get_migration_files(pkg, version, stage):
        """ return a list of migration script files """
        m = self.migrations[pkg.name]
        lst = []
        mapping = {
            'module': opj(pkg.name, 'migrations'),
            'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
        }
        for x in mapping.keys():
            # default to {} so a missing migration source cannot raise
            # ``in None``
            if version in m.get(x, {}):
                for f in m[x][version]:
                    if not f.startswith(stage + '-'):
                        continue
                    lst.append(opj(mapping[x], version, f))
        lst.sort()
        return lst

    def mergedict(a, b):
        """Return a copy of *a* updated with *b* (inputs left untouched)."""
        a = a.copy()
        a.update(b)
        return a

    parsed_installed_version = parse_version(
        getattr(pkg, 'load_version', pkg.installed_version) or '')
    current_version = parse_version(convert_version(pkg.data['version']))

    versions = _get_migration_versions(pkg)

    for version in versions:
        if parsed_installed_version < parse_version(
                convert_version(version)) <= current_version:
            strfmt = {
                'addon': pkg.name,
                'stage': stage,
                'version': stageformat[stage] % version,
            }
            for pyfile in _get_migration_files(pkg, version, stage):
                name, ext = os.path.splitext(os.path.basename(pyfile))
                if ext.lower() != '.py':
                    continue
                # OpenUpgrade edit start:
                # Removed a copy of migration script to temp directory
                # Replaced call to load_source with load_module so frame
                # isn't lost and breakpoints can be set
                mod = fp = None
                try:
                    fp, pathname = tools.file_open(pyfile, pathinfo=True)
                    try:
                        mod = imp.load_module(
                            name, fp, pathname, ('.py', 'r', imp.PY_SOURCE))
                    except ImportError:
                        _logger.exception(
                            'module %(addon)s: Unable to load %(stage)s-migration file %(file)s',
                            mergedict({'file': pyfile}, strfmt))
                        raise
                    # log once per script (the original emitted this line
                    # twice: inside the try and again after it)
                    _logger.info(
                        'module %(addon)s: Running migration %(version)s %(name)s',
                        mergedict({'name': mod.__name__}, strfmt))
                    if hasattr(mod, 'migrate'):
                        mod.migrate(self.cr, pkg.installed_version)
                    else:
                        _logger.error(
                            'module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function',
                            strfmt)
                finally:
                    if fp:
                        fp.close()
                    if mod:
                        del mod
# Currency / Monetary fields
float_compare(amount, 12, precision_rounding=currency.rounding)
float_is_zero(value, precision_digits=None, precision_rounding=None)
    Returns True if ``value`` is small enough to be treated as zero at the
    given precision (smaller than the corresponding *epsilon*).
float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP')
    rounding_method = 'UP' or 'HALF-UP'
    Example: float_round(1.3298, precision_digits=precision)

# Tools
from openerp.tools import file_open
f = file_open(
    'account_invoice_import_invoice2data/tests/pdf/'
    'invoice_free_fiber_201507.pdf', 'rb')
pdf_file = f.read()
wiz = self.env['account.invoice.import'].create({
    'invoice_file': base64.b64encode(pdf_file),
    'invoice_filename': 'invoice_free_fiber_201507.pdf',
})
f.close()

# fields.Binary
# READ
# When you have a recordset of a model that has a fields.Binary:
# wizard.picture => file content encoded in base64
# WRITE
def _placeholder(self, image=False):
    """Return the binary content of *image*, or of the default web
    placeholder when no image path is given."""
    path = image or 'web/static/img/placeholder.png'
    # only raster placeholder formats may be served
    with file_open(path, 'rb', filter_ext=('.png', '.jpg')) as handle:
        return handle.read()
def _template(self):
    """Return the text content of the file at ``self._template_path``.

    ``self._template_path`` must be set before calling. The file handle is
    closed deterministically (the original leaked it by chaining
    ``.read()`` on the open call).
    """
    assert self._template_path
    with file_open(self._template_path, 'r') as template_file:
        return template_file.read()
def _get_test_file_content(cls, filename):
    """Return the raw bytes of an expected-XML fixture bundled with this module."""
    fixture_path = 'l10n_it_edi_sdicoop/tests/expected_xmls/' + filename
    with tools.file_open(fixture_path, mode='rb') as fixture:
        return fixture.read()
def load_information_from_description_file(module, mod_path=None):
    """Load a module's manifest (descriptor) and return it as a dict.

    :param module: the technical name of the module (sale, purchase, ...)
    :param mod_path: physical path of the module; resolved with
        :func:`get_module_path` when not provided
    :return: manifest values merged over the defaults, or ``{}`` when no
        manifest file is found
    """
    if not mod_path:
        mod_path = get_module_path(module, downloaded=True)
    manifest_file = module_manifest(mod_path)
    if manifest_file:
        # default values for descriptor
        info = {
            'application': False,
            'author': 'Odoo S.A.',
            'auto_install': False,
            'category': 'Uncategorized',
            'depends': [],
            'description': '',
            'icon': get_module_icon(module),
            'installable': True,
            'license': 'LGPL-3',
            'post_load': None,
            'version': '1.0',
            'web': False,
            'sequence': 100,
            'summary': '',
            'website': '',
        }
        # iter(list, None) yields a fresh empty list per key, so the
        # list-valued keys never share one mutable default
        info.update(
            zip('depends data demo test init_xml update_xml demo_xml'.split(),
                iter(list, None)))

        f = tools.file_open(manifest_file, mode='rb')
        try:
            info.update(ast.literal_eval(pycompat.to_text(f.read())))
        finally:
            f.close()

        if not info.get('description'):
            readme_path = [
                opj(mod_path, x) for x in README
                if os.path.isfile(opj(mod_path, x))
            ]
            if readme_path:
                # close the README handle explicitly (previously leaked)
                readme_file = tools.file_open(readme_path[0])
                try:
                    info['description'] = readme_file.read()
                finally:
                    readme_file.close()

        # auto_install is set to `False` if disabled, and a set of
        # auto_install dependencies otherwise. That way, we can set
        # auto_install: [] to always auto_install a module regardless of its
        # dependencies
        auto_install = info.get('auto_install', info.get('active', False))
        # NOTE(review): collections.Iterable is removed in Python 3.10;
        # switch to collections.abc.Iterable once the supported interpreter
        # range allows it.
        if isinstance(auto_install, collections.Iterable):
            info['auto_install'] = set(auto_install)
            non_dependencies = info['auto_install'].difference(info['depends'])
            assert not non_dependencies,\
                "auto_install triggers must be dependencies, found " \
                "non-dependencies [%s] for module %s" % (
                    ', '.join(non_dependencies), module
                )
        elif auto_install:
            info['auto_install'] = set(info['depends'])
        else:
            info['auto_install'] = False

        info['version'] = adapt_version(info['version'])
        return info

    _logger.debug('module %s: no manifest file found %s',
                  module, MANIFEST_NAMES)
    return {}
def migrate_module(self, pkg, stage):
    """Run the migration scripts of *pkg* applicable to the given *stage*.

    Scripts are executed for every version strictly greater than the
    installed version and up to (including) the packaged version.

    :param pkg: graph node of the module being loaded/upgraded
    :param stage: one of 'pre', 'post' or 'end'
    """
    assert stage in ('pre', 'post', 'end')
    stageformat = {
        'pre': '[>%s]',
        'post': '[%s>]',
        'end': '[$%s]',
    }
    # for 'end' scripts the module has already been loaded; the original
    # state is only available as load_state
    state = pkg.state if stage in ('pre', 'post') else getattr(pkg, 'load_state', None)
    # migrations only run on upgrade, never on fresh install
    if not (hasattr(pkg, 'update') or state == 'to upgrade') or state == 'to install':
        return

    def convert_version(version):
        # prefix with the server series unless the version number already
        # contains it (at least x.y.z)
        if version.count('.') >= 2:
            return version
        return "%s.%s" % (release.major_version, version)

    def _get_migration_versions(pkg):
        # every version that ships at least one migration file, sorted
        versions = sorted({
            ver
            for lv in pycompat.values(self.migrations[pkg.name])
            for ver, lf in pycompat.items(lv)
            if lf
        }, key=lambda k: parse_version(convert_version(k)))
        return versions

    def _get_migration_files(pkg, version, stage):
        """ return a list of migration script files """
        m = self.migrations[pkg.name]
        lst = []
        # scripts may live in the module itself or in base/maintenance
        mapping = {
            'module': opj(pkg.name, 'migrations'),
            'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
        }
        for x in mapping:
            if version in m.get(x):
                for f in m[x][version]:
                    if not f.startswith(stage + '-'):
                        continue
                    lst.append(opj(mapping[x], version, f))
        lst.sort()
        return lst

    parsed_installed_version = parse_version(getattr(pkg, 'load_version', pkg.installed_version) or '')
    current_version = parse_version(convert_version(pkg.data['version']))

    versions = _get_migration_versions(pkg)

    for version in versions:
        if parsed_installed_version < parse_version(convert_version(version)) <= current_version:
            strfmt = {'addon': pkg.name,
                      'stage': stage,
                      'version': stageformat[stage] % version,
                      }

            for pyfile in _get_migration_files(pkg, version, stage):
                name, ext = os.path.splitext(os.path.basename(pyfile))
                if ext.lower() != '.py':
                    continue
                mod = fp = fp2 = None
                try:
                    fp, fname = tools.file_open(pyfile, pathinfo=True)

                    # FIXME: imp.load_source removed in P3, and so is the ``file`` object...
                    if not isinstance(fp, file):# pylint: disable=file-builtin
                        # imp.load_source need a real file object, so we create
                        # one from the file-like object we get from file_open
                        fp2 = os.tmpfile()
                        fp2.write(fp.read())
                        fp2.seek(0)
                    try:
                        mod = imp.load_source(name, fname, fp2 or fp)
                        _logger.info('module %(addon)s: Running migration %(version)s %(name)s' % dict(strfmt, name=mod.__name__))
                        migrate = mod.migrate
                    except ImportError:
                        _logger.exception('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % dict(strfmt, file=pyfile))
                        raise
                    except AttributeError:
                        # script has no migrate() entry point
                        _logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
                    else:
                        migrate(self.cr, pkg.installed_version)
                finally:
                    if fp:
                        fp.close()
                    if fp2:
                        fp2.close()
                    if mod:
                        del mod
def shape(self, module, filename, **kwargs):
    """
    Returns a color-customized svg (background shape or illustration).
    """
    svg = None
    if module == 'illustration':
        # illustrations are stored as public attachments keyed by URL
        attachment = request.env['ir.attachment'].sudo().search(
            [('url', '=like', request.httprequest.path),
             ('public', '=', True)], limit=1)
        if not attachment:
            raise werkzeug.exceptions.NotFound()
        svg = b64decode(attachment.datas).decode('utf-8')
    else:
        # shapes ship as static files inside the addon
        shape_path = get_resource_path(module, 'static', 'shapes', filename)
        if not shape_path:
            raise werkzeug.exceptions.NotFound()
        with tools.file_open(shape_path, 'r') as shape_file:
            svg = shape_file.read()

    flip_styles = {
        'x': '<svg style="transform: scaleX(-1);" ',
        'y': '<svg style="transform: scaleY(-1)" ',
        'xy': '<svg style="transform: scale(-1)" ',
    }
    user_colors = []
    for param_name, param_value in kwargs.items():
        color_match = re.match('^c([1-5])$', param_name)
        if color_match:
            # Check that color is hex or rgb(a) to prevent arbitrary injection
            if not re.match(
                    r'(?i)^#[0-9A-F]{6,8}$|^rgba?\(\d{1,3},\d{1,3},\d{1,3}(?:,[0-9.]{1,4})?\)$',
                    param_value.replace(' ', '')):
                raise werkzeug.exceptions.BadRequest()
            user_colors.append(
                [tools.html_escape(param_value), color_match.group(1)])
        elif param_name == 'flip':
            replacement = flip_styles.get(param_value)
            if replacement:
                svg = svg.replace('<svg ', replacement)

    default_palette = {
        '1': '#3AADAA',
        '2': '#7C6576',
        '3': '#F6F6F6',
        '4': '#FFFFFF',
        '5': '#383E45',
    }
    # map each default palette color to the user-requested replacement
    color_mapping = {
        default_palette[palette_number]: color
        for color, palette_number in user_colors
    }
    # create a case-insensitive regex to match all the colors to replace,
    # eg: '(?i)(#3AADAA)|(#7C6576)'
    regex = '(?i)%s' % '|'.join(
        '(%s)' % color for color in color_mapping.keys())

    def subber(match):
        key = match.group().upper()
        return color_mapping.get(key, key)

    svg = re.sub(regex, subber, svg)

    return request.make_response(svg, [
        ('Content-type', 'image/svg+xml'),
        ('Cache-control', 'max-age=%s' % http.STATIC_CACHE_LONG),
    ])
def load_information_from_description_file(module, mod_path=None):
    """Load a module's manifest (descriptor) and return it as a dict.

    :param module: the technical name of the module (sale, purchase, ...)
    :param mod_path: physical path of the module; resolved with
        :func:`get_module_path` when not provided
    :return: manifest values merged over the defaults, or ``{}`` when no
        manifest file is found
    """
    if not mod_path:
        mod_path = get_module_path(module, downloaded=True)
    manifest_file = module_manifest(mod_path)
    if manifest_file:
        # default values for descriptor
        info = {
            'application': False,
            'author': 'Odoo S.A.',
            'auto_install': False,
            'category': 'Uncategorized',
            'depends': [],
            'description': '',
            'icon': get_module_icon(module),
            'installable': True,
            'license': 'LGPL-3',
            'post_load': None,
            'version': '1.0',
            'web': False,
            'sequence': 100,
            'summary': '',
            'website': '',
        }
        # iter(list, None) yields a fresh empty list per key, so the
        # list-valued keys never share one mutable default
        info.update(zip(
            'depends data demo test init_xml update_xml demo_xml'.split(),
            iter(list, None)))

        f = tools.file_open(manifest_file, mode='rb')
        try:
            info.update(ast.literal_eval(pycompat.to_text(f.read())))
        finally:
            f.close()

        if not info.get('description'):
            readme_path = [opj(mod_path, x) for x in README
                           if os.path.isfile(opj(mod_path, x))]
            if readme_path:
                # close the README handle explicitly (previously leaked)
                readme_file = tools.file_open(readme_path[0])
                try:
                    info['description'] = readme_file.read()
                finally:
                    readme_file.close()

        # auto_install is set to `False` if disabled, and a set of
        # auto_install dependencies otherwise. That way, we can set
        # auto_install: [] to always auto_install a module regardless of its
        # dependencies
        auto_install = info.get('auto_install', info.get('active', False))
        # NOTE(review): collections.Iterable is removed in Python 3.10;
        # switch to collections.abc.Iterable once the supported interpreter
        # range allows it.
        if isinstance(auto_install, collections.Iterable):
            info['auto_install'] = set(auto_install)
            non_dependencies = info['auto_install'].difference(info['depends'])
            assert not non_dependencies,\
                "auto_install triggers must be dependencies, found " \
                "non-dependencies [%s] for module %s" % (
                    ', '.join(non_dependencies), module
                )
        elif auto_install:
            info['auto_install'] = set(info['depends'])
        else:
            info['auto_install'] = False

        info['version'] = adapt_version(info['version'])
        return info

    _logger.debug('module %s: no manifest file found %s',
                  module, MANIFEST_NAMES)
    return {}
def action_stock_input_medicament_report(self):
    """Build the medicament reception XLSX report for the pickings in
    context and open it in a wizard window.

    One sheet section is written per selected picking; each section lists
    the received move lines with their medicament/lot details.

    :return: ir.actions.act_window dict opening the generated report record
    """
    custom_value = {}
    # XLS report written to a fixed path on disk, then read back and
    # attached base64-encoded
    filename = '/opt/odoo/Report' + '.xlsx'
    workbook = xlsxwriter.Workbook(filename)
    sheet = workbook.add_worksheet()

    # cell formats
    style_title_1 = workbook.add_format(
        {'font_name': 'Arial', 'font_size': '12', 'bold': True, 'align': 'center'})
    style_num = workbook.add_format(
        {'num_format': '#,##0', 'font_name': 'Arial', 'font_size': '12'})
    style_title = workbook.add_format(
        {'font_name': 'Arial', 'font_size': '12', 'bold': True})
    style_text = workbook.add_format(
        {'font_name': 'Arial', 'font_size': '12'})
    style_money = workbook.add_format(
        {'num_format': '[$$-240A]#,##0.00', 'font_name': 'Arial', 'font_size': '12'})
    style_date = workbook.add_format(
        {'num_format': 'mm/dd/yyyy', 'font_name': 'Arial', 'font_size': '12'})
    style_date_1 = workbook.add_format(
        {'num_format': 'mmm - yyyy', 'font_name': 'Arial', 'font_size': '12'})

    picking = (self.env['stock.picking']
               .browse(self._context.get('active_ids', list())))
    row_num = 0
    for rec in picking:
        inputs = []
        for sm in rec.move_lines:
            for sml in sm._get_move_lines():
                for line in sml:
                    product = {}
                    prod = line.product_id
                    tmpl = prod.product_tmpl_id
                    is_med = prod.is_medicament
                    # char fields are False when empty -> normalize to ''
                    product['product_code'] = prod.default_code or ''
                    product['product_id'] = prod.name or ''
                    product['product_presentation'] = (
                        tmpl.commercial_presentation
                        if is_med and tmpl.commercial_presentation else '')
                    product['product_concent'] = (
                        tmpl.concentration
                        if is_med and tmpl.concentration else '')
                    # prefer the parent group name when the group has one
                    if is_med and tmpl.med_group_id:
                        product['product_group'] = (
                            tmpl.med_group_id.parent_id.name
                            or tmpl.med_group_id.name)
                    else:
                        product['product_group'] = ''
                    # storage selection rendered as a human-readable label
                    if is_med and tmpl.storage:
                        if tmpl.storage == 'norm':
                            product['product_storage'] = \
                                'Normal de 15ºC a 30ºC. Humedad entre 25% a 70%'
                        elif tmpl.storage == 'cold':
                            product['product_storage'] = \
                                'Cadena de frío de 2ºC a 8ºC'
                        else:
                            product['product_storage'] = ''
                    else:
                        product['product_storage'] = ''
                    product['product_prest_ind'] = (
                        tmpl.individual_presentation
                        if is_med and tmpl.individual_presentation else '')
                    product['product_qty'] = line.qty_done
                    product['product_price'] = self._get_unit_price(prod, rec)
                    lot = sml.lot_id
                    product['prod_lot'] = (
                        lot.name if is_med and lot.name else '')
                    product['prod_manufacturing_date'] = (
                        lot.manufacturing_date
                        if is_med and lot.manufacturing_date else '')
                    product['prod_cums_cons'] = (
                        lot.cums_consecutive
                        if is_med and lot.cums_consecutive else '')
                    product['prod_cums_cod'] = (
                        lot.cums if is_med and lot.cums else '')
                    product['prod_laboratory'] = (
                        lot.laboratory if is_med and lot.laboratory else '')
                    product['prod_invima'] = (
                        lot.register_invima
                        if is_med and lot.register_invima else '')
                    product['prod_date_end_life'] = (
                        lot.life_date if is_med and lot.life_date else '')
                    inputs.append(product)

        custom_value['products'] = inputs
        # NOTE(review): ``sm`` leaks out of the loop above, so these header
        # values come from the LAST move of the picking (and raise if the
        # picking has no moves) — confirm this is the intended behavior.
        custom_value['partner_id'] = sm.picking_id.partner_id.name
        custom_value['date_done'] = sm.picking_id.date_done
        custom_value['partner_no'] = sm.picking_id.name
        custom_value['order'] = sm.origin
        custom_value['reference'] = sm.picking_id.name
        custom_value['vender_invo_date'] = \
            sm.picking_id.vendor_remission_date or ''
        custom_value['vender_invo_num'] = \
            sm.picking_id.vendor_remission_number or ''

        path = get_module_resource('stock_report_input_medicament',
                                   'src/img/escudo-de-colombia.png')
        if path:
            with tools.file_open(path, 'rb') as image_file:
                custom_value['logo'] = io.BytesIO(image_file.read())
        custom_value['address'] = \
            picking.picking_type_id.warehouse_id.partner_id.city or ''

        # sequence number of the reception act for this warehouse
        seq = self._get_sequence(rec.location_id.get_warehouse().id)

        # report header
        img_tmp = custom_value['logo']
        title = ('FORMATO DE RECEPCION TECNICA SF HOSPITAL NIVEL I '
                 + custom_value['address']).upper()
        sheet.insert_image(row_num, 9, 'logo.png',
                           {'image_data': img_tmp, 'x_scale': 0.2, 'y_scale': 0.2})
        sheet.merge_range(row_num + 5, 5, row_num + 5, 14, title, style_title_1)
        sheet.merge_range(row_num + 7, 0, row_num + 7, 4,
                          'Acta de Recepción Técnica de medicamentos', style_title)
        sheet.write(row_num + 7, 6, 'No', style_title)
        sheet.write(row_num + 7, 7, seq, style_title)
        sheet.merge_range(row_num + 7, 12, row_num + 7, 13,
                          'Fecha de generación', style_title)
        sheet.write(row_num + 7, 14,
                    (fields.Datetime.now()).strftime('%d/%m/%Y'), style_date)
        sheet.merge_range(row_num + 8, 12, row_num + 8, 13,
                          'Referencia', style_title)
        sheet.write(row_num + 8, 14, custom_value['reference'], style_text)
        sheet.write(row_num + 8, 0, 'Proveedor', style_title)
        sheet.write(row_num + 8, 1, custom_value['partner_id'], style_text)
        sheet.write(row_num + 8, 6, 'Fecha', style_title)
        sheet.write(row_num + 8, 7, custom_value['date_done'], style_date)
        sheet.write(row_num + 10, 0, 'Fecha de llegada', style_title)
        sheet.write(row_num + 10, 1, custom_value['date_done'], style_date)
        sheet.write(row_num + 10, 6, 'Fecha de inspección', style_title)
        sheet.write(row_num + 10, 12, 'No Cajas', style_title)

        # column headers
        sheet.write(row_num + 12, 0, 'Código Interno', style_title)
        sheet.write(row_num + 12, 1, 'Nombre del medicamento', style_title)
        sheet.write(row_num + 12, 2, 'Presentación comercial', style_title)
        sheet.write(row_num + 12, 3, 'Valor unitario ', style_title)
        sheet.write(row_num + 12, 4, 'Proveedor', style_title)
        sheet.write(row_num + 12, 5, 'Número de factura de Proveedor', style_title)
        sheet.write(row_num + 12, 6, 'Fecha de la factura de Proveedor', style_title)
        sheet.write(row_num + 12, 7, 'Cantidad', style_title)
        sheet.write(row_num + 12, 8, 'Fecha de ingreso a farmacia', style_title)
        sheet.write(row_num + 12, 9, 'Laboratorio', style_title)
        # individual presentation
        sheet.write(row_num + 12, 10, 'Presentación', style_title)
        sheet.write(row_num + 12, 11, 'Concentración', style_title)
        sheet.write(row_num + 12, 12, 'Lote', style_title)
        sheet.write(row_num + 12, 13, 'Registro Invima', style_title)
        sheet.write(row_num + 12, 14, 'Fecha de Fabricación', style_title)
        sheet.write(row_num + 12, 15, 'CUMS', style_title)
        sheet.write(row_num + 12, 16, 'Consecutivo CUMS', style_title)
        sheet.write(row_num + 12, 17, 'Grupo', style_title)
        sheet.write(row_num + 12, 18, 'Almacenaje', style_title)
        sheet.write(row_num + 12, 19, 'Fecha de vencimiento', style_title)
        sheet.write(row_num + 12, 20, 'Semáforo', style_title)
        sheet.write(row_num + 12, 21, 'Se acepta', style_title)

        # data rows (only validated pickings)
        n = row_num + 13
        if rec.state == 'done':
            for product in custom_value['products']:
                sheet.write(n, 0, product['product_code'], style_text)
                sheet.write(n, 1, product['product_id'], style_text)
                sheet.write(n, 2, product['product_presentation'], style_text)
                sheet.write(n, 3, product['product_price'], style_money)
                sheet.write(n, 7, product['product_qty'], style_num)
                sheet.write(n, 9, product['prod_laboratory'], style_text)
                sheet.write(n, 10, product['product_prest_ind'], style_text)
                sheet.write(n, 11, product['product_concent'], style_text)
                sheet.write(n, 12, product['prod_lot'], style_text)
                sheet.write(n, 13, product['prod_invima'], style_text)
                sheet.write(n, 14, product['prod_manufacturing_date'], style_date_1)
                sheet.write(n, 15, product['prod_cums_cod'], style_text)
                sheet.write(n, 16, product['prod_cums_cons'], style_text)
                sheet.write(n, 17, product['product_group'], style_text)
                sheet.write(n, 18, product['product_storage'], style_text)
                sheet.write(n, 19, product['prod_date_end_life'], style_date_1)
                sheet.write(n, 4, custom_value['partner_id'], style_text)
                sheet.write(n, 5, custom_value['vender_invo_num'], style_text)
                sheet.write(n, 6, custom_value['vender_invo_date'], style_date)
                sheet.write(n, 8, custom_value['date_done'], style_date)
                n += 1

        # observations
        sheet.write(n + 2, 0, 'Observaciones ', style_title)
        sheet.write(n + 2, 1, rec.note or '', style_title)
        # signatures
        sheet.write(n + 5, 0, 'Responsable de la Recepción Técnica ', style_title)
        sheet.write(n + 6, 5, 'Firma ', style_title)
        sheet.write(n + 6, 9, 'Verificado por ', style_title)
        row_num = n + 1

    workbook.close()
    # read the generated workbook back; with-statement closes the handle
    with open(filename, "rb") as report_file:
        file_data = report_file.read()
    # encodebytes replaces base64.encodestring (removed in Python 3.9)
    # and produces the same newline-wrapped output
    out = base64.encodebytes(file_data)
    mes = ((fields.Datetime.now()).strftime('%B %Y')).upper()

    # Files actions
    # NOTE(review): 'report_data' holds the file NAME and 'file_name' holds
    # the base64 DATA — the field names look swapped; matches the model as-is.
    attach_vals = {
        'report_data': 'ACTA DE RECEPCION DE MEDICAMENTOS ' + mes + '.xlsx',
        'file_name': out,
    }
    act_id = self.env['report.stock.input.medicament'].create(attach_vals)
    return {
        'type': 'ir.actions.act_window',
        'res_model': 'report.stock.input.medicament',
        'res_id': act_id.id,
        'view_type': 'form',
        'view_mode': 'form',
        'context': self.env.context,
        'target': 'new',
    }
def get_cadena(cfdi_node, template):
    """Return the "cadena original" string of a CFDI node.

    :param cfdi_node: lxml element of the CFDI document, or None
    :param template: path (resolvable by ``tools.file_open``) to the XSLT
        template that flattens the CFDI into its canonical string
    :return: the transformed string, or None when ``cfdi_node`` is None
    """
    if cfdi_node is None:
        return None
    # Close the template file handle deterministically instead of
    # leaking it until garbage collection.
    with tools.file_open(template) as template_file:
        cadena_root = etree.parse(template_file)
    return str(etree.XSLT(cadena_root)(cfdi_node))
def test_import_free_invoice(self):
    """Import a Free (French ISP) PDF invoice twice: first to create a
    draft supplier invoice, then to update that same draft invoice."""
    filename = "invoice_free_fiber_201507.pdf"
    # Context manager guarantees the file handle is closed even if
    # read() or the wizard creation raises (the old code only closed it
    # on the success path).
    with file_open(
            "account_invoice_import_invoice2data/tests/pdf/" + filename,
            "rb") as f:
        pdf_file = f.read()
    pdf_file_b64 = base64.b64encode(pdf_file)
    wiz = self.env["account.invoice.import"].create({
        "invoice_file": pdf_file_b64,
        "invoice_filename": filename,
    })
    wiz.import_invoice()
    # Check result of invoice creation
    invoices = self.env["account.move"].search([
        ("state", "=", "draft"),
        ("move_type", "=", "in_invoice"),
        ("ref", "=", "562044387"),
    ])
    self.assertEqual(len(invoices), 1)
    inv = invoices[0]
    self.assertEqual(inv.move_type, "in_invoice")
    self.assertEqual(fields.Date.to_string(inv.invoice_date), "2015-07-02")
    self.assertEqual(
        inv.partner_id,
        self.env.ref("account_invoice_import_invoice2data.free"))
    self.assertEqual(inv.journal_id.type, "purchase")
    self.assertEqual(
        float_compare(inv.amount_total, 29.99, precision_digits=2), 0)
    self.assertEqual(
        float_compare(inv.amount_untaxed, 24.99, precision_digits=2), 0)
    self.assertEqual(len(inv.invoice_line_ids), 1)
    iline = inv.invoice_line_ids[0]
    self.assertEqual(iline.name, "Fiber optic access at the main office")
    self.assertEqual(
        iline.product_id,
        self.env.ref(
            "account_invoice_import_invoice2data.internet_access"),
    )
    self.assertEqual(
        float_compare(iline.quantity, 1.0, precision_digits=0), 0)
    self.assertEqual(
        float_compare(iline.price_unit, 24.99, precision_digits=2), 0)
    # Prepare data for next test i.e. invoice update
    # (we re-use the invoice created by the first import!)
    inv.write({
        "invoice_date": False,
        "ref": False,
    })
    # New import with update of an existing draft invoice
    wiz2 = self.env["account.invoice.import"].create({
        "invoice_file": pdf_file_b64,
        "invoice_filename": "invoice_free_fiber_201507.pdf",
    })
    action = wiz2.import_invoice()
    self.assertEqual(action["res_model"], "account.invoice.import")
    # Choose to update the existing invoice
    wiz2.update_invoice()
    invoices = self.env["account.move"].search([
        ("state", "=", "draft"),
        ("move_type", "=", "in_invoice"),
        ("ref", "=", "562044387"),
    ])
    self.assertEqual(len(invoices), 1)
    inv = invoices[0]
    self.assertEqual(fields.Date.to_string(inv.invoice_date), "2015-07-02")
def _get_facturae_schema_file(self):
    """Return the open Facturae 3.2 XSD schema file used for validation."""
    schema_name = "Facturaev3_2.xsd"
    schema_dir = "addons/l10n_es_facturae/data"
    return tools.file_open(schema_name, subdir=schema_dir)
def _avatar_get_placeholder(self):
    """Return the base64-encoded default avatar image.

    Companies get the generic company image; all other partners get the
    grey avatar placeholder.
    """
    path = "base/static/img/avatar_grey.png"
    if self.is_company:
        path = "base/static/img/company_image.png"
    # Close the file handle deterministically instead of leaking it
    # until garbage collection.
    with tools.file_open(path, 'rb') as image_file:
        return base64.b64encode(image_file.read())
def parse_statechart_file(filename):
    """Open the statechart file at ``filename`` and parse its content."""
    with tools.file_open(filename, 'r') as statechart_file:
        return parse_statechart(statechart_file)
def migrate_module(self, pkg, stage):
    """Run the 'pre' or 'post' migration scripts of module ``pkg``.

    Scripts are looked up both in ``<module>/migrations/<version>/`` and in
    ``base/maintenance/migrations/<module>/<version>/``.  Only files named
    ``<stage>-*.py`` are considered, and each must define a
    ``migrate(cr, installed_version)`` function.

    NOTE(review): this is legacy Python 2 code — it relies on the ``file``
    builtin, ``os.tmpfile`` and ``imp.load_source``, none of which exist
    on Python 3.

    :param pkg: module graph node being installed/upgraded
    :param stage: either 'pre' (before data load) or 'post' (after)
    """
    assert stage in ('pre', 'post')
    # Used only for log formatting: '[>x.y]' for pre, '[x.y>]' for post.
    stageformat = {
        'pre': '[>%s]',
        'post': '[%s>]',
    }

    # Nothing to do unless the module is actually being upgraded.
    if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade') or pkg.state == 'to install':
        return

    def convert_version(version):
        # Prefix short version numbers with the server series so that e.g.
        # '1.1' sorts/compares as '<major_version>.1.1'.
        if version.count('.') >= 2:
            return version  # the version number already contains the server version
        return "%s.%s" % (release.major_version, version)

    def _get_migration_versions(pkg):
        # Version directories that actually contain migration files, from
        # both the module tree and the maintenance tree, sorted ascending.
        def __get_dir(tree):
            return [d for d in tree if tree[d] is not None]

        versions = list(set(
            __get_dir(self.migrations[pkg.name]['module']) +
            __get_dir(self.migrations[pkg.name]['maintenance'])
        ))
        versions.sort(key=lambda k: parse_version(convert_version(k)))
        return versions

    def _get_migration_files(pkg, version, stage):
        """ return a list of tuple (module, file) """
        m = self.migrations[pkg.name]
        lst = []

        mapping = {
            'module': opj(pkg.name, 'migrations'),
            'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
        }

        for x in mapping.keys():
            if version in m[x]:
                for f in m[x][version]:
                    # A non-None entry marks a sub-directory, not a script.
                    if m[x][version][f] is not None:
                        continue
                    # Keep only scripts of the requested stage ('pre-*'/'post-*').
                    if not f.startswith(stage + '-'):
                        continue
                    lst.append(opj(mapping[x], version, f))
        lst.sort()
        return lst

    parsed_installed_version = parse_version(pkg.installed_version or '')
    current_version = parse_version(convert_version(pkg.data['version']))

    versions = _get_migration_versions(pkg)

    for version in versions:
        # Run only scripts strictly newer than the installed version and
        # no newer than the version being installed.
        if parsed_installed_version < parse_version(convert_version(version)) <= current_version:

            strfmt = {'addon': pkg.name,
                      'stage': stage,
                      'version': stageformat[stage] % version,
                      }

            for pyfile in _get_migration_files(pkg, version, stage):
                name, ext = os.path.splitext(os.path.basename(pyfile))
                # Only python scripts are executable migration files.
                if ext.lower() != '.py':
                    continue
                mod = fp = fp2 = None
                try:
                    fp, fname = tools.file_open(pyfile, pathinfo=True)

                    if not isinstance(fp, file):
                        # imp.load_source need a real file object, so we create
                        # one from the file-like object we get from file_open
                        fp2 = os.tmpfile()
                        fp2.write(fp.read())
                        fp2.seek(0)
                    try:
                        mod = imp.load_source(name, fname, fp2 or fp)
                        _logger.info('module %(addon)s: Running migration %(version)s %(name)s' % dict(strfmt, name=mod.__name__))
                        migrate = mod.migrate
                    except ImportError:
                        _logger.exception('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % dict(strfmt, file=pyfile))
                        raise
                    except AttributeError:
                        # Missing migrate() is logged but not fatal.
                        _logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
                    else:
                        migrate(self.cr, pkg.installed_version)
                finally:
                    # Always release file handles and drop the loaded module.
                    if fp:
                        fp.close()
                    if fp2:
                        fp2.close()
                    if mod:
                        del mod
def load_templates(self, **kwargs):
    """Return the raw XML of the legacy public livechat QWeb templates.

    :return: list of bytes, one entry per template file
    """
    templates = [
        'im_livechat/static/src/legacy/public_livechat.xml',
        'im_livechat/static/src/legacy/public_livechat_chatbot.xml',
    ]
    # Close each file handle deterministically instead of leaking it
    # until garbage collection (the old comprehension never closed them).
    contents = []
    for tmpl in templates:
        with tools.file_open(tmpl, 'rb') as template_file:
            contents.append(template_file.read())
    return contents
def _get_aeroo_template_from_file(self):
    """Get an aeroo template from a file."""
    template_path = self.aeroo_template_path
    with file_open(template_path, 'rb') as template_file:
        template_data = template_file.read()
    return template_data
def test_import_fe_cr_invoice(self):
    """Import several sample e-invoice XML files and check that the
    resulting draft supplier invoices carry the expected values."""
    # filename -> expected values of the created invoice
    sample_files = {
        'UBLKetentest_Referentiefactuur_20150100.xml': {
            'invoice_number': '20150101',
            'amount_untaxed': 420.0,
            'amount_total': 475.20,
            'date_invoice': '2015-02-16',
            'due_date': '2015-02-16',
            'partner_xmlid': 'account_invoice_import_fe_cr.ketentest',
        },
        'efff_BE0505890632_160421_Inv_16117778.xml': {
            'invoice_number': '16117778',
            'origin': '59137222',
            'amount_untaxed': 31.00,
            'amount_total': 37.51,
            'date_invoice': '2016-04-21',
            'due_date': '2016-04-28',
            'partner_xmlid': 'account_invoice_import_fe_cr.exact_belgium',
        },
        'UBLInvoice-multitankcard-line_adjust.xml': {
            'invoice_number': '6311117',
            'amount_untaxed': 75.01,
            'amount_total': 90.77,
            'date_invoice': '2017-03-07',
            'partner_xmlid': 'account_invoice_import_fe_cr.multi_tank',
        },
    }
    aio = self.env['account.invoice']
    aiio = self.env['account.invoice.import']
    precision = self.env['decimal.precision'].precision_get('Account')
    # dict.items() works on both Python 2 and 3, unlike the
    # Python-2-only dict.iteritems() used before.
    for (sample_file, res_dict) in sample_files.items():
        # Context manager guarantees the file is closed even on error.
        with file_open(
                'account_invoice_import_fe_cr/tests/files/' + sample_file,
                'rb') as f:
            pdf_file = f.read()
        wiz = aiio.create({
            'invoice_file': base64.b64encode(pdf_file),
            'invoice_filename': sample_file,
        })
        wiz.import_invoice()
        invoices = aio.search([
            ('state', '=', 'draft'),
            ('type', 'in', ('in_invoice', 'in_refund')),
            ('reference', '=', res_dict['invoice_number'])])
        self.assertEqual(len(invoices), 1)
        inv = invoices[0]
        self.assertEqual(inv.type, res_dict.get('type', 'in_invoice'))
        self.assertEqual(inv.date_invoice, res_dict['date_invoice'])
        if res_dict.get('origin'):
            self.assertEqual(inv.origin, res_dict['origin'])
        # BUGFIX: the expected data uses the key 'due_date', but the old
        # code looked up 'date_due', so the due-date check never ran.
        if res_dict.get('due_date'):
            self.assertEqual(inv.date_due, res_dict['due_date'])
        self.assertEqual(
            inv.partner_id, self.env.ref(res_dict['partner_xmlid']))
        self.assertEqual(
            float_compare(inv.amount_untaxed, res_dict['amount_untaxed'],
                          precision_digits=precision), 0)
        self.assertEqual(
            float_compare(inv.amount_total, res_dict['amount_total'],
                          precision_digits=precision), 0)
        # Clean up so the next sample starts from an empty state.
        invoices.unlink()
def get_xaf(self, options):
    """Render the Dutch XAF audit file for the period in ``options``.

    :param options: report options dict; only ``options['date']['date_from']``
        and ``options['date']['date_to']`` are read
    :return: the rendered XML audit content, validated against the
        XAF 3.2 XSD schema
    :raises UserError: when the company is missing its VAT number or country
    """
    def cust_sup_tp(partner_id):
        # Partner type code: Both, Customer, Supplier or Other.
        so_count = partner_id.sale_order_count
        po_count = partner_id.purchase_order_count
        if so_count and po_count:
            return 'B'
        if so_count:
            return 'C'
        if po_count:
            return 'S'
        return 'O'

    def acc_tp(account_id):
        # Account type code: Profit&loss, Balance or Memo.
        if account_id.user_type_id.type in ['income', 'expense']:
            return 'P'
        if account_id.user_type_id.type in ['asset', 'liability']:
            return 'B'
        return 'M'

    def jrn_tp(journal_id):
        # Journal type code expected by the XAF format.
        if journal_id.type == 'bank':
            return 'B'
        if journal_id.type == 'cash':
            return 'C'
        if journal_id.type == 'situation':
            return 'O'
        if journal_id.type in ['sale', 'sale_refund']:
            return 'S'
        if journal_id.type in ['purchase', 'purchase_refund']:
            return 'P'
        return 'Z'

    def amnt_tp(move_line_id):
        # Amount type: Credit or Debit.
        return 'C' if move_line_id.credit else 'D'

    def compute_period_number(date_str):
        # Period number: last digit of the year followed by the
        # zero-padded month (e.g. '2021-03' -> '103').
        date = fields.Date.from_string(date_str)
        return date.strftime('%y%m')[1:]

    def change_date_time(record):
        # Last modification timestamp, ISO-8601 without timezone.
        return record.write_date.strftime('%Y-%m-%dT%H:%M:%S')

    company_id = self.env.company
    # The XAF header requires both VAT number and country; collect all
    # missing fields into a single error message.
    msgs = []
    if not company_id.vat:
        msgs.append(_('- VAT number'))
    if not company_id.country_id:
        msgs.append(_('- Country'))
    if msgs:
        msgs = [_('Some fields must be specified on the company:')] + msgs
        raise UserError('\n'.join(msgs))
    date_from = options['date']['date_from']
    date_to = options['date']['date_to']
    partner_ids = self.env['res.partner'].search([
        '|', ('company_id', '=', False), ('company_id', '=', company_id.id)
    ])
    account_ids = self.env['account.account'].search([('company_id', '=', company_id.id)])
    tax_ids = self.env['account.tax'].search([('company_id', '=', company_id.id)])
    journal_ids = self.env['account.journal'].search([('company_id', '=', company_id.id)])

    # Retrieve periods values: one Period per month in the date range.
    periods = []
    Period = namedtuple('Period', 'number name date_from date_to')
    for period in rrule(freq=MONTHLY, bymonth=(),
                        dtstart=fields.Date.from_string(date_from),
                        until=fields.Date.from_string(date_to)):
        period_from = fields.Date.to_string(period.date())
        # Last day of the period's month.
        period_to = period.replace(day=calendar.monthrange(period.year, period.month)[1])
        period_to = fields.Date.to_string(period_to.date())
        periods.append(
            Period(number=compute_period_number(period_from),
                   name=period.strftime('%B') + ' ' + date_from[0:4],
                   date_from=period_from,
                   date_to=period_to))

    # Retrieve move lines values: totals over all posted entries in range.
    total_query = """
        SELECT COUNT(*), SUM(l.debit), SUM(l.credit)
        FROM account_move_line l, account_move m
        WHERE l.move_id = m.id
        AND l.date >= %s
        AND l.date <= %s
        AND l.company_id = %s
        AND m.state != 'draft'
    """
    self.env.cr.execute(total_query, (
        date_from, date_to, company_id.id,
    ))
    moves_count, moves_debit, moves_credit = self.env.cr.fetchall()[0]
    # Posted moves of the period, grouped by journal record.
    journal_x_moves = {}
    for journal in journal_ids:
        journal_x_moves[journal] = self.env['account.move'].search([
            ('date', '>=', date_from),
            ('date', '<=', date_to),
            ('state', '!=', 'draft'),
            ('journal_id', '=', journal.id)
        ])
    # Rendering context for the QWeb template (data + helper callables).
    values = {
        'company_id': company_id,
        'partner_ids': partner_ids,
        'account_ids': account_ids,
        'journal_ids': journal_ids,
        'journal_x_moves': journal_x_moves,
        'compute_period_number': compute_period_number,
        'periods': periods,
        'tax_ids': tax_ids,
        'cust_sup_tp': cust_sup_tp,
        'acc_tp': acc_tp,
        'jrn_tp': jrn_tp,
        'amnt_tp': amnt_tp,
        'change_date_time': change_date_time,
        'fiscal_year': date_from[0:4],
        'date_from': date_from,
        'date_to': date_to,
        'date_created': fields.Date.context_today(self),
        'software_version': release.version,
        'moves_count': moves_count,
        'moves_debit': moves_debit or 0.0,
        'moves_credit': moves_credit or 0.0,
    }
    audit_content = self.env['ir.qweb'].render(
        'l10n_nl_reports.xaf_audit_file', values)
    # Validate the generated XML against the official XAF 3.2 schema
    # before returning it.
    with tools.file_open('l10n_nl_reports/data/xml_audit_file_3_2.xsd', 'rb') as xsd:
        _check_with_xsd(audit_content, xsd)
    return audit_content
def _avatar_get_placeholder(self):
    """Return the raw bytes of the default avatar placeholder image."""
    # Close the file handle deterministically instead of leaking it
    # until garbage collection.
    with file_open(self._avatar_get_placeholder_path(), 'rb') as image_file:
        return image_file.read()
def _default_logo(self):
    """Return the default website logo, base64-encoded."""
    logo_path = get_resource_path(
        'website', 'static/src/img', 'website_logo.png')
    with tools.file_open(logo_path, 'rb') as logo_file:
        logo_data = logo_file.read()
    return base64.b64encode(logo_data)
def test_ubl_order_import(self):
    """Import sample UBL Order / Request-For-Quotation XML files and
    check the created sale orders against the expected values."""
    # filename -> expected field values of the imported sale order
    tests = {
        'UBL-Order-2.1-Example.xml': {
            'client_order_ref': '34',
            'date_order': '2010-01-20',
            'partner': self.env.ref('sale_order_import_ubl.johnssons'),
            'shipping_partner':
                self.env.ref('sale_order_import_ubl.swedish_trucking'),
            'currency': self.env.ref('base.SEK'),
        },
        'UBL-Order-2.0-Example.xml': {
            'client_order_ref': 'AEG012345',
            'date_order': '2010-06-20',
            'partner': self.env.ref('sale_order_import_ubl.iyt'),
            'shipping_partner':
                self.env.ref('sale_order_import_ubl.fred_churchill'),
            'currency': self.env.ref('base.GBP'),
        },
        'UBL-RequestForQuotation-2.0-Example.xml': {
            'partner': self.env.ref('sale_order_import_ubl.terminus'),
            'shipping_partner':
                self.env.ref('sale_order_import_ubl.s_massiah'),
        },
        'UBL-RequestForQuotation-2.1-Example.xml': {
            'partner':
                self.env.ref('sale_order_import_ubl.gentofte_kommune'),
            'currency': self.env.ref('base.DKK'),
            'shipping_partner': self.env.ref(
                'sale_order_import_ubl.delivery_gentofte_kommune'),
        },
    }
    # dict.items() works on both Python 2 and 3, unlike the
    # Python-2-only dict.iteritems() used before.
    for filename, res in tests.items():
        # Context manager guarantees the file is closed even if the
        # wizard creation raises (the old code only closed on success).
        with file_open(
                'sale_order_import_ubl/tests/files/' + filename,
                'rb') as f:
            xml_file = f.read()
        wiz = self.env['sale.order.import'].create({
            'order_file': base64.b64encode(xml_file),
            'order_filename': filename,
        })
        action = wiz.import_order_button()
        so = self.env['sale.order'].browse(action['res_id'])
        self.assertEqual(
            so.partner_id.commercial_partner_id, res['partner'])
        if res.get('currency'):
            self.assertEqual(so.currency_id, res['currency'])
        if res.get('client_order_ref'):
            self.assertEqual(so.client_order_ref, res['client_order_ref'])
        if res.get('date_order'):
            self.assertEqual(so.date_order[:10], res['date_order'])
        if res.get('shipping_partner'):
            self.assertEqual(
                so.partner_shipping_id, res['shipping_partner'])
def load_templates(self, **kwargs):
    """Return the raw XML of the legacy public livechat QWeb template.

    :return: list of bytes, one entry per template file
    """
    templates = [
        'im_livechat/static/src/legacy/public_livechat.xml',
    ]
    # The old code computed request.httprequest.base_url into an unused
    # local; dropped. Also close each file handle deterministically
    # instead of leaking it until garbage collection.
    contents = []
    for tmpl in templates:
        with tools.file_open(tmpl, 'rb') as template_file:
            contents.append(template_file.read())
    return contents