def test_picture(self):
    """Test converting a picture's binary data (base64 encoded) into a
    docx picture via report_helper.picture().

    Exercises: missing data, missing width/height, each supported size
    unit ('mm', 'cm', 'inchs', 'pt', 'emu', 'twips', bare pixels), the
    align values ('left', 'center', 'right', 'middle'), and non-string
    width/height values.
    """
    doc = DocxTemplate(misc.file_open(
        'sell/template/sell.order.docx').name)
    # Read the logo and base64-encode it (Python 2 str.encode('base64')).
    # Use a context manager so the handle is closed instead of leaked.
    with open(misc.file_open(
            'core/static/description/logo.png').name, 'rb') as logo_file:
        data_1 = logo_file.read().encode('base64')
    data = self.env['sell.order'].search([('name', '=', 'SO00001')])
    ctx = {'obj': data, 'tpl': doc}
    # not data
    report_helper.picture(ctx, None)
    # not width, height
    report_helper.picture(ctx, data_1)
    # width/height in each unit: 'cm', 'mm', 'inchs', 'pt', 'emu', 'twips'
    # align values: 'left', 'center', 'right', 'middle'
    report_helper.picture(ctx, data_1, width='122mm')
    report_helper.picture(ctx, data_1, width='12cm', height='12cm',
                          align='left')
    report_helper.picture(ctx, data_1, width='12inchs', height='12inchs',
                          align='left')
    report_helper.picture(ctx, data_1, width='12pt', height='12pt',
                          align='center')
    report_helper.picture(ctx, data_1, width='12emu', height='12emu',
                          align='right')
    report_helper.picture(ctx, data_1, width='12twips', height='12twips',
                          align='middle')
    # width/height without a unit means pixels
    report_helper.picture(ctx, data_1, width='12', height='12',
                          align='middle')
    # width/height not a string: not isinstance(s, str)
    report_helper.picture(ctx, data_1, width=12, height=12, align='middle')
def test_save_file(self):
    """Render the sell order docx report and persist it with _save_file."""
    rendered = self.report_docx_sell.create(
        self.cr, self.uid, self.sell_order.id,
        self.ir_actions, self.env.context)
    target_dir = tempfile.mkdtemp()
    template_path = misc.file_open('sell/template/sell.order.docx').name
    shutil.copy(template_path, target_dir)
    self.report_docx_sell._save_file(
        target_dir + "/sell.order.docx", rendered)
def from_data_excel(self, fields, rows_file_address):
    """Build an xls payload from ``rows_file_address = (rows, file_address)``.

    If ``file_address`` is set, an existing workbook template is opened
    with xlrd and filled in place via ``setOutCell``; otherwise a fresh
    xlwt workbook is created with styled header/body rows, auto-sized
    columns and frozen heading panes, and its bytes are returned.

    NOTE(review): the template branch (``if file_address``) does not
    return anything — only the xlwt branch returns data. Confirm callers
    expect that asymmetry.
    """
    rows, file_address = rows_file_address
    if file_address:
        # Fill an existing template workbook (formatting preserved).
        bk = xlrd.open_workbook(misc.file_open(file_address).name,
                                formatting_info=True)
        workbook = copy(bk)
        worksheet = workbook.get_sheet(0)
        # Header row: one field name per column of row 0.
        for i, fieldname in enumerate(fields):
            self.setOutCell(worksheet, 0, i, fieldname)
        # Data rows start at row 1; carriage returns are flattened.
        for row, row_vals in enumerate(rows):
            for col, col_value in enumerate(row_vals):
                if isinstance(col_value, basestring):
                    col_value = re.sub("\r", " ", col_value)
                self.setOutCell(worksheet, col, row + 1, col_value)
    else:
        # Build a brand new styled workbook from scratch.
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')
        style, colour_style, base_style, float_style, date_style, datetime_style = self.style_data()
        # Title merged across all columns; fields[0] is the title text.
        worksheet.write_merge(0, 0, 0, len(fields) - 1, fields[0],
                              style=style)
        worksheet.row(0).height = 400
        worksheet.row(2).height = 400
        # Track the widest cell text per column for auto column width.
        columnwidth = {}
        for row_index, row in enumerate(rows):
            for cell_index, cell_value in enumerate(row):
                if cell_index in columnwidth:
                    if len("%s"%(cell_value)) > columnwidth.get(cell_index):
                        columnwidth.update({cell_index: len("%s"%(cell_value))})
                else:
                    columnwidth.update({cell_index: len("%s"%(cell_value))})
                # Row 1 (first data row) gets the coloured style; the
                # last row gets a default style; other rows get a style
                # chosen by value type.
                if row_index == 1:
                    cell_style = colour_style
                elif row_index != len(rows)-1:
                    cell_style = base_style
                    if isinstance(cell_value, basestring):
                        cell_value = re.sub("\r", " ", cell_value)
                    elif isinstance(cell_value, datetime.datetime):
                        cell_style = datetime_style
                    elif isinstance(cell_value, datetime.date):
                        cell_style = date_style
                    elif isinstance(cell_value, float) or isinstance(cell_value, int):
                        cell_style = float_style
                else:
                    cell_style = xlwt.easyxf()
                worksheet.write(row_index + 1, cell_index, cell_value,
                                cell_style)
        for column, widthvalue in columnwidth.items():
            # Auto column width; see the discussion referenced below.
            """参考 下面链接关于自动列宽(探讨)的代码 http://stackoverflow.com/questions/6929115/python-xlwt-accessing-existing-cell-content-auto-adjust-column-width"""
            # xlwt column width is capped at 65536 units; clamp wide cols.
            if (widthvalue + 3) * 367 >= 65536:
                widthvalue = 50
            worksheet.col(column).width = (widthvalue+4) * 367
        worksheet.set_panes_frozen(True)  # frozen headings instead of split panes
        worksheet.set_horz_split_pos(3)  # in general, freeze after last heading row
        worksheet.set_remove_splits(True)  # if user does unfreeze, don't leave a split there
        # Serialize the workbook into an in-memory buffer and return it.
        fp_currency = StringIO.StringIO()
        workbook.save(fp_currency)
        fp_currency.seek(0)
        data = fp_currency.read()
        fp_currency.close()
        return data
def test_get_env(self):
    """Render the sell.order template with the jinja env from get_env()."""
    template = DocxTemplate(
        misc.file_open('sell/template/sell.order.docx').name)
    order = self.env['sell.order'].search([('name', '=', 'SO00001')])
    environment = report_helper.get_env()
    template.render({'obj': order, 'tpl': template}, environment)
def setUp(self):
    """Create a tax.invoice.in for period 2016-05 and an import wizard
    pre-loaded with the 201805_3.xls demo sheet (base64 encoded)."""
    super(tax_invoice_in, self).setUp()
    self.period_201605 = self.env.ref('finance.period_201605')
    self.tax_invoice_in = self.env['tax.invoice.in'].create({
        'name': self.period_201605.id
    })
    # Use a context manager so the xls handle is closed, not leaked.
    with open(misc.file_open(
            'tax_invoice_in/tests/201805_3.xls').name, 'rb') as excel_file:
        in_file = excel_file.read().encode('base64')
    self.invoice_wizard = self.env['create.cn.account.invoice.wizard'].create({
        'excel': in_file,
    })
def check_with_xsd(tree_or_str, xsd_path):
    """Validate an lxml element (or an XML string) against the XSD file
    at *xsd_path*, resolved through ``file_open``.

    :param tree_or_str: etree._Element or XML string to validate
    :param xsd_path: path of the schema file
    :raises UserError: with the concatenated schema errors when the
        document does not validate
    """
    if not isinstance(tree_or_str, etree._Element):
        tree_or_str = etree.fromstring(tree_or_str)
    # Close the schema file once parsed instead of leaking the handle.
    with file_open(xsd_path) as xsd_file:
        xml_schema_doc = etree.parse(xsd_file)
    xsd_schema = etree.XMLSchema(xml_schema_doc)
    try:
        xsd_schema.assertValid(tree_or_str)
    except etree.DocumentInvalid as xml_errors:
        # 'as' form is valid on Python 2.6+ AND Python 3 — the original
        # 'except X, e' comma form is a SyntaxError under Python 3.
        # import UserError only here to avoid circular import statements
        # with tools.func being imported in exceptions.py
        from odoo.exceptions import UserError
        raise UserError('\n'.join([e.message for e in xml_errors.error_log]))
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
    """Load the translation file *filename* for *lang* into the database.

    Returns the result of trans_load_data, or None when the file cannot
    be read (logged as an error unless *verbose* is False).
    """
    try:
        with file_open(filename) as handle:
            _logger.info("loading %s", filename)
            # File format is derived from the extension (po/csv/...).
            extension = os.path.splitext(filename)[-1][1:].lower()
            return trans_load_data(cr, handle, extension, lang,
                                   verbose=verbose,
                                   module_name=module_name,
                                   context=context)
    except IOError:
        if verbose:
            _logger.error("couldn't read translation file %s", filename)
        return None
def schema_activity(arch):
    """Check the activity view against its schema.

    :type arch: etree._Element
    :return: True when the view validates; otherwise log each RelaxNG
        error and return False.
    """
    global _activity_validator
    # Lazily build and cache the RelaxNG validator on first use.
    if _activity_validator is None:
        rng_path = os.path.join('mail', 'views', 'activity.rng')
        with misc.file_open(rng_path) as rng_file:
            _activity_validator = etree.RelaxNG(etree.parse(rng_file))
    if _activity_validator.validate(arch):
        return True
    for error in _activity_validator.error_log:
        _logger.error(ustr(error))
    return False
def _open_image(filename, path=None):
    """Attempt to open a binary file and return the descriptor.

    Search order: the bare *filename* on disk, then for each entry of
    *path*: the joined absolute path on disk (only for absolute entries),
    then the joined path through file_open (addons-relative lookup).

    :param filename: file name, possibly relative
    :param path: optional iterable of directories to search
    :raises IOError: when no candidate can be opened
    """
    if os.path.isfile(filename):
        return open(filename, 'rb')
    for p in (path or []):
        # Absolute search-path entries are probed directly on disk.
        if p and os.path.isabs(p):
            fullpath = os.path.join(p, filename)
            if os.path.isfile(fullpath):
                return open(fullpath, 'rb')
        # Fall back to file_open (module-relative resolution); an empty
        # path entry means look up the bare filename.
        try:
            if p:
                fullpath = os.path.join(p, filename)
            else:
                fullpath = filename
            return file_open(fullpath)
        except IOError:
            pass
    raise IOError("File %s cannot be found in image path" % filename)
def _get_content_from_url(self, url, url_info=None, custom_attachments=None):
    """Fetch the content of an asset (scss / js) file.

    The content comes either from the matching custom ir.attachment
    record (when the asset was customized) or from the module file on
    disk.

    :param str url: URL of the asset file / ir.attachment
    :param dict url_info: optional pre-computed result of
        _get_data_from_url (avoids recomputing it)
    :param custom_attachments: optional ir.attachment recordset to
        search instead of querying the database
    :return: utf-8 encoded content of the asset, or False
    """
    info = url_info if url_info is not None else self._get_data_from_url(url)

    if not info["customized"]:
        # Not customized yet: read the local module file.
        with misc.file_open(url.strip('/'), 'rb', filter_ext=EXTENSIONS) as f:
            return f.read()

    # Customized: the content lives in the corresponding attachment.
    if custom_attachments is None:
        attachment = self._get_custom_attachment(url)
    else:
        attachment = custom_attachments.filtered(lambda rec: rec.url == url)
    return attachment and base64.b64decode(attachment.datas) or False
def setUpClass(cls, chart_template_ref='l10n_es.account_chart_template_full',
               edi_format_ref='l10n_es_edi_sii.edi_es_sii'):
    """Set up Spanish SII EDI test data: signing certificate, company
    configuration and business partners/products."""
    super().setUpClass(chart_template_ref=chart_template_ref, edi_format_ref=edi_format_ref)

    cls.frozen_today = datetime(year=2019, month=1, day=1, hour=0, minute=0, second=0, tzinfo=timezone('utc'))

    # Allow to see the full result of AssertionError.
    cls.maxDiff = None

    # ==== Config ====
    # Read the demo certificate with a context manager so the file
    # handle is closed (the previous one-liner leaked it).
    with misc.file_open(
            "l10n_es_edi_sii/demo/certificates/sello_entidad_act.p12",
            'rb') as p12_file:
        certificate_content = p12_file.read()
    cls.certificate = cls.env['l10n_es_edi.certificate'].create({
        'content': base64.encodebytes(certificate_content),
        'password': '******',
    })
    cls.company_data['company'].write({
        'country_id': cls.env.ref('base.es').id,
        'state_id': cls.env.ref('base.state_es_z').id,
        'l10n_es_edi_certificate_id': cls.certificate.id,
        'vat': 'ES59962470K',
        'l10n_es_edi_test_env': True,
        'l10n_es_edi_tax_agency': 'bizkaia',
    })

    # ==== Business ====
    cls.partner_a.write({
        'vat': 'BE0477472701',
        'country_id': cls.env.ref('base.be').id,
    })
    cls.partner_b.write({
        'vat': 'ESF35999705',
    })
    cls.product_t = cls.env["product.product"].create(
        {"name": "Test product"})
    cls.partner_t = cls.env["res.partner"].create({"name": "Test partner",
                                                   "vat": "ESF35999705"})
def test_import_from_csv_file(self):
    """Test the import from a single CSV file works: the language is
    created, its translations are loaded, and code translation applies."""
    with file_open('test_translation_import/i18n/dot.csv', 'rb') as f:
        # base64.encodebytes replaces the deprecated encodestring alias
        # (removed in Python 3.9); the output is identical.
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Dothraki',
        'code': 'dot',
        'data': po_file,
        'filename': 'dot.csv',
    })
    with mute_logger('odoo.addons.base.models.res_lang'):
        import_tlh.import_lang()

    lang_count = self.env['res.lang'].search_count([('code', '=', 'dot')])
    self.assertEqual(lang_count, 1, "The imported language was not created")

    trans_count = self.env['ir.translation'].search_count(
        [('lang', '=', 'dot')])
    self.assertEqual(trans_count, 1,
                     "The imported translations were not created")

    # Code translations are looked up through the active context language.
    self.env.context = dict(self.env.context, lang="dot")
    self.assertEqual(_("Accounting"), "samva",
                     "The code translation was not applied")
def test_import_from_po_file(self):
    """Test the import from a single po file works: the language is
    created, its translations are loaded, and code translation applies."""
    with file_open('test_translation_import/i18n/tlh.po', 'rb') as f:
        # base64.encodebytes replaces the deprecated encodestring alias
        # (removed in Python 3.9); the output is identical.
        po_file = base64.encodebytes(f.read())
    import_tlh = self.env["base.language.import"].create({
        'name': 'Klingon',
        'code': 'tlh',
        'data': po_file,
        'filename': 'tlh.po',
    })
    with mute_logger('odoo.addons.base.models.res_lang'):
        import_tlh.import_lang()

    tlh_lang = self.env['res.lang']._lang_get('tlh')
    self.assertTrue(tlh_lang, "The imported language was not created")

    trans_count = self.env['ir.translation'].search_count(
        [('lang', '=', 'tlh')])
    self.assertEqual(trans_count, 1,
                     "The imported translations were not created")

    # Code translations are looked up through the active context language.
    self.env.context = dict(self.env.context, lang="tlh")
    self.assertEqual(_("Klingon"), "tlhIngan",
                     "The code translation was not applied")
def create_source_docx(self, cr, uid, ids, report, context=None):
    """Render the docx template of *report* for the given records and
    return ``(report_bytes, output_type)``.

    The template is rendered with docxtpl into a temp file, optionally
    converted to pdf, then read back and removed.

    NOTE(review): when output_type == 'pdf' only the converted file is
    removed — whether render_to_pdf deletes temp_out_file is not visible
    here; confirm the intermediate docx is cleaned up. The mkdtemp
    directory itself is also never removed.
    """
    data = DataModelProxy(self.get_docx_data(cr, uid, ids, report, context))
    tempname = tempfile.mkdtemp()
    temp_out_file = self.generate_temp_file(tempname)
    doc = DocxTemplate(misc.file_open(report.template_file).name)
    # 2016-11-2: picture support added.
    # 1. import the dependency (python3-style relative import)
    from . import report_helper
    # 2. a "tpl" key is required so templates can access the template object
    doc.render({'obj': data,'tpl':doc},report_helper.get_env())
    doc.save(temp_out_file)
    if report.output_type == 'pdf':
        temp_file = self.render_to_pdf(temp_out_file)
    else:
        temp_file = temp_out_file
    report_stream = ''
    with open(temp_file, 'rb') as input_stream:
        report_stream = input_stream.read()
    os.remove(temp_file)
    return report_stream, report.output_type
def _read_addon_file(self, path_or_url):
    """Read the content of a module file or, failing that, of a binary
    ``ir.attachment`` record whose url matches ``path_or_url``.

    :param str path_or_url:
    :returns: bytes
    :raises FileNotFoundError: if the path matches neither a module
        file nor an attachment
    """
    try:
        with file_open(path_or_url, 'rb') as handle:
            return handle.read()
    except FileNotFoundError:
        # Fall back to a binary attachment registered under that url.
        fallback = request.env['ir.attachment'].sudo().search([
            ('url', '=', path_or_url),
            ('type', '=', 'binary'),
        ], limit=1)
        if fallback:
            return fallback.raw
        raise
def setUp(self):
    """Configure ISR tags, a local 10% tax and the test product, and
    load the expected CFDI xml for comparison."""
    super(TestL10nMxEdiInvoiceImpLocal, self).setUp()
    self.tag_model = self.env['account.account.tag']
    self.isr_tag = self.env['account.account.tag'].search(
        [('name', '=', 'ISR')])
    self.tax_negative.tag_ids |= self.isr_tag
    self.company.partner_id.write({
        'property_account_position_id': self.fiscal_position.id,
    })
    self.tax_local = self.tax_positive.copy({
        'name': 'LOCAL(10%) VENTAS',
        'amount': 10.000000,
        'tag_ids': [(6, 0, self.env.ref(
            'l10n_mx_edi_implocal.account_tax_local').ids)]
    })
    self.product = self.env.ref("product.product_product_2")
    self.product.taxes_id = [self.tax_positive.id, self.tax_negative.id,
                             self.tax_local.id]
    self.product.default_code = "TEST"
    self.product.l10n_mx_edi_code_sat_id = self.ref(
        'l10n_mx_edi.prod_code_sat_01010101')
    # Read the expected xml with a context manager so the file handle is
    # closed (the previous one-liner leaked it).
    with misc.file_open(os.path.join(
            'l10n_mx_edi_implocal', 'tests', 'expected.xml')) as xml_file:
        self.xml_expected = xml_file.read().encode('UTF-8')
def setUp(self):
    """Configure ISR repartition tags, a local 10% tax and the test
    product, and load the expected CFDI xml for comparison."""
    super(TestL10nMxEdiInvoiceImpLocal, self).setUp()
    self.tag_model = self.env['account.account.tag']
    isr_tag = self.env['account.account.tag'].search([('name', '=', 'ISR')
                                                      ])
    for rep_line in self.tax_negative.invoice_repartition_line_ids:
        rep_line.tag_ids |= isr_tag
    self.tax_local = self.tax_positive.copy({
        'name': 'LOCAL(10%) VENTAS',
        'amount': 10.000000,
    })
    for rep_line in self.tax_local.invoice_repartition_line_ids:
        rep_line.tag_ids |= self.env.ref(
            'l10n_mx_edi_implocal.account_tax_local')
    self.product = self.env.ref("product.product_product_2")
    self.product.taxes_id = [
        self.tax_positive.id, self.tax_negative.id, self.tax_local.id
    ]
    self.product.default_code = "TEST"
    self.product.l10n_mx_edi_code_sat_id = self.ref(
        'l10n_mx_edi.prod_code_sat_01010101')
    # Read the expected xml with a context manager so the file handle is
    # closed (the previous one-liner leaked it).
    with misc.file_open(
            os.path.join('l10n_mx_edi_implocal', 'tests',
                         'expected.xml')) as xml_file:
        self.xml_expected = xml_file.read().encode('UTF-8')
def test_l10n_mx_edi_invoice_basic_33(self):
    """Run the basic CFDI 3.3 invoice test, then cover: the refund /
    CFDI-related section, an invoice line without product, and a payment
    sent by email (checking both xml and pdf get attached).

    NOTE(review): the file handle returned by misc.file_open() below is
    never closed; assertEquals is a deprecated alias of assertEqual.
    """
    self.config_parameter.value = '3.3'
    self.xml_expected_str = misc.file_open(
        os.path.join('l10n_mx_edi', 'tests',
                     'expected_cfdi33.xml')).read().encode('UTF-8')
    self.xml_expected = objectify.fromstring(self.xml_expected_str)
    self.test_l10n_mx_edi_invoice_basic()
    # -----------------------
    # Testing invoice refund to verify CFDI related section
    # -----------------------
    invoice = self.create_invoice()
    invoice.action_invoice_open()
    refund = self.refund_model.with_context(active_ids=invoice.ids).create(
        {
            'filter_refund': 'refund',
            'description': 'Refund Test',
            'date': invoice.date_invoice,
        })
    result = refund.invoice_refund()
    # invoice_refund returns an action whose domain contains the ids of
    # the created refund(s).
    refund_id = result.get('domain')[1][2]
    refund = self.invoice_model.browse(refund_id)
    refund.action_invoice_open()
    refund.refresh()
    xml = refund.l10n_mx_edi_get_xml_etree()
    self.assertEquals(xml.CfdiRelacionados.CfdiRelacionado.get('UUID'),
                      invoice.l10n_mx_edi_cfdi_uuid,
                      'Invoice UUID is different to CFDI related')
    # -----------------------
    # Testing invoice without product to verify no traceback
    # -----------------------
    invoice = self.create_invoice()
    invoice.invoice_line_ids[0].product_id = False
    invoice.compute_taxes()
    invoice.action_invoice_open()
    self.assertEqual(invoice.state, "open")
    # -----------------------
    # Testing send payment by email
    # -----------------------
    invoice = self.create_invoice()
    invoice.action_invoice_open()
    ctx = {'active_model': 'account.invoice', 'active_ids': [invoice.id]}
    bank_journal = self.env['account.journal'].search(
        [('type', '=', 'bank')], limit=1)
    register_payments = self.env['account.register.payments'].with_context(
        ctx).create({
            'payment_date': invoice.date,
            'l10n_mx_edi_payment_method_id':
                self.env.ref('l10n_mx_edi.payment_method_efectivo').id,
            'payment_method_id':
                self.env.ref("account.account_payment_method_manual_in").id,
            'journal_id': bank_journal.id,
            'communication': invoice.number,
            'amount': invoice.amount_total,
        })
    payment = register_payments.create_payments()
    payment = self.env['account.payment'].search(payment.get('domain', []))
    self.assertEqual(payment.l10n_mx_edi_pac_status, "signed",
                     payment.message_ids.mapped('body'))
    default_template = self.env.ref(
        'account.mail_template_data_payment_receipt')
    wizard_mail = self.env['mail.compose.message'].with_context({
        'default_template_id': default_template.id,
        'default_model': 'account.payment',
        'default_res_id': payment.id
    }).create({})
    res = wizard_mail.onchange_template_id(
        default_template.id, wizard_mail.composition_mode,
        'account_payment', payment.id)
    wizard_mail.write(
        {'attachment_ids': res.get('value', {}).get('attachment_ids', [])})
    wizard_mail.send_mail()
    # The signed payment is expected to carry two attachments (xml + pdf).
    attachment = payment.l10n_mx_edi_retrieve_attachments()
    self.assertEqual(len(attachment), 2,
                     'Documents not attached correctly')
def _get_default_faq(self):
    """Return the default forum FAQ HTML shipped with the module."""
    faq_path = 'website_forum/data/forum_default_faq.html'
    with misc.file_open(faq_path, 'r') as faq_file:
        return faq_file.read()
import base64 import time from datetime import datetime import logging from odoo import _ from odoo.http import request, route, Controller from odoo.tools.misc import file_open from odoo.addons.website_compassion.tools.image_compression import compress_big_images SPONSOR_HEADER = compress_big_images(base64.b64encode(file_open( "crowdfunding_compassion/static/src/img/sponsor_children_banner.jpg", "rb" ).read()), max_bytes_size=2e4, max_width=400) SPONSOR_ICON = base64.b64encode(file_open( "crowdfunding_compassion/static/src/img/icn_children.png", "rb").read()) _logger = logging.getLogger(__name__) def sponsorship_card_content(): return {"type": "sponsorship", "value": 0, "name": _("Sponsor children"), "text": _("sponsored child"), "description": _(""" For 42 francs a month, you're opening the way out of poverty for a child. Sponsorship ensures that the child is known, loved and protected. In particular, it gives the child access to schooling, tutoring, regular balanced meals, medical care and training in the spiritual field, hygiene, etc. Every week, the child participates in the activities of
def trans_generate(lang, modules, cr):
    """Collect every translatable term for *modules* and return a list of
    tuples ``(module, type, name, id, source, translation, comments)``.

    Terms are gathered from ir.model.data records (field selections,
    rml reports, translatable model fields), model constraints, and the
    source files of installed modules (python, mako, javascript, qweb
    xml) via babel extraction. When *lang* is falsy the translation slot
    is left empty.
    """
    env = odoo.api.Environment(cr, SUPERUSER_ID, {})
    to_translate = set()

    def push_translation(module, type, name, id, source, comments=None):
        # empty and one-letter terms are ignored, they probably are not meant to be
        # translated, and would be very hard to translate anyway.
        sanitized_term = (source or '').strip()
        try:
            # verify the minimal size without eventual xml tags
            # wrap to make sure html content like '<a>b</a><c>d</c>' is accepted by lxml
            wrapped = "<div>%s</div>" % sanitized_term
            node = etree.fromstring(wrapped)
            sanitized_term = etree.tostring(node, encoding='UTF-8', method='text')
        except etree.ParseError:
            pass
        # remove non-alphanumeric chars
        sanitized_term = re.sub(r'\W+', '', sanitized_term)
        if not sanitized_term or len(sanitized_term) <= 1:
            return
        tnx = (module, source, name, id, type, tuple(comments or ()))
        to_translate.add(tnx)

    query = 'SELECT name, model, res_id, module FROM ir_model_data'
    query_models = """SELECT m.id, m.model, imd.module FROM ir_model AS m, ir_model_data AS imd WHERE m.id = imd.res_id AND imd.model = 'ir.model'"""

    # Restrict the queries to the requested module set ('all_installed',
    # an explicit list, or 'all' which excludes __export__ records).
    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module IN %s'
        query_param = (tuple(modules),)
    else:
        query += ' WHERE module != %s'
        query_models += ' AND imd.module != %s'
        query_param = ('__export__',)
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'

    cr.execute(query, query_param)

    for (xml_name, model, res_id, module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))

        if model not in env:
            _logger.error("Unable to find object %r", model)
            continue

        record = env[model].browse(res_id)
        if not record._translate:
            # explicitly disabled
            continue

        if not record.exists():
            _logger.warning("Unable to find object %r with id %d", model, res_id)
            continue

        if model=='ir.model.fields':
            # Selection labels of translatable fields.
            try:
                field_name = encode(record.name)
            except AttributeError as exc:
                _logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            field_model = env.get(record.model)
            if (field_model is None or not field_model._translate or
                    field_name not in field_model._fields):
                continue
            field = field_model._fields[field_name]

            if isinstance(getattr(field, 'selection', None), (list, tuple)):
                name = "%s,%s" % (encode(record.model), field_name)
                for dummy, val in field.selection:
                    push_translation(module, 'selection', name, 0, encode(val))

        elif model=='ir.actions.report.xml':
            # Terms embedded in rml report definitions.
            name = encode(record.report_name)
            fname = ""
            if record.report_rml:
                fname = record.report_rml
                parse_func = trans_parse_rml
                report_type = "report"
            elif record.report_xsl:
                continue
            if fname and record.report_type in ('pdf', 'xsl'):
                try:
                    with file_open(fname) as report_file:
                        d = etree.parse(report_file)
                        for t in parse_func(d.iter()):
                            push_translation(module, report_type, name, 0, t)
                except (IOError, etree.XMLSyntaxError):
                    _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)

        # Translatable field values of the record itself.
        for field_name, field in record._fields.iteritems():
            if field.translate:
                name = model + "," + field_name
                try:
                    value = record[field_name] or ''
                except Exception:
                    continue
                for term in set(field.get_trans_terms(value)):
                    push_translation(module, 'model', name, xml_name, encode(term))

    # End of data for ir.model.data query results

    def push_constraint_msg(module, term_type, model, msg):
        # Callable messages cannot be exported as static terms.
        if not callable(msg):
            push_translation(encode(module), term_type, encode(model), 0, encode(msg))

    def push_local_constraints(module, model, cons_type='sql_constraints'):
        """Climb up the class hierarchy and ignore inherited constraints
        from other modules."""
        term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
        msg_pos = 2 if cons_type == 'sql_constraints' else 1
        for cls in model.__class__.__mro__:
            if getattr(cls, '_module', None) != module:
                continue
            constraints = getattr(cls, '_local_' + cons_type, [])
            for constraint in constraints:
                push_constraint_msg(module, term_type, model._name, constraint[msg_pos])

    cr.execute(query_models, query_param)
    for (_, model, module) in cr.fetchall():
        if model not in env:
            _logger.error("Unable to find object %r", model)
            continue
        Model = env[model]
        if Model._constraints:
            push_local_constraints(module, Model, 'constraints')
        if Model._sql_constraints:
            push_local_constraints(module, Model, 'sql_constraints')

    installed_modules = [
        m['name']
        for m in env['ir.module.module'].search_read([('state', '=', 'installed')], fields=['name'])
    ]

    path_list = [(path, True) for path in odoo.modules.module.ad_paths]
    # Also scan these non-addon paths
    for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
        path_list.append((os.path.join(config['root_path'], bin_path), True))
    # non-recursive scan for individual files in root directory but without
    # scanning subdirectories that may contain addons
    path_list.append((config['root_path'], False))
    _logger.debug("Scanning modules at paths: %s", path_list)

    def get_module_from_path(path):
        # Map an absolute file path to the addon (first path component
        # below a recursive scan root) it belongs to.
        for (mp, rec) in path_list:
            mp = os.path.join(mp, '')
            if rec and path.startswith(mp) and os.path.dirname(path) != mp:
                path = path[len(mp):]
                return path.split(os.path.sep)[0]
        return 'base'  # files that are not in a module are considered as being in 'base' module

    def verified_module_filepaths(fname, path, root):
        # Return (module, abs path, rel path, display path) for files of
        # requested+installed modules; all None otherwise.
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
        display_path = "addons%s" % frelativepath
        module = get_module_from_path(fabsolutepath)
        if ('all' in modules or module in modules) and module in installed_modules:
            if os.path.sep != '/':
                display_path = display_path.replace(os.path.sep, '/')
            return module, fabsolutepath, frelativepath, display_path
        return None, None, None, None

    def babel_extract_terms(fname, path, root, extract_method="python",
                            trans_type='code', extra_comments=None,
                            extract_keywords={'_': None}):
        # Extract translatable strings from one source file via babel.
        module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
        extra_comments = extra_comments or []
        if not module:
            return
        src_file = open(fabsolutepath, 'r')
        try:
            for extracted in extract.extract(extract_method, src_file, keywords=extract_keywords):
                # Babel 0.9.6 yields lineno, message, comments
                # Babel 1.3 yields lineno, message, comments, context
                lineno, message, comments = extracted[:3]
                push_translation(module, trans_type, display_path, lineno,
                                 encode(message), comments + extra_comments)
        except Exception:
            _logger.exception("Failed to extract terms from %s", fabsolutepath)
        finally:
            src_file.close()

    for (path, recursive) in path_list:
        _logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in walksymlinks(path):
            for fname in fnmatch.filter(files, '*.py'):
                babel_extract_terms(fname, path, root)
            # mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
            for fname in fnmatch.filter(files, '*.mako'):
                babel_extract_terms(fname, path, root, 'mako', trans_type='report')
            # Javascript source files in the static/src/js directory, rest is ignored (libs)
            if fnmatch.fnmatch(root, '*/static/src/js*'):
                for fname in fnmatch.filter(files, '*.js'):
                    babel_extract_terms(fname, path, root, 'javascript',
                                        extra_comments=[WEB_TRANSLATION_COMMENT],
                                        extract_keywords={'_t': None, '_lt': None})
            # QWeb template files
            if fnmatch.fnmatch(root, '*/static/src/xml*'):
                for fname in fnmatch.filter(files, '*.xml'):
                    babel_extract_terms(fname, path, root, 'odoo.tools.translate:babel_extract_qweb',
                                        extra_comments=[WEB_TRANSLATION_COMMENT])
            if not recursive:
                # due to topdown, first iteration is in first level
                break

    out = []
    # translate strings marked as to be translated
    Translation = env['ir.translation']
    for module, source, name, id, type, comments in sorted(to_translate):
        trans = Translation._get_source(name, type, lang, source) if lang else ""
        out.append((module, type, name, id, source, encode(trans) or '', comments))
    return out
def _get_logo_impl(self):
    """Return the default logo (core/static/description/logo.png) as a
    base64-encoded string."""
    # Use a context manager so the file handle is closed, not leaked.
    with open(misc.file_open('core/static/description/logo.png').name,
              'rb') as logo_file:
        return logo_file.read().encode('base64')
def test_save_file(self):
    """Render the sell order report and save it over the test docx file."""
    rendered = self.report_docx_sell.create(
        self.cr, self.uid, self.sell_order.id,
        self.ir_actions, self.env.context)
    target_path = misc.file_open('sell/tests/sell.order.docx').name
    self.report_docx_sell._save_file(target_path, rendered)
def import_scenario(env, module, scenario_xml, mode, directory, filename):
    """Import a scanner scenario from its XML description.

    Parses *scenario_xml*, resolves referenced warehouses/groups/users/
    model/company/parent, then creates or updates (via ir.model.data
    ``_update``) the scenario, its steps (python code loaded from
    ``<directory>/<step_xml_id>.py``) and its transitions.

    NOTE(review): both 'Model not found' and 'Company not found' errors
    format the value AFTER it has been replaced by False, so the message
    never shows the original name — confirm intended.
    """
    model_obj = env['ir.model']
    company_obj = env['res.company']
    warehouse_obj = env['stock.warehouse']
    user_obj = env['res.users']
    group_obj = env['res.groups']
    ir_model_data_obj = env['ir.model.data']

    xml_doc = StringIO(scenario_xml)
    root = parse(xml_doc).getroot()

    steps = []
    transitions = []
    scenario_values = {}
    noupdate = root.get('noupdate', False)

    # parse of the scenario
    for node in root.getchildren():
        # the node of the Step and Transition are put in other list
        if node.tag == 'Step':
            steps.append(node)
        elif node.tag == 'Transition':
            transitions.append(node)
        elif node.tag == 'warehouse_ids':
            # Warehouses are matched by name; unknown names are skipped.
            if 'warehouse_ids' not in scenario_values:
                scenario_values['warehouse_ids'] = []
            warehouse_ids = warehouse_obj.search([('name', '=', node.text)])
            if warehouse_ids:
                scenario_values['warehouse_ids'].append(
                    (4, warehouse_ids[0].id))
        elif node.tag == 'group_ids':
            # Groups are matched by full name, falling back to xml id.
            if 'group_ids' not in scenario_values:
                scenario_values['group_ids'] = []
            group_ids = group_obj.search([
                ('full_name', '=', node.text),
            ])
            if group_ids:
                scenario_values['group_ids'].append((4, group_ids[0].id))
            else:
                scenario_values['group_ids'].append(
                    (4, env.ref(node.text).id),
                )
        elif node.tag == 'user_ids':
            # Users are matched by login; unknown logins are skipped.
            if 'user_ids' not in scenario_values:
                scenario_values['user_ids'] = []
            user_ids = user_obj.search([
                ('login', '=', node.text),
            ])
            if user_ids:
                scenario_values['user_ids'].append((4, user_ids[0].id))
        elif node.tag in ('active', 'shared_custom'):
            # Boolean flags are evaluated from their text representation.
            scenario_values[node.tag] = safe_eval(node.text) or False
        else:
            scenario_values[node.tag] = node.text or False

    # Transition from old format to new format
    scenario_xml_id = get_xml_id(_('scenario'), module, scenario_values)

    # Resolve the model name into an ir.model id.
    if scenario_values['model_id']:
        scenario_values['model_id'] = model_obj.search([
            ('model', '=', scenario_values['model_id']),
        ]).id or False
        if not scenario_values['model_id']:
            raise ValueError('Model not found: %s' % scenario_values['model_id'])

    # Resolve the company name into a res.company id.
    if scenario_values.get('company_id'):
        scenario_values['company_id'] = company_obj.search([
            ('name', '=', scenario_values['company_id']),
        ]).id or False
        if not scenario_values['company_id']:
            raise ValueError('Company not found: %s' % scenario_values['company_id'])

    # Resolve the parent scenario xml id (qualify with the current
    # module when no module prefix was given).
    if scenario_values.get('parent_id'):
        if '.' not in scenario_values['parent_id']:
            scenario_values['parent_id'] = '%s.%s' % (
                module, scenario_values['parent_id'])
        parent_scenario = env.ref(scenario_values['parent_id'],
                                  raise_if_not_found=False)
        if not parent_scenario:
            raise ValueError('Parent scenario not found: %s' % scenario_values['parent_id'])
        scenario_values['parent_id'] = parent_scenario.id

    # Create or update the scenario
    ir_model_data_obj._update(
        'scanner.scenario', module, scenario_values,
        xml_id=scenario_xml_id, mode=mode, noupdate=noupdate,
    )
    scenario = env.ref(scenario_xml_id)

    # Create or update steps
    resid = {}
    for node in steps:
        step_values = {}
        for key, item in node.items():
            if item == 'False':
                item = False
            step_values[key] = item
        # Get scenario id
        step_values['scenario_id'] = scenario.id
        # Transition from old to new format
        step_xml_id = get_xml_id(_('step'), module, step_values)
        # Get python source
        python_filename = '%s/%s.py' % (
            directory, step_xml_id,
        )
        # Allow to use the id without module name for the current module
        try:
            python_file = misc.file_open(python_filename)
        except IOError:
            if module == step_xml_id.split('.')[0]:
                python_filename = '%s/%s.py' % (
                    directory, step_xml_id.split('.')[1],
                )
                python_file = misc.file_open(python_filename)
        # Load python code and check syntax
        try:
            step_values['python_code'] = python_file.read()
        finally:
            python_file.close()
        # Create or update
        ir_model_data_obj._update(
            'scanner.scenario.step', module, step_values,
            xml_id=step_xml_id, mode=mode, noupdate=noupdate,
        )
        step = env.ref(step_xml_id)
        resid[step_xml_id] = step.id

    # Create or update transitions
    for node in transitions:
        transition_values = {}
        for key, item in node.items():
            # Step references are remapped to the database ids created
            # above.
            if key in ['to_id', 'from_id']:
                item = resid[get_xml_id(_('step'), module, {'id': item})]
            transition_values[key] = item
        # Create or update
        ir_model_data_obj._update(
            'scanner.scenario.transition', module, transition_values,
            xml_id=get_xml_id(_('transition'), module, transition_values),
            mode=mode, noupdate=noupdate,
        )
def test_l10n_mx_edi_invoice_basic_33(self):
    """Run the base CFDI 3.3 invoice flow plus several follow-up scenarios.

    Scenarios covered:
    1. The generic basic-invoice test against the CFDI 3.3 expected XML.
    2. A refund whose CFDI must carry a ``CfdiRelacionado`` node pointing
       at the original invoice UUID.
    3. An invoice line without a product (must post without traceback).
    4. Taxes using ``include_base_amount`` (XML total must match).
    5. Sending a payment receipt by email (CFDI attachments present).

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual``
    (removed in Python 3.12) -- replaced with the canonical name.
    """
    self.xml_expected_str = misc.file_open(os.path.join(
        'l10n_mx_edi', 'tests', 'expected_cfdi33.xml')).read().encode('UTF-8')
    self.xml_expected = objectify.fromstring(self.xml_expected_str)
    self.test_l10n_mx_edi_invoice_basic()

    # -----------------------
    # Testing invoice refund to verify CFDI related section
    # -----------------------
    invoice = self.create_invoice()
    invoice.post()
    refund = self.env['account.move.reversal'].with_context(
        active_ids=invoice.ids).create({
            'refund_method': 'refund',
            'reason': 'Refund Test',
            'date': invoice.invoice_date,
        })
    result = refund.reverse_moves()
    # reverse_moves() returns an action; the new refund id is embedded in
    # its domain: [..., ('id', 'in', [refund_id])]
    refund_id = result.get('domain')[1][2]
    refund = self.invoice_model.browse(refund_id)
    refund.refresh()
    refund.post()
    xml = refund.l10n_mx_edi_get_xml_etree()
    self.assertEqual(xml.CfdiRelacionados.CfdiRelacionado.get('UUID'),
                     invoice.l10n_mx_edi_cfdi_uuid,
                     'Invoice UUID is different to CFDI related')

    # -----------------------
    # Testing invoice without product to verify no traceback
    # -----------------------
    invoice = self.create_invoice()
    invoice.invoice_line_ids[0].product_id = False
    invoice.compute_taxes()
    invoice.post()
    self.assertEqual(invoice.state, "posted")

    # -----------------------
    # Testing case with include base amount
    # -----------------------
    invoice = self.create_invoice()
    tax_ieps = self.tax_positive.copy({
        'name': 'IEPS 9%',
        'amount': 9.0,
        'include_base_amount': True,
    })
    # IEPS must be applied before the other tax so its amount enters the base.
    self.tax_positive.sequence = 3
    for line in invoice.invoice_line_ids:
        line.invoice_line_tax_id = [self.tax_positive.id, tax_ieps.id]
    invoice.compute_taxes()
    invoice.post()
    self.assertEqual(invoice.l10n_mx_edi_pac_status, "signed",
                     invoice.message_ids.mapped('body'))
    xml_total = invoice.l10n_mx_edi_get_xml_etree().get('Total')
    self.assertEqual(invoice.amount_total, float(xml_total),
                     'The amount with include base amount is incorrect')

    # -----------------------
    # Testing send payment by email
    # -----------------------
    invoice = self.create_invoice()
    invoice.post()
    bank_journal = self.env['account.journal'].search(
        [('type', '=', 'bank')], limit=1)
    payment_register = Form(self.env['account.payment'].with_context(
        active_model='account.move', active_ids=invoice.ids))
    payment_register.payment_date = invoice.date
    payment_register.l10n_mx_edi_payment_method_id = self.env.ref(
        'l10n_mx_edi.payment_method_efectivo')
    payment_register.payment_method_id = self.env.ref(
        "account.account_payment_method_manual_in")
    payment_register.journal_id = bank_journal
    payment_register.communication = invoice.name
    payment_register.amount = invoice.amount_total
    payment = payment_register.save()
    payment.post()
    self.assertEqual(payment.l10n_mx_edi_pac_status, "signed",
                     payment.message_ids.mapped('body'))
    default_template = self.env.ref(
        'account.mail_template_data_payment_receipt')
    wizard_mail = self.env['mail.compose.message'].with_context({
        'default_template_id': default_template.id,
        'default_model': 'account.payment',
        'default_res_id': payment.id}).create({})
    res = wizard_mail.onchange_template_id(
        default_template.id, wizard_mail.composition_mode,
        'account_payment', payment.id)
    wizard_mail.write(
        {'attachment_ids': res.get('value', {}).get('attachment_ids', [])})
    wizard_mail.send_mail()
    attachment = payment.l10n_mx_edi_retrieve_attachments()
    self.assertEqual(len(attachment), 2, 'Documents not attached correctly')
def convert_to_pdfa(self):
    """ Transform the opened PDF file into a PDF/A compliant file.

    Mutates this writer in place: rewrites the file header, adds a
    document ID, embeds an sRGB ICC output intent, rewrites embedded font
    width arrays (when fonttools is available), forces the outline count,
    and sets the producer metadata. Sets ``self.is_pdfa = True`` when done.
    """
    # Set the PDF version to 1.7 (as PDF/A-3 is based on version 1.7) and make it PDF/A compliant.
    # See https://github.com/veraPDF/veraPDF-validation-profiles/wiki/PDFA-Parts-2-and-3-rules#rule-612-1
    #
    # " The file header shall begin at byte zero and shall consist of "%PDF-1.n" followed by a single EOL marker,
    # where 'n' is a single digit number between 0 (30h) and 7 (37h) "
    # " The aforementioned EOL marker shall be immediately followed by a % (25h) character followed by at least four
    # bytes, each of whose encoded byte values shall have a decimal value greater than 127 "
    self._header = b"%PDF-1.7\n%\xFF\xFF\xFF\xFF"

    # Add a document ID to the trailer. This is only needed when using encryption with regular PDF, but is required
    # when using PDF/A
    pdf_id = ByteStringObject(md5(self._reader.stream.getvalue()).digest())
    # The first string is based on the content at the time of creating the file, while the second is based on the
    # content of the file when it was last updated. When creating a PDF, both are set to the same value.
    self._ID = ArrayObject((pdf_id, pdf_id))

    # Embed the sRGB ICC colour profile, compressed with zlib (FlateDecode).
    with file_open('tools/data/files/sRGB2014.icc', mode='rb') as icc_profile:
        icc_profile_file_data = compress(icc_profile.read())

    icc_profile_stream_obj = DecodedStreamObject()
    icc_profile_stream_obj.setData(icc_profile_file_data)
    icc_profile_stream_obj.update({
        NameObject("/Filter"): NameObject("/FlateDecode"),
        # /N 3 = three colour components (RGB).
        NameObject("/N"): NumberObject(3),
        NameObject("/Length"): NameObject(str(len(icc_profile_file_data))),
    })

    icc_profile_obj = self._addObject(icc_profile_stream_obj)

    # Declare the ICC profile as the document's output intent.
    output_intent_dict_obj = DictionaryObject()
    output_intent_dict_obj.update({
        NameObject("/S"): NameObject("/GTS_PDFA1"),
        NameObject("/OutputConditionIdentifier"): createStringObject("sRGB"),
        NameObject("/DestOutputProfile"): icc_profile_obj,
        NameObject("/Type"): NameObject("/OutputIntent"),
    })

    output_intent_obj = self._addObject(output_intent_dict_obj)
    self._root_object.update({
        NameObject("/OutputIntents"): ArrayObject([output_intent_obj]),
    })

    pages = self._root_object['/Pages']['/Kids']

    # PDF/A needs the glyphs width array embedded in the pdf to be consistent with the ones from the font file.
    # But it seems like it is not the case when exporting from wkhtmltopdf.
    if TTFont:
        fonts = {}
        # First browse through all the pages of the pdf file, to get a reference to all the fonts used in the PDF.
        # Keyed by object id (idnum) to de-duplicate fonts shared across pages.
        for page in pages:
            for font in page.getObject()['/Resources']['/Font'].values():
                for descendant in font.getObject()['/DescendantFonts']:
                    fonts[descendant.idnum] = descendant.getObject()

        # Then for each font, rewrite the width array with the information taken directly from the font file.
        # The new width are calculated such as width = round(1000 * font_glyph_width / font_units_per_em)
        # See: http://martin.hoppenheit.info/blog/2018/pdfa-validation-and-inconsistent-glyph-width-information/
        for font in fonts.values():
            font_file = font['/FontDescriptor']['/FontFile2']
            # NOTE(review): relies on the private ``_data`` attribute of the
            # stream object and on the font stream being Flate-compressed.
            stream = io.BytesIO(decompress(font_file._data))
            ttfont = TTFont(stream)
            font_upm = ttfont['head'].unitsPerEm
            # ``_hmtx.metrics`` is a private fonttools mapping of
            # glyph name -> (advance width, left side bearing).
            glyphs = ttfont.getGlyphSet()._hmtx.metrics
            glyph_widths = []
            for key, values in glyphs.items():
                # Only glyphs named 'glyphNNNNN' (wkhtmltopdf-subset naming)
                # are rewritten -- assumes subset fonts use that scheme.
                if key[:5] == 'glyph':
                    glyph_widths.append(NumberObject(round(1000.0 * values[0] / font_upm)))

            # /W format: [first_glyph_index [w1 w2 ...]] starting at glyph 1.
            font[NameObject('/W')] = ArrayObject([NumberObject(1), ArrayObject(glyph_widths)])
            stream.close()
    else:
        _logger.warning('The fonttools package is not installed. Generated PDF may not be PDF/A compliant.')

    outlines = self._root_object['/Outlines'].getObject()
    outlines[NameObject('/Count')] = NumberObject(1)

    # Set odoo as producer
    self.addMetadata({
        '/Creator': "Odoo",
        '/Producer': "Odoo",
    })

    self.is_pdfa = True
def create_source_docx_partner(self, cr, uid, ids, report, records, init_pay, context=None): # 2016-11-2 支持了图片 # 1.导入依赖,python3语法 # from . import report_helper # 2. 需要添加一个"tpl"属性获得模版对象 tempname = tempfile.mkdtemp() temp_out_file = self.generate_temp_file(tempname) doc = DocxTemplate(misc.file_open(report.template_file).name) env = api.Environment(cr, uid, context) partner = env.get('partner').search([('id', '=', context.get('partner_id'))]) simple_dict = {'partner_name': partner.name, 'from_date': context.get('from_date'), 'to_date': context.get('to_date'), 'report_line': [], 'init_pay': {}, 'final_pay': {}} if not records: if init_pay: simple_dict['init_pay'] = init_pay simple_dict['final_pay'] = init_pay doc.render({'obj': simple_dict, 'tpl': doc}, report_helper.get_env()) doc.save(temp_out_file) report_stream = '' with open(temp_out_file, 'rb') as input_stream: report_stream = input_stream.read() os.remove(temp_out_file) return report_stream, report.output_type data = DataModelProxy(records) for p_value in data: simple_dict['report_line'].append({ 'date': p_value.date, 'name': p_value.name, 'note': p_value.note, 'amount': p_value.amount, 'pay_amount': p_value.pay_amount, 'discount_money': p_value.discount_money, 'balance_amount': p_value.balance_amount }) if data: simple_dict['init_pay'] = data[0].balance_amount - data[0].amount + data[0].pay_amount - data[ 0].discount_money simple_dict['final_pay'] = data[-1].balance_amount doc.render({'obj': simple_dict, 'tpl': doc}, report_helper.get_env()) doc.save(temp_out_file) if report.output_type == 'pdf': temp_file = self.render_to_pdf(temp_out_file) else: temp_file = temp_out_file report_stream = '' with open(temp_file, 'rb') as input_stream: report_stream = input_stream.read() os.remove(temp_file) return report_stream, report.output_type
def post_init_hook(cr, registry):
    """Post-install hook for the project_agile module.

    Imports and publishes the default project workflow, backfills agile
    ordering, task types and priorities on existing records, and then
    tightens the corresponding columns to NOT NULL.

    :param cr: database cursor
    :param registry: model registry (unused; hook signature requirement)
    """
    import os
    from odoo import api, SUPERUSER_ID
    from odoo.tools import misc

    env = api.Environment(cr, SUPERUSER_ID, {})

    # Load project workflow
    workflow_pathname = os.path.join("project_agile", "import", "project_workflow.xml")
    with misc.file_open(workflow_pathname) as stream:
        importer = env["project.workflow.importer"]
        reader = env["project.workflow.xml.reader"]
        workflow = importer.run(reader, stream)
        workflow.write({"state": "live"})  # Publish imported workflow

    # Assign simple workflow to all project types
    env["project.type"].search([]).write({"workflow_id": workflow.id})

    # We need to assign initial agile order.
    # It would be nicer if latest tasks were in the top of the backlog.
    cr.execute("SELECT COUNT(*) FROM project_task")
    count = cr.fetchone()[0]
    # agile_order = count - id: newer tasks (higher id) get a lower order.
    cr.execute(
        """
        UPDATE project_task SET agile_order = %s - id WHERE agile_order IS NULL
        """,
        (int(count), ),
    )

    # Epics allow sub epics
    task_type_epic = env.ref("project_agile.project_task_type_epic")
    type_ids = task_type_epic.type_ids.ids
    type_ids.append(task_type_epic.id)
    task_type_epic.write({"type_ids": [(6, 0, type_ids)]})

    # Set default project task type to the existing projects
    env["project.project"].sudo().with_context(
        no_workflow=True)._set_default_project_type_id()
    # and set ``type_id`` field to not null
    cr.execute(
        "ALTER TABLE project_project ALTER COLUMN type_id SET NOT NULL;")

    # Set default project task type to the existing tasks
    env["project.task"].sudo()._set_default_task_type_id()
    task_type_task = env.ref("project_agile.project_task_type_task")
    cr.execute(
        "UPDATE project_task SET type_id=%s WHERE type_id IS NULL;",
        (task_type_task.id, ),
    )
    # and set ``type_id`` field to not null
    cr.execute("ALTER TABLE project_task ALTER COLUMN type_id SET NOT NULL;")

    # Set default task priority to the existing tasks
    env["project.task"].sudo()._set_default_task_priority_id()
    priority_minor = env.ref("project_agile.project_task_priority_minor")
    cr.execute(
        "UPDATE project_task SET priority_id=%s WHERE priority_id IS NULL;",
        (priority_minor.id, ),
    )
    # and set ``priority_id`` field to not null
    cr.execute(
        "ALTER TABLE project_task ALTER COLUMN priority_id SET NOT NULL;")
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
    """Populates the ir_translation table.

    Reads a translation file (CSV or PO) and pushes its entries into
    ``ir.translation`` via the import cursor. When loading a ``.po`` file
    the matching ``.pot`` is also parsed (if found next to it) to provide
    additional targets and comments.

    :param cr: database cursor
    :param fileobj: open file-like object holding the translation data;
        must support ``seek`` and expose a ``name`` attribute
    :param fileformat: 'csv' or 'po'; anything else raises
    :param lang: language code to load (created if it does not exist yet)
    :param lang_name: display name used when the language must be created
    :param verbose: log progress messages when True
    :param module_name: module to attribute numeric-res_id terms to
    :param context: optional environment context
    """
    if verbose:
        _logger.info('loading translation file for language %s', lang)

    env = odoo.api.Environment(cr, SUPERUSER_ID, context or {})
    Lang = env['res.lang']
    Translation = env['ir.translation']

    try:
        if not Lang.search_count([('code', '=', lang)]):
            # lets create the language with locale information
            Lang.load_lang(lang=lang, lang_name=lang_name)

        # Parse also the POT: it will possibly provide additional targets.
        # (Because the POT comments are correct on Launchpad but not the
        # PO comments due to a Launchpad limitation. See LP bug 933496.)
        pot_reader = []

        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                fields = row
                break
        elif fileformat == 'po':
            reader = PoFile(fileobj)
            fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']

            # Make a reader for the POT file and be somewhat defensive for the
            # stable branch.
            if fileobj.name.endswith('.po'):
                try:
                    # Normally the path looks like /path/to/xxx/i18n/lang.po
                    # and we try to find the corresponding
                    # /path/to/xxx/i18n/xxx.pot file.
                    # (Sometimes we have 'i18n_extra' instead of just 'i18n')
                    addons_module_i18n, _ignored = os.path.split(fileobj.name)
                    addons_module, i18n_dir = os.path.split(addons_module_i18n)
                    addons, module = os.path.split(addons_module)
                    pot_handle = file_open(os.path.join(
                        addons, module, i18n_dir, module + '.pot'))
                    pot_reader = PoFile(pot_handle)
                except:
                    # Deliberate best-effort: a missing/unreadable POT must
                    # never abort the PO import.
                    pass
        else:
            _logger.info('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format: %s') % fileformat)

        # Read the POT references, and keep them indexed by source string.
        class Target(object):
            # Accumulator for one POT source string: its translated value,
            # the set of (type, name, res_id) locations, and its comments.
            def __init__(self):
                self.value = None
                self.targets = set()  # set of (type, name, res_id)
                self.comments = None

        pot_targets = defaultdict(Target)
        for type, name, res_id, src, _ignored, comments in pot_reader:
            if type is not None:
                target = pot_targets[src]
                target.targets.add((type, name, res_id))
                target.comments = comments

        # read the rest of the file
        irt_cursor = Translation._get_import_cursor()

        def process_row(row):
            """Process a single PO (or POT) entry."""
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ..., 'module':...}
            dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
                                 'comments', 'imd_model', 'imd_name', 'module'))
            dic['lang'] = lang
            dic.update(zip(fields, row))

            # discard the target from the POT targets.
            src = dic['src']
            if src in pot_targets:
                target = pot_targets[src]
                target.value = dic['value']
                target.targets.discard((dic['type'], dic['name'], dic['res_id']))

            # This would skip terms that fail to specify a res_id
            res_id = dic['res_id']
            if not res_id:
                return

            if isinstance(res_id, (int, long)) or \
                    (isinstance(res_id, basestring) and res_id.isdigit()):
                # Plain numeric id: store as int, attributed to module_name.
                dic['res_id'] = int(res_id)
                if module_name:
                    dic['module'] = module_name
            else:
                # res_id is an xml id
                dic['res_id'] = None
                dic['imd_model'] = dic['name'].split(',')[0]
                if '.' in res_id:
                    dic['module'], dic['imd_name'] = res_id.split('.', 1)
                else:
                    dic['module'], dic['imd_name'] = module_name, res_id

            irt_cursor.push(dic)

        # First process the entries from the PO file (doing so also fills/removes
        # the entries from the POT file).
        for row in reader:
            process_row(row)

        # Then process the entries implied by the POT file (which is more
        # correct w.r.t. the targets) if some of them remain.
        pot_rows = []
        for src, target in pot_targets.iteritems():
            if target.value:
                for type, name, res_id in target.targets:
                    pot_rows.append((type, name, res_id, src, target.value, target.comments))
        pot_targets.clear()
        for row in pot_rows:
            process_row(row)

        irt_cursor.finish()
        Translation.clear_caches()
        if verbose:
            _logger.info("translation file loaded succesfully")

    except IOError:
        iso_lang = get_iso_codes(lang)
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        _logger.exception("couldn't read translation file %s", filename)
def setUpClass(cls, chart_template_ref='l10n_mx.mx_coa', edi_format_ref='mx_pac_edi.edi_cfdi_3_3'): super().setUpClass(chart_template_ref=chart_template_ref, edi_format_ref=edi_format_ref) cls.frozen_today = datetime.datetime(year=2017, month=1, day=1, hour=0, minute=0, second=0, tzinfo=timezone('utc')) # Allow to see the full result of AssertionError. cls.maxDiff = None # ==== Config ==== cls.certificate = cls.env['mx_pac_edi.certificate'].create({ 'content': base64.encodebytes( misc.file_open( os.path.join('mx_pac_edi', 'demo', 'pac_credentials', 'certificate.cer'), 'rb').read()), 'key': base64.encodebytes( misc.file_open( os.path.join('mx_pac_edi', 'demo', 'pac_credentials', 'certificate.key'), 'rb').read()), 'password': '******', }) cls.certificate.write({ 'date_start': '2016-01-01 01:00:00', 'date_end': '2018-01-01 01:00:00', }) cls.company_data['company'].write({ 'vat': 'EKU9003173C9', 'street_name': 'Campobasso Norte', 'street2': 'Fraccionamiento Montecarlo', 'street_number': '3206', 'street_number2': '9000', 'zip': '85134', 'city': 'Ciudad Obregón', 'country_id': cls.env.ref('base.mx').id, 'state_id': cls.env.ref('base.state_mx_son').id, 'mx_pac_edi_pac': 'solfact', 'mx_pac_edi_pac_test_env': True, 'mx_pac_edi_fiscal_regime': '601', 'mx_pac_edi_certificate_ids': [(6, 0, cls.certificate.ids)], }) cls.currency_data['currency'].mx_pac_edi_decimal_places = 3 # Replace the USD by Gol to test external trade. cls.fake_usd_data = cls.setup_multi_currency_data(default_values={ 'name': 'FUSD', 'symbol': '$', 'rounding': '0.01', 'mx_pac_edi_decimal_places': 2, }, rate2016=6.0, rate2017=4.0) cls.cr.execute( ''' UPDATE ir_model_data SET res_id = %s WHERE module = %s AND name = %s ''', [cls.fake_usd_data['currency'].id, 'base', 'USD']) # Prevent the xsd validation because it could lead to a not-deterministic behavior since the xsd is downloaded # by a CRON. 
xsd_attachment = cls.env.ref('mx_pac_edi.xsd_cached_cfdv33_xsd', False) if xsd_attachment: xsd_attachment.unlink() # ==== Business ==== cls.tax_16 = cls.env['account.tax'].create({ 'name': 'tax_16', 'amount_type': 'percent', 'amount': 16, 'type_tax_use': 'sale', 'l10n_mx_tax_type': 'Tasa', }) cls.tax_10_negative = cls.env['account.tax'].create({ 'name': 'tax_10_negative', 'amount_type': 'percent', 'amount': -10, 'type_tax_use': 'sale', 'l10n_mx_tax_type': 'Tasa', }) cls.tax_group = cls.env['account.tax'].create({ 'name': 'tax_group', 'amount_type': 'group', 'amount': 0.0, 'type_tax_use': 'sale', 'children_tax_ids': [(6, 0, (cls.tax_16 + cls.tax_10_negative).ids)], }) cls.product = cls.env['product.product'].create({ 'name': 'product_mx', 'weight': 2, 'uom_po_id': cls.env.ref('uom.product_uom_kgm').id, 'uom_id': cls.env.ref('uom.product_uom_kgm').id, 'lst_price': 1000.0, 'property_account_income_id': cls.company_data['default_account_revenue'].id, 'property_account_expense_id': cls.company_data['default_account_expense'].id, 'unspsc_code_id': cls.env.ref('product_unspsc.unspsc_code_01010101').id, }) cls.payment_term = cls.env['account.payment.term'].create({ 'name': 'test mx_pac_edi', 'line_ids': [(0, 0, { 'value': 'balance', 'value_amount': 0.0, 'days': 90, 'option': 'day_after_invoice_date', })], }) cls.partner_a.write({ 'property_supplier_payment_term_id': cls.payment_term.id, 'country_id': cls.env.ref('base.us').id, 'state_id': cls.env.ref('base.state_us_23').id, 'zip': 39301, 'vat': '123456789', }) # ==== Records needing CFDI ==== cls.invoice = cls.env['account.move'].with_context( edi_test_mode=True).create({ 'move_type': 'out_invoice', 'partner_id': cls.partner_a.id, 'invoice_date': '2017-01-01', 'date': '2017-01-01', 'currency_id': cls.currency_data['currency'].id, 'invoice_incoterm_id': cls.env.ref('account.incoterm_FCA').id, 'invoice_line_ids': [(0, 0, { 'product_id': cls.product.id, 'price_unit': 2000.0, 'quantity': 5, 'discount': 20.0, 'tax_ids': [(6, 0, 
(cls.tax_16 + cls.tax_10_negative).ids)], })], }) cls.expected_invoice_cfdi_values = ''' <Comprobante Certificado="___ignore___" Fecha="2016-12-31T17:00:00" Folio="1" FormaPago="99" LugarExpedicion="85134" MetodoPago="PUE" Moneda="Gol" NoCertificado="''' + cls.certificate.serial_number + '''" Serie="INV/2017/01/" Sello="___ignore___" Descuento="2000.000" SubTotal="10000.000" Total="8480.000" TipoCambio="0.500000" TipoDeComprobante="I" Version="3.3"> <Emisor Rfc="EKU9003173C9" Nombre="company_1_data" RegimenFiscal="601"/> <Receptor Rfc="XEXX010101000" Nombre="partner_a" UsoCFDI="P01"/> <Conceptos> <Concepto Cantidad="5.000000" ClaveProdServ="01010101" Descripcion="product_mx" Importe="10000.000" Descuento="2000.000" ValorUnitario="2000.000"> <Impuestos> <Traslados> <Traslado Base="8000.000" Importe="1280.00" TasaOCuota="0.160000" TipoFactor="Tasa"/> </Traslados> <Retenciones> <Retencion Base="8000.000" Importe="800.00" TasaOCuota="0.100000" TipoFactor="Tasa"/> </Retenciones> </Impuestos> </Concepto> </Conceptos> <Impuestos TotalImpuestosRetenidos="800.000" TotalImpuestosTrasladados="1280.000"> <Retenciones> <Retencion Importe="800.000"/> </Retenciones> <Traslados> <Traslado Importe="1280.000" TasaOCuota="0.160000" TipoFactor="Tasa"/> </Traslados> </Impuestos> </Comprobante> ''' cls.payment = cls.env['account.payment'].with_context( edi_test_mode=True).create({ 'date': '2017-01-01', 'amount': cls.invoice.amount_total, 'payment_type': 'inbound', 'partner_type': 'customer', 'partner_id': cls.partner_a.id, 'currency_id': cls.currency_data['currency'].id, 'payment_method_id': cls.env.ref('account.account_payment_method_manual_out').id, 'journal_id': cls.company_data['default_journal_bank'].id, }) cls.statement = cls.env['account.bank.statement'].with_context( edi_test_mode=True).create({ 'name': 'test_statement', 'date': '2017-01-01', 'journal_id': cls.company_data['default_journal_bank'].id, 'line_ids': [ (0, 0, { 'payment_ref': 'mx_st_line', 'partner_id': 
cls.partner_a.id, 'foreign_currency_id': cls.currency_data['currency'].id, 'amount': cls.invoice.amount_total_signed, 'amount_currency': cls.invoice.amount_total, }), ], }) cls.statement_line = cls.statement.line_ids # payment done on 2017-01-01 00:00:00 UTC is expected to be signed on 2016-12-31 17:00:00 in Mexico tz cls.expected_payment_cfdi_values = ''' <Comprobante Certificado="___ignore___" Fecha="2016-12-31T17:00:00" Folio="1" LugarExpedicion="85134" Moneda="XXX" NoCertificado="''' + cls.certificate.serial_number + '''"
def trans_generate(lang, modules, cr): env = odoo.api.Environment(cr, SUPERUSER_ID, {}) to_translate = set() def push_translation(module, type, name, id, source, comments=None): # empty and one-letter terms are ignored, they probably are not meant to be # translated, and would be very hard to translate anyway. sanitized_term = (source or '').strip() try: # verify the minimal size without eventual xml tags # wrap to make sure html content like '<a>b</a><c>d</c>' is accepted by lxml wrapped = "<div>%s</div>" % sanitized_term node = etree.fromstring(wrapped) sanitized_term = etree.tostring(node, encoding='UTF-8', method='text') except etree.ParseError: pass # remove non-alphanumeric chars sanitized_term = re.sub(r'\W+', '', sanitized_term) if not sanitized_term or len(sanitized_term) <= 1: return tnx = (module, source, name, id, type, tuple(comments or ())) to_translate.add(tnx) query = 'SELECT name, model, res_id, module FROM ir_model_data' query_models = """SELECT m.id, m.model, imd.module FROM ir_model AS m, ir_model_data AS imd WHERE m.id = imd.res_id AND imd.model = 'ir.model'""" if 'all_installed' in modules: query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') ' query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') " if 'all' not in modules: query += ' WHERE module IN %s' query_models += ' AND imd.module IN %s' query_param = (tuple(modules),) else: query += ' WHERE module != %s' query_models += ' AND imd.module != %s' query_param = ('__export__',) query += ' ORDER BY module, model, name' query_models += ' ORDER BY module, model' cr.execute(query, query_param) for (xml_name, model, res_id, module) in cr.fetchall(): module = encode(module) model = encode(model) xml_name = "%s.%s" % (module, encode(xml_name)) if model not in env: _logger.error("Unable to find object %r", model) continue record = env[model].browse(res_id) if not record._translate: # explicitly disabled continue if 
not record.exists(): _logger.warning("Unable to find object %r with id %d", model, res_id) continue if model=='ir.model.fields': try: field_name = encode(record.name) except AttributeError, exc: _logger.error("name error in %s: %s", xml_name, str(exc)) continue field_model = env.get(record.model) if (field_model is None or not field_model._translate or field_name not in field_model._fields): continue field = field_model._fields[field_name] if isinstance(getattr(field, 'selection', None), (list, tuple)): name = "%s,%s" % (encode(record.model), field_name) for dummy, val in field.selection: push_translation(module, 'selection', name, 0, encode(val)) elif model=='ir.actions.report.xml': name = encode(record.report_name) fname = "" if record.report_rml: fname = record.report_rml parse_func = trans_parse_rml report_type = "report" elif record.report_xsl: continue if fname and record.report_type in ('pdf', 'xsl'): try: with file_open(fname) as report_file: d = etree.parse(report_file) for t in parse_func(d.iter()): push_translation(module, report_type, name, 0, t) except (IOError, etree.XMLSyntaxError): _logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
def trans_generate(lang, modules, cr): env = odoo.api.Environment(cr, SUPERUSER_ID, {}) to_translate = set() def push_translation(module, type, name, id, source, comments=None): # empty and one-letter terms are ignored, they probably are not meant to be # translated, and would be very hard to translate anyway. sanitized_term = (source or '').strip() try: # verify the minimal size without eventual xml tags # wrap to make sure html content like '<a>b</a><c>d</c>' is accepted by lxml wrapped = "<div>%s</div>" % sanitized_term node = etree.fromstring(wrapped) sanitized_term = etree.tostring(node, encoding='UTF-8', method='text') except etree.ParseError: pass # remove non-alphanumeric chars sanitized_term = re.sub(r'\W+', '', sanitized_term) if not sanitized_term or len(sanitized_term) <= 1: return tnx = (module, source, name, id, type, tuple(comments or ())) to_translate.add(tnx) query = 'SELECT name, model, res_id, module FROM ir_model_data' query_models = """SELECT m.id, m.model, imd.module FROM ir_model AS m, ir_model_data AS imd WHERE m.id = imd.res_id AND imd.model = 'ir.model'""" if 'all_installed' in modules: query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') ' query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') " if 'all' not in modules: query += ' WHERE module IN %s' query_models += ' AND imd.module IN %s' query_param = (tuple(modules), ) else: query += ' WHERE module != %s' query_models += ' AND imd.module != %s' query_param = ('__export__', ) query += ' ORDER BY module, model, name' query_models += ' ORDER BY module, model' cr.execute(query, query_param) for (xml_name, model, res_id, module) in cr.fetchall(): module = encode(module) model = encode(model) xml_name = "%s.%s" % (module, encode(xml_name)) if model not in env: _logger.error("Unable to find object %r", model) continue record = env[model].browse(res_id) if not record._translate: # explicitly disabled continue if 
not record.exists(): _logger.warning("Unable to find object %r with id %d", model, res_id) continue if model == 'ir.model.fields': try: field_name = encode(record.name) except AttributeError, exc: _logger.error("name error in %s: %s", xml_name, str(exc)) continue field_model = env.get(record.model) if (field_model is None or not field_model._translate or field_name not in field_model._fields): continue field = field_model._fields[field_name] if isinstance(getattr(field, 'selection', None), (list, tuple)): name = "%s,%s" % (encode(record.model), field_name) for dummy, val in field.selection: push_translation(module, 'selection', name, 0, encode(val)) elif model == 'ir.actions.report.xml': name = encode(record.report_name) fname = "" if record.report_rml: fname = record.report_rml parse_func = trans_parse_rml report_type = "report" elif record.report_xsl: continue if fname and record.report_type in ('pdf', 'xsl'): try: with file_open(fname) as report_file: d = etree.parse(report_file) for t in parse_func(d.iter()): push_translation(module, report_type, name, 0, t) except (IOError, etree.XMLSyntaxError): _logger.exception( "couldn't export translation for report %s %s %s", name, report_type, fname)
def trans_generate(lang, modules, cr):
    """Collect all translatable terms for the given modules.

    Gathers terms from four sources: ir.model.data-bound records (field
    selections, RML reports, translatable fields), model constraints,
    Python/Mako/JS/QWeb source files found under the addons paths, and
    returns them together with their existing translation for *lang*.

    :param lang: language code to look translations up for; falsy means
        export source terms only (empty translation column)
    :param modules: iterable of module names, or containing 'all' /
        'all_installed' as wildcards
    :param cr: database cursor
    :return: sorted list of tuples
        (module, type, name, id, source, translation, comments)
    """
    env = odoo.api.Environment(cr, SUPERUSER_ID, {})
    to_translate = set()

    def push_translation(module, type, name, id, source, comments=None):
        # empty and one-letter terms are ignored, they probably are not meant to be
        # translated, and would be very hard to translate anyway.
        sanitized_term = (source or '').strip()
        try:
            # verify the minimal size without eventual xml tags
            # wrap to make sure html content like '<a>b</a><c>d</c>' is accepted by lxml
            wrapped = "<div>%s</div>" % sanitized_term
            node = etree.fromstring(wrapped)
            sanitized_term = etree.tostring(node, encoding='UTF-8', method='text')
        except etree.ParseError:
            pass
        # remove non-alphanumeric chars
        sanitized_term = re.sub(r'\W+', '', sanitized_term)
        if not sanitized_term or len(sanitized_term) <= 1:
            return
        tnx = (module, source, name, id, type, tuple(comments or ()))
        to_translate.add(tnx)

    query = 'SELECT name, model, res_id, module FROM ir_model_data'
    query_models = """SELECT m.id, m.model, imd.module FROM ir_model AS m, ir_model_data AS imd WHERE m.id = imd.res_id AND imd.model = 'ir.model'"""

    # NOTE(review): if 'all_installed' is combined with an explicit module
    # list (no 'all'), the first query gets two WHERE clauses -- confirm
    # callers never mix these options.
    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module IN %s'
        query_param = (tuple(modules), )
    else:
        # 'all': everything except manual '__export__' records.
        query += ' WHERE module != %s'
        query_models += ' AND imd.module != %s'
        query_param = ('__export__', )
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'

    cr.execute(query, query_param)
    for (xml_name, model, res_id, module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))

        if model not in env:
            _logger.error("Unable to find object %r", model)
            continue

        record = env[model].browse(res_id)
        if not record._translate:
            # explicitly disabled
            continue

        if not record.exists():
            _logger.warning("Unable to find object %r with id %d", model, res_id)
            continue

        if model == 'ir.model.fields':
            try:
                field_name = encode(record.name)
            except AttributeError as exc:
                _logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            field_model = env.get(record.model)
            if (field_model is None or not field_model._translate or
                    field_name not in field_model._fields):
                continue
            field = field_model._fields[field_name]

            # Selection labels of the field are translatable terms.
            if isinstance(getattr(field, 'selection', None), (list, tuple)):
                name = "%s,%s" % (encode(record.model), field_name)
                for dummy, val in field.selection:
                    push_translation(module, 'selection', name, 0, encode(val))

        elif model == 'ir.actions.report.xml':
            name = encode(record.report_name)
            fname = ""
            if record.report_rml:
                fname = record.report_rml
                parse_func = trans_parse_rml
                report_type = "report"
            elif record.report_xsl:
                # XSL reports are not scanned for terms.
                continue
            if fname and record.report_type in ('pdf', 'xsl'):
                try:
                    with file_open(fname) as report_file:
                        d = etree.parse(report_file)
                        for t in parse_func(d.iter()):
                            push_translation(module, report_type, name, 0, t)
                except (IOError, etree.XMLSyntaxError):
                    _logger.exception(
                        "couldn't export translation for report %s %s %s",
                        name, report_type, fname)

        # All translatable fields of the record itself.
        for field_name, field in record._fields.iteritems():
            if field.translate:
                name = model + "," + field_name
                try:
                    value = record[field_name] or ''
                except Exception:
                    # Best-effort: unreadable field values are skipped.
                    continue
                for term in set(field.get_trans_terms(value)):
                    push_translation(module, 'model', name, xml_name, encode(term))

        # End of data for ir.model.data query results

    def push_constraint_msg(module, term_type, model, msg):
        # Callable messages cannot be exported as static terms.
        if not callable(msg):
            push_translation(encode(module), term_type, encode(model), 0, encode(msg))

    def push_local_constraints(module, model, cons_type='sql_constraints'):
        """Climb up the class hierarchy and ignore inherited constraints
        from other modules."""
        term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
        # sql_constraints: (name, definition, message); constraints: (func, message, fields)
        msg_pos = 2 if cons_type == 'sql_constraints' else 1
        for cls in model.__class__.__mro__:
            if getattr(cls, '_module', None) != module:
                continue
            constraints = getattr(cls, '_local_' + cons_type, [])
            for constraint in constraints:
                push_constraint_msg(module, term_type, model._name, constraint[msg_pos])

    cr.execute(query_models, query_param)
    for (_, model, module) in cr.fetchall():
        if model not in env:
            _logger.error("Unable to find object %r", model)
            continue
        Model = env[model]
        if Model._constraints:
            push_local_constraints(module, Model, 'constraints')
        if Model._sql_constraints:
            push_local_constraints(module, Model, 'sql_constraints')

    installed_modules = [
        m['name']
        for m in env['ir.module.module'].search_read(
            [('state', '=', 'installed')], fields=['name'])
    ]

    path_list = [(path, True) for path in odoo.modules.module.ad_paths]
    # Also scan these non-addon paths
    for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
        path_list.append((os.path.join(config['root_path'], bin_path), True))
    # non-recursive scan for individual files in root directory but without
    # scanning subdirectories that may contain addons
    path_list.append((config['root_path'], False))
    _logger.debug("Scanning modules at paths: %s", path_list)

    def get_module_from_path(path):
        # Map an absolute file path to the addon it belongs to.
        for (mp, rec) in path_list:
            mp = os.path.join(mp, '')
            if rec and path.startswith(mp) and os.path.dirname(path) != mp:
                path = path[len(mp):]
                return path.split(os.path.sep)[0]
        return 'base'  # files that are not in a module are considered as being in 'base' module

    def verified_module_filepaths(fname, path, root):
        # Return (module, absolute, relative, display) for files belonging
        # to a requested & installed module, or all-None otherwise.
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
        display_path = "addons%s" % frelativepath
        module = get_module_from_path(fabsolutepath)
        if ('all' in modules or module in modules) and module in installed_modules:
            if os.path.sep != '/':
                display_path = display_path.replace(os.path.sep, '/')
            return module, fabsolutepath, frelativepath, display_path
        return None, None, None, None

    def babel_extract_terms(fname, path, root, extract_method="python",
                            trans_type='code', extra_comments=None,
                            extract_keywords={'_': None}):
        # Extract terms from one source file via babel's extractors.
        module, fabsolutepath, _, display_path = verified_module_filepaths(
            fname, path, root)
        extra_comments = extra_comments or []
        if not module:
            return
        src_file = open(fabsolutepath, 'r')
        try:
            for extracted in extract.extract(extract_method, src_file,
                                             keywords=extract_keywords):
                # Babel 0.9.6 yields lineno, message, comments
                # Babel 1.3 yields lineno, message, comments, context
                lineno, message, comments = extracted[:3]
                push_translation(module, trans_type, display_path, lineno,
                                 encode(message), comments + extra_comments)
        except Exception:
            _logger.exception("Failed to extract terms from %s", fabsolutepath)
        finally:
            src_file.close()

    for (path, recursive) in path_list:
        _logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in walksymlinks(path):
            for fname in fnmatch.filter(files, '*.py'):
                babel_extract_terms(fname, path, root)
            # mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
            for fname in fnmatch.filter(files, '*.mako'):
                babel_extract_terms(fname, path, root, 'mako', trans_type='report')
            # Javascript source files in the static/src/js directory, rest is ignored (libs)
            if fnmatch.fnmatch(root, '*/static/src/js*'):
                for fname in fnmatch.filter(files, '*.js'):
                    babel_extract_terms(
                        fname, path, root, 'javascript',
                        extra_comments=[WEB_TRANSLATION_COMMENT],
                        extract_keywords={'_t': None, '_lt': None})
            # QWeb template files
            if fnmatch.fnmatch(root, '*/static/src/xml*'):
                for fname in fnmatch.filter(files, '*.xml'):
                    babel_extract_terms(
                        fname, path, root,
                        'odoo.tools.translate:babel_extract_qweb',
                        extra_comments=[WEB_TRANSLATION_COMMENT])
        if not recursive:
            # due to topdown, first iteration is in first level
            break

    out = []
    # translate strings marked as to be translated
    Translation = env['ir.translation']
    for module, source, name, id, type, comments in sorted(to_translate):
        trans = Translation._get_source(name, type, lang, source) if lang else ""
        out.append((module, type, name, id, source, encode(trans) or '', comments))
    return out
def _get_logo_impl(self):
    """Return the default logo, base64-encoded.

    Reads ``core/static/description/logo.png`` and encodes it with the
    Python 2 ``'base64'`` codec (the returned string contains the usual
    76-column line breaks that codec produces).

    :return: base64-encoded str of the PNG file content
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original opened the file and never closed it (leak).
    logo_path = misc.file_open('core/static/description/logo.png').name
    with open(logo_path, 'rb') as logo_file:
        return logo_file.read().encode('base64')
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
    """Populates the ir_translation table.

    Reads a translation file (``csv`` or ``po``) from *fileobj* and pushes
    every entry into ``ir.translation`` through the import cursor.  For PO
    files, the sibling POT file (if found next to it) is parsed as well so
    that targets missing from the PO can still be imported.

    :param cr: database cursor
    :param fileobj: open file-like object positioned anywhere (seek(0) is done here)
    :param fileformat: 'csv' or 'po'; anything else raises
    :param lang: language code to load the terms for (created if missing)
    :param lang_name: optional display name used when creating the language
    :param verbose: log progress messages when True
    :param module_name: fallback module for entries that don't carry one
    :param context: optional Odoo context dict
    """
    if verbose:
        _logger.info('loading translation file for language %s', lang)
    env = odoo.api.Environment(cr, SUPERUSER_ID, context or {})
    Lang = env['res.lang']
    Translation = env['ir.translation']
    try:
        if not Lang.search_count([('code', '=', lang)]):
            # lets create the language with locale information
            Lang.load_lang(lang=lang, lang_name=lang_name)

        # Parse also the POT: it will possibly provide additional targets.
        # (Because the POT comments are correct on Launchpad but not the
        # PO comments due to a Launchpad limitation. See LP bug 933496.)
        pot_reader = []

        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                fields = row
                break
        elif fileformat == 'po':
            reader = PoFile(fileobj)
            fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']
            # Make a reader for the POT file and be somewhat defensive for the
            # stable branch.
            if fileobj.name.endswith('.po'):
                try:
                    # Normally the path looks like /path/to/xxx/i18n/lang.po
                    # and we try to find the corresponding
                    # /path/to/xxx/i18n/xxx.pot file.
                    # (Sometimes we have 'i18n_extra' instead of just 'i18n')
                    addons_module_i18n, _ignored = os.path.split(fileobj.name)
                    addons_module, i18n_dir = os.path.split(addons_module_i18n)
                    addons, module = os.path.split(addons_module)
                    pot_handle = file_open(
                        os.path.join(addons, module, i18n_dir, module + '.pot'))
                    pot_reader = PoFile(pot_handle)
                except:
                    # Deliberate best-effort: a missing/broken POT must not
                    # abort the PO import.
                    pass
        else:
            _logger.info('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format: %s') % fileformat)

        # Read the POT references, and keep them indexed by source string.
        class Target(object):
            # Accumulates, for one source string, the translated value and
            # every (type, name, res_id) location the POT declares for it.
            def __init__(self):
                self.value = None
                self.targets = set()  # set of (type, name, res_id)
                self.comments = None

        pot_targets = defaultdict(Target)
        for type, name, res_id, src, _ignored, comments in pot_reader:
            if type is not None:
                target = pot_targets[src]
                target.targets.add((type, name, res_id))
                target.comments = comments

        # read the rest of the file
        irt_cursor = Translation._get_import_cursor()

        def process_row(row):
            """Process a single PO (or POT) entry."""
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            # 'src': ..., 'value': ..., 'module':...}
            dic = dict.fromkeys(
                ('type', 'name', 'res_id', 'src', 'value', 'comments',
                 'imd_model', 'imd_name', 'module'))
            dic['lang'] = lang
            dic.update(zip(fields, row))

            # discard the target from the POT targets.
            src = dic['src']
            if src in pot_targets:
                target = pot_targets[src]
                target.value = dic['value']
                target.targets.discard(
                    (dic['type'], dic['name'], dic['res_id']))

            # This would skip terms that fail to specify a res_id
            res_id = dic['res_id']
            if not res_id:
                return

            if isinstance(res_id, (int, long)) or \
                    (isinstance(res_id, basestring) and res_id.isdigit()):
                # Numeric res_id: a direct database id.
                dic['res_id'] = int(res_id)
                if module_name:
                    dic['module'] = module_name
            else:
                # res_id is an xml id
                dic['res_id'] = None
                dic['imd_model'] = dic['name'].split(',')[0]
                if '.' in res_id:
                    dic['module'], dic['imd_name'] = res_id.split('.', 1)
                else:
                    dic['module'], dic['imd_name'] = module_name, res_id

            irt_cursor.push(dic)

        # First process the entries from the PO file (doing so also fills/removes
        # the entries from the POT file).
        for row in reader:
            process_row(row)

        # Then process the entries implied by the POT file (which is more
        # correct w.r.t. the targets) if some of them remain.
        pot_rows = []
        for src, target in pot_targets.iteritems():
            if target.value:
                for type, name, res_id in target.targets:
                    pot_rows.append(
                        (type, name, res_id, src, target.value, target.comments))
        pot_targets.clear()
        for row in pot_rows:
            process_row(row)

        irt_cursor.finish()
        Translation.clear_caches()
        if verbose:
            _logger.info("translation file loaded succesfully")

    except IOError:
        iso_lang = get_iso_codes(lang)
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        _logger.exception("couldn't read translation file %s", filename)
def create_source_docx_partner(self, cr, uid, ids, report, records, init_pay, context=None):
    """Render a partner statement report from a docx template.

    Builds the template context from *records* (opening/closing balances and
    one line per record), renders the docx template configured on *report*,
    optionally converts it to PDF, and returns the binary result.

    :param report: report definition carrying ``template_file`` and ``output_type``
    :param records: the statement lines to render (may be empty)
    :param init_pay: opening balance used when there are no records
    :return: tuple ``(report_stream, output_type)``
    """
    # 2016-11-2: picture support --
    #   1. import the helper:  from . import report_helper
    #   2. a "tpl" key exposing the template object must be in the context
    work_dir = tempfile.mkdtemp()
    out_path = self.generate_temp_file(work_dir)
    template = DocxTemplate(misc.file_open(report.template_file).name)

    env = api.Environment(cr, uid, context)
    partner = env.get('partner').search(
        [('id', '=', context.get('partner_id'))])

    render_ctx = {
        'partner_name': partner.name,
        'from_date': context.get('from_date'),
        'to_date': context.get('to_date'),
        'report_line': [],
        'init_pay': {},
        'final_pay': {},
    }

    if not records:
        # No movement in the period: both balances equal the opening value.
        if init_pay:
            render_ctx['init_pay'] = init_pay
            render_ctx['final_pay'] = init_pay
        template.render({'obj': render_ctx, 'tpl': template},
                        report_helper.get_env())
        template.save(out_path)
        with open(out_path, 'rb') as rendered:
            stream = rendered.read()
        os.remove(out_path)
        return stream, report.output_type

    data = DataModelProxy(records)
    render_ctx['report_line'] = [{
        'date': line.date,
        'name': line.name,
        'note': line.note,
        'amount': line.amount,
        'pay_amount': line.pay_amount,
        'discount_money': line.discount_money,
        'balance_amount': line.balance_amount,
    } for line in data]
    if data:
        # Opening balance = first line's balance rolled back by its own movement.
        first = data[0]
        render_ctx['init_pay'] = (first.balance_amount - first.amount
                                  + first.pay_amount - first.discount_money)
        render_ctx['final_pay'] = data[-1].balance_amount

    template.render({'obj': render_ctx, 'tpl': template},
                    report_helper.get_env())
    template.save(out_path)

    result_path = (self.render_to_pdf(out_path)
                   if report.output_type == 'pdf' else out_path)
    with open(result_path, 'rb') as rendered:
        stream = rendered.read()
    os.remove(result_path)
    return stream, report.output_type
def from_data_excel(self, fields, rows_file_address):
    """Build an xls binary from header *fields* and data rows.

    :param fields: list of column titles
    :param rows_file_address: tuple ``(rows, file_address)``; when
        *file_address* is set, an existing xls template is opened and
        filled in, otherwise a fresh styled workbook is created.
    :return: the saved workbook content as a bytes/str buffer
    """
    rows, file_address = rows_file_address
    if file_address:
        # Template path: copy the existing workbook (keeping its
        # formatting) and write headers + values into sheet 0.
        bk = xlrd.open_workbook(misc.file_open(file_address).name,
                                formatting_info=True)
        workbook = copy(bk)
        worksheet = workbook.get_sheet(0)
        for i, fieldname in enumerate(fields):
            self.setOutCell(worksheet, 0, i, fieldname)
        for row, row_vals in enumerate(rows):
            for col, col_value in enumerate(row_vals):
                if isinstance(col_value, basestring):
                    # Carriage returns would corrupt the cell text.
                    col_value = re.sub("\r", " ", col_value)
                self.setOutCell(worksheet, col, row + 1, col_value)
    else:
        # No template: build a styled workbook from scratch.
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')
        style, colour_style, base_style, float_style, date_style, datetime_style = self.style_data(
        )
        # Title row: merge across all columns.
        worksheet.write_merge(0, 0, 0, len(fields) - 1, fields[0],
                              style=style)
        worksheet.row(0).height = 400
        worksheet.row(2).height = 400
        # column index -> widest cell text length seen so far
        columnwidth = {}
        for row_index, row in enumerate(rows):
            for cell_index, cell_value in enumerate(row):
                if cell_index in columnwidth:
                    if len("%s" % (cell_value)) > columnwidth.get(cell_index):
                        columnwidth.update(
                            {cell_index: len("%s" % (cell_value))})
                else:
                    columnwidth.update(
                        {cell_index: len("%s" % (cell_value))})
                if row_index == 0:
                    # first data row gets the coloured heading style
                    cell_style = colour_style
                elif row_index != len(rows) - 1:
                    # middle rows: pick a style matching the value type
                    cell_style = base_style
                    if isinstance(cell_value, basestring):
                        cell_value = re.sub("\r", " ", cell_value)
                    elif isinstance(cell_value, datetime.datetime):
                        cell_style = datetime_style
                    elif isinstance(cell_value, datetime.date):
                        cell_style = date_style
                    elif isinstance(cell_value, float) or isinstance(
                            cell_value, int):
                        cell_style = float_style
                else:
                    # last row: default style
                    cell_style = xlwt.easyxf()
                worksheet.write(row_index + 1, cell_index, cell_value,
                                cell_style)
        for column, widthvalue in columnwidth.items():
            """参考 下面链接关于自动列宽(探讨)的代码 http://stackoverflow.com/questions/6929115/python-xlwt-accessing-existing-cell-content-auto-adjust-column-width"""
            # Auto column width (see the SO link above); xlwt widths are
            # capped at 65536 units, so clamp very wide columns.
            if (widthvalue + 3) * 367 >= 65536:
                widthvalue = 50
            worksheet.col(column).width = (widthvalue + 4) * 367
        # frozen headings instead of split panes
        worksheet.set_panes_frozen(True)
        # in general, freeze after last heading row
        worksheet.set_horz_split_pos(3)
        # if user does unfreeze, don't leave a split there
        worksheet.set_remove_splits(True)
    # Serialize the workbook into an in-memory buffer and return its bytes.
    fp_currency = StringIO.StringIO()
    workbook.save(fp_currency)
    fp_currency.seek(0)
    data = fp_currency.read()
    fp_currency.close()
    return data
def open_attachment_base64(self, name_file):
    """Read a test XML fixture and return its content base64-encoded.

    :param name_file: file name inside ``l10n_mx_edi_vendor_bills/tests``
    :return: bytes, the base64-encoded UTF-8 content of the file
    """
    fixture_path = os.path.join(
        'l10n_mx_edi_vendor_bills', 'tests', name_file)
    # Close the handle explicitly instead of leaking it as the original
    # did (file_open returns a plain file object, so `with` works).
    with misc.file_open(fixture_path) as xml_file:
        content = xml_file.read()
    return base64.b64encode(bytes(content, 'utf-8'))
def test_payment_third_parties(self): """Create invoice for third + Odoo invoice, and are both paid""" # Invoice from third xml_str = misc.file_open( os.path.join('l10n_mx_edi_payment_third_parties', 'tests', 'bill.xml')).read().encode('UTF-8') res = self.env['attach.xmls.wizard'].with_context( l10n_mx_edi_invoice_type='out', l10n_mx_edi_cfdi_third=True).check_xml( {'bill.xml': base64.b64encode(xml_str).decode('UTF-8')}) invoices = res.get('invoices', {}) inv_id = invoices.get('bill.xml', {}).get('invoice_id', False) self.assertTrue(inv_id, "Error: Invoice creation") # Odoo invoice invoice = self.create_invoice() invoice.write({ 'number': 'INV/2018/1000', 'payment_term_id': self.payment_term.id, 'currency_id': self.mxn.id, }) invoice.invoice_line_ids.write({ 'quantity': 1.0, 'price_unit': 150000.00, 'invoice_line_tax_ids': [(6, 0, [])] }) invoice.compute_taxes() invoice.action_invoice_open() self.assertEqual(invoice.l10n_mx_edi_pac_status, "signed", invoice.message_ids.mapped('body')) payment = self.payment_obj.create({ 'name': 'CUST.IN/2018/999', 'currency_id': self.mxn.id, 'payment_type': 'inbound', 'partner_type': 'customer', 'partner_id': invoice.partner_id.id, 'payment_date': invoice.date, 'l10n_mx_edi_payment_method_id': self.payment_method_cash.id, 'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id, 'journal_id': self.bank_journal.id, 'communication': invoice.number, 'amount': 225000.00, 'payment_difference_handling': 'reconcile', 'writeoff_account_id': self.account.id, 'invoice_ids': [(6, 0, invoice.ids + [inv_id])], }) payment.post() xml = payment.l10n_mx_edi_get_xml_etree() namespaces = {'pago10': 'http://www.sat.gob.mx/Pagos'} comp = xml.Complemento.xpath('//pago10:Pagos', namespaces=namespaces) self.assertTrue(comp[0], 'Complement to Pagos not added correctly') xml_expected = objectify.fromstring(self.xml_expected_str) self.xml_merge_dynamic_items(xml, xml_expected) xml_expected.attrib['Folio'] = xml.attrib['Folio'] 
self.assertEqualXML(xml, xml_expected)
def _get_logo(self):
    """Return the base64-encoded content of the default logo PNG.

    Reads ``core/static/description/logo.png`` and encodes it with the
    Python 2 ``'base64'`` codec.

    :return: base64-encoded str of the PNG file content
    """
    # Context manager closes the handle; the original leaked it.
    logo_path = misc.file_open('core/static/description/logo.png').name
    with open(logo_path, 'rb') as logo_file:
        return logo_file.read().encode('base64')
def __enter__(self):
    """Open and parse the module's default post-install XML data file.

    Sets ``self.fp`` (the open file handle — presumably closed in
    ``__exit__``; TODO confirm) and ``self.tree`` (the parsed XML root),
    then returns self so the object can be used as a context manager.
    """
    data_path = os.path.join(
        Defines.MODULE_NAME, 'data/default_post_install.xml')
    self.fp = file_open(data_path)
    xml_content = self.fp.read()
    self.tree = ETree.fromstring(xml_content)
    return self
def test_l10n_mx_edi_invoice_external_trade(self):
    """Sign invoices carrying the External Trade (ComercioExterior) complement.

    Covers three scenarios:
      1. plain signing with external trade enabled (compared to an
         expected CFDI fixture),
      2. customs UoM (UMT Aduana) '01' (kg) — quantity derived from
         product weight,
      3. customs UoM '08' — quantity must be entered manually.
    """
    # Expected CFDI fixture for scenario 1.
    self.xml_expected_str = misc.file_open(os.path.join(
        'l10n_mx_edi', 'tests',
        'expected_cfdi_external_trade_33.xml')).read().encode('UTF-8')
    self.xml_expected = objectify.fromstring(self.xml_expected_str)
    # Company address data required by the external trade complement.
    self.company.partner_id.write({
        'l10n_mx_edi_locality_id': self.env.ref(
            'l10n_mx_edi.res_locality_mx_son_04').id,
        'city_id': self.env.ref('l10n_mx_edi.res_city_mx_son_018').id,
        'state_id': self.env.ref('base.state_mx_son').id,
        'l10n_mx_edi_colony_code': '2883',
        'zip': 85136,
    })
    # Foreign (US) customer with external trade enabled.
    self.partner_agrolait.commercial_partner_id.write({
        'country_id': self.env.ref('base.us').id,
        'state_id': self.env.ref('base.state_us_23').id,
        'zip': 39301,
    })
    self.partner_agrolait.write({
        'country_id': self.env.ref('base.us').id,
        'state_id': self.env.ref('base.state_us_23').id,
        'l10n_mx_edi_external_trade': True,
        'zip': 39301,
        'vat': '123456789',
    })
    self.company._load_xsd_attachments()
    # -----------------------
    # Testing sign process with External Trade
    # -----------------------
    invoice = self.create_invoice()
    invoice.incoterm_id = self.incoterm
    invoice.post()
    self.assertEqual(invoice.l10n_mx_edi_pac_status, "signed",
                     invoice.message_ids.mapped('body'))
    xml = objectify.fromstring(base64.b64decode(invoice.l10n_mx_edi_cfdi))
    self.assertTrue(xml.Complemento.xpath(
        'cce11:ComercioExterior', namespaces=self.namespaces),
        "The node '<cce11:ComercioExterior> should be present")
    # Compare the generated complement against the fixture's.
    xml_cce = xml.Complemento.xpath(
        'cce11:ComercioExterior', namespaces=self.namespaces)[0]
    xml_cce_expected = self.xml_expected.Complemento.xpath(
        'cce11:ComercioExterior', namespaces=self.namespaces)[0]
    self.assertEqualXML(xml_cce, xml_cce_expected)
    # -------------------------
    # Testing case UMT Aduana, l10n_mx_edi_code_aduana == 1
    # -------------------------
    kg = self.env.ref('uom.product_uom_kgm')
    kg.l10n_mx_edi_code_aduana = '01'
    self.product.write({
        'weight': 2,
        'l10n_mx_edi_umt_aduana_id': kg.id,
        'l10n_mx_edi_tariff_fraction_id': self.ref(
            'l10n_mx_edi_external_trade.tariff_fraction_72123099'),
    })
    invoice = self.create_invoice()
    invoice.incoterm_id = self.incoterm
    invoice.post()
    # With code '01' the customs qty is weight * invoiced quantity.
    line = invoice.invoice_line_ids
    self.assertEqual(line.l10n_mx_edi_qty_umt,
                     line.product_id.weight * line.quantity,
                     'Qty UMT != weight * quantity')
    self.assertEqual(invoice.l10n_mx_edi_pac_status, "signed",
                     invoice.message_ids.mapped('body'))
    # ------------------------
    # Testing case UMT Aduana, UMT Custom != Kg and UMT Custom != uos_id
    # ------------------------
    kg.l10n_mx_edi_code_aduana = '08'
    self.product.write({
        'l10n_mx_edi_tariff_fraction_id': self.ref(
            'l10n_mx_edi_external_trade.tariff_fraction_27101299'),
    })
    invoice = self.create_invoice()
    invoice.incoterm_id = self.incoterm
    # Manually add the value Qty UMT
    line = invoice.invoice_line_ids
    self.assertEqual(line.l10n_mx_edi_qty_umt, 0,
                     'Qty umt must be manually assigned')
    invoice.invoice_line_ids.l10n_mx_edi_qty_umt = 2
    invoice.post()
    self.assertEqual(invoice.l10n_mx_edi_pac_status, "signed",
                     invoice.message_ids.mapped('body'))
import base64
from datetime import datetime

from odoo import _
from odoo.http import request, route, Controller
from odoo.tools.misc import file_open


def _load_static_image(relative_path):
    """Read a static image shipped with the module, base64-encoded.

    :param relative_path: addon-relative path passed to ``file_open``
    :return: base64-encoded bytes of the image content
    """
    # Context manager closes the handle; the original read the files at
    # import time without ever closing them (leaked descriptors).
    with file_open(relative_path, "rb") as image_file:
        return base64.b64encode(image_file.read())


# Banner and icon for the sponsorship card, loaded once at import time.
SPONSOR_HEADER = _load_static_image(
    "crowdfunding_compassion/static/src/img/sponsor_children_banner.jpg")
SPONSOR_ICON = _load_static_image(
    "crowdfunding_compassion/static/src/img/icn_children.png")


def sponsorship_card_content():
    """Return the template values for the "sponsor children" fund card."""
    return {"type": "sponsorship",
            "value": 0,
            "name": _("Sponsor children"),
            "text": _("sponsored child"),
            "description": _(""" For 42 francs a month, you're opening the way out of poverty for a child. Sponsorship ensures that the child is known, loved and protected. In particular, it gives the child access to schooling, tutoring, regular balanced meals, medical care and training in the spiritual field, hygiene, etc. Every week, the child participates in the activities of one of the project center of the 8,000 local churches that are partners of Compassion. They allow him or her to discover and develop his or her talents."""),
            "icon_image": SPONSOR_ICON,
            "header_image": SPONSOR_HEADER
            }