def create_rml(self, cr, xml, uid, context=None):
    """Run the report's XSL-T stylesheet over ``xml`` and return the resulting RML.

    :param cr: database cursor
    :param xml: source XML document (string) to transform
    :param uid: id of the current user (used for translation lookups)
    :param context: optional dict; ``context['lang']`` selects the translation language
    :return: transformed document as a string, or ``xml`` unchanged when the
             report has no XSL stylesheet configured
    """
    if self.tmpl == "" and not self.internal_header:
        self.internal_header = True
    if not context:
        context = {}
    pool = pooler.get_pool(cr.dbname)
    ir_translation_obj = pool.get("ir.translation")
    # In some case we might not use xsl ...
    if not self.xsl:
        return xml
    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        # Rewrite relative <import href="..."> references to the absolute
        # path resolved by tools.file_open().
        for import_child in stylesheet.findall("./import"):
            if "href" in import_child.attrib:
                imp_file = import_child.get("href")
                _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                import_child.set("href", urllib.quote(str(imp_file)))
                # NOTE(review): with pathinfo=True, file_open appears to return
                # (fd, pathname); calling .close() on the second element only
                # works if it is a file object — confirm against tools.file_open.
                imp_file.close()
    finally:
        stylesheet_file.close()

    # TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        translate_aux(doc, lang, False)

    def translate_aux(doc, lang, t):
        # Translate text/tail of every node that carries — or inherits from an
        # ancestor — a "t" attribute; newline-collapsed text is used as the
        # translation source key.
        for node in doc:
            t = t or node.get("t")
            if t:
                text = None
                tail = None
                if node.text:
                    text = node.text.strip().replace("\n", " ")
                if node.tail:
                    tail = node.tail.strip().replace("\n", " ")
                if text:
                    translation1 = ir_translation_obj._get_source(cr, uid, self.name2, "xsl", lang, text)
                    if translation1:
                        node.text = node.text.replace(text, translation1)
                if tail:
                    translation2 = ir_translation_obj._get_source(cr, uid, self.name2, "xsl", lang, tail)
                    if translation2:
                        node.tail = node.tail.replace(tail, translation2)
            translate_aux(node, lang, t)

    if context.get("lang", False):
        translate(stylesheet.iter(), context["lang"])
    transform = etree.XSLT(stylesheet)
    xml = etree.tostring(transform(etree.fromstring(xml)))
    return xml
def create_rml(self, cr, xml, uid, context=None):
    """Apply the report's XSL-T stylesheet to ``xml`` and return the RML string.

    Same pipeline as the legacy variant, but translation lookups go through a
    new-style ``Environment`` / ``ir.translation`` recordset.
    """
    if self.tmpl=='' and not self.internal_header:
        self.internal_header=True
    env = openerp.api.Environment(cr, uid, context or {})
    Translation = env['ir.translation']
    # In some case we might not use xsl ...
    if not self.xsl:
        return xml
    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        # Rewrite relative <import href="..."> references to the absolute
        # path resolved by tools.file_open().
        for import_child in stylesheet.findall('./import'):
            if 'href' in import_child.attrib:
                imp_file = import_child.get('href')
                _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                import_child.set('href', urllib.quote(str(imp_file)))
                # NOTE(review): confirm file_open(..., pathinfo=True) returns a
                # closeable object in the second slot.
                imp_file.close()
    finally:
        stylesheet_file.close()

    #TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        translate_aux(doc, lang, False)

    def translate_aux(doc, lang, t):
        # Translate text/tail of any node that carries (or inherits) a "t" flag.
        for node in doc:
            t = t or node.get("t")
            if t:
                text = None
                tail = None
                if node.text:
                    text = node.text.strip().replace('\n',' ')
                if node.tail:
                    tail = node.tail.strip().replace('\n',' ')
                if text:
                    text1 = Translation._get_source(self.name2, 'xsl', lang, text)
                    if text1:
                        node.text = node.text.replace(text, text1)
                if tail:
                    tail1 = Translation._get_source(self.name2, 'xsl', lang, tail)
                    if tail1:
                        node.tail = node.tail.replace(tail, tail1)
            translate_aux(node, lang, t)

    if env.lang:
        translate(stylesheet.iter(), env.lang)
    transform = etree.XSLT(stylesheet)
    xml = etree.tostring( transform(etree.fromstring(xml)))
    return xml
def load_information_from_description_file(module, mod_path=None):
    """Load and normalize a module's manifest (descriptor) dictionary.

    :param module: name of the module (sale, purchase, ...)
    :param mod_path: physical path of the module; resolved from the addons
                     path when not provided
    :return: manifest values merged over defaults, or {} when no manifest
             file is found
    """
    if not mod_path:
        mod_path = get_module_path(module)
    terp_file = mod_path and opj(mod_path, MANIFEST) or False
    if terp_file:
        info = {}
        if os.path.isfile(terp_file):
            # default values for descriptor
            info = {
                'application': False,
                'author': 'Odoo SA',
                'auto_install': False,
                'category': 'Uncategorized',
                'depends': [],
                'description': '',
                'icon': get_module_icon(module),
                'installable': True,
                'license': 'LGPL-3',
                'post_load': None,
                'version': '1.0',
                'web': False,
                'website': 'http://www.odoo.com',
                'sequence': 100,
                'summary': '',
            }
            # Pre-seed list-valued keys with fresh empty lists: iter(list, None)
            # calls list() once per zipped key, so each key gets its own [].
            info.update(itertools.izip(
                'depends data demo test init_xml update_xml demo_xml'.split(),
                iter(list, None)))
            f = tools.file_open(terp_file)
            try:
                # NOTE(review): the manifest is eval()'d — acceptable only
                # because manifests ship with trusted module code.
                info.update(eval(f.read()))
            finally:
                f.close()
            if not info.get('description'):
                readme_path = [opj(mod_path, x) for x in README
                               if os.path.isfile(opj(mod_path, x))]
                if readme_path:
                    readme_text = tools.file_open(readme_path[0]).read()
                    info['description'] = readme_text
            if 'active' in info:
                # 'active' has been renamed 'auto_install'
                info['auto_install'] = info['active']
            info['version'] = adapt_version(info['version'])
            return info
    #TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    _logger.debug('module %s: no %s file found.', module, MANIFEST)
    return {}
def xml_import_report(self, cr, uid, ids, context=None):
    """
    To get the module and extract the xml report definition

    Two passes over <addons>/<module>/Report_def/: first collect every model
    referenced by the XML report files and export external ids for them, then
    import each XML file as report data.

    @param self: The object pointer.
    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param context: A standard dictionary
    @return : retrun report
    """
    if context is None:
        context = {}
    list_report = []
    values = self.read(cr, uid, ids, context=context)[0]
    module_id = values['module_id'][0]
    if module_id:
        module = self.pool.get('ir.module.module').browse(cr,uid, module_id)
        # Report definitions are expected under a fixed addons directory.
        pathname = CD_ODOO_ADDONS + module.name + "/Report_def/"
        print 'PathName : ',pathname
        list_report = listdir(pathname)
        print 'liste des rapports à importer ', list_report
        # Pass 1: export external ids for every model used by the reports.
        for filename in list_report:
            open_file = pathname + filename
            fp = tools.file_open(open_file)
            obj_xml_odoo = odoo_xml(fp)
            my_models = obj_xml_odoo.get_xml_all_models()
            print my_models
            # Completion of external models
            new_list_models = list(my_models)
            for name_model in new_list_models:
                print 'name model ', name_model
                my_models = self.external_models(cr,uid,name_model,my_models)
            print 'export des Id', my_models
            pool_ir_data = self.pool.get('ir.model.data')
            for rec_model in my_models:
                print 'model exporte ',rec_model
                pool_ir_data.export_external_ids(cr,uid,rec_model,module_id)
            fp.close()
        print 'conversion des fichiers XML'
        # Pass 2: import each XML file in 'init' mode with noupdate=True.
        for filename in list_report:
            open_file = pathname + filename
            print ' open_file ',open_file
            fp = tools.file_open(open_file)
            try:
                print 'XML to report',module.name
                tools.convert_xml_import(cr, module.name, fp, None,
                                         'init', True, None)
            finally:
                fp.close()
def load_information_from_description_file(module, mod_path=None):
    """Read a module's manifest file and return its descriptor dictionary.

    :param module: name of the module (sale, purchase, ...)
    :param mod_path: physical path of the module; resolved automatically
                     when omitted
    :return: manifest values merged over sensible defaults, or {} when the
             manifest cannot be found
    """
    mod_path = mod_path or get_module_path(module)
    terp_file = mod_path and opj(mod_path, MANIFEST) or False
    if terp_file and os.path.isfile(terp_file):
        # Descriptor defaults, overridden by whatever the manifest declares.
        info = {
            "application": False,
            "author": "Odoo S.A.",
            "auto_install": False,
            "category": "Uncategorized",
            "depends": [],
            "description": "",
            "icon": get_module_icon(module),
            "installable": True,
            "license": "LGPL-3",
            "post_load": None,
            "version": "1.0",
            "web": False,
            "website": "https://www.odoo.com",
            "sequence": 100,
            "summary": "",
        }
        # Seed every list-valued key with its own fresh empty list.
        info.update(itertools.izip(
            "depends data demo test init_xml update_xml demo_xml".split(),
            iter(list, None)))
        manifest_fd = tools.file_open(terp_file)
        try:
            info.update(eval(manifest_fd.read()))
        finally:
            manifest_fd.close()
        if not info.get("description"):
            readme_candidates = [opj(mod_path, candidate) for candidate in README
                                 if os.path.isfile(opj(mod_path, candidate))]
            if readme_candidates:
                info["description"] = tools.file_open(readme_candidates[0]).read()
        if "active" in info:
            # 'active' has been renamed 'auto_install'
            info["auto_install"] = info["active"]
        info["version"] = adapt_version(info["version"])
        return info
    # TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    _logger.debug("module %s: no %s file found.", module, MANIFEST)
    return {}
def test_purchase(self, cr, uid, ids, context=None):
    """Import purchase-order test data (XML) and run the YAML test inside a
    savepoint, then attach the generated CSV logs to this wizard and open
    the result view.

    The savepoint is released (kept) only when ``test_commit`` is set on the
    wizard; otherwise all imported data is rolled back.
    """
    # Fix: identity comparison with None (`is None`) instead of `== None`.
    if context is None:
        context = {}
    assertion_obj = assertion_report.assertion_report()
    this = self.browse(cr, uid, ids)[0]
    fp_data = tools.file_open(os.path.join(
        'purchase_test_data_imp', 'test/purchase_order_test_data.xml'))
    fp_test = tools.file_open(os.path.join(
        'purchase_test_data_imp',
        'test/purchase_order_product_can_be_purchased.yml'))
    try:
        cr.execute("SAVEPOINT test_yaml_purchase_savepoint")
        context.update({'uid': uid})
        tools.convert_xml_import(cr, 'purchase_test_data_imp', fp_data,
                                 {}, 'init', False, assertion_obj)
        tools.convert_yaml_import(cr, 'purchase_test_data_imp', fp_test,
                                  'test', {}, 'init', False, assertion_obj,
                                  context=context)
    finally:
        if this.test_commit:
            cr.execute("RELEASE SAVEPOINT test_yaml_purchase_savepoint")
        else:
            cr.execute("ROLLBACK TO test_yaml_purchase_savepoint")
        fp_data.close()
        fp_test.close()
    tmp_path = tempfile.gettempdir()
    # Fix: close the log files instead of leaking the handles.
    with open(os.path.join(tmp_path, 'purchase_order_product_log.csv'),
              'rb+') as log_fd:
        file_purchase_order_wrong = base64.encodestring(log_fd.read())
    with open(os.path.join(tmp_path, 'purchase_order_general_log.csv'),
              'rb+') as log_fd:
        file_purchase_order_log = base64.encodestring(log_fd.read())
    self.write(cr, uid, ids, {
        'yaml_file': file_purchase_order_wrong,
        'yaml_file_log': file_purchase_order_log,
        'filename_product': 'purchase_order_product_log.csv',
        'filename_log_general': 'purchase_order_general_log.csv',
    }, context=context)
    __, xml_id = self.pool.get('ir.model.data').get_object_reference(
        cr, uid, 'purchase_test_data_imp',
        'view_wizard_purchase_test_data_result')
    return {
        'res_model': 'test.yaml.data.purchase',
        'view_type': 'form',
        'view_mode': 'form',
        'view_id': xml_id,
        'res_id': this.id,
        'context': context,
        'type': 'ir.actions.act_window',
        'target': 'new',
    }
def create_zipcodes(self, cr, uid, context=None):
    """Import spanish zipcodes information through an XML file."""
    file_name = 'l10n_es_toponyms_zipcodes.xml'
    try:
        fp = tools.file_open(os.path.join('l10n_es_toponyms',
                                          os.path.join('wizard', file_name)))
    except IOError, e:
        # The data file shipped with the module may be missing; fall back to
        # None so the rest of the wizard can skip the import.
        # NOTE(review): this function likely continues beyond this chunk —
        # `fp` is unused here; confirm against the full source.
        fp = None
def _get_desc(self, cr, uid, ids, field_name=None, arg=None, context=None):
    """Functional field: return each module's HTML description.

    Prefers the module's static/description/index.html (with relative image
    sources rewritten to absolute module URLs); otherwise renders the
    manifest's ReST description through docutils.  Output is sanitized HTML.
    """
    res = dict.fromkeys(ids, "")
    for module in self.browse(cr, uid, ids, context=context):
        path = get_module_resource(module.name, "static/description/index.html")
        if path:
            with tools.file_open(path, "rb") as desc_file:
                doc = desc_file.read()
                html = lxml.html.document_fromstring(doc)
                # Rewrite relative, non-static image sources so they resolve
                # against the module's description directory.
                for element, attribute, link, pos in html.iterlinks():
                    if (
                        element.get("src")
                        and not "//" in element.get("src")
                        and not "static/" in element.get("src")
                    ):
                        element.set("src", "/%s/static/description/%s" % (module.name, element.get("src")))
                res[module.id] = html_sanitize(lxml.html.tostring(html))
        else:
            # No index.html: render the ReST description from the manifest.
            overrides = {
                "embed_stylesheet": False,
                "doctitle_xform": False,
                "output_encoding": "unicode",
                "xml_declaration": False,
            }
            output = publish_string(
                source=module.description or "", settings_overrides=overrides, writer=MyWriter()
            )
            res[module.id] = html_sanitize(output)
    return res
def _load_data(cr, module_name, idref, mode, kind):
    """
    kind: data, demo, test, init_xml, update_xml, demo_xml.

    noupdate is False, unless it is demo data or it is csv data in
    init mode.
    """
    # NOTE(review): `package` and `report` come from the enclosing scope,
    # which is not visible in this chunk — confirm against the caller.
    for filename in package.data[kind]:
        _logger.info("module %s: loading %s", module_name, filename)
        _, ext = os.path.splitext(filename)
        pathname = os.path.join(module_name, filename)
        fp = tools.file_open(pathname)
        noupdate = False
        if kind in ('demo', 'demo_xml'):
            noupdate = True
        try:
            # Dispatch on the (case-insensitive) file extension.
            ext = ext.lower()
            if ext == '.csv':
                if kind in ('init', 'init_xml'):
                    noupdate = True
                tools.convert_csv_import(cr, module_name, pathname, fp.read(), idref, mode, noupdate)
            elif ext == '.sql':
                process_sql_file(cr, fp)
            elif ext == '.yml':
                tools.convert_yaml_import(cr, module_name, fp, kind, idref, mode, noupdate, report)
            elif ext == '.xml':
                tools.convert_xml_import(cr, module_name, fp, idref, mode, noupdate, report)
            elif ext == '.js':
                pass  # .js files are valid but ignored here.
            else:
                _logger.warning("Can't load unknown file type %s.", filename)
        finally:
            fp.close()
def _load_data(cr, module_name, idref, mode, kind):
    """
    kind: data, demo, test, init_xml, update_xml, demo_xml.

    noupdate is False, unless it is demo data or it is csv data in
    init mode.
    """
    # Fix: the logger lookup is loop-invariant — hoist it out of the loop.
    log = logging.getLogger('init')
    # NOTE(review): `package` and `report` come from the enclosing scope,
    # which is not visible in this chunk — confirm against the caller.
    for filename in package.data[kind]:
        log.info("module %s: loading %s", module_name, filename)
        _, ext = os.path.splitext(filename)
        # Fix: normalize the extension so e.g. '.CSV' / '.Yml' are handled
        # too, matching the behaviour of the newer version of this loader.
        ext = ext.lower()
        pathname = os.path.join(module_name, filename)
        fp = tools.file_open(pathname)
        noupdate = False
        if kind in ('demo', 'demo_xml'):
            noupdate = True
        try:
            if ext == '.csv':
                if kind in ('init', 'init_xml'):
                    noupdate = True
                tools.convert_csv_import(cr, module_name, pathname,
                                         fp.read(), idref, mode, noupdate)
            elif ext == '.sql':
                process_sql_file(cr, fp)
            elif ext == '.yml':
                tools.convert_yaml_import(cr, module_name, fp, idref, mode,
                                          noupdate)
            else:
                tools.convert_xml_import(cr, module_name, fp, idref, mode,
                                         noupdate, report)
        finally:
            fp.close()
def _report_content(recs): res = {} aeroo_ids = recs.search([('report_type','=','aeroo'),('id','in',recs.ids)]) orig_ids = list(set(recs.ids).difference(aeroo_ids.ids)) name = 'report_sxw_content' ancestor = recs.pool.get('ir.actions.report.xml') #TODO v8 how to call original function, where to get 'name' param? #res = orig_ids and super(report_xml, recs)._report_content({name=name) or {} for report in aeroo_ids: data = report[name + '_data'] if report.report_type == 'aeroo' and report.tml_source == 'file' or not data and report.report_sxw: fp = None try: #TODO: Probably there's a need to check if path to the report template actually present (???) fp = tools.file_open(report[name[:-8]], mode='rb') data = report.report_type == 'aeroo' and base64.encodestring(fp.read()) or fp.read() except IOError, e: if e.errno == 13: # Permission denied on the template file raise osv.except_osv(_(e.strerror), e.filename) else: logger.error("Error in '_report_content' method", exc_info=True) except Exception, e: logger.error("Error in '_report_content' method", exc_info=True) fp = False data = False finally:
def _get_icon(self, cr, uid, ids, name, value, args, context=None):
    """Functional field: for each attachment, return the file-type icon
    matching its filename extension, as base64-encoded image data."""
    if context is None:
        context = {}
    res = {}
    src = 'tg_box/static/src/img/fileext/'
    for record in self.browse(cr, uid, ids, context=context):
        # Choose the icon name from the extension; use the generic "web"
        # icon when there is no filename at all.
        if record.datas_fname:
            extension = os.path.splitext(record.datas_fname)[1][1:].lower()
            icon_filename = "%s.png" % (extension)
        else:
            icon_filename = "web.png"
        # Fall back to the blank icon when the module ships no match.
        resolved = addons.get_module_resource(
            'tg_box', 'static', 'src', 'img', 'fileext', icon_filename)
        if resolved:
            icon_path = "%s%s" % (src, icon_filename)
        else:
            icon_path = "%s_blank.png" % (src)
        icon_fd = tools.file_open(icon_path, 'rb')
        try:
            res[record.id] = icon_fd.read().encode('base64')
        finally:
            icon_fd.close()
    return res
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
    '''
    Copy of openerp.tools.convert_xml_import save for the xml transformation part

    Validates ``xmlfile`` against the import_xml.rng RelaxNG schema — after
    applying each registered XSLT override to the schema — then feeds the
    document to xml_import.
    '''
    doc = etree.parse(xmlfile)
    #local change
    relaxng = etree.parse(os.path.join(config['root_path'],'import_xml.rng' ))
    # Each override is an XSLT stylesheet transforming the schema itself.
    for override in overrides:
        _logger.debug('applying override %s' % str(overrides))
        transformation = etree.XSLT(etree.parse(tools.file_open(override)))
        relaxng = transformation(relaxng)
        _logger.debug('succeeded')
    try:
        relaxng = etree.RelaxNG(relaxng)
        #/local change
        relaxng.assert_(doc)
    except Exception:
        _logger.error('The XML file does not fit the required schema !')
        _logger.error(misc.ustr(relaxng.error_log.last_error))
        raise
    if idref is None:
        idref={}
    obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate)
    obj.parse(doc.getroot())
    return True
def _report_content(self, cursor, user, ids, name, arg, context=None): res = {} aeroo_ids = self.search(cursor, 1, [('report_type','=','aeroo'),('id','in',ids)], context=context) orig_ids = list(set(ids).difference(aeroo_ids)) res = orig_ids and super(report_xml, self)._report_content(cursor, 1, orig_ids, name, arg, context) or {} for report in self.read(cursor, 1, aeroo_ids, ['tml_source','report_type','report_sxw_content_data', 'report_sxw','report_rml','report_file'], context=context): data = report[name + '_data'] #logger.error(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>1..."+str(report), exc_info=True) if report['report_type']=='aeroo' and report['tml_source']=='file' or not data and report[name[:-8]]: #logger.error(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>2..."+(data and str(len(data)) or ''), exc_info=True) fp = None try: #TODO: Probably there's a need to check if path to the report template actually present (???) fp = tools.file_open(report[name[:-8]], mode='rb') data = report['report_type']=='aeroo' and base64.encodestring(fp.read()) or fp.read() except IOError, e: if e.errno==13: # Permission denied on the template file raise osv.except_osv(_(e.strerror), e.filename) else: logger.error("Error in '_report_content' method", exc_info=True) except Exception, e: logger.error("Error in '_report_content' method", exc_info=True) fp = False data = False finally:
def import_local(self):
    """Run the parent wizard, then load the Spanish banks data file shipped
    with this module (init mode, updatable records)."""
    result = super(L10nEsPartnerImportWizard, self).execute()
    banks_path = os.path.join('l10n_es_partner', 'wizard', 'data_banks.xml')
    with tools.file_open(banks_path) as banks_fd:
        tools.convert_xml_import(self._cr, 'l10n_es_partner', banks_fd, {},
                                 'init', noupdate=False)
    return result
def upload_report(self, cr, uid, ids, context=None):
    """Convert the uploaded SXW file to RML, store both contents on the
    target ir.actions.report.xml, and open the "save RML" dialog."""
    from base_report_designer import openerp_sxw2rml
    import StringIO
    data=self.read(cr,uid,ids)[0]
    sxwval = StringIO.StringIO(base64.decodestring(data['file_sxw_upload']))
    # XSL stylesheet driving the SXW -> RML conversion.
    fp = tools.file_open('normalized_oo2rml.xsl',subdir='addons/base_report_designer/openerp_sxw2rml')
    newrmlcontent = str(openerp_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
    report = self.pool['ir.actions.report.xml'].write(cr, uid, [data['report_id']], {
        'report_sxw_content': base64.decodestring(data['file_sxw_upload']),
        'report_rml_content': newrmlcontent
    })
    cr.commit()
    data_obj = self.pool['ir.model.data']
    id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_rml')
    report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
    if id2:
        # Resolve the xml id to the actual view database id.
        id2 = data_obj.browse(cr, uid, id2, context=context).res_id
    return {
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'base.report.rml.save',
        'views': [(id2, 'form')],
        'view_id': False,
        'type': 'ir.actions.act_window',
        'target': 'new',
    }
def create(self, cr, uid, ids, data, context=None):
    """Entry point of this (xls) report engine: resolve the report definition
    (or build a stand-in from the RML template) and dispatch to the renderer.

    :return: the renderer's (content, format) tuple, or (False, False) when
             rendering failed
    """
    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get('ir.actions.report.xml')
    report_xml_ids = ir_obj.search(cr, uid,
                                   [('report_name', '=', self.name[7:])],
                                   context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0],
                                   context=context)
    else:
        title = ''
        # Fix: close the template file instead of leaking the descriptor.
        tmpl_fd = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = tmpl_fd.read()
        finally:
            tmpl_fd.close()
        report_type = data.get('report_type', 'pdf')

        class a(object):
            # Lightweight stand-in mimicking an ir.actions.report.xml record.
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        report_xml = a(title=title, report_type=report_type,
                       report_rml_content=rml, name=title, attachment=False,
                       header=self.header)
    # This engine always renders as xls regardless of the configured type
    # (the dead `report_type = report_xml.report_type` assignment that was
    # immediately clobbered has been removed).
    report_type = 'xls'
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    elif report_type == 'xls':
        fnct = self.create_source_xls
    else:
        # Fix: raising a string is a TypeError in modern Python 2; raise a
        # real exception class instead.
        raise NotImplementedError('Unknown Report Type')
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return (False, False)
    return fnct_ret
def check(self, cr, uid, ids, context=None):
    """Persist the OpenOffice connection settings and probe the OO server by
    converting a bundled test document; on failure, records an error state
    and details."""
    config_obj = self.pool.get('oo.config')
    data = self.read(cr, uid, ids, ['host','port','ooo_restart_cmd'])[0]
    del data['id']
    # Upsert the singleton oo.config record with the wizard values.
    config_id = config_obj.search(cr, SUPERUSER_ID, [], context=context)
    if config_id:
        config_obj.write(cr, SUPERUSER_ID, config_id, data, context=context)
    else:
        config_id = config_obj.create(cr, SUPERUSER_ID, data, context=context)
    try:
        lock = openerp.addons.report_aeroo.report_aeroo.aeroo_lock
    except:
        lock = False
    state = 'error'
    error_details = _('Aeroo lock not found')
    if lock:
        try:
            # Round-trip a test document through the OO server while holding
            # the aeroo lock.
            fp = tools.file_open('report_aeroo_ooo/test_temp.odt', mode='rb')
            file_data = fp.read()
            DC = OpenOffice_service(cr, data['host'], data['port'])
            with lock:
                DC.putDocument(file_data)
                DC.saveByStream()
                fp.close()
                DC.closeDocument()
                del DC
        except DocumentConversionException, e:
            error_details = str(e)
            state = 'error'
        except Exception, e:
            error_details = str(e)
            state = 'error'
def execute(self, cr, uid, ids, context=None):
    """Run the parent partner-import wizard, then try to open the Spanish
    banks data file; returns {} early when the file is unavailable."""
    if context is None:
        context = {}
    super(l10n_es_partner_import_wizard, self).execute(cr, uid, ids, context=context)
    try:
        fp = tools.file_open(os.path.join(os.path.join('l10n_es_partner', 'wizard'), 'data_banks.xml'))
    except IOError, e:
        # Data file not shipped/readable: nothing to import.
        # NOTE(review): this function likely continues beyond this chunk —
        # `fp` is unused here; confirm against the full source.
        return {}
def check(self, cr, uid, ids, context=None):
    """Persist the OpenOffice connection settings, register the service on
    the oo.config pool, and probe the OO server with a bundled test
    document; records error details on conversion failure."""
    config_obj = self.pool.get('oo.config')
    data = self.read(cr, uid, ids, ['host','port','ooo_restart_cmd'])[0]
    del data['id']
    # Upsert the singleton oo.config record with the wizard values
    # (uid 1 = superuser in this server version).
    config_id = config_obj.search(cr, 1, [], context=context)
    if config_id:
        config_obj.write(cr, 1, config_id, data, context=context)
    else:
        config_id = config_obj.create(cr, 1, data, context=context)
    try:
        # Round-trip a test document through the OO server while holding
        # the global aeroo lock.
        fp = tools.file_open('report_aeroo_ooo/test_temp.odt', mode='rb')
        file_data = fp.read()
        oo = self.pool.get('oo.config')
        DC = OpenOffice_service(cr, data['host'], data['port'])
        oo.set(DC)
        with aeroo_lock:
            DC.putDocument(file_data)
            DC.saveByStream()
            fp.close()
            DC.closeDocument()
            del DC
    except DocumentConversionException, e:
        oo.remove()
        error_details = str(e)
        state = 'error'
def create(self, cr, uid, ids, data, context=None):
    """Resolve the report definition (or build a stand-in from the RML
    template), check the reprint permission, dispatch to the renderer for the
    report type, and register the print when allowed.

    :return: the renderer's (content, format) tuple, or (False, False)
    """
    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get("ir.actions.report.xml")
    report_xml_ids = ir_obj.search(cr, uid, [("report_name", "=", self.name[7:])], context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
    else:
        title = ""
        # Fix: close the template file instead of leaking the descriptor.
        tmpl_fd = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = tmpl_fd.read()
        finally:
            tmpl_fd.close()
        report_type = data.get("report_type", "pdf")

        class a(object):
            # Lightweight stand-in mimicking an ir.actions.report.xml record.
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        report_xml = a(
            title=title,
            report_type=report_type,
            report_rml_content=rml,
            name=title,
            attachment=False,
            header=self.header,
        )
    report_type = report_xml.report_type
    # NOTE(review): the stand-in object above has no `id` attribute — this
    # call can only work when the report was found in the database; confirm.
    result = self.validate_report(cr, uid, report_xml.id, data["id"], context)
    if result["allow"]:
        context.update({"allow": True})
    else:
        logger.notifyChannel("info", netsvc.LOG_INFO, "NO SE PERMITE REIMPRIMIR")
    if result["check_note_use"]:
        context.update({"check_note_use": True})
    if report_type in ["sxw", "odt"]:
        fnct = self.create_source_odt
    elif report_type in ["pdf", "raw", "html"]:
        fnct = self.create_source_pdf
    elif report_type == "html2html":
        fnct = self.create_source_html2html
    else:
        # Fix: raising a string is a TypeError in modern Python 2; raise a
        # real exception class instead.
        raise NotImplementedError("Unknown Report Type")
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return (False, False)
    # ~ Here should go the check which verifies whether this report must
    # ~ be printed or not
    if context.get("allow", False):
        printer = validate_report(cr, uid, result["brw"], fnct_ret[0], data["id"])
        if printer:
            self.create_ir_print(cr, uid, report_xml.id, data["id"])
    else:
        pass
    return fnct_ret
def create_rml(self, cr, xml, uid, context=None):
    """Apply the report's XSL-T stylesheet to ``xml`` and return the RML.

    Simpler translation scheme than the sibling variants: only the text of
    nodes explicitly carrying a ``t`` attribute is translated.
    """
    if self.tmpl=='' and not self.internal_header:
        self.internal_header=True
    if not context:
        context={}
    pool = pooler.get_pool(cr.dbname)
    ir_translation_obj = pool.get('ir.translation')
    # In some case we might not use xsl ...
    if not self.xsl:
        return xml
    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        # Rewrite relative <import href="..."> references to the absolute
        # path resolved by tools.file_open().
        for import_child in stylesheet.findall('./import'):
            if 'href' in import_child.attrib:
                imp_file = import_child.get('href')
                _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                import_child.set('href', urllib.quote(str(imp_file)))
                # NOTE(review): confirm file_open(..., pathinfo=True) returns
                # a closeable object in the second slot.
                imp_file.close()
    finally:
        stylesheet_file.close()

    #TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        # Replace the text of every node flagged with a "t" attribute by its
        # ir.translation source, when one exists.
        for node in doc.xpath('//*[@t]'):
            if not node.text:
                continue
            translation = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, node.text)
            if translation:
                node.text = translation

    if context.get('lang', False):
        translate(stylesheet, context['lang'])
    transform = etree.XSLT(stylesheet)
    xml = etree.tostring( transform(etree.fromstring(xml)))
    return xml
def create_zipcodes(self):
    """Import spanish zipcodes information through an XML file."""
    file_name = 'l10n_es_toponyms_zipcodes.xml'
    zip_path = os.path.join('l10n_es_toponyms', 'wizard', file_name)
    # Load the data file in init mode; records are flagged noupdate.
    with tools.file_open(zip_path) as zip_fd:
        tools.convert_xml_import(self.env.cr, 'l10n_es_toponyms', zip_fd,
                                 {}, 'init', noupdate=True)
    return True
def _get_image(self, cr, uid, context=None):
    """Return the module banner image shipped with report_aeroo_ooo,
    base64-encoded."""
    banner_path = os.path.join("report_aeroo_ooo", "config_pixmaps",
                               "module_banner.png")
    banner_fd = tools.file_open(banner_path, "rb")
    try:
        return base64.encodestring(banner_fd.read())
    finally:
        banner_fd.close()
def create(self, cr, uid, ids, data, context=None):
    """Main entry point of the report engine: resolve the report definition
    (or build a stand-in from the RML template) and dispatch to the renderer
    matching the report type.

    :return: (content, format) tuple, or (False, False) when rendering failed
    :raises NotImplementedError: for unsupported report types
    """
    context = dict(context or {})
    if self.internal_header:
        context.update(internal_header=self.internal_header)
    # skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
    context.update(bin_raw=True)
    registry = openerp.registry(cr.dbname)
    ir_obj = registry["ir.actions.report.xml"]
    registry["res.font"].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
    report_xml_ids = ir_obj.search(cr, uid, [("report_name", "=", self.name[7:])], context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
    else:
        title = ""
        report_file = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = report_file.read()
            report_type = data.get("report_type", "pdf")

            class a(object):
                # Lightweight stand-in mimicking an ir.actions.report.xml record.
                def __init__(self, *args, **argv):
                    for key, arg in argv.items():
                        setattr(self, key, arg)

            report_xml = a(
                title=title,
                report_type=report_type,
                report_rml_content=rml,
                name=title,
                attachment=False,
                header=self.header,
            )
        finally:
            report_file.close()
    # We add an attribute on the ir.actions.report.xml instance.
    # This attribute 'use_global_header' will be used by
    # the create_single_XXX function of the report engine.
    # This change has been done to avoid a big change of the API.
    setattr(report_xml, "use_global_header", self.header if report_xml.header else False)
    report_type = report_xml.report_type
    if report_type in ["sxw", "odt"]:
        fnct = self.create_source_odt
    elif report_type in ["pdf", "raw", "txt", "html"]:
        fnct = self.create_source_pdf
    elif report_type == "html2html":
        fnct = self.create_source_html2html
    elif report_type == "mako2html":
        fnct = self.create_source_mako2html
    else:
        raise NotImplementedError(_("Unknown report type: %s") % report_type)
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return False, False
    return fnct_ret
def create(self, cr, uid, ids, data, context=None):
    """Resolve the report definition (or build a stand-in from the RML
    template), check the reprint permission, dispatch to the renderer for the
    report type, and register the print when allowed.

    :return: the renderer's (content, format) tuple, or (False, False)
    """
    pool = pooler.get_pool(cr.dbname)
    company_obj = pool.get('res.company')
    ir_obj = pool.get('ir.actions.report.xml')
    report_xml_ids = ir_obj.search(cr, uid,
                                   [('report_name', '=', self.name[7:])],
                                   context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(
            cr, uid, report_xml_ids[0], context=context)
    else:
        title = ''
        # Fix: close the template file instead of leaking the descriptor.
        tmpl_fd = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = tmpl_fd.read()
        finally:
            tmpl_fd.close()
        report_type = data.get('report_type', 'pdf')

        class a(object):
            # Lightweight stand-in mimicking an ir.actions.report.xml record.
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        report_xml = a(title=title, report_type=report_type,
                       report_rml_content=rml, name=title, attachment=False,
                       header=self.header)
    report_type = report_xml.report_type
    # NOTE(review): the stand-in object above has no `id` attribute — this
    # call can only work when the report was found in the database; confirm.
    result = self.validate_report(
        cr, uid, report_xml.id, data['id'], context)
    if result['allow']:
        context.update({'allow': True})
    else:
        logger.notifyChannel(
            "info", netsvc.LOG_INFO, "NO SE PERMITE REIMPRIMIR")
    if result['check_note_use']:
        context.update({'check_note_use': True})
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    else:
        # Fix: raising a string is a TypeError in modern Python 2; raise a
        # real exception class instead.
        raise NotImplementedError('Unknown Report Type')
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return (False, False)
    #~ Here should go the check which verifies whether this report must
    #~ be printed or not
    if context.get('allow', False):
        printer = validate_report(cr, uid, result[
            'brw'], fnct_ret[0], data['id'])
        if printer:
            self.create_ir_print(cr, uid, report_xml.id, data['id'])
    else:
        pass
    return fnct_ret
def parse(self, filename, ids, model, context=None):
    """Load the XML template ``filename`` into memory and initialize the
    output document, then walk the template tree for the given records."""
    template_fd = tools.file_open(filename)
    try:
        self.dom = etree.XML(template_fd.read())
        self.doc = etree.Element(self.dom.tag)
        self.parse_tree(ids, model, context)
    finally:
        template_fd.close()
def _get_header(self, cr, uid, ids):
    """Return the corporate RML header shipped with the base module, falling
    back to the built-in A4 header when the file cannot be read."""
    try:
        header_file = tools.file_open(
            os.path.join('base', 'report', 'corporate_rml_header.rml'))
        try:
            return header_file.read()
        finally:
            header_file.close()
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any I/O problem still falls back to the
        # default header.
        return self._header_a4
def create(self, cr, uid, ids, data, context=None):
    """Resolve the report definition (or build a stand-in from the RML
    template), record the reprint-permission flags in the context, dispatch
    to the renderer, and validate the produced report.

    :return: the renderer's (content, format) tuple, or (False, False)
    """
    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get('ir.actions.report.xml')
    report_xml_ids = ir_obj.search(cr, uid,
                                   [('report_name', '=', self.name[7:])],
                                   context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(
            cr, uid, report_xml_ids[0], context=context)
    else:
        title = ''
        # Fix: close the template file instead of leaking the descriptor.
        tmpl_fd = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = tmpl_fd.read()
        finally:
            tmpl_fd.close()
        report_type = data.get('report_type', 'pdf')

        class a(object):
            # Lightweight stand-in mimicking an ir.actions.report.xml record.
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        report_xml = a(title=title, report_type=report_type,
                       report_rml_content=rml, name=title, attachment=False,
                       header=self.header)
    report_type = report_xml.report_type
    # NOTE(review): the stand-in object above has no `id` attribute — this
    # call can only work when the report was found in the database; confirm.
    result = self.validate_report(
        cr, uid, report_xml.id, data['id'], context)
    if result['allow']:
        context.update({'allow': True})
    if result['check_note_use']:
        context.update({'check_note_use': True})
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    else:
        # Fix: raising a string is a TypeError in modern Python 2; raise a
        # real exception class instead.
        raise NotImplementedError('Unknown Report Type')
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return (False, False)
    #~ Here should go the check which verifies whether this report must
    #~ be printed or not
    #~ if context.get('allow',False):
    validate_report(cr, uid, result['brw'], fnct_ret[0], data['id'])
    #~ self.create_ir_print(cr,uid,report_xml.id,data['id'])
    return fnct_ret
def _get_icon_image(self, cr, uid, ids, field_name=None, arg=None, context=None):
    """Functional field: return each module's icon.png as base64-encoded
    data; modules without an icon keep an empty string."""
    res = dict.fromkeys(ids, '')
    for module in self.browse(cr, uid, ids, context=context):
        icon_path = get_module_resource(module.name, 'static', 'description', 'icon.png')
        if not icon_path:
            continue
        icon_fd = tools.file_open(icon_path, 'rb')
        try:
            res[module.id] = icon_fd.read().encode('base64')
        finally:
            icon_fd.close()
    return res
def test_import_free_invoice(self):
    """Import a sample supplier invoice PDF twice: first to create a new
    draft invoice, then to update that same draft invoice in place.
    Asserted values (dates, amounts, partner, product) match the content
    of the bundled ``invoice_free_fiber_201507.pdf`` fixture.
    """
    filename = 'invoice_free_fiber_201507.pdf'
    f = file_open(
        'account_invoice_import_invoice2data/tests/pdf/' + filename, 'rb')
    pdf_file = f.read()
    wiz = self.env['account.invoice.import'].create({
        'invoice_file': base64.b64encode(pdf_file),
        'invoice_filename': filename,
    })
    f.close()
    wiz.import_invoice()
    # Check result of invoice creation
    invoices = self.env['account.invoice'].search([
        ('state', '=', 'draft'),
        ('type', '=', 'in_invoice'),
        ('supplier_invoice_number', '=', '562044387')
    ])
    self.assertEquals(len(invoices), 1)
    inv = invoices[0]
    self.assertEquals(inv.type, 'in_invoice')
    self.assertEquals(inv.date_invoice, '2015-07-02')
    self.assertEquals(
        inv.partner_id,
        self.env.ref('account_invoice_import_invoice2data.free'))
    self.assertEquals(inv.journal_id.type, 'purchase')
    # Monetary amounts are compared with float_compare to avoid
    # binary-float equality issues.
    self.assertEquals(
        float_compare(inv.check_total, 29.99, precision_digits=2), 0)
    self.assertEquals(
        float_compare(inv.amount_total, 29.99, precision_digits=2), 0)
    self.assertEquals(
        float_compare(inv.amount_untaxed, 24.99, precision_digits=2), 0)
    self.assertEquals(
        len(inv.invoice_line), 1)
    iline = inv.invoice_line[0]
    self.assertEquals(iline.name, 'Fiber optic access at the main office')
    self.assertEquals(
        iline.product_id,
        self.env.ref(
            'account_invoice_import_invoice2data.internet_access'))
    self.assertEquals(
        float_compare(iline.quantity, 1.0, precision_digits=0), 0)
    self.assertEquals(
        float_compare(iline.price_unit, 24.99, precision_digits=2), 0)
    # Prepare data for next test i.e. invoice update
    # (we re-use the invoice created by the first import !)
    inv.write({
        'date_invoice': False,
        'supplier_invoice_number': False,
        'check_total': False,
    })
    # New import with update of an existing draft invoice
    f = file_open(
        'account_invoice_import_invoice2data/tests/pdf/'
        'invoice_free_fiber_201507.pdf', 'rb')
    pdf_file = f.read()
    wiz2 = self.env['account.invoice.import'].create({
        'invoice_file': base64.b64encode(pdf_file),
        'invoice_filename': 'invoice_free_fiber_201507.pdf',
    })
    f.close()
    action = wiz2.import_invoice()
    # Second import detects the matching draft invoice and asks the
    # user what to do (wizard action) instead of creating a duplicate.
    self.assertEquals(
        action['res_model'], 'account.invoice.import')
    # Choose to update the existing invoice
    wiz2.update_invoice()
    invoices = self.env['account.invoice'].search([
        ('state', '=', 'draft'),
        ('type', '=', 'in_invoice'),
        ('supplier_invoice_number', '=', '562044387')
    ])
    self.assertEquals(len(invoices), 1)
    inv = invoices[0]
    # The fields blanked above must have been re-filled by the update.
    self.assertEquals(inv.date_invoice, '2015-07-02')
    self.assertEquals(
        float_compare(inv.check_total, 29.99, precision_digits=2), 0)
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
    """Render a single ODT/SXW report: unzip the template, evaluate the
    placeholders in ``content.xml`` and ``meta.xml``, optionally inject
    the corporate header (``styles.xml``), and re-zip the result.

    :param report_xml: ir.actions.report.xml record (or stand-in) whose
        ``report_sxw_content`` holds the zipped template bytes
    :return: ``(document_bytes, mime_type)`` with mime_type 'sxw' or 'odt'
    """
    if not context:
        context = {}
    context = context.copy()
    report_type = report_xml.report_type
    context['parents'] = sxw_parents
    binary_report_content = report_xml.report_sxw_content
    if isinstance(report_xml.report_sxw_content, unicode):
        # if binary content was passed as unicode, we must
        # re-encode it as a 8-bit string using the pass-through
        # 'latin1' encoding, to restore the original byte values.
        # See also osv.fields.sanitize_binary_value()
        binary_report_content = report_xml.report_sxw_content.encode(
            "latin1")
    sxw_io = StringIO.StringIO(binary_report_content)
    sxw_z = zipfile.ZipFile(sxw_io, mode='r')
    rml = sxw_z.read('content.xml')
    meta = sxw_z.read('meta.xml')
    mime_type = sxw_z.read('mimetype')
    # The zip's 'mimetype' member distinguishes legacy OpenOffice SXW
    # from ODF ODT templates.
    if mime_type == 'application/vnd.sun.xml.writer':
        mime_type = 'sxw'
    else:
        mime_type = 'odt'
    sxw_z.close()
    rml_parser = self.parser(cr, uid, self.name2, context=context)
    rml_parser.parents = sxw_parents
    rml_parser.tag = sxw_tag
    objs = self.getObjects(cr, uid, ids, context)
    rml_parser.set_context(objs, data, ids, mime_type)
    # Patch meta.xml: store the record id and model in the template's
    # "Info 3"/"Info 4" user-defined fields.
    rml_dom_meta = node = etree.XML(meta)
    elements = node.findall(rml_parser.localcontext['name_space']["meta"]
                            + "user-defined")
    for pe in elements:
        if pe.get(rml_parser.localcontext['name_space']["meta"] + "name"):
            if pe.get(rml_parser.localcontext['name_space']["meta"]
                      + "name") == "Info 3":
                pe[0].text = data['id']
            if pe.get(rml_parser.localcontext['name_space']["meta"]
                      + "name") == "Info 4":
                pe[0].text = data['model']
    meta = etree.tostring(rml_dom_meta, encoding='utf-8',
                          xml_declaration=True)
    rml_dom = etree.XML(rml)
    elements = []
    key1 = rml_parser.localcontext['name_space']["text"] + "p"
    key2 = rml_parser.localcontext['name_space']["text"] + "drop-down"
    # Collect every text:p paragraph; placeholders live in text:drop-down
    # children whose text must be folded back into the paragraph.
    for n in rml_dom.iterdescendants():
        if n.tag == key1:
            elements.append(n)
    if mime_type == 'odt':
        for pe in elements:
            e = pe.findall(key2)
            for de in e:
                pp = de.getparent()
                if de.text or de.tail:
                    pe.text = de.text or de.tail
                for cnd in de:
                    if cnd.text or cnd.tail:
                        if pe.text:
                            pe.text += cnd.text or cnd.tail
                        else:
                            pe.text = cnd.text or cnd.tail
                pp.remove(de)
    else:
        # SXW variant: the placeholder expression is stored in the
        # OpenOffice 'text:value' attribute, expected to start with '[['.
        for pe in elements:
            e = pe.findall(key2)
            for de in e:
                pp = de.getparent()
                if de.text or de.tail:
                    pe.text = de.text or de.tail
                for cnd in de:
                    text = cnd.get(
                        "{http://openoffice.org/2000/text}value",
                        False)
                    if text:
                        if pe.text and text.startswith('[['):
                            pe.text += text
                        elif text.startswith('[['):
                            pe.text = text
                        if de.getparent():
                            pp.remove(de)
    rml_dom = self.preprocess_rml(rml_dom, mime_type)
    create_doc = self.generators[mime_type]
    odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
                         encoding='utf-8', xml_declaration=True)
    sxw_contents = {'content.xml': odt, 'meta.xml': meta}
    if report_xml.header:
        #Add corporate header/footer
        rml_file = tools.file_open(
            os.path.join('base', 'report',
                         'corporate_%s_header.xml' % report_type))
        try:
            rml = rml_file.read()
            rml_parser = self.parser(cr, uid, self.name2, context=context)
            rml_parser.parents = sxw_parents
            rml_parser.tag = sxw_tag
            objs = self.getObjects(cr, uid, ids, context)
            rml_parser.set_context(objs, data, ids,
                                   report_xml.report_type)
            rml_dom = self.preprocess_rml(etree.XML(rml), report_type)
            create_doc = self.generators[report_type]
            odt = create_doc(rml_dom, rml_parser.localcontext)
            if report_xml.header:
                rml_parser._add_header(odt)
            odt = etree.tostring(odt, encoding='utf-8',
                                 xml_declaration=True)
            sxw_contents['styles.xml'] = odt
        finally:
            rml_file.close()
    #created empty zip writing sxw contents to avoid duplication
    sxw_out = StringIO.StringIO()
    sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w')
    sxw_template_zip = zipfile.ZipFile(sxw_io, 'r')
    # Copy untouched members from the template, then write the rendered
    # members over them.
    for item in sxw_template_zip.infolist():
        if item.filename not in sxw_contents:
            buffer = sxw_template_zip.read(item.filename)
            sxw_out_zip.writestr(item.filename, buffer)
    for item_filename, buffer in sxw_contents.iteritems():
        sxw_out_zip.writestr(item_filename, buffer)
    sxw_template_zip.close()
    sxw_out_zip.close()
    final_op = sxw_out.getvalue()
    sxw_io.close()
    sxw_out.close()
    return final_op, mime_type
def _prepare_pdf_metadata(self):
    """Build the XMP (RDF/XML) metadata block for a ZUGFeRD (PDF/A-3)
    invoice: PDF/A identification, Dublin Core title/creator/description,
    producer/creator tools, timestamps, the embedded ZUGFeRD extension
    schema and the ZUGFeRD document description.

    :return: serialized XMP metadata as a UTF-8 string (no XML declaration)
    """
    self.ensure_one()
    nsmap_rdf = {'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'}
    nsmap_dc = {'dc': 'http://purl.org/dc/elements/1.1/'}
    nsmap_pdf = {'pdf': 'http://ns.adobe.com/pdf/1.3/'}
    nsmap_xmp = {'xmp': 'http://ns.adobe.com/xap/1.0/'}
    nsmap_pdfaid = {'pdfaid': 'http://www.aiim.org/pdfa/ns/id/'}
    nsmap_zf = {'zf': 'urn:ferd:pdfa:CrossIndustryDocument:invoice:1p0#'}
    # Clark-notation prefixes ('{uri}tag') for lxml element names.
    ns_dc = '{%s}' % nsmap_dc['dc']
    ns_rdf = '{%s}' % nsmap_rdf['rdf']
    ns_pdf = '{%s}' % nsmap_pdf['pdf']
    ns_xmp = '{%s}' % nsmap_xmp['xmp']
    ns_pdfaid = '{%s}' % nsmap_pdfaid['pdfaid']
    ns_zf = '{%s}' % nsmap_zf['zf']
    ns_xml = '{http://www.w3.org/XML/1998/namespace}'
    root = etree.Element(ns_rdf + 'RDF', nsmap=nsmap_rdf)
    # PDF/A-3B identification
    desc_pdfaid = etree.SubElement(root, ns_rdf + 'Description',
                                   nsmap=nsmap_pdfaid)
    desc_pdfaid.set(ns_rdf + 'about', '')
    etree.SubElement(desc_pdfaid, ns_pdfaid + 'part').text = '3'
    etree.SubElement(desc_pdfaid, ns_pdfaid + 'conformance').text = 'B'
    # Dublin Core block: title, creator (company), description
    desc_dc = etree.SubElement(root, ns_rdf + 'Description',
                               nsmap=nsmap_dc)
    desc_dc.set(ns_rdf + 'about', '')
    dc_title = etree.SubElement(desc_dc, ns_dc + 'title')
    dc_title_alt = etree.SubElement(dc_title, ns_rdf + 'Alt')
    dc_title_alt_li = etree.SubElement(dc_title_alt, ns_rdf + 'li')
    dc_title_alt_li.text = 'ZUGFeRD Invoice'
    dc_title_alt_li.set(ns_xml + 'lang', 'x-default')
    dc_creator = etree.SubElement(desc_dc, ns_dc + 'creator')
    dc_creator_seq = etree.SubElement(dc_creator, ns_rdf + 'Seq')
    etree.SubElement(dc_creator_seq,
                     ns_rdf + 'li').text = self.company_id.name
    dc_desc = etree.SubElement(desc_dc, ns_dc + 'description')
    dc_desc_alt = etree.SubElement(dc_desc, ns_rdf + 'Alt')
    dc_desc_alt_li = etree.SubElement(dc_desc_alt, ns_rdf + 'li')
    # NOTE(review): operator precedence makes this
    # ('Invoice %s' % self.number) or self.status — the fallback only
    # triggers if the formatted string is empty (never). The intent was
    # probably 'Invoice %s' % (self.number or self.status) — confirm.
    dc_desc_alt_li.text = 'Invoice %s' % self.number or self.status
    dc_desc_alt_li.set(ns_xml + 'lang', 'x-default')
    # pdf:Producer
    desc_adobe = etree.SubElement(root, ns_rdf + 'Description',
                                  nsmap=nsmap_pdf)
    desc_adobe.set(ns_rdf + 'about', '')
    producer = etree.SubElement(desc_adobe, ns_pdf + 'Producer')
    producer.text = 'PyPDF2'
    # xmp:CreatorTool / CreateDate / ModifyDate
    desc_xmp = etree.SubElement(root, ns_rdf + 'Description',
                                nsmap=nsmap_xmp)
    desc_xmp.set(ns_rdf + 'about', '')
    creator = etree.SubElement(desc_xmp, ns_xmp + 'CreatorTool')
    creator.text =\
        'Odoo module account_invoice_zugferd by Alexis de Lattre'
    timestamp = self._get_metadata_timestamp()
    etree.SubElement(desc_xmp, ns_xmp + 'CreateDate').text = timestamp
    etree.SubElement(desc_xmp, ns_xmp + 'ModifyDate').text = timestamp
    zugferd_ext_schema_root = etree.parse(
        tools.file_open(
            'account_invoice_zugferd/data/ZUGFeRD_extension_schema.xmp'))
    # The ZUGFeRD extension schema must be embedded into each PDF document
    zugferd_ext_schema_desc_xpath = zugferd_ext_schema_root.xpath(
        '//rdf:Description', namespaces=nsmap_rdf)
    # index [1]: the second rdf:Description of the schema file is the one
    # to embed
    root.append(zugferd_ext_schema_desc_xpath[1])
    # Now is the ZUGFeRD description tag
    zugferd_desc = etree.SubElement(root, ns_rdf + 'Description',
                                    nsmap=nsmap_zf)
    zugferd_desc.set(ns_rdf + 'about', '')
    zugferd_desc.set(ns_zf + 'ConformanceLevel', ZUGFERD_LEVEL.upper())
    zugferd_desc.set(ns_zf + 'DocumentFileName', ZUGFERD_FILENAME)
    zugferd_desc.set(ns_zf + 'DocumentType', 'INVOICE')
    zugferd_desc.set(ns_zf + 'Version', '1.0')
    xml_str = etree.tostring(root, pretty_print=True, encoding="UTF-8",
                             xml_declaration=False)
    logger.debug('metadata XML:')
    logger.debug(xml_str)
    return xml_str
def generate_pain(self, cr, uid, context):
    """Generate an ISO 20022 pain.001.001.03 credit-transfer XML file for
    the payment.order in ``context['active_id']``, validate it against
    the bundled XSD, attach it to the order and mark the order done.

    :return: dict with 'pain_data' (base64 XML), 'pain_fname', 'note'
    :raises UserError: on missing BIC/amount/bank data or invalid
        communication/date configuration
    """
    if not context:
        context = {}
    active_id = context.get('active_id', [])
    payment_obj = self.pool.get('payment.order')
    payment_line_obj = self.pool.get('payment.line')
    attachment_obj = self.pool.get('ir.attachment')
    payment_line_obj = self.pool.get('payment.line')
    note = ''
    payment = payment_obj.browse(cr, uid, active_id, context=context)
    payment_mode = payment.mode
    # File name: order reference with non-word chars replaced by '_'
    pain_fname = re.sub('\W', '_', payment.reference).lower() + '.xml'
    company = self.pool.get('res.users').browse(cr, uid, uid).company_id
    if not (payment_mode.bank_id.bank_bic
            or payment_mode.bank_id.bank.bic):
        raise UserError(
            _('Configuration Error!'),
            _("Please fill in the BIC code of the Bank "
              "Debtor Account for this Payment Order!"))
    if not payment.line_ids:
        raise UserError(
            _('Data Error!'),
            _("Your Payment Order does not contain "
              "payment instructions!"))
    # create XML
    ns_map = {
        None: 'urn:iso:std:iso:20022:tech:xsd:pain.001.001.03',
        'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    }
    pain = etree.Element('Document', nsmap=ns_map)
    CstmrCdtTrfInitn = etree.SubElement(pain, 'CstmrCdtTrfInitn')
    # GroupHeader
    GrpHdr = etree.SubElement(CstmrCdtTrfInitn, 'GrpHdr')
    MsgId = etree.SubElement(GrpHdr, 'MsgId')
    MsgId.text = payment.reference
    CreDtTm = etree.SubElement(GrpHdr, 'CreDtTm')
    CreDtTm.text = time.strftime('%Y-%m-%dT%H:%M:%S')
    NbOfTxs = etree.SubElement(GrpHdr, 'NbOfTxs')
    NbOfTxs.text = str(len(payment.line_ids))
    CtrlSum = etree.SubElement(GrpHdr, 'CtrlSum')
    CtrlSum.text = '%.2f' % payment.total_line_amount
    InitgPty = etree.SubElement(GrpHdr, 'InitgPty')
    Nm = etree.SubElement(InitgPty, 'Nm')
    Nm.text = company.name
    if payment_mode.initgpty_id:
        Id = etree.SubElement(InitgPty, 'Id')
        OrgId = etree.SubElement(Id, 'OrgId')
        Othr = etree.SubElement(OrgId, 'Othr')
        Id = etree.SubElement(Othr, 'Id')
        Id.text = payment_mode.initgpty_id
        if payment_mode.initgpty_issr:
            Issr = etree.SubElement(Othr, 'Issr')
            Issr.text = payment_mode.initgpty_issr
    # PaymentInformation
    payment_method = 'TRF'
    for line in payment.line_ids:
        if not line.amount:
            raise UserError(
                _('Payment Instruction Error!'),
                _('Payment Instruction Error in Payment Line %s.\n'
                  'Please fill in the transaction amount!') % line.name)
        if not (line.bank_id and line.bank_id.acc_number):
            raise UserError(
                _('Payment Instruction Error!'),
                _("Unsupported Payment Instruction in Payment Line %s.\n"
                  "Please fill in the bank account number of the "
                  "Creditor for this Payment Line!") % line.name)
        # Derive the requested execution date from the order's
        # 'Preferred date' policy.
        if payment.date_prefered == 'now':
            execution_date = time.strftime('%Y-%m-%d')
        elif payment.date_prefered == 'fixed':
            execution_date = payment.date_scheduled
        elif payment.date_prefered == 'due':
            if not line.date:
                if line.ml_maturity_date:
                    execution_date = line.ml_maturity_date
                else:
                    execution_date = time.strftime('%Y-%m-%d')
            else:
                execution_date = line.date
        else:
            raise UserError(
                _('Unsupported Payment Order Option!'),
                _("Please ensure that the 'Preferred date' is equal "
                  "to 'Due date', 'Directly' or 'Fixed date'!"))
        # Never request an execution date in the past.
        if execution_date < time.strftime('%Y-%m-%d'):
            execution_date = time.strftime('%Y-%m-%d')
        if line.date != execution_date:
            note += _("\nThe Payment Date on Payment "
                      "Line %s has been changed.") % line.name
            payment_line_obj.write(cr, uid, line.id,
                                   {'date': execution_date})
        # One PmtInf block per payment line.
        PmtInf = etree.SubElement(CstmrCdtTrfInitn, 'PmtInf')
        PmtInfId = etree.SubElement(PmtInf, 'PmtInfId')
        PmtInfId.text = line.name
        PmtMtd = etree.SubElement(PmtInf, 'PmtMtd')
        PmtMtd.text = payment_method
        BtchBookg = etree.SubElement(PmtInf, 'BtchBookg')
        BtchBookg.text = 'false'
        if payment_method == 'TRF':
            PmtTpInf = etree.SubElement(PmtInf, 'PmtTpInf')
            InstrPrty = etree.SubElement(PmtTpInf, 'InstrPrty')
            InstrPrty.text = 'NORM'
            # SEPA service level only for EUR transfers
            if line.currency.name == 'EUR' \
                    or payment_mode.journal.currency == 'EUR':
                SvcLvl = etree.SubElement(PmtTpInf, 'SvcLvl')
                Cd = etree.SubElement(SvcLvl, 'Cd')
                Cd.text = 'SEPA'
        ReqdExctnDt = etree.SubElement(PmtInf, 'ReqdExctnDt')
        ReqdExctnDt.text = execution_date
        # Debtor (the company) identification
        Dbtr = etree.SubElement(PmtInf, 'Dbtr')
        Nm = etree.SubElement(Dbtr, 'Nm')
        Nm.text = company.name
        DbtrAcct = etree.SubElement(PmtInf, 'DbtrAcct')
        Id = etree.SubElement(DbtrAcct, 'Id')
        IBAN = etree.SubElement(Id, 'IBAN')
        IBAN.text = payment_mode.bank_id.iban.upper().replace(' ', '')
        DbtrAgt = etree.SubElement(PmtInf, 'DbtrAgt')
        FinInstnId = etree.SubElement(DbtrAgt, 'FinInstnId')
        BIC = etree.SubElement(FinInstnId, 'BIC')
        BIC.text = re.sub(
            '\s', '',
            payment_mode.bank_id.bank_bic.upper()
            or payment_mode.bank_id.bank.bic.upper())
        ChrgBr = etree.SubElement(PmtInf, 'ChrgBr')
        ChrgBr.text = line.bank_id.charge_bearer or 'SLEV'
        # Credit transfer transaction information
        CdtTrfTxInf = etree.SubElement(PmtInf, 'CdtTrfTxInf')
        PmtId = etree.SubElement(CdtTrfTxInf, 'PmtId')
        EndToEndId = etree.SubElement(PmtId, 'EndToEndId')
        EndToEndId.text = line.name
        Amt = etree.SubElement(CdtTrfTxInf, 'Amt')
        InstdAmt = etree.SubElement(Amt, 'InstdAmt',
                                    Ccy=line.currency.name)
        InstdAmt.text = '%.2f' % line.amount_currency
        # to be completed with other countries allowing
        # payments without BIC
        if line.bank_id.iban[0:2].upper() not in ['BE']:
            if not (line.bank_id.bank_bic or line.bank_id.bank.bic):
                raise UserError(
                    _('Configuration Error!'),
                    _("Unsupported Payment Instruction "
                      "in Payment Line %s.\n"
                      "Please fill in the BIC code of the Bank "
                      "Creditor Account for this Payment Line!")
                    % line.name)
        if line.bank_id.bank_bic or line.bank_id.bank.bic:
            CdtrAgt = etree.SubElement(CdtTrfTxInf, 'CdtrAgt')
            FinInstnId = etree.SubElement(CdtrAgt, 'FinInstnId')
            BIC = etree.SubElement(FinInstnId, 'BIC')
            BIC.text = re.sub('\s', '',
                              (line.bank_id.bank_bic
                               or line.bank_id.bank.bic).upper())
        # Creditor identification
        Cdtr = etree.SubElement(CdtTrfTxInf, 'Cdtr')
        Nm = etree.SubElement(Cdtr, 'Nm')
        Nm.text = line.partner_id.name
        CdtrAcct = etree.SubElement(CdtTrfTxInf, 'CdtrAcct')
        Id = etree.SubElement(CdtrAcct, 'Id')
        IBAN = etree.SubElement(Id, 'IBAN')
        IBAN.text = line.bank_id.iban.upper().replace(' ', '')
        # Remittance information: free text ('normal') or Belgian BBA
        # structured communication ('structured')
        if line.communication:
            comm = line.communication
            if line.communication2:
                comm += ' ' + line.communication2
            RmtInf = etree.SubElement(CdtTrfTxInf, 'RmtInf')
            if line.state == 'normal':
                Ustrd = etree.SubElement(RmtInf, 'Ustrd')
                Ustrd.text = comm
            elif line.state == 'structured':
                Strd = etree.SubElement(RmtInf, 'Strd')
                CdtrRefInf = etree.SubElement(Strd, 'CdtrRefInf')
                Tp = etree.SubElement(CdtrRefInf, 'Tp')
                CdOrPrtry = etree.SubElement(Tp, 'CdOrPrtry')
                Cd = etree.SubElement(CdOrPrtry, 'Cd')
                Cd.text = 'SCOR'
                Issr = etree.SubElement(Tp, 'Issr')
                Issr.text = 'BBA'
                comm = self.format_comm(line.communication)
                if not comm:
                    raise UserError(
                        _('Payment Instruction Error!'),
                        _("Unsupported Structured Communication "
                          "in Payment Line %s.\n"
                          "Only the Belgian Structured Communication "
                          "format (BBA) is supported in the current "
                          "release of the ISO 20022 payment module!")
                        % line.name)
                Ref = etree.SubElement(CdtrRefInf, 'Ref')
                Ref.text = comm
            else:
                raise UserError(
                    _('Configuration Error!'),
                    _("Unsupported Communication Type "
                      "in Payment Line %s.\n") % line.name)
    pain_data = etree.tostring(pain, encoding='UTF-8',
                               xml_declaration=True, pretty_print=True)
    # validate the generated XML schema
    xsd = tools.file_open('account_pain/xsd/pain.001.001.03.xsd')
    xmlschema_doc = etree.parse(xsd)
    xmlschema = etree.XMLSchema(xmlschema_doc)
    xml_to_validate = StringIO(pain_data)
    parse_result = etree.parse(xml_to_validate)
    if xmlschema.validate(parse_result):
        # Valid: attach the file to the order and close the order.
        pain_data = base64.encodestring(pain_data)
        attachment_obj.create(cr, uid, {
            'name': pain_fname,
            'datas': pain_data,
            'datas_fname': pain_fname,
            'res_model': 'payment.order',
            'res_id': active_id,
        }, context=context)
        payment_obj.set_done(cr, uid, [active_id], context)
    else:
        _logger.error(
            'The generated XML file does not fit the required schema !')
        _logger.error(tools.ustr(xmlschema.error_log.last_error))
        error = xmlschema.error_log[0]
        raise UserError(
            _('The generated XML file does not fit the required schema !'),
            error.message)
    if note:
        note = _('Warning:\n') + note
    return {'pain_data': pain_data, 'pain_fname': pain_fname,
            'note': note}
def run_test_terp(self, cr, uid, module_path):
    """Score the quality of a module's ``__openerp__.py`` manifest.

    Walks the module directory (one level deep), checks that the manifest
    exists, then awards/penalizes points for each expected tag.

    :param module_path: filesystem path of the module to check
    :return: ``[label, score]`` list, or None when the manifest is missing
    """
    list_files = os.listdir(module_path)
    # Appending while iterating intentionally walks one extra directory
    # level: entries added during the loop are visited too.
    for i in list_files:
        path = os.path.join(module_path, i)
        if os.path.isdir(path):
            for j in os.listdir(path):
                list_files.append(os.path.join(i, j))
    score = 1.0
    feel_good_factor = 0
    feel_bad_factor = 0
    if '__openerp__.py' not in list_files:
        self.no_terp = True
        self.result += _(
            "The module does not contain the __openerp__.py file")
        return None
    result_dict = {}
    result_dict1 = {}
    terp_file = os.path.join(module_path, '__openerp__.py')
    # SECURITY NOTE: eval() on the manifest executes arbitrary code; it
    # is kept for parity with the server's own manifest loading, but only
    # run this on trusted modules.
    manifest_fp = tools.file_open(terp_file)
    try:
        res = eval(manifest_fp.read())
    finally:
        manifest_fp.close()
    # only list some important tag here....
    # there are other less important tag like: auto_install, qweb, images
    terp_keys = [
        'category', 'name', 'description', 'author', 'website', 'data',
        'depends', 'version', 'installable',
    ]
    optional_keys = [
        'auto_install', 'qweb', 'images', 'test', 'application', 'demo'
    ]
    for key in terp_keys:
        if key in res:
            feel_good_factor += 1  # each tag should appear
            if isinstance(res[key], (str, unicode, list)):
                if not res[key]:
                    data = ''
                    if key == 'data':
                        data = "[OPTIONAL] Data tag is empty. It shows " \
                               "that you do not have any views, wizard," \
                               " workflow"
                    elif key == 'demo':
                        data = "[OPTIONAL] Demo tag is empty. It shows " \
                               "that you do not have any demo data"
                    else:
                        data = "No information about " + key + " tag"
                        feel_bad_factor += 1
                    result_dict1[key] = [key, data]
                else:
                    flag = False
                    # no. of chars should be >=150
                    if key == 'description' and len(str(res[key])) >= 150:
                        feel_good_factor += 1
                        flag = True
                        # description contains minimum 5 lines
                        if res['description'].count('\n') >= 4:
                            feel_good_factor += 1
                            flag = True
                    if not flag and key == 'description':
                        result_dict[key] = [
                            key, "Description of the "
                            "module in __openerp__.py may"
                            "not enough, you should "
                            "describe your module enough "
                            "because good description is "
                            "the beginning of a good "
                            "documentation. And a good "
                            "documentation limits the "
                            "support requests."
                        ]
                    if key == 'website':
                        # reg ex matching on temporary basis.Website is
                        # correctly formatted
                        ptrn = re.compile('[https?://]?[\w\.:]+[\w /:]+$')
                        result = ptrn.search(str(res[key]))
                        if result:
                            feel_good_factor += 1
                        else:
                            result_dict[key] = [
                                key, 'Website tag should be in valid '
                                'format or it should be lead to valid '
                                'page (for example: http://trobz.com/'
                            ]
                            feel_bad_factor += 1
            if isinstance(res[key], bool):
                # installable tag is provided and False
                if key == 'installable' and not res[key]:
                    result_dict[key] = [
                        key,
                        'Installable tag of the __openerp__.py file '
                        'of module should be set to True so that it '
                        'can be installed on client!'
                    ]
                    feel_bad_factor += 1
        elif key in optional_keys:
            result_dict1[key] = [key, "[OPTIONAL] Tag is missing!"]
        else:
            feel_bad_factor += 1
            result_dict1[key] = [key, "Tag is missing!"]
    # FIX: was `result_dict1 or result_dict1` (duplicated operand), so a
    # non-empty result_dict alone never triggered re-scoring.
    if result_dict or result_dict1:
        total = feel_good_factor + feel_bad_factor
        # Guard against a zero denominator (all tags of unscored types).
        if total:
            score = round(feel_good_factor / float(total), 2)
    self.result_details += self.get_result_details(result_dict)
    self.result_details += self.get_result_details(result_dict1)
    return [_('__openerp__.py file'), score]
def get_file(module_name, fp):
    """Open *fp* (a path relative to *module_name*) via the OpenERP
    file-access helper and return the file object."""
    return tools.file_open(os.path.join(module_name, fp))
def create(self, cr, uid, ids, data, context=None):
    """Aeroo report entry point: resolve the report definition, reset its
    cached template fields, expand the id list for multiple copies, and
    dispatch to the renderer matching the report type.

    :return: result of the selected ``create_source_*`` renderer
    :raises NotImplementedError: for an unknown report type
    """
    # FIX: the None-guard must run before context is written to;
    # previously context['print_id'] crashed when context was None.
    if context is None:
        context = {}
    #### Get Aeroo print object ###
    aeroo_print = AerooPrint()
    aeroo_print.start_total_time = time.time()
    aeroo_print.start_time = time.time()
    self.active_prints[aeroo_print.id] = aeroo_print
    context['print_id'] = aeroo_print.id
    ###############################
    self.logger("Start process %s (%s)" % (self.name, self.table),
                logging.INFO)  # debug mode
    pool = pooler.get_pool(cr.dbname)
    if 'tz' not in context:
        context['tz'] = pool.get('res.users').browse(
            cr, uid, uid).context_tz
    data.setdefault('model', context.get('active_model', False))
    ir_obj = pool.get('ir.actions.report.xml')
    name = self.name.startswith('report.') and self.name[7:] or self.name
    report_xml_ids = ir_obj.search(cr, uid, [('report_name', '=', name)],
                                   context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0],
                                   context=context)
        # Clear cached template content so it is reloaded fresh.
        # FIX: the last two assignments used the undefined name
        # 'report_rml' (NameError) instead of 'report_xml'.
        report_xml.report_rml = None
        report_xml.report_rml_content = None
        report_xml.report_sxw_content_data = None
        report_xml.report_sxw_content = None
        report_xml.report_sxw = None
        copies_ids = []
        # FIX: 'report_xml > 1' compared the record itself to an int;
        # the number of copies is what must be checked.
        if not report_xml.report_wizard and report_xml.copies > 1:
            while (report_xml.copies):
                copies_ids.extend(ids)
                report_xml.copies -= 1
            ids = copies_ids or ids
    else:
        # No report record: build a minimal stand-in from the template.
        title = ''
        report_file = tools.file_open(self.tmpl)
        try:
            rml = report_file.read()
            report_type = data.get('report_type', 'pdf')

            class a(object):
                def __init__(self, *args, **argv):
                    for key, arg in argv.items():
                        setattr(self, key, arg)

            report_xml = a(title=title, report_type=report_type,
                           report_rml_content=rml, name=title,
                           attachment=False, header=self.header,
                           process_sep=False)
        finally:
            report_file.close()
    report_type = report_xml.report_type
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'txt', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    elif report_type == 'mako2html':
        fnct = self.create_source_mako2html
    elif report_type == 'aeroo':
        if report_xml.out_format.code in ['oo-pdf']:
            fnct = self.create_source_pdf
        elif report_xml.out_format.code in [
                'oo-odt', 'oo-ods', 'oo-doc', 'oo-xls', 'oo-csv',
                'oo-dbf', 'genshi-raw']:
            fnct = self.create_source_odt
        else:
            return super(Aeroo_report, self).create(cr, uid, ids, data,
                                                    context)
    else:
        raise NotImplementedError(
            _('Unknown report type: %s') % report_type)
    return fnct(cr, uid, ids, data, report_xml, context)
def _create_table(self, uid, ids, fields, fields_order, results, context,
                  title=''):
    """ To create nodes table.

    Build an intermediate <report> XML tree (config, header, data rows,
    totals row) from *results*, run it through the custom XSL stylesheet
    and render the resulting RML. Note: mutates the dicts in *results*
    in place while formatting cell values.

    @return: No return value
    """
    # A4 landscape, in millimetres; 2.8346 converts mm to points.
    pageSize = [297.0, 210.0]
    new_doc = etree.Element("report")
    config = etree.SubElement(new_doc, 'config')

    def _append_node(name, text):
        # Helper: add a <name>text</name> child under <config>.
        n = etree.SubElement(config, name)
        n.text = text

    #_append_node('date', time.strftime('%d/%m/%Y'))
    _append_node(
        'date',
        time.strftime(
            str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
    _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
    _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346, ))
    _append_node('PageHeight', '%.2f' % (pageSize[1] * 2.8346, ))
    _append_node('report-header', title)
    _append_node(
        'company',
        pooler.get_pool(self.cr.dbname).get('res.users').browse(
            self.cr, uid, uid).company_id.name)
    _append_node('lang', self.dir)
    rpt_obj = pooler.get_pool(self.cr.dbname).get('res.users')
    rml_obj = report_sxw.rml_parse(self.cr, uid, rpt_obj._name, context)
    _append_node(
        'header-date',
        str(rml_obj.formatLang(time.strftime("%Y-%m-%d"), date=True))
        + ' ' + str(time.strftime("%H:%M")))
    # Column-width computation: fixed 60pt for date/number columns,
    # remaining width split proportionally to declared field sizes.
    l = []
    t = 0
    strmax = (pageSize[0] - 40) * 2.8346
    temp = []   # temp[i] == 1 marks a numeric (summable) column
    tsum = []   # running totals per column
    for i in range(0, len(fields_order)):
        temp.append(0)
        tsum.append(0)
    ince = -1
    for f in fields_order:
        s = 0
        ince += 1
        if fields[f]['type'] in ('date', 'time', 'datetime', 'float',
                                 'integer'):
            s = 60
            strmax -= s
            if fields[f]['type'] in ('float', 'integer'):
                temp[ince] = 1
        else:
            t += fields[f].get('size', 80) / 28 + 1
        l.append(s)
    for pos in range(len(l)):
        if not l[pos]:
            s = fields[fields_order[pos]].get('size', 80) / 28 + 1
            l[pos] = strmax * s / t
    _append_node('tableSize', ','.join(map(str, l)))
    header = etree.SubElement(new_doc, 'header')
    for f in fields_order:
        field = etree.SubElement(header, 'field')
        field.text = tools.ustr(fields[f]['string'] or '')
    lines = etree.SubElement(new_doc, 'lines')
    for line in results:
        node_line = etree.SubElement(lines, 'row')
        count = -1
        for f in fields_order:
            float_flag = 0
            count += 1
            # Per-type value formatting (mutates line[f] in place):
            if fields[f]['type'] == 'many2one' and line[f]:
                if not line.get('__group'):
                    line[f] = line[f][1]
            if fields[f]['type'] == 'selection' and line[f]:
                for key, value in fields[f]['selection']:
                    if key == line[f]:
                        line[f] = value
                        break
            if fields[f]['type'] in ('one2many', 'many2many') and line[f]:
                line[f] = '( ' + tools.ustr(len(line[f])) + ' )'
            if fields[f]['type'] == 'float' and line[f]:
                precision = (('digits' in fields[f])
                             and fields[f]['digits'][1]) or 2
                prec = '%.' + str(precision) + 'f'
                line[f] = prec % (line[f])
                float_flag = 1
            if fields[f]['type'] == 'date' and line[f]:
                new_d1 = line[f]
                if not line.get('__group'):
                    format = str(
                        locale.nl_langinfo(locale.D_FMT).replace(
                            '%y', '%Y'))
                    d1 = datetime.strptime(line[f], '%Y-%m-%d')
                    new_d1 = d1.strftime(format)
                line[f] = new_d1
            if fields[f]['type'] == 'time' and line[f]:
                new_d1 = line[f]
                if not line.get('__group'):
                    format = str(locale.nl_langinfo(locale.T_FMT))
                    d1 = datetime.strptime(line[f], '%H:%M:%S')
                    new_d1 = d1.strftime(format)
                line[f] = new_d1
            if fields[f]['type'] == 'datetime' and line[f]:
                new_d1 = line[f]
                if not line.get('__group'):
                    format = str(
                        locale.nl_langinfo(locale.D_FMT).replace(
                            '%y', '%Y')) + ' ' + str(
                        locale.nl_langinfo(locale.T_FMT))
                    d1 = datetime.strptime(line[f], '%Y-%m-%d %H:%M:%S')
                    new_d1 = d1.strftime(format)
                line[f] = new_d1
            if line.get('__group'):
                col = etree.SubElement(node_line, 'col', para='group',
                                       tree='no')
            else:
                col = etree.SubElement(node_line, 'col', para='yes',
                                       tree='no')
            # Prevent empty labels in groups
            if f == line.get('__grouped_by') and line.get(
                    '__group'
            ) and not line[f] and not float_flag and not temp[count]:
                col.text = line[f] = 'Undefined'
                col.set('tree', 'undefined')
            if line[f] != None:
                col.text = tools.ustr(line[f] or '')
                if float_flag:
                    col.set('tree', 'float')
                # Accumulate numeric columns for the totals row.
                if line.get('__no_leaf') and temp[
                        count] == 1 and f != 'id' and not line[
                        '__context']['group_by']:
                    tsum[count] = float(tsum[count]) + float(line[f])
                if not line.get(
                        '__group') and f != 'id' and temp[count] == 1:
                    tsum[count] = float(tsum[count]) + float(line[f])
            else:
                col.text = '/'
    # Final totals row; the 'Total' label goes in the first (ltr) or
    # last (rtl) column.
    node_line = etree.SubElement(lines, 'row')
    for f in range(0, len(fields_order)):
        col = etree.SubElement(node_line, 'col', para='group', tree='no')
        col.set('tree', 'float')
        if tsum[f] != None:
            if tsum[f] != 0.0:
                digits = fields[fields_order[f]].get('digits', (16, 2))
                prec = '%%.%sf' % (digits[1], )
                total = prec % (tsum[f], )
                txt = str(total or '')
            else:
                txt = str(tsum[f] or '')
        else:
            txt = '/'
        if (self.dir == 'rtl' and f == len(fields_order) - 1) or (
                self.dir == 'ltr' and f == 0):
            txt = _('Total')
            col.set('tree', 'no')
        col.text = tools.ustr(txt or '')
    transform = etree.XSLT(
        etree.parse(tools.file_open('base_custom/report/custom_new.xsl')))
    rml = etree.tostring(transform(new_doc))
    self.obj = render.rml(rml, title=self.title)
    self.obj.render()
    return True
def create_rml(self, cr, xml, uid, context=None):
    """Apply the report's XSL stylesheet (if any) to *xml*, resolving
    stylesheet imports through the OpenERP addons path and translating
    marked (``t``-attributed) nodes into ``context['lang']``.

    :param xml: source XML string
    :return: transformed XML string (or *xml* unchanged when no XSL)
    """
    if self.tmpl == '' and not self.internal_header:
        self.internal_header = True
    if not context:
        context = {}
    registry = openerp.registry(cr.dbname)
    ir_translation_obj = registry['ir.translation']
    # In some case we might not use xsl ...
    if not self.xsl:
        return xml
    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        # Rewrite relative <import href="..."> to absolute addon paths.
        for import_child in stylesheet.findall('./import'):
            if 'href' in import_child.attrib:
                imp_file = import_child.get('href')
                _, imp_file = tools.file_open(imp_file,
                                              subdir=xsl_path,
                                              pathinfo=True)
                # FIX: urllib.parse.quote is Python 3 only; this module
                # is Python 2 (plain `import urllib`), so use
                # urllib.quote as the other create_rml variants do.
                import_child.set('href', urllib.quote(str(imp_file)))
                imp_file.close()
    finally:
        stylesheet_file.close()

    #TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        translate_aux(doc, lang, False)

    def translate_aux(doc, lang, t):
        # Recursively translate text/tail of nodes below any node
        # carrying a truthy 't' attribute.
        for node in doc:
            t = t or node.get("t")
            if t:
                text = None
                tail = None
                if node.text:
                    text = node.text.strip().replace('\n', ' ')
                if node.tail:
                    tail = node.tail.strip().replace('\n', ' ')
                if text:
                    translation1 = ir_translation_obj._get_source(
                        cr, uid, self.name2, 'xsl', lang, text)
                    if translation1:
                        node.text = node.text.replace(text, translation1)
                if tail:
                    translation2 = ir_translation_obj._get_source(
                        cr, uid, self.name2, 'xsl', lang, tail)
                    if translation2:
                        node.tail = node.tail.replace(tail, translation2)
            translate_aux(node, lang, t)

    if context.get('lang', False):
        translate(stylesheet.iter(), context['lang'])

    transform = etree.XSLT(stylesheet)
    xml = etree.tostring(transform(etree.fromstring(xml)))
    return xml
def test_purchase(self, cr, uid, ids, context=None):
    """Run the purchase-order YAML test data inside a DB savepoint,
    collect the generated CSV log files and show the result wizard.

    The savepoint is released (committed) only when the wizard's
    ``test_commit`` flag is set; otherwise it is rolled back.

    :return: ir.actions.act_window dict reopening the wizard form
    """
    if context is None:  # was `context == None`; identity check is the idiom
        context = {}
    assertion_obj = assertion_report.assertion_report()
    this = self.browse(cr, uid, ids)[0]
    fp_data = tools.file_open(
        os.path.join('purchase_test_data_imp',
                     'test/purchase_order_test_data.xml'))
    fp_test = tools.file_open(
        os.path.join('purchase_test_data_imp',
                     'test/purchase_order_product_can_be_purchased.yml'))
    try:
        cr.execute("SAVEPOINT test_yaml_purchase_savepoint")
        context.update({'uid': uid})
        tools.convert_xml_import(cr, 'purchase_test_data_imp', fp_data,
                                 {}, 'init', False, assertion_obj)
        tools.convert_yaml_import(cr, 'purchase_test_data_imp', fp_test,
                                  'test', {}, 'init', False,
                                  assertion_obj, context=context)
    finally:
        if this.test_commit:
            cr.execute("RELEASE SAVEPOINT test_yaml_purchase_savepoint")
        else:
            cr.execute("ROLLBACK TO test_yaml_purchase_savepoint")
        fp_data.close()
        fp_test.close()
    # The YAML run writes its CSV logs to the system temp directory.
    # FIX: the log files were opened without ever being closed; use
    # context managers (read-only access, so plain 'rb' suffices).
    tmp_path = tempfile.gettempdir()
    with open(os.path.join(tmp_path, 'purchase_order_product_log.csv'),
              'rb') as log_file:
        file_purchase_order_wrong = base64.encodestring(log_file.read())
    with open(os.path.join(tmp_path, 'purchase_order_general_log.csv'),
              'rb') as log_file:
        file_purchase_order_log = base64.encodestring(log_file.read())
    self.write(
        cr, uid, ids, {
            'yaml_file': file_purchase_order_wrong,
            'yaml_file_log': file_purchase_order_log,
            'filename_product': 'purchase_order_product_log.csv',
            'filename_log_general': 'purchase_order_general_log.csv',
        }, context=context)
    __, xml_id = self.pool.get('ir.model.data').get_object_reference(
        cr, uid, 'purchase_test_data_imp',
        'view_wizard_purchase_test_data_result')
    return {
        'res_model': 'test.yaml.data.purchase',
        'view_type': 'form',
        'view_mode': 'form',
        'view_id': xml_id,
        'res_id': this.id,
        'context': context,
        'type': 'ir.actions.act_window',
        'target': 'new',
    }
def load_data(self, cr, uid, load=None):
    """ Load data defined on test case `data` attribute.

    Each entry in *load* is either a filename (loaded from the current
    instance module) or a dict with 'module'/'file' keys and an optional
    'uid'. Supported extensions: .csv, .sql, .yml, .xml.

    :param load: list of entries to load (default: nothing)
    """
    # FIX: mutable default argument replaced by the None sentinel.
    if load is None:
        load = []
    module = self.get_current_instance_module()

    # copy from server/openerp/modules/loading.py:66...
    def process_sql_file(cr, fp):
        queries = fp.read().split(';')
        for query in queries:
            new_query = ' '.join(query.split())
            if new_query:
                cr.execute(new_query)

    # FIX: loop variable renamed from `file` (shadowed the builtin).
    for entry in load:
        if not isinstance(entry, dict):
            data = {'module': module, 'file': entry}
        else:
            data = entry
        # NOTE(review): this only rejects entries with NEITHER key;
        # a dict with just one of them still fails later with a
        # KeyError — confirm whether both keys should be required.
        if not [name for name in ['module', 'file'] if name in data]:
            # FIX: printf-style args were passed to Exception and never
            # interpolated; format the message explicitly.
            raise Exception(
                'Test case data entry is not valid: %s' % (data,))
        if data.get('uid', False):
            data.update({'uid': self.full_ref(data['uid'])})
        else:
            data.update({'uid': SUPERUSER_ID})
        self.log.debug("module %s: loading %s (User ID: %s)",
                       data['module'], data['file'], data['uid'])
        _, ext = os.path.splitext(data['file'])
        pathname = os.path.join(data['module'], data['file'])
        fp = tools.file_open(pathname)
        noupdate = False
        # fake these incomprehensible params...
        idref = {}
        mode = 'update'
        kind = 'data'
        report = None
        try:
            ext = ext.lower()
            if ext == '.csv':
                # TODO: Migrate the feature below to v8
                # allow to specify a user when importing data. By default,
                # use the superuser.
                tools.convert_csv_import(cr, module, pathname, fp.read(),
                                         idref, mode, noupdate)
            elif ext == '.sql':
                process_sql_file(cr, fp)
            elif ext == '.yml':
                tools.convert_yaml_import(cr, module, fp, kind, idref,
                                          mode, noupdate, report)
            elif ext == '.xml':
                tools.convert_xml_import(cr, module, fp, idref, mode,
                                         noupdate, report)
            else:
                self.log.warning("Can't load unknown file type %s.",
                                 data['file'])
        finally:
            fp.close()
def migrate_module(self, pkg, stage):
    """Run the `stage` ('pre' or 'post') migration scripts applicable to
    module `pkg`, i.e. every script whose version lies strictly above the
    installed version and at or below the version being installed.

    :param pkg: module/package node (provides name, state, data,
        installed_version)
    :param stage: 'pre' or 'post'
    """
    assert stage in ('pre', 'post')
    # Used only to decorate log messages: [>v] for pre, [v>] for post.
    stageformat = {
        'pre': '[>%s]',
        'post': '[%s>]',
    }

    # Nothing to migrate when the module is not being updated/upgraded or
    # has no recorded installed version to migrate from.
    if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade') or pkg.installed_version is None:
        return

    def convert_version(version):
        # Prefix a short module version (e.g. "1.1") with the server's
        # major version so all versions compare in the same scheme.
        if version.count('.') >= 2:
            return version  # the version number already containt the server version
        return "%s.%s" % (release.major_version, version)

    def _get_migration_versions(pkg):
        # Collect the version directories available for this module, from
        # both the module's own tree and the maintenance tree, sorted.
        def __get_dir(tree):
            return [d for d in tree if tree[d] is not None]

        versions = list(
            set(
                __get_dir(self.migrations[pkg.name]['module']) +
                __get_dir(self.migrations[pkg.name]['maintenance'])))
        versions.sort(key=lambda k: parse_version(convert_version(k)))
        return versions

    def _get_migration_files(pkg, version, stage):
        """ return a list of tuple (module, file) """
        m = self.migrations[pkg.name]
        lst = []
        mapping = {
            'module': opj(pkg.name, 'migrations'),
            'maintenance': opj('base', 'maintenance', 'migrations',
                               pkg.name),
        }
        for x in mapping.keys():
            if version in m[x]:
                for f in m[x][version]:
                    # entries with a non-None value are sub-directories,
                    # not script files — skip them
                    if m[x][version][f] is not None:
                        continue
                    # only scripts named '<stage>-*' belong to this stage
                    if not f.startswith(stage + '-'):
                        continue
                    lst.append(opj(mapping[x], version, f))
        lst.sort()
        return lst

    def mergedict(a, b):
        # Non-destructive dict merge (b wins) for log formatting.
        a = a.copy()
        a.update(b)
        return a

    parsed_installed_version = parse_version(pkg.installed_version or '')
    current_version = parse_version(convert_version(pkg.data['version']))

    versions = _get_migration_versions(pkg)

    for version in versions:
        # run only scripts in (installed_version, current_version]
        if parsed_installed_version < parse_version(
                convert_version(version)) <= current_version:
            strfmt = {
                'addon': pkg.name,
                'stage': stage,
                'version': stageformat[stage] % version,
            }

            for pyfile in _get_migration_files(pkg, version, stage):
                name, ext = os.path.splitext(os.path.basename(pyfile))
                if ext.lower() != '.py':
                    continue
                mod = fp = fp2 = None
                try:
                    fp = tools.file_open(pyfile)

                    # imp.load_source need a real file object, so we create
                    # one from the file-like object we get from file_open
                    fp2 = os.tmpfile()
                    fp2.write(fp.read())
                    fp2.seek(0)

                    try:
                        mod = imp.load_source(name, pyfile, fp2)
                        _logger.info(
                            'module %(addon)s: Running migration %(version)s %(name)s'
                            % mergedict({'name': mod.__name__}, strfmt))
                    except ImportError:
                        _logger.exception(
                            'module %(addon)s: Unable to load %(stage)s-migration file %(file)s'
                            % mergedict({'file': pyfile}, strfmt))
                        raise

                    # NOTE(review): this repeats the "Running migration"
                    # message already logged just above — looks like
                    # accidental duplication; confirm before removing.
                    _logger.info(
                        'module %(addon)s: Running migration %(version)s %(name)s'
                        % mergedict({'name': mod.__name__}, strfmt))

                    if hasattr(mod, 'migrate'):
                        mod.migrate(self.cr, pkg.installed_version)
                    else:
                        _logger.error(
                            'module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function'
                            % strfmt)
                finally:
                    # always release the file handles and drop the loaded
                    # module object
                    if fp:
                        fp.close()
                    if fp2:
                        fp2.close()
                    if mod:
                        del mod
def create(self, cr, uid, ids, data, context=None):
    """Render the report, checking reprint permission first.

    Looks up the ir.actions.report.xml record matching this report's name
    (without the 'report.' prefix); if none exists, builds a stand-in
    object from the RML template on disk. Dispatches to the rendering
    function for the report type and, when reprinting is allowed, records
    the print via create_ir_print.

    :return: the rendering function's result, or (False, False) when it
        produced nothing
    :raises NotImplementedError: for an unknown report type
    """
    # Guard: context is mutated below, so it must be a dict.
    if context is None:
        context = {}
    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get('ir.actions.report.xml')
    report_xml_ids = ir_obj.search(cr, uid,
                                   [('report_name', '=', self.name[7:])],
                                   context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0],
                                   context=context)
    else:
        title = ''
        # Close the template handle (the original leaked it).
        rml_file = tools.file_open(self.tmpl, subdir=None)
        try:
            rml = rml_file.read()
        finally:
            rml_file.close()
        report_type = data.get('report_type', 'pdf')

        class a(object):
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        report_xml = a(title=title, report_type=report_type,
                       report_rml_content=rml, name=title,
                       attachment=False, header=self.header)
    report_type = report_xml.report_type
    result = self.validate_report(cr, uid, report_xml.id, data['id'],
                                  context)
    if result['allow']:
        context.update({'allow': True})
    else:
        logger.notifyChannel("info", netsvc.LOG_INFO,
                             "NO SE PERMITE REIMPRIMIR")
    if result['check_note_use']:
        context.update({'check_note_use': True})
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    else:
        # The original raised a bare string ('Unknown Report Type'), which
        # is not a valid exception; raise a real exception instead.
        raise NotImplementedError('Unknown Report Type: %s' % report_type)
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return (False, False)
    #~ Here should go the check which verifies whether this report must
    #~ be printed or not
    if context.get('allow', False):
        # NOTE(review): bare `validate_report` here vs `self.validate_report`
        # above — presumably a module-level helper; confirm it is not a
        # missing `self.`.
        printer = validate_report(cr, uid, result['brw'], fnct_ret[0],
                                  data['id'])
        if printer:
            self.create_ir_print(cr, uid, report_xml.id, data['id'])
    return fnct_ret
def load_information_from_description_file(module, mod_path=None):
    """Read a module's manifest and return its descriptor dict.

    :param module: The name of the module (sale, purchase, ...)
    :param mod_path: Physical path of the module; computed from `module`
        via get_module_path() when not provided
    :return: descriptor dict (defaults merged with the manifest content,
        description filled from a README when missing), or {} when no
        manifest file is found
    """
    if not mod_path:
        mod_path = get_module_path(module)
    terp_file = mod_path and opj(mod_path, MANIFEST) or False
    if terp_file:
        info = {}
        if os.path.isfile(terp_file):
            # default values for descriptor
            info = {
                'application': False,
                'author': '',
                'auto_install': False,
                'category': 'Uncategorized',
                'depends': [],
                'description': '',
                'icon': get_module_icon(module),
                'installable': True,
                'license': 'AGPL-3',
                'post_load': None,
                'version': '1.0',
                'web': False,
                'website': '',
                'sequence': 100,
                'summary': '',
            }
            # iter(list, None) yields a fresh [] on every call, so each of
            # these list-valued keys defaults to an empty list.
            info.update(
                itertools.izip(
                    'depends data demo test init_xml update_xml demo_xml'.
                    split(), iter(list, None)))

            f = tools.file_open(terp_file)
            try:
                # SECURITY NOTE: eval() executes arbitrary code from the
                # manifest. Manifests are trusted module files here, but
                # never point this at untrusted input.
                info.update(eval(f.read()))
            finally:
                f.close()
            if not info.get('description'):
                readme_path = [
                    opj(mod_path, x) for x in README
                    if os.path.isfile(opj(mod_path, x))
                ]
                if readme_path:
                    # Close the README handle (the original leaked it).
                    readme_file = tools.file_open(readme_path[0])
                    try:
                        info['description'] = readme_file.read()
                    finally:
                        readme_file.close()
            if 'active' in info:
                # 'active' has been renamed 'auto_install'
                info['auto_install'] = info['active']
            info['version'] = adapt_version(info['version'])
            return info

    #TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    _logger.debug('module %s: no %s file found.', module, MANIFEST)
    return {}
def create(self, cr, uid, ids, data, context=None):
    """Render this report for the given record ids.

    Resolves the matching ir.actions.report.xml record (or fabricates a
    stand-in object from the on-disk template when none exists), then
    dispatches to the rendering routine for the report type.

    :return: the rendering routine's result, or (False, False) when it
        produced nothing
    :raises NotImplementedError: for an unrecognized report type
    """
    if context is None:
        context = {}
    if self.internal_header:
        context.update(internal_header=self.internal_header)
    # skip osv.fields.sanitize_binary_value() because we want the raw
    # bytes in all cases
    context.update(bin_raw=True)

    registry = openerp.registry(cr.dbname)
    ir_obj = registry['ir.actions.report.xml']
    registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True,
                                   context=context)

    # self.name carries a 'report.' prefix; the DB record does not.
    matching_ids = ir_obj.search(cr, uid,
                                 [('report_name', '=', self.name[7:])],
                                 context=context)
    if matching_ids:
        report_xml = ir_obj.browse(cr, uid, matching_ids[0],
                                   context=context)
    else:
        # No DB record: build a minimal stand-in from the template file.
        class a(object):
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        template_fp = tools.file_open(self.tmpl, subdir=None)
        try:
            rml_content = template_fp.read()
        finally:
            template_fp.close()
        title = ''
        report_xml = a(title=title,
                       report_type=data.get('report_type', 'pdf'),
                       report_rml_content=rml_content,
                       name=title,
                       attachment=False,
                       header=self.header)

    if report_xml.header:
        report_xml.header = self.header

    # Map each report type to its rendering routine.
    renderers = {
        'sxw': self.create_source_odt,
        'odt': self.create_source_odt,
        'pdf': self.create_source_pdf,
        'raw': self.create_source_pdf,
        'txt': self.create_source_pdf,
        'html': self.create_source_pdf,
        'html2html': self.create_source_html2html,
        'mako2html': self.create_source_mako2html,
    }
    report_type = report_xml.report_type
    if report_type not in renderers:
        raise NotImplementedError(
            _('Unknown report type: %s') % report_type)

    rendered = renderers[report_type](cr, uid, ids, data, report_xml,
                                      context)
    if not rendered:
        return False, False
    return rendered