def upload_report(self, cr, uid, report_id, file_sxw, file_type, context):
    """Store an uploaded report template on an ir.actions.report.xml record.

    ``file_sxw`` is the base64-encoded template; sxw/odt templates are
    converted to RML through the matching XSL stylesheet, html is stored
    as-is.  Commits and re-registers all report services.

    :raises ValueError: for an unsupported ``file_type`` (the original
        raised NameError because ``rml_content`` stayed unbound).
    """
    pool = pooler.get_pool(cr.dbname)
    sxw_data = base64.decodestring(file_sxw)  # decode once, reuse below
    sxwval = StringIO(sxw_data)
    xsl_names = {
        'sxw': 'normalized_oo2rml.xsl',
        'odt': 'normalized_odt2rml.xsl',
    }
    if file_type in xsl_names:
        fp = tools.file_open(
            xsl_names[file_type],
            subdir='addons/base_report_designer/wizard/tiny_sxw2rml')
        try:
            rml_content = str(sxw2rml(sxwval, xsl=fp.read()))
        finally:
            fp.close()  # was leaked in the original
    elif file_type == 'html':
        rml_content = sxw_data
    else:
        raise ValueError('Unsupported report file type: %r' % (file_type,))
    pool.get('ir.actions.report.xml').write(cr, uid, [report_id], {
        'report_sxw_content': sxw_data,
        'report_rml_content': rml_content,
    })
    cr.commit()
    db = pooler.get_db_only(cr.dbname)
    interface.register_all(db)
    return True
def _send_mail(self, cr, uid, data, context):
    """Build up to two CSV exports of ATA carnets pending federation
    sending and e-mail them as attachments to the address typed in the
    wizard form.

    Raises a wizard error when the address looks invalid or when
    neither query returns rows.
    """
    # Very loose e-mail sanity check
    ptrn = re.compile('(\w+@\w+(?:\.\w+)+)')
    result = ptrn.search(data['form']['email'])
    if result==None:
        raise wizard.except_wizard('Error !', 'Enter Valid E-Mail Address.')
    # Shared CSV header row for both files
    fields=['Id','Name','Partner','Related Type of Carnet','Emission Date','Validity Date','Holder Name','Holder Address','Holder City','Representer Name','Representer Address','Representer City','Usage','Goods','Area','Insurer Agreement','Own Risks','Goods Value','Double Signature','Initial No. of Pages','Additional No. of Pages','Warranty','Related Warranty Product','Date of Return','State','Date of Closure','Date of Sending to the Federation','Apply the Member Price']
    # --- First CSV: carnets OK'd during the selected month of the
    # current year and not yet sent to the federation
    month=data['form']['month']
    yr=int(time.strftime('%Y'))
    self.first_day=datetime.date(yr,int(month),1)
    self.last_day=datetime.date(yr,int(month),lengthmonth(yr, int(month)))
    # Date-range SQL fragment interpolated below.
    # NOTE(review): SQL is built by string formatting rather than query
    # parameters; values come from the wizard form — verify trust level.
    period="to_date('" + self.first_day.strftime('%Y-%m-%d') + "','yyyy-mm-dd') and to_date('" + self.last_day.strftime('%Y-%m-%d') +"','yyyy-mm-dd')"
    cr.execute('select id from cci_missions_ata_carnet where federation_sending_date is null and ok_state_date between %s'%(period))
    res_file1=cr.fetchall()
    lines=[]
    root_path=tools.config.options['root_path']
    if res_file1:
        lines=self.make_csv(cr, uid,res_file1,file2=0)
        self.write_csv(root_path+'/carnet_1.csv',fields,lines)
    # --- Second CSV: carnets still 'pending' whose return date is more
    # than two years old
    today=datetime.datetime.today()
    _date=datetime.date(today.year-2,today.month,today.day)
    comp_date=_date.strftime('%Y-%m-%d')
    cr.execute('select id from cci_missions_ata_carnet where federation_sending_date is null and state='"'pending'"' and return_date <='"'%s'"''%(str(comp_date)))
    res_file2=cr.fetchall()
    lines=[]
    if res_file2:
        lines=self.make_csv(cr, uid,res_file2,file2=1)
        self.write_csv(root_path+'/carnet_2.csv',fields,lines)
    # Nothing matched either query: abort with a notification
    if res_file1==[] and res_file2==[]:
        raise wizard.except_wizard('Notification !', 'No Records Found to make the CSV files.Choose other criteria.')
    files_attached=[]
    if res_file1:
        file_csv1=tools.file_open(root_path+'/carnet_1.csv','rb',subdir=None)
        files_attached=[('Ata_carnet_csv_1.csv',file_csv1.read())]
    if res_file2:
        file_csv2=tools.file_open(root_path+'/carnet_2.csv','rb',subdir=None)
        files_attached.append(('Ata_carnet_csv_2.csv',file_csv2.read()))
    # Send from the configured SMTP user to the requested address
    src=tools.config.options['smtp_user']
    dest=[data['form']['email']]
    body="Hello,\nHere are the CSV files for Federation Sending.\nThanks You For Using TinyERP.\nThink Big Use Tiny."
    tools.email_send_attach(src,dest,"Federation Sending Files From TinyERP",body,attach=files_attached)
    return {}
def sxwtorml(self, cr, uid, file_sxw, file_type):
    """Convert a base64-encoded sxw/odt report template to RML.

    :return: dict with key ``report_rml_content`` holding the RML string
    :raises ValueError: for an unsupported ``file_type`` (the original
        left ``fp`` unbound and crashed with NameError)
    """
    sxwval = StringIO(base64.decodestring(file_sxw))
    xsl_names = {
        'sxw': 'normalized_oo2rml.xsl',
        'odt': 'normalized_odt2rml.xsl',
    }
    if file_type not in xsl_names:
        raise ValueError('Unsupported report file type: %r' % (file_type,))
    fp = tools.file_open(
        xsl_names[file_type],
        subdir='addons/base_report_designer/wizard/tiny_sxw2rml')
    try:
        return {'report_rml_content': str(sxw2rml(sxwval, xsl=fp.read()))}
    finally:
        fp.close()  # was leaked in the original
def create_rml(self, cr, xml, uid, context=None):
    """Apply the report's XSL stylesheet (if any) to the raw XML,
    translating translatable nodes first, and return the resulting RML.

    :param xml: serialized XML produced by the report engine
    :return: transformed string, or ``xml`` unchanged when no XSL is set
    """
    if self.tmpl == '' and not self.internal_header:
        self.internal_header = True
    if not context:
        context = {}
    pool = pooler.get_pool(cr.dbname)
    ir_translation_obj = pool.get('ir.translation')
    # In some case we might not use xsl ...
    if not self.xsl:
        return xml
    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        # Rewrite <import href="..."> references to resolved paths
        for import_child in stylesheet.findall('./import'):
            if 'href' in import_child.attrib:
                imp_file = import_child.get('href')
                _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                import_child.set('href', urllib.quote(str(imp_file)))
                # NOTE(review): with pathinfo=True ``imp_file`` looks like
                # a path string, so .close() on it seems wrong — confirm
                # the return convention of tools.file_open.
                imp_file.close()
    finally:
        stylesheet_file.close()
    #TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        # Replace the text of every node carrying a 't' attribute by its
        # translation, when one exists.
        for node in doc.xpath('//*[@t]'):
            if not node.text:
                continue
            translation = ir_translation_obj._get_source(
                cr, uid, self.name2, 'xsl', lang, node.text)
            if translation:
                node.text = translation
    if context.get('lang', False):
        translate(stylesheet, context['lang'])
    transform = etree.XSLT(stylesheet)
    xml = etree.tostring(transform(etree.fromstring(xml)))
    return xml
def init(self, cr):
    """Load data (product_data.xml and sale_data.yml) before self."""
    if hasattr(super(purchase_order, self), 'init'):
        super(purchase_order, self).init(cr)
    logging.getLogger('init').info('HOOK: module product: loading product_data.xml')
    pathname = path.join('product', 'product_data.xml')
    # renamed from `file` (shadowed the builtin) and closed when done
    fp = tools.file_open(pathname)
    try:
        tools.convert_xml_import(cr, 'product', fp, {}, mode='init', noupdate=False)
    finally:
        fp.close()
    logging.getLogger('init').info('HOOK: module product_attributes: loading data/sale_data.yml')
    pathname = path.join('product_attributes', 'data', 'sale_data.yml')
    fp = tools.file_open(pathname)
    try:
        tools.convert_yaml_import(cr, 'product_attributes', fp, {}, mode='init', noupdate=False)
    finally:
        fp.close()
def create(self, cr, uid, ids, data, context=None):
    """Render the report for ``ids``: find the matching
    ir.actions.report.xml (or build a stand-in from the template file)
    and dispatch to the renderer for its report_type.

    :return: (content, format) tuple, or (False, False) on failure
    """
    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get('ir.actions.report.xml')
    report_xml_ids = ir_obj.search(cr, uid, [('report_name', '=', self.name[7:])], context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
    else:
        # No DB record: build a minimal stand-in around the raw template
        title = ''
        rml = tools.file_open(self.tmpl, subdir=None).read()
        report_type = data.get('report_type', 'pdf')

        class _FakeReport(object):
            # duck-typed substitute for a report.xml browse record
            def __init__(self, *args, **argv):
                for key, arg in argv.items():
                    setattr(self, key, arg)

        report_xml = _FakeReport(title=title, report_type=report_type,
                                 report_rml_content=rml, name=title,
                                 attachment=False, header=self.header)
    report_type = report_xml.report_type
    # BUG FIX: a debugging leftover forced report_type = 'xls' here,
    # which made every other dispatch branch unreachable; removed.
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    elif report_type == 'xls':
        fnct = self.create_source_xls
    else:
        # was: raise 'Unknown Report Type' (string exceptions are invalid)
        raise NotImplementedError('Unknown Report Type: %s' % report_type)
    fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
    if not fnct_ret:
        return (False, False)
    return fnct_ret
def load_init_update_xml(cr, m, idref, mode, kind):
    """Load the '<kind>_xml' data files (.csv/.sql/.yml/.xml) declared
    by module ``m`` in its descriptor.

    ``package`` and ``kwargs`` come from the enclosing scope.
    """
    for filename in package.data.get('%s_xml' % kind, []):
        logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: loading %s' % (m, filename))
        _, ext = os.path.splitext(filename)
        fp = tools.file_open(opj(m, filename))
        try:
            if ext == '.csv':
                # CSV loaded during 'init' is flagged noupdate
                noupdate = (kind == 'init')
                tools.convert_csv_import(cr, m, os.path.basename(filename), fp.read(), idref, mode=mode, noupdate=noupdate)
            elif ext == '.sql':
                process_sql_file(cr, fp)
            elif ext == '.yml':
                tools.convert_yaml_import(cr, m, fp, idref, mode=mode, **kwargs)
            else:
                # default: treat as an OpenERP XML data file
                tools.convert_xml_import(cr, m, fp, idref, mode=mode, **kwargs)
        finally:
            fp.close()
def upgrade_graph(graph, cr, module_list, force=None):
    """Add the modules of ``module_list`` to the dependency ``graph``
    in dependency order.

    :param force: list of kinds ('init'/'demo'/'update') forced on
    :return: number of packages actually added to the graph
    """
    if force is None:
        force = []
    packages = []
    len_graph = len(graph)
    for module in module_list:
        mod_path = get_module_path(module)
        terp_file = get_module_resource(module, '__openerp__.py')
        if not terp_file or not os.path.isfile(terp_file):
            # fall back to the legacy descriptor name
            terp_file = get_module_resource(module, '__terp__.py')
        if not mod_path or not terp_file:
            logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: not found, skipped' % (module))
            continue
        if os.path.isfile(terp_file) or zipfile.is_zipfile(mod_path+'.zip'):
            try:
                # NOTE(review): descriptor is eval()'ed — trusted input only
                info = eval(tools.file_open(terp_file).read())
            except:
                logger.notifyChannel('init', netsvc.LOG_ERROR, 'module %s: eval file %s' % (module, terp_file))
                raise
            if info.get('installable', True):
                packages.append((module, info.get('depends', []), info))
            else:
                logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: not installable, skipped' % (module))
    dependencies = dict([(p, deps) for p, deps, data in packages])
    current, later = set([p for p, dep, data in packages]), set()
    # Repeatedly take the head package: add it once all its deps are in
    # the graph, otherwise requeue it in 'later'.  Stops when no
    # progress is possible any more (current <= later).
    while packages and current > later:
        package, deps, data = packages[0]
        # if all dependencies of 'package' are already in the graph, add 'package' in the graph
        if reduce(lambda x, y: x and y in graph, deps, True):
            if not package in current:
                packages.pop(0)
                continue
            later.clear()
            current.remove(package)
            graph.addNode(package, deps)
            node = Node(package, graph)
            node.data = data
            for kind in ('init', 'demo', 'update'):
                if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
                    setattr(node, kind, True)
        else:
            later.add(package)
            packages.append((package, deps, data))
        packages.pop(0)
    graph.update_from_db(cr)
    for package in later:
        # whatever is left in 'later' has unmet dependencies
        unmet_deps = filter(lambda p: p not in graph, dependencies[package])
        logger.notifyChannel('init', netsvc.LOG_ERROR, 'module %s: Unmet dependencies: %s' % (package, ', '.join(unmet_deps)))
    result = len(graph) - len_graph
    if result != len(module_list):
        logger.notifyChannel('init', netsvc.LOG_WARNING, 'Not all modules have loaded.')
    return result
def parse(self, filename, ids, model, context=None):
    """Parse the XML report template into ``self.dom``/``self.doc`` and
    walk it for the given records.

    The template file is now closed deterministically (it was leaked;
    the sibling implementation of this method already used try/finally).
    """
    if not context:
        context = {}
    src_file = tools.file_open(filename)
    try:
        self.dom = etree.XML(src_file.read())
        self.doc = etree.Element(self.dom.tag)
        self.parse_tree(ids, model, context)
    finally:
        src_file.close()
def create_states(self, cr, uid, state_type, context=None):
    """It imports spanish states information trough an XML file."""
    # Pick the data file matching the requested state-name style
    file_name = 'l10n_es_toponyms_states_%s.xml' %state_type
    try:
        fp = tools.file_open(os.path.join('l10n_es_toponyms', os.path.join('wizard', file_name)))
    except IOError, e:
        # File not shipped/readable: signal by fp = None
        # NOTE(review): the block appears truncated after this point.
        fp = None
def check(self, cr, uid, ids, context=None):
    """Persist the LibreOffice settings and verify them by converting a
    test .odt document to PDF through the 'openoffice' service."""
    config_obj = self.pool['oo.config']
    data = self.read(cr, uid, ids, ['soffice', 'dir_tmp'])[0]
    del data['id']
    # Singleton config record: update when present, create otherwise
    config_id = config_obj.search(cr, 1, [], context=context)
    if config_id:
        config_obj.write(cr, 1, config_id, data, context=context)
    else:
        config_id = config_obj.create(cr, 1, data, context=context)
    try:
        with tools.file_open('report_aeroo_loffice/test_temp.odt', mode='rb') as fp:
            file_data = fp.read()
        DC = netsvc.Service._services.setdefault(
            'openoffice',
            OpenOffice_service(cr, data['soffice'], data['dir_tmp'])
        )
        with aeroo_lock:
            # round-trip the test document through the converter
            DC.putDocument(file_data)
            data = DC.saveByStream(u'writer_pdf_Export')
            DC.closeDocument()
            del DC
    except DocumentConversionException, e:
        netsvc.Service.remove('openoffice')
        error_details = str(e)
        # NOTE(review): block appears truncated — error_details/state are
        # assigned but never used in the visible code.
        state = 'error'
def upload_report(self, cr, uid, ids, context=None):
    """Convert the uploaded .sxw template to RML, store both on the
    target report, then open the 'save RML' dialog.

    :return: act_window descriptor for base.report.rml.save
    """
    from base_report_designer import openerp_sxw2rml
    import StringIO
    data = self.read(cr, uid, ids)[0]
    sxwval = StringIO.StringIO(base64.decodestring(data['file_sxw_upload']))
    fp = tools.file_open(
        'normalized_oo2rml.xsl',
        subdir='addons/base_report_designer/openerp_sxw2rml')
    try:
        newrmlcontent = str(openerp_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
    finally:
        fp.close()  # was leaked in the original
    self.pool.get('ir.actions.report.xml').write(cr, uid, [data['report_id']], {
        'report_sxw_content': base64.decodestring(data['file_sxw_upload']),
        'report_rml_content': newrmlcontent,
    })
    cr.commit()
    data_obj = self.pool.get('ir.model.data')
    id2 = data_obj._get_id(cr, uid, 'base_report_designer',
                           'view_base_report_file_rml')
    if id2:
        id2 = data_obj.browse(cr, uid, id2, context=context).res_id
    return {
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'base.report.rml.save',
        'views': [(id2, 'form')],
        'view_id': False,
        'type': 'ir.actions.act_window',
        'target': 'new',
    }
def load_demo_xml(cr, m, idref, mode):
    """Load the demo data files (.csv/.yml/.xml) declared by module
    ``m``; ``package`` and ``kwargs`` come from the enclosing scope.
    Demo data is always loaded with noupdate=True."""
    for xml in package.data.get('demo_xml', []):
        name, ext = os.path.splitext(xml)
        logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: loading %s' % (m, xml))
        fp = tools.file_open(opj(m, xml))
        try:
            if ext == '.csv':
                tools.convert_csv_import(cr, m, os.path.basename(xml), fp.read(), idref, mode=mode, noupdate=True)
            elif ext == '.yml':
                tools.convert_yaml_import(cr, m, fp, idref, mode=mode, noupdate=True, **kwargs)
            else:
                # default: OpenERP XML data file
                tools.convert_xml_import(cr, m, fp, idref, mode=mode, noupdate=True, **kwargs)
        finally:
            fp.close()
def parse(self, filename, ids, model, context=None):
    """Parse the XML report template into ``self.dom``/``self.doc`` and
    walk it for the given records.

    The template file is now closed deterministically (it was leaked;
    the sibling implementation of this method already used try/finally).
    """
    if not context:
        context = {}
    src_file = tools.file_open(filename)
    try:
        self.dom = etree.XML(src_file.read())
        self.doc = etree.Element(self.dom.tag)
        self.parse_tree(ids, model, context)
    finally:
        src_file.close()
def default_get(self, cr, uid, fields_list=None, context=None):
    """Prefill the company-setup wizard with the current company's
    name, default address, currency and the MSF logo."""
    ret = super(base_setup_company, self).default_get(cr, uid, fields_list, context)
    # Hard-coded MSF defaults when no company name is configured yet
    if not ret.get('name'):
        ret.update({'name': 'MSF', 'street': 'Rue de Lausanne 78', 'street2': 'CP 116', 'city': 'Geneva', 'zip': '1211', 'phone': '+41 (22) 849.84.00'})
    company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
    ret['name'] = company.name
    addresses = self.pool.get('res.partner').address_get(cr, uid, company.id, ['default'])
    default_id = addresses.get('default', False)
    # Default address
    if default_id:
        address = self.pool.get('res.partner.address').browse(cr, uid, default_id, context=context)
        for field in ['street','street2','zip','city','email','phone']:
            ret[field] = address[field]
        for field in ['country_id','state_id']:
            if address[field]:
                ret[field] = address[field].id
    # Currency: company currency first, EUR as fallback
    cur = self.pool.get('res.currency').search(cr, uid, [('name','=','EUR')])
    if company.currency_id:
        ret['currency'] = company.currency_id.id
    elif cur:
        ret['currency'] = cur[0]
    # Company logo shipped with the msf_profile module
    fp = tools.file_open(opj('msf_profile', 'data', 'msf.jpg'), 'rb')
    ret['logo'] = base64.encodestring(fp.read())
    fp.close()
    return ret
def _report_content(self, cursor, user, ids, name, arg, context=None):
    """Function-field getter for report template content: aeroo reports
    read their template file, other ids defer to super().

    NOTE(review): this block is cut off mid-``finally`` in this source —
    the trailing clause has no body.
    """
    res = {}
    # Only aeroo reports among the requested ids are handled here
    aeroo_ids = self.search(cursor, 1, [('report_type', '=', 'aeroo'), ('id', 'in', ids)], context=context)
    orig_ids = list(set(ids).difference(aeroo_ids))
    res = orig_ids and super(report_xml, self)._report_content(
        cursor, 1, orig_ids, name, arg, context) or {}
    for report in self.read(cursor, 1, aeroo_ids, [
        'tml_source', 'report_type', 'report_sxw_content_data', 'report_sxw'
    ], context=context):
        data = report[name + '_data']
        # Load from file when the template lives on disk or no inline
        # data is stored but a path is present
        if report['tml_source'] == 'file' or not data and report[
                name[:-8]]:
            fp = None
            try:
                fp = tools.file_open(report[name[:-8]], mode='rb')
                data = report[
                    'report_type'] == 'aeroo' and base64.encodestring(
                        fp.read()) or fp.read()
            except IOError, e:
                if e.errno == 13:
                    # Permission denied on the template file
                    raise osv.except_osv(_(e.strerror), e.filename)
                else:
                    print e
            except Exception, e:
                print e
                fp = False
                data = False
            finally:
def _report_content(self, cursor, user, ids, name, arg, context=None):
    """Function-field getter for report template content: aeroo reports
    read their template file, other ids defer to super().

    :return: {report_id: content_or_False}
    """
    res = {}
    # BUG FIX: restrict the search to the requested ids — without the
    # ('id', 'in', ids) clause (present in the sibling implementation)
    # every aeroo report in the database was processed.
    aeroo_ids = self.search(cursor, user,
                            [('report_type', '=', 'aeroo'), ('id', 'in', ids)],
                            context=context)
    orig_ids = list(set(ids).difference(aeroo_ids))
    res = super(report_xml, self)._report_content(cursor, user, orig_ids,
                                                  name, arg, context)
    for report in self.browse(cursor, user, aeroo_ids, context=context):
        data = report[name + '_data']
        # Load from file when the template lives on disk, or when no
        # inline data is stored but a template path is present.
        # (Parentheses clarify the original `A and B or C and D` which
        # already parsed this way.)
        if (report.report_type == 'aeroo' and report.tml_source == 'file') \
                or (not data and report[name[:-8]]):
            fp = None
            try:
                fp = tools.file_open(report[name[:-8]], mode='rb')
                data = report.report_type == 'aeroo' and base64.encodestring(
                    fp.read()) or fp.read()
            except Exception:
                # best-effort: unreadable template yields False
                fp = False
                data = False
            finally:
                if fp:
                    fp.close()
        res[report.id] = data
    return res
def check(self, cr, uid, ids, context=None):
    """Persist the OpenOffice connection settings and verify them by
    converting a test .odt document through the 'openoffice' service."""
    config_obj = self.pool.get('oo.config')
    data = self.read(cr, uid, ids, ['host', 'port', 'ooo_restart_cmd'])[0]
    del data['id']
    # Singleton config record: update when present, create otherwise
    config_id = config_obj.search(cr, 1, [], context=context)
    if config_id:
        config_obj.write(cr, 1, config_id, data, context=context)
    else:
        config_id = config_obj.create(cr, 1, data, context=context)
    try:
        fp = tools.file_open('report_aeroo_ooo/test_temp.odt', mode='rb')
        file_data = fp.read()
        DC = netsvc.Service._services.setdefault('openoffice', \
            OpenOffice_service(cr, data['host'], data['port']))
        with aeroo_lock:
            # round-trip the test document through the converter
            DC.putDocument(file_data)
            DC.saveByStream()
            fp.close()
            DC.closeDocument()
            del DC
    except DocumentConversionException, e:
        netsvc.Service.remove('openoffice')
        error_details = str(e)
        # NOTE(review): block appears truncated — error_details/state are
        # assigned but never used in the visible code.
        state = 'error'
def execute_simple(self, cr, uid, ids, context=None):
    """Finish simple accounting setup: optionally load the configurable
    chart of accounts and create the fiscal year (plus its periods)
    covering the selected date range."""
    if context is None:
        context = {}
    fy_obj = self.pool.get('account.fiscalyear')
    for res in self.read(cr, uid, ids, context=context):
        if 'charts' in res and res['charts'] == 'configurable':
            #load generic chart of account
            fp = tools.file_open(opj('account', 'configurable_account_chart.xml'))
            tools.convert_xml_import(cr, 'account', fp, {}, 'init', True, None)
            fp.close()
        if 'date_start' in res and 'date_stop' in res:
            # Only create a fiscal year when none covers the range yet
            f_ids = fy_obj.search(cr, uid, [('date_start', '<=', res['date_start']), ('date_stop', '>=', res['date_stop']), ('company_id', '=', res['company_id'][0])], context=context)
            if not f_ids:
                # Name/code span two years when the range crosses Dec 31
                name = code = res['date_start'][:4]
                if int(name) != int(res['date_stop'][:4]):
                    name = res['date_start'][:4] +'-'+ res['date_stop'][:4]
                    code = res['date_start'][2:4] +'-'+ res['date_stop'][2:4]
                vals = {
                    'name': name,
                    'code': code,
                    'date_start': res['date_start'],
                    'date_stop': res['date_stop'],
                    'company_id': res['company_id'][0]
                }
                fiscal_id = fy_obj.create(cr, uid, vals, context=context)
                # Monthly or quarterly periods, per the wizard choice
                if res['period'] == 'month':
                    fy_obj.create_period(cr, uid, [fiscal_id])
                elif res['period'] == '3months':
                    fy_obj.create_period3(cr, uid, [fiscal_id])
def check(self, cr, uid, ids, context=None):
    """Persist the OpenOffice connection settings and verify them by
    converting a test .odt document through the 'openoffice' service."""
    config_obj = self.pool.get('oo.config')
    data = self.read(cr, uid, ids, ['host', 'port', 'ooo_restart_cmd'])[0]
    del data['id']
    # Singleton config record: update when present, create otherwise
    config_id = config_obj.search(cr, 1, [], context=context)
    if config_id:
        config_obj.write(cr, 1, config_id, data, context=context)
    else:
        config_id = config_obj.create(cr, 1, data, context=context)
    try:
        fp = tools.file_open('report_aeroo_ooo/test_temp.odt', mode='rb')
        file_data = fp.read()
        DC = netsvc.Service._services.setdefault(
            'openoffice', OpenOffice_service(cr, data['host'], data['port']))
        with aeroo_lock:
            # round-trip the test document through the converter
            DC.putDocument(file_data)
            DC.saveByStream()
            fp.close()
            DC.closeDocument()
            del DC
    except DocumentConversionException, e:
        netsvc.Service.remove('openoffice')
        error_details = str(e)
        # NOTE(review): block appears truncated — error_details/state are
        # assigned but never used in the visible code.
        state = 'error'
def upload_report(self, cr, uid, ids, context=None):
    """Convert the uploaded .sxw template to RML, store both on the
    target report, then open the 'save RML' dialog.

    :return: act_window descriptor for base.report.rml.save
    """
    from base_report_designer import openerp_sxw2rml
    import StringIO
    data = self.read(cr, uid, ids)[0]
    sxwval = StringIO.StringIO(base64.decodestring(
        data['file_sxw_upload']))
    fp = tools.file_open(
        'normalized_oo2rml.xsl',
        subdir='addons/base_report_designer/openerp_sxw2rml')
    try:
        newrmlcontent = str(openerp_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
    finally:
        fp.close()  # was leaked in the original
    self.pool.get('ir.actions.report.xml').write(
        cr, uid, [data['report_id']], {
            'report_sxw_content': base64.decodestring(
                data['file_sxw_upload']),
            'report_rml_content': newrmlcontent,
        })
    cr.commit()
    data_obj = self.pool.get('ir.model.data')
    id2 = data_obj._get_id(cr, uid, 'base_report_designer',
                           'view_base_report_file_rml')
    if id2:
        id2 = data_obj.browse(cr, uid, id2, context=context).res_id
    return {
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'base.report.rml.save',
        'views': [(id2, 'form')],
        'view_id': False,
        'type': 'ir.actions.act_window',
        'target': 'new',
    }
def check(self, cr, uid, ids, context=None):
    """Persist the LibreOffice settings and verify them by converting a
    test .odt document to PDF through the 'openoffice' service."""
    config_obj = self.pool['oo.config']
    data = self.read(cr, uid, ids, ['soffice', 'dir_tmp'])[0]
    del data['id']
    # Singleton config record: update when present, create otherwise
    config_id = config_obj.search(cr, 1, [], context=context)
    if config_id:
        config_obj.write(cr, 1, config_id, data, context=context)
    else:
        config_id = config_obj.create(cr, 1, data, context=context)
    try:
        with tools.file_open('report_aeroo_loffice/test_temp.odt', mode='rb') as fp:
            file_data = fp.read()
        DC = netsvc.Service._services.setdefault(
            'openoffice',
            OpenOffice_service(cr, data['soffice'], data['dir_tmp']))
        with aeroo_lock:
            # round-trip the test document through the converter
            DC.putDocument(file_data)
            data = DC.saveByStream(u'writer_pdf_Export')
            DC.closeDocument()
            del DC
    except DocumentConversionException, e:
        netsvc.Service.remove('openoffice')
        error_details = str(e)
        # NOTE(review): block appears truncated — error_details/state are
        # assigned but never used in the visible code.
        state = 'error'
def create_zipcodes(self, cr, uid, context=None):
    """Import spanish zipcodes information through an XML file."""
    file_name = 'l10n_es_toponyms_zipcodes.xml'
    try:
        fp = tools.file_open(os.path.join('l10n_es_toponyms', os.path.join('wizard', file_name)))
    except IOError, e:
        # File not shipped/readable: signal by fp = None
        # NOTE(review): the block appears truncated after this point.
        fp = None
def _run_test(self, cr, module_name, filename):
    """Execute one test data file (.sql/.csv/.yml/.xml) against ``cr``.

    The file handle is now closed on every path (it was leaked).
    """
    _, ext = os.path.splitext(filename)
    pathname = os.path.join(module_name, filename)
    open_file = tools.file_open(pathname)
    try:
        if ext == '.sql':
            # run each ';'-separated statement, whitespace-normalized
            queries = open_file.read().split(';')
            for query in queries:
                new_query = ' '.join(query.split())
                if new_query:
                    cr.execute(new_query)
        elif ext == '.csv':
            tools.convert_csv_import(cr, module_name, pathname,
                                     open_file.read(), idref=None,
                                     mode='update', noupdate=False)
        elif ext == '.yml':
            tools.convert_yaml_import(cr, module_name, open_file,
                                      idref=None, mode='update',
                                      noupdate=False)
        else:
            tools.convert_xml_import(cr, module_name, open_file,
                                     idref=None, mode='update',
                                     noupdate=False)
    finally:
        open_file.close()
def export_code_terms_from_file(fname, path, root, terms_type):
    """Scan one source file for translatable _("...") / _('...') string
    literals and push each one onto the translation list.

    Closure variables from the enclosing scope: ``mod_paths``,
    ``installed_modules``, ``modules``, ``re_dquotes``, ``re_quotes``,
    ``join_dquotes``, ``join_quotes``, ``push_translation``, ``encode``.
    """
    fabsolutepath = join(root, fname)
    frelativepath = fabsolutepath[len(path):]
    module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
    is_mod_installed = module in installed_modules
    if (('all' in modules) or (module in modules)) and is_mod_installed:
        logger.debug("Scanning code of %s at module: %s", frelativepath, module)
        src_file = tools.file_open(fabsolutepath, subdir='')
        try:
            code_string = src_file.read()
        finally:
            src_file.close()
        if module in installed_modules:
            frelativepath = str("addons" + frelativepath)
        # --- pass 1: double-quoted literals
        ite = re_dquotes.finditer(code_string)
        code_offset = 0
        code_line = 1
        for i in ite:
            src = i.group(1)
            if src.startswith('""'):
                assert src.endswith(
                    '""'
                ), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (
                    frelativepath, src[:30])
                src = src[2:-2]
            else:
                src = join_dquotes.sub(r'\1', src)
            # try to count the lines from the last pos to our place:
            code_line += code_string[code_offset:i.start(1)].count('\n')
            # now, since we did a binary read of a python source file, we
            # have to expand pythonic escapes like the interpreter does.
            src = src.decode('string_escape')
            push_translation(module, terms_type, frelativepath, code_line, encode(src))
            code_line += i.group(1).count('\n')
            code_offset = i.end()  # we have counted newlines up to the match end
        # --- pass 2: single-quoted literals
        ite = re_quotes.finditer(code_string)
        code_offset = 0  #reset counters
        code_line = 1
        for i in ite:
            src = i.group(1)
            if src.startswith("''"):
                assert src.endswith(
                    "''"
                ), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (
                    frelativepath, src[:30])
                src = src[2:-2]
            else:
                src = join_quotes.sub(r'\1', src)
            code_line += code_string[code_offset:i.start(1)].count('\n')
            src = src.decode('string_escape')
            push_translation(module, terms_type, frelativepath, code_line, encode(src))
            code_line += i.group(1).count('\n')
            code_offset = i.end()  # we have counted newlines up to the match end
def _add_header(self, node):
    """Merge the corporate RML header into ``node``: each top-level
    element of the header either replaces the same-named node or, when
    flagged position="inside", is appended inside it."""
    nom=self.__class__.__name__
    try:
        # Prefer a header specific to this report class, fall back to
        # the generic corporate header on any failure.
        rml_head = tools.file_open('custom/'+nom+'.header.rml',subdir='addons/report_account_fr/report').read()
    except:
        rml_head = tools.file_open('custom/corporate_rml_header.rml').read()
    head_dom = xml.dom.minidom.parseString(rml_head)
    node2 = head_dom.documentElement
    for tag in node2.childNodes:
        if tag.nodeType==tag.ELEMENT_NODE:
            found = self._find_node(node, tag.localName)
            if found:
                if tag.hasAttribute('position') and (tag.getAttribute('position')=='inside'):
                    found.appendChild(tag)
                else:
                    found.parentNode.replaceChild(tag, found)
    return True
def _get_image(self, cr, uid, context=None):
    """Return the module banner image as a base64-encoded string."""
    path = os.path.join('report_aeroo_ooo', 'config_pixmaps',
                        'module_banner.png')
    # was: `image_file = file_data = tools.file_open(...)` — the
    # file_data alias was pointless and immediately overwritten
    image_file = tools.file_open(path, 'rb')
    try:
        return base64.encodestring(image_file.read())
    finally:
        image_file.close()
def _get_image(self, cr, uid, context=None):
    """Return one of the base config pixmaps (picked at random) as a
    base64-encoded string."""
    path = os.path.join('base', 'res', 'config_pixmaps',
                        '%d.png' % random.randrange(1, 4))
    # was: `image_file = file_data = tools.file_open(...)` — the
    # file_data alias was pointless and immediately overwritten
    image_file = tools.file_open(path, 'rb')
    try:
        return base64.encodestring(image_file.read())
    finally:
        image_file.close()
def create_rml(self, cr, xml, uid, context=None):
    """Apply the report's XSL stylesheet (if any) to the raw XML,
    translating translatable nodes first, and return the resulting RML.

    :param xml: serialized XML produced by the report engine
    :return: transformed string, or ``xml`` unchanged when no XSL is set
    """
    if self.tmpl=='' and not self.internal_header:
        self.internal_header=True
    if not context:
        context={}
    pool = pooler.get_pool(cr.dbname)
    ir_translation_obj = pool.get('ir.translation')
    # In some case we might not use xsl ...
    if not self.xsl:
        return xml
    stylesheet_file = tools.file_open(self.xsl)
    try:
        stylesheet = etree.parse(stylesheet_file)
        xsl_path, _ = os.path.split(self.xsl)
        # Rewrite <import href="..."> references to resolved paths
        for import_child in stylesheet.findall('./import'):
            if 'href' in import_child.attrib:
                imp_file = import_child.get('href')
                _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                import_child.set('href', urllib.quote(str(imp_file)))
                # NOTE(review): with pathinfo=True ``imp_file`` looks like
                # a path string, so .close() on it seems wrong — confirm
                # the return convention of tools.file_open.
                imp_file.close()
    finally:
        stylesheet_file.close()
    #TODO: get all the translation in one query. That means we have to:
    # * build a list of items to translate,
    # * issue the query to translate them,
    # * (re)build/update the stylesheet with the translated items
    def translate(doc, lang):
        # Replace the text of every node carrying a 't' attribute by its
        # translation, when one exists.
        for node in doc.xpath('//*[@t]'):
            if not node.text:
                continue
            translation = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, node.text)
            if translation:
                node.text = translation
    if context.get('lang', False):
        translate(stylesheet, context['lang'])
    transform = etree.XSLT(stylesheet)
    xml = etree.tostring( transform(etree.fromstring(xml)))
    return xml
def create_states(self, cr, uid, state_type, context=None):
    """It imports spanish states information trough an XML file."""
    # Pick the data file matching the requested state-name style
    file_name = 'l10n_es_toponyms_states_%s.xml' % state_type
    try:
        fp = tools.file_open(
            os.path.join('l10n_es_toponyms', os.path.join('wizard', file_name)))
    except IOError, e:
        # File not shipped/readable: signal by fp = None
        # NOTE(review): the block appears truncated after this point.
        fp = None
def _get_image(self, cr, uid, context=None):
    """Return one of the base config pixmaps (picked at random) as a
    base64-encoded string."""
    path = os.path.join('base', 'res', 'config_pixmaps',
                        '%d.png' % random.randrange(1, 4))
    # was: `image_file = file_data = tools.file_open(...)` — the
    # file_data alias was pointless and immediately overwritten
    image_file = tools.file_open(path, 'rb')
    try:
        return base64.encodestring(image_file.read())
    finally:
        image_file.close()
def _get_header(self,cr,uid,ids):
    """Return the corporate RML header, falling back to an inline
    default header string on any failure.

    NOTE(review): the fallback triple-quoted literal is cut off in this
    source — the block is truncated.
    """
    try :
        header_file = tools.file_open(os.path.join('base', 'report', 'corporate_rml_header.rml'))
        try:
            return header_file.read()
        finally:
            header_file.close()
    except:
        return """
def create_zipcodes(self, cr, uid, context=None):
    """Import spanish zipcodes information through an XML file."""
    file_name = 'l10n_es_toponyms_zipcodes.xml'
    try:
        fp = tools.file_open(
            os.path.join('l10n_es_toponyms', os.path.join('wizard', file_name)))
    except IOError, e:
        # File not shipped/readable: signal by fp = None
        # NOTE(review): the block appears truncated after this point.
        fp = None
def _get_header(self, cr, uid, ids):
    """Return the corporate RML header file content, falling back to
    the built-in A4 header when the file cannot be read."""
    rml_path = os.path.join('base', 'report', 'corporate_rml_header.rml')
    try:
        fp = tools.file_open(rml_path)
        try:
            content = fp.read()
        finally:
            fp.close()
        return content
    except:
        # any failure (missing file, read error) falls back to A4
        return self._header_a4
def action_generate(self,cr,uid,ids,context=None):
    """Import a BOB accounting database: convert the Paradox .DB files
    to CSV with pxview, then load the generated CSV files in dependency
    order (accounts, journals, partners, fiscal years, periods,
    reconciliations, moves, move lines)."""
    # Check for PXVIEW availabilty and convert .db to .csv
    path = self.pool.get('config.path.folder').read(cr, uid, ids[0],['folder'],context)[0]['folder']
    tmp = path.split('/')
    folder_name = tmp[len(tmp)-1]
    for file in ['DBK.DB','ACCOUN.DB','COMPAN.DB','CONTACTS.DB','PERIOD.DB','VATCAS.DB','VAT.DB','AHISTO.DB']:
        #TODO: improve for using either the capital letters or no
        # NOTE(review): shell command built by concatenation from a
        # configured path — assumed trusted; verify.
        cmd = 'pxview '+path+'/'+folder_name+file+' -c > ' + config['addons_path']+'/account_bob_import/original_csv/'+file.split('.')[0].lower()+'.csv'
        res = os.system(cmd)
        # CONTACTS.DB is optional; any other failure aborts the import
        if res != 0 and file != 'CONTACTS.DB':
            raise osv.except_osv(_('Error Occured'), _('An error occured when importing the file "%s". Please check that pxview is correclty installed on the server.')% file)
    import bob_import_step_2
    bob_import_step_2.run()
    filename = config['addons_path']+'/account_bob_import/account.account.csv'
    config.__setitem__('import_partial', 'bob.pickle')
    #deactivate the parent_store functionnality on account_account for rapidity purpose
    self.pool._init = True
    convert.convert_csv_import(cr, 'account_bob_import', 'account.account.csv', tools.file_open(filename).read())
    #reactivate the parent_store functionnality on account_account
    self.pool.get('account.account')._parent_store_compute(cr)
    filename = config['addons_path']+'/account_bob_import/account.journal.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.journal.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/res.partner.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'res.partner.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/res.partner.bank.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'res.partner.bank.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/res.partner.job.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'res.partner.job.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/account.fiscalyear.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.fiscalyear.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/account.period.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.period.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/account.move.reconcile-1.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.move.reconcile-1.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/account.move.reconcile-2.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.move.reconcile-2.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/account.move.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.move.csv', tools.file_open(filename).read())
    filename = config['addons_path']+'/account_bob_import/account.move.line.csv'
    convert.convert_csv_import(cr, 'account_bob_import', 'account.move.line.csv', tools.file_open(filename).read())
    self.pool._init = False
    #TODO: modify the name of account_bob_import.account_bob_0 into the name of company
    #TODO: some check to prevent errors: is file empty? add try-catch statements?
    #TODO: chisto and ahisto_matching .csv file
    return {
        'view_type': 'form',
        "view_mode": 'form',
        'res_model': 'ir.actions.configuration.wizard',
        'type': 'ir.actions.act_window',
        'target':'new',
    }
def get_modules_with_version():
    """Return {module_name: '<major>.<module version>'} for every module
    whose __terp__.py descriptor can be read and evaluated.

    BUG FIX: the original built ``res`` but never returned it (implicit
    None); also the descriptor file handle was leaked.
    """
    modules = get_modules()
    res = {}
    for module in modules:
        terp = get_module_resource(module, '__terp__.py')
        try:
            fp = tools.file_open(terp)
            try:
                # NOTE: descriptor is eval()'ed — trusted input only
                info = eval(fp.read())
            finally:
                fp.close()
            res[module] = "%s.%s" % (release.major_version, info['version'])
        except Exception:
            # unreadable/invalid descriptor: skip this module
            continue
    return res
def init(self, cr):
    """Load msf_cross_docking_data.xml before self."""
    if hasattr(super(stock_picking, self), 'init'):
        super(stock_picking, self).init(cr)
    logging.getLogger('init').info('HOOK: module msf_cross_docking: loading data/msf_msf_cross_docking_data.xml')
    pathname = path.join('msf_cross_docking', 'data/msf_cross_docking_data.xml')
    # renamed from `file` (shadowed the builtin) and closed when done
    fp = tools.file_open(pathname)
    try:
        tools.convert_xml_import(cr, 'msf_cross_docking', fp, {}, mode='init', noupdate=False)
    finally:
        fp.close()
def read_image(self, path):
    """Return the base64-encoded content of a module resource given as
    'module,relative/path', or False when the resource is not found.

    BUG FIX: the original opened the file inside the try block, so a
    failed open left ``icon_file`` unbound and the finally clause raised
    NameError, masking the real error.
    """
    path_info = path.split(',')
    icon_path = addons.get_module_resource(path_info[0], path_info[1])
    icon_image = False
    if icon_path:
        # open before the try so the finally only runs with a valid handle
        icon_file = tools.file_open(icon_path, 'rb')
        try:
            icon_image = base64.encodestring(icon_file.read())
        finally:
            icon_file.close()
    return icon_image
def parse(self, filename, ids, model, context=None):
    """Load the XML report template into ``self.dom``/``self.doc`` and
    walk it for the given records."""
    context = context or {}
    # read the whole template, closing the handle deterministically
    template = tools.file_open(filename)
    try:
        content = template.read()
    finally:
        template.close()
    self.dom = etree.XML(content)
    self.doc = etree.Element(self.dom.tag)
    self.parse_tree(ids, model, context)
def get_module_info(self, name):
    """Read a module's ``__terp__.py`` descriptor and return it as a dict.

    The 'version' entry, when present, is prefixed with the server's major
    version.  Returns {} when the descriptor is missing or unreadable
    (best-effort contract of the original).
    """
    try:
        f = tools.file_open(os.path.join(name, '__terp__.py'))
        try:
            data = f.read()
        finally:
            # BUG FIX: the original only closed the handle on the happy
            # path, leaking it whenever read()/eval() raised.
            f.close()
        info = eval(data)
        if 'version' in info:
            info['version'] = release.major_version + '.' + info['version']
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C/SystemExit propagate.
        return {}
    return info
def _upload_report(self, cr, uid, data, context):
    """Wizard callback: convert the uploaded SXW and store it on the report.

    Decodes the base64 SXW payload from the wizard form, converts it to RML
    through the normalized_oo2rml.xsl stylesheet, and writes both contents
    on the selected ir.actions.report.xml record.
    """
    import tiny_sxw2rml
    import StringIO
    pool = pooler.get_pool(cr.dbname)
    sxwval = StringIO.StringIO(base64.decodestring(data['form']['file_sxw']))
    fp = tools.file_open('normalized_oo2rml.xsl',
                         subdir='addons/base_report_designer/wizard/tiny_sxw2rml')
    try:
        rml_content = str(tiny_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
    finally:
        # BUG FIX: the stylesheet handle was never closed in the original.
        fp.close()
    pool.get('ir.actions.report.xml').write(cr, uid, [data['form']['report_id']], {
        'report_sxw_content': base64.decodestring(data['form']['file_sxw']),
        'report_rml_content': rml_content,
    })
    return {}
def _relaxng(self):
    """Return the cached RelaxNG validator for view architectures,
    building it lazily from base/rng/view.rng on first use."""
    if self._relaxng_validator:
        return self._relaxng_validator
    frng = tools.file_open(os.path.join('base', 'rng', 'view.rng'))
    try:
        self._relaxng_validator = etree.RelaxNG(etree.parse(frng))
    except Exception:
        _logger.exception('Failed to load RelaxNG XML schema for views validation')
    finally:
        frng.close()
    return self._relaxng_validator
def upload_report(self, cr, uid, report_id, file_sxw, file_type, context):
    ''' Untested function

    Store an uploaded report template (sxw/odt/html, base64-encoded in
    ``file_sxw``) on the given ir.actions.report.xml record, converting
    office formats to RML, then re-register all report services.
    '''
    pool = pooler.get_pool(cr.dbname)
    sxwval = StringIO(base64.decodestring(file_sxw))
    # Pick the conversion matching the uploaded file type.
    if file_type == 'sxw':
        fp = tools.file_open('normalized_oo2rml.xsl',
                             subdir='addons/base_report_designer/wizard/tiny_sxw2rml')
        rml_content = str(sxw2rml(sxwval, xsl=fp.read()))
    if file_type == 'odt':
        fp = tools.file_open('normalized_odt2rml.xsl',
                             subdir='addons/base_report_designer/wizard/tiny_sxw2rml')
        rml_content = str(sxw2rml(sxwval, xsl=fp.read()))
    # BUG FIX: 'html' uploads previously fell through with `fp` unbound and
    # crashed on fp.read(); store the raw decoded content instead,
    # consistent with the newer upload_report implementation in this
    # module family.
    if file_type == 'html':
        rml_content = base64.decodestring(file_sxw)
    pool.get('ir.actions.report.xml').write(cr, uid, [report_id], {
        'report_sxw_content': base64.decodestring(file_sxw),
        'report_rml_content': rml_content,
    })
    cr.commit()
    db = pooler.get_db_only(cr.dbname)
    interface.register_all(db)
    return True
def action_create(self, cr, uid, ids, *args):
    # Run the standard menu-configuration creation, then import the
    # vertical-view XML of every CRM section ticked in the wizard form.
    res=super(crm_menu_config_wizard, self).action_create(cr, uid, ids, *args)
    # NOTE(review): the loop variable deliberately shadows the `res`
    # returned by the super call; each dict from read() is processed in turn.
    for res in self.read(cr,uid,ids):
        res.__delitem__('id')
        # Remaining keys are section flags: a truthy value means the user
        # selected that section, so its vertical view file must be loaded.
        for section in res :
            if res[section]:
                file_name = 'crm_'+section+'_vertical_view.xml'
                try:
                    tools.convert_xml_import(cr, 'crm_configuration',
                        tools.file_open(os.path.join('crm_vertical',file_name )),
                        {}, 'init', *args)
                except Exception, e:
                    # Surface import failures to the user as a UI error.
                    raise osv.except_osv(_('Error !'), str(e))
def export_code_terms_from_file(fname, path, root, terms_type):
    # Scan one source file for translatable strings wrapped in _("...") /
    # _('...') and push each term (with module, file and line info) into
    # the translation accumulator via push_translation().
    fabsolutepath = join(root, fname)
    frelativepath = fabsolutepath[len(path) :]
    module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
    is_mod_installed = module in installed_modules
    # Only scan files belonging to a requested AND installed module.
    if (("all" in modules) or (module in modules)) and is_mod_installed:
        logger.debug("Scanning code of %s at module: %s", frelativepath, module)
        src_file = tools.file_open(fabsolutepath, subdir="")
        try:
            code_string = src_file.read()
        finally:
            src_file.close()
        if module in installed_modules:
            frelativepath = str("addons" + frelativepath)
        # --- Pass 1: double-quoted _("...") terms -----------------------
        ite = re_dquotes.finditer(code_string)
        code_offset = 0
        code_line = 1
        for i in ite:
            src = i.group(1)
            if src.startswith('""'):
                # Triple-quoted literal: strip the extra quote pair.
                assert src.endswith('""'), (
                    "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s"
                    % (frelativepath, src[:30])
                )
                src = src[2:-2]
            else:
                # Merge adjacent-literal concatenation ("a" "b" -> "ab").
                src = join_dquotes.sub(r"\1", src)
            # try to count the lines from the last pos to our place:
            code_line += code_string[code_offset : i.start(1)].count("\n")
            # now, since we did a binary read of a python source file, we
            # have to expand pythonic escapes like the interpreter does.
            src = src.decode("string_escape")
            push_translation(module, terms_type, frelativepath, code_line, encode(src))
            code_line += i.group(1).count("\n")
            code_offset = i.end()  # we have counted newlines up to the match end
        # --- Pass 2: single-quoted _('...') terms (same algorithm) ------
        ite = re_quotes.finditer(code_string)
        code_offset = 0  # reset counters
        code_line = 1
        for i in ite:
            src = i.group(1)
            if src.startswith("''"):
                assert src.endswith("''"), (
                    "Incorrect usage of _(..) function (should contain only literal strings!) \nin file %s near: %s"
                    % (frelativepath, src[:30])
                )
                src = src[2:-2]
            else:
                src = join_quotes.sub(r"\1", src)
            code_line += code_string[code_offset : i.start(1)].count("\n")
            src = src.decode("string_escape")
            push_translation(module, terms_type, frelativepath, code_line, encode(src))
            code_line += i.group(1).count("\n")
            code_offset = i.end()  # we have counted newlines up to the match end
def read_image(self, path):
    """Return the base64-encoded content of a module resource image.

    :param path: string of the form "module,relative/path/to/icon"
    :return: base64 string of the file content, or False when the
             resource cannot be located.
    """
    path_info = path.split(',')
    icon_path = addons.get_module_resource(path_info[0], path_info[1])
    icon_image = False
    if icon_path:
        # BUG FIX: open the file *before* the try/finally; the original
        # raised NameError in the finally clause when tools.file_open()
        # itself failed, masking the real error.
        icon_file = tools.file_open(icon_path, 'rb')
        try:
            icon_image = base64.encodestring(icon_file.read())
        finally:
            icon_file.close()
    return icon_image
def execute(self, cr, uid, ids, context=None):
    # Apply the l10n_es toponyms configuration chosen in the wizard: open
    # the Spanish state list matching the selected naming convention
    # ('official', Spanish or both, per the wizard's `state` selection).
    # NOTE(review): this block ends right after obtaining `fp`; the
    # import of the opened file presumably happens in code outside this
    # chunk -- confirm against the full module source.
    if context is None:
        context = {}
    super(config_es_toponyms, self).execute(cr, uid, ids, context=context)
    res = self.read(cr, uid, ids)[0]
    # Import Spanish states (official, Spanish or both)
    file_name = 'l10n_es_toponyms_states_' + res['state'] + '.xml'
    try:
        fp = tools.file_open(os.path.join('l10n_es_toponyms', file_name))
    except IOError, e:
        # Data file for this variant not shipped: fall back to no import.
        fp = None
def _check_xml(self, cr, uid, ids, context=None):
    """Constraint helper: validate each view's arch against view.rng.

    Logs every RelaxNG error and returns False as soon as one view fails
    validation; returns True when all views conform.
    """
    logger = logging.getLogger('init')
    # BUG FIX: `context={}` mutable default replaced with None (shared
    # dict across calls); the RNG handle is now closed (was leaked), and
    # the schema is parsed once instead of once per view.
    frng = tools.file_open(os.path.join('base', 'rng', 'view.rng'))
    try:
        relaxng_doc = etree.parse(frng)
    finally:
        frng.close()
    relaxng = etree.RelaxNG(relaxng_doc)
    for view in self.browse(cr, uid, ids, context):
        eview = etree.fromstring(view.arch.encode('utf8'))
        if not relaxng.validate(eview):
            for error in relaxng.error_log:
                logger.error(tools.ustr(error))
            return False
    return True
def _send_mail(self, cr, uid, data, context):
    # Wizard action: select the month's unsent certificates, dump them to
    # a text file, mail it to the federation, then stamp each certificate
    # with today's sending date.
    # Check of the first email address given by the user
    ptrn = re.compile('(\w+@\w+(?:\.\w+)+)')
    result=ptrn.search(data['form']['email_to'])
    if result==None:
        raise wizard.except_wizard('Error !', 'Enter Valid Destination E-Mail Address.')
    # Check of the first second email address given by the user
    ptrn = re.compile('(\w+@\w+(?:\.\w+)+)')
    result=ptrn.search(data['form']['email_rcp'])
    if result==None:
        raise wizard.except_wizard('Error !', 'Enter Valid Reception E-Mail Address.')
    # Determine the first and last date to select
    month=data['form']['month']
    year=int(data['form']['year'])
    self.first_day=datetime.date(year,int(month),1)
    self.last_day=datetime.date(year,int(month),lengthmonth(year, int(month)))
    # SQL fragment "to_date(...) and to_date(...)" completing "between".
    period="to_date('" + self.first_day.strftime('%Y-%m-%d') + "','yyyy-mm-dd') and to_date('" + self.last_day.strftime('%Y-%m-%d') +"','yyyy-mm-dd')"
    #determine the type of certificates to send
    certificate_type = data['form']['cert_type']
    cancel_clause = not(data['form']['canceled']) and " and b.state not in ('cancel_customer','cancel_cci')" or ''
    # NOTE(review): the query is assembled by string interpolation rather
    # than cr.execute parameters; values come from the wizard form but
    # this is still an SQL-injection risk worth fixing.
    query = 'select a.id from cci_missions_certificate as a, cci_missions_dossier as b where ( a.dossier_id = b.id ) and ( a.sending_spf is null ) and ( b.type_id = %s ) and ( b.date between %s )' + cancel_clause
    #Extraction of corresponding certificates
    cr.execute(query % (certificate_type,period))
    res_file1=cr.fetchall()
    #If no records, cancel of the flow
    if res_file1==[]:
        raise wizard.except_wizard('Notification !', 'No Records Found to be sended. \nCheck your criteria.')
    lines=[]
    root_path=tools.config.options['root_path']
    if res_file1:
        lines=self.make_lines(cr, uid, res_file1, data )
    self.write_txt(root_path+'/certificates.txt',lines)
    # Sending of the file as attachment
    files_attached=[]
    file1=tools.file_open(root_path+'/certificates.txt','rb',subdir=None)
    files_attached=[('certificates.txt',file1.read())]
    # SMTP sender configured when launching the server or in bin/tools/config.py
    src = tools.config.options['smtp_user']
    dest = [data['form']['email_to']]
    body = "Hello,\nHere are the certificates files for Federation.\nThink Big Use Tiny."
    tools.email_send(src,dest,"Federation Sending Files From TinyERP",body,attach=files_attached)
    # Mark all exported certificates as sent today.
    pool = pooler.get_pool(cr.dbname)
    certificates_ids = [x[0] for x in res_file1]
    obj_certificate = pool.get('cci_missions.certificate')
    obj_certificate.write(cr, uid, certificates_ids,{'sending_spf':time.strftime('%Y-%m-%d')})
    return {}
def _remove_prefix_ref(self, fromurl, path):
    """Return the XML at fromurl/path with module prefixes stripped
    from ``ref`` attributes.

    Any <field ref="module.xml_id"> whose module part is listed in
    self.dependencies is rewritten to the bare xml_id.
    """
    infile = tools.file_open(os.path.join(fromurl, path))
    try:
        xml = infile.read()
    finally:
        # BUG FIX: the original never closed the file handle.
        infile.close()
    mydom = minidom.parseString(xml)
    for child in mydom.getElementsByTagName("field"):
        for attr in child.attributes.keys():
            if attr == 'ref':
                old = child.getAttribute(attr)
                parts = old.split('.')
                # Only strip a "module." prefix for known dependencies.
                if len(parts) > 1 and parts[0] in self.dependencies:
                    child.setAttribute(attr, parts[1])
    return mydom.toxml()
def _report_content(self, cursor, user, ids, name, arg, context=None):
    """Function-field getter: report content for each record.

    Prefers the inline ``<name>_data`` value; otherwise reads the file
    referenced by the companion path field (field name minus '_content').
    Falls back to False when the file cannot be read (best-effort, as in
    the original).
    """
    res = {}
    for report in self.browse(cursor, user, ids, context=context):
        data = report[name + '_data']
        if not data and report[name[:-8]]:
            try:
                fp = tools.file_open(report[name[:-8]], mode='rb')
                try:
                    data = fp.read()
                finally:
                    # BUG FIX: close the handle (leaked on every call).
                    fp.close()
            except Exception:
                # Narrowed from bare `except:`; unreadable file -> no content.
                data = False
        res[report.id] = data
    return res
def create(self, cr, uid, ids, data, context=None):
    """Entry point of Aeroo report generation.

    Resolves the report definition by service name, clears any cached
    template payloads, duplicates ``ids`` when several copies are
    requested, then dispatches to the renderer matching report_type.
    Raises Exception for unknown report types.
    """
    data['model'] = context['active_model']
    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get('ir.actions.report.xml')
    name = self.name.startswith('report.') and self.name[7:] or self.name
    report_xml_ids = ir_obj.search(cr, uid, [('report_name', '=', name)], context=context)
    if report_xml_ids:
        report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
        # Clear cached template payloads on the browse record so the
        # engine re-reads them from the stored template.
        report_xml.report_rml = None
        report_xml.report_rml_content = None
        report_xml.report_sxw_content_data = None
        # BUG FIX: the next two attributes were set on the undefined name
        # `report_rml`, raising NameError on every call through here.
        report_xml.report_sxw_content = None
        report_xml.report_sxw = None
        copies_ids = []
        # BUG FIX: `report_xml>1` compared a browse record with an int
        # (always True in Python 2); the intent is clearly a copies check.
        if not report_xml.report_wizard and report_xml.copies > 1:
            while(report_xml.copies):
                copies_ids.extend(ids)
                report_xml.copies -= 1
            ids = copies_ids or ids
    else:
        # No stored definition: build a minimal ad-hoc report object from
        # the template file declared on the service.
        title = ''
        report_file = tools.file_open(self.tmpl)
        try:
            rml = report_file.read()
            report_type = data.get('report_type', 'pdf')
            class a(object):
                def __init__(self, *args, **argv):
                    for key, arg in argv.items():
                        setattr(self, key, arg)
            report_xml = a(title=title, report_type=report_type, report_rml_content=rml,
                           name=title, attachment=False, header=self.header, process_sep=False)
        finally:
            report_file.close()
    # Dispatch on output format.
    report_type = report_xml.report_type
    if report_type in ['sxw', 'odt']:
        fnct = self.create_source_odt
    elif report_type in ['pdf', 'raw', 'txt', 'html']:
        fnct = self.create_source_pdf
    elif report_type == 'html2html':
        fnct = self.create_source_html2html
    elif report_type == 'mako2html':
        fnct = self.create_source_mako2html
    elif report_type == 'aeroo':
        if report_xml.out_format.code in ['oo-pdf']:
            fnct = self.create_source_pdf
        elif report_xml.out_format.code in ['oo-odt', 'oo-ods', 'oo-doc', 'oo-xls', 'oo-csv', 'genshi-raw']:
            fnct = self.create_source_odt
        else:
            return super(Aeroo_report, self).create(cr, uid, ids, data, context)
    else:
        raise Exception('Unknown Report Type')
    return fnct(cr, uid, ids, data, report_xml, context)
def _relaxng(self):
    """Lazy accessor for the shared RelaxNG validator built from
    base/rng/view.rng; the result is cached on the class."""
    if not self._relaxng_validator:
        schema_file = tools.file_open(os.path.join('base', 'rng', 'view.rng'))
        try:
            schema_doc = etree.parse(schema_file)
            self._relaxng_validator = etree.RelaxNG(schema_doc)
        except Exception:
            _logger.exception(
                'Failed to load RelaxNG XML schema for views validation')
        finally:
            # The handle is released whether or not parsing succeeded.
            schema_file.close()
    return self._relaxng_validator
def load_information_from_description_file(module):
    """Return the module's descriptor dict, or {} when none exists.

    :param module: The name of the module (sale, purchase, ...)

    Tries ``__openerp__.py`` first, then the deprecated ``__terp__.py``.
    """
    for filename in ['__openerp__.py', '__terp__.py']:
        description_file = get_module_resource(module, filename)
        if os.path.isfile(description_file):
            # BUG FIX: the original never closed the descriptor handle.
            f = tools.file_open(description_file)
            try:
                return eval(f.read())
            finally:
                f.close()
    #TODO: refactor the logger in this file to follow the logging guidelines
    # for 6.0
    logging.getLogger('addons').debug('The module %s does not contain a description file:'\
        '__openerp__.py or __terp__.py (deprecated)', module)
    return {}