def update_translations(self, cr, uid, ids, filter_lang=None):
    """Reload the i18n/.po files of the given installed modules.

    When no language filter is supplied, every language flagged as
    translatable in res.lang is (re)loaded.
    """
    log = netsvc.Logger()
    # Normalise the language filter into a list of language codes.
    if not filter_lang:
        registry = pooler.get_pool(cr.dbname)
        languages = registry.get('res.lang')
        translatable_ids = languages.search(cr, uid, [('translatable', '=', True)])
        filter_lang = [record.code for record in languages.browse(cr, uid, translatable_ids)]
    elif not isinstance(filter_lang, (list, tuple)):
        filter_lang = [filter_lang]
    for module in self.browse(cr, uid, ids):
        if module.state != 'installed':
            continue
        module_path = addons.get_module_path(module.name)
        if not module_path:
            # unable to find the module. we skip
            continue
        for lang in filter_lang:
            # Codes are at most 'll_CC' (5 chars); anything longer is malformed.
            if len(lang) > 5:
                raise osv.except_osv(_('Error'), _('You Can Not Load Translation For language Due To Invalid Language/Country Code'))
            iso_lang = tools.get_iso_codes(lang)
            po_path = os.path.join(module_path, 'i18n', iso_lang + '.po')
            # Fall back to the base language file (es.po for es_CL) when the
            # dialect file is missing.
            if not os.path.exists(po_path) and '_' in iso_lang:
                iso_lang = iso_lang.split('_')[0]
                po_path = os.path.join(module_path, 'i18n', iso_lang + '.po')
            if os.path.exists(po_path):
                log.notifyChannel("i18n", netsvc.LOG_INFO, 'module %s: loading translation file for language %s' % (module.name, iso_lang))
                tools.trans_load(cr.dbname, po_path, lang, verbose=False)
def load(self, cr, modules, langs, context=None):
    """Load the i18n .po files of `modules` for every language in `langs`.

    Sub-languages (e.g. es_CL) get their base language file (es.po)
    loaded first; the dialect file is then loaded on top with
    'overwrite' set so the dialect terms win.

    :return: True
    """
    ctx = dict(context or {})  # work on a private copy
    for module in modules:
        if not openerp.modules.get_module_path(module):
            # module not present on disk: nothing to load
            continue
        for lang in langs:
            code = tools.get_iso_codes(lang)
            base = code.split('_')[0] if '_' in code else None

            # Step 1: for sub-languages, load base language first (e.g. es_CL.po is loaded over es.po)
            if base:
                base_po = openerp.modules.get_module_resource(module, 'i18n', base + '.po')
                if base_po:
                    _logger.info('module %s: loading base translation file %s for language %s', module, base, lang)
                    tools.trans_load(cr, base_po, lang, verbose=False, module_name=module, context=ctx)
                    # make sure the requested translation will override the base terms later
                    ctx['overwrite'] = True

            # Step 2: then load the main translation file, possibly overriding the terms coming from the base language
            main_po = openerp.modules.get_module_resource(module, 'i18n', code + '.po')
            if main_po:
                _logger.info('module %s: loading translation file (%s) for language %s', module, code, lang)
                tools.trans_load(cr, main_po, lang, verbose=False, module_name=module, context=ctx)
            elif code != 'en_US':
                _logger.warning('module %s: no translation for language %s', module, code)
    return True
def load_lang(self, cr, uid, lang, lang_name=None):
    """Create the language `lang`, seeding its formatting attributes
    (date/time format, decimal point, thousands separator) from the
    matching POSIX locale.

    :param lang: language code, e.g. 'es_CL'
    :param lang_name: optional display name; defaults to the name known
        by tools.get_languages(), falling back to the code itself.
    :return: id of the record created via self.create()
    """
    log = logging.getLogger('i18n')
    iso_lang = tools.get_iso_codes(lang)
    # Switch the process-wide locale to the first candidate the C library
    # accepts; it is restored by tools.resetlocale() in the finally below.
    for candidate in tools.get_locales(lang):
        try:
            locale.setlocale(locale.LC_ALL, str(candidate))
            break
        except locale.Error:
            continue
    else:
        # Nothing matched: formatting info will come from the active default.
        default_code = locale.getdefaultlocale()[0]
        msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
        log.warning(msg, lang, default_code)
    if not lang_name:
        lang_name = tools.get_languages().get(lang, lang)

    def _fix_nbsp(value):
        """Fix badly-encoded non-breaking space Unicode character from
        locale.localeconv(), coercing to utf-8, as some platforms seem to
        output localeconv() in their system encoding, e.g. Windows-1252."""
        return '\xc2\xa0' if value == '\xa0' else value

    def _portable_format(fmt):
        """Map platform-specific strftime directives onto the directives
        required by the C standard (1989 version), so the stored format
        works on every platform with a C standard implementation."""
        for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
            fmt = fmt.replace(pattern, replacement)
        return str(fmt)

    values = {
        'code': lang,
        'iso_code': iso_lang,
        'name': lang_name,
        'translatable': 1,
        'date_format': _portable_format(locale.nl_langinfo(locale.D_FMT)),
        'time_format': _portable_format(locale.nl_langinfo(locale.T_FMT)),
        'decimal_point': _fix_nbsp(str(locale.localeconv()['decimal_point'])),
        'thousands_sep': _fix_nbsp(str(locale.localeconv()['thousands_sep'])),
    }
    new_id = False
    try:
        new_id = self.create(cr, uid, values)
    finally:
        # Always restore the original locale, even if create() raises.
        tools.resetlocale()
    return new_id
def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
    """Load the .po translation files of the given installed modules.

    :param filter_lang: a language code, a list of codes, or None to use
        every language flagged translatable in res.lang.
    :param context: optional context dict; a local copy gets 'overwrite'
        set after a base language file is loaded, so the dialect file
        overrides the base terms.
    """
    # BUG FIX: the default used to be the mutable ``context={}``, which is
    # shared across calls and can leak state from one call into the next;
    # use None and create a fresh dict per call instead.
    if context is None:
        context = {}
    logger = logging.getLogger('i18n')
    if not filter_lang:
        # No explicit filter: load every translatable language.
        pool = pooler.get_pool(cr.dbname)
        lang_obj = pool.get('res.lang')
        lang_ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        filter_lang = [lang.code for lang in lang_obj.browse(cr, uid, lang_ids)]
    elif not isinstance(filter_lang, (list, tuple)):
        filter_lang = [filter_lang]
    for mod in self.browse(cr, uid, ids):
        if mod.state != 'installed':
            continue
        modpath = addons.get_module_path(mod.name)
        if not modpath:
            # unable to find the module. we skip
            continue
        for lang in filter_lang:
            iso_lang = tools.get_iso_codes(lang)
            f = addons.get_module_resource(mod.name, 'i18n', iso_lang + '.po')
            # Private copy so 'overwrite' never leaks into the caller's context.
            context2 = context and context.copy() or {}
            if f and '_' in iso_lang:
                # A dialect file exists (e.g. es_CL.po): load the base
                # language file (es.po) first, then let the dialect override.
                iso_lang2 = iso_lang.split('_')[0]
                f2 = addons.get_module_resource(mod.name, 'i18n', iso_lang2 + '.po')
                if f2:
                    logger.info('module %s: loading base translation file %s for language %s', mod.name, iso_lang2, lang)
                    tools.trans_load(cr, f2, lang, verbose=False, context=context)
                    context2['overwrite'] = True
            # Implementation notice: we must first search for the full name of
            # the language derivative, like "en_UK", and then the generic,
            # like "en".
            if (not f) and '_' in iso_lang:
                iso_lang = iso_lang.split('_')[0]
                f = addons.get_module_resource(mod.name, 'i18n', iso_lang + '.po')
            if f:
                logger.info('module %s: loading translation file (%s) for language %s', mod.name, iso_lang, lang)
                tools.trans_load(cr, f, lang, verbose=False, context=context2)
            elif iso_lang != 'en':
                logger.warning('module %s: no translation for language %s', mod.name, iso_lang)
    tools.trans_update_res_ids(cr)
def load_lang(self, cr, uid, lang, lang_name=None):
    """Create the language `lang` (via self.create — presumably on the
    res.lang model; confirm against the enclosing class), seeding its
    formatting fields from the matching POSIX locale.

    :param lang: language code, e.g. 'es_CL'
    :param lang_name: optional display name; defaults to the name known
        by tools.get_languages(), falling back to the code itself.
    :return: id of the newly created record
    """
    # create the language with locale information
    fail = True
    logger = logging.getLogger('i18n')
    iso_lang = tools.get_iso_codes(lang)
    # Try every candidate locale string until the C library accepts one.
    # NOTE: this changes the process-wide locale until the resetlocale()
    # in the finally block at the end.
    for ln in tools.get_locales(lang):
        try:
            locale.setlocale(locale.LC_ALL, str(ln))
            fail = False
            break
        except locale.Error:
            continue
    if fail:
        # No matching locale installed: the formatting info gathered below
        # comes from whatever locale is currently active.
        lc = locale.getdefaultlocale()[0]
        msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
        logger.warning(msg, lang, lc)
    if not lang_name:
        lang_name = tools.get_languages().get(lang, lang)

    def fix_xa0(s):
        """Fix badly-encoded non-breaking space Unicode character from
        locale.localeconv(), coercing to utf-8, as some platform seem to
        output localeconv() in their system encoding, e.g. Windows-1252"""
        if s == '\xa0':
            return '\xc2\xa0'
        return s

    def fix_datetime_format(format):
        """Python's strftime supports only the format directives that are
        available on the platform's libc, so in order to be 100%
        cross-platform we map to the directives required by the C standard
        (1989 version), always available on platforms with a C standard
        implementation."""
        for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
            format = format.replace(pattern, replacement)
        return str(format)

    lang_info = {
        'code': lang,
        'iso_code': iso_lang,
        'name': lang_name,
        'translatable': 1,
        'date_format': fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
        'time_format': fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
        'decimal_point': fix_xa0(str(locale.localeconv()['decimal_point'])),
        'thousands_sep': fix_xa0(str(locale.localeconv()['thousands_sep'])),
    }
    lang_id = False
    try:
        lang_id = self.create(cr, uid, lang_info)
    finally:
        # Always restore the original process locale, even if create() fails.
        tools.resetlocale()
    return lang_id
def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
    """Load translation files for the given installed modules.

    The msf_profile module, when present in `ids`, is moved to the end of
    the list so that its terms are loaded last and always win.

    :param filter_lang: a language code, a list of codes, or None to use
        every language flagged translatable in res.lang.
    """
    if context is None:
        context = {}
    logger = logging.getLogger('i18n')
    if not filter_lang:
        # No explicit filter: load every translatable language.
        pool = pooler.get_pool(cr.dbname)
        lang_obj = pool.get('res.lang')
        lang_ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        filter_lang = [lang.code for lang in lang_obj.browse(cr, uid, lang_ids)]
    elif not isinstance(filter_lang, (list, tuple)):
        filter_lang = [filter_lang]
    # BUG FIX: operate on a copy of `ids` -- the original code reordered the
    # caller's list in place (remove + append), a surprising side effect.
    ids = list(ids)
    msf_profile_id = self.pool.get('ir.module.module').search(cr, uid, [('name', '=', 'msf_profile')])
    if msf_profile_id and msf_profile_id[0] in ids:
        ids.remove(msf_profile_id[0])
        # load msf_profile file at the end (due to es.po file, terms are always overwritten)
        ids.append(msf_profile_id[0])
    for mod in self.browse(cr, uid, ids):
        if mod.state != 'installed':
            continue
        modpath = addons.get_module_path(mod.name)
        if not modpath:
            # unable to find the module. we skip
            continue
        for lang in filter_lang:
            iso_lang = tools.get_iso_codes(lang)
            f = addons.get_module_resource(mod.name, 'i18n', iso_lang + '.po')
            # Private copy so 'overwrite' never leaks into the caller's context.
            context2 = context and context.copy() or {}
            if f and '_' in iso_lang:
                # A dialect file exists (e.g. es_CL.po): load the base
                # language file (es.po) first, then let the dialect override.
                iso_lang2 = iso_lang.split('_')[0]
                f2 = addons.get_module_resource(mod.name, 'i18n', iso_lang2 + '.po')
                if f2:
                    logger.info('module %s: loading base translation file %s for language %s', mod.name, iso_lang2, lang)
                    tools.trans_load(cr, f2, lang, verbose=False, context=context)
                    context2['overwrite'] = True
            # Implementation notice: we must first search for the full name of
            # the language derivative, like "en_UK", and then the generic,
            # like "en".
            if (not f) and '_' in iso_lang:
                iso_lang = iso_lang.split('_')[0]
                f = addons.get_module_resource(mod.name, 'i18n', iso_lang + '.po')
            if f:
                logger.info('module %s: loading translation file (%s) for language %s', mod.name, iso_lang, lang)
                tools.trans_load(cr, f, lang, verbose=False, context=context2)
            elif iso_lang != 'en':
                logger.warning('module %s: no translation for language %s', mod.name, iso_lang)
    tools.trans_update_res_ids(cr)
def load(self, cr, modules, langs, context=None): context = dict(context or {}) # local copy for module_name in modules: modpath = openerp.modules.get_module_path(module_name) if not modpath: continue for lang in langs: lang_code = tools.get_iso_codes(lang) base_lang_code = None if '_' in lang_code: base_lang_code = lang_code.split('_')[0] # Step 1: for sub-languages, load base language first (e.g. es_CL.po is loaded over es.po) if base_lang_code: base_trans_file = openerp.modules.get_module_resource( module_name, 'i18n', base_lang_code + '.po') if base_trans_file: _logger.info( 'module %s: loading base translation file %s for language %s', module_name, base_lang_code, lang) tools.trans_load(cr, base_trans_file, lang, verbose=False, module_name=module_name, context=context) context[ 'overwrite'] = True # make sure the requested translation will override the base terms later # Step 2: then load the main translation file, possibly overriding the terms coming from the base language trans_file = openerp.modules.get_module_resource( module_name, 'i18n', lang_code + '.po') if trans_file: _logger.info( 'module %s: loading translation file (%s) for language %s', module_name, lang_code, lang) tools.trans_load(cr, trans_file, lang, verbose=False, module_name=module_name, context=context) elif lang_code != 'en_US': _logger.warning( 'module %s: no translation for language %s', module_name, lang_code) return True
def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
    """Reload translation files for the given installed modules.

    Both the exact dialect file (en_GB.po) and the generic base file
    (en.po) are loaded when present; with context['overwrite'] set, the
    loading order is reversed so the dialect terms win.
    """
    log = logging.getLogger('i18n')
    # Default to every translatable language when no filter is given.
    if not filter_lang:
        registry = pooler.get_pool(cr.dbname)
        languages = registry.get('res.lang')
        translatable_ids = languages.search(cr, uid, [('translatable', '=', True)])
        filter_lang = [rec.code for rec in languages.browse(cr, uid, translatable_ids)]
    elif not isinstance(filter_lang, (list, tuple)):
        filter_lang = [filter_lang]
    for module in self.browse(cr, uid, ids):
        if module.state != 'installed':
            continue
        if not addons.get_module_path(module.name):
            # unable to find the module. we skip
            continue
        for lang in filter_lang:
            # Implementation notice: We need to load both the base language,
            # like "en" and then the dialects (like "en_GB").
            # With overwrite=False, en will be complemented with 'en_GB' terms.
            # with overwrite, we need to reverse the loading order
            candidates = []
            code = tools.get_iso_codes(lang)
            po_file = addons.get_module_resource(module.name, 'i18n', code + '.po')
            if po_file:
                candidates.append((code, po_file))
            if '_' in code:
                code = code.split('_')[0]
                po_file = addons.get_module_resource(module.name, 'i18n', code + '.po')
                if po_file:
                    candidates.append((code, po_file))
            if context and context.get('overwrite', False):
                candidates.reverse()
            for code, po_file in candidates:
                log.info('module %s: loading translation file for language %s', module.name, code)
                tools.trans_load(cr, po_file, lang, verbose=False, context=context)
            if not candidates and lang != 'en_US':
                log.warning('module %s: no translation for language %s', module.name, lang)
def update_translations(self, cr, uid, ids, filter_lang=None):
    """Reload the i18n/.po files of the given installed modules.

    :param filter_lang: a language code, a list of codes, or None to use
        every language flagged translatable in res.lang.
    :raises osv.except_osv: when a language code longer than 5 characters
        is passed (invalid language/country code).
    """
    logger = netsvc.Logger()
    if not filter_lang:
        # No explicit filter: reload all translatable languages.
        pool = pooler.get_pool(cr.dbname)
        lang_obj = pool.get('res.lang')
        lang_ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        filter_lang = [lang.code for lang in lang_obj.browse(cr, uid, lang_ids)]
    elif not isinstance(filter_lang, (list, tuple)):
        filter_lang = [filter_lang]
    for mod in self.browse(cr, uid, ids):
        if mod.state != 'installed':
            # only installed modules have loadable translations
            continue
        modpath = addons.get_module_path(mod.name)
        if not modpath:
            # unable to find the module. we skip
            continue
        for lang in filter_lang:
            # codes are at most 'll_CC' (5 chars); longer means malformed
            if len(lang) > 5:
                raise osv.except_osv(_('Error'), _('You Can Not Load Translation For language Due To Invalid Language/Country Code'))
            iso_lang = tools.get_iso_codes(lang)
            f = os.path.join(modpath, 'i18n', iso_lang + '.po')
            # Fall back to the base language file (es.po for es_CL) when
            # the dialect file is missing.
            if not os.path.exists(f) and iso_lang.find('_') != -1:
                f = os.path.join(modpath, 'i18n', iso_lang.split('_')[0] + '.po')
                iso_lang = iso_lang.split('_')[0]
            if os.path.exists(f):
                logger.notifyChannel("i18n", netsvc.LOG_INFO, 'module %s: loading translation file for language %s' % (mod.name, iso_lang))
                tools.trans_load(cr.dbname, f, lang, verbose=False)
def trans_load_data(db_name, fileobj, fileformat, lang, strict=False, lang_name=None, verbose=True):
    """Load a translation file (csv or po) into the ir_translation table
    of database `db_name`, creating the res.lang record first if needed.

    :param fileobj: open file-like object positioned anywhere (seek(0) is
        issued before reading)
    :param fileformat: 'csv' or 'po'; anything else raises Exception
    :param strict: when False, 'model'-type terms are propagated to every
        record of the model sharing the same source text
    :param verbose: log start/end of the load
    """
    logger = netsvc.Logger()
    if verbose:
        logger.notifyChannel("i18n", netsvc.LOG_INFO, 'loading translation file for language %s' % (lang))
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get('res.lang')
    trans_obj = pool.get('ir.translation')
    model_data_obj = pool.get('ir.model.data')
    iso_lang = tools.get_iso_codes(lang)
    # BUG FIX: the cursor used to be closed only on the success path, so an
    # IOError (or any other exception) leaked an open DB cursor; it is now
    # always released in the finally clause below.
    cr = None
    try:
        uid = 1
        cr = pooler.get_db(db_name).cursor()
        ids = lang_obj.search(cr, uid, [('code', '=', lang)])
        if not ids:
            # lets create the language with locale information
            fail = True
            for ln in get_locales(lang):
                try:
                    locale.setlocale(locale.LC_ALL, str(ln))
                    fail = False
                    break
                except locale.Error:
                    continue
            if fail:
                lc = locale.getdefaultlocale()[0]
                msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
                logger.notifyChannel('i18n', netsvc.LOG_WARNING, msg % (lang, lc))
            if not lang_name:
                lang_name = tools.get_languages().get(lang, lang)

            def fix_xa0(s):
                # coerce a badly-encoded non-breaking space to utf-8
                if s == '\xa0':
                    return '\xc2\xa0'
                return s

            lang_info = {
                'code': lang,
                'iso_code': iso_lang,
                'name': lang_name,
                'translatable': 1,
                'date_format': str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')),
                'time_format': str(locale.nl_langinfo(locale.T_FMT)),
                'decimal_point': fix_xa0(str(locale.localeconv()['decimal_point'])),
                'thousands_sep': fix_xa0(str(locale.localeconv()['thousands_sep'])),
            }
            try:
                lang_obj.create(cr, uid, lang_info)
            finally:
                # restore the process locale changed by setlocale() above
                resetlocale()
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                f = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            f = ['type', 'name', 'res_id', 'src', 'value']
        else:
            raise Exception(_('Bad file format'))
        # read the rest of the file
        line = 1
        for row in reader:
            line += 1
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {'lang': lang}
            for i in range(len(f)):
                if f[i] in ('module',):
                    continue
                dic[f[i]] = row[i]
            try:
                dic['res_id'] = int(dic['res_id'])
            # BUG FIX: narrowed from a bare except; only the int() conversion
            # failures mean "res_id is an xml id" and should be resolved via
            # ir.model.data -- anything else should propagate.
            except (ValueError, TypeError):
                model_data_ids = model_data_obj.search(cr, uid, [
                    ('model', '=', dic['name'].split(',')[0]),
                    ('module', '=', dic['res_id'].split('.', 1)[0]),
                    ('name', '=', dic['res_id'].split('.', 1)[1]),
                ])
                if model_data_ids:
                    dic['res_id'] = model_data_obj.browse(cr, uid, model_data_ids[0]).res_id
                else:
                    dic['res_id'] = False
            if dic['type'] == 'model' and not strict:
                (model, field) = dic['name'].split(',')
                # get the ids of the resources of this model which share
                # the same source
                obj = pool.get(model)
                if obj:
                    if field not in obj.fields_get_keys(cr, uid):
                        continue
                    ids = obj.search(cr, uid, [(field, '=', dic['src'])])
                    # if the resource id (res_id) is in that list, use it,
                    # otherwise use the whole list
                    if not ids:
                        ids = []
                    ids = (dic['res_id'] in ids) and [dic['res_id']] or ids
                    for id in ids:
                        dic['res_id'] = id
                        ids = trans_obj.search(cr, uid, [
                            ('lang', '=', lang),
                            ('type', '=', dic['type']),
                            ('name', '=', dic['name']),
                            ('src', '=', dic['src']),
                            ('res_id', '=', dic['res_id'])
                        ])
                        if ids:
                            trans_obj.write(cr, uid, ids, {'value': dic['value']})
                        else:
                            trans_obj.create(cr, uid, dic)
            else:
                ids = trans_obj.search(cr, uid, [
                    ('lang', '=', lang),
                    ('type', '=', dic['type']),
                    ('name', '=', dic['name']),
                    ('src', '=', dic['src'])
                ])
                if ids:
                    trans_obj.write(cr, uid, ids, {'value': dic['value']})
                else:
                    trans_obj.create(cr, uid, dic)
        cr.commit()
        if verbose:
            logger.notifyChannel("i18n", netsvc.LOG_INFO, "translation file loaded succesfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        logger.notifyChannel("i18n", netsvc.LOG_ERROR, "couldn't read translation file %s" % (filename,))
    finally:
        # always release the cursor, whatever happened above
        if cr is not None:
            cr.close()
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
    """Populates the ir_translation table. Fixing the res_ids so that they
    point correctly to ir_model_data is done in a separate step, using the
    'trans_update_res_ids' function below."""
    logger = logging.getLogger('i18n')
    if verbose:
        logger.info('loading translation file for language %s', lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get('res.lang')
    trans_obj = pool.get('ir.translation')
    model_data_obj = pool.get('ir.model.data')
    iso_lang = tools.get_iso_codes(lang)
    try:
        uid = 1
        ids = lang_obj.search(cr, uid, [('code', '=', lang)])
        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, 1, lang=lang, lang_name=lang_name)
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                f = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            # po files have a fixed implicit column layout
            f = ['type', 'name', 'res_id', 'src', 'value']
        else:
            logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))
        # read the rest of the file
        line = 1
        # set when at least one raw SQL update bypassed the ORM, so the
        # ORM caches must be invalidated afterwards
        clear_cache = False
        for row in reader:
            line += 1
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {'lang': lang}
            for i in range(len(f)):
                if f[i] in ('module',):
                    continue
                dic[f[i]] = row[i]
            try:
                # numeric res_id: a database id (0 when empty)
                dic['res_id'] = dic['res_id'] and int(dic['res_id']) or 0
                dic['module'] = False
                dic['xml_id'] = False
            except:
                # non-numeric res_id: an xml id 'module.name'; NOTE(review)
                # the bare except also swallows unrelated errors (e.g. a
                # missing 'res_id' key) -- then dic['res_id'].split would
                # raise again
                split_id = dic['res_id'].split('.', 1)
                dic['module'] = split_id[0]
                dic['xml_id'] = split_id[1]
                dic['res_id'] = False
            # look up an existing translation for this term
            args = [
                ('lang', '=', lang),
                ('type', '=', dic['type']),
                ('name', '=', dic['name']),
                ('src', '=', dic['src']),
            ]
            if dic['type'] == 'model':
                if dic['res_id'] is False:
                    # match by xml id when no database id is known yet
                    args.append(('module', '=', dic['module']))
                    args.append(('xml_id', '=', dic['xml_id']))
                else:
                    args.append(('res_id', '=', dic['res_id']))
            ids = trans_obj.search(cr, uid, args)
            if ids:
                # existing term: only update when overwrite is requested
                # and the new value is non-empty
                if context.get('overwrite') and dic['value']:
                    # trans_obj.write(cr, uid, ids, {'value': dic['value']})
                    # bypass write method to speed up the update, the cache will be cleared after the import
                    clear_cache = True
                    cr.execute('UPDATE ir_translation SET value=%s WHERE id in %s', (dic['value'], tuple(ids)))
            else:
                trans_obj.create(cr, uid, dic)
        if clear_cache:
            tools.cache.clean_caches_for_db(cr.dbname)
        if verbose:
            logger.info("translation file loaded succesfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        logger.exception("couldn't read translation file %s", filename)
def trans_load_data(db_name, fileobj, fileformat, lang, strict=False, lang_name=None, verbose=True):
    """Load a translation file (csv or po) into the ir_translation table of
    `db_name`, creating the res.lang record first when it does not exist.

    :param fileformat: 'csv' or 'po'; anything else raises Exception
    :param strict: when False, 'model'-type terms are propagated to every
        record of the model that shares the same source text

    NOTE(review): the cursor opened below is only closed on the success
    path; an IOError (or any other exception) leaks it -- confirm and fix.
    """
    logger = netsvc.Logger()
    if verbose:
        logger.notifyChannel("i18n", netsvc.LOG_INFO, 'loading translation file for language %s' % (lang))
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get('res.lang')
    trans_obj = pool.get('ir.translation')
    model_data_obj = pool.get('ir.model.data')
    iso_lang = tools.get_iso_codes(lang)
    try:
        uid = 1
        cr = pooler.get_db(db_name).cursor()
        ids = lang_obj.search(cr, uid, [('code', '=', lang)])
        if not ids:
            # lets create the language with locale information
            fail = True
            # try candidate locales until the C library accepts one;
            # changes the process-wide locale until resetlocale() below
            for ln in get_locales(lang):
                try:
                    locale.setlocale(locale.LC_ALL, str(ln))
                    fail = False
                    break
                except locale.Error:
                    continue
            if fail:
                lc = locale.getdefaultlocale()[0]
                msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
                logger.notifyChannel('i18n', netsvc.LOG_WARNING, msg % (lang, lc))
            if not lang_name:
                lang_name = tools.get_languages().get(lang, lang)

            def fix_xa0(s):
                # coerce a badly-encoded non-breaking space to utf-8
                if s == '\xa0':
                    return '\xc2\xa0'
                return s

            lang_info = {
                'code': lang,
                'iso_code': iso_lang,
                'name': lang_name,
                'translatable': 1,
                'date_format': str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')),
                'time_format': str(locale.nl_langinfo(locale.T_FMT)),
                'decimal_point': fix_xa0(str(locale.localeconv()['decimal_point'])),
                'thousands_sep': fix_xa0(str(locale.localeconv()['thousands_sep'])),
            }
            try:
                lang_obj.create(cr, uid, lang_info)
            finally:
                # restore the process locale changed above
                resetlocale()
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                f = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            # po files have a fixed implicit column layout
            f = ['type', 'name', 'res_id', 'src', 'value']
        else:
            raise Exception(_('Bad file format'))
        # read the rest of the file
        line = 1
        for row in reader:
            line += 1
            # skip empty rows and rows where the translation field (=last field) is empty
            #if (not row) or (not row[-1]):
            #    continue
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {'lang': lang}
            for i in range(len(f)):
                if f[i] in ('module', ):
                    continue
                dic[f[i]] = row[i]
            try:
                dic['res_id'] = int(dic['res_id'])
            except:
                # non-numeric res_id: an xml id 'module.name' to resolve
                # via ir.model.data (bare except -- NOTE(review): also
                # swallows unrelated errors)
                model_data_ids = model_data_obj.search(cr, uid, [
                    ('model', '=', dic['name'].split(',')[0]),
                    ('module', '=', dic['res_id'].split('.', 1)[0]),
                    ('name', '=', dic['res_id'].split('.', 1)[1]),
                ])
                if model_data_ids:
                    dic['res_id'] = model_data_obj.browse(cr, uid, model_data_ids[0]).res_id
                else:
                    dic['res_id'] = False
            if dic['type'] == 'model' and not strict:
                (model, field) = dic['name'].split(',')
                # get the ids of the resources of this model which share
                # the same source
                obj = pool.get(model)
                if obj:
                    if field not in obj.fields_get_keys(cr, uid):
                        continue
                    ids = obj.search(cr, uid, [(field, '=', dic['src'])])
                    # if the resource id (res_id) is in that list, use it,
                    # otherwise use the whole list
                    if not ids:
                        ids = []
                    ids = (dic['res_id'] in ids) and [dic['res_id']] or ids
                    for id in ids:
                        dic['res_id'] = id
                        ids = trans_obj.search(cr, uid, [('lang', '=', lang),
                                                         ('type', '=', dic['type']),
                                                         ('name', '=', dic['name']),
                                                         ('src', '=', dic['src']),
                                                         ('res_id', '=', dic['res_id'])])
                        if ids:
                            trans_obj.write(cr, uid, ids, {'value': dic['value']})
                        else:
                            trans_obj.create(cr, uid, dic)
            else:
                ids = trans_obj.search(cr, uid, [('lang', '=', lang),
                                                 ('type', '=', dic['type']),
                                                 ('name', '=', dic['name']),
                                                 ('src', '=', dic['src'])])
                if ids:
                    trans_obj.write(cr, uid, ids, {'value': dic['value']})
                else:
                    trans_obj.create(cr, uid, dic)
        cr.commit()
        cr.close()
        if verbose:
            logger.notifyChannel("i18n", netsvc.LOG_INFO, "translation file loaded succesfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        logger.notifyChannel("i18n", netsvc.LOG_ERROR, "couldn't read translation file %s" % (filename, ))
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
    """Populates the ir_translation table. Fixing the res_ids so that they
    point correctly to ir_model_data is done in a separate step, using the
    'trans_update_res_ids' function below."""
    logger = logging.getLogger('i18n')
    if verbose:
        logger.info('loading translation file for language %s', lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get('res.lang')
    trans_obj = pool.get('ir.translation')
    model_data_obj = pool.get('ir.model.data')
    iso_lang = tools.get_iso_codes(lang)
    try:
        uid = 1
        ids = lang_obj.search(cr, uid, [('code', '=', lang)])
        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, 1, lang=lang, lang_name=lang_name)
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            for row in reader:
                f = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            # po files have a fixed implicit column layout
            f = ['type', 'name', 'res_id', 'src', 'value']
        else:
            logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))
        # read the rest of the file
        line = 1
        for row in reader:
            line += 1
            # skip empty rows and rows where the translation field (=last field) is empty
            # if (not row) or (not row[-1]):
            #     continue
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {'lang': lang}
            for i in range(len(f)):
                if f[i] in ('module', ):
                    continue
                dic[f[i]] = row[i]
            try:
                # numeric res_id: a database id (0 when empty)
                dic['res_id'] = dic['res_id'] and int(dic['res_id']) or 0
                dic['module'] = False
                dic['xml_id'] = False
            except:
                # non-numeric res_id: an xml id 'module.name'; NOTE(review)
                # the bare except also swallows unrelated errors
                split_id = dic['res_id'].split('.', 1)
                dic['module'] = split_id[0]
                dic['xml_id'] = split_id[1]
                dic['res_id'] = False
            # look up an existing translation for this term
            args = [
                ('lang', '=', lang),
                ('type', '=', dic['type']),
                ('name', '=', dic['name']),
                ('src', '=', dic['src']),
            ]
            if dic['type'] == 'model':
                if dic['res_id'] is False:
                    # match by xml id when no database id is known yet
                    args.append(('module', '=', dic['module']))
                    args.append(('xml_id', '=', dic['xml_id']))
                else:
                    args.append(('res_id', '=', dic['res_id']))
            ids = trans_obj.search(cr, uid, args)
            if ids:
                # existing term: only overwrite when requested and the
                # incoming value is non-empty
                if context.get('overwrite') and dic['value']:
                    trans_obj.write(cr, uid, ids, {'value': dic['value']})
            else:
                trans_obj.create(cr, uid, dic)
        if verbose:
            logger.info("translation file loaded succesfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        logger.exception("couldn't read translation file %s", filename)
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
    """Populates the ir_translation table.

    :param fileobj: open file-like object (seek(0) is issued before reading)
    :param fileformat: 'csv' or 'po'; anything else raises Exception
    :param lang_name: optional display name used if the language must be
        created first
    """
    logger = logging.getLogger('i18n')
    if verbose:
        logger.info('loading translation file for language %s', lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get('res.lang')
    trans_obj = pool.get('ir.translation')
    iso_lang = tools.get_iso_codes(lang)
    try:
        uid = 1
        (lc, encoding) = locale.getdefaultlocale()
        ids = lang_obj.search(cr, uid, [('code', '=', lang)])
        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, 1, lang=lang, lang_name=lang_name)
            # load_lang() changed the process-wide locale; try to restore
            # the default one, best-effort.
            # BUG FIX: the restore used to be attempted twice -- once
            # guarded, once not -- so the unguarded call raised locale.Error
            # on systems where the default locale is not installed; keep
            # only the guarded attempt.  Also guard against
            # getdefaultlocale() returning None components (TypeError on
            # string concatenation).
            if lc and encoding:
                try:
                    locale.setlocale(locale.LC_ALL, str(lc + '.' + encoding))
                except locale.Error:
                    pass
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            # Setting the limit of data while loading a CSV
            csv.field_size_limit(sys.maxint)
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains columns titles)
            f = reader.next()
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            # po files have a fixed implicit column layout
            f = ['type', 'name', 'res_id', 'src', 'value']
        else:
            logger.error('Bad file format: %s', fileformat)
            raise Exception(_('Bad file format'))
        # read the rest of the file
        line = 1
        irt_cursor = trans_obj._get_import_cursor(cr, uid, context=context)
        for row in reader:
            line += 1
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {'lang': lang}
            dic_module = False
            for i, fld in enumerate(f):
                if fld in ('module',):
                    continue
                dic[fld] = row[i]
            # This would skip terms that fail to specify a res_id
            if not dic.get('res_id', False):
                continue
            res_id = dic.pop('res_id')
            if res_id and isinstance(res_id, (int, long)) \
                    or (isinstance(res_id, basestring) and res_id.isdigit()):
                # numeric res_id: a plain database id
                dic['res_id'] = int(res_id)
            else:
                # res_id is an xml id: record the ir.model.data coordinates
                # so the import cursor can resolve it later
                try:
                    tmodel = dic['name'].split(',')[0]
                    if '.' in res_id:
                        tmodule, tname = res_id.split('.', 1)
                    else:
                        tmodule = dic_module
                        tname = res_id
                    dic['imd_model'] = tmodel
                    dic['imd_module'] = tmodule
                    dic['imd_name'] = tname
                    dic['res_id'] = None
                except Exception:
                    # BUG FIX: 'res_id' was popped from dic above, so logging
                    # dic['res_id'] here raised KeyError instead of emitting
                    # the warning; use the local res_id instead.
                    logger.warning("Could not decode resource for %s, please fix the po file.", res_id, exc_info=True)
                    dic['res_id'] = None
            irt_cursor.push(dic)
        irt_cursor.finish()
        if verbose:
            logger.info("translation file loaded succesfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        logger.exception("couldn't read translation file %s", filename)
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
    """Populates the ir_translation table. Fixing the res_ids so that they
    point correctly to ir_model_data is done in a separate step, using the
    'trans_update_res_ids' function below."""
    logger = logging.getLogger("i18n")
    if verbose:
        logger.info("loading translation file for language %s", lang)
    if context is None:
        context = {}
    db_name = cr.dbname
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get("res.lang")
    trans_obj = pool.get("ir.translation")
    model_data_obj = pool.get("ir.model.data")
    iso_lang = tools.get_iso_codes(lang)
    try:
        uid = 1
        ids = lang_obj.search(cr, uid, [("code", "=", lang)])
        if not ids:
            # lets create the language with locale information
            lang_obj.load_lang(cr, 1, lang=lang, lang_name=lang_name)
        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == "csv":
            reader = csv.reader(fileobj, quotechar='"', delimiter=",")
            # read the first line of the file (it contains columns titles)
            for row in reader:
                f = row
                break
        elif fileformat == "po":
            reader = TinyPoFile(fileobj)
            # po files have a fixed implicit column layout
            f = ["type", "name", "res_id", "src", "value"]
        else:
            logger.error("Bad file format: %s", fileformat)
            raise Exception(_("Bad file format"))
        # read the rest of the file
        line = 1
        for row in reader:
            line += 1
            # skip empty rows and rows where the translation field (=last field) is empty
            # if (not row) or (not row[-1]):
            #     continue
            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {"lang": lang}
            for i in range(len(f)):
                if f[i] in ("module",):
                    continue
                dic[f[i]] = row[i]
            try:
                # numeric res_id: a database id (0 when empty)
                dic["res_id"] = dic["res_id"] and int(dic["res_id"]) or 0
                dic["module"] = False
                dic["xml_id"] = False
            except:
                # non-numeric res_id: an xml id 'module.name'; NOTE(review)
                # the bare except also swallows unrelated errors
                split_id = dic["res_id"].split(".", 1)
                dic["module"] = split_id[0]
                dic["xml_id"] = split_id[1]
                dic["res_id"] = False
            # look up an existing translation for this term
            args = [
                ("lang", "=", lang),
                ("type", "=", dic["type"]),
                ("name", "=", dic["name"]),
                ("src", "=", dic["src"]),
            ]
            if dic["type"] == "model":
                if dic["res_id"] is False:
                    # match by xml id when no database id is known yet
                    args.append(("module", "=", dic["module"]))
                    args.append(("xml_id", "=", dic["xml_id"]))
                else:
                    args.append(("res_id", "=", dic["res_id"]))
            ids = trans_obj.search(cr, uid, args)
            if ids:
                # existing term: only overwrite when requested and the
                # incoming value is non-empty
                if context.get("overwrite") and dic["value"]:
                    trans_obj.write(cr, uid, ids, {"value": dic["value"]})
            else:
                trans_obj.create(cr, uid, dic)
        if verbose:
            logger.info("translation file loaded succesfully")
    except IOError:
        filename = "[lang: %s][format: %s]" % (iso_lang or "new", fileformat)
        logger.exception("couldn't read translation file %s", filename)