def _checking(self, cr, uid, data, context):
    file_list = s.get_contrib_revision(self.user, self.password, self.lang,
            self.version, self.profile)
    if file_list:
        lang_dict = tools.get_languages()
        self.file_list = [(lang[0], lang_dict.get(lang[1], lang[1]) + ' by '
                + lang[2] + ' at R ' + lang[3]) for lang in file_list]
        return 'file_selection'
    return 'version_not_found'
def load_lang(self, cr, uid, lang, lang_name=None):
    # create the language with locale information
    fail = True
    logger = logging.getLogger('i18n')
    iso_lang = tools.get_iso_codes(lang)
    for ln in tools.get_locales(lang):
        try:
            locale.setlocale(locale.LC_ALL, str(ln))
            fail = False
            break
        except locale.Error:
            continue
    if fail:
        lc = locale.getdefaultlocale()[0]
        msg = 'Unable to get information for locale %s. Information from the default locale (%s) has been used.'
        logger.warning(msg, lang, lc)

    if not lang_name:
        lang_name = tools.get_languages().get(lang, lang)

    def fix_xa0(s):
        """Fix the badly-encoded non-breaking space character from
        locale.localeconv(), coercing it to utf-8, as some platforms seem to
        output localeconv() in their system encoding, e.g. Windows-1252."""
        if s == '\xa0':
            return '\xc2\xa0'
        return s

    def fix_datetime_format(format):
        """Python's strftime supports only the format directives that are
        available on the platform's libc, so in order to be 100% cross-platform
        we map to the directives required by the C standard (1989 version),
        which are always available on platforms with a C standard implementation."""
        for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
            format = format.replace(pattern, replacement)
        return str(format)

    lang_info = {
        'code': lang,
        'iso_code': iso_lang,
        'name': lang_name,
        'translatable': 1,
        'date_format': fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
        'time_format': fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
        'decimal_point': fix_xa0(str(locale.localeconv()['decimal_point'])),
        'thousands_sep': fix_xa0(str(locale.localeconv()['thousands_sep'])),
    }
    lang_id = False
    try:
        lang_id = self.create(cr, uid, lang_info)
    finally:
        tools.resetlocale()
    return lang_id
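# A minimal standalone sketch of the locale probing that load_lang relies on,
# runnable outside the ORM. The candidate locale names below are assumptions
# standing in for tools.get_locales(lang); the real candidates are derived from
# the requested language code. Note that locale.nl_langinfo is only available
# on Unix-like platforms.
import locale

def probe_locale_info(candidates=('fr_FR.UTF-8', 'fr_FR', 'C')):
    """Try each candidate locale, then read its date/time and number formats."""
    for name in candidates:
        try:
            locale.setlocale(locale.LC_ALL, name)
            break
        except locale.Error:
            continue
    try:
        conv = locale.localeconv()
        return {
            'date_format': locale.nl_langinfo(locale.D_FMT),
            'time_format': locale.nl_langinfo(locale.T_FMT),
            'decimal_point': conv['decimal_point'],
            'thousands_sep': conv['thousands_sep'],
        }
    finally:
        # restore the process-wide default locale, in the spirit of tools.resetlocale()
        locale.setlocale(locale.LC_ALL, '')

# probe_locale_info() might return, for example:
# {'date_format': '%d/%m/%Y', 'time_format': '%T', 'decimal_point': ',', 'thousands_sep': ' '}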
def get_language(cr, uid, context, user=None, model=None, lang=None):
    if user:
        # languages available to the generic contributor, or to the logged-in user
        if user == 'contributor':
            lang_list = s.get_lang_list()
        else:
            login = pooler.get_pool(cr.dbname).get('res.users').read(
                    cr, uid, uid, ['login'])['login']
            lang_list = s.get_lang_list(login)
    elif model:
        # languages with proposed contributions, or with any existing translation
        if model == 'ir_translation_contribution':
            sql = "select distinct lang from ir_translation_contribution where state='propose'"
        else:
            sql = "select distinct lang from ir_translation"
        cr.execute(sql)
        lang_list = [x[0] for x in cr.fetchall()]
    else:
        # default: languages with accepted contributions
        sql = "select distinct lang from ir_translation_contribution where state='accept'"
        cr.execute(sql)
        lang_list = [x[0] for x in cr.fetchall()]
    lang_dict = tools.get_languages()
    return [(lang, lang_dict.get(lang, lang)) for lang in lang_list]
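# A small sketch of how get_language turns a list of language codes into
# (code, display name) pairs; the sample codes and the mapping here are
# assumptions for illustration only, the real mapping comes from
# tools.get_languages() and the codes from ir_translation / ir_translation_contribution.
def name_languages(codes, lang_dict):
    # fall back to the raw code when no display name is known
    return [(code, lang_dict.get(code, code)) for code in codes]

# name_languages(['fr_FR', 'xx_XX'], {'fr_FR': 'French / Fran\xc3\xa7ais'})
# -> [('fr_FR', 'French / Fran\xc3\xa7ais'), ('xx_XX', 'xx_XX')]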
def trans_load_data(db_name, fileobj, fileformat, lang, strict=False, lang_name=None, verbose=True):
    logger = netsvc.Logger()
    if verbose:
        logger.notifyChannel("i18n", netsvc.LOG_INFO,
                'loading translation file for language %s' % (lang))
    pool = pooler.get_pool(db_name)
    lang_obj = pool.get('res.lang')
    trans_obj = pool.get('ir.translation')
    model_data_obj = pool.get('ir.model.data')
    iso_lang = tools.get_iso_codes(lang)
    try:
        uid = 1
        cr = pooler.get_db(db_name).cursor()
        ids = lang_obj.search(cr, uid, [('code', '=', lang)])

        if not ids:
            # let's create the language with locale information
            fail = True
            for ln in get_locales(lang):
                try:
                    locale.setlocale(locale.LC_ALL, str(ln))
                    fail = False
                    break
                except locale.Error:
                    continue
            if fail:
                lc = locale.getdefaultlocale()[0]
                msg = 'Unable to get information for locale %s. Information from the default locale (%s) has been used.'
                logger.notifyChannel('i18n', netsvc.LOG_WARNING, msg % (lang, lc))

            if not lang_name:
                lang_name = tools.get_languages().get(lang, lang)

            def fix_xa0(s):
                if s == '\xa0':
                    return '\xc2\xa0'
                return s

            lang_info = {
                'code': lang,
                'iso_code': iso_lang,
                'name': lang_name,
                'translatable': 1,
                'date_format': str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')),
                'time_format': str(locale.nl_langinfo(locale.T_FMT)),
                'decimal_point': fix_xa0(str(locale.localeconv()['decimal_point'])),
                'thousands_sep': fix_xa0(str(locale.localeconv()['thousands_sep'])),
            }

            try:
                lang_obj.create(cr, uid, lang_info)
            finally:
                resetlocale()

        # now, the serious things: we read the language file
        fileobj.seek(0)
        if fileformat == 'csv':
            reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains the column titles)
            for row in reader:
                f = row
                break
        elif fileformat == 'po':
            reader = TinyPoFile(fileobj)
            f = ['type', 'name', 'res_id', 'src', 'value']
        else:
            raise Exception(_('Bad file format'))

        # read the rest of the file
        line = 1
        for row in reader:
            line += 1
            # skip empty rows and rows where the translation field (=last field) is empty
            #if (not row) or (not row[-1]):
            #    continue

            # dictionary which holds values for this line of the csv file
            # {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
            #  'src': ..., 'value': ...}
            dic = {'lang': lang}
            for i in range(len(f)):
                if f[i] in ('module',):
                    continue
                dic[f[i]] = row[i]

            try:
                dic['res_id'] = int(dic['res_id'])
            except:
                # res_id is an XML id like "module.record": resolve it through ir.model.data
                model_data_ids = model_data_obj.search(cr, uid, [
                    ('model', '=', dic['name'].split(',')[0]),
                    ('module', '=', dic['res_id'].split('.', 1)[0]),
                    ('name', '=', dic['res_id'].split('.', 1)[1]),
                    ])
                if model_data_ids:
                    dic['res_id'] = model_data_obj.browse(cr, uid, model_data_ids[0]).res_id
                else:
                    dic['res_id'] = False

            if dic['type'] == 'model' and not strict:
                (model, field) = dic['name'].split(',')

                # get the ids of the resources of this model which share
                # the same source
                obj = pool.get(model)
                if obj:
                    if field not in obj.fields_get_keys(cr, uid):
                        continue
                    ids = obj.search(cr, uid, [(field, '=', dic['src'])])

                    # if the resource id (res_id) is in that list, use it,
                    # otherwise use the whole list
                    if not ids:
                        ids = []
                    ids = (dic['res_id'] in ids) and [dic['res_id']] or ids
                    for id in ids:
                        dic['res_id'] = id
                        ids = trans_obj.search(cr, uid, [
                            ('lang', '=', lang),
                            ('type', '=', dic['type']),
                            ('name', '=', dic['name']),
                            ('src', '=', dic['src']),
                            ('res_id', '=', dic['res_id'])
                        ])
                        if ids:
                            trans_obj.write(cr, uid, ids, {'value': dic['value']})
                        else:
                            trans_obj.create(cr, uid, dic)
            else:
                ids = trans_obj.search(cr, uid, [
                    ('lang', '=', lang),
                    ('type', '=', dic['type']),
                    ('name', '=', dic['name']),
                    ('src', '=', dic['src'])
                ])
                if ids:
                    trans_obj.write(cr, uid, ids, {'value': dic['value']})
                else:
                    trans_obj.create(cr, uid, dic)
        cr.commit()
        cr.close()
        if verbose:
            logger.notifyChannel("i18n", netsvc.LOG_INFO,
                    "translation file loaded successfully")
    except IOError:
        filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
        logger.notifyChannel("i18n", netsvc.LOG_ERROR,
                "couldn't read translation file %s" % (filename,))
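# A hedged sketch of the row-to-dictionary step inside trans_load_data: each
# data row is paired with the CSV header row (or the fixed PO field list),
# skipping the 'module' column. The sample header and row below are
# illustrative assumptions only.
def row_to_dict(header, row, lang):
    dic = {'lang': lang}
    for field, value in zip(header, row):
        if field == 'module':
            continue
        dic[field] = value
    return dic

# header = ['module', 'type', 'name', 'res_id', 'src', 'value']
# row = ['sale', 'model', 'ir.ui.menu,name', 'base.menu_sales', 'Sales', 'Ventes']
# row_to_dict(header, row, 'fr_FR')
# -> {'lang': 'fr_FR', 'type': 'model', 'name': 'ir.ui.menu,name',
#     'res_id': 'base.menu_sales', 'src': 'Sales', 'value': 'Ventes'}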