def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
    """Build the SQL lookup for a translation term.

    :return: a ``(query, params)`` pair ready to be passed to ``cr.execute()``
    """
    if source:
        # Note: the extra test on md5(src) is a hint for postgres to use the
        # index ir_translation_src_md5
        query = """SELECT value FROM ir_translation
                   WHERE lang=%s AND type in %s AND src=%s AND md5(src)=md5(%s)"""
        source = tools.ustr(source)
        params = (lang or '', types, source, source)
        if res_id:
            query += " AND res_id in %s"
            params += (res_id,)
        if name:
            query += " AND name=%s"
            params += (tools.ustr(name),)
        return (query, params)
    # without a source term, the lookup is done on the name alone
    query = """SELECT value FROM ir_translation
               WHERE lang=%s AND type in %s AND name=%s"""
    return (query, (lang or '', types, tools.ustr(name)))
def _process_text(self, txt):
    """Translate ``txt`` according to the language in the local context,
       replace dynamic ``[[expr]]`` with their real value, then escape
       the result for XML.

       :param str txt: original text to translate (must NOT be XML-escaped)
       :return: translated text, with dynamic expressions evaluated and
                with special XML characters escaped (``&,<,>``).
    """
    # No rendering context at all: nothing to translate or evaluate,
    # just escape the raw text for XML.
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    result = ''
    # _regex splits the text into alternating plain-text / expression chunks.
    sps = _regex.split(txt)
    while sps:
        # This is a simple text to translate
        to_translate = tools.ustr(sps.pop(0))
        result += tools.ustr(self.localcontext.get('translate', lambda x: x)(to_translate))
        if sps:
            # The following chunk is a dynamic [[ expr ]] expression.
            txt = None
            try:
                expr = sps.pop(0)
                # NOTE(review): `eval` on template expressions against the
                # rendering context -- expressions come from report templates;
                # make sure templates are trusted input.
                txt = eval(expr, self.localcontext)
                if txt and isinstance(txt, basestring):
                    txt = tools.ustr(txt)
            except Exception:
                # Evaluation failures are deliberately non-fatal: the
                # expression is simply dropped from the output.
                _logger.info("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
            if isinstance(txt, basestring):
                result += txt
            elif txt and (txt is not None) and (txt is not False):
                # non-string truthy value (number, record, ...): coerce it
                result += ustr(txt)
    return str2xml(result)
def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
    """Return the SQL text and parameters used to look up a translation."""
    if not source:
        # name-only lookup (no source term given)
        query = """SELECT value FROM ir_translation
                   WHERE lang=%s AND type in %s AND name=%s"""
        return (query, (lang or '', types, tools.ustr(name)))

    # Note: the extra test on md5(src) is a hint for postgres to use the
    # index ir_translation_src_md5
    src = tools.ustr(source)
    query = """SELECT value FROM ir_translation
               WHERE lang=%s AND type in %s AND src=%s AND md5(src)=md5(%s)"""
    params = (lang or '', types, src, src)
    if res_id:
        query += " AND res_id in %s"
        params += (res_id, )
    if name:
        query += " AND name=%s"
        params += (tools.ustr(name), )
    return (query, params)
def get_record_data(self, values):
    """ Returns a defaults-like dict with initial values for the composition
    wizard when sending an email related to a previous email (parent_id) or
    a document (model, res_id). This is based on previously computed default
    values. """
    result, subject = {}, False
    if values.get('parent_id'):
        parent = self.env['mail.message'].browse(values.get('parent_id'))
        # Bugfix: the original assignment carried a trailing comma, storing a
        # 1-tuple ``(record_name,)`` instead of the plain record name.
        result['record_name'] = parent.record_name
        subject = tools.ustr(parent.subject or parent.record_name or '')
        if not values.get('model'):
            result['model'] = parent.model
        if not values.get('res_id'):
            result['res_id'] = parent.res_id
        # link all partners of the parent message to the new composition
        partner_ids = values.get('partner_ids', list()) + [(4, id) for id in parent.partner_ids.ids]
        if self._context.get('is_private') and parent.author_id:  # check message is private then add author also in partner list.
            partner_ids += [(4, parent.author_id.id)]
        result['partner_ids'] = partner_ids
    elif values.get('model') and values.get('res_id'):
        doc_name_get = self.env[values.get('model')].browse(values.get('res_id')).name_get()
        result['record_name'] = doc_name_get and doc_name_get[0][1] or ''
        subject = tools.ustr(result['record_name'])

    # prefix the subject with "Re:" (translated) unless already present
    re_prefix = _('Re:')
    if subject and not (subject.startswith('Re:') or subject.startswith(re_prefix)):
        subject = "%s %s" % (re_prefix, subject)
    result['subject'] = subject

    return result
def go(id, uid, ids, datas, context):
    # Background worker for report rendering.  ``db``, ``object`` and
    # ``self_reports`` are taken from the enclosing scope (this is a nested
    # function); the outcome is stored into the ``self_reports[id]`` slot.
    with ecore.api.Environment.manage():
        # each worker uses its own cursor
        cr = ecore.registry(db).cursor()
        try:
            result, format = ecore.report.render_report(cr, uid, ids, object, datas, context)
            if not result:
                tb = sys.exc_info()
                self_reports[id]['exception'] = ecore.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
            self_reports[id]['result'] = result
            self_reports[id]['format'] = format
            # state=True signals to pollers that rendering is finished
            self_reports[id]['state'] = True
        except Exception, exception:
            _logger.exception('Exception: %s\n', exception)
            if hasattr(exception, 'name') and hasattr(exception, 'value'):
                # structured exception (name/value): keep both parts
                self_reports[id]['exception'] = ecore.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
            else:
                tb = sys.exc_info()
                self_reports[id]['exception'] = ecore.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
            # mark finished even on failure so callers stop waiting
            self_reports[id]['state'] = True
        cr.commit()
        cr.close()
def from_data(self, uid, fields, rows, company_name):
    # Build an intermediate <report> XML document from tabular data, run it
    # through the custom_new.xsl XSLT stylesheet, and render the resulting
    # RML to a PDF via trml2pdf.
    pageSize=[210.0,297.0]  # A4 page, in millimeters

    new_doc = etree.Element("report")
    config = etree.SubElement(new_doc, 'config')

    def _append_node(name, text):
        # append a simple <name>text</name> child under <config>
        n = etree.SubElement(config, name)
        n.text = text

    _append_node('date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
    _append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
    # 2.8346 converts millimeters to points (72 / 25.4)
    _append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
    _append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
    _append_node('PageFormat', 'a4')
    _append_node('header-date', time.strftime(str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y'))))
    _append_node('company', company_name)
    # NOTE(review): l, t, temp and tsum are never used below -- presumably
    # leftovers from an earlier totals computation; confirm before removing.
    l = []
    t = 0
    temp = []
    tsum = []
    skip_index = []  # column positions without header data, skipped in rows
    header = etree.SubElement(new_doc, 'header')
    i = 0
    for f in fields:
        if f.get('header_data_id', False):
            value = f.get('header_name', "")
            field = etree.SubElement(header, 'field')
            field.text = tools.ustr(value)
        else:
            skip_index.append(i)
        i += 1
    lines = etree.SubElement(new_doc, 'lines')
    for row_lines in rows:
        node_line = etree.SubElement(lines, 'row')
        j = 0
        for row in row_lines:
            if not j in skip_index:
                # para/tree drive the XSL styling: group rows and floats
                para = "yes"
                tree = "no"
                value = row.get('data', '')
                if row.get('bold', False):
                    para = "group"
                if row.get('number', False):
                    tree = "float"
                col = etree.SubElement(node_line, 'col', para=para, tree=tree)
                col.text = tools.ustr(value)
            j += 1
    transform = etree.XSLT(etree.parse(os.path.join(tools.config['root_path'], 'addons/base/report/custom_new.xsl')))
    rml = etree.tostring(transform(new_doc))
    self.obj = trml2pdf.parseNode(rml, title='Printscreen')
    return self.obj
def _prac_amt(self, cr, uid, ids, context=None):
    """Compute the practical amount of each budget line, i.e. the sum of the
    analytic lines booked on the budget's accounts over the line's period.

    :return: dict mapping line id -> practical amount (float)
    :raises UserError: if a line's budget has no general accounts
    """
    res = {}
    if context is None:
        context = {}
    for line in self.browse(cr, uid, ids, context=context):
        # Bugfix: reset the accumulator for every line.  It was previously
        # initialized once before the loop, so a line without an analytic
        # account silently inherited the previous line's amount.
        result = 0.0
        acc_ids = [x.id for x in line.general_budget_id.account_ids]
        if not acc_ids:
            raise UserError(_("The Budget '%s' has no accounts!") % ustr(line.general_budget_id.name))
        date_to = line.date_to
        date_from = line.date_from
        if line.analytic_account_id.id:
            cr.execute(
                "SELECT SUM(amount) FROM account_analytic_line WHERE account_id=%s AND (date "
                "between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
                "general_account_id=ANY(%s)",
                (line.analytic_account_id.id, date_from, date_to, acc_ids,))
            result = cr.fetchone()[0]
            if result is None:
                # SUM() over no rows yields NULL
                result = 0.00
        res[line.id] = result
    return res
def _get_source(self, cr, uid, name, types, lang, source=None, res_id=None):
    """Fetch the translation matching name/type/language/source.

    All values passed to this method should be unicode (not byte strings),
    especially ``source``.

    :param name: identification of the term to translate, such as field
                 name (optional if source is passed)
    :param types: single string defining type of term to translate (see
                  ``type`` field on ir.translation), or sequence of allowed
                  types (strings)
    :param lang: language code of the desired translation
    :param source: optional source term to translate (should be unicode)
    :param res_id: optional resource id or a list of ids to translate
                   (if used, ``source`` should be set)
    :rtype: unicode
    :return: the requested translation, or an empty unicode string if no
             translation was found and ``source`` was not passed
    """
    # FIXME: should assert that `source` is unicode and fix all callers to
    # always pass unicode so we can remove the string encoding/decoding.
    if not lang:
        return tools.ustr(source or '')
    if isinstance(types, basestring):
        types = (types,)
    if res_id:
        # normalize to a tuple (required so the ormcache can hash it)
        res_id = (res_id,) if isinstance(res_id, (int, long)) else tuple(res_id)
    return self.__get_source(cr, uid, name, types, lang, source, res_id)
def name_get(self, cr, uid, ids, context=None):
    """Return ``[(id, display_name)]`` pairs; names are coerced to unicode."""
    if not ids:
        return []
    if isinstance(ids, (int, long)):
        ids = [ids]
    records = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write')
    names = []
    for record in records:
        names.append((record['id'], tools.ustr(record['name'])))
    return names
def initialize_sys_path():
    """
    Setup an import-hook to be able to import eCore addons from the different
    addons paths.

    This ensures something like ``import crm`` (or even
    ``import ecore.addons.crm``) works even if the addons are not in the
    PYTHONPATH.
    """
    global ad_paths
    global hooked

    def _register(path):
        # keep ad_paths free of duplicates, preserving insertion order
        if path not in ad_paths:
            ad_paths.append(path)

    _register(tools.config.addons_data_dir)
    for entry in tools.config['addons_path'].split(','):
        _register(os.path.abspath(tools.ustr(entry.strip())))

    # add base module path
    _register(os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons')))

    if not hooked:
        sys.meta_path.append(AddonsImportHook())
        hooked = True
def slugify(s, max_length=None):
    """ Transform a string to a slug that can be used in a url path.

    This method will first try to do the job with python-slugify if present.
    Otherwise it will process string by stripping leading and ending spaces,
    converting unicode chars to ascii, lowering all chars and replacing spaces
    and underscore with hyphen "-".

    :param s: str
    :param max_length: int
    :rtype: str
    """
    s = ustr(s)
    if slugify_lib:
        # There are 2 different libraries only python-slugify is supported
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    ascii_form = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
    cleaned = re.sub('[\W_]', ' ', ascii_form).strip().lower()
    cleaned = re.sub('[-\s]+', '-', cleaned)
    return cleaned[:max_length]
def pack_jobs_request(self, cr, uid, term_ids, context=None):
    '''prepare the terms that will be requested to gengo and returns them in a dictionary with following format
        {'jobs': {
            'term1.id': {...}
            'term2.id': {...}
            }
        }'''
    base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
    translation_pool = self.pool.get('ir.translation')
    jobs = {}
    user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
    # Gengo expects 0/1 flags, not booleans
    auto_approve = 1 if user.company_id.gengo_auto_approve else 0
    for term in translation_pool.browse(cr, uid, term_ids, context=context):
        # skip terms that contain no word character at all (nothing to translate)
        if re.search(r"\w", term.src or ""):
            comment = user.company_id.gengo_comment or ''
            if term.gengo_comment:
                comment += '\n' + term.gengo_comment
            # job key combines a timestamp with the term id to stay unique
            jobs[time.strftime('%Y%m%d%H%M%S') + '-' + str(term.id)] = {
                'type': 'text',
                'slug': 'Single :: English to ' + term.lang,
                'tier': tools.ustr(term.gengo_translation),
                # custom_data is echoed back by Gengo and used to match the
                # translated job to its ir.translation record
                'custom_data': str(term.id),
                'body_src': term.src,
                'lc_src': 'en',
                'lc_tgt': translation_pool._get_gengo_corresponding_language(term.lang),
                'auto_approve': auto_approve,
                'comment': comment,
                'callback_url': "%s/website/gengo_callback?pgk=%s&db=%s" % (base_url, self.get_gengo_key(cr), cr.dbname)
            }
    return {'jobs': jobs, 'as_group': 0}
def encode_header(header_text):
    """Returns an appropriate representation of the given header value,
       suitable for direct assignment as a header value in an
       email.message.Message. RFC2822 assumes that headers contain only 7-bit
       characters, so we ensure it is the case, using RFC2047 encoding when
       needed.

       :param header_text: unicode or utf-8 encoded string with header value
       :rtype: string | email.header.Header
       :return: if ``header_text`` represents a plain ASCII string,
                return the same 7-bit string, otherwise returns an email.header.Header
                that will perform the appropriate RFC2047 encoding of
                non-ASCII values.
    """
    if not header_text:
        return ""
    # convert anything to utf-8, suitable for testing ASCIIness, as 7-bit
    # chars are encoded as ASCII in utf-8
    utf8_value = tools.ustr(header_text).encode('utf-8')
    ascii_value = try_coerce_ascii(utf8_value)
    if ascii_value:
        return ascii_value
    # non-ASCII content: wrap in an email.header.Header that takes care of
    # RFC2047-encoding it as a 7-bit string
    return Header(utf8_value, 'utf-8')
def remove_accents(input_str):
    """Suboptimal-but-better-than-nothing way to replace accented
    latin letters by an ASCII equivalent. Will obviously change the
    meaning of input_str and work only for some cases"""
    # decompose each accented character into base letter + combining mark,
    # then drop the combining marks
    decomposed = unicodedata.normalize('NFKD', ustr(input_str))
    return u''.join(ch for ch in decomposed if not unicodedata.combining(ch))
def _get_source(self, cr, uid, name, types, lang, source=None, res_id=None):
    """Look up a translation by term name, type(s), language and optional
    source text.

    All values passed to this method should be unicode (not byte strings),
    especially ``source``.

    :param name: identification of the term to translate, such as field
                 name (optional if source is passed)
    :param types: single string defining type of term to translate (see
                  ``type`` field on ir.translation), or sequence of allowed
                  types (strings)
    :param lang: language code of the desired translation
    :param source: optional source term to translate (should be unicode)
    :param res_id: optional resource id or a list of ids to translate
                   (if used, ``source`` should be set)
    :rtype: unicode
    :return: the requested translation, or an empty unicode string if no
             translation was found and ``source`` was not passed
    """
    # FIXME: should assert that `source` is unicode and fix all callers to
    # always pass unicode so we can remove the string encoding/decoding.
    if not lang:
        # no target language: just echo the source back
        return tools.ustr(source or '')
    if isinstance(types, basestring):
        types = (types, )
    if res_id:
        # the cached lookup needs a hashable (tuple) res_id
        res_id = tuple(res_id) if not isinstance(res_id, (int, long)) else (res_id, )
    return self.__get_source(cr, uid, name, types, lang, source, res_id)
def initialize_sys_path():
    """
    Setup an import-hook to be able to import eCore addons from the different
    addons paths.

    This ensures something like ``import crm`` (or even
    ``import ecore.addons.crm``) works even if the addons are not in the
    PYTHONPATH.
    """
    global ad_paths
    global hooked

    # collect every candidate path, then register the new ones in order
    candidates = [tools.config.addons_data_dir]
    candidates.extend(os.path.abspath(tools.ustr(p.strip()))
                      for p in tools.config['addons_path'].split(','))
    # add base module path
    candidates.append(os.path.abspath(
        os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons')))
    for path in candidates:
        if path not in ad_paths:
            ad_paths.append(path)

    if not hooked:
        sys.meta_path.append(AddonsImportHook())
        hooked = True
def exp_render_report(db, uid, object, ids, datas=None, context=None):
    # Render a report and record its outcome in the module-level
    # ``self_reports`` registry under a freshly allocated slot id.
    if not datas:
        datas={}
    if not context:
        context={}

    # allocate a unique report id under the lock
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    cr = ecore.registry(db).cursor()
    try:
        result, format = ecore.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = ecore.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        # state=True flags the slot as finished for pollers
        self_reports[id]['state'] = True
    except Exception, exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self_reports[id]['exception'] = ecore.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = ecore.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
        self_reports[id]['state'] = True
    # NOTE(review): the cursor is neither committed nor closed in this
    # snippet -- presumably handled elsewhere or truncated here; verify
    # against the full source (compare the nested ``go`` worker which does
    # cr.commit()/cr.close()).
def save_as_template(self):
    """ hit save as template button: current form value will be a new
    template attached to the current document. """
    for record in self:
        models = self.env['ir.model'].search([('model', '=', record.model or 'mail.message')])
        model_name = models.name if models else ''
        template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
        template = self.env['mail.template'].create({
            'name': template_name,
            'subject': record.subject or False,
            'body_html': record.body or False,
            'model_id': models.id or False,
            'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
        })
        # generate the saved template
        record.write({'template_id': template.id})
        record.onchange_template_id_wrapper()
        return _reopen(self, record.id, record.model)
def import_lang(self, cr, uid, ids, context=None): if context is None: context = {} this = self.browse(cr, uid, ids[0]) if this.overwrite: context = dict(context, overwrite=True) fileobj = TemporaryFile('w+') try: fileobj.write(base64.decodestring(this.data)) # now we determine the file format fileobj.seek(0) fileformat = os.path.splitext(this.filename)[-1][1:].lower() tools.trans_load_data(cr, fileobj, fileformat, this.code, lang_name=this.name, context=context) except Exception, e: _logger.exception( 'File unsuccessfully imported, due to format mismatch.') raise UserError( _('File not imported due to format mismatch or a malformed file. (Valid formats are .csv, .po, .pot)\n\nTechnical Details:\n%s' ) % tools.ustr(e))
def connect(self, host, port, user=None, password=None, encryption=False, smtp_debug=False):
    """Open (and optionally authenticate) a new SMTP connection.

    :param host: host or IP of SMTP server to connect to
    :param int port: SMTP port to connect to
    :param user: optional username to authenticate with
    :param password: optional password to authenticate with
    :param string encryption: optional, ``'ssl'`` | ``'starttls'``
    :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
                            will be output in logs)
    :return: the connected (and possibly authenticated) smtplib object
    """
    if encryption == 'ssl':
        if 'SMTP_SSL' not in smtplib.__all__:
            raise UserError(_("Your eCore Server does not support SMTP-over-SSL. You could use STARTTLS instead."
                              "If SSL is needed, an upgrade to Python 2.6 on the server-side should do the trick."))
        connection = smtplib.SMTP_SSL(host, port)
    else:
        connection = smtplib.SMTP(host, port)
    connection.set_debuglevel(smtp_debug)
    if encryption == 'starttls':
        # starttls() will perform ehlo() if needed first
        # and will discard the previous list of services
        # after successfully performing STARTTLS command,
        # (as per RFC 3207) so for example any AUTH
        # capability that appears only on encrypted channels
        # will be correctly detected for next step
        connection.starttls()

    if user:
        # Attempt authentication - will raise if AUTH service not supported.
        # The user/password must be converted to bytestrings in order to be
        # usable for certain hashing schemes, like HMAC.
        # See also bug #597143 and python issue #5285
        connection.login(tools.ustr(user).encode('utf-8'),
                         tools.ustr(password).encode('utf-8'))
    return connection
def extract_rfc2822_addresses(text):
    """Returns a list of valid RFC2822 addresses
       that can be found in ``source``, ignoring
       malformed ones and non-ASCII ones.
    """
    if not text:
        return []
    found = address_pattern.findall(tools.ustr(text).encode('utf-8'))
    # keep only the addresses that coerce cleanly to ASCII
    return [addr for addr in found if try_coerce_ascii(addr)]
def __get_source(self, cr, uid, name, types, lang, source, res_id):
    """Run the translation lookup query; fall back to ``source`` when no
    translation exists."""
    # res_id is a tuple or None, otherwise ormcache cannot cache it!
    query, params = self._get_source_query(cr, uid, name, types, lang, source, res_id)
    cr.execute(query, params)
    row = cr.fetchone()
    translation = row[0] if row and row[0] else u''
    if translation:
        return translation
    # no translation found: echo the (unicode-coerced) source term, if any
    return tools.ustr(source) if source else translation
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
    """Assemble a comma-separated geocoding query string from address parts,
    skipping the missing ones."""
    if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
        # put country qualifier in front, otherwise GMap gives wrong results,
        # e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
        country = '{1} {0}'.format(*country.split(',', 1))
    components = [street, ("%s %s" % (zip or '', city or '')).strip(), state, country]
    return tools.ustr(', '.join(filter(None, components)))
def exp_render_report(db, uid, object, ids, datas=None, context=None):
    # Synchronous report rendering: allocate a slot in ``self_reports``,
    # render, and store either the result or the exception in the slot.
    if not datas:
        datas = {}
    if not context:
        context = {}

    # the shared counter must be bumped under the lock
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    self_reports[id] = {
        'uid': uid,
        'result': False,
        'state': False,
        'exception': None
    }

    cr = ecore.registry(db).cursor()
    try:
        result, format = ecore.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = ecore.exceptions.DeferredException(
                'RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception, exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            # exception carries its own name/value pair: preserve both
            self_reports[id]['exception'] = ecore.exceptions.DeferredException(
                tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = ecore.exceptions.DeferredException(
                tools.exception_to_unicode(exception), tb)
        # finished, albeit with an error
        self_reports[id]['state'] = True
    # NOTE(review): no cr.commit()/cr.close() here -- either handled by a
    # caller or this snippet is truncated; confirm upstream.
def button_confirm_login(self, cr, uid, ids, context=None):
    # Try connecting to each selected server; mark it 'done' on success,
    # otherwise surface the failure to the user.
    if context is None:
        context = {}
    for server in self.browse(cr, uid, ids, context=context):
        try:
            connection = server.connect()
            server.write({'state':'done'})
        except Exception, e:
            _logger.info("Failed to connect to %s server %s.", server.type, server.name, exc_info=True)
            raise UserError(_("Connection test failed: %s") % tools.ustr(e))
        # NOTE(review): the `finally:` clause below has no body in this
        # snippet -- the block appears truncated (presumably it should
        # close `connection`); verify against the full source.
        finally:
def from_html(self, cr, uid, model, field, element, context=None):
    """Map the label displayed in ``element`` back to its selection key."""
    record = self.browse(cr, uid, [], context=context)
    value = element.text_content().strip()
    selection = field.get_description(record.env)['selection']
    for key, label in selection:
        if isinstance(label, str):
            label = ustr(label)
        if value == label:
            return key
    raise ValueError(u"No value found for label %s in selection %s" % (value, selection))
def connect(self, host, port, user=None, password=None, encryption=False, smtp_debug=False):
    """Establish a new SMTP session, encrypted and authenticated as requested.

    :param host: host or IP of SMTP server to connect to
    :param int port: SMTP port to connect to
    :param user: optional username to authenticate with
    :param password: optional password to authenticate with
    :param string encryption: optional, ``'ssl'`` | ``'starttls'``
    :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o
                            will be output in logs)
    :return: the ready-to-use smtplib connection
    """
    use_ssl = (encryption == 'ssl')
    if use_ssl and 'SMTP_SSL' not in smtplib.__all__:
        raise UserError(_("Your eCore Server does not support SMTP-over-SSL. You could use STARTTLS instead."
                          "If SSL is needed, an upgrade to Python 2.6 on the server-side should do the trick."))
    if use_ssl:
        connection = smtplib.SMTP_SSL(host, port)
    else:
        connection = smtplib.SMTP(host, port)
    connection.set_debuglevel(smtp_debug)
    if encryption == 'starttls':
        # starttls() will perform ehlo() if needed first
        # and will discard the previous list of services
        # after successfully performing STARTTLS command,
        # (as per RFC 3207) so for example any AUTH
        # capability that appears only on encrypted channels
        # will be correctly detected for next step
        connection.starttls()

    if user:
        # Attempt authentication - will raise if AUTH service not supported
        # The user/password must be converted to bytestrings in order to be usable for
        # certain hashing schemes, like HMAC.
        # See also bug #597143 and python issue #5285
        login = tools.ustr(user).encode('utf-8')
        secret = tools.ustr(password).encode('utf-8')
        connection.login(login, secret)
    return connection
def test_smtp_connection(self, cr, uid, ids, context=None):
    # Attempt an SMTP session with each server's stored settings, raising a
    # user-visible error with the underlying cause on failure.
    for smtp_server in self.browse(cr, uid, ids, context=context):
        smtp = False
        try:
            smtp = self.connect(smtp_server.smtp_host, smtp_server.smtp_port, user=smtp_server.smtp_user,
                                password=smtp_server.smtp_pass, encryption=smtp_server.smtp_encryption,
                                smtp_debug=smtp_server.smtp_debug)
        except Exception, e:
            raise UserError(_("Connection Test Failed! Here is what we got instead:\n %s") % tools.ustr(e))
        # NOTE(review): the `finally:` below is bodyless in this snippet --
        # presumably it should quit/close `smtp` when set; the block looks
        # truncated, verify against the full source.
        finally:
def encode_rfc2822_address_header(header_text):
    """If ``header_text`` contains non-ASCII characters, attempts to locate
    patterns of the form ``"Name" <address@domain>`` and replace the
    ``"Name"`` portion by the RFC2047-encoded version, preserving the
    address part untouched.
    """
    def encode_addr(addr):
        name, email = addr
        if not try_coerce_ascii(name):
            # non-ASCII display name: RFC2047-encode it, leave the address alone
            name = str(Header(name, 'utf-8'))
        return formataddr((name, email))

    parsed = getaddresses([tools.ustr(header_text).encode('utf-8')])
    return COMMASPACE.join(encode_addr(addr) for addr in parsed)
def _process_text(self, txt):
    """Translate ``txt`` according to the language in the local context,
       replace dynamic ``[[expr]]`` with their real value, then escape
       the result for XML.

       :param str txt: original text to translate (must NOT be XML-escaped)
       :return: translated text, with dynamic expressions evaluated and
                with special XML characters escaped (``&,<,>``).
    """
    # without a rendering context there is nothing to translate/evaluate
    if not self.localcontext:
        return str2xml(txt)
    if not txt:
        return ''
    result = ''
    # split into alternating literal-text / [[ expression ]] chunks
    sps = _regex.split(txt)
    while sps:
        # This is a simple text to translate
        to_translate = tools.ustr(sps.pop(0))
        result += tools.ustr(
            self.localcontext.get('translate', lambda x: x)(to_translate))
        if sps:
            # next chunk is a dynamic expression
            txt = None
            try:
                expr = sps.pop(0)
                # NOTE(review): template expressions are eval'd against the
                # local rendering context -- templates must be trusted input.
                txt = eval(expr, self.localcontext)
                if txt and isinstance(txt, basestring):
                    txt = tools.ustr(txt)
            except Exception:
                # evaluation errors are logged and the chunk is dropped
                _logger.info(
                    "Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.",
                    expr, self.localcontext)
            if isinstance(txt, basestring):
                result += txt
            elif txt and (txt is not None) and (txt is not False):
                # truthy non-string (number, record, ...): coerce to unicode
                result += ustr(txt)
    return str2xml(result)
def button_confirm_login(self, cr, uid, ids, context=None):
    # Validate each selected server's credentials by actually connecting;
    # success flips the server state to 'done'.
    if context is None:
        context = {}
    for server in self.browse(cr, uid, ids, context=context):
        try:
            connection = server.connect()
            server.write({'state': 'done'})
        except Exception, e:
            _logger.info("Failed to connect to %s server %s.", server.type, server.name, exc_info=True)
            raise UserError(_("Connection test failed: %s") % tools.ustr(e))
        # NOTE(review): bodyless `finally:` -- the snippet appears truncated
        # (presumably the connection should be closed here); confirm upstream.
        finally:
def update_list(self, cr, uid, context=None): res = [0, 0] # [update, add] default_version = modules.adapt_version('1.0') known_mods = self.browse(cr, uid, self.search(cr, uid, [])) known_mods_names = dict([(m.name, m) for m in known_mods]) # iterate through detected modules and update/create them in db for mod_name in modules.get_modules(): mod = known_mods_names.get(mod_name) terp = self.get_module_info(mod_name) values = self.get_values_from_terp(terp) if mod: updated_values = {} for key in values: old = getattr(mod, key) updated = isinstance(values[key], basestring) and tools.ustr( values[key]) or values[key] if (old or updated) and updated != old: updated_values[key] = values[key] if terp.get('installable', True) and mod.state == 'uninstallable': updated_values['state'] = 'uninstalled' if parse_version(terp.get( 'version', default_version)) > parse_version( mod.latest_version or default_version): res[0] += 1 if updated_values: self.write(cr, uid, mod.id, updated_values) else: mod_path = modules.get_module_path(mod_name) if not mod_path: continue if not terp or not terp.get('installable', True): continue id = self.create( cr, uid, dict(name=mod_name, state='uninstalled', **values)) mod = self.browse(cr, uid, id) res[1] += 1 self._update_dependencies(cr, uid, mod, terp.get('depends', [])) self._update_category(cr, uid, mod, terp.get('category', 'Uncategorized')) return res
def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
    """ If an addon is already installed, set it to readonly as
    res.config.installer doesn't handle uninstallations of already
    installed addons """
    fields = super(res_config_installer, self).fields_get(
        cr, uid, fields, context, write_access, attributes)

    installed_note = _('\n\nThis addon is already installed on your system')
    for name in self.already_installed(cr, uid, context=context):
        if name not in fields:
            continue
        # lock the field and explain why in its tooltip
        fields[name].update(
            readonly=True,
            help=ustr(fields[name].get('help', '')) + installed_note)
    return fields
def create(self, cr, uid, ids, datas, context):
    # Report-engine entry point: build the XML representation, optionally
    # return it raw, otherwise convert to RML and render to the requested
    # output format.
    registry = ecore.registry(cr.dbname)
    xml = self.create_xml(cr, uid, ids, datas, context)
    xml = tools.ustr(xml).encode('utf8')
    report_type = datas.get('report_type', 'pdf')
    if report_type == 'raw':
        # caller wants the intermediate XML, skip rendering entirely
        return xml, report_type
    # make sure fonts are registered before rendering (lazy scan)
    registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
    rml = self.create_rml(cr, xml, uid, context)
    ir_actions_report_xml_obj = registry['ir.actions.report.xml']
    # self.name[7:] strips a 7-char prefix to get the report_name --
    # presumably the 'report.' service prefix; confirm against callers.
    report_xml_ids = ir_actions_report_xml_obj.search(cr, uid, [('report_name', '=', self.name[7:])], context=context)
    self.title = report_xml_ids and ir_actions_report_xml_obj.browse(cr,uid,report_xml_ids)[0].name or 'eCore Report'
    # pick the renderer registered for the requested output format
    create_doc = self.generators[report_type]
    pdf = create_doc(rml, title=self.title)
    return pdf, report_type
def _str_to_selection(self, model, field, value):
    """Resolve an imported string to a selection key, matching either the
    raw item or any of its (translated) labels."""
    # get untranslated values
    env = self.with_context(lang=None).env
    selection = field.get_description(env)['selection']

    for item, label in selection:
        label = ustr(label)
        # accepted spellings: the label itself plus every known translation
        candidates = [label]
        candidates.extend(self._get_translations(('selection', 'model', 'code'), label))
        if value == unicode(item) or value in candidates:
            return item, []
    raise self._format_import_error(
        ValueError,
        _(u"Value '%s' not found in selection field '%%(field)s'"),
        value,
        {'moreinfo': [_label or unicode(item) for item, _label in selection if _label or item]})
def change_product_qty(self, cr, uid, ids, context=None):
    """ Changes the Product Quantity by making a Physical Inventory. """
    if context is None:
        context = {}
    inventory_obj = self.pool.get('stock.inventory')
    inventory_line_obj = self.pool.get('stock.inventory.line')
    for data in self.browse(cr, uid, ids, context=context):
        if data.new_quantity < 0:
            raise UserError(_('Quantity cannot be negative.'))
        # NOTE(review): ctx is built but never passed to any call below
        # (the product is re-contextualized via with_context instead) --
        # presumably dead code; confirm before removing.
        ctx = context.copy()
        ctx['location'] = data.location_id.id
        ctx['lot_id'] = data.lot_id.id
        # inventory filter: 'product' only when no specific lot is targeted
        if data.product_id.id and data.lot_id.id:
            filter = 'none'
        elif data.product_id.id:
            filter = 'product'
        else:
            filter = 'none'
        inventory_id = inventory_obj.create(cr, uid, {
            'name': _('INV: %s') % tools.ustr(data.product_id.name),
            'filter': filter,
            'product_id': data.product_id.id,
            'location_id': data.location_id.id,
            'lot_id': data.lot_id.id}, context=context)
        # theoretical quantity is read in the target location/lot context
        product = data.product_id.with_context(location=data.location_id.id, lot_id=data.lot_id.id)
        th_qty = product.qty_available
        line_data = {
            'inventory_id': inventory_id,
            'product_qty': data.new_quantity,
            'location_id': data.location_id.id,
            'product_id': data.product_id.id,
            'product_uom_id': data.product_id.uom_id.id,
            'theoretical_qty': th_qty,
            'prod_lot_id': data.lot_id.id
        }
        inventory_line_obj.create(cr, uid, line_data, context=context)
        # validate the single-line inventory immediately
        inventory_obj.action_done(cr, uid, [inventory_id], context=context)
    return {}
def _sync_response(self, cr, uid, limit=GENGO_DEFAULT_LIMIT, context=None):
    """
    This method will be called by cron services to get translations from
    Gengo. It will read translated terms and comments from Gengo and will
    update respective ir.translation in eCore.
    """
    translation_pool = self.pool.get('ir.translation')
    flag, gengo = self.gengo_authentication(cr, uid, context=context)
    if not flag:
        # authentication failed: `gengo` holds the error message here
        _logger.warning("%s", gengo)
    else:
        offset = 0
        all_translation_ids = translation_pool.search(cr, uid, [('state', '=', 'inprogress'), ('gengo_translation', 'in', ('machine', 'standard', 'pro', 'ultra')), ('order_id', "!=", False)], context=context)
        # process the pending terms in pages of `limit` records
        while True:
            translation_ids = all_translation_ids[offset:offset + limit]
            offset += limit
            if not translation_ids:
                break

            # collect the Gengo order ids and the term ids of this page;
            # term ids are stored as strings to match Gengo's custom_data
            terms_progress = {
                'gengo_order_ids': set(),
                'ir_translation_ids': set(),
            }
            translation_terms = translation_pool.browse(cr, uid, translation_ids, context=context)
            for term in translation_terms:
                terms_progress['gengo_order_ids'].add(term.order_id)
                terms_progress['ir_translation_ids'].add(tools.ustr(term.id))

            for order_id in terms_progress['gengo_order_ids']:
                order_response = gengo.getTranslationOrderJobs(id=order_id)
                jobs_approved = order_response.get('response', []).get('order', []).get('jobs_approved', [])
                gengo_ids = ','.join(jobs_approved)
                if gengo_ids:  # Need to check, because getTranslationJobBatch don't catch this case and so call the getTranslationJobs because no ids in url
                    try:
                        job_response = gengo.getTranslationJobBatch(id=gengo_ids)
                    except:
                        # best-effort sync: skip this order on any API failure
                        continue
                    if job_response['opstat'] == 'ok':
                        for job in job_response['response'].get('jobs', []):
                            # only apply jobs belonging to this page of terms
                            if job.get('custom_data') in terms_progress['ir_translation_ids']:
                                self._update_terms_job(cr, uid, job, context=context)
    return True
def encode_header_param(param_text):
    """Return an RFC2047-safe representation of a header parameter value,
    suitable for direct assignment as the param value (e.g. via
    Message.set_param() or Message.add_header()).

    RFC2822 assumes headers contain only 7-bit characters; pure-ASCII values
    are returned unchanged, anything else is RFC2047-encoded.

    :param param_text: unicode or utf-8 encoded string with header value
    :rtype: string
    :return: the original 7-bit string when ``param_text`` is plain ASCII,
             otherwise an ASCII string with the RFC2047-encoded text
    """
    # For details see the encode_header() method that uses the same logic
    if not param_text:
        return ""
    utf8_value = tools.ustr(param_text).encode('utf-8')
    ascii_value = try_coerce_ascii(utf8_value)
    if ascii_value:
        return ascii_value
    return Charset('utf8').header_encode(utf8_value)
def _prac_amt(self, cr, uid, ids, context=None):
    """Compute the practical amount of each budget line: the sum of the
    analytic lines booked on the line's analytic account, restricted to the
    budget period and to the general accounts of the budgetary position.

    :return: dict mapping each budget line id to its practical amount (float)
    :raises UserError: if a line's budgetary position has no accounts
    """
    res = {}
    if context is None:
        context = {}
    for line in self.browse(cr, uid, ids, context=context):
        # BUGFIX: reset per line. `result` used to be initialized once
        # before the loop, so a line without an analytic account silently
        # inherited the previous line's amount.
        result = 0.0
        acc_ids = [x.id for x in line.general_budget_id.account_ids]
        if not acc_ids:
            raise UserError(_("The Budget '%s' has no accounts!") % ustr(line.general_budget_id.name))
        date_to = line.date_to
        date_from = line.date_from
        if line.analytic_account_id.id:
            cr.execute("SELECT SUM(amount) FROM account_analytic_line WHERE account_id=%s AND (date "
                       "between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
                       "general_account_id=ANY(%s)",
                       (line.analytic_account_id.id, date_from, date_to, acc_ids,))
            result = cr.fetchone()[0]
            if result is None:
                # SUM() over no rows yields NULL.
                result = 0.00
        res[line.id] = result
    return res
def attr_get(node, attrs, dict=None):
    """Extract attributes from ``node`` into a plain dict.

    Attribute names in ``attrs`` are always parsed as units; ``dict`` maps
    additional attribute names to a type tag ('str', 'bool', 'int', 'unit'
    or 'float') controlling how their value is converted. Missing or empty
    attributes, and unknown type tags, are skipped.
    """
    if dict is None:
        dict = {}
    res = {}
    # Plain measurement attributes: always unit-converted.
    for name in attrs:
        if node.get(name):
            res[name] = unit_get(node.get(name))
    # Lazy lambdas so the module-level helpers are only resolved when the
    # corresponding type tag is actually used.
    casts = {
        'str': lambda v: tools.ustr(v),
        'bool': lambda v: bool_get(v),
        'int': lambda v: int(v),
        'unit': lambda v: unit_get(v),
        'float': lambda v: float(v),
    }
    for key in dict:
        value = node.get(key)
        if value:
            cast = casts.get(dict[key])
            if cast:
                res[key] = cast(value)
    return res
def attr_get(node, attrs, dict=None):
    """Build a dict of converted attribute values read from ``node``.

    Every name in ``attrs`` is read and unit-converted. ``dict`` maps extra
    attribute names to one of the type tags 'str', 'bool', 'int', 'unit' or
    'float', selecting the conversion applied to that attribute. Empty or
    absent attributes and unrecognized tags contribute nothing.
    """
    if dict is None:
        dict = {}
    res = {}
    for attr_name in attrs:
        raw = node.get(attr_name)
        if raw:
            res[attr_name] = unit_get(raw)
    for key in dict:
        raw = node.get(key)
        if not raw:
            # Guard clause: skip missing/empty attributes early.
            continue
        kind = dict[key]
        if kind == 'int':
            res[key] = int(raw)
        elif kind == 'float':
            res[key] = float(raw)
        elif kind == 'bool':
            res[key] = bool_get(raw)
        elif kind == 'unit':
            res[key] = unit_get(raw)
        elif kind == 'str':
            res[key] = tools.ustr(raw)
    return res
def change_product_qty(self, cr, uid, ids, context=None):
    """Change the product quantity by creating and validating a single-line
    physical inventory at the wizard's location (and lot, if any).

    :raises UserError: when the new quantity is negative
    :return: empty action dict (closes the wizard)
    """
    context = dict(context or {})
    inventory_model = self.pool.get('stock.inventory')
    line_model = self.pool.get('stock.inventory.line')
    for rec in self.browse(cr, uid, ids, context=context):
        if rec.new_quantity < 0:
            raise UserError(_('Quantity cannot be negative.'))
        # NOTE(review): this ctx dict is never used afterwards in the
        # original either; kept to avoid any behavior drift.
        ctx = context.copy()
        ctx['location'] = rec.location_id.id
        ctx['lot_id'] = rec.lot_id.id
        if rec.product_id.id and rec.lot_id.id:
            inv_filter = 'none'
        elif rec.product_id.id:
            inv_filter = 'product'
        else:
            inv_filter = 'none'
        inventory_id = inventory_model.create(cr, uid, {
            'name': _('INV: %s') % tools.ustr(rec.product_id.name),
            'filter': inv_filter,
            'product_id': rec.product_id.id,
            'location_id': rec.location_id.id,
            'lot_id': rec.lot_id.id,
        }, context=context)
        # Current on-hand quantity at that location/lot, used as the
        # inventory's theoretical quantity.
        product = rec.product_id.with_context(
            location=rec.location_id.id,
            lot_id=rec.lot_id.id,
        )
        line_model.create(cr, uid, {
            'inventory_id': inventory_id,
            'product_qty': rec.new_quantity,
            'location_id': rec.location_id.id,
            'product_id': rec.product_id.id,
            'product_uom_id': rec.product_id.uom_id.id,
            'theoretical_qty': product.qty_available,
            'prod_lot_id': rec.lot_id.id,
        }, context=context)
        inventory_model.action_done(cr, uid, [inventory_id], context=context)
    return {}
def update_list(self, cr, uid, context=None): res = [0, 0] # [update, add] default_version = modules.adapt_version('1.0') known_mods = self.browse(cr, uid, self.search(cr, uid, [])) known_mods_names = dict([(m.name, m) for m in known_mods]) # iterate through detected modules and update/create them in db for mod_name in modules.get_modules(): mod = known_mods_names.get(mod_name) terp = self.get_module_info(mod_name) values = self.get_values_from_terp(terp) if mod: updated_values = {} for key in values: old = getattr(mod, key) updated = isinstance(values[key], basestring) and tools.ustr(values[key]) or values[key] if (old or updated) and updated != old: updated_values[key] = values[key] if terp.get('installable', True) and mod.state == 'uninstallable': updated_values['state'] = 'uninstalled' if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version): res[0] += 1 if updated_values: self.write(cr, uid, mod.id, updated_values) else: mod_path = modules.get_module_path(mod_name) if not mod_path: continue if not terp or not terp.get('installable', True): continue id = self.create(cr, uid, dict(name=mod_name, state='uninstalled', **values)) mod = self.browse(cr, uid, id) res[1] += 1 self._update_dependencies(cr, uid, mod, terp.get('depends', [])) self._update_category(cr, uid, mod, terp.get('category', 'Uncategorized')) return res
def onchange_employee_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
    """Onchange handler: recompute the payslip defaults when the employee,
    contract or period changes.

    Removes the existing worked-days and input lines, then fills in the
    slip name, company, contract, salary structure and the computed
    worked-days/input lines for the selected employee and period.

    :param date_from: period start, 'YYYY-MM-DD' string
    :param date_to: period end, 'YYYY-MM-DD' string
    :param employee_id: id of the hr.employee (False to reset)
    :param contract_id: optional id of a specific hr.contract
    :return: onchange dict ``{'value': {...}}``
    """
    empolyee_obj = self.pool.get('hr.employee')
    contract_obj = self.pool.get('hr.contract')
    worked_days_obj = self.pool.get('hr.payslip.worked_days')
    input_obj = self.pool.get('hr.payslip.input')
    if context is None:
        context = {}
    #delete old worked days lines
    old_worked_days_ids = ids and worked_days_obj.search(cr, uid, [('payslip_id', '=', ids[0])], context=context) or False
    if old_worked_days_ids:
        worked_days_obj.unlink(cr, uid, old_worked_days_ids, context=context)
    #delete old input lines
    old_input_ids = ids and input_obj.search(cr, uid, [('payslip_id', '=', ids[0])], context=context) or False
    if old_input_ids:
        input_obj.unlink(cr, uid, old_input_ids, context=context)
    #defaults
    res = {'value':{
        'line_ids':[],
        'input_line_ids': [],
        'worked_days_line_ids': [],
        #'details_by_salary_head':[], TODO put me back
        'name':'',
        'contract_id': False,
        'struct_id': False,
        }
    }
    # Nothing to compute without an employee and a full period.
    if (not employee_id) or (not date_from) or (not date_to):
        return res
    # Parse the period start to build the "Month-Year" part of the name.
    ttyme = datetime.fromtimestamp(time.mktime(time.strptime(date_from, "%Y-%m-%d")))
    # NOTE: `employee_id` is rebound from an id to a browse record here.
    employee_id = empolyee_obj.browse(cr, uid, employee_id, context=context)
    res['value'].update({
        'name': _('Salary Slip of %s for %s') % (employee_id.name, tools.ustr(ttyme.strftime('%B-%Y'))),
        'company_id': employee_id.company_id.id
    })
    if not context.get('contract', False):
        #fill with the first contract of the employee
        contract_ids = self.get_contract(cr, uid, employee_id, date_from, date_to, context=context)
    else:
        if contract_id:
            #set the list of contract for which the input have to be filled
            contract_ids = [contract_id]
        else:
            #if we don't give the contract, then the input to fill should be for all current contracts of the employee
            contract_ids = self.get_contract(cr, uid, employee_id, date_from, date_to, context=context)
    if not contract_ids:
        return res
    # Only the first matching contract drives the slip defaults.
    contract_record = contract_obj.browse(cr, uid, contract_ids[0], context=context)
    res['value'].update({
        'contract_id': contract_record and contract_record.id or False
    })
    struct_record = contract_record and contract_record.struct_id or False
    if not struct_record:
        # No salary structure on the contract: stop before computing lines.
        return res
    res['value'].update({
        'struct_id': struct_record.id,
    })
    #computation of the salary input
    worked_days_line_ids = self.get_worked_day_lines(cr, uid, contract_ids, date_from, date_to, context=context)
    input_line_ids = self.get_inputs(cr, uid, contract_ids, date_from, date_to, context=context)
    res['value'].update({
        'worked_days_line_ids': worked_days_line_ids,
        'input_line_ids': input_line_ids,
    })
    return res
def go(id, uid, ids, datas, context):
    """Background worker: render the report and record the outcome in the
    shared ``self_reports`` registry under key ``id``.

    Runs with its own cursor; on success stores the rendered ``result`` and
    ``format``, on failure stores a DeferredException instead. In both cases
    ``state`` is set to True so the poller knows the job finished.

    NOTE(review): ``db``, ``object`` and ``self_reports`` come from the
    enclosing scope (closure), not from this function's parameters.
    """
    with ecore.api.Environment.manage():
        cr = ecore.registry(db).cursor()
        try:
            result, format = ecore.report.render_report(cr, uid, ids, object, datas, context)
            if not result:
                # An empty result is treated as a failure; sys.exc_info() is
                # presumably empty here since nothing raised — kept as-is.
                tb = sys.exc_info()
                self_reports[id]['exception'] = ecore.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
            self_reports[id]['result'] = result
            self_reports[id]['format'] = format
            self_reports[id]['state'] = True
        except Exception, exception:
            _logger.exception('Exception: %s\n', exception)
            # Some exception types carry name/value attributes with a more
            # useful message; fall back to the generic conversion otherwise.
            if hasattr(exception, 'name') and hasattr(exception, 'value'):
                self_reports[id]['exception'] = ecore.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
            else:
                tb = sys.exc_info()
                self_reports[id]['exception'] = ecore.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
            self_reports[id]['state'] = True
        # Commit/close run on both paths; the except branch does not re-raise.
        cr.commit()
        cr.close()
def toxml(value):
    """Return ``value`` as a unicode string with the XML special
    characters ``&``, ``<`` and ``>`` escaped.

    :param value: any value; coerced to unicode via tools.ustr
    :return: unicode string safe for inclusion in XML text content
    """
    unicode_value = tools.ustr(value)
    # BUGFIX: the replacements were self-replacements (no-ops) — the entity
    # text had been lost. '&' must be escaped first so the ampersands
    # introduced by the other substitutions are not double-escaped.
    return unicode_value.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
msg = u"%s" % e _logger.info(msg, exc_info=True) raise UserError(msg) else: try : html = body_mako_tpl.render(dict(parser_instance.localcontext)) htmls.append(html) except Exception, e: msg = u"%s" % e _logger.info(msg, exc_info=True) raise UserError(msg) head_mako_tpl = mako_template(header) try : head = head_mako_tpl.render(dict(parser_instance.localcontext, _debug=False)) except Exception, e: raise UserError(tools.ustr(e)) foot = False if footer : foot_mako_tpl = mako_template(footer) try : foot = foot_mako_tpl.render(dict(parser_instance.localcontext)) except Exception, e: msg = u"%s" % e _logger.info(msg, exc_info=True) raise UserError(msg) if report_xml.webkit_debug : try : deb = head_mako_tpl.render(dict(parser_instance.localcontext, _debug=tools.ustr("\n".join(htmls)))) except Exception, e: msg = u"%s" % e _logger.info(msg, exc_info=True)