def get_record_data(self, values):
    """ Returns a defaults-like dict with initial values for the composition
    wizard when sending an email related to a previous email (parent_id) or
    a document (model, res_id). This is based on previously computed default
    values.

    :param dict values: already-computed defaults; may contain 'parent_id',
        'model', 'res_id' and 'partner_ids'
    :return: dict possibly holding 'record_name', 'subject', 'model',
        'res_id' and 'partner_ids'
    """
    result, subject = {}, False
    if values.get('parent_id'):
        parent = self.env['mail.message'].browse(values.get('parent_id'))
        # BUGFIX: the original had a trailing comma after parent.record_name,
        # which silently stored a 1-element tuple instead of the string
        result['record_name'] = parent.record_name
        subject = tools.ustr(parent.subject or parent.record_name or '')
        if not values.get('model'):
            result['model'] = parent.model
        if not values.get('res_id'):
            result['res_id'] = parent.res_id
        # answering a message: propose the parent's followers as recipients
        partner_ids = values.get('partner_ids', list()) + parent.partner_ids.ids
        result['partner_ids'] = partner_ids
    elif values.get('model') and values.get('res_id'):
        doc_name_get = self.env[values.get('model')].browse(values.get('res_id')).name_get()
        result['record_name'] = doc_name_get and doc_name_get[0][1] or ''
        subject = tools.ustr(result['record_name'])

    # prefix the subject with a (possibly translated) "Re:" exactly once
    re_prefix = _('Re:')
    if subject and not (subject.startswith('Re:') or subject.startswith(re_prefix)):
        subject = "%s %s" % (re_prefix, subject)
    result['subject'] = subject
    return result
def amount_to_text(self, amount):
    """Render ``amount`` as words in the user's language, appending the
    currency unit label and, when the fractional part is non-zero, the
    subunit label (e.g. "Forty-Two Dollars and Ten Cents")."""
    self.ensure_one()

    def _to_words(number, lang):
        # num2words raises NotImplementedError for unsupported languages;
        # fall back to English in that case.
        try:
            return num2words(number, lang=lang).title()
        except NotImplementedError:
            return num2words(number, lang='en').title()

    if num2words is None:
        logging.getLogger(__name__).warning(
            "The library 'num2words' is missing, cannot render textual amounts."
        )
        return ""

    # split the amount, rounded to the currency precision, into its
    # integer and fractional components
    rendered = "%.{0}f".format(self.decimal_places) % amount
    int_part, _sep, frac_part = rendered.partition('.')
    units = int(int_part)
    cents = int(frac_part or 0)

    lang_code = self.env.context.get('lang') or self.env.user.lang
    lang = self.env['res.lang'].with_context(active_test=False).search([
        ('code', '=', lang_code)
    ])

    words = tools.ustr('{amt_value} {amt_word}').format(
        amt_value=_to_words(units, lang=lang.iso_code),
        amt_word=self.currency_unit_label,
    )
    if not self.is_zero(amount - units):
        words += ' ' + _('and') + tools.ustr(' {amt_value} {amt_word}').format(
            amt_value=_to_words(cents, lang=lang.iso_code),
            amt_word=self.currency_subunit_label,
        )
    return words
def encode_rfc2822_address_header(header_text):
    """If ``header_text`` contains non-ASCII characters, attempts to locate
    patterns of the form ``"Name" <address@domain>`` and replace the
    ``"Name"`` portion by the RFC2047-encoded version, preserving the
    address part untouched.
    """
    def encode_addr(addr):
        name, email = addr
        # Header() expects a text string; it tries us-ascii, then the
        # charset hint, then utf-8, keeping the first that encodes without
        # UnicodeError. Header.__str__ in Python 3 only approximates the
        # value (unlimited line length), so call encode() explicitly.
        name = Header(pycompat.to_text(name)).encode()
        # getaddresses is naive: when the header does not follow the
        # (name <addr>),* convention we may end up trying to encode
        # meaningless strings as addresses; real addresses containing
        # non-ascii characters also fail here.
        try:
            return formataddr((name, email))
        except UnicodeEncodeError:
            _logger.warning(_('Failed to encode the address %s\nfrom mail header:\n%s') % (addr, header_text))
            return ""

    addresses = getaddresses([pycompat.to_text(ustr(header_text))])
    encoded = (encode_addr(addr) for addr in addresses)
    return COMMASPACE.join(item for item in encoded if item)
def initialize_sys_path():
    """ Setup an import-hook so that OpenERP addons found in the configured
    addons paths can be imported as ``import crm`` (or even
    ``import coffice.addons.crm``) even when they are not on the
    PYTHONPATH.
    """
    global hooked
    addons_paths = coffice.addons.__path__

    # user data dir, only when readable
    dd = os.path.normcase(tools.config.addons_data_dir)
    if os.access(dd, os.R_OK) and dd not in addons_paths:
        addons_paths.append(dd)

    # every entry of the comma-separated addons_path option
    for entry in tools.config['addons_path'].split(','):
        entry = os.path.normcase(os.path.abspath(tools.ustr(entry.strip())))
        if entry not in addons_paths:
            addons_paths.append(entry)

    # add base module path: the 'addons' directory shipped with the package
    base_path = os.path.normcase(os.path.abspath(
        os.path.join(os.path.dirname(os.path.dirname(__file__)), 'addons')))
    if base_path not in addons_paths and os.path.isdir(base_path):
        addons_paths.append(base_path)

    if not hooked:
        # AddonsHook is inserted last so it is consulted before COfficeHook
        sys.meta_path.insert(0, COfficeHook())
        sys.meta_path.insert(0, AddonsHook())
        hooked = True
def _get_or_create_user(self, conf, login, ldap_entry):
    """ Retrieve an active res.users record with the specified login,
    creating it from the LDAP entry when the configuration allows it.

    :param dict conf: LDAP configuration
    :param login: the user's login
    :param tuple ldap_entry: single LDAP result (dn, attrs)
    :return: res_users id
    :rtype: int
    :raises AccessDenied: when no user is found and creation is disabled,
        or when the user exists but is deactivated
    """
    login = tools.ustr(login.lower().strip())
    self.env.cr.execute(
        "SELECT id, active FROM res_users WHERE lower(login)=%s", (login,))
    res = self.env.cr.fetchone()
    if res:
        # an inactive existing user falls through to AccessDenied below
        if res[1]:
            return res[0]
    elif conf['create_user']:
        _logger.debug("Creating new COffice user \"%s\" from LDAP" % login)
        values = self._map_ldap_attributes(conf, login, ldap_entry)
        SudoUser = self.env['res.users'].sudo().with_context(no_reset_password=True)
        if conf['user']:
            # create by copying the configured template user
            values['active'] = True
            return SudoUser.browse(conf['user'][0]).copy(default=values).id
        return SudoUser.create(values).id
    raise AccessDenied(
        _("No local user found for LDAP login and not configured to create one"))
def _get_source(self, name, types, lang, source=None, res_id=None):
    """ Return the translation for the given combination of ``name``,
    ``types``, ``lang`` and ``source``. All values passed to this method
    should be unicode (not byte strings), especially ``source``.

    :param name: identification of the term to translate, such as field
        name (optional if source is passed)
    :param types: single string defining type of term to translate (see
        ``type`` field on ir.translation), or sequence of allowed types
    :param lang: language code of the desired translation
    :param source: optional source term to translate (should be unicode)
    :param res_id: optional resource id or a list of ids to translate
        (if used, ``source`` should be set)
    :rtype: unicode
    :return: the requested translation, or an empty unicode string if no
        translation was found and ``source`` was not passed
    """
    # FIXME: should assert that `source` is unicode and fix all callers to
    # always pass unicode so we can remove the string encoding/decoding.
    if not lang:
        return tools.ustr(source or '')
    if isinstance(types, str):
        types = (types,)
    if res_id:
        # normalize to a tuple so the ormcache-backed helper can hash it
        res_id = (res_id,) if isinstance(res_id, int) else tuple(res_id)
    return self.__get_source(name, types, lang, source, res_id)
def _get_graph_data(self, question, current_filters=None):
    '''Returns the JSON-encoded data required by the graph library for the
    given question, computed with the currently applied filters.'''
    # TODO refactor this terrible method and merge it with _prepare_result_dict
    current_filters = current_filters or []
    Survey = request.env['survey.survey']
    result = []
    if question.question_type == 'multiple_choice':
        result.append({
            'key': ustr(question.question),
            'values': Survey.prepare_result(question, current_filters)['answers'],
        })
    if question.question_type == 'simple_choice':
        result = Survey.prepare_result(question, current_filters)['answers']
    if question.question_type == 'matrix':
        data = Survey.prepare_result(question, current_filters)
        # one series per answer column, one point per matrix row
        for answer in data['answers']:
            cells = [{
                'text': data['rows'].get(row),
                'count': data['result'].get((row, answer)),
            } for row in data['rows']]
            result.append({
                'key': data['answers'].get(answer),
                'values': cells,
            })
    return json.dumps(result)
def write(self, vals):
    """Extend write(): expand ``user_domain`` into ``user_ids`` commands,
    subscribe users when reports are enabled, and react to state changes
    (generate goals, grant rewards, or guard against resetting)."""
    if vals.get('user_domain'):
        challengers = self._get_challenger_users(ustr(vals.get('user_domain')))
        if not vals.get('user_ids'):
            vals['user_ids'] = []
        vals['user_ids'].extend((4, user.id) for user in challengers)

    write_res = super(Challenge, self).write(vals)

    if vals.get('report_message_frequency', 'never') != 'never':
        # _recompute_challenge_users do not set users for challenges with no
        # reports, subscribing them now
        for challenge in self:
            challenge.message_subscribe(
                [user.partner_id.id for user in challenge.user_ids])

    state = vals.get('state')
    if state == 'inprogress':
        self._recompute_challenge_users()
        self._generate_goals_from_challenge()
    elif state == 'done':
        self._check_challenge_reward(force=True)
    elif state == 'draft':
        # resetting progress
        unfinished = self.env['gamification.goal'].search(
            [('challenge_id', 'in', self.ids), ('state', '=', 'inprogress')],
            limit=1)
        if unfinished:
            raise exceptions.UserError(
                _("You can not reset a challenge with unfinished goals."))
    return write_res
def extract_rfc2822_addresses(text):
    """Returns a list of valid RFC2822 addresses that can be found in
    ``source``, ignoring malformed ones and non-ASCII ones.
    """
    if not text:
        return []
    return [addr for addr in address_pattern.findall(ustr(text)) if is_ascii(addr)]
def _geoip_setup_resolver(cls):
    """Lazily initialize the process-wide GeoIP resolver from the configured
    database file; a no-op when it was already set up (False on failure)."""
    if coffice._geoip_resolver is not None:
        return
    geofile = config.get('geoip_database')
    try:
        resolver = GeoIPResolver.open(geofile)
        coffice._geoip_resolver = resolver or False
    except Exception as exc:
        # best effort: geolocation is optional, only warn on failure
        _logger.warning('Cannot load GeoIP: %s', ustr(exc))
def test_smtp_connection(self):
    """Validate each server's SMTP configuration by walking through the
    envelope steps of a message (MAIL FROM / RCPT TO / DATA) without ever
    sending anything, raising a UserError describing the first failure.

    :return: a ``display_notification`` client action on success
    :raises UserError: when the connection or any envelope step is refused
    """
    for server in self:
        smtp = False
        try:
            smtp = self.connect(mail_server_id=server.id)
            # simulate sending an email from current user's address - without sending it!
            email_from, email_to = self.env.user.email, '*****@*****.**'
            if not email_from:
                raise UserError(
                    _('Please configure an email on the current user to simulate '
                      'sending an email message via this outgoing server'))
            # Testing the MAIL FROM step should detect sender filter problems
            (code, repl) = smtp.mail(email_from)
            if code != 250:
                raise UserError(
                    _('The server refused the sender address (%(email_from)s) '
                      'with error %(repl)s') % locals())
            # Testing the RCPT TO step should detect most relaying problems
            (code, repl) = smtp.rcpt(email_to)
            if code not in (250, 251):
                raise UserError(
                    _('The server refused the test recipient (%(email_to)s) '
                      'with error %(repl)s') % locals())
            # Beginning the DATA step should detect some deferred rejections
            # Can't use self.data() as it would actually send the mail!
            smtp.putcmd("data")
            (code, repl) = smtp.getreply()
            if code != 354:
                raise UserError(
                    _('The server refused the test connection '
                      'with error %(repl)s') % locals())
        except UserError as e:
            # let UserErrors (messages) bubble up
            raise e
        except Exception as e:
            raise UserError(
                _("Connection Test Failed! Here is what we got instead:\n %s"
                  ) % ustr(e))
        finally:
            try:
                if smtp:
                    smtp.close()
            except Exception:
                # ignored, just a consequence of the previous exception
                pass
    title = _("Connection Test Succeeded!")
    message = _("Everything seems properly set up!")
    return {
        'type': 'ir.actions.client',
        'tag': 'display_notification',
        'params': {
            'title': title,
            'message': message,
            'sticky': False,
        }
    }
def __get_source(self, name, types, lang, source, res_id):
    """Fetch a single translation value from ir_translation; when nothing
    is stored, fall back to ``source`` (or u'')."""
    # res_id is a tuple or None, otherwise ormcache cannot cache it!
    query, params = self._get_source_query(name, types, lang, source, res_id)
    self._cr.execute(query, params)
    row = self._cr.fetchone()
    translation = row[0] if row and row[0] else u''
    if source and not translation:
        return tools.ustr(source)
    return translation
def from_html(self, model, field, element):
    """Map the element's text content back to the selection key whose label
    matches it, raising ValueError when no label matches."""
    value = element.text_content().strip()
    selection = field.get_description(self.env)['selection']
    for key, label in selection:
        if isinstance(label, str):
            label = ustr(label)
        if value == label:
            return key
    raise ValueError(u"No value found for label %s in selection %s" % (
        value, selection))
def _get_source_query(self, name, types, lang, source, res_id):
    """Build the (query, params) pair used to look up a translation value,
    matching on the source term when one is given, else on the name only."""
    if not source:
        query = """SELECT value FROM ir_translation
                   WHERE lang=%s AND type in %s AND name=%s"""
        return (query, (lang or '', types, tools.ustr(name)))
    # Note: the extra test on md5(src) is a hint for postgres to use the
    # index ir_translation_src_md5
    query = """SELECT value FROM ir_translation
               WHERE lang=%s AND type in %s AND src=%s AND md5(src)=md5(%s)"""
    source = tools.ustr(source)
    params = (lang or '', types, source, source)
    if res_id:
        query += " AND res_id in %s"
        params += (res_id,)
    if name:
        query += " AND name=%s"
        params += (tools.ustr(name),)
    return (query, params)
def _geo_query_address_default(self, street=None, zip=None, city=None, state=None, country=None):
    """Assemble a comma-separated geocoding query string from the non-empty
    address components (zip and city are merged into one component)."""
    parts = [
        street,
        ("%s %s" % (zip or '', city or '')).strip(),
        state,
        country,
    ]
    return tools.ustr(', '.join(part for part in parts if part))
def create(self, vals):
    """Overwrite the create method to add the user of groups"""
    if vals.get('user_domain'):
        # expand the domain into explicit (4, id) link commands
        matched = self._get_challenger_users(ustr(vals.get('user_domain')))
        if not vals.get('user_ids'):
            vals['user_ids'] = []
        vals['user_ids'].extend((4, user.id) for user in matched)
    return super(Challenge, self).create(vals)
def _get_sys_logs(self):
    """ Utility method to send a publisher warranty get logs messages. """
    payload = {'arg0': ustr(self._get_message()), "action": "update"}
    url = config.get("publisher_warranty_url")
    response = requests.post(url, data=payload, timeout=30)
    response.raise_for_status()
    return literal_eval(response.text)
def _sync_response(self, limit=GENGO_DEFAULT_LIMIT):
    """ This method will be called by cron services to get translations from
    Gengo. It will read translated terms and comments from Gengo and will
    update respective ir.translation in COffice.

    :param int limit: number of in-progress translations processed per batch
    :return: True (cron entry point)
    """
    IrTranslation = self.env['ir.translation']
    flag, gengo = self.gengo_authentication()
    if not flag:
        _logger.warning("%s", gengo)
    else:
        offset = 0
        all_translation_ids = IrTranslation.search([
            ('state', '=', 'inprogress'),
            ('gengo_translation', 'in', ('machine', 'standard', 'pro', 'ultra')),
            ('order_id', "!=", False)
        ])
        while True:
            translation_ids = all_translation_ids[offset:offset + limit]
            offset += limit
            if not translation_ids:
                break
            # collect, for this batch, the Gengo order ids and the set of
            # translation ids (as strings, to compare with job custom_data)
            terms_progress = {
                'gengo_order_ids': set(),
                'ir_translation_ids': set(),
            }
            for term in translation_ids:
                terms_progress['gengo_order_ids'].add(term.order_id)
                terms_progress['ir_translation_ids'].add(tools.ustr(term.id))
            for order_id in terms_progress['gengo_order_ids']:
                order_response = gengo.getTranslationOrderJobs(id=order_id)
                # BUGFIX: intermediate defaults must be dicts, not lists —
                # a list default would raise AttributeError on .get()
                jobs_approved = order_response.get('response', {}).get(
                    'order', {}).get('jobs_approved', [])
                gengo_ids = ','.join(jobs_approved)
                if gengo_ids:  # Need to check, because getTranslationJobBatch don't catch this case and so call the getTranslationJobs because no ids in url
                    try:
                        job_response = gengo.getTranslationJobBatch(id=gengo_ids)
                    except Exception:
                        # narrowed from a bare except: keep the best-effort
                        # behavior but stop swallowing KeyboardInterrupt etc.
                        continue
                    if job_response['opstat'] == 'ok':
                        for job in job_response['response'].get('jobs', []):
                            if job.get('custom_data') in terms_progress['ir_translation_ids']:
                                self._update_terms_job(job)
    return True
def import_lang(self):
    """Load the uploaded translation file (.csv/.po/.pot) into the database
    for the selected language.

    :return: True
    :raises UserError: when the file is malformed or its format mismatches
    """
    this = self[0]
    this = this.with_context(overwrite=this.overwrite)
    with TemporaryFile('wb+') as buf:
        try:
            # BUGFIX: base64.decodestring was deprecated and removed in
            # Python 3.9; b64decode is the supported equivalent
            buf.write(base64.b64decode(this.data))
            # now we determine the file format
            buf.seek(0)
            fileformat = os.path.splitext(this.filename)[-1][1:].lower()
            tools.trans_load_data(this._cr, buf, fileformat, this.code,
                                  lang_name=this.name, context=this._context)
        except ProgrammingError as e:
            _logger.exception(
                'File unsuccessfully imported, due to a malformed file.')
            with closing(sql_db.db_connect(self._cr.dbname).cursor()) as cr:
                # BUGFIX: the format string has two placeholders (%r and %s)
                # but only one value was supplied, which raised a TypeError
                # instead of the intended UserError
                raise UserError(
                    _('File %r not imported due to a malformed file.\n\n' +
                      'This issue can be caused by duplicates entries who are referring to the same field. ' +
                      'Please check the content of the file you are trying to import.\n\n' +
                      'Technical Details:\n%s') % (this.filename, tools.ustr(e)))
        except Exception as e:
            _logger.exception(
                'File unsuccessfully imported, due to format mismatch.')
            raise UserError(
                _('File %r not imported due to format mismatch or a malformed file.'
                  ' (Valid formats are .csv, .po, .pot)\n\nTechnical Details:\n%s') %
                (this.filename, tools.ustr(e)))
    return True
def fields_get(self, fields=None, attributes=None):
    """ If an addon is already installed, set it to readonly as
    res.config.installer doesn't handle uninstallations of already
    installed addons """
    fields = super(ResConfigInstaller, self).fields_get(
        fields, attributes=attributes)
    for name in self.already_installed():
        if name not in fields:
            continue
        # mark the field read-only and explain why in its tooltip
        new_help = ustr(fields[name].get('help', '')) + _(
            '\n\nThis addon is already installed on your system')
        fields[name].update(readonly=True, help=new_help)
    return fields
def _str_to_selection(self, model, field, value):
    """Resolve an imported string to a selection key, matching against the
    raw item, its untranslated label, or any of the label's translations."""
    # get untranslated values
    env = self.with_context(lang=None).env
    selection = field.get_description(env)['selection']
    for item, label in selection:
        label = ustr(label)
        candidates = [label] + self._get_translations(
            ('selection', 'model', 'code'), label)
        if value == str(item) or value in candidates:
            return item, []
    raise self._format_import_error(
        ValueError,
        _(u"Value '%s' not found in selection field '%%(field)s'"),
        value,
        {'moreinfo': [_label or str(item) for item, _label in selection if _label or item]}
    )
def save_as_template(self):
    """ hit save as template button: current form value will be a new
    template attached to the current document. """
    for record in self:
        model = self.env['ir.model']._get(record.model or 'mail.message')
        template_values = {
            'name': "%s: %s" % (model.name or '', tools.ustr(record.subject)),
            'subject': record.subject or False,
            'body_html': record.body or False,
            'model_id': model.id or False,
            'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
        }
        template = self.env['mail.template'].create(template_values)
        # generate the saved template
        record.write({'template_id': template.id})
        record.onchange_template_id_wrapper()
        return _reopen(self, record.id, record.model, context=self._context)
def button_confirm_login(self):
    """Check that each server accepts a connection, mark it as confirmed
    ('done'), and always release the connection afterwards.

    :return: True
    :raises UserError: when the connection attempt fails
    """
    for server in self:
        # BUGFIX: initialize so the finally block does not hit an
        # UnboundLocalError (previously silently swallowed) when
        # server.connect() itself raises
        connection = None
        try:
            connection = server.connect()
            server.write({'state': 'done'})
        except Exception as err:
            _logger.info("Failed to connect to %s server %s.",
                         server.server_type, server.name, exc_info=True)
            raise UserError(_("Connection test failed: %s") % tools.ustr(err))
        finally:
            try:
                if connection:
                    if server.server_type == 'imap':
                        connection.close()
                    elif server.server_type == 'pop':
                        connection.quit()
            except Exception:
                # ignored, just a consequence of the previous exception
                pass
    return True
def _unsubscribe_token(self, res_id, email):
    """Generate a secure hash for this mailing list and parameters.

    This is appended to the unsubscription URL and then checked at
    unsubscription time to ensure no malicious unsubscriptions are
    performed.

    :param int res_id: ID of the resource that will be unsubscribed.
    :param str email: Email of the resource that will be unsubscribed.
    """
    secret = self.env["ir.config_parameter"].sudo().get_param("database.secret")
    payload = repr((self.env.cr.dbname, self.id, int(res_id), tools.ustr(email)))
    return hmac.new(secret.encode('utf-8'),
                    payload.encode('utf-8'),
                    hashlib.sha512).hexdigest()
def pack_jobs_request(self, term_ids, context=None):
    '''Prepare the terms that will be requested to Gengo and return them as
    {'jobs': {'<timestamp>-<term id>': {...}, ...}, 'as_group': 0}; terms
    without any word character are skipped.'''
    base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
    IrTranslation = self.env['ir.translation']
    user = self.env.user
    auto_approve = 1 if user.company_id.gengo_auto_approve else 0
    jobs = {}
    for term in term_ids:
        if not re.search(r"\w", term.src or ""):
            continue  # nothing translatable in this term
        comment = user.company_id.gengo_comment or ''
        if term.gengo_comment:
            comment += '\n' + term.gengo_comment
        job_key = time.strftime('%Y%m%d%H%M%S') + '-' + str(term.id)
        jobs[job_key] = {
            'type': 'text',
            'slug': 'Single :: English to ' + term.lang,
            'tier': tools.ustr(term.gengo_translation),
            'custom_data': str(term.id),
            'body_src': term.src,
            'lc_src': 'en',
            'lc_tgt': IrTranslation._get_gengo_corresponding_language(term.lang),
            'auto_approve': auto_approve,
            'comment': comment,
            'callback_url': "%s/website/gengo_callback?pgk=%s&db=%s" % (
                base_url, self._get_gengo_key(), self.env.cr.dbname),
        }
    return {'jobs': jobs, 'as_group': 0}
def encode_header(header_text):
    """Returns an appropriate representation of the given header value,
    suitable for direct assignment as a header value in an
    email.message.Message. RFC2822 assumes that headers contain only 7-bit
    characters, so we ensure it is the case, using RFC2047 encoding when
    needed.

    :param header_text: unicode or utf-8 encoded string with header value
    :rtype: string | email.header.Header
    :return: the same 7-bit string when ``header_text`` represents plain
        ASCII, otherwise an email.header.Header performing the appropriate
        RFC2047 encoding of non-ASCII values.
    """
    if not header_text:
        return ""
    text = ustr(header_text)  # FIXME: require unicode higher up?
    if is_ascii(text):
        return pycompat.to_text(text)
    return Header(text, 'utf-8')
def _authenticate(self, conf, login, password):
    """ Authenticate a user against the specified LDAP server.

    In order to prevent an unintended 'unauthenticated authentication',
    which is an anonymous bind with a valid dn and a blank password,
    check for empty passwords explicitely (:rfc:`4513#section-6.3.1`)

    :param dict conf: LDAP configuration
    :param login: username
    :param password: Password for the LDAP user
    :return: LDAP entry of authenticated user or False
    :rtype: dictionary of attributes
    """
    if not password:
        return False

    try:
        # local renamed from `filter` to avoid shadowing the builtin
        search_filter = filter_format(conf['ldap_filter'], (login,))
    except TypeError:
        _logger.warning(
            'Could not format LDAP filter. Your filter should contain one \'%s\'.')
        return False

    entry = False
    try:
        results = self._query(conf, tools.ustr(search_filter))
        # Get rid of (None, attrs) for searchResultReference replies
        results = [i for i in results if i[0]]
        if len(results) == 1:
            dn = results[0][0]
            conn = self._connect(conf)
            conn.simple_bind_s(dn, to_text(password))
            conn.unbind()
            entry = results[0]
    except ldap.INVALID_CREDENTIALS:
        return False
    except ldap.LDAPError as e:
        _logger.error('An LDAP exception occurred: %s', e)
    return entry
def encode_header_param(param_text):
    """Returns an appropriate RFC2047 encoded representation of the given
    header parameter value, suitable for direct assignation as the param
    value (e.g. via Message.set_param() or Message.add_header())

    RFC2822 assumes that headers contain only 7-bit characters, so we
    ensure it is the case, using RFC2047 encoding when needed.

    :param param_text: unicode or utf-8 encoded string with header value
    :rtype: string
    :return: the same 7-bit string when ``param_text`` represents a plain
        ASCII string, otherwise an ASCII string containing the RFC2047
        encoded text.
    """
    # For details see the encode_header() method that uses the same logic
    if not param_text:
        return ""
    text = ustr(param_text)  # FIXME: require unicode higher up?
    if is_ascii(text):
        return pycompat.to_text(text)  # TODO: is that actually necessary?
    return Charset("utf-8").header_encode(text)
def slugify_one(s, max_length=0):
    """ Transform a string to a slug that can be used in a url path.
    Delegates to python-slugify when that library is available; otherwise
    strips the string, folds unicode chars to ascii, lowercases, and
    collapses spaces and underscores into hyphens "-".

    :param s: str
    :param max_length: int
    :rtype: str
    """
    s = ustr(s)
    if slugify_lib:
        # There are 2 different libraries only python-slugify is supported
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    ascii_text = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
    # non-word chars become spaces, then runs of spaces/hyphens collapse to '-'
    slug = re.sub(r'[-\s]+', '-', re.sub(r'[\W_]', ' ', ascii_text).strip().lower())
    return slug[:max_length] if max_length > 0 else slug
def export_xls(self, data, token):
    """Render the JSON pivot-table payload ``data`` as an .xls download.

    The payload is expected to hold 'title', 'measure_count',
    'origin_count', 'col_group_headers', 'measure_headers',
    'origin_headers' and 'rows' — TODO confirm against the client code
    that builds it.

    :param str data: JSON-encoded pivot table description
    :param token: value echoed back in the 'fileToken' cookie
    :return: an HTTP response streaming the generated workbook
    """
    jdata = json.loads(data)
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet(jdata['title'])
    header_bold = xlwt.easyxf("font: bold on; pattern: pattern solid, fore_colour gray25;")
    header_plain = xlwt.easyxf("pattern: pattern solid, fore_colour gray25;")
    bold = xlwt.easyxf("font: bold on;")
    measure_count = jdata['measure_count']
    origin_count = jdata['origin_count']
    # Step 1: writing col group headers
    col_group_headers = jdata['col_group_headers']
    # x,y: current coordinates
    # carry: queue containing cell information when a cell has a >= 2 height
    # and the drawing code needs to add empty cells below
    x, y, carry = 1, 0, deque()
    for i, header_row in enumerate(col_group_headers):
        worksheet.write(i, 0, '', header_plain)
        for header in header_row:
            # flush carried-over cells that start at the current column
            while (carry and carry[0]['x'] == x):
                cell = carry.popleft()
                for j in range(measure_count * (2 * origin_count - 1)):
                    worksheet.write(y, x+j, '', header_plain)
                if cell['height'] > 1:
                    carry.append({'x': x, 'height': cell['height'] - 1})
                x = x + measure_count * (2 * origin_count - 1)
            # a header spans header['width'] columns; only the first gets text
            for j in range(header['width']):
                worksheet.write(y, x + j, header['title'] if j == 0 else '', header_plain)
            if header['height'] > 1:
                carry.append({'x': x, 'height': header['height'] - 1})
            x = x + header['width']
        # flush remaining carried-over cells at the end of the row
        while (carry and carry[0]['x'] == x):
            cell = carry.popleft()
            for j in range(measure_count * (2 * origin_count - 1)):
                worksheet.write(y, x+j, '', header_plain)
            if cell['height'] > 1:
                carry.append({'x': x, 'height': cell['height'] - 1})
            x = x + measure_count * (2 * origin_count - 1)
        x, y = 1, y + 1
    # Step 2: writing measure headers
    measure_headers = jdata['measure_headers']
    if measure_headers:
        worksheet.write(y, 0, '', header_plain)
        for measure in measure_headers:
            style = header_bold if measure['is_bold'] else header_plain
            worksheet.write(y, x, measure['title'], style)
            # pad the remaining origin columns of this measure
            for i in range(1, 2 * origin_count - 1):
                worksheet.write(y, x+i, '', header_plain)
            x = x + (2 * origin_count - 1)
        x, y = 1, y + 1
    # Step 3: writing origin headers
    origin_headers = jdata['origin_headers']
    if origin_headers:
        worksheet.write(y, 0, '', header_plain)
        for origin in origin_headers:
            style = header_bold if origin['is_bold'] else header_plain
            worksheet.write(y, x, origin['title'], style)
            x = x + 1
        y = y + 1
    # Step 4: writing data
    x = 0
    for row in jdata['rows']:
        # first column: indented row label
        worksheet.write(y, x, row['indent'] * ' ' + ustr(row['title']), header_plain)
        for cell in row['values']:
            x = x + 1
            if cell.get('is_bold', False):
                worksheet.write(y, x, cell['value'], bold)
            else:
                worksheet.write(y, x, cell['value'])
        x, y = 0, y + 1
    response = request.make_response(None,
                                     headers=[('Content-Type', 'application/vnd.ms-excel'),
                                              ('Content-Disposition', 'attachment; filename=table.xls')],
                                     cookies={'fileToken': token})
    workbook.save(response.stream)
    return response