def _adyen_form_get_tx_from_data(self, data):
    reference, pspReference = data.get('merchantReference'), data.get('pspReference')
    if not reference or not pspReference:
        error_msg = _('Adyen: received data with missing reference (%s) or missing pspReference (%s)') % (reference, pspReference)
        _logger.info(error_msg)
        raise ValidationError(error_msg)

    # find tx -> @TDENOTE use pspReference ?
    tx = self.env['payment.transaction'].search([('reference', '=', reference)])
    if not tx or len(tx) > 1:
        error_msg = _('Adyen: received data for reference %s') % (reference)
        if not tx:
            error_msg += _('; no order found')
        else:
            error_msg += _('; multiple orders found')
        _logger.info(error_msg)
        raise ValidationError(error_msg)

    # verify shasign
    if len(tx.acquirer_id.adyen_skin_hmac_key) == 64:
        shasign_check = tx.acquirer_id._adyen_generate_merchant_sig_sha256('out', data)
    else:
        shasign_check = tx.acquirer_id._adyen_generate_merchant_sig('out', data)
    if to_text(shasign_check) != to_text(data.get('merchantSig')):
        error_msg = _('Adyen: invalid merchantSig, received %s, computed %s') % (data.get('merchantSig'), shasign_check)
        _logger.warning(error_msg)
        raise ValidationError(error_msg)

    return tx
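# A minimal, hedged sketch of the kind of HMAC-SHA256 signature that
# _adyen_generate_merchant_sig_sha256 is expected to compute above. The escaping and
# concatenation rules follow Adyen's published HPP signing scheme (sorted keys, escape
# '\' and ':', join keys then values, HMAC-SHA256 with the hex-decoded skin key); the
# exact field handling in the Odoo helper may differ, so treat this as illustrative only.
import base64
import binascii
import hashlib
import hmac

def adyen_sha256_sig(values, hmac_key_hex):
    """Compute an Adyen-style HMAC-SHA256 signature over sorted key/value pairs."""
    def _escape(val):
        # Adyen requires backslashes and colons to be escaped before joining
        return str(val).replace('\\', '\\\\').replace(':', '\\:')

    keys = sorted(k for k in values if k != 'merchantSig')
    signing_string = ':'.join(_escape(k) for k in keys) + ':' + ':'.join(_escape(values[k]) for k in keys)
    digest = hmac.new(binascii.a2b_hex(hmac_key_hex), signing_string.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('ascii')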
def url_for(path_or_uri, lang=None):
    current_path = request.httprequest.path  # should already be text
    location = pycompat.to_text(path_or_uri).strip()
    force_lang = lang is not None
    url = werkzeug.urls.url_parse(location)

    if not url.netloc and not url.scheme and (url.path or force_lang):
        location = werkzeug.urls.url_join(current_path, location)

        lang = pycompat.to_text(lang or request.context.get('lang') or 'en_US')
        langs = [lg[0] for lg in request.env['ir.http']._get_language_codes()]

        if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
            ps = location.split(u'/')
            if ps[1] in langs:
                # Replace the language only if we explicitly provide a language to url_for
                if force_lang:
                    ps[1] = lang
                # Remove the default language unless it's explicitly provided
                elif ps[1] == request.env['ir.http']._get_default_lang().code:
                    ps.pop(1)
            # Insert the context language or the provided language
            elif lang != request.env['ir.http']._get_default_lang().code or force_lang:
                ps.insert(1, lang)
            location = u'/'.join(ps)

    return location
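# Illustrative usage of url_for; the setup is assumed (request.httprequest.path == '/shop',
# installed languages ['fr_FR', 'en_US'] with en_US as default), and the outputs are
# expectations derived from the branches above, not recorded values:
#
#   url_for('/contactus')                -> '/contactus'            (default language stays unprefixed)
#   url_for('/contactus', lang='fr_FR')  -> '/fr_FR/contactus'      (explicit language is inserted)
#   url_for('/fr_FR/contactus')          -> '/fr_FR/contactus'      (existing non-default prefix is kept)
#   url_for('https://example.com/x')     -> 'https://example.com/x' (external URLs are untouched)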
def record_to_html(self, record, field_name, options): assert options['tagName'] != 'img',\ "Oddly enough, the root tag of an image field can not be img. " \ "That is because the image goes into the tag, or it gets the " \ "hose again." if options.get('qweb_img_raw_data', False): return super(Image, self).record_to_html(record, field_name, options) aclasses = ['img', 'img-fluid'] if options.get('qweb_img_responsive', True) else ['img'] aclasses += options.get('class', '').split() classes = ' '.join(map(escape, aclasses)) max_size = None if options.get('resize'): max_size = options.get('resize') else: max_width, max_height = options.get('max_width', 0), options.get('max_height', 0) if max_width or max_height: max_size = '%sx%s' % (max_width, max_height) sha = hashlib.sha1(str(getattr(record, '__last_update')).encode('utf-8')).hexdigest()[0:7] max_size = '' if max_size is None else '/%s' % max_size avoid_if_small = '&avoid_if_small=true' if options.get('avoid_if_small') else '' src = '/web/image/%s/%s/%s%s?unique=%s%s' % (record._name, record.id, options.get('preview_image', field_name), max_size, sha, avoid_if_small) alt = None if options.get('alt-field') and getattr(record, options['alt-field'], None): alt = escape(record[options['alt-field']]) elif options.get('alt'): alt = options['alt'] src_zoom = None if options.get('zoom') and getattr(record, options['zoom'], None): src_zoom = '/web/image/%s/%s/%s%s?unique=%s' % (record._name, record.id, options['zoom'], max_size, sha) elif options.get('zoom'): src_zoom = options['zoom'] atts = OrderedDict() atts["src"] = src atts["class"] = classes atts["style"] = options.get('style') atts["alt"] = alt atts["data-zoom"] = src_zoom and u'1' or None atts["data-zoom-image"] = src_zoom atts["data-no-post-process"] = options.get('data-no-post-process') atts = self.env['ir.qweb']._post_processing_att('img', atts, options.get('template_options')) img = ['<img'] for name, value in atts.items(): if value: img.append(' ') img.append(escape(pycompat.to_text(name))) img.append('="') img.append(escape(pycompat.to_text(value))) img.append('"') img.append('/>') return u''.join(img)
def _compile_directive_snippet(self, el, options):
    el.set('t-call', el.attrib.pop('t-snippet'))
    name = self.env['ir.ui.view'].search([('key', '=', el.attrib.get('t-call'))]).display_name
    thumbnail = el.attrib.pop('t-thumbnail', "oe-thumbnail")
    div = u'<div name="%s" data-oe-type="snippet" data-oe-thumbnail="%s">' % (
        escape(pycompat.to_text(name)),
        escape(pycompat.to_text(thumbnail))
    )
    return [self._append(ast.Str(div))] + self._compile_node(el, options) + [self._append(ast.Str(u'</div>'))]
def to_html(self):
    tagName, attributes, content = self.to_node()
    html = u"<%s " % tagName
    for name, value in attributes.items():
        if value or isinstance(value, string_types):
            html += u' %s="%s"' % (name, escape(to_text(value)))
    if content is None:
        html += u'/>'
    else:
        html += u'>%s</%s>' % (escape(to_text(content)), tagName)
    return html
def _compile_directive_install(self, el, options):
    if self.user_has_groups('base.group_system'):
        module = self.env['ir.module.module'].search([('name', '=', el.attrib.get('t-install'))])
        if not module or module.state == 'installed':
            return []
        name = el.attrib.get('string') or 'Snippet'
        thumbnail = el.attrib.pop('t-thumbnail', 'oe-thumbnail')
        div = u'<div name="%s" data-oe-type="snippet" data-module-id="%s" data-oe-thumbnail="%s"><section/></div>' % (
            escape(pycompat.to_text(name)),
            module.id,
            escape(pycompat.to_text(thumbnail))
        )
        return [self._append(ast.Str(div))]
    else:
        return []
def to_node(self):
    if self.url:
        attr = OrderedDict([
            ["type", "text/css"],
            ["rel", "stylesheet"],
            ["href", self.html_url],
            ["media", escape(to_text(self.media)) if self.media else None]
        ])
        return ("link", attr, None)
    else:
        attr = OrderedDict([
            ["type", "text/css"],
            ["media", escape(to_text(self.media)) if self.media else None]
        ])
        return ("style", attr, self.with_header())
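# Hedged illustration of how to_node() and to_html() above fit together for a stylesheet
# asset (attribute values are made up):
#
#   to_node() -> ("link", OrderedDict([..., ("href", "/web/content/....css"), ...]), None)
#   to_html() -> '<link  type="text/css" rel="stylesheet" href="/web/content/....css"/>'
#
# A URL-backed asset renders as a self-closing <link> tag, while an inline asset
# (content is not None) renders as <style>...</style> with its content escaped.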
def value_to_html(self, value, options):
    if not value:
        return ''
    lang = self.user_lang()
    locale = babel.Locale.parse(lang.code)

    if isinstance(value, str):
        value = fields.Datetime.from_string(value)
    value = fields.Datetime.context_timestamp(self, value)

    if options and 'format' in options:
        pattern = options['format']
    else:
        if options and options.get('time_only'):
            strftime_pattern = (u"%s" % (lang.time_format))
        else:
            strftime_pattern = (u"%s %s" % (lang.date_format, lang.time_format))
        pattern = posix_to_ldml(strftime_pattern, locale=locale)

    if options and options.get('hide_seconds'):
        pattern = pattern.replace(":ss", "").replace(":s", "")

    return pycompat.to_text(babel.dates.format_datetime(value, format=pattern, locale=locale))
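# A small, hedged sketch of the pattern-conversion step: Odoo's posix_to_ldml helper turns a
# strftime pattern such as '%d/%m/%Y %H:%M' into the LDML form babel expects (roughly
# 'dd/MM/yyyy HH:mm'). Using babel directly, without the Odoo wrapper:
import datetime
import babel.dates

locale = babel.Locale.parse('en_US')
value = datetime.datetime(2021, 3, 4, 15, 30, 0)
print(babel.dates.format_datetime(value, format='dd/MM/yyyy HH:mm', locale=locale))
# -> '04/03/2021 15:30'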
def _binary_record_content(
        self, record, field='datas', filename=None,
        filename_field='datas_fname', default_mimetype='application/octet-stream'):
    model = record._name
    mimetype = 'mimetype' in record and record.mimetype or False
    content = None
    filehash = 'checksum' in record and record['checksum'] or False

    field_def = record._fields[field]
    if field_def.type == 'binary' and field_def.attachment:
        field_attachment = self.env['ir.attachment'].search_read(
            domain=[('res_model', '=', model), ('res_id', '=', record.id), ('res_field', '=', field)],
            fields=['datas', 'mimetype', 'checksum'],
            limit=1)
        if field_attachment:
            mimetype = field_attachment[0]['mimetype']
            content = field_attachment[0]['datas']
            filehash = field_attachment[0]['checksum']

    if not content:
        content = record[field] or ''

    # filename
    if not filename:
        if filename_field in record:
            filename = record[filename_field]
        else:
            filename = "%s-%s-%s" % (record._name, record.id, field)

    if not mimetype:
        mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype)

    if not filehash:
        filehash = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()

    status = 200 if content else 404
    return status, content, filename, mimetype, filehash
def _binary_ir_attachment_redirect_content(cls, record, default_mimetype='application/octet-stream'):
    # mainly used for theme image attachments
    status = content = filename = filehash = None
    mimetype = getattr(record, 'mimetype', False)
    if record.type == 'url' and record.url:
        # if the url is in the form /module/path, serve the resource locally
        url_match = re.match("^/(\w+)/(.+)$", record.url)
        if url_match:
            module = url_match.group(1)
            module_path = get_module_path(module)
            module_resource_path = get_resource_path(module, url_match.group(2))

            if module_path and module_resource_path:
                module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                module_resource_path = os.path.normpath(module_resource_path)
                if module_resource_path.startswith(module_path):
                    with open(module_resource_path, 'rb') as f:
                        content = base64.b64encode(f.read())
                    status = 200
                    filename = os.path.basename(module_resource_path)
                    mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype)
                    filehash = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()
        else:
            status = 301
            content = record.url

    return status, content, filename, mimetype, filehash
def value_to_html(self, value, options): """ value_to_html(value, field, options=None) Converts a single value to its HTML version/output :rtype: unicode """ return html_escape(pycompat.to_text(value), options)
def nl2br(string):
    """ Converts newlines to HTML linebreaks in ``string``. Returns
    the unicode result.

    :param str string:
    :rtype: unicode
    """
    return pycompat.to_text(string).replace(u'\n', u'<br>\n')
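# Quick illustration of nl2br and the to_text coercion it relies on. As far as I can tell,
# pycompat.to_text returns an empty string for None/False and decodes bytes as UTF-8, so the
# lines below are expectations rather than recorded outputs:
#
#   nl2br(u"hello\nworld")  -> u"hello<br>\nworld"
#   nl2br(b"a\nb")          -> u"a<br>\nb"
#   nl2br(False)            -> u""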
def to_html(self, sep=None, css=True, js=True, debug=False, async_load=False, url_for=(lambda url: url)):
    nodes = self.to_node(css=css, js=js, debug=debug, async_load=async_load)

    if sep is None:
        sep = u'\n '
    response = []
    for tagName, attributes, content in nodes:
        html = u"<%s " % tagName
        for name, value in attributes.items():
            if value or isinstance(value, string_types):
                html += u' %s="%s"' % (name, escape(to_text(value)))
        if content is None:
            html += u'/>'
        else:
            html += u'>%s</%s>' % (escape(to_text(content)), tagName)
        response.append(html)

    return sep + sep.join(response)
def fiscal_pos_map_to_csv(self):
    writer = pycompat.csv_writer(open('account.fiscal.'
                                      'position.tax.template-%s.csv' % self.suffix, 'wb'))
    fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
    keys = next(fiscal_pos_map_iterator)
    writer.writerow(keys)
    for row in fiscal_pos_map_iterator:
        writer.writerow([pycompat.to_text(s) for s in row.values()])
def _get_asset_content(self, xmlid, options): options = dict(options, inherit_branding=False, inherit_branding_auto=False, edit_translations=False, translatable=False, rendering_bundle=True) env = self.env(context=options) # TODO: This helper can be used by any template that wants to embedd the backend. # It is currently necessary because the ir.ui.view bundle inheritance does not # match the module dependency graph. def get_modules_order(): if request: from odoo.addons.web.controllers.main import module_boot return json.dumps(module_boot()) return '[]' template = env['ir.qweb'].render(xmlid, {"get_modules_order": get_modules_order}) files = [] remains = [] for el in html.fragments_fromstring(template): if isinstance(el, pycompat.string_types): remains.append(pycompat.to_text(el)) elif isinstance(el, html.HtmlElement): href = el.get('href', '') src = el.get('src', '') atype = el.get('type') media = el.get('media') can_aggregate = not urls.url_parse(href).netloc and not href.startswith('/web/content') if el.tag == 'style' or (el.tag == 'link' and el.get('rel') == 'stylesheet' and can_aggregate): if href.endswith('.sass'): atype = 'text/sass' elif href.endswith('.scss'): atype = 'text/scss' elif href.endswith('.less'): atype = 'text/less' if atype not in ('text/less', 'text/scss', 'text/sass'): atype = 'text/css' path = [segment for segment in href.split('/') if segment] filename = get_resource_path(*path) if path else None files.append({'atype': atype, 'url': href, 'filename': filename, 'content': el.text, 'media': media}) elif el.tag == 'script': atype = 'text/javascript' path = [segment for segment in src.split('/') if segment] filename = get_resource_path(*path) if path else None files.append({'atype': atype, 'url': src, 'filename': filename, 'content': el.text, 'media': media}) else: remains.append(html.tostring(el, encoding='unicode')) else: try: remains.append(html.tostring(el, encoding='unicode')) except Exception: # notYETimplementederror raise NotImplementedError return (files, remains)
def value_to_html(self, value, options):
    locale = babel.Locale.parse(self.user_lang().code)

    if isinstance(value, str):
        value = fields.Datetime.from_string(value)

    # value should be a naive datetime in UTC. So is fields.Datetime.now()
    reference = fields.Datetime.from_string(options['now'])

    return pycompat.to_text(babel.dates.format_timedelta(value - reference, add_direction=True, locale=locale))
def record_to_html(self, record, field_name, options):
    if not getattr(record, field_name):
        return None
    view = getattr(record, field_name)
    if view._name != "ir.ui.view":
        _logger.warning("%s.%s must be a 'ir.ui.view' model." % (record, field_name))
        return None
    return pycompat.to_text(view.render(options.get('values', {}), engine='ir.qweb'))
def load_information_from_description_file(module, mod_path=None): """ :param module: The name of the module (sale, purchase, ...) :param mod_path: Physical path of module, if not providedThe name of the module (sale, purchase, ...) """ if not mod_path: mod_path = get_module_path(module, downloaded=True) manifest_file = module_manifest(mod_path) if manifest_file: # default values for descriptor info = { 'application': False, 'author': 'Odoo S.A.', 'auto_install': False, 'category': 'Uncategorized', 'depends': [], 'description': '', 'icon': get_module_icon(module), 'installable': True, 'license': 'LGPL-3', 'post_load': None, 'version': '1.0', 'web': False, 'sequence': 100, 'summary': '', 'website': '', } info.update(zip( 'depends data demo test init_xml update_xml demo_xml'.split(), iter(list, None))) f = tools.file_open(manifest_file, mode='rb') try: info.update(ast.literal_eval(pycompat.to_text(f.read()))) finally: f.close() if not info.get('description'): readme_path = [opj(mod_path, x) for x in README if os.path.isfile(opj(mod_path, x))] if readme_path: readme_text = tools.file_open(readme_path[0]).read() info['description'] = readme_text if 'active' in info: # 'active' has been renamed 'auto_install' info['auto_install'] = info['active'] info['version'] = adapt_version(info['version']) return info _logger.debug('module %s: no manifest file found %s', module, MANIFEST_NAMES) return {}
def record_to_html(self, record, field_name, options):
    if not getattr(record, field_name):
        return None
    view = getattr(record, field_name)
    if view._name != "ir.ui.view":
        _logger.warning("%s.%s must be a 'ir.ui.view' model." % (record, field_name))
        return None
    view = view.with_context(object=record)
    return pycompat.to_text(view.render(view._context, engine='ir.qweb'))
def charge(env, key, account_token, credit, description=None, credit_template=None): """ Account charge context manager: takes a hold for ``credit`` amount before executing the body, then captures it if there is no error, or cancels it if the body generates an exception. :param str key: service identifier :param str account_token: user identifier :param int credit: cost of the body's operation :param description: a description of the purpose of the charge, the user will be able to see it in their dashboard :type description: str :param credit_template: a QWeb template to render and show to the user if their account does not have enough credits for the requested operation :type credit_template: str """ endpoint = get_endpoint(env) params = { 'account_token': account_token, 'credit': credit, 'key': key, 'description': description, } try: transaction_token = jsonrpc(endpoint + '/iap/1/authorize', params=params) except InsufficientCreditError as e: if credit_template: arguments = json.loads(e.args[0]) arguments['body'] = pycompat.to_text(env['ir.qweb'].render(credit_template)) e.args = (json.dumps(arguments),) raise e try: transaction = IapTransaction() transaction.credit = credit yield transaction except Exception as e: params = { 'token': transaction_token, 'key': key, } r = jsonrpc(endpoint + '/iap/1/cancel', params=params) raise e else: params = { 'token': transaction_token, 'key': key, 'credit_to_capture': transaction.credit, } r = jsonrpc(endpoint + '/iap/1/capture', params=params) # noqa
def encode_addr(addr):
    name, email = addr
    # If s is a <text string>, then charset is a hint specifying the
    # character set of the characters in the string. The Unicode string
    # will be encoded using the following charsets in order: us-ascii,
    # the charset hint, utf-8. The first character set to not provoke a
    # UnicodeError is used.
    # -> always pass a text string to Header
    # also Header.__str__ in Python 3 "Returns an approximation of the
    # Header as a string, using an unlimited line length.", the old one
    # was "A synonym for Header.encode()." so call encode() directly?
    name = Header(pycompat.to_text(name)).encode()
    return formataddr((name, email))
def _query(self, conf, filter, retrieve_attributes=None):
    """Query an LDAP server with the filter argument and scope subtree.

    Allow for all authentication methods of the simple authentication method:
    - authenticated bind (non-empty binddn + valid password)
    - anonymous bind (empty binddn + empty password)
    - unauthenticated authentication (non-empty binddn + empty password)

    .. seealso::
       :rfc:`4513#section-5.1` - LDAP: Simple Authentication Method.

    :param dict conf: LDAP configuration
    :param filter: valid LDAP filter
    :param list retrieve_attributes: LDAP attributes to be retrieved. \
    If not specified, return all attributes.
    :return: ldap entries
    :rtype: list of tuples (dn, attrs)
    """
    results = []
    try:
        conn = self._connect(conf)
        ldap_password = conf['ldap_password'] or ''
        ldap_binddn = conf['ldap_binddn'] or ''
        conn.simple_bind_s(to_text(ldap_binddn), to_text(ldap_password))
        results = conn.search_st(to_text(conf['ldap_base']), ldap.SCOPE_SUBTREE, filter, retrieve_attributes, timeout=60)
        conn.unbind()
    except ldap.INVALID_CREDENTIALS:
        _logger.error('LDAP bind failed.')
    except ldap.LDAPError as e:
        _logger.error('An LDAP exception occurred: %s', e)
    return results
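# A minimal, hedged sketch of the same bind-and-search flow using python-ldap directly,
# without the Odoo model wrapper. The server URI, bind DN, base DN and filter below are
# placeholders, not values taken from the source.
import ldap

def ldap_query(uri, binddn, password, base, filterstr, attrs=None):
    """Bind (possibly anonymously) and run a subtree search, returning (dn, attrs) tuples."""
    conn = ldap.initialize(uri)
    try:
        conn.simple_bind_s(binddn or '', password or '')
        return conn.search_st(base, ldap.SCOPE_SUBTREE, filterstr, attrs, timeout=60)
    finally:
        conn.unbind()

# Example call (placeholder values):
# entries = ldap_query('ldap://ldap.example.com:389', '', '', 'dc=example,dc=com', '(uid=alice)')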
def authorize(env, key, account_token, credit, dbuuid=False, description=None, credit_template=None):
    endpoint = get_endpoint(env)
    params = {
        'account_token': account_token,
        'credit': credit,
        'key': key,
        'description': description,
    }
    if dbuuid:
        params.update({'dbuuid': dbuuid})
    try:
        transaction_token = jsonrpc(endpoint + '/iap/1/authorize', params=params)
    except InsufficientCreditError as e:
        if credit_template:
            arguments = json.loads(e.args[0])
            arguments['body'] = pycompat.to_text(env['ir.qweb'].render(credit_template))
            e.args = (json.dumps(arguments),)
        raise e
    return transaction_token
def taxes_to_csv(self):
    writer = pycompat.csv_writer(open('account.tax.template-%s.csv' % self.suffix, 'wb'))
    taxes_iterator = self.iter_taxes()
    keys = next(taxes_iterator)
    writer.writerow(keys[3:] + ['sequence'])
    seq = 100
    for row in sorted(taxes_iterator, key=lambda r: r['description']):
        if not _is_true(row['active']):
            continue
        seq += 1
        if row['parent_id:id']:
            cur_seq = seq + 1000
        else:
            cur_seq = seq
        writer.writerow([
            pycompat.to_text(v) for v in list(row.values())[3:]
        ] + [cur_seq])
def encode_header_param(param_text):
    """Returns an appropriate RFC2047 encoded representation of the given
    header parameter value, suitable for direct assignment as the
    param value (e.g. via Message.set_param() or Message.add_header()).

    RFC2822 assumes that headers contain only 7-bit characters,
    so we ensure it is the case, using RFC2047 encoding when needed.

    :param param_text: unicode or utf-8 encoded string with header value
    :rtype: string
    :return: if ``param_text`` represents a plain ASCII string,
        return the same 7-bit string, otherwise returns an ASCII
        string containing the RFC2047 encoded text.
    """
    # For details see the encode_header() method that uses the same logic
    if not param_text:
        return ""
    param_text = ustr(param_text)  # FIXME: require unicode higher up?
    if is_ascii(param_text):
        return pycompat.to_text(param_text)  # TODO: is that actually necessary?
    return Charset("utf-8").header_encode(param_text)
def encode_header(header_text):
    """Returns an appropriate representation of the given header value,
    suitable for direct assignment as a header value in an
    email.message.Message. RFC2822 assumes that headers contain
    only 7-bit characters, so we ensure it is the case, using RFC2047
    encoding when needed.

    :param header_text: unicode or utf-8 encoded string with header value
    :rtype: string | email.header.Header
    :return: if ``header_text`` represents a plain ASCII string,
        return the same 7-bit string, otherwise returns an email.header.Header
        that will perform the appropriate RFC2047 encoding of
        non-ASCII values.
    """
    if not header_text:
        return ""
    header_text = ustr(header_text)  # FIXME: require unicode higher up?
    if is_ascii(header_text):
        return pycompat.to_text(header_text)
    return Header(header_text, 'utf-8')
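# Hedged illustration of the ASCII / non-ASCII split handled by encode_header: plain ASCII
# values pass through unchanged, while non-ASCII values end up RFC2047-encoded when the
# Header is serialized (the second output is the form email.header typically produces).
from email.header import Header

print(Header(u'Hello').encode())             # 'Hello' stays as-is
print(Header(u'Mélodie', 'utf-8').encode())
# -> '=?utf-8?b?TcOpbG9kaWU=?=' (or an equivalent encoded-word form)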
def record_to_html(self, record, field_name, options): assert options['tagName'] != 'img',\ "Oddly enough, the root tag of an image field can not be img. " \ "That is because the image goes into the tag, or it gets the " \ "hose again." if options.get('qweb_img_raw_data', False): return super(Image, self).record_to_html(record, field_name, options) aclasses = ['img', 'img-responsive'] if options.get('qweb_img_responsive', True) else ['img'] aclasses += options.get('class', '').split() classes = ' '.join(pycompat.imap(escape, aclasses)) max_size = None if options.get('resize'): max_size = options.get('resize') else: max_width, max_height = options.get('max_width', 0), options.get('max_height', 0) if max_width or max_height: max_size = '%sx%s' % (max_width, max_height) sha = hashlib.sha1(getattr(record, '__last_update').encode('utf-8')).hexdigest()[0:7] max_size = '' if max_size is None else '/%s' % max_size avoid_if_small = '&avoid_if_small=true' if options.get('avoid_if_small') else '' src = '/web/image/%s/%s/%s%s?unique=%s%s' % (record._name, record.id, field_name, max_size, sha, avoid_if_small) alt = None if options.get('alt-field') and getattr(record, options['alt-field'], None): alt = escape(record[options['alt-field']]) elif options.get('alt'): alt = options['alt'] src_zoom = None if options.get('zoom') and getattr(record, options['zoom'], None): src_zoom = '/web/image/%s/%s/%s%s?unique=%s' % (record._name, record.id, options['zoom'], max_size, sha) elif options.get('zoom'): src_zoom = options['zoom'] img = '<img class="%s" src="%s" style="%s"%s%s/>' % \ (classes, src, options.get('style', ''), ' alt="%s"' % alt if alt else '', ' data-zoom="1" data-zoom-image="%s"' % src_zoom if src_zoom else '') return pycompat.to_text(img)
def value_to_html(self, value, options):
    if 'decimal_precision' in options:
        precision = self.env['decimal.precision'].search([('name', '=', options['decimal_precision'])]).digits
    else:
        precision = options['precision']

    if precision is None:
        fmt = '%f'
    else:
        value = float_utils.float_round(value, precision_digits=precision)
        fmt = '%.{precision}f'.format(precision=precision)

    formatted = self.user_lang().format(fmt, value, grouping=True).replace(r'-', u'-\N{ZERO WIDTH NO-BREAK SPACE}')

    # %f does not strip trailing zeroes. %g does but its precision causes
    # it to switch to scientific notation starting at a million *and* to
    # strip decimals. So use %f and if no precision was specified manually
    # strip trailing 0.
    if precision is None:
        formatted = re.sub(r'(?:(0|\d+?)0+)$', r'\1', formatted)

    return pycompat.to_text(formatted)
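# A quick check of the trailing-zero stripping regex used above when no precision is given
# (plain Python, no Odoo dependency). '%f' always yields six decimals, and the substitution
# strips the run of trailing zeroes, keeping a single zero when the fractional part is all zeroes.
import re

strip = lambda s: re.sub(r'(?:(0|\d+?)0+)$', r'\1', s)
print(strip('%f' % 3.14))  # '3.140000'   -> '3.14'
print(strip('%f' % 100))   # '100.000000' -> '100.0'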
def _authenticate(self, conf, login, password):
    """Authenticate a user against the specified LDAP server.

    In order to prevent an unintended 'unauthenticated authentication',
    which is an anonymous bind with a valid dn and a blank password,
    check for empty passwords explicitly (:rfc:`4513#section-6.3.1`)

    :param dict conf: LDAP configuration
    :param login: username
    :param password: Password for the LDAP user
    :return: LDAP entry of authenticated user or False
    :rtype: dictionary of attributes
    """
    if not password:
        return False

    entry = False
    try:
        filter = filter_format(conf['ldap_filter'], (login,))
    except TypeError:
        _logger.warning('Could not format LDAP filter. Your filter should contain one \'%s\'.')
        return False
    try:
        results = self._query(conf, tools.ustr(filter))

        # Get rid of (None, attrs) for searchResultReference replies
        results = [i for i in results if i[0]]
        if len(results) == 1:
            dn = results[0][0]
            conn = self._connect(conf)
            conn.simple_bind_s(dn, to_text(password))
            conn.unbind()
            entry = results[0]
    except ldap.INVALID_CREDENTIALS:
        return False
    except ldap.LDAPError as e:
        _logger.error('An LDAP exception occurred: %s', e)
    return entry
def encode_rfc2822_address_header(header_text):
    """If ``header_text`` contains non-ASCII characters, attempts to
    locate patterns of the form ``"Name" <address@domain>`` and replace the
    ``"Name"`` portion by the RFC2047-encoded version, preserving the address
    part untouched.
    """
    def encode_addr(addr):
        name, email = addr
        # If s is a <text string>, then charset is a hint specifying the
        # character set of the characters in the string. The Unicode string
        # will be encoded using the following charsets in order: us-ascii,
        # the charset hint, utf-8. The first character set to not provoke a
        # UnicodeError is used.
        # -> always pass a text string to Header
        # also Header.__str__ in Python 3 "Returns an approximation of the
        # Header as a string, using an unlimited line length.", the old one
        # was "A synonym for Header.encode()." so call encode() directly?
        name = Header(pycompat.to_text(name)).encode()
        return formataddr((name, email))

    addresses = getaddresses([pycompat.to_text(ustr(header_text))])
    return COMMASPACE.join(encode_addr(a) for a in addresses)
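# Hedged illustration of the name-only encoding performed above, using the stdlib pieces
# directly. Only the display name gets RFC2047-encoded; the address part is left untouched.
from email.header import Header
from email.utils import formataddr, getaddresses

addrs = getaddresses([u'Jöhn Döe <john@example.com>, plain@example.com'])
print(', '.join(formataddr((Header(name).encode(), email)) for name, email in addrs))
# Expected to resemble: '=?utf-8?b?...?= <john@example.com>, plain@example.com'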
def tax_codes_to_csv(self): writer = pycompat.csv_writer( open('account.tax.code.template-%s.csv' % self.suffix, 'wb')) tax_codes_iterator = self.iter_tax_codes() keys = next(tax_codes_iterator) writer.writerow(keys) # write structure tax codes tax_codes = {} # code: id for row in tax_codes_iterator: tax_code = row['code'] if tax_code in tax_codes: raise RuntimeError('duplicate tax code %s' % tax_code) tax_codes[tax_code] = row['id'] writer.writerow([pycompat.to_text(v) for v in row.values()]) # read taxes and add leaf tax codes new_tax_codes = {} # id: parent_code def add_new_tax_code(tax_code_id, new_name, new_parent_code): if not tax_code_id: return name, parent_code = new_tax_codes.get(tax_code_id, (None, None)) if parent_code and parent_code != new_parent_code: raise RuntimeError('tax code "%s" already exist with ' 'parent %s while trying to add it with ' 'parent %s' % (tax_code_id, parent_code, new_parent_code)) else: new_tax_codes[tax_code_id] = (new_name, new_parent_code) taxes_iterator = self.iter_taxes() next(taxes_iterator) for row in taxes_iterator: if not _is_true(row['active']): continue if row['child_depend'] and row['amount'] != 1: raise RuntimeError('amount must be one if child_depend ' 'for %s' % row['id']) # base parent base_code = row['BASE_CODE'] if not base_code or base_code == '/': base_code = 'NA' if base_code not in tax_codes: raise RuntimeError('undefined tax code %s' % base_code) if base_code != 'NA': if row['child_depend']: raise RuntimeError('base code specified ' 'with child_depend for %s' % row['id']) if not row['child_depend']: # ... in lux, we have the same code for invoice and refund if base_code != 'NA': assert row[ 'base_code_id:id'], 'missing base_code_id for %s' % row[ 'id'] assert row['ref_base_code_id:id'] == row['base_code_id:id'] add_new_tax_code(row['base_code_id:id'], 'Base - ' + row['name'], base_code) # tax parent tax_code = row['TAX_CODE'] if not tax_code or tax_code == '/': tax_code = 'NA' if tax_code not in tax_codes: raise RuntimeError('undefined tax code %s' % tax_code) if tax_code == 'NA': if row['amount'] and not row['child_depend']: raise RuntimeError('TAX_CODE not specified ' 'for non-zero tax %s' % row['id']) if row['tax_code_id:id']: raise RuntimeError('tax_code_id specified ' 'for tax %s' % row['id']) else: if row['child_depend']: raise RuntimeError('TAX_CODE specified ' 'with child_depend for %s' % row['id']) if not row['amount']: raise RuntimeError('TAX_CODE specified ' 'for zero tax %s' % row['id']) if not row['tax_code_id:id']: raise RuntimeError('tax_code_id not specified ' 'for tax %s' % row['id']) if not row['child_depend'] and row['amount']: # ... in lux, we have the same code for invoice and refund assert row[ 'tax_code_id:id'], 'missing tax_code_id for %s' % row['id'] assert row['ref_tax_code_id:id'] == row['tax_code_id:id'] add_new_tax_code(row['tax_code_id:id'], 'Taxe - ' + row['name'], tax_code) for tax_code_id in sorted(new_tax_codes): name, parent_code = new_tax_codes[tax_code_id] writer.writerow([ tax_code_id, u'lu_tct_m' + parent_code, tax_code_id.replace('lu_tax_code_template_', u''), u'1', u'', pycompat.to_text(name), u'' ])
def _get_asset_content(self, xmlid, options): options = dict(options, inherit_branding=False, inherit_branding_auto=False, edit_translations=False, translatable=False, rendering_bundle=True) env = self.env(context=options) # TODO: This helper can be used by any template that wants to embedd the backend. # It is currently necessary because the ir.ui.view bundle inheritance does not # match the module dependency graph. def get_modules_order(): if request: from odoo.addons.web.controllers.main import module_boot return json.dumps(module_boot()) return '[]' template = env['ir.qweb'].render( xmlid, {"get_modules_order": get_modules_order}) files = [] remains = [] for el in html.fragments_fromstring(template): if isinstance(el, pycompat.string_types): remains.append(pycompat.to_text(el)) elif isinstance(el, html.HtmlElement): href = el.get('href', '') src = el.get('src', '') atype = el.get('type') media = el.get('media') can_aggregate = not urls.url_parse( href).netloc and not href.startswith('/web/content') if el.tag == 'style' or (el.tag == 'link' and el.get('rel') == 'stylesheet' and can_aggregate): if href.endswith('.sass'): atype = 'text/sass' elif href.endswith('.scss'): atype = 'text/scss' elif href.endswith('.less'): atype = 'text/less' if atype not in ('text/less', 'text/scss', 'text/sass'): atype = 'text/css' path = [segment for segment in href.split('/') if segment] filename = get_resource_path(*path) if path else None files.append({ 'atype': atype, 'url': href, 'filename': filename, 'content': el.text, 'media': media }) elif el.tag == 'script': atype = 'text/javascript' path = [segment for segment in src.split('/') if segment] filename = get_resource_path(*path) if path else None files.append({ 'atype': atype, 'url': src, 'filename': filename, 'content': el.text, 'media': media }) else: remains.append(html.tostring(el, encoding='unicode')) else: try: remains.append(html.tostring(el, encoding='unicode')) except Exception: # notYETimplementederror raise NotImplementedError return (files, remains)
def from_data(self, fields, rows): if len(rows) > 65535: raise UserError( _('There are too many rows (%s rows, limit: 65535) to export as Excel 97-2003 (.xls) format. Consider splitting the export.' ) % len(rows)) path = self._full_folder_path( "WK_export_update" ) # ================ Use to Create Folder to store File workbook = xlsxwriter.Workbook( path + "/file_data.xlsx") # ============= Use xlsxwriter instead of xlwt worksheet = workbook.add_worksheet('Sheet 1') # =================================================================================================== # check that the folder exsists or not if not then create it # =================================================================================================== if not os.path.isdir(path): os.mkdir(path) bold = workbook.add_format({'bold': True}) # =================================================================================================== for i, fieldname in enumerate(fields): worksheet.write( 0, i, fieldname, bold) # ====================== Create the heading in BOLD worksheet.set_column(0, i, 15) # ====================== Set column size worksheet.set_row(0, 20) for row_index, row in enumerate(rows): for cell_index, cell_value in enumerate(row): worksheet.set_row(row_index + 1, 80) # if isinstance(cell_value, bytes): # because xls uses raw export, we can get a bytes object # here. xlwt does not support bytes values in Python 3 -> # assume this is base64 and decode to a string, if this # fails note that you can't export try: # ====================================================================================== # lde to store image not as b64 # ====================================================================================== mimetype = guess_mimetype(base64.b64decode(cell_value)) path1 = self._generate_semi_path( path, row_index, cell_index) if (mimetype.startswith('image')): cell_value = odoo.tools.image_process( cell_value, size=odoo.tools. image_guess_size_from_field_name(fieldname), crop=False, quality=0) img_format = mimetype.split('/')[1] self._create_image(path1, cell_value, img_format) worksheet.insert_image(row_index + 1, cell_index, path1 + "." + img_format, options={}) else: cell_value = pycompat.to_text(cell_value) except UnicodeDecodeError: raise UserError( _("Binary fields can not be exported to Excel unless their content is base64-encoded. That does not seem to be the case for %s." ) % fields[cell_index]) # if isinstance(cell_value, pycompat.string_types): if isinstance(cell_value, bytes): cell_value = re.sub("\r", " ", pycompat.to_text(cell_value)) # Excel supports a maximum of 32767 characters in each cell: cell_value = cell_value[:32767] # elif isinstance(cell_value, datetime.datetime): # cell_style = datetime_style # elif isinstance(cell_value, datetime.date): # cell_style = date_style worksheet.write(row_index + 1, cell_index, cell_value) # ======================================================================================================= # Store the data in xls and then open and send that file to the user # ======================================================================================================= workbook.close() fp = open(path + "/file_data.xlsx", "rb") # ======================================================================================================= data = fp.read() fp.close() if os.path.isdir(path): shutil.rmtree(path) return data
def binary_content( cls, xmlid=None, model="ir.attachment", id=None, field="datas", # pylint: disable=redefined-builtin unique=False, filename=None, filename_field="datas_fname", download=False, mimetype=None, default_mimetype="application/octet-stream", access_token=None, related_id=None, access_mode=None, env=None, ): """ Get file, attachment or downloadable content If the ``xmlid`` and ``id`` parameter is omitted, fetches the default value for the binary field (via ``default_get``), otherwise fetches the field for that precise record. :param str xmlid: xmlid of the record :param str model: name of the model to fetch the binary from :param int id: id of the record from which to fetch the binary :param str field: binary field :param bool unique: add a max-age for the cache control :param str filename: choose a filename :param str filename_field: if not create an filename with model-id-field :param bool download: apply headers to download the file :param str mimetype: mintype of the field (for headers) :param related_id: the id of another record used for custom_check :param access_mode: if truthy, will call custom_check to fetch the object that contains the binary. :param str default_mimetype: default mintype if no mintype found :param str access_token: optional token for unauthenticated access only available for ir.attachment :param Environment env: by default use request.env :returns: (status, headers, content) """ env = env or request.env # get object and content obj = None if xmlid: obj = cls._xmlid_to_obj(env, xmlid) elif id and model in env.registry: obj = env[model].browse(int(id)) # obj exists if not obj or not obj.exists() or field not in obj: return (404, [], None) # access token grant access if model == "ir.attachment" and access_token: obj = obj.sudo() if access_mode: if not cls._check_access_mode( env, id, access_mode, model, access_token=access_token, related_id=related_id, ): return (403, [], None) elif not consteq(obj.access_token or u"", access_token): return (403, [], None) # check read access try: obj["__last_update"] except AccessError: return (403, [], None) status, headers, content = None, [], None # attachment by url check module_resource_path = None if model == "ir.attachment" and obj.type == "url" and obj.url: url_match = re.match(r"^/(\w+)/(.+)$", obj.url) if url_match: module = url_match.group(1) module_path = get_module_path(module) module_resource_path = get_resource_path( module, url_match.group(2)) if module_path and module_resource_path: module_path = os.path.join( os.path.normpath(module_path), "") # join ensures the path ends with '/' module_resource_path = os.path.normpath( module_resource_path) if module_resource_path.startswith(module_path): with open(module_resource_path, "rb") as f: content = base64.b64encode(f.read()) # 'last_update' variable removed for lint error fix if not module_resource_path: module_resource_path = obj.url if not content: status = 301 content = module_resource_path else: # begin redefined part of original binary_content of odoo/base/addons/ir/ir_http att = env["ir.http"].find_field_attachment(env, model, field, obj) if att: content = att.url status = 301 # yelizariev: # Why do we redefine mimetype variable passed to the method? Can original mimetype has not a Non wrong value? 
# em230418: # in original binary_content method, mimetype is redefined without any condition: # https://github.com/odoo/odoo/blob/98a137e4b1f631a10d46b5e0cb21bb83ed7e861f/odoo/addons/base/ir/ir_http.py#L312 mimetype = att.mimetype if not content: content = obj[field] or "" # end redefined part of original binary_content # filename if not filename: if filename_field in obj: filename = obj[filename_field] elif module_resource_path: filename = os.path.basename(module_resource_path) else: filename = "{}-{}-{}".format(obj._name, obj.id, field) # mimetype # redefined: in the original function there is no condition if not mimetype: mimetype = "mimetype" in obj and obj.mimetype or False if not mimetype: if filename: mimetype = mimetypes.guess_type(filename)[0] if not mimetype and getattr(env[model]._fields[field], "attachment", False): # for binary fields, fetch the ir_attachement for mimetype check attach_mimetype = env["ir.attachment"].search_read( domain=[ ("res_model", "=", model), ("res_id", "=", id), ("res_field", "=", field), ], fields=["mimetype"], limit=1, ) mimetype = attach_mimetype and attach_mimetype[0]["mimetype"] if not mimetype: mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype) headers += [("Content-Type", mimetype), ("X-Content-Type-Options", "nosniff")] # cache etag = bool(request) and request.httprequest.headers.get( "If-None-Match") retag = ( '"%s"' % hashlib.md5(pycompat.to_text(content).encode("utf-8")).hexdigest()) status = status or (304 if etag == retag else 200) headers.append(("ETag", retag)) headers.append( ("Cache-Control", "max-age=%s" % (STATIC_CACHE if unique else 0))) # content-disposition default name if download: headers.append( ("Content-Disposition", cls.content_disposition(filename))) return (status, headers, content)
def value_to_html(self, value, options):
    return pycompat.to_text(self.user_lang().format('%d', value, grouping=True).replace(r'-', u'-\N{ZERO WIDTH NO-BREAK SPACE}'))
def value_to_html(self, value, options):
    if not value:
        return ''
    return escape(pycompat.to_text(options['selection'][value]) or u'')
def value_to_html(self, value, options):
    return pycompat.to_text(self.user_lang().format('%d', value, grouping=True).replace(r'-', u'\u2011'))
def value_to_html(self, value, options):
    if options.get('format_decimalized_number'):
        return tools.format_decimalized_number(value)
    return pycompat.to_text(self.user_lang().format('%d', value, grouping=True).replace(r'-', '-\N{ZERO WIDTH NO-BREAK SPACE}'))
def value_to_html(self, value, options):
    return pycompat.to_text(value)
def from_data(self, fields, rows): if len(rows) > 65535: raise UserError(_('There are too many rows (%s rows, limit: 65535) to export as Excel 97-2003 (.xls) format. Consider splitting the export.') % len(rows)) workbook = xlwt.Workbook(style_compression=2) worksheet = workbook.add_sheet('Sheet 1') style = xlwt.easyxf('align: wrap yes') font = xlwt.Font() font.bold = True font.height = 300 style.font = font for i, fieldname in enumerate(fields): worksheet.write(0, i, fieldname, style) worksheet.col(i).width = 8000 # around 220 pixels base_style = xlwt.easyxf('align: wrap yes') date_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD') datetime_style = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS') for row_index, row in enumerate(rows): unfoldable = row[-1] row.pop(-1) for cell_index, cell_value in enumerate(row): cell_style = base_style if isinstance(cell_value, bytes) and not isinstance(cell_value, pycompat.string_types): # because xls uses raw export, we can get a bytes object # here. xlwt does not support bytes values in Python 3 -> # assume this is base64 and decode to a string, if this # fails note that you can't export try: cell_value = pycompat.to_text(cell_value) except UnicodeDecodeError: raise UserError(_("Binary fields can not be exported to Excel unless their content is base64-encoded. That does not seem to be the case for %s.") % fields[cell_index]) if isinstance(cell_value, pycompat.string_types): cell_value = re.sub("\r", " ", pycompat.to_text(cell_value)) # Excel supports a maximum of 32767 characters in each cell: cell_value = cell_value[:32767] elif isinstance(cell_value, datetime.datetime): cell_style = datetime_style elif isinstance(cell_value, datetime.date): cell_style = date_style font = xlwt.Font() font.bold = False cell_style.font = font if row_index + 1 in [2, 5]: font = xlwt.Font() font.bold = True cell_style.font = font if unfoldable: font.bold = True worksheet.write(row_index + 1, cell_index, cell_value, cell_style) fp = io.BytesIO() workbook.save(fp) fp.seek(0) data = fp.read() fp.close() return data
def from_string(self, text):
    return email.message_from_string(pycompat.to_text(text), policy=email.policy.SMTP)
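# Small, hedged usage sketch of the stdlib call wrapped above: parsing a raw RFC 2822 message
# with the SMTP policy (CRLF line endings, structured header objects).
import email
import email.policy

raw = "Subject: Hello\r\nFrom: a@example.com\r\nTo: b@example.com\r\n\r\nBody text\r\n"
msg = email.message_from_string(raw, policy=email.policy.SMTP)
print(msg['Subject'])              # 'Hello'
print(msg.get_content().strip())   # 'Body text'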
def __leaf_to_sql(self, leaf, model, alias):
    left, operator, right = leaf

    # final sanity checks - should never fail
    assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
        "Invalid operator %r in domain term %r" % (operator, leaf)
    assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._fields, \
        "Invalid field %r in domain term %r" % (left, leaf)
    assert not isinstance(right, BaseModel), \
        "Invalid value %r in domain term %r" % (right, leaf)

    table_alias = '"%s"' % alias

    if leaf == TRUE_LEAF:
        query = 'TRUE'
        params = []

    elif leaf == FALSE_LEAF:
        query = 'FALSE'
        params = []

    elif operator == 'inselect':
        query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
        params = list(right[1])

    elif operator == 'not inselect':
        query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
        params = list(right[1])

    elif operator in ['in', 'not in']:
        # Two cases: right is a boolean or a list. The boolean case is an
        # abuse and handled for backward compatibility.
        if isinstance(right, bool):
            _logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
            if (operator == 'in' and right) or (operator == 'not in' and not right):
                query = '(%s."%s" IS NOT NULL)' % (table_alias, left)
            else:
                query = '(%s."%s" IS NULL)' % (table_alias, left)
            params = []
        elif isinstance(right, Query):
            subquery, subparams = right.subselect()
            query = '(%s."%s" %s (%s))' % (table_alias, left, operator, subquery)
            params = subparams
        elif isinstance(right, (list, tuple)):
            if model._fields[left].type == "boolean":
                params = [it for it in (True, False) if it in right]
                check_null = False in right
            else:
                params = [it for it in right if it != False]
                check_null = len(params) < len(right)
            if params:
                if left == 'id':
                    instr = ','.join(['%s'] * len(params))
                else:
                    field = model._fields[left]
                    instr = ','.join([field.column_format] * len(params))
                    params = [field.convert_to_column(p, model, validate=False) for p in params]
                query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
            else:
                # The case for (left, 'in', []) or (left, 'not in', []).
                query = 'FALSE' if operator == 'in' else 'TRUE'
            if (operator == 'in' and check_null) or (operator == 'not in' and not check_null):
                query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
            elif operator == 'not in' and check_null:
                query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left)  # needed only for TRUE.
        else:  # Must not happen
            raise ValueError("Invalid domain term %r" % (leaf,))

    elif left in model and model._fields[left].type == "boolean" and ((operator == '=' and right is False) or (operator == '!=' and right is True)):
        query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
        params = []

    elif (right is False or right is None) and (operator == '='):
        query = '%s."%s" IS NULL ' % (table_alias, left)
        params = []

    elif left in model and model._fields[left].type == "boolean" and ((operator == '!=' and right is False) or (operator == '==' and right is True)):
        query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
        params = []

    elif (right is False or right is None) and (operator == '!='):
        query = '%s."%s" IS NOT NULL' % (table_alias, left)
        params = []

    elif operator == '=?':
        if right is False or right is None:
            # '=?' is a short-circuit that makes the term TRUE if right is None or False
            query = 'TRUE'
            params = []
        else:
            # '=?' behaves like '=' in other cases
            query, params = self.__leaf_to_sql((left, '=', right), model, alias)

    else:
        need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
        sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
        cast = '::text' if sql_operator.endswith('like') else ''

        if left not in model:
            raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
        format = '%s' if need_wildcard else model._fields[left].column_format
        unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
        column = '%s.%s' % (table_alias, _quote(left))
        query = '(%s %s %s)' % (unaccent(column + cast), sql_operator, unaccent(format))

        if (need_wildcard and not right) or (right and operator in NEGATIVE_TERM_OPERATORS):
            query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)

        if need_wildcard:
            params = ['%%%s%%' % pycompat.to_text(right)]
        else:
            field = model._fields[left]
            params = [field.convert_to_column(right, model, validate=False)]

    return query, params
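# Hedged illustration of what __leaf_to_sql is expected to produce for a few common leaves
# on an aliased res_partner table. The outputs are paraphrased from the branches above
# (assuming unaccent is disabled, so it acts as identity), not captured from a running instance:
#
#   ('name', 'ilike', 'foo')  -> query:  ("res_partner"."name"::text ilike %s)
#                                params: ['%foo%']
#   ('active', '=', False)    -> query:  ("res_partner"."active" IS NULL or "res_partner"."active" = false )
#                                params: []
#   ('id', 'in', [1, 2, 3])   -> query:  ("res_partner"."id" in (%s,%s,%s))
#                                params: [1, 2, 3]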
def charge(env, key, account_token, credit, dbuuid=False, description=None, credit_template=None): """ Account charge context manager: takes a hold for ``credit`` amount before executing the body, then captures it if there is no error, or cancels it if the body generates an exception. :param str key: service identifier :param str account_token: user identifier :param int credit: cost of the body's operation :param description: a description of the purpose of the charge, the user will be able to see it in their dashboard :type description: str :param credit_template: a QWeb template to render and show to the user if their account does not have enough credits for the requested operation :type credit_template: str """ endpoint = get_endpoint(env) params = { 'account_token': account_token, 'credit': credit, 'key': key, 'description': description, } if dbuuid: params.update({'dbuuid': dbuuid}) try: transaction_token = jsonrpc(endpoint + '/iap/1/authorize', params=params) except InsufficientCreditError as e: if credit_template: arguments = json.loads(e.args[0]) arguments['body'] = pycompat.to_text( env['ir.qweb'].render(credit_template)) e.args = (json.dumps(arguments), ) raise e try: transaction = IapTransaction() transaction.credit = credit yield transaction except Exception as e: params = { 'token': transaction_token, 'key': key, } r = jsonrpc(endpoint + '/iap/1/cancel', params=params) raise e else: params = { 'token': transaction_token, 'key': key, 'credit_to_capture': transaction.credit, } r = jsonrpc(endpoint + '/iap/1/capture', params=params) # noqa
def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
                attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
                body_alternative=None, subtype_alternative='plain'):
    """Constructs an RFC2822 email.message.Message object based on the keyword arguments passed, and returns it.

    :param string email_from: sender email address
    :param list email_to: list of recipient addresses (to be joined with commas)
    :param string subject: email subject (no pre-encoding/quoting necessary)
    :param string body: email body, of the type ``subtype`` (by default, plaintext).
        If html subtype is used, the message will be automatically converted
        to plaintext and wrapped in multipart/alternative, unless an explicit
        ``body_alternative`` version is passed.
    :param string body_alternative: optional alternative body, of the type specified in ``subtype_alternative``
    :param string reply_to: optional value of Reply-To header
    :param string object_id: optional tracking identifier, to be included in the message-id for
        recognizing replies. Suggested format for object-id is "res_id-model",
        e.g. "12345-crm.lead".
    :param string subtype: optional mime subtype for the text body (usually 'plain' or 'html'),
        must match the format of the ``body`` parameter. Default is 'plain',
        making the content part of the mail "text/plain".
    :param string subtype_alternative: optional mime subtype of ``body_alternative`` (usually 'plain'
        or 'html'). Default is 'plain'.
    :param list attachments: list of (filename, filecontents, mimetype) triples, where filecontents
        is a string containing the bytes of the attachment
    :param list email_cc: optional list of string values for CC header (to be joined with commas)
    :param list email_bcc: optional list of string values for BCC header (to be joined with commas)
    :param dict headers: optional map of headers to set on the outgoing mail (may override the
        other headers, including Subject, Reply-To, Message-Id, etc.)
    :rtype: email.message.EmailMessage
    :return: the new RFC2822 email message
    """
    email_from = email_from or self._get_default_from_address()
    assert email_from, "You must either provide a sender address explicitly or configure "\
                       "using the combination of `mail.catchall.domain` and `mail.default.from` "\
                       "ICPs, in the server configuration file or with the "\
                       "--email-from startup parameter."
    headers = headers or {}  # need valid dict later
    email_cc = email_cc or []
    email_bcc = email_bcc or []
    body = body or u''

    msg = EmailMessage(policy=email.policy.SMTP)
    msg.set_charset('utf-8')

    if not message_id:
        if object_id:
            message_id = tools.generate_tracking_message_id(object_id)
        else:
            message_id = make_msgid()
    msg['Message-Id'] = message_id
    if references:
        msg['references'] = references
    msg['Subject'] = subject
    msg['From'] = email_from
    del msg['Reply-To']
    msg['Reply-To'] = reply_to or email_from
    msg['To'] = email_to
    if email_cc:
        msg['Cc'] = email_cc
    if email_bcc:
        msg['Bcc'] = email_bcc
    msg['Date'] = datetime.datetime.utcnow()
    for key, value in headers.items():
        msg[pycompat.to_text(ustr(key))] = value

    email_body = ustr(body)
    if subtype == 'html' and not body_alternative:
        msg.add_alternative(html2text.html2text(email_body), subtype='plain', charset='utf-8')
        msg.add_alternative(email_body, subtype=subtype, charset='utf-8')
    elif body_alternative:
        msg.add_alternative(ustr(body_alternative), subtype=subtype_alternative, charset='utf-8')
        msg.add_alternative(email_body, subtype=subtype, charset='utf-8')
    else:
        msg.set_content(email_body, subtype=subtype, charset='utf-8')

    if attachments:
        for (fname, fcontent, mime) in attachments:
            maintype, subtype = mime.split('/') if mime and '/' in mime else ('application', 'octet-stream')
            msg.add_attachment(fcontent, maintype, subtype, filename=fname)
    return msg
def load_information_from_description_file(module, mod_path=None): """ :param module: The name of the module (sale, purchase, ...) :param mod_path: Physical path of module, if not providedThe name of the module (sale, purchase, ...) """ if not mod_path: mod_path = get_module_path(module, downloaded=True) manifest_file = module_manifest(mod_path) if manifest_file: # default values for descriptor info = { 'application': False, 'author': 'Odoo S.A.', 'auto_install': False, 'category': 'Uncategorized', 'depends': [], 'description': '', 'icon': get_module_icon(module), 'installable': True, 'license': 'LGPL-3', 'post_load': None, 'version': '1.0', 'web': False, 'sequence': 100, 'summary': '', 'website': '', } info.update( zip('depends data demo test init_xml update_xml demo_xml'.split(), iter(list, None))) f = tools.file_open(manifest_file, mode='rb') try: info.update(ast.literal_eval(pycompat.to_text(f.read()))) finally: f.close() if not info.get('description'): readme_path = [ opj(mod_path, x) for x in README if os.path.isfile(opj(mod_path, x)) ] if readme_path: with tools.file_open(readme_path[0]) as fd: info['description'] = fd.read() # auto_install is set to `False` if disabled, and a set of # auto_install dependencies otherwise. That way, we can set # auto_install: [] to always auto_install a module regardless of its # dependencies auto_install = info.get('auto_install', info.get('active', False)) if isinstance(auto_install, collections.abc.Iterable): info['auto_install'] = set(auto_install) non_dependencies = info['auto_install'].difference(info['depends']) assert not non_dependencies,\ "auto_install triggers must be dependencies, found " \ "non-dependencies [%s] for module %s" % ( ', '.join(non_dependencies), module ) elif auto_install: info['auto_install'] = set(info['depends']) else: info['auto_install'] = False info['version'] = adapt_version(info['version']) return info _logger.debug('module %s: no manifest file found %s', module, MANIFEST_NAMES) return {}
def _fetch_content(self):
    try:
        return super(JavascriptAsset, self)._fetch_content()
    except AssetError as e:
        return u"console.error(%s);" % json.dumps(to_text(e))
def _binary_record_content(self, record, field='datas', filename=None, filename_field='name', default_mimetype='application/octet-stream'): model = record._name mimetype = 'mimetype' in record and record.mimetype or False content = None filehash = 'checksum' in record and record['checksum'] or False field_def = record._fields[field] if field_def.type == 'binary' and field_def.attachment: if model != 'ir.attachment': field_attachment = self.env['ir.attachment'].sudo( ).search_read(domain=[('res_model', '=', model), ('res_id', '=', record.id), ('res_field', '=', field)], fields=['datas', 'mimetype', 'checksum'], limit=1) if field_attachment: mimetype = field_attachment[0]['mimetype'] content = field_attachment[0]['datas'] filehash = field_attachment[0]['checksum'] else: mimetype = record['mimetype'] content = record['datas'] filehash = record['checksum'] if not content: content = record[field] or '' # filename default_filename = False if not filename: if filename_field in record: filename = record[filename_field] if not filename: default_filename = True filename = "%s-%s-%s" % (record._name, record.id, field) if not mimetype: try: decoded_content = base64.b64decode(content) except base64.binascii.Error: # if we could not decode it, no need to pass it down: it would crash elsewhere... return (404, [], None) mimetype = guess_mimetype(decoded_content, default=default_mimetype) # extension _, existing_extension = os.path.splitext(filename) if not existing_extension or default_filename: extension = mimetypes.guess_extension(mimetype) if extension: filename = "%s%s" % (filename, extension) if not filehash: filehash = '"%s"' % hashlib.md5( pycompat.to_text(content).encode('utf-8')).hexdigest() status = 200 if content else 404 return status, content, filename, mimetype, filehash
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas', unique=False, filename=None, filename_field='datas_fname', download=False, mimetype=None, default_mimetype='application/octet-stream', access_token=None, env=None): """ Get file, attachment or downloadable content If the ``xmlid`` and ``id`` parameters are omitted, fetches the default value for the binary field (via ``default_get``), otherwise fetches the field for that precise record. :param str xmlid: xmlid of the record :param str model: name of the model to fetch the binary from :param int id: id of the record from which to fetch the binary :param str field: binary field :param bool unique: add a max-age for the cache control :param str filename: choose a filename :param str filename_field: field holding the filename; if neither is set, a model-id-field filename is generated :param bool download: apply headers to download the file :param str mimetype: mimetype of the field (for headers) :param str default_mimetype: default mimetype if no mimetype is found :param str access_token: optional token for unauthenticated access, only available for ir.attachment :param Environment env: by default use request.env :returns: (status, headers, content) """ env = env or request.env # get object and content obj = None if xmlid: obj = env.ref(xmlid, False) elif id and model in env.registry: obj = env[model].browse(int(id)) # obj exists if not obj or not obj.exists() or field not in obj: return (404, [], None) # access token grant access if model == 'ir.attachment' and access_token: obj = obj.sudo() if not consteq(obj.access_token or u'', access_token): return (403, [], None) # check read access try: obj['__last_update'] except AccessError: return (403, [], None) status, headers, content = None, [], None # attachment by url check module_resource_path = None if model == 'ir.attachment' and obj.type == 'url' and obj.url: url_match = re.match(r"^/(\w+)/(.+)$", obj.url) if url_match: module = url_match.group(1) module_path = get_module_path(module) module_resource_path = get_resource_path(module, url_match.group(2)) if module_path and module_resource_path: module_path = os.path.join(os.path.normpath(module_path), '') # join ensures the path ends with '/' module_resource_path = os.path.normpath(module_resource_path) if module_resource_path.startswith(module_path): with open(module_resource_path, 'rb') as f: content = base64.b64encode(f.read()) # 'last_update' variable removed for lint error fix if not module_resource_path: module_resource_path = obj.url if not content: status = 301 content = module_resource_path else: # begin redefined part of original binary_content of odoo/base/addons/ir/ir_http att = env['ir.http'].find_field_attachment(env, model, field, obj) if att: content = att.url status = 301 if not content: content = obj[field] or '' # end redefined part of original binary_content # filename if not filename: if filename_field in obj: filename = obj[filename_field] elif module_resource_path: filename = os.path.basename(module_resource_path) else: filename = "%s-%s-%s" % (obj._name, obj.id, field) # mimetype mimetype = 'mimetype' in obj and obj.mimetype or False if not mimetype: if filename: mimetype = mimetypes.guess_type(filename)[0] if not mimetype and getattr(env[model]._fields[field], 'attachment', False): # for binary fields, fetch the ir_attachment for mimetype check attach_mimetype = env['ir.attachment'].search_read(domain=[('res_model', '=', model), ('res_id', '=', id), ('res_field', '=', field)], fields=['mimetype'], limit=1) mimetype = attach_mimetype and attach_mimetype[0]['mimetype'] if not mimetype: mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype) headers += [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff')] # cache etag = bool(request) and request.httprequest.headers.get('If-None-Match') retag = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest() status = status or (304 if etag == retag else 200) headers.append(('ETag', retag)) headers.append(('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0))) # content-disposition default name if download: headers.append(('Content-Disposition', cls.content_disposition(filename))) return (status, headers, content)
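# Illustrative sketch of the caching logic above, reduced to plain Python: the ETag
# is an MD5 of the (base64) payload, and a matching If-None-Match header turns the
# response into a 304. Arguments below are placeholders.
import hashlib


def etag_status(content_b64, if_none_match=None):
    etag = '"%s"' % hashlib.md5(content_b64.encode('utf-8')).hexdigest()
    status = 304 if if_none_match == etag else 200
    return status, etag

# etag_status('aGVsbG8=')            -> (200, '"<md5 of payload>"')
# etag_status('aGVsbG8=', same_etag) -> (304, same_etag)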
def _get_query(self): name = "%s" % (self.name.strip()) email = "%%%s%%" % pycompat.to_text(self.email.strip()) email_normalized = tools.email_normalize(self.email.strip()) # Step 1: Retrieve users/partners linked to the email address or name query = """ WITH indirect_references AS ( SELECT id FROM res_partner WHERE email_normalized = %s OR name ilike %s) SELECT %s AS res_model_id, id AS res_id, active AS is_active FROM res_partner WHERE id IN (SELECT id FROM indirect_references) UNION ALL SELECT %s AS res_model_id, id AS res_id, active AS is_active FROM res_users WHERE ( (login ilike %s) OR (partner_id IN ( SELECT id FROM res_partner WHERE email ilike %s or name ilike %s))) """ values = [ # Indirect references CTE email_normalized, name, # Search on res.partner self.env['ir.model.data']._xmlid_to_res_id('base.model_res_partner'), # Search on res.users self.env['ir.model.data']._xmlid_to_res_id('base.model_res_users'), email, email, name, ] # Step 2: Special case for direct messages query += """ UNION ALL SELECT %s AS res_model_id, id AS res_id, True AS is_active FROM mail_message WHERE author_id IN (SELECT id FROM indirect_references) """ values += [ self.env['ir.model.data']._xmlid_to_res_id('mail.model_mail_message'), ] # Step 3: Retrieve info on other models blacklisted_models = self._get_query_models_blacklist() for model_name in self.env: if model_name in blacklisted_models: continue table_name = model_name.replace('.', '_') model = self.env[model_name] if model._transient or not model._auto: continue res_model_id = self.env['ir.model'].search([('model', '=', model_name)]).id has_active = 'active' in model has_additional_query = False additional_query = """ UNION ALL SELECT %s AS res_model_id, id AS res_id, {active} AS is_active FROM {table_name} WHERE """.format(table_name=table_name, active='active' if has_active else True) additional_values = [res_model_id] # 3.1 Search Basic Personal Data Records (aka email/name usage) for field_name in [ 'email_normalized', 'email', 'email_from', 'company_email' ]: if field_name in model and model._fields[field_name].store: has_additional_query = True rec_name = model._rec_name or 'name' is_normalized = field_name == 'email_normalized' or ( model_name == 'mailing.trace' and field_name == 'email') if rec_name in model and model._fields[rec_name].type == 'char': additional_query += """ {field_name} {search_type} %s OR {rec_name} ilike %s """.format( field_name=field_name, search_type='=' if is_normalized else 'ilike', # Manage Foo Bar <*****@*****.**> rec_name=rec_name) additional_values += [ email_normalized if is_normalized else email, name ] else: additional_query += """ {field_name} {search_type} %s """.format(field_name=field_name, search_type='=' if is_normalized else 'ilike') # Manage Foo Bar <*****@*****.**> additional_values += [ email_normalized if is_normalized else email ] if is_normalized: break # 3.2 Search Indirect Personal Data References (aka partner_id) partner_fields = [ field_name for field_name, field in model._fields.items() if field.comodel_name == 'res.partner' and field.store and field.type == 'many2one' and field.ondelete != 'cascade'] if partner_fields: for field_name in partner_fields: additional_query += """ {or_clause}{table_field_name} in (SELECT id FROM indirect_references)""".format( or_clause='OR ' if has_additional_query else '', table_field_name='"%s"."%s"' % (table_name, field_name)) has_additional_query = True if has_additional_query: query += additional_query values += additional_values return query, values
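# Illustrative sketch of the pattern used in _get_query: SQL fragments and their
# parameter values are appended in lockstep so the final query and values list stay
# aligned. Table/column names come from trusted registry metadata; only user data
# goes through placeholders. The tables and columns below are invented.
def build_union_query(term):
    fragments, values = [], []
    for table, column in [('res_partner', 'email'), ('res_users', 'login')]:
        fragments.append(
            "SELECT '%s' AS source, id FROM %s WHERE %s ILIKE %%s" % (table, table, column))
        values.append('%%%s%%' % term)
    return ' UNION ALL '.join(fragments), values

# query, values = build_union_query('someone@example.com')
# cr.execute(query, values)  # executed with psycopg2-style %s placeholders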
def _attachment2url(att): sha = hashlib.md5(pycompat.to_text( att.datas).encode('utf-8')).hexdigest()[0:7] return '/web/image/%s-%s' % (att.id, sha)
def _verify_notification_signature(self, received_signature, payload, hmac_key): """ Check that the signature computed from the payload matches the received one. See https://docs.adyen.com/development-resources/webhooks/verify-hmac-signatures :param str received_signature: The signature sent with the notification :param dict payload: The notification payload :param str hmac_key: The HMAC key of the acquirer handling the transaction :return: Whether the signatures match :rtype: bool """ def _flatten_dict(_value, _path_base='', _separator='.'): """ Recursively generate a flat representation of a dict. :param Object _value: The value to flatten. A dict or an already flat value :param str _path_base: The base path for keys of _value, including preceding separators :param str _separator: The string to use as a separator in the key path """ if isinstance(_value, dict): # The inner value is a dict, flatten it _path_base = _path_base if not _path_base else _path_base + _separator for _key in _value: yield from _flatten_dict(_value[_key], _path_base + str(_key)) else: # The inner value cannot be flattened, yield it yield _path_base, _value def _to_escaped_string(_value): """ Escape payload values that use reserved symbols and cast them to string. Backslashes and colons in string values are escaped with a backslash. Empty values (`None`) are replaced by an empty string. :param Object _value: The value to escape :return: The escaped value :rtype: str """ if isinstance(_value, str): return _value.replace('\\', '\\\\').replace(':', '\\:') elif _value is None: return '' else: return str(_value) if not received_signature: _logger.warning("ignored notification with missing signature") return False # Compute the signature from the payload signature_keys = [ 'pspReference', 'originalReference', 'merchantAccountCode', 'merchantReference', 'amount.value', 'amount.currency', 'eventCode', 'success' ] # Flatten the payload to allow accessing inner dicts naively flattened_payload = {k: v for k, v in _flatten_dict(payload)} # Build the list of signature values as per the list of required signature keys signature_values = [flattened_payload.get(key) for key in signature_keys] # Escape values that contain reserved symbols escaped_values = [_to_escaped_string(value) for value in signature_values] # Concatenate values together with ':' as delimiter signing_string = ':'.join(escaped_values) # Convert the HMAC key to the binary representation binary_hmac_key = binascii.a2b_hex(hmac_key.encode('ascii')) # Calculate the HMAC with the binary representation of the signing string with SHA-256 binary_hmac = hmac.new(binary_hmac_key, signing_string.encode('utf-8'), hashlib.sha256) # Calculate the signature by encoding the result with Base64 expected_signature = base64.b64encode(binary_hmac.digest()) # Compare signatures if received_signature != to_text(expected_signature): _logger.warning("ignored event with invalid signature") return False return True
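# Self-contained sketch of the signing scheme checked above (simplified: only the
# nested 'amount' dict is flattened, which covers the listed keys). The key and
# payload passed in are dummies; compare the result with hmac.compare_digest.
import base64
import binascii
import hashlib
import hmac


def compute_notification_signature(payload, hmac_key_hex):
    keys = ['pspReference', 'originalReference', 'merchantAccountCode',
            'merchantReference', 'amount.value', 'amount.currency', 'eventCode', 'success']

    def escape(value):
        # None becomes '', backslashes and colons are escaped with a backslash
        if value is None:
            return ''
        return str(value).replace('\\', '\\\\').replace(':', '\\:')

    flat = dict(payload)
    flat.update({'amount.%s' % k: v for k, v in payload.get('amount', {}).items()})
    signing_string = ':'.join(escape(flat.get(k)) for k in keys)
    digest = hmac.new(binascii.a2b_hex(hmac_key_hex.encode('ascii')),
                      signing_string.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('ascii')

# hmac.compare_digest(received_signature, compute_notification_signature(payload, hmac_key))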
def load_information_from_description_file(module, mod_path=None): """ :param module: The name of the module (sale, purchase, ...) :param mod_path: Physical path of the module; if not provided, it is looked up from the module name """ if not mod_path: mod_path = get_module_path(module, downloaded=True) manifest_file = module_manifest(mod_path) if manifest_file: # default values for descriptor info = { 'application': False, 'author': 'Odoo S.A.', 'auto_install': False, 'category': 'Uncategorized', 'depends': [], 'description': '', 'icon': get_module_icon(module), 'installable': True, 'post_load': None, 'version': '1.0', 'web': False, 'sequence': 100, 'summary': '', 'website': '', } info.update(zip('depends data demo test init_xml update_xml demo_xml'.split(), iter(list, None))) f = tools.file_open(manifest_file, mode='rb') try: info.update(ast.literal_eval(pycompat.to_text(f.read()))) finally: f.close() if not info.get('description'): readme_path = [ opj(mod_path, x) for x in README if os.path.isfile(opj(mod_path, x)) ] if readme_path: with tools.file_open(readme_path[0]) as fd: info['description'] = fd.read() if not info.get('license'): info['license'] = 'LGPL-3' _logger.warning("Missing `license` key in manifest for '%s', defaulting to LGPL-3", module) # auto_install is either `False` (by default), in which case the module is opt-in, or a list of dependencies, in which case the module is automatically installed once all of them are (special case: [] to always install the module), or `True` to auto-install the module once all dependencies declared in `depends` are installed. if isinstance(info['auto_install'], collections.abc.Iterable): info['auto_install'] = set(info['auto_install']) non_dependencies = info['auto_install'].difference(info['depends']) assert not non_dependencies, "auto_install triggers must be dependencies, found non-dependencies [%s] for module %s" % (', '.join(non_dependencies), module) elif info['auto_install']: info['auto_install'] = set(info['depends']) info['version'] = adapt_version(info['version']) return info _logger.debug('module %s: no manifest file found %s', module, MANIFEST_NAMES) return {}
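# Illustrative sketch of the manifest parsing step above: a manifest file holds a
# single Python dict literal, so ast.literal_eval can read it safely and the result
# is merged over a dict of defaults. The path and defaults are placeholders.
import ast


def read_manifest(manifest_path, defaults=None):
    info = dict(defaults or {'depends': [], 'installable': True, 'version': '1.0'})
    with open(manifest_path, 'rb') as f:
        info.update(ast.literal_eval(f.read().decode('utf-8')))
    return info

# read_manifest('/path/to/my_module/__manifest__.py')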
def connect(self, host=None, port=None, user=None, password=None, encryption=None, smtp_debug=False, mail_server_id=None): """Returns a new SMTP connection to the given SMTP server. When running in test mode, this method does nothing and returns `None`. :param host: host or IP of SMTP server to connect to, if mail_server_id not passed :param int port: SMTP port to connect to :param user: optional username to authenticate with :param password: optional password to authenticate with :param string encryption: optional, ``'ssl'`` | ``'starttls'`` :param bool smtp_debug: toggle debugging of SMTP sessions (all i/o will be output in logs) :param mail_server_id: ID of specific mail server to use (overrides other parameters) """ # Do not actually connect while running in test mode if getattr(threading.currentThread(), 'testing', False): return None mail_server = smtp_encryption = None if mail_server_id: mail_server = self.sudo().browse(mail_server_id) elif not host: mail_server = self.sudo().search([], order='sequence', limit=1) if mail_server: smtp_server = mail_server.smtp_host smtp_port = mail_server.smtp_port smtp_user = mail_server.smtp_user smtp_password = mail_server.smtp_pass smtp_encryption = mail_server.smtp_encryption smtp_debug = smtp_debug or mail_server.smtp_debug else: # we were passed individual smtp parameters or nothing and there is no default server smtp_server = host or tools.config.get('smtp_server') smtp_port = tools.config.get('smtp_port', 25) if port is None else port smtp_user = user or tools.config.get('smtp_user') smtp_password = password or tools.config.get('smtp_password') smtp_encryption = encryption if smtp_encryption is None and tools.config.get('smtp_ssl'): smtp_encryption = 'starttls' # smtp_ssl => STARTTLS as of v7 if not smtp_server: raise UserError( (_("Missing SMTP Server") + "\n" + _("Please define at least one SMTP server, " "or provide the SMTP parameters explicitly."))) if smtp_encryption == 'ssl': if 'SMTP_SSL' not in smtplib.__all__: raise UserError( _("Your Odoo Server does not support SMTP-over-SSL. " "You could use STARTTLS instead. " "If SSL is needed, an upgrade to Python 2.6 on the server-side " "should do the trick.")) connection = smtplib.SMTP_SSL(smtp_server, smtp_port, timeout=SMTP_TIMEOUT) else: connection = smtplib.SMTP(smtp_server, smtp_port, timeout=SMTP_TIMEOUT) connection.set_debuglevel(smtp_debug) if smtp_encryption == 'starttls': # starttls() will perform ehlo() if needed first # and will discard the previous list of services # after successfully performing STARTTLS command, # (as per RFC 3207) so for example any AUTH # capability that appears only on encrypted channels # will be correctly detected for next step connection.starttls() if smtp_user: # Attempt authentication - will raise if AUTH service not supported # The user/password must be converted to bytestrings in order to be usable for # certain hashing schemes, like HMAC. # See also bug #597143 and python issue #5285 smtp_user = pycompat.to_text(ustr(smtp_user)) smtp_password = pycompat.to_text(ustr(smtp_password)) connection.login(smtp_user, smtp_password) # Some methods of SMTP don't check whether EHLO/HELO was sent. # Anyway, as it may have been sent by login(), all subsequent usages should consider this command as sent. connection.ehlo_or_helo_if_needed() return connection
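# Illustrative sketch of the connection logic above using plain smtplib: SSL uses
# SMTP_SSL, otherwise a normal connection is opened and STARTTLS is issued before
# logging in. Host, port and credentials below are placeholders.
import smtplib


def open_smtp(host, port, user=None, password=None, encryption=None, timeout=60):
    if encryption == 'ssl':
        connection = smtplib.SMTP_SSL(host, port, timeout=timeout)
    else:
        connection = smtplib.SMTP(host, port, timeout=timeout)
        if encryption == 'starttls':
            connection.starttls()  # re-runs EHLO so AUTH capabilities are detected (RFC 3207)
    if user:
        connection.login(user, password or '')
    return connection

# conn = open_smtp('smtp.example.com', 587, 'user', 'secret', encryption='starttls')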
def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None, body_alternative=None, subtype_alternative='plain'): """Constructs an RFC2822 email.message.Message object based on the keyword arguments passed, and returns it. :param string email_from: sender email address :param list email_to: list of recipient addresses (to be joined with commas) :param string subject: email subject (no pre-encoding/quoting necessary) :param string body: email body, of the type ``subtype`` (by default, plaintext). If html subtype is used, the message will be automatically converted to plaintext and wrapped in multipart/alternative, unless an explicit ``body_alternative`` version is passed. :param string body_alternative: optional alternative body, of the type specified in ``subtype_alternative`` :param string reply_to: optional value of Reply-To header :param string object_id: optional tracking identifier, to be included in the message-id for recognizing replies. Suggested format for object-id is "res_id-model", e.g. "12345-crm.lead". :param string subtype: optional mime subtype for the text body (usually 'plain' or 'html'), must match the format of the ``body`` parameter. Default is 'plain', making the content part of the mail "text/plain". :param string subtype_alternative: optional mime subtype of ``body_alternative`` (usually 'plain' or 'html'). Default is 'plain'. :param list attachments: list of (filename, filecontents, mimetype) triples, where filecontents is a string containing the bytes of the attachment :param list email_cc: optional list of string values for CC header (to be joined with commas) :param list email_bcc: optional list of string values for BCC header (to be joined with commas) :param dict headers: optional map of headers to set on the outgoing mail (may override the other headers, including Subject, Reply-To, Message-Id, etc.) :rtype: email.message.Message (usually MIMEMultipart) :return: the new RFC2822 email message """ email_from = email_from or tools.config.get('email_from') assert email_from, "You must either provide a sender address explicitly or configure a global sender address in the server configuration or with the --email-from startup parameter." # Note: we must force all strings to 8-bit utf-8 when crafting message, or use encode_header() for headers, which does it automatically. headers = headers or {} # need valid dict later email_cc = email_cc or [] email_bcc = email_bcc or [] body = body or u'' email_body = ustr(body) email_text_part = MIMEText(email_body, _subtype=subtype, _charset='utf-8') msg = MIMEMultipart() if not message_id: if object_id: message_id = tools.generate_tracking_message_id(object_id) else: message_id = make_msgid() msg['Message-Id'] = encode_header(message_id) if references: msg['references'] = encode_header(references) msg['Subject'] = encode_header(subject) msg['From'] = encode_rfc2822_address_header(email_from) del msg['Reply-To'] if reply_to: msg['Reply-To'] = encode_rfc2822_address_header(reply_to) else: msg['Reply-To'] = msg['From'] msg['To'] = encode_rfc2822_address_header(COMMASPACE.join(email_to)) if email_cc: msg['Cc'] = encode_rfc2822_address_header(COMMASPACE.join(email_cc)) if email_bcc: msg['Bcc'] = encode_rfc2822_address_header(COMMASPACE.join(email_bcc)) msg['Date'] = formatdate() # Custom headers may override normal headers or provide additional ones for key, value in headers.items(): msg[pycompat.to_text(ustr(key))] = encode_header(value) if subtype == 'html' and not body_alternative: # Always provide alternative text body ourselves if possible. text = html2text.html2text(email_body) alternative_part = MIMEMultipart(_subtype="alternative") alternative_part.attach(MIMEText(text, _charset='utf-8', _subtype='plain')) alternative_part.attach(email_text_part) msg.attach(alternative_part) elif body_alternative: # Include both alternatives, as specified, within a multipart/alternative part alternative_part = MIMEMultipart(_subtype="alternative") body_alternative_ = ustr(body_alternative) alternative_body_part = MIMEText(body_alternative_, _subtype=subtype_alternative, _charset='utf-8') alternative_part.attach(alternative_body_part) alternative_part.attach(email_text_part) msg.attach(alternative_part) else: msg.attach(email_text_part) if attachments: for (fname, fcontent, mime) in attachments: filename_rfc2047 = encode_header_param(fname) if mime and '/' in mime: maintype, subtype = mime.split('/', 1) part = MIMEBase(maintype, subtype) else: part = MIMEBase('application', "octet-stream") # The default RFC2231 encoding of Message.add_header() works in Thunderbird but not GMail so we fix it by using RFC2047 encoding for the filename instead. part.set_param('name', filename_rfc2047) part.add_header('Content-Disposition', 'attachment', filename=filename_rfc2047) part.set_payload(fcontent) encoders.encode_base64(part) msg.attach(part) return msg
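# Illustrative sketch of the multipart/alternative structure built above, using only
# the standard library: plain-text and HTML parts are wrapped together so clients
# pick the richest version they support. Addresses and bodies are placeholders.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid


def build_simple_email(sender, recipients, subject, text_body, html_body):
    msg = MIMEMultipart()
    msg['Message-Id'] = make_msgid()
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ', '.join(recipients)
    msg['Date'] = formatdate()
    alternative = MIMEMultipart(_subtype='alternative')
    alternative.attach(MIMEText(text_body, _subtype='plain', _charset='utf-8'))
    alternative.attach(MIMEText(html_body, _subtype='html', _charset='utf-8'))
    msg.attach(alternative)
    return msg

# build_simple_email('a@example.com', ['b@example.com'], 'Hello', 'Hello', '<p>Hello</p>')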
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas', unique=False, filename=None, filename_field='datas_fname', download=False, mimetype=None, default_mimetype='application/octet-stream', access_token=None, share_id=None, share_token=None, force_ext=False, env=None): """ Get file, attachment or downloadable content If the ``xmlid`` and ``id`` parameters are omitted, fetches the default value for the binary field (via ``default_get``), otherwise fetches the field for that precise record. :param str xmlid: xmlid of the record :param str model: name of the model to fetch the binary from :param int id: id of the record from which to fetch the binary :param str field: binary field :param bool unique: add a max-age for the cache control :param str filename: choose a filename :param str filename_field: field holding the filename; if neither is set, a model-id-field filename is generated :param bool download: apply headers to download the file :param str mimetype: mimetype of the field (for headers) :param share_id: the id of the documents.share that contains the attachment :param share_token: the token of the documents.share that contains the attachment :param str default_mimetype: default mimetype if no mimetype is found :param str access_token: optional token for unauthenticated access, only available for ir.attachment :param bool force_ext: if true, adds the extension to the filename that corresponds to the mimetype :param Environment env: by default use request.env :returns: (status, headers, content) """ env = env or request.env # get object and content obj = None if xmlid: obj = cls._xmlid_to_obj(env, xmlid) elif id and model == 'ir.attachment' and access_token: obj = env[model].sudo().browse(int(id)) if not consteq(obj.access_token, access_token): return (403, [], None) elif id and share_id and share_token: share = env['documents.share'].sudo().browse(int(share_id)) if share: if share.state == 'expired': return (403, [], None) if not consteq(share.access_token, share_token): return (403, [], None) elif share.type == 'ids' and (id in share.attachment_ids.ids): obj = env[model].sudo().browse(int(id)) elif share.type == 'domain': obj = env[model].sudo().browse(int(id)) share_domain = [] if share.domain: share_domain = literal_eval(share.domain) domain = [['folder_id', '=', share.folder_id.id]] + share_domain attachments_check = http.request.env['ir.attachment'].sudo().search(domain) if obj not in attachments_check: return (403, [], None) elif id and model in env.registry: obj = env[model].browse(int(id)) # obj exists if not obj or not obj.exists() or field not in obj: return (404, [], None) # check read access try: last_update = obj['__last_update'] except AccessError: return (403, [], None) status, headers, content = None, [], None # attachment by url check module_resource_path = None if model == 'ir.attachment' and obj.type == 'url' and obj.url: url_match = re.match(r"^/(\w+)/(.+)$", obj.url) if url_match: module = url_match.group(1) module_path = get_module_path(module) module_resource_path = get_resource_path(module, url_match.group(2)) if module_path and module_resource_path: module_path = os.path.join(os.path.normpath(module_path), '') # join ensures the path ends with '/' module_resource_path = os.path.normpath(module_resource_path) if module_resource_path.startswith(module_path): with open(module_resource_path, 'rb') as f: content = base64.b64encode(f.read()) last_update = pycompat.text_type(os.path.getmtime(module_resource_path)) if not module_resource_path: module_resource_path = obj.url if not content: status = 301 content = module_resource_path else: content = obj[field] or '' # filename if not filename: if filename_field in obj: filename = obj[filename_field] elif module_resource_path: filename = os.path.basename(module_resource_path) else: filename = "%s-%s-%s" % (obj._name, obj.id, field) # mimetype mimetype = 'mimetype' in obj and obj.mimetype or False if not mimetype: if filename: mimetype = mimetypes.guess_type(filename)[0] if not mimetype and getattr(env[model]._fields[field], 'attachment', False): # for binary fields, fetch the ir_attachment for mimetype check attach_mimetype = env['ir.attachment'].search_read(domain=[('res_model', '=', model), ('res_id', '=', id), ('res_field', '=', field)], fields=['mimetype'], limit=1) mimetype = attach_mimetype and attach_mimetype[0]['mimetype'] if not mimetype: mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype) if force_ext and (mimetype != default_mimetype): dot_index = filename.rfind('.') if dot_index > -1: if mimetypes.guess_extension(mimetype) != filename[dot_index:]: filename = filename[:dot_index] + mimetypes.guess_extension(mimetype) else: filename = filename + mimetypes.guess_extension(mimetype) headers += [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff')] # cache etag = bool(request) and request.httprequest.headers.get('If-None-Match') retag = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest() status = status or (304 if etag == retag else 200) headers.append(('ETag', retag)) headers.append(('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0))) # content-disposition default name if download: headers.append(('Content-Disposition', cls.content_disposition(filename))) return (status, headers, content)
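# Illustrative note on the token checks above: consteq performs a constant-time
# comparison; the standard-library equivalent is hmac.compare_digest, which avoids
# leaking how many leading characters of the token match.
import hmac


def token_matches(stored_token, received_token):
    return bool(stored_token) and hmac.compare_digest(stored_token, received_token)

# token_matches(attachment_token or '', request_token or '')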
def record_to_html(self, record, field_name, options): assert options['tagName'] != 'img',\ "Oddly enough, the root tag of an image field can not be img. " \ "That is because the image goes into the tag, or it gets the " \ "hose again." if options.get('qweb_img_raw_data', False): return super(Image, self).record_to_html(record, field_name, options) aclasses = ['img', 'img-fluid'] if options.get('qweb_img_responsive', True) else ['img'] aclasses += options.get('class', '').split() classes = ' '.join(map(escape, aclasses)) max_size = None if options.get('resize'): max_size = options.get('resize') else: max_width, max_height = options.get('max_width', 0), options.get( 'max_height', 0) if max_width or max_height: max_size = '%sx%s' % (max_width, max_height) sha = hashlib.sha1( str(getattr(record, '__last_update')).encode('utf-8')).hexdigest()[0:7] max_size = '' if max_size is None else '/%s' % max_size avoid_if_small = '&avoid_if_small=true' if options.get( 'avoid_if_small') else '' if options.get('filename-field') and getattr( record, options['filename-field'], None): filename = record[options['filename-field']] elif options.get('filename'): filename = options['filename'] else: filename = record.display_name src = '/web/image/%s/%s/%s%s/%s?unique=%s%s' % ( record._name, record.id, options.get('preview_image', field_name), max_size, url_quote(filename), sha, avoid_if_small) if options.get('alt-field') and getattr(record, options['alt-field'], None): alt = escape(record[options['alt-field']]) elif options.get('alt'): alt = options['alt'] else: alt = escape(record.display_name) src_zoom = None if options.get('zoom') and getattr(record, options['zoom'], None): src_zoom = '/web/image/%s/%s/%s%s/%s?unique=%s' % ( record._name, record.id, options['zoom'], max_size, url_quote(filename), sha) elif options.get('zoom'): src_zoom = options['zoom'] atts = OrderedDict() atts["src"] = src atts["class"] = classes atts["style"] = options.get('style') atts["alt"] = alt atts["data-zoom"] = src_zoom and u'1' or None atts["data-zoom-image"] = src_zoom atts["data-no-post-process"] = options.get('data-no-post-process') atts = self.env['ir.qweb']._post_processing_att( 'img', atts, options.get('template_options')) img = ['<img'] for name, value in atts.items(): if value: img.append(' ') img.append(escape(pycompat.to_text(name))) img.append('="') img.append(escape(pycompat.to_text(value))) img.append('"') img.append('/>') return u''.join(img)
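# Illustrative sketch of the tag-building loop above: attributes with falsy values
# are skipped, and names and values are escaped before being joined into the final
# <img> markup. Uses the standard-library html.escape instead of the qweb helpers.
from html import escape


def render_img(attrs):
    parts = ['<img']
    for name, value in attrs.items():
        if value:
            parts.append(' %s="%s"' % (escape(str(name)), escape(str(value))))
    parts.append('/>')
    return ''.join(parts)

# render_img({'src': '/web/image/42', 'class': 'img img-fluid', 'alt': 'A "quoted" name', 'style': None})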
self.stylesheets.append(StylesheetAsset(self, url=f['url'], filename=f['filename'], inline=f['content'], media=f['media'])) elif f['atype'] == 'text/javascript': self.javascripts.append(JavascriptAsset(self, url=f['url'], filename=f['filename'], inline=f['content'])) # deprecated and will be removed after v11 def to_html(self, sep=None, css=True, js=True, debug=False, async=False, url_for=(lambda url: url)): nodes = self.to_node(css=css, js=js, debug=debug, async=async) if sep is None: sep = u'\n ' response = [] for tagName, attributes, content in nodes: html = u"<%s " % tagName for name, value in attributes.items(): if value or isinstance(value, string_types): html += u' %s="%s"' % (name, escape(to_text(value))) if content is None: html += u'/>' else: html += u'>%s</%s>' % (escape(to_text(content)), tagName) response.append(html) return sep + sep.join(response) def to_node(self, css=True, js=True, debug=False, async=False): """ :returns: [(tagName, attributes, content)]; content is None if the tag is self-closing """ response = [] if debug == 'assets': if css and self.stylesheets:
def test_message_parse_xhtml(self): # Test that the parsing of XHTML mails does not fail self.env['mail.thread'].message_parse( email.message_from_string( pycompat.to_text(test_mail_data.MAIL_XHTML)))