def _read_xls_book(self, book):
    sheet = book.sheet_by_index(0)
    # emulate Sheet.get_rows for pre-0.9.4
    for row in pycompat.imap(sheet.row, range(sheet.nrows)):
        values = []
        for cell in row:
            if cell.ctype is xlrd.XL_CELL_NUMBER:
                is_float = cell.value % 1 != 0.0
                values.append(
                    pycompat.text_type(cell.value)
                    if is_float
                    else pycompat.text_type(int(cell.value))
                )
            elif cell.ctype is xlrd.XL_CELL_DATE:
                is_datetime = cell.value % 1 != 0.0
                # emulate xldate_as_datetime for pre-0.9.3
                dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(cell.value, book.datemode))
                values.append(
                    dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                    if is_datetime
                    else dt.strftime(DEFAULT_SERVER_DATE_FORMAT)
                )
            elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
                values.append(u'True' if cell.value else u'False')
            elif cell.ctype is xlrd.XL_CELL_ERROR:
                raise ValueError(
                    _("Error cell found while reading XLS/XLSX file: %s") %
                    xlrd.error_text_from_code.get(
                        cell.value, "unknown error code %s" % cell.value)
                )
            else:
                values.append(cell.value)
        if any(x for x in values if x.strip()):
            yield values
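# A minimal usage sketch for the generator above, assuming the `xlrd` library
# is installed and that 'example.xls' exists; `self` is unused by the method,
# so it is passed as None here purely for illustration.
import xlrd

book = xlrd.open_workbook('example.xls')
for row_values in _read_xls_book(None, book):
    print(row_values)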
def format_tz(env, dt, tz=False, format=False):
    record_user_timestamp = env.user.sudo().with_context(tz=tz or env.user.sudo().tz or 'UTC')
    timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
    ts = fields.Datetime.context_timestamp(record_user_timestamp, timestamp)

    # Babel allows formatting a datetime in a specific language without changing
    # the locale, so month 1 = January in English and janvier in French.
    # Be aware that the default value for format is 'medium', not 'short':
    #     medium: Jan 5, 2016, 10:20:31 PM | 5 janv. 2016 22:20:31
    #     short:  1/5/16, 10:20 PM         | 5/01/16 22:20
    if env.context.get('use_babel'):
        # Formatting available here: http://babel.pocoo.org/en/latest/dates.html#date-fields
        from babel.dates import format_datetime
        return format_datetime(ts, format or 'medium', locale=env.context.get("lang") or 'en_US')

    if format:
        return pycompat.text_type(ts.strftime(format))
    else:
        lang = env.context.get("lang")
        langs = env['res.lang']
        if lang:
            langs = env['res.lang'].search([("code", "=", lang)])
        format_date = langs.date_format or '%B-%d-%Y'
        format_time = langs.time_format or '%I-%M %p'

        fdate = pycompat.text_type(ts.strftime(format_date))
        ftime = pycompat.text_type(ts.strftime(format_time))
        return u"%s %s%s" % (fdate, ftime, (u' (%s)' % tz) if tz else u'')
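# A minimal, runnable illustration of the Babel branch above, assuming the
# `babel` package is installed: the same timestamp renders per-locale without
# touching the process-wide locale settings.
import datetime
from babel.dates import format_datetime

ts = datetime.datetime(2016, 1, 5, 22, 20, 31)
print(format_datetime(ts, 'medium', locale='en_US'))  # Jan 5, 2016, 10:20:31 PM
print(format_datetime(ts, 'medium', locale='fr_FR'))  # e.g. 5 janv. 2016 22:20:31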
def starttag(self, node, tagname, **attributes):
    tagname = pycompat.text_type(tagname).lower()

    # extract generic attributes
    attrs = {name.lower(): value for name, value in attributes.items()}
    attrs.update(
        (name, value) for name, value in node.attributes.items()
        if name.startswith('data-')
    )

    prefix = []
    postfix = []

    # handle possibly multiple ids
    assert 'id' not in attrs, "starttag can't be passed a single id attribute, use a list of ids"
    ids = node.get('ids', []) + attrs.pop('ids', [])
    if ids:
        _ids = iter(ids)
        attrs['id'] = next(_ids)
        postfix.extend(u'<i id="{}"></i>'.format(_id) for _id in _ids)

    # set CSS class
    classes = set(node.get('classes', []) + attrs.pop('class', '').split())
    if classes:
        attrs['class'] = u' '.join(classes)

    return u'{prefix}<{tag} {attrs}>{postfix}'.format(
        prefix=u''.join(prefix),
        tag=tagname,
        attrs=u' '.join(u'{}="{}"'.format(name, self.attval(value))
                        for name, value in attrs.items()),
        postfix=u''.join(postfix),
    )
def test_path(self):
    """ Can recursively export fields of m2o via path """
    record = self.env['export.integer'].create({'value': 42})
    self.assertEqual(
        self.export(record.id, fields=['value/.id', 'value/value']),
        [[pycompat.text_type(record.id), u'42']])
def attach(self, upload=None, url=None, disable_optimization=None, filters=None, **kwargs):
    # the upload argument doesn't allow us to access the files if more than
    # one file is uploaded, as upload references the first file
    # therefore we have to recover the files from the request object
    Attachments = request.env['ir.attachment']  # registry for the attachment table

    uploads = []
    message = None
    if not upload:
        # no image provided, storing the link and the image name
        name = url.split("/").pop()  # recover filename
        datas_fname = name
        if filters:
            datas_fname = filters + '_' + datas_fname
        attachment = Attachments.create({
            'name': name,
            'datas_fname': datas_fname,
            'type': 'url',
            'url': url,
            'public': True,
            'res_model': 'ir.ui.view',
        })
        uploads += attachment.read(['name', 'mimetype', 'checksum', 'url'])
    else:
        # images provided
        try:
            attachments = request.env['ir.attachment']
            for c_file in request.httprequest.files.getlist('upload'):
                data = c_file.read()
                try:
                    image = Image.open(io.BytesIO(data))
                    w, h = image.size
                    if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
                        raise ValueError(
                            u"Image size excessive, uploaded images must be smaller "
                            u"than 42 million pixel")
                    if not disable_optimization and image.format in ('PNG', 'JPEG'):
                        data = tools.image_save_for_web(image)
                except IOError:
                    pass

                name = c_file.filename
                datas_fname = name
                if filters:
                    datas_fname = filters + '_' + datas_fname
                attachment = Attachments.create({
                    'name': name,
                    'datas': base64.b64encode(data),
                    'datas_fname': datas_fname,
                    'public': True,
                    'res_model': 'ir.ui.view',
                })
                attachments += attachment
            uploads += attachments.read(['name', 'mimetype', 'checksum', 'url'])
        except Exception as e:
            logger.exception("Failed to upload image to attachment")
            message = pycompat.text_type(e)

    return """<script type='text/javascript'>
        window.attachments = %s;
        window.error = %s;
    </script>""" % (json.dumps(uploads), json.dumps(message))
def create_token(wizard, partner_id, email):
    if context.get("survey_resent_token"):
        survey_user_input = SurveyUserInput.search(
            [('survey_id', '=', wizard.survey_id.id),
             ('state', 'in', ['new', 'skip']),
             '|',
             ('partner_id', '=', partner_id),
             ('email', '=', email)], limit=1)
        if survey_user_input:
            return survey_user_input.token
    if wizard.public != 'email_private':
        return None
    else:
        token = pycompat.text_type(uuid.uuid4())
        # create response with token
        survey_user_input = SurveyUserInput.create({
            'survey_id': wizard.survey_id.id,
            'deadline': wizard.date_deadline,
            'date_create': fields.Datetime.now(),
            'input_type': 'link',
            'state': 'new',
            'token': token,
            'partner_id': partner_id,
            'email': email,
        })
        return survey_user_input.token
def do(self, fields, options, dryrun=False):
    """ Actual execution of the import

    :param fields: import mapping: maps each column to a field,
                   ``False`` for the columns to ignore
    :type fields: list(str|bool)
    :param dict options:
    :param bool dryrun: performs all import operations (and
                        validations) but rolls back writes, allowing
                        as many errors as possible to be collected
                        without the risk of clobbering the database.
    :returns: A list of errors. If the list is empty the import
              executed fully and correctly. If the list is
              non-empty it contains dicts with 3 keys: ``type`` the
              type of error (``error|warning``); ``message`` the
              error message associated with the error (a string)
              and ``record`` the data which failed to import (or
              ``false`` if that data isn't available or provided)
    :rtype: list({type, message, record})
    """
    self.ensure_one()
    self._cr.execute('SAVEPOINT import')

    try:
        data, import_fields = self._convert_import_data(fields, options)
        # Parse date and float field
        data = self._parse_import_data(data, import_fields, options)
    except ValueError as error:
        return [{
            'type': 'error',
            'message': pycompat.text_type(error),
            'record': False,
        }]

    _logger.info('importing %d rows...', len(data))

    model = self.env[self.res_model].with_context(import_file=True)
    defer_parent_store = self.env.context.get('defer_parent_store_computation', True)
    if defer_parent_store and model._parent_store:
        model = model.with_context(defer_parent_store_computation=True)

    import_result = model.load(import_fields, data)
    _logger.info('done')

    # If transaction aborted, RELEASE SAVEPOINT is going to raise
    # an InternalError (ROLLBACK should work, maybe). Ignore that.
    # TODO: to handle multiple errors, create savepoint around
    #       write and release it in case of write error (after
    #       adding error to errors array) => can keep on trying to
    #       import stuff, and rollback at the end if there is any
    #       error in the results.
    try:
        if dryrun:
            self._cr.execute('ROLLBACK TO SAVEPOINT import')
        else:
            self._cr.execute('RELEASE SAVEPOINT import')
    except psycopg2.InternalError:
        pass

    return import_result['messages']
def convert_to_column(self, value, record, values=None, validate=True):
    if not value:
        return None
    # b'<' opens a plaintext XML/SVG payload; base64-encoding a payload that
    # starts with '<' (0x3C) yields content starting with b'P'
    magic_bytes = {
        b'P',
        b'<',
    }
    if isinstance(value, str):
        value = value.encode()
    if value[:1] in magic_bytes:
        try:
            decoded_value = base64.b64decode(value.translate(None, delete=b'\r\n'), validate=True)
        except binascii.Error:
            decoded_value = value
        # SVG can embed scripts, so restrict its upload to system users
        if (guess_mimetype(decoded_value).startswith('image/svg')
                and not record.env.user._is_system()):
            raise UserError(_("Only admins can upload SVG files."))
    if isinstance(value, bytes):
        return psycopg2.Binary(value)
    try:
        return psycopg2.Binary(pycompat.text_type(value).encode('ascii'))
    except UnicodeEncodeError:
        raise UserError(
            _("ASCII characters are required for %s in %s") % (value, self.name))
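# Simplified, runnable sketch of the magic-byte check above: values starting
# with b'P' (base64 of '<') or b'<' (plaintext XML) are candidates for base64
# decoding before the mimetype is sniffed; Odoo's guess_mimetype() is elided.
import base64
import binascii

def decode_if_base64(value: bytes) -> bytes:
    if value[:1] in {b'P', b'<'}:
        try:
            return base64.b64decode(value.translate(None, delete=b'\r\n'), validate=True)
        except binascii.Error:
            pass
    return value

print(decode_if_base64(base64.b64encode(b'<svg xmlns="http://www.w3.org/2000/svg"/>')))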
def encode(self, text):
    # translate special characters to their HTML entities
    return pycompat.text_type(text).translate({
        ord('&'): u'&amp;',
        ord('<'): u'&lt;',
        ord('"'): u'&quot;',
        ord('>'): u'&gt;',
        0xa0: u'&nbsp;',
    })
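# Standalone check of the translate table used by encode() above: str.translate
# maps code points to replacement strings, so no Odoo imports are needed.
table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('"'): u'&quot;',
    ord('>'): u'&gt;',
    0xa0: u'&nbsp;',
}
print(u'a < b & "c"\xa0end'.translate(table))
# a &lt; b &amp; &quot;c&quot;&nbsp;end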
def _str_to_selection(self, model, field, value):
    # get untranslated values
    env = self.with_context(lang=None).env
    selection = field.get_description(env)['selection']

    for item, label in selection:
        label = ustr(label)
        labels = [label] + self._get_translations(('selection', 'model', 'code'), label)
        if value == pycompat.text_type(item) or value in labels:
            return item, []

    raise self._format_import_error(
        ValueError,
        _(u"Value '%s' not found in selection field '%%(field)s'"),
        value,
        {'moreinfo': [_label or pycompat.text_type(item)
                      for item, _label in selection if _label or item]}
    )
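# Plain-Python sketch of the selection lookup above: an imported value matches
# either the technical key or one of the (possibly translated) labels.
selection = [('draft', 'Draft'), ('done', 'Done')]

def str_to_selection(value, selection, translations=()):
    for item, label in selection:
        if value == str(item) or value in [label] + list(translations):
            return item
    raise ValueError("Value %r not found in selection" % value)

print(str_to_selection('Done', selection))  # done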
def do(self, fields, options, dryrun=False):
    """ Actual execution of the import

    :param fields: import mapping: maps each column to a field,
                   ``False`` for the columns to ignore
    :type fields: list(str|bool)
    :param dict options:
    :param bool dryrun: performs all import operations (and
                        validations) but rolls back writes, allowing
                        as many errors as possible to be collected
                        without the risk of clobbering the database.
    :returns: A list of errors. If the list is empty the import
              executed fully and correctly. If the list is
              non-empty it contains dicts with 3 keys: ``type`` the
              type of error (``error|warning``); ``message`` the
              error message associated with the error (a string)
              and ``record`` the data which failed to import (or
              ``false`` if that data isn't available or provided)
    :rtype: list({type, message, record})
    """
    self.ensure_one()
    self._cr.execute('SAVEPOINT import')

    try:
        data, import_fields = self._convert_import_data(fields, options)
        # Parse date and float field
        data = self._parse_import_data(data, import_fields, options)
    except ValueError as error:
        return [{
            'type': 'error',
            'message': pycompat.text_type(error),
            'record': False,
        }]

    _logger.info('importing %d rows...', len(data))

    model = self.env[self.res_model].with_context(import_file=True)
    import_result = model.load(import_fields, data)
    _logger.info('done')

    # If transaction aborted, RELEASE SAVEPOINT is going to raise
    # an InternalError (ROLLBACK should work, maybe). Ignore that.
    # TODO: to handle multiple errors, create savepoint around
    #       write and release it in case of write error (after
    #       adding error to errors array) => can keep on trying to
    #       import stuff, and rollback at the end if there is any
    #       error in the results.
    try:
        if dryrun:
            self._cr.execute('ROLLBACK TO SAVEPOINT import')
        else:
            self._cr.execute('RELEASE SAVEPOINT import')
    except psycopg2.InternalError:
        pass

    return import_result['messages']
def _generate_code(self):
    """Generate a pseudo-random string of up to 20 decimal digits for
    barcode generation.

    A decimal serialisation is longer than a hexadecimal one *but* it
    generates a more compact barcode (Code128C rather than Code128A).

    Generate 8-byte (64-bit) barcodes, as 16-byte barcodes are not
    compatible with all scanners.
    """
    return pycompat.text_type(random.getrandbits(64))
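# Quick check of the serialisation trade-off described in the docstring:
# 2**64 - 1 has 20 decimal digits but only 16 hexadecimal ones, and the
# all-digit form can be encoded with the denser Code128C character set.
import random

print(len(str(2**64 - 1)))                # 20 decimal digits
print(len('%x' % (2**64 - 1)))            # 16 hex digits
print(len(str(random.getrandbits(64))))   # usually 19 or 20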
def do(self, fields, options, parent_model, customer_id, offer_id, dryrun=False):
    self.ensure_one()
    import_result = {'messages': []}
    try:
        self._cr.execute('SAVEPOINT import')
        data, import_fields, col = self._convert_import_data(fields, options)
        if len(col) == 1:
            resource_model = self.env[parent_model]
            columns = col[0]
            dict_list = [{import_field: columns[idx]}
                         for idx, import_field in enumerate(import_fields)]
            resource_model_dict = dict(
                template_status='Active',
                file_name=self.file_name,
                columns_from_template=self.columns_from_template,
                customer_id=customer_id)
            for dictionary in dict_list:
                resource_model_dict.update(dictionary)
            template = resource_model.create(resource_model_dict)

            vendor_offer = self.env['purchase.order'].search([('id', '=', offer_id)])
            if len(vendor_offer) == 1:
                vendor_offer[0].write({
                    'template_id': template.id,
                    'document': self.file,
                })
        try:
            if dryrun:
                self._cr.execute('ROLLBACK TO SAVEPOINT import')
            else:
                self._cr.execute('RELEASE SAVEPOINT import')
        except psycopg2.InternalError:
            pass
    except ValueError as error:
        _logger.info('Error %r', str(error))
        return [{
            'type': 'error',
            'message': pycompat.text_type(error),
            'record': False,
        }]
    return import_result['messages']
def _read_xls_book_custom(book, pricing_index, read_data=False, expiration_date_index=-1):
    sheet = book.sheet_by_index(pricing_index)
    data = []
    row_index = 0
    for row in pycompat.imap(sheet.row, range(sheet.nrows)):
        if read_data and row_index == 0:
            # skip the header row when reading data
            row_index += 1
            continue
        values = []
        cell_index = 0
        for cell in row:
            if (expiration_date_index == cell_index
                    and cell.value is not None and str(cell.value) != ''):
                is_datetime = cell.value % 1 != 0.0
                # emulate xldate_as_datetime for pre-0.9.3
                dt = datetime(*xlrd.xldate.xldate_as_tuple(cell.value, book.datemode))
                values.append(
                    dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                    if is_datetime
                    else dt.strftime(DEFAULT_SERVER_DATE_FORMAT))
            else:
                if cell.ctype is xlrd.XL_CELL_NUMBER:
                    is_float = cell.value % 1 != 0.0
                    values.append(
                        pycompat.text_type(cell.value)
                        if is_float
                        else pycompat.text_type(int(cell.value)))
                else:
                    values.append(cell.value)
            cell_index += 1
        data.append(values)
        if not read_data:
            break
        row_index += 1
    return data
def _read_xls_book(book, read_data=False):
    sheet = book.sheet_by_index(0)
    data = []
    for row in pycompat.imap(sheet.row, range(sheet.nrows)):
        values = []
        for cell in row:
            if cell.ctype is xlrd.XL_CELL_NUMBER:
                is_float = cell.value % 1 != 0.0
                values.append(
                    pycompat.text_type(cell.value)
                    if is_float
                    else pycompat.text_type(int(cell.value))
                )
            # elif cell.ctype is xlrd.XL_CELL_DATE:
            #     is_datetime = cell.value % 1 != 0.0
            #     dt = datetime(*xlrd.xldate.xldate_as_tuple(cell.value, book.datemode))
            #     values.append(
            #         dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
            #         if is_datetime
            #         else dt.strftime(DEFAULT_SERVER_DATE_FORMAT)
            #     )
            # elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
            #     values.append(u'True' if cell.value else u'False')
            # elif cell.ctype is xlrd.XL_CELL_ERROR:
            #     raise ValueError(
            #         ("Error cell found while reading XLS/XLSX file: %s") %
            #         xlrd.error_text_from_code.get(
            #             cell.value, "unknown error code %s" % cell.value)
            #     )
            else:
                values.append(cell.value)
        data.append(values)
        if not read_data:
            break
    return data
def _log(base, record, field, exception):
    kind = "warning" if isinstance(exception, Warning) else "error"
    # logs the logical (not human-readable) field name for automated
    # processing of response, but injects human readable in message
    exc_vals = dict(base, record=record, field=field_names[field])
    record = dict(
        base,
        type=kind,
        record=record,
        field=field,
        message=pycompat.text_type(exception.args[0]) % exc_vals,
    )
    if len(exception.args) > 1 and exception.args[1]:
        record.update(exception.args[1])
    log(record)
def get_test_modules(module):
    """ Return a list of modules for the addon, potentially containing tests to
    feed unittest.TestLoader.loadTestsFromModule() """
    # Try to import the module
    modpath = 'odoo.addons.' + module
    try:
        mod = importlib.import_module('.tests', modpath)
    except Exception as e:
        # If module has no `tests` sub-module, no problem.
        if not pycompat.text_type(e).startswith(u'No module named'):
            _logger.exception('Can not `import %s`.', module)
        return []
    if hasattr(mod, 'fast_suite') or hasattr(mod, 'checks'):
        _logger.warn(
            "Found deprecated fast_suite or checks attribute in test module "
            "%s. These have no effect in or after version 8.0.",
            mod.__name__)
    result = [mod_obj for name, mod_obj in inspect.getmembers(mod, inspect.ismodule)
              if name.startswith('test_')]
    return result
def _generate_declaration_survey_user_input(self):
    token = pycompat.text_type(uuid.uuid4())
    recipient = self._get_declaration_recipient()
    survey = self._get_declaration_survey()
    user_input = self.env['survey.user_input'].create({
        'survey_id': survey.id,
        'date_create': fields.Datetime.now(),
        'type': 'link',
        'state': 'new',
        'token': token,
        'partner_id': recipient.id,
        'email': recipient.email,
        'declaration_employee_id': self.id,
    })
    return user_input
def _build_intrastat_line(self, numlgn, item, linekey, amounts, dispatchmode, extendedmode):
    self._set_Dim(item, 'EXSEQCODE', text_type(numlgn))
    self._set_Dim(item, 'EXTRF', text_type(linekey.EXTRF))
    self._set_Dim(item, 'EXCNT', text_type(linekey.EXCNT))
    self._set_Dim(item, 'EXTTA', text_type(linekey.EXTTA))
    self._set_Dim(item, 'EXREG', text_type(linekey.EXREG))
    self._set_Dim(item, 'EXTGO', text_type(linekey.EXGO))
    if extendedmode:
        self._set_Dim(item, 'EXTPC', text_type(linekey.EXTPC))
        self._set_Dim(item, 'EXDELTRM', text_type(linekey.EXDELTRM))
    self._set_Dim(item, 'EXTXVAL', text_type(round(amounts[0], 0)).replace(".", ","))
    self._set_Dim(item, 'EXWEIGHT', text_type(round(amounts[1], 0)).replace(".", ","))
    self._set_Dim(item, 'EXUNITS', text_type(round(amounts[2], 0)).replace(".", ","))
def do(self, fields, columns, options, dryrun=False):
    """ Actual execution of the import

    :param fields: import mapping: maps each column to a field,
                   ``False`` for the columns to ignore
    :type fields: list(str|bool)
    :param columns: columns label
    :type columns: list(str|bool)
    :param dict options:
    :param bool dryrun: performs all import operations (and
                        validations) but rolls back writes, allowing
                        as many errors as possible to be collected
                        without the risk of clobbering the database.
    :returns: A list of errors. If the list is empty the import
              executed fully and correctly. If the list is
              non-empty it contains dicts with 3 keys: ``type`` the
              type of error (``error|warning``); ``message`` the
              error message associated with the error (a string)
              and ``record`` the data which failed to import (or
              ``false`` if that data isn't available or provided)
    :rtype: dict(ids: list(int), messages: list({type, message, record}))
    """
    self.ensure_one()
    self._cr.execute('SAVEPOINT import')

    try:
        data, import_fields = self._convert_import_data(fields, options)
        # Parse date and float field
        data = self._parse_import_data(data, import_fields, options)
    except ValueError as error:
        return {
            'messages': [{
                'type': 'error',
                'message': pycompat.text_type(error),
                'record': False,
            }]
        }

    _logger.info('importing %d rows...', len(data))

    model = self.env[self.res_model].with_context(import_file=True)
    import_result = model.load(import_fields, data)
    _logger.info('done')

    # If transaction aborted, RELEASE SAVEPOINT is going to raise
    # an InternalError (ROLLBACK should work, maybe). Ignore that.
    # TODO: to handle multiple errors, create savepoint around
    #       write and release it in case of write error (after
    #       adding error to errors array) => can keep on trying to
    #       import stuff, and rollback at the end if there is any
    #       error in the results.
    try:
        if dryrun:
            self._cr.execute('ROLLBACK TO SAVEPOINT import')
        else:
            self._cr.execute('RELEASE SAVEPOINT import')
    except psycopg2.InternalError:
        pass

    # Insert/Update mapping columns when the import completes successfully
    if import_result['ids'] and options.get('headers'):
        BaseImportMapping = self.env['base_import.mapping']
        for index, column_name in enumerate(columns):
            if column_name:
                # Update to the latest selected field
                exist_records = BaseImportMapping.search(
                    [('res_model', '=', self.res_model), ('column_name', '=', column_name)])
                if exist_records:
                    exist_records.write({'field_name': fields[index]})
                else:
                    BaseImportMapping.create({
                        'res_model': self.res_model,
                        'column_name': column_name,
                        'field_name': fields[index],
                    })
    return import_result
""" Store database-specific configuration parameters """ import uuid import logging from odoo import api, fields, models from odoo.tools import config, ormcache, mute_logger, pycompat _logger = logging.getLogger(__name__) """ A dictionary holding some configuration parameters to be initialized when the database is created. """ _default_parameters = { "database.secret": lambda: pycompat.text_type(uuid.uuid4()), "database.uuid": lambda: pycompat.text_type(uuid.uuid1()), "database.create_date": fields.Datetime.now, "web.base.url": lambda: "http://localhost:%s" % config.get('http_port'), } class IrConfigParameter(models.Model): """Per-database storage of configuration key-value pairs.""" _name = 'ir.config_parameter' _rec_name = 'key' key = fields.Char(required=True, index=True) value = fields.Text(required=True) _sql_constraints = [('key_uniq', 'unique (key)', 'Key must be unique.')]
def attach(self, func, upload=None, url=None, disable_optimization=None, **kwargs):
    # the upload argument doesn't allow us to access the files if more than
    # one file is uploaded, as upload references the first file
    # therefore we have to recover the files from the request object
    Attachments = request.env['ir.attachment']  # registry for the attachment table

    res_model = kwargs.get('res_model', 'ir.ui.view')
    if res_model != 'ir.ui.view' and kwargs.get('res_id'):
        res_id = int(kwargs['res_id'])
    else:
        res_id = None

    uploads = []
    message = None
    if not upload:
        # no image provided, storing the link and the image name
        name = url.split("/").pop()  # recover filename
        attachment = Attachments.create({
            'name': name,
            'type': 'url',
            'url': url,
            'public': res_model == 'ir.ui.view',
            'res_id': res_id,
            'res_model': res_model,
        })
        attachment.generate_access_token()
        uploads += attachment.read(['name', 'mimetype', 'checksum', 'url',
                                    'res_id', 'res_model', 'access_token'])
    else:
        # images provided
        try:
            attachments = request.env['ir.attachment']
            for c_file in request.httprequest.files.getlist('upload'):
                data = c_file.read()
                try:
                    image = Image.open(io.BytesIO(data))
                    w, h = image.size
                    if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
                        raise ValueError(
                            u"Image size excessive, uploaded images must be smaller "
                            u"than 42 million pixel")
                    if not disable_optimization and image.format in ('PNG', 'JPEG'):
                        data = tools.image_save_for_web(image)
                except IOError:
                    pass

                attachment = Attachments.create({
                    'name': c_file.filename,
                    'datas': base64.b64encode(data),
                    'datas_fname': c_file.filename,
                    'public': res_model == 'ir.ui.view',
                    'res_id': res_id,
                    'res_model': res_model,
                })
                attachment.generate_access_token()
                attachments += attachment
            uploads += attachments.read(['name', 'mimetype', 'checksum', 'url',
                                         'res_id', 'res_model', 'access_token'])
        except Exception as e:
            logger.exception("Failed to upload image to attachment")
            message = pycompat.text_type(e)

    return """<script type='text/javascript'>
        window.parent['%s'](%s, %s);
    </script>""" % (func, json.dumps(uploads), json.dumps(message))
def _get_lines(self, dispatchmode=False, extendedmode=False):
    company = self.company_id
    IntrastatRegion = self.env['l10n_be_intrastat.region']

    if dispatchmode:
        mode1 = 'out_invoice'
        mode2 = 'in_refund'
        declcode = "29"
    else:
        mode1 = 'in_invoice'
        mode2 = 'out_refund'
        declcode = "19"

    decl = ET.Element('Report')
    if not extendedmode:
        decl.set('code', 'EX%sS' % declcode)
    else:
        decl.set('code', 'EX%sE' % declcode)
    decl.set('date', '%s-%s' % (self.year, self.month))
    datas = ET.SubElement(decl, 'Data')
    if not extendedmode:
        datas.set('form', 'EXF%sS' % declcode)
    else:
        datas.set('form', 'EXF%sE' % declcode)
    datas.set('close', 'true')

    intrastatkey = namedtuple(
        "intrastatkey",
        ['EXTRF', 'EXCNT', 'EXTTA', 'EXREG', 'EXGO', 'EXTPC', 'EXDELTRM'])
    entries = {}

    query = """
        SELECT inv_line.id
        FROM account_invoice_line inv_line
            JOIN account_invoice inv ON inv_line.invoice_id=inv.id
            LEFT JOIN res_country ON res_country.id = inv.intrastat_country_id
            LEFT JOIN res_partner ON res_partner.id = inv.partner_id
            LEFT JOIN res_country countrypartner ON countrypartner.id = res_partner.country_id
            JOIN product_product ON inv_line.product_id=product_product.id
            JOIN product_template ON product_product.product_tmpl_id=product_template.id
        WHERE inv.state IN ('open','paid')
            AND inv.company_id=%s
            AND not product_template.type='service'
            AND (res_country.intrastat=true OR (inv.intrastat_country_id is NULL
                                                AND countrypartner.intrastat=true))
            AND ((res_country.code IS NOT NULL AND not res_country.code=%s)
                 OR (res_country.code is NULL AND countrypartner.code IS NOT NULL
                     AND not countrypartner.code=%s))
            AND inv.type IN (%s, %s)
            AND to_char(COALESCE(inv.date, inv.date_invoice), 'YYYY')=%s
            AND to_char(COALESCE(inv.date, inv.date_invoice), 'MM')=%s
        """
    self.env.cr.execute(query, (company.id, company.partner_id.country_id.code,
                                company.partner_id.country_id.code, mode1, mode2,
                                self.year, self.month))
    lines = self.env.cr.fetchall()
    invoicelines_ids = [rec[0] for rec in lines]
    invoicelines = self.env['account.invoice.line'].browse(invoicelines_ids)

    for inv_line in invoicelines:
        # Check type of transaction
        if inv_line.intrastat_transaction_id:
            extta = inv_line.intrastat_transaction_id.code
        else:
            extta = "1"

        # Check country
        if inv_line.invoice_id.intrastat_country_id:
            excnt = inv_line.invoice_id.intrastat_country_id.code
        else:
            excnt = inv_line.invoice_id.partner_shipping_id.country_id.code or \
                inv_line.invoice_id.partner_id.country_id.code

        # Check region
        # If purchase, comes from the purchase order, linked to a location,
        # which is linked to the warehouse.
        # If sales, the sales order is linked to the warehouse.
        # If sales, from a delivery order, linked to a location,
        # which is linked to the warehouse.
        # If none found, get the company one.
        exreg = None
        if inv_line.invoice_id.type in ('in_invoice', 'in_refund'):
            # comes from purchase
            po_lines = self.env['purchase.order.line'].search(
                [('invoice_lines', 'in', inv_line.id)], limit=1)
            if po_lines:
                if self._is_situation_triangular(company, po_line=po_lines):
                    continue
                location = self.env['stock.location'].browse(
                    po_lines.order_id._get_destination_location())
                region_id = self.env['stock.warehouse'].get_regionid_from_locationid(location)
                if region_id:
                    exreg = IntrastatRegion.browse(region_id).code
        elif inv_line.invoice_id.type in ('out_invoice', 'out_refund'):
            # comes from sales
            so_lines = self.env['sale.order.line'].search(
                [('invoice_lines', 'in', inv_line.id)], limit=1)
            if so_lines:
                if self._is_situation_triangular(company, so_line=so_lines):
                    continue
                saleorder = so_lines.order_id
                if saleorder and saleorder.warehouse_id and saleorder.warehouse_id.region_id:
                    exreg = IntrastatRegion.browse(saleorder.warehouse_id.region_id.id).code

        if not exreg:
            if company.region_id:
                exreg = company.region_id.code
            else:
                self._company_warning(
                    _('The Intrastat Region of the selected company is not set, '
                      'please make sure to configure it first.'))

        # Check commodity codes
        intrastat_id = inv_line.product_id.get_intrastat_recursively()
        if intrastat_id:
            exgo = self.env['report.intrastat.code'].browse(intrastat_id).name
        else:
            raise exceptions.Warning(
                _('Product "%s" has no intrastat code, please configure it')
                % inv_line.product_id.display_name)

        # In extended mode, 2 more fields are required
        if extendedmode:
            # Check means of transport
            if inv_line.invoice_id.transport_mode_id:
                extpc = inv_line.invoice_id.transport_mode_id.code
            elif company.transport_mode_id:
                extpc = company.transport_mode_id.code
            else:
                self._company_warning(
                    _('The default Intrastat transport mode of your company '
                      'is not set, please make sure to configure it first.'))
            # Check incoterm
            if inv_line.invoice_id.incoterm_id:
                exdeltrm = inv_line.invoice_id.incoterm_id.code
            elif company.incoterm_id:
                exdeltrm = company.incoterm_id.code
            else:
                self._company_warning(
                    _('The default Incoterm of your company is not set, '
                      'please make sure to configure it first.'))
        else:
            extpc = ""
            exdeltrm = ""

        linekey = intrastatkey(EXTRF=declcode, EXCNT=excnt,
                               EXTTA=extta, EXREG=exreg, EXGO=exgo,
                               EXTPC=extpc, EXDELTRM=exdeltrm)

        # We have the key; calculate amounts
        if inv_line.price_unit and inv_line.quantity:
            amount = inv_line.price_unit * inv_line.quantity
        else:
            amount = 0
        weight = (inv_line.product_id.weight or 0.0) * \
            inv_line.uom_id._compute_quantity(inv_line.quantity, inv_line.product_id.uom_id)
        if not inv_line.product_id.uom_id.category_id:
            supply_units = inv_line.quantity
        else:
            supply_units = inv_line.quantity * inv_line.uom_id.factor

        amounts = entries.setdefault(linekey, (0, 0, 0))
        amounts = (amounts[0] + amount, amounts[1] + weight, amounts[2] + supply_units)
        entries[linekey] = amounts

    numlgn = 0
    for linekey in entries:
        amounts = entries[linekey]
        if round(amounts[0], 0) == 0:
            continue
        numlgn += 1
        item = ET.SubElement(datas, 'Item')
        self._set_Dim(item, 'EXSEQCODE', text_type(numlgn))
        self._set_Dim(item, 'EXTRF', text_type(linekey.EXTRF))
        self._set_Dim(item, 'EXCNT', text_type(linekey.EXCNT))
        self._set_Dim(item, 'EXTTA', text_type(linekey.EXTTA))
        self._set_Dim(item, 'EXREG', text_type(linekey.EXREG))
        self._set_Dim(item, 'EXTGO', text_type(linekey.EXGO))
        if extendedmode:
            self._set_Dim(item, 'EXTPC', text_type(linekey.EXTPC))
            self._set_Dim(item, 'EXDELTRM', text_type(linekey.EXDELTRM))
        self._set_Dim(item, 'EXTXVAL', text_type(round(amounts[0], 0)).replace(".", ","))
        self._set_Dim(item, 'EXWEIGHT', text_type(round(amounts[1], 0)).replace(".", ","))
        self._set_Dim(item, 'EXUNITS', text_type(round(amounts[2], 0)).replace(".", ","))

    if numlgn == 0:
        # no data
        datas.set('action', 'nihil')
    return decl
def _build_intrastat_line(self, numlgn, item, linekey, amounts, dispatchmode, extendedmode):
    round_digits = self._get_rounding_digits()
    _round = partial(float_round, precision_digits=round_digits)
    value, weight, supply_units = amounts
    # Assuming weight cannot be negative
    if 0 <= weight < 0.01:
        weight = 0.01
    self._set_Dim(item, 'EXSEQCODE', text_type(numlgn))
    self._set_Dim(item, 'EXTRF', text_type(linekey.EXTRF))
    self._set_Dim(item, 'EXCNT', text_type(linekey.EXCNT))
    self._set_Dim(item, 'EXTTA', text_type(linekey.EXTTA))
    self._set_Dim(item, 'EXREG', text_type(linekey.EXREG))
    self._set_Dim(item, 'EXTGO', text_type(linekey.EXGO))
    if extendedmode:
        self._set_Dim(item, 'EXTPC', text_type(linekey.EXTPC))
        self._set_Dim(item, 'EXDELTRM', text_type(linekey.EXDELTRM))
    self._set_Dim(item, 'EXTXVAL', text_type(_round(value)).replace(".", ","))
    self._set_Dim(item, 'EXWEIGHT', text_type(_round(weight)).replace(".", ","))
    self._set_Dim(item, 'EXUNITS', text_type(_round(supply_units)).replace(".", ","))
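# The value-formatting convention used for EXTXVAL/EXWEIGHT/EXUNITS above,
# reduced to plain Python: round, render as text, then swap the decimal point
# for a comma as the Belgian declaration format expects.
def format_dim(value, digits=2):
    return str(round(value, digits)).replace(".", ",")

print(format_dim(1234.567))  # 1234,57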
def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False,
                  sanitize_style=False, strip_style=False, strip_classes=False):
    if not src:
        return src
    src = ustr(src, errors='replace')
    # html: remove encoding attribute inside tags
    doctype = re.compile(
        r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)',
        re.IGNORECASE | re.DOTALL)
    src = doctype.sub(u"", src)

    logger = logging.getLogger(__name__ + '.html_sanitize')

    # html-encode email tags
    part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
    # remove results containing cite="mid:email_like@address" (ex: blockquote cite)
    # cite_except = re.compile(r"^((?!cite[\s]*=['\"]).)*$", re.IGNORECASE)
    src = part.sub(
        lambda m: (u'cite=' not in m.group(1) and u'alt=' not in m.group(1)
                   and u'src=' not in m.group(1))
                  and misc.html_escape(m.group(1)) or m.group(1),
        src)
    # html-encode mako tags <% ... %> to decode them later and keep them alive,
    # otherwise they are stripped by the cleaner
    src = src.replace(u'<%', misc.html_escape(u'<%'))
    src = src.replace(u'%>', misc.html_escape(u'%>'))

    kwargs = {
        'page_structure': True,
        'style': strip_style,              # True = remove style tags/attrs
        'sanitize_style': sanitize_style,  # True = sanitize styling
        'forms': True,                     # True = remove form tags
        'remove_unknown_tags': False,
        'comments': False,
        'processing_instructions': False,
    }
    if sanitize_tags:
        kwargs['allow_tags'] = allowed_tags
        if etree.LXML_VERSION >= (2, 3, 1):
            # kill_tags attribute has been added in version 2.3.1
            kwargs.update({
                'kill_tags': tags_to_kill,
                'remove_tags': tags_to_remove,
            })
        else:
            kwargs['remove_tags'] = tags_to_kill + tags_to_remove

    if sanitize_attributes and etree.LXML_VERSION >= (3, 1, 0):
        # lxml < 3.1.0 does not allow to specify safe_attrs.
        # We keep all attributes in order to keep "style"
        if strip_classes:
            current_safe_attrs = safe_attrs - frozenset(['class'])
        else:
            current_safe_attrs = safe_attrs
        kwargs.update({
            'safe_attrs_only': True,
            'safe_attrs': current_safe_attrs,
        })
    else:
        kwargs.update({
            'safe_attrs_only': False,        # keep oe-data attributes + style
            'strip_classes': strip_classes,  # remove classes, even when keeping other attributes
        })

    try:
        # some corner cases make the parser crash (such as
        # <SCRIPT/XSS SRC="http://ha.ckers.org/xss.js"></SCRIPT> in test_mail)
        cleaner = _Cleaner(**kwargs)
        cleaned = cleaner.clean_html(src)
        assert isinstance(cleaned, pycompat.text_type)
        # MAKO compatibility: $, { and } inside quotes are escaped,
        # preventing correct mako execution
        cleaned = cleaned.replace(u'%24', u'$')
        cleaned = cleaned.replace(u'%7B', u'{')
        cleaned = cleaned.replace(u'%7D', u'}')
        cleaned = cleaned.replace(u'%20', u' ')
        cleaned = cleaned.replace(u'%5B', u'[')
        cleaned = cleaned.replace(u'%5D', u']')
        cleaned = cleaned.replace(u'%7C', u'|')
        cleaned = cleaned.replace(u'&lt;%', u'<%')
        cleaned = cleaned.replace(u'%&gt;', u'%>')
        # html considerations so real html content match database value
        cleaned = cleaned.replace(u'\xa0', u'&nbsp;')
    except etree.ParserError as e:
        if u'empty' in pycompat.text_type(e):
            return u""
        if not silent:
            raise
        logger.warning(u'ParserError obtained when sanitizing %r', src, exc_info=True)
        cleaned = u'<p>ParserError when sanitizing</p>'
    except Exception:
        if not silent:
            raise
        logger.warning(u'unknown error obtained when sanitizing %r', src, exc_info=True)
        cleaned = u'<p>Unknown error when sanitizing</p>'

    # this is ugly, but lxml/etree tostring want to put everything in a 'div'
    # that breaks the editor -> remove that
    if cleaned.startswith(u'<div>') and cleaned.endswith(u'</div>'):
        cleaned = cleaned[5:-6]

    return cleaned
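# A minimal, runnable illustration of the lxml cleaning step above, assuming an
# lxml version that still ships lxml.html.clean (recent releases moved it to
# the separate lxml_html_clean package); Odoo uses its own _Cleaner subclass,
# this uses the stock Cleaner.
from lxml.html.clean import Cleaner

cleaner = Cleaner(page_structure=True, forms=True, kill_tags=['script'])
print(cleaner.clean_html(u'<div>ok<script>alert(1)</script></div>'))  # <div>ok</div>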
def test_huge(self):
    self.assertEqual(self.export(2**31 - 1),
                     [[pycompat.text_type(2**31 - 1)]])
def attval(self, value, whitespace=re.compile(u'[ \t\n\f\r]+')):
    return self.encode(whitespace.sub(u' ', pycompat.text_type(value)))
def _get_lines(self, dispatchmode=False, extendedmode=False):
    company = self.company_id
    IntrastatRegion = self.env['l10n_be_intrastat.region']

    if dispatchmode:
        mode1 = 'out_invoice'
        mode2 = 'in_refund'
        declcode = "29"
    else:
        mode1 = 'in_invoice'
        mode2 = 'out_refund'
        declcode = "19"

    decl = ET.Element('Report')
    if not extendedmode:
        decl.set('code', 'EX%sS' % declcode)
    else:
        decl.set('code', 'EX%sE' % declcode)
    decl.set('date', '%s-%s' % (self.year, self.month))
    datas = ET.SubElement(decl, 'Data')
    if not extendedmode:
        datas.set('form', 'EXF%sS' % declcode)
    else:
        datas.set('form', 'EXF%sE' % declcode)
    datas.set('close', 'true')

    intrastatkey = namedtuple(
        "intrastatkey",
        ['EXTRF', 'EXCNT', 'EXTTA', 'EXREG', 'EXGO', 'EXTPC', 'EXDELTRM'])
    entries = {}

    query = """
        SELECT inv_line.id
        FROM account_invoice_line inv_line
            JOIN account_invoice inv ON inv_line.invoice_id=inv.id
            LEFT JOIN res_country ON res_country.id = inv.intrastat_country_id
            LEFT JOIN res_partner ON res_partner.id = inv.partner_id
            LEFT JOIN res_country countrypartner ON countrypartner.id = res_partner.country_id
            JOIN product_product ON inv_line.product_id=product_product.id
            JOIN product_template ON product_product.product_tmpl_id=product_template.id
        WHERE inv.state IN ('open','paid')
            AND inv.company_id=%s
            AND not product_template.type='service'
            AND (res_country.intrastat=true OR (inv.intrastat_country_id is NULL
                                                AND countrypartner.intrastat=true))
            AND ((res_country.code IS NOT NULL AND not res_country.code=%s)
                 OR (res_country.code is NULL AND countrypartner.code IS NOT NULL
                     AND not countrypartner.code=%s))
            AND inv.type IN (%s, %s)
            AND to_char(inv.date_invoice, 'YYYY')=%s
            AND to_char(inv.date_invoice, 'MM')=%s
        """
    self.env.cr.execute(query, (company.id, company.partner_id.country_id.code,
                                company.partner_id.country_id.code, mode1, mode2,
                                self.year, self.month))
    lines = self.env.cr.fetchall()
    invoicelines_ids = [rec[0] for rec in lines]
    invoicelines = self.env['account.invoice.line'].browse(invoicelines_ids)

    for inv_line in invoicelines:
        # Check type of transaction
        if inv_line.intrastat_transaction_id:
            extta = inv_line.intrastat_transaction_id.code
        else:
            extta = "1"

        # Check country
        if inv_line.invoice_id.intrastat_country_id:
            excnt = inv_line.invoice_id.intrastat_country_id.code
        else:
            excnt = inv_line.invoice_id.partner_id.country_id.code

        # Check region
        # If purchase, comes from the purchase order, linked to a location,
        # which is linked to the warehouse.
        # If sales, the sales order is linked to the warehouse.
        # If sales, from a delivery order, linked to a location,
        # which is linked to the warehouse.
        # If none found, get the company one.
        exreg = None
        if inv_line.invoice_id.type in ('in_invoice', 'in_refund'):
            # comes from purchase
            po_lines = self.env['purchase.order.line'].search(
                [('invoice_lines', 'in', inv_line.id)], limit=1)
            if po_lines:
                if self._is_situation_triangular(company, po_line=po_lines):
                    continue
                location = self.env['stock.location'].browse(
                    po_lines.order_id._get_destination_location())
                region_id = self.env['stock.warehouse'].get_regionid_from_locationid(location)
                if region_id:
                    exreg = IntrastatRegion.browse(region_id).code
        elif inv_line.invoice_id.type in ('out_invoice', 'out_refund'):
            # comes from sales
            so_lines = self.env['sale.order.line'].search(
                [('invoice_lines', 'in', inv_line.id)], limit=1)
            if so_lines:
                if self._is_situation_triangular(company, so_line=so_lines):
                    continue
                saleorder = so_lines.order_id
                if saleorder and saleorder.warehouse_id and saleorder.warehouse_id.region_id:
                    exreg = IntrastatRegion.browse(saleorder.warehouse_id.region_id.id).code

        if not exreg:
            if company.region_id:
                exreg = company.region_id.code
            else:
                self._company_warning(
                    _('The Intrastat Region of the selected company is not set, '
                      'please make sure to configure it first.'))

        # Check commodity codes
        intrastat_id = inv_line.product_id.get_intrastat_recursively()
        if intrastat_id:
            exgo = self.env['report.intrastat.code'].browse(intrastat_id).name
        else:
            raise exceptions.Warning(
                _('Product "%s" has no intrastat code, please configure it')
                % inv_line.product_id.display_name)

        # In extended mode, 2 more fields are required
        if extendedmode:
            # Check means of transport
            if inv_line.invoice_id.transport_mode_id:
                extpc = inv_line.invoice_id.transport_mode_id.code
            elif company.transport_mode_id:
                extpc = company.transport_mode_id.code
            else:
                self._company_warning(
                    _('The default Intrastat transport mode of your company '
                      'is not set, please make sure to configure it first.'))
            # Check incoterm
            if inv_line.invoice_id.incoterm_id:
                exdeltrm = inv_line.invoice_id.incoterm_id.code
            elif company.incoterm_id:
                exdeltrm = company.incoterm_id.code
            else:
                self._company_warning(
                    _('The default Incoterm of your company is not set, '
                      'please make sure to configure it first.'))
        else:
            extpc = ""
            exdeltrm = ""

        linekey = intrastatkey(EXTRF=declcode, EXCNT=excnt,
                               EXTTA=extta, EXREG=exreg, EXGO=exgo,
                               EXTPC=extpc, EXDELTRM=exdeltrm)

        # We have the key; calculate amounts
        if inv_line.price_unit and inv_line.quantity:
            amount = inv_line.price_unit * inv_line.quantity
        else:
            amount = 0
        weight = (inv_line.product_id.weight or 0.0) * \
            inv_line.uom_id._compute_quantity(inv_line.quantity, inv_line.product_id.uom_id)
        if not inv_line.product_id.uom_id.category_id:
            supply_units = inv_line.quantity
        else:
            supply_units = inv_line.quantity * inv_line.uom_id.factor

        amounts = entries.setdefault(linekey, (0, 0, 0))
        amounts = (amounts[0] + amount, amounts[1] + weight, amounts[2] + supply_units)
        entries[linekey] = amounts

    numlgn = 0
    for linekey in entries:
        amounts = entries[linekey]
        if round(amounts[0], 0) == 0:
            continue
        numlgn += 1
        item = ET.SubElement(datas, 'Item')
        self._set_Dim(item, 'EXSEQCODE', text_type(numlgn))
        self._set_Dim(item, 'EXTRF', text_type(linekey.EXTRF))
        self._set_Dim(item, 'EXCNT', text_type(linekey.EXCNT))
        self._set_Dim(item, 'EXTTA', text_type(linekey.EXTTA))
        self._set_Dim(item, 'EXREG', text_type(linekey.EXREG))
        self._set_Dim(item, 'EXTGO', text_type(linekey.EXGO))
        if extendedmode:
            self._set_Dim(item, 'EXTPC', text_type(linekey.EXTPC))
            self._set_Dim(item, 'EXDELTRM', text_type(linekey.EXDELTRM))
        self._set_Dim(item, 'EXTXVAL', text_type(round(amounts[0], 0)).replace(".", ","))
        self._set_Dim(item, 'EXWEIGHT', text_type(round(amounts[1], 0)).replace(".", ","))
        self._set_Dim(item, 'EXUNITS', text_type(round(amounts[2], 0)).replace(".", ","))

    if numlgn == 0:
        # no data
        datas.set('action', 'nihil')
    return decl
def do(self, fields, options, parent_model, customer_id, template_type, dryrun=False):
    self.ensure_one()
    import_result = {'messages': []}
    try:
        data, import_fields, col = self._convert_import_data(fields, options)
        if 'customer_sku' not in import_fields:
            raise ValueError(_("You must configure Customer Sku field to import"))
        if template_type.lower().strip() == 'inventory' and 'quantity' not in import_fields:
            raise ValueError(_("You must configure Stock field to import"))
        if template_type.lower().strip() == 'requirement' and 'required_quantity' not in import_fields:
            raise ValueError(_("You must configure Required Quantity field to import"))
        self._cr.execute('SAVEPOINT import')
        if len(col) == 1:
            resource_model = self.env[parent_model]
            columns = col[0]
            dict_list = [{'mf_' + import_field: columns[idx]}
                         for idx, import_field in enumerate(import_fields)]
            resource_model_dict = dict(template_file=self.file,
                                       file_name=self.file_name,
                                       customer_id=customer_id)
            for dictionary in dict_list:
                resource_model_dict.update(dictionary)
            resource_model_dict.update(
                dict(template_type=template_type, template_status='Active'))
            template_resources = resource_model.search([
                ('template_type', '=', template_type),
                ('customer_id', '=', customer_id),
            ])
            for template_resource in template_resources:
                template_resource.write(dict(template_status='InActive'))
            resource_model.create(resource_model_dict)
        try:
            if dryrun:
                self._cr.execute('ROLLBACK TO SAVEPOINT import')
            else:
                self._cr.execute('RELEASE SAVEPOINT import')
        except psycopg2.InternalError:
            pass
    except ValueError as error:
        return [{
            'type': 'error',
            'message': pycompat.text_type(error),
            'record': False,
        }]
    return import_result['messages']
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas',
                   unique=False, filename=None, filename_field='datas_fname',
                   download=False, mimetype=None,
                   default_mimetype='application/octet-stream',
                   access_token=None, related_id=None, access_mode=None, env=None):
    """ Get file, attachment or downloadable content

    If the ``xmlid`` and ``id`` parameters are omitted, fetches the default
    value for the binary field (via ``default_get``), otherwise fetches the
    field for that precise record.

    :param str xmlid: xmlid of the record
    :param str model: name of the model to fetch the binary from
    :param int id: id of the record from which to fetch the binary
    :param str field: binary field
    :param bool unique: add a max-age for the cache control
    :param str filename: choose a filename
    :param str filename_field: if not set, create a filename with model-id-field
    :param bool download: apply headers to download the file
    :param str mimetype: mimetype of the field (for headers)
    :param related_id: the id of another record used for custom_check
    :param access_mode: if truthy, will call custom_check to fetch the object
        that contains the binary.
    :param str default_mimetype: default mimetype if no mimetype found
    :param str access_token: optional token for unauthenticated access, only
        available for ir.attachment
    :param Environment env: by default use request.env
    :returns: (status, headers, content)
    """
    env = env or request.env
    # get object and content
    obj = None
    if xmlid:
        obj = cls._xmlid_to_obj(env, xmlid)
    elif id and model in env.registry:
        obj = env[model].browse(int(id))

    # obj exists
    if not obj or not obj.exists() or field not in obj:
        return (404, [], None)

    # access token grants access
    if model == 'ir.attachment' and access_token:
        obj = obj.sudo()
        if access_mode:
            if not cls._check_access_mode(env, id, access_mode, model,
                                          access_token=access_token,
                                          related_id=related_id):
                return (403, [], None)
        elif not consteq(obj.access_token or u'', access_token):
            return (403, [], None)

    # check read access
    try:
        last_update = obj['__last_update']
    except AccessError:
        return (403, [], None)

    status, headers, content = None, [], None

    # attachment by url check
    module_resource_path = None
    if model == 'ir.attachment' and obj.type == 'url' and obj.url:
        url_match = re.match(r"^/(\w+)/(.+)$", obj.url)
        if url_match:
            module = url_match.group(1)
            module_path = get_module_path(module)
            module_resource_path = get_resource_path(module, url_match.group(2))
            if module_path and module_resource_path:
                module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                module_resource_path = os.path.normpath(module_resource_path)
                if module_resource_path.startswith(module_path):
                    with open(module_resource_path, 'rb') as f:
                        content = base64.b64encode(f.read())
                    last_update = pycompat.text_type(os.path.getmtime(module_resource_path))

        if not module_resource_path:
            module_resource_path = obj.url

        if not content:
            status = 301
            content = module_resource_path
    else:
        content = obj[field] or ''

    # filename
    default_filename = False
    if not filename:
        if filename_field in obj:
            filename = obj[filename_field]
        if not filename and module_resource_path:
            filename = os.path.basename(module_resource_path)
        if not filename:
            default_filename = True
            filename = "%s-%s-%s" % (obj._name, obj.id, field)

    # mimetype
    mimetype = 'mimetype' in obj and obj.mimetype or False
    if not mimetype:
        if filename:
            mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype and getattr(env[model]._fields[field], 'attachment', False):
            # for binary fields, fetch the ir_attachment for mimetype check
            attach_mimetype = env['ir.attachment'].search_read(
                domain=[('res_model', '=', model), ('res_id', '=', id),
                        ('res_field', '=', field)],
                fields=['mimetype'], limit=1)
            mimetype = attach_mimetype and attach_mimetype[0]['mimetype']
        if not mimetype:
            try:
                decoded_content = base64.b64decode(content)
            except base64.binascii.Error:
                # if we could not decode it, no need to pass it down:
                # it would crash elsewhere...
                return (404, [], None)
            mimetype = guess_mimetype(decoded_content, default=default_mimetype)

    # extension
    _, existing_extension = os.path.splitext(filename)
    if not existing_extension or default_filename:
        extension = mimetypes.guess_extension(mimetype)
        if extension:
            filename = "%s%s" % (filename, extension)

    headers += [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff')]

    # cache
    etag = bool(request) and request.httprequest.headers.get('If-None-Match')
    retag = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()
    status = status or (304 if etag == retag else 200)
    headers.append(('ETag', retag))
    headers.append(('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0)))

    # content-disposition default name
    if download:
        headers.append(('Content-Disposition', cls.content_disposition(filename)))

    return (status, headers, content)
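# The conditional-request scheme used above, reduced to plain Python: the ETag
# is the quoted md5 hex digest of the payload, and a matching If-None-Match
# header from the client short-circuits to a 304 response.
import hashlib

content = b'some payload'
retag = '"%s"' % hashlib.md5(content).hexdigest()
if_none_match = retag  # as echoed back by the client on a revalidation request
status = 304 if if_none_match == retag else 200
print(status, retag)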
def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False,
                  sanitize_style=False, strip_style=False, strip_classes=False):
    if not src:
        return src
    src = ustr(src, errors='replace')
    # html: remove encoding attribute inside tags
    doctype = re.compile(
        r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)',
        re.IGNORECASE | re.DOTALL)
    src = doctype.sub(u"", src)

    logger = logging.getLogger(__name__ + '.html_sanitize')

    # html-encode email tags
    part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
    # remove results containing cite="mid:email_like@address" (ex: blockquote cite)
    # cite_except = re.compile(r"^((?!cite[\s]*=['\"]).)*$", re.IGNORECASE)
    src = part.sub(
        lambda m: (u'cite=' not in m.group(1) and u'alt=' not in m.group(1))
                  and misc.html_escape(m.group(1)) or m.group(1),
        src)
    # html-encode mako tags <% ... %> to decode them later and keep them alive,
    # otherwise they are stripped by the cleaner
    src = src.replace(u'<%', misc.html_escape(u'<%'))
    src = src.replace(u'%>', misc.html_escape(u'%>'))

    kwargs = {
        'page_structure': True,
        'style': strip_style,              # True = remove style tags/attrs
        'sanitize_style': sanitize_style,  # True = sanitize styling
        'forms': True,                     # True = remove form tags
        'remove_unknown_tags': False,
        'comments': False,
        'processing_instructions': False,
    }
    if sanitize_tags:
        kwargs['allow_tags'] = allowed_tags
        if etree.LXML_VERSION >= (2, 3, 1):
            # kill_tags attribute has been added in version 2.3.1
            kwargs.update({
                'kill_tags': tags_to_kill,
                'remove_tags': tags_to_remove,
            })
        else:
            kwargs['remove_tags'] = tags_to_kill + tags_to_remove

    if sanitize_attributes and etree.LXML_VERSION >= (3, 1, 0):
        # lxml < 3.1.0 does not allow to specify safe_attrs.
        # We keep all attributes in order to keep "style"
        if strip_classes:
            current_safe_attrs = safe_attrs - frozenset(['class'])
        else:
            current_safe_attrs = safe_attrs
        kwargs.update({
            'safe_attrs_only': True,
            'safe_attrs': current_safe_attrs,
        })
    else:
        kwargs.update({
            'safe_attrs_only': False,        # keep oe-data attributes + style
            'strip_classes': strip_classes,  # remove classes, even when keeping other attributes
        })

    try:
        # some corner cases make the parser crash (such as
        # <SCRIPT/XSS SRC="http://ha.ckers.org/xss.js"></SCRIPT> in test_mail)
        cleaner = _Cleaner(**kwargs)
        cleaned = cleaner.clean_html(src)
        assert isinstance(cleaned, pycompat.text_type)
        # MAKO compatibility: $, { and } inside quotes are escaped,
        # preventing correct mako execution
        cleaned = cleaned.replace(u'%24', u'$')
        cleaned = cleaned.replace(u'%7B', u'{')
        cleaned = cleaned.replace(u'%7D', u'}')
        cleaned = cleaned.replace(u'%20', u' ')
        cleaned = cleaned.replace(u'%5B', u'[')
        cleaned = cleaned.replace(u'%5D', u']')
        cleaned = cleaned.replace(u'%7C', u'|')
        cleaned = cleaned.replace(u'&lt;%', u'<%')
        cleaned = cleaned.replace(u'%&gt;', u'%>')
        # html considerations so real html content match database value
        cleaned = cleaned.replace(u'\xa0', u'&nbsp;')
    except etree.ParserError as e:
        if u'empty' in pycompat.text_type(e):
            return u""
        if not silent:
            raise
        logger.warning(u'ParserError obtained when sanitizing %r', src, exc_info=True)
        cleaned = u'<p>ParserError when sanitizing</p>'
    except Exception:
        if not silent:
            raise
        logger.warning(u'unknown error obtained when sanitizing %r', src, exc_info=True)
        cleaned = u'<p>Unknown error when sanitizing</p>'

    # this is ugly, but lxml/etree tostring want to put everything in a 'div'
    # that breaks the editor -> remove that
    if cleaned.startswith(u'<div>') and cleaned.endswith(u'</div>'):
        cleaned = cleaned[5:-6]

    return cleaned
def attach(self, func, upload=None, url=None, disable_optimization=None, **kwargs):
    # the upload argument doesn't allow us to access the files if more than
    # one file is uploaded, as upload references the first file
    # therefore we have to recover the files from the request object
    Attachments = request.env['ir.attachment']  # registry for the attachment table
    hidden_folder_id = kwargs.get('hidden_folder_id', False)

    uploads = []
    message = None
    if not upload:
        # no image provided, storing the link and the image name
        name = url.split("/").pop()  # recover filename
        if '?' in name:
            name = name[:name.find('?')]
        attach_vals = {
            'name': name,
            'datas_fname': name,
            'type': 'binary',
            'url': '',
            'public': True,
            'res_model': 'hc.image.bank',
            'hc_image_bank_id': hidden_folder_id,
        }
        try:
            import requests
            res = requests.get(url, params={'d': '404', 's': '128'}, timeout=5)
            datas = base64.b64encode(res.content)
            attach_vals.update({'datas': datas, 'type': 'binary'})
        except Exception as e:
            logger.info("Unable to store this Image in database - " + str(e))
        attachment = Attachments.create(attach_vals)
        if hidden_folder_id:
            hidden_folder_obj = request.env['hc.image.bank']
            hidden_folder_browse = hidden_folder_obj.browse(int(hidden_folder_id))
            hidden_folder_browse.write({'attachment_ids': [(4, attachment.id)]})
        uploads += attachment.read(['name', 'mimetype', 'checksum', 'url', 'hc_image_bank_id'])
    else:
        # images provided
        try:
            attachments = request.env['ir.attachment']
            for c_file in request.httprequest.files.getlist('upload'):
                data = c_file.read()
                try:
                    image = Image.open(io.BytesIO(data))
                    w, h = image.size
                    if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
                        raise ValueError(
                            u"Image size excessive, uploaded images must be smaller "
                            u"than 42 million pixel")
                    if not disable_optimization and image.format in ('PNG', 'JPEG'):
                        data = tools.image_save_for_web(image)
                except IOError:
                    pass

                attachment = Attachments.create({
                    'name': c_file.filename,
                    'datas': base64.b64encode(data),
                    'datas_fname': c_file.filename,
                    'public': True,
                    'res_model': 'hc.image.bank',
                    'hc_image_bank_id': hidden_folder_id,
                })
                attachments += attachment
                if hidden_folder_id:
                    hidden_folder_obj = request.env['hc.image.bank']
                    hidden_folder_browse = hidden_folder_obj.browse(int(hidden_folder_id))
                    hidden_folder_browse.write({'attachment_ids': [(4, attachment.id)]})
            uploads += attachments.read(['name', 'mimetype', 'checksum', 'url', 'hc_image_bank_id'])
        except Exception as e:
            logger.exception("Failed to upload image to attachment")
            message = pycompat.text_type(e)

    return """<script type='text/javascript'>
        window.parent['%s'](%s, %s);
    </script>""" % (func, json.dumps(uploads), json.dumps(message))
""" Store database-specific configuration parameters """

import uuid
import logging

from odoo import api, fields, models
from odoo.tools import config, ormcache, mute_logger, pycompat

_logger = logging.getLogger(__name__)

"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
    "database.secret": lambda: pycompat.text_type(uuid.uuid4()),
    "database.uuid": lambda: pycompat.text_type(uuid.uuid1()),
    "database.create_date": fields.Datetime.now,
    "web.base.url": lambda: "http://localhost:%s" % config.get('xmlrpc_port'),
}


class IrConfigParameter(models.Model):
    """Per-database storage of configuration key-value pairs."""
    _name = 'ir.config_parameter'
    _rec_name = 'key'

    key = fields.Char(required=True, index=True)
    value = fields.Text(required=True)

    _sql_constraints = [
        ('key_uniq', 'unique (key)', 'Key must be unique.')
    ]
def log_model(model, local_registry):
    """OpenUpgrade: Store the characteristics of the BaseModel and its fields
    in the local registry, so that we can compare changes with the main
    registry"""

    if not model._name:
        return

    typemap = {'monetary': 'float'}

    # Deferred import to prevent import loop
    from odoo import models

    # persistent models only
    if isinstance(model, models.TransientModel):
        return

    def isfunction(model, k):
        if (model._fields[k].compute and
                not model._fields[k].related and
                not model._fields[k].company_dependent):
            return 'function'
        return ''

    def isproperty(model, k):
        if model._fields[k].company_dependent:
            return 'property'
        return ''

    def isrelated(model, k):
        if model._fields[k].related:
            return 'related'
        return ''

    model_registry = local_registry.setdefault(model._name, {})
    if model._inherits:
        model_registry['_inherits'] = {'_inherits': pycompat.text_type(model._inherits)}
    for k, v in model._fields.items():
        properties = {
            'type': typemap.get(v.type, v.type),
            'isfunction': isfunction(model, k),
            'isproperty': isproperty(model, k),
            'isrelated': isrelated(model, k),
            'relation': v.comodel_name if v.type in ('many2many', 'many2one', 'one2many') else '',
            'required': v.required and 'required' or '',
            'selection_keys': '',
            'req_default': '',
            'inherits': '',
        }
        if hasattr(v, 'oldname'):
            properties['oldname'] = v.oldname
        if v.type == 'selection':
            if isinstance(v.selection, (tuple, list)):
                properties['selection_keys'] = pycompat.text_type(
                    sorted([x[0] for x in v.selection]))
            else:
                properties['selection_keys'] = 'function'
        elif v.type == 'binary':
            properties['attachment'] = str(getattr(v, "attachment", False))
        default = model._fields[k].default
        if v.required and default:
            if isinstance(default, types.FunctionType):
                # todo: in OpenERP 5 (and in 6 as well),
                # literals are wrapped in a lambda function
                properties['req_default'] = 'function'
            else:
                properties['req_default'] = pycompat.text_type(default)
        for key, value in properties.items():
            if value:
                model_registry.setdefault(k, {})[key] = value
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas',
                   unique=False, filename=None, filename_field='datas_fname',
                   download=False, mimetype=None,
                   default_mimetype='application/octet-stream',
                   access_token=None, env=None):
    """ Get file, attachment or downloadable content

    If the ``xmlid`` and ``id`` parameters are omitted, fetches the default
    value for the binary field (via ``default_get``), otherwise fetches the
    field for that precise record.

    :param str xmlid: xmlid of the record
    :param str model: name of the model to fetch the binary from
    :param int id: id of the record from which to fetch the binary
    :param str field: binary field
    :param bool unique: add a max-age for the cache control
    :param str filename: choose a filename
    :param str filename_field: if no filename is given, build one as model-id-field
    :param bool download: apply headers to download the file
    :param str mimetype: mimetype of the field (for headers)
    :param str default_mimetype: default mimetype if no mimetype found
    :param str access_token: optional token for unauthenticated access
    :param Environment env: by default use request.env
    :returns: (status, headers, content)
    """
    env = env or request.env
    # get object and content
    obj = None
    if xmlid:
        obj = env.ref(xmlid, False)
    elif id and model in env.registry:
        obj = env[model].browse(int(id))

    # obj exists
    if not obj or not obj.exists() or field not in obj:
        return (404, [], None)

    # check read access
    try:
        last_update = obj['__last_update']
    except AccessError:
        return (403, [], None)

    status, headers, content = None, [], None

    # attachment by url check
    module_resource_path = None
    if model == 'ir.attachment' and obj.type == 'url' and obj.url:
        url_match = re.match(r"^/(\w+)/(.+)$", obj.url)
        if url_match:
            module = url_match.group(1)
            module_path = get_module_path(module)
            module_resource_path = get_resource_path(module, url_match.group(2))
            if module_path and module_resource_path:
                module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                module_resource_path = os.path.normpath(module_resource_path)
                if module_resource_path.startswith(module_path):
                    with open(module_resource_path, 'rb') as f:
                        content = base64.b64encode(f.read())
                    last_update = pycompat.text_type(
                        os.path.getmtime(module_resource_path))

        if not module_resource_path:
            module_resource_path = obj.url

        if not content:
            status = 301
            content = module_resource_path
    else:
        # begin redefined part of the original binary_content of
        # odoo/addons/base/ir/ir_http: if the binary field is stored as an
        # attachment of type 'url', redirect to that URL instead of
        # streaming the content
        is_attachment = env[model]._fields[field].attachment
        if is_attachment:
            domain = [
                ('res_model', '=', model),
                ('res_field', '=', field),
                ('res_id', '=', obj.id),
                ('type', '=', 'url'),
            ]
            att = env['ir.attachment'].sudo().search(domain)
            if att:
                content = att.url
                status = 301
        if not content:
            content = obj[field] or ''
        # end redefined part of the original binary_content

    # filename
    if not filename:
        if filename_field in obj:
            filename = obj[filename_field]
        elif module_resource_path:
            filename = os.path.basename(module_resource_path)
        else:
            filename = "%s-%s-%s" % (obj._name, obj.id, field)

    # mimetype
    mimetype = 'mimetype' in obj and obj.mimetype or False
    if not mimetype:
        if filename:
            mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype and getattr(env[model]._fields[field], 'attachment', False):
            # for binary fields, fetch the ir.attachment for the mimetype check
            attach_mimetype = env['ir.attachment'].search_read(
                domain=[('res_model', '=', model),
                        ('res_id', '=', id),
                        ('res_field', '=', field)],
                fields=['mimetype'], limit=1)
            mimetype = attach_mimetype and attach_mimetype[0]['mimetype']
        if not mimetype:
            mimetype = guess_mimetype(base64.b64decode(content),
                                      default=default_mimetype)

    headers += [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff')]

    # cache
    etag = bool(request) and request.httprequest.headers.get('If-None-Match')
    retag = '"%s"' % hashlib.md5(last_update.encode('utf-8')).hexdigest()
    status = status or (304 if etag == retag else 200)
    headers.append(('ETag', retag))
    headers.append(('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0)))

    # content-disposition default name
    if download:
        headers.append(('Content-Disposition', cls.content_disposition(filename)))

    return (status, headers, content)
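# A hedged sketch of how binary_content is typically consumed from a
# controller (the route and default mimetype are assumptions, not taken
# from the source): decode the base64 payload on 200, pass 301 through as
# a redirect, and honour 304 with an empty response.
import base64

import werkzeug.utils
import werkzeug.wrappers

from odoo import http
from odoo.http import request


class ExampleBinaryController(http.Controller):

    @http.route('/example/image/<int:id>', type='http', auth='public')
    def example_image(self, id, **kwargs):
        status, headers, content = request.env['ir.http'].binary_content(
            model='ir.attachment', id=id, default_mimetype='image/png')
        if status == 304:
            return werkzeug.wrappers.Response(status=304, headers=headers)
        if status == 301:
            return werkzeug.utils.redirect(content, code=301)
        if status != 200:
            return request.not_found()
        image_data = base64.b64decode(content)
        headers.append(('Content-Length', len(image_data)))
        return request.make_response(image_data, headers)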
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas',
                   unique=False, filename=None, filename_field='datas_fname',
                   download=False, mimetype=None,
                   default_mimetype='application/octet-stream', env=None):
    """ Get file, attachment or downloadable content

    If the ``xmlid`` and ``id`` parameters are omitted, fetches the default
    value for the binary field (via ``default_get``), otherwise fetches the
    field for that precise record.

    :param str xmlid: xmlid of the record
    :param str model: name of the model to fetch the binary from
    :param int id: id of the record from which to fetch the binary
    :param str field: binary field
    :param bool unique: add a max-age for the cache control
    :param str filename: choose a filename
    :param str filename_field: if no filename is given, build one as model-id-field
    :param bool download: apply headers to download the file
    :param str mimetype: mimetype of the field (for headers)
    :param str default_mimetype: default mimetype if no mimetype found
    :param Environment env: by default use request.env
    :returns: (status, headers, content)
    """
    env = env or request.env
    # get object and content
    obj = None
    if xmlid:
        obj = env.ref(xmlid, False)
    elif id and model in env.registry:
        obj = env[model].browse(int(id))

    # obj exists
    if not obj or not obj.exists() or field not in obj:
        return (404, [], None)

    # check read access
    try:
        last_update = obj['__last_update']
    except AccessError:
        return (403, [], None)

    status, headers, content = None, [], None

    # attachment by url check
    module_resource_path = None
    if model == 'ir.attachment' and obj.type == 'url' and obj.url:
        url_match = re.match(r"^/(\w+)/(.+)$", obj.url)
        if url_match:
            module = url_match.group(1)
            module_path = get_module_path(module)
            module_resource_path = get_resource_path(module, url_match.group(2))
            if module_path and module_resource_path:
                module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                module_resource_path = os.path.normpath(module_resource_path)
                if module_resource_path.startswith(module_path):
                    with open(module_resource_path, 'rb') as f:
                        content = base64.b64encode(f.read())
                    last_update = pycompat.text_type(
                        os.path.getmtime(module_resource_path))

        if not module_resource_path:
            module_resource_path = obj.url

        if not content:
            status = 301
            content = module_resource_path
    else:
        content = obj[field] or ''

    # filename
    if not filename:
        if filename_field in obj:
            filename = obj[filename_field]
        elif module_resource_path:
            filename = os.path.basename(module_resource_path)
        else:
            filename = "%s-%s-%s" % (obj._name, obj.id, field)

    # mimetype
    mimetype = 'mimetype' in obj and obj.mimetype or False
    if not mimetype:
        if filename:
            mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype and getattr(env[model]._fields[field], 'attachment', False):
            # for binary fields, fetch the ir.attachment for the mimetype check
            attach_mimetype = env['ir.attachment'].search_read(
                domain=[('res_model', '=', model),
                        ('res_id', '=', id),
                        ('res_field', '=', field)],
                fields=['mimetype'], limit=1)
            mimetype = attach_mimetype and attach_mimetype[0]['mimetype']
        if not mimetype:
            mimetype = guess_mimetype(base64.b64decode(content),
                                      default=default_mimetype)

    headers += [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff')]

    # cache
    etag = bool(request) and request.httprequest.headers.get('If-None-Match')
    retag = '"%s"' % hashlib.md5(last_update.encode('utf-8')).hexdigest()
    status = status or (304 if etag == retag else 200)
    headers.append(('ETag', retag))
    headers.append(('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0)))

    # content-disposition default name
    if download:
        headers.append(('Content-Disposition', cls.content_disposition(filename)))

    return (status, headers, content)
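# Minimal sketch of the ETag round-trip used above: the tag is the MD5 of
# the record's __last_update timestamp, so a client that resends the same
# If-None-Match value gets a 304 instead of the full payload.
import hashlib

def example_etag(last_update, if_none_match=None):
    retag = '"%s"' % hashlib.md5(last_update.encode('utf-8')).hexdigest()
    status = 304 if if_none_match == retag else 200
    return status, retag

# First request: status, retag = example_etag('2018-01-01 00:00:00')
# Revalidation: example_etag('2018-01-01 00:00:00', retag) -> (304, retag)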
def do(self, fields, columns, options, dryrun=False):
    """ Actual execution of the import

    :param fields: import mapping: maps each column to a field,
                   ``False`` for the columns to ignore
    :type fields: list(str|bool)
    :param columns: columns label
    :type columns: list(str|bool)
    :param dict options:
    :param bool dryrun: performs all import operations (and validations)
                        but rolls back writes, allowing as many errors as
                        possible to be gathered without the risk of
                        clobbering the database.
    :returns: the import result: the ``ids`` of the created records and a
              list of ``messages``. If ``messages`` is empty the import
              executed fully and correctly. Otherwise it contains dicts
              with three keys: ``type``, the type of error
              (``error|warning``); ``message``, the error message
              associated with the error (a string); and ``record``, the
              data which failed to import (or ``false`` if that data
              isn't available or provided)
    :rtype: dict(ids: list(int), messages: list({type, message, record}))
    """
    self.ensure_one()
    self._cr.execute('SAVEPOINT import')

    try:
        data, import_fields = self._convert_import_data(fields, options)
        # Parse date and float fields
        data = self._parse_import_data(data, import_fields, options)
    except ValueError as error:
        return {
            'messages': [{
                'type': 'error',
                'message': pycompat.text_type(error),
                'record': False,
            }]
        }

    _logger.info('importing %d rows...', len(data))

    name_create_enabled_fields = options.pop('name_create_enabled_fields', {})
    model = self.env[self.res_model].with_context(
        import_file=True,
        name_create_enabled_fields=name_create_enabled_fields)
    import_result = model.load(import_fields, data)
    _logger.info('done')

    # If the transaction was aborted, RELEASE SAVEPOINT is going to raise
    # an InternalError (ROLLBACK should work, maybe). Ignore that.
    # TODO: to handle multiple errors, create a savepoint around each
    #       write and release it in case of write error (after adding the
    #       error to the errors array) => can keep on trying to import
    #       stuff, and roll back at the end if there is any error in the
    #       results.
    try:
        if dryrun:
            self._cr.execute('ROLLBACK TO SAVEPOINT import')
            # cancel all changes done to the registry/ormcache
            self.pool.reset_changes()
        else:
            self._cr.execute('RELEASE SAVEPOINT import')
    except psycopg2.InternalError:
        pass

    # Insert/update the column mappings when the import completes
    # successfully
    if import_result['ids'] and options.get('headers'):
        BaseImportMapping = self.env['base_import.mapping']
        for index, column_name in enumerate(columns):
            if column_name:
                # update to the latest selected field
                exist_records = BaseImportMapping.search([
                    ('res_model', '=', self.res_model),
                    ('column_name', '=', column_name),
                ])
                if exist_records:
                    exist_records.write({'field_name': fields[index]})
                else:
                    BaseImportMapping.create({
                        'res_model': self.res_model,
                        'column_name': column_name,
                        'field_name': fields[index],
                    })
    return import_result
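# Hedged sketch: driving this import wizard programmatically. The model
# name 'base_import.import' and its res_model/file/file_type fields follow
# the standard base_import wizard; the CSV payload and target model here
# are illustrative only.
def example_run_import(env):
    wizard = env['base_import.import'].create({
        'res_model': 'res.partner',
        'file': b'name,email\nAlice,alice@example.com\n',
        'file_type': 'text/csv',
    })
    result = wizard.do(
        fields=['name', 'email'],
        columns=['name', 'email'],
        options={'quoting': '"', 'separator': ',', 'headers': True},
        dryrun=True,  # validate everything, then roll back all writes
    )
    return result['messages']  # empty list means a clean import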
def read_xls(self, map1):
    file1 = base64.b64decode(self.file1)
    try:
        book = xlrd.open_workbook(file_contents=file1 or b'')
    except Exception:
        return []
    sheet = book.sheet_by_index(0)
    cols = []
    vals = []
    for x, row in enumerate(pycompat.imap(sheet.row, range(sheet.nrows))):
        val_one = {}
        for c, cell in enumerate(row):
            if x == 0:
                # header row: resolve each column label through the mapping
                col = str(cell.value).lower()
                cols.append(map1.get(col, ''))
                continue
            get_map = cols[c]
            if not get_map:
                continue
            # convert the cell to a text value, mirroring _read_xls_book
            if cell.ctype is xlrd.XL_CELL_NUMBER:
                is_float = cell.value % 1 != 0.0
                value = (pycompat.text_type(cell.value) if is_float
                         else pycompat.text_type(int(cell.value)))
            elif cell.ctype is xlrd.XL_CELL_DATE:
                is_datetime = cell.value % 1 != 0.0
                # emulate xldate_as_datetime for pre-0.9.3
                dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(
                    cell.value, book.datemode))
                value = (dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                         if is_datetime
                         else dt.strftime(DEFAULT_SERVER_DATE_FORMAT))
            elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
                value = u'True' if cell.value else u'False'
            elif cell.ctype is xlrd.XL_CELL_ERROR:
                # error cells are tolerated here instead of raising,
                # unlike _read_xls_book
                value = ''
            else:
                value = cell.value
            if isinstance(get_map, list):
                # list mapping: [field_name, field_type, comodel, label]
                col_to_save = get_map[0]
                col = get_map[3]
                if col_to_save:
                    val_to_save = False
                    tipe = get_map[1]
                    model = get_map[2]
                    if tipe == 'many2one':
                        rec = self.env[model].search([('name', '=', value)])
                        if not rec:
                            raise ValidationError(
                                _('{} : "{}" is not known'.format(col, value)))
                        val_to_save = rec.id
                    val_one[col_to_save] = val_to_save
            else:
                # simple mapping: the value goes straight to the field
                val_one[get_map] = value
        if val_one:
            vals.append((0, 0, val_one))
    return vals
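# Illustrative only: the shape of the `map1` mapping consumed by read_xls,
# inferred from the indexing above. A plain string maps a lower-cased
# column header to a field name; a list adds the metadata used for
# many2one resolution: [field_name, field_type, comodel_name, column_label].
example_map = {
    'product': ['product_id', 'many2one', 'product.product', 'Product'],
    'qty': 'quantity',  # simple column -> field mapping
    'notes': '',        # empty mapping: the column is ignored
}
# read_xls returns one2many commands, e.g.
# [(0, 0, {'product_id': 7, 'quantity': '3'}), ...]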