class BinarySvg(models.Model):
    # Test model exercising SVG uploads on binary fields, covering both
    # storage strategies (attachment vs. database column).
    _name = 'test_new_api.binary_svg'
    _description = 'Test SVG upload'

    name = fields.Char(required=True)
    # Stored as an ir.attachment record (attachment=True).
    image_attachment = fields.Binary(attachment=True)
    # Stored directly in the model's database column (attachment=False).
    image_wo_attachment = fields.Binary(attachment=False)
class test_model(models.Model):
    """Test model exposing one field of each basic type, used to exercise
    field value conversion."""
    _name = 'test_converter.test_model'
    _description = 'Test Converter Model'

    char = fields.Char()
    integer = fields.Integer()
    float = fields.Float()
    numeric = fields.Float(digits=(16, 2))
    many2one = fields.Many2one('test_converter.test_model.sub',
                               group_expand='_gbf_m2o')
    binary = fields.Binary(attachment=False)
    date = fields.Date()
    datetime = fields.Datetime()
    selection_str = fields.Selection(
        [
            ('A', u"Qu'il n'est pas arrivé à Toronto"),
            ('B', u"Qu'il était supposé arriver à Toronto"),
            ('C', u"Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', u"La réponse D"),
        ],
        string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
        u"qu'il fait une escale technique à St Claude, on dit:")
    html = fields.Html()
    text = fields.Text()

    # `base` module does not contain any model that implements the
    # `group_expand` functionality; test this feature here...
    @api.model
    def _gbf_m2o(self, subs, domain, order):
        # Expand groups to all records of the comodel; search is done as
        # superuser so every record appears as a group regardless of the
        # current user's access rights.
        sub_ids = subs._search([], order=order, access_rights_uid=SUPERUSER_ID)
        return subs.browse(sub_ids)
class ConverterTest(models.Model):
    """Test model exposing one field of each basic type, used to exercise
    web editor field conversion."""
    _name = 'web_editor.converter.test'
    _description = 'Web Editor Converter Test'

    # disable translation export for those brilliant field labels and values
    _translate = False

    char = fields.Char()
    integer = fields.Integer()
    float = fields.Float()
    numeric = fields.Float(digits=(16, 2))
    many2one = fields.Many2one('web_editor.converter.test.sub')
    binary = fields.Binary(attachment=False)
    date = fields.Date()
    datetime = fields.Datetime()
    selection_str = fields.Selection(
        [
            ('A', "Qu'il n'est pas arrivé à Toronto"),
            ('B', "Qu'il était supposé arriver à Toronto"),
            ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', "La réponse D"),
        ],
        string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
        u"qu'il fait une escale technique à St Claude, on dit:")
    html = fields.Html()
    text = fields.Text()
class BaseLanguageExport(models.TransientModel):
    """Wizard exporting translation terms for a language as CSV/PO/TGZ."""
    _name = "base.language.export"
    _description = 'Language Export'

    @api.model
    def _get_languages(self):
        # Installed languages, plus a pseudo-entry for exporting an empty
        # translation template (POT file).
        langs = self.env['res.lang'].get_installed()
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + \
               langs

    name = fields.Char('File Name', readonly=True)
    lang = fields.Selection(_get_languages, string='Language', required=True,
                            default=NEW_LANG_KEY)
    format = fields.Selection([('csv', 'CSV File'), ('po', 'PO File'),
                               ('tgz', 'TGZ Archive')],
                              string='File Format',
                              required=True,
                              default='csv')
    modules = fields.Many2many('ir.module.module', 'rel_modules_langexport',
                               'wiz_id', 'module_id', string='Apps To Export',
                               domain=[('state', '=', 'installed')])
    data = fields.Binary('File', readonly=True, attachment=False)
    state = fields.Selection(
        [('choose', 'choose'),
         ('get', 'get')],  # choose language or get the file
        default='choose')

    def act_getfile(self):
        """Generate the export file for the selected language/modules and
        re-open the wizard in 'get' state with the file attached.

        :returns: ``ir.actions.act_window`` action re-displaying this wizard
        :rtype: dict
        """
        this = self[0]
        # NEW_LANG_KEY means "no language": export an empty template.
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = sorted(this.mapped('modules.name')) or ['all']

        with contextlib.closing(io.BytesIO()) as buf:
            tools.trans_export(lang, mods, buf, this.format, self._cr)
            # base64.encodestring() was deprecated since Python 3.1 and
            # removed in 3.9; encodebytes() is the supported equivalent.
            out = base64.encodebytes(buf.getvalue())

        filename = 'new'
        if lang:
            filename = tools.get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        extension = this.format
        # A template (no language selected) in PO format conventionally
        # uses the .pot extension.
        if not lang and extension == 'po':
            extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({'state': 'get', 'data': out, 'name': name})
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
class BaseLanguageImport(models.TransientModel):
    """Wizard importing a translation file (.csv/.po/.pot) for a language."""
    _name = "base.language.import"
    _description = "Language Import"

    name = fields.Char('Language Name', required=True)
    code = fields.Char('ISO Code', size=6, required=True,
                       help="ISO Language and Country code, e.g. en_US")
    data = fields.Binary('File', required=True, attachment=False)
    filename = fields.Char('File Name', required=True)
    overwrite = fields.Boolean(
        'Overwrite Existing Terms',
        default=True,
        help=
        "If you enable this option, existing translations (including custom ones) "
        "will be overwritten and replaced by those in this file")

    def import_lang(self):
        """Load the uploaded translation file into the database.

        The file format is derived from the extension of ``filename``.

        :raises UserError: when the file is malformed or its format cannot
            be handled
        :returns: True on success
        """
        this = self[0]
        this = this.with_context(overwrite=this.overwrite)
        with TemporaryFile('wb+') as buf:
            try:
                # base64.decodestring() was deprecated since Python 3.1 and
                # removed in 3.9; decodebytes() is the supported equivalent.
                buf.write(base64.decodebytes(this.data))

                # now we determine the file format
                buf.seek(0)
                fileformat = os.path.splitext(this.filename)[-1][1:].lower()

                tools.trans_load_data(this._cr,
                                      buf,
                                      fileformat,
                                      this.code,
                                      lang_name=this.name,
                                      context=this._context)
            except ProgrammingError as e:
                _logger.exception(
                    'File unsuccessfully imported, due to a malformed file.')
                # NOTE(review): this cursor is opened but never used before
                # raising; presumably intended for work on a fresh cursor
                # since the current transaction is aborted - confirm.
                with closing(sql_db.db_connect(
                        self._cr.dbname).cursor()) as cr:
                    # Fix: the format string has two placeholders (%r, %s);
                    # the original passed only the exception text, which
                    # raised TypeError instead of the intended UserError.
                    raise UserError(
                        _('File %r not imported due to a malformed file.\n\n' +
                          'This issue can be caused by duplicates entries who are referring to the same field. '
                          +
                          'Please check the content of the file you are trying to import.\n\n'
                          + 'Technical Details:\n%s') %
                        (this.filename, tools.ustr(e)))
            except Exception as e:
                _logger.exception(
                    'File unsuccessfully imported, due to format mismatch.')
                raise UserError(
                    _('File %r not imported due to format mismatch or a malformed file.'
                      ' (Valid formats are .csv, .po, .pot)\n\nTechnical Details:\n%s'
                      ) % (this.filename, tools.ustr(e)))
        return True
class ModelBinary(models.Model):
    # Test model for binary fields: related variants (stored and not
    # stored) and a computed variant.
    _name = 'test_new_api.model_binary'
    _description = 'Test Image field'

    binary = fields.Binary()
    binary_related_store = fields.Binary("Binary Related Store",
                                         related='binary',
                                         store=True,
                                         readonly=False)
    binary_related_no_store = fields.Binary("Binary Related No Store",
                                            related='binary',
                                            store=False,
                                            readonly=False)
    binary_computed = fields.Binary(compute='_compute_binary')

    @api.depends('binary')
    def _compute_binary(self):
        # arbitrary value: 'bin_size' must have no effect
        for record in self:
            record.binary_computed = [(record.id, bool(record.binary))]
class BaseImportModule(models.TransientModel):
    """ Import Module """
    _name = "base.import.module"
    _description = "Import Module"

    module_file = fields.Binary(string='Module .ZIP file',
                                required=True,
                                attachment=False)
    state = fields.Selection([('init', 'init'), ('done', 'done')],
                             string='Status',
                             readonly=True,
                             default='init')
    import_message = fields.Text()
    force = fields.Boolean(
        string='Force init',
        help=
        "Force init mode even if installed. (will update `noupdate='1'` records)"
    )

    def import_module(self):
        """Import the uploaded ZIP file as module(s) and report the result.

        :returns: ``ir.actions.act_window`` action re-opening this wizard so
            the result message stays visible to the user
        :rtype: dict
        """
        self.ensure_one()
        IrModule = self.env['ir.module.module']
        # base64.decodestring() was deprecated since Python 3.1 and removed
        # in 3.9; decodebytes() is the supported equivalent.
        zip_data = base64.decodebytes(self.module_file)
        # Build the in-memory file directly from the decoded payload
        # instead of write()-ing into an empty BytesIO.
        fp = BytesIO(zip_data)
        res = IrModule.import_zipfile(fp, force=self.force)
        self.write({'state': 'done', 'import_message': res[0]})
        context = dict(self.env.context, module_name=res[1])
        # Return wizard otherwise it will close wizard and will not show result message to user.
        return {
            'name': 'Import Module',
            'view_mode': 'form',
            'target': 'new',
            'res_id': self.id,
            'res_model': 'base.import.module',
            'type': 'ir.actions.act_window',
            'context': context,
        }

    def action_module_open(self):
        """Open the list view of the modules that were just imported
        (names taken from the context set by :meth:`import_module`)."""
        self.ensure_one()
        return {
            'domain': [('name', 'in', self.env.context.get('module_name', []))],
            'name': 'Modules',
            'view_mode': 'tree,form',
            'res_model': 'ir.module.module',
            'view_id': False,
            'type': 'ir.actions.act_window',
        }
class MrpRoutingWorkcenter(models.Model):
    """An operation of a routing: a work center usage with its expected
    duration and work sheet."""
    _name = 'mrp.routing.workcenter'
    _description = 'Work Center Usage'
    _order = 'sequence, id'
    _check_company_auto = True

    name = fields.Char('Operation', required=True)
    workcenter_id = fields.Many2one('mrp.workcenter',
                                    'Work Center',
                                    required=True,
                                    check_company=True)
    sequence = fields.Integer(
        'Sequence',
        default=100,
        help=
        "Gives the sequence order when displaying a list of routing Work Centers."
    )
    routing_id = fields.Many2one(
        'mrp.routing',
        'Parent Routing',
        index=True,
        ondelete='cascade',
        required=True,
        help=
        "The routing contains all the Work Centers used and for how long. This will create work orders afterwards "
        "which alters the execution of the manufacturing order.")
    note = fields.Text('Description')
    # Company follows the parent routing's company.
    company_id = fields.Many2one('res.company',
                                 'Company',
                                 readonly=True,
                                 related='routing_id.company_id',
                                 store=True)
    worksheet = fields.Binary('PDF', help="Upload your PDF file.")
    worksheet_type = fields.Selection(
        [('pdf', 'PDF'), ('google_slide', 'Google Slide')],
        string="Work Sheet",
        default="pdf",
        help="Defines if you want to use a PDF or a Google Slide as work sheet."
    )
    worksheet_google_slide = fields.Char(
        'Google Slide',
        help=
        "Paste the url of your Google Slide. Make sure the access to the document is public."
    )
    # 'auto': duration derived from done work orders; 'manual': fixed value.
    time_mode = fields.Selection([('auto', 'Compute based on real time'),
                                  ('manual', 'Set duration manually')],
                                 string='Duration Computation',
                                 default='manual')
    # Number of (most recent) done work orders considered in 'auto' mode.
    time_mode_batch = fields.Integer('Based on', default=10)
    time_cycle_manual = fields.Float(
        'Manual Duration',
        default=60,
        help=
        "Time in minutes. Is the time used in manual mode, or the first time supposed in real time when there are not any work orders yet."
    )
    time_cycle = fields.Float('Duration', compute="_compute_time_cycle")
    workorder_count = fields.Integer("# Work Orders",
                                     compute="_compute_workorder_count")
    batch = fields.Selection([('no', 'Once all products are processed'),
                              ('yes', 'Once some products are processed')],
                             string='Start Next Operation',
                             default='no',
                             required=True)
    batch_size = fields.Float('Quantity to Process', default=1.0)
    workorder_ids = fields.One2many('mrp.workorder',
                                    'operation_id',
                                    string="Work Orders")

    @api.depends('time_cycle_manual', 'time_mode', 'workorder_ids')
    def _compute_time_cycle(self):
        # Manual operations simply copy the manual duration.
        manual_ops = self.filtered(
            lambda operation: operation.time_mode == 'manual')
        for operation in manual_ops:
            operation.time_cycle = operation.time_cycle_manual
        # 'auto' operations: average duration per produced quantity over
        # done work orders, scaled by the work center capacity.
        for operation in self - manual_ops:
            data = self.env['mrp.workorder'].read_group(
                [('operation_id', '=', operation.id), ('state', '=', 'done')],
                ['operation_id', 'duration', 'qty_produced'], ['operation_id'],
                limit=operation.time_mode_batch)
            # NOTE(review): `limit` applies to the number of *groups*, and
            # the domain restricts to a single operation (so at most one
            # group); presumably meant to restrict to the last
            # `time_mode_batch` work orders - confirm.
            count_data = dict((item['operation_id'][0], (item['duration'],
                                                         item['qty_produced']))
                              for item in data)
            if count_data.get(operation.id) and count_data[operation.id][1]:
                # time per unit = total duration / total produced qty,
                # multiplied by capacity (units processed per cycle).
                operation.time_cycle = (
                    count_data[operation.id][0] / count_data[operation.id][1]
                ) * (operation.workcenter_id.capacity or 1.0)
            else:
                # No usable history: fall back on the manual duration.
                operation.time_cycle = operation.time_cycle_manual

    def _compute_workorder_count(self):
        # Count done work orders per operation in a single read_group call.
        data = self.env['mrp.workorder'].read_group(
            [('operation_id', 'in', self.ids), ('state', '=', 'done')],
            ['operation_id'], ['operation_id'])
        count_data = dict((item['operation_id'][0],
                           item['operation_id_count']) for item in data)
        for operation in self:
            operation.workorder_count = count_data.get(operation.id, 0)
class Module(models.Model): _name = "ir.module.module" _rec_name = "shortdesc" _description = "Module" _order = 'sequence,name' @api.model def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False): res = super(Module, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=False) if view_type == 'form' and res.get('toolbar', False): install_id = self.env.ref( 'base.action_server_module_immediate_install').id action = [ rec for rec in res['toolbar']['action'] if rec.get('id', False) != install_id ] res['toolbar'] = {'action': action} return res @classmethod def get_module_info(cls, name): try: return modules.load_information_from_description_file(name) except Exception: _logger.debug( 'Error when trying to fetch information for module %s', name, exc_info=True) return {} @api.depends('name', 'description') def _get_desc(self): for module in self: path = modules.get_module_resource( module.name, 'static/description/index.html') if path: with tools.file_open(path, 'rb') as desc_file: doc = desc_file.read() html = lxml.html.document_fromstring(doc) for element, attribute, link, pos in html.iterlinks(): if element.get('src') and not '//' in element.get( 'src') and not 'static/' in element.get('src'): element.set( 'src', "/%s/static/description/%s" % (module.name, element.get('src'))) module.description_html = tools.html_sanitize( lxml.html.tostring(html)) else: overrides = { 'embed_stylesheet': False, 'doctitle_xform': False, 'output_encoding': 'unicode', 'xml_declaration': False, 'file_insertion_enabled': False, } output = publish_string( source=module.description if not module.application and module.description else '', settings_overrides=overrides, writer=MyWriter()) module.description_html = tools.html_sanitize(output) @api.depends('name') def _get_latest_version(self): default_version = modules.adapt_version('1.0') for module in self: module.installed_version = self.get_module_info(module.name).get( 'version', 
default_version) @api.depends('name', 'state') def _get_views(self): IrModelData = self.env['ir.model.data'].with_context(active_test=True) dmodels = ['ir.ui.view', 'ir.actions.report', 'ir.ui.menu'] for module in self: # Skip uninstalled modules below, no data to find anyway. if module.state not in ('installed', 'to upgrade', 'to remove'): module.views_by_module = "" module.reports_by_module = "" module.menus_by_module = "" continue # then, search and group ir.model.data records imd_models = defaultdict(list) imd_domain = [('module', '=', module.name), ('model', 'in', tuple(dmodels))] for data in IrModelData.sudo().search(imd_domain): imd_models[data.model].append(data.res_id) def browse(model): # as this method is called before the module update, some xmlid # may be invalid at this stage; explictly filter records before # reading them return self.env[model].browse(imd_models[model]).exists() def format_view(v): return '%s%s (%s)' % (v.inherit_id and '* INHERIT ' or '', v.name, v.type) module.views_by_module = "\n".join( sorted(format_view(v) for v in browse('ir.ui.view'))) module.reports_by_module = "\n".join( sorted(r.name for r in browse('ir.actions.report'))) module.menus_by_module = "\n".join( sorted(m.complete_name for m in browse('ir.ui.menu'))) @api.depends('icon') def _get_icon_image(self): for module in self: module.icon_image = '' if module.icon: path_parts = module.icon.split('/') path = modules.get_module_resource(path_parts[1], *path_parts[2:]) else: path = modules.module.get_module_icon(module.name) if path: with tools.file_open(path, 'rb') as image_file: module.icon_image = base64.b64encode(image_file.read()) name = fields.Char('Technical Name', readonly=True, required=True, index=True) category_id = fields.Many2one('ir.module.category', string='Category', readonly=True, index=True) shortdesc = fields.Char('Module Name', readonly=True, translate=True) summary = fields.Char('Summary', readonly=True, translate=True) description = 
fields.Text('Description', readonly=True, translate=True) description_html = fields.Html('Description HTML', compute='_get_desc') author = fields.Char("Author", readonly=True) maintainer = fields.Char('Maintainer', readonly=True) contributors = fields.Text('Contributors', readonly=True) website = fields.Char("Website", readonly=True) # attention: Incorrect field names !! # installed_version refers the latest version (the one on disk) # latest_version refers the installed version (the one in database) # published_version refers the version available on the repository installed_version = fields.Char('Latest Version', compute='_get_latest_version') latest_version = fields.Char('Installed Version', readonly=True) published_version = fields.Char('Published Version', readonly=True) url = fields.Char('URL', readonly=True) sequence = fields.Integer('Sequence', default=100) dependencies_id = fields.One2many('ir.module.module.dependency', 'module_id', string='Dependencies', readonly=True) exclusion_ids = fields.One2many('ir.module.module.exclusion', 'module_id', string='Exclusions', readonly=True) auto_install = fields.Boolean( 'Automatic Installation', help='An auto-installable module is automatically installed by the ' 'system when all its dependencies are satisfied. 
' 'If the module has no dependency, it is always installed.') state = fields.Selection(STATES, string='Status', default='uninstallable', readonly=True, index=True) demo = fields.Boolean('Demo Data', default=False, readonly=True) license = fields.Selection( [('GPL-2', 'GPL Version 2'), ('GPL-2 or any later version', 'GPL-2 or later version'), ('GPL-3', 'GPL Version 3'), ('GPL-3 or any later version', 'GPL-3 or later version'), ('AGPL-3', 'Affero GPL-3'), ('LGPL-3', 'LGPL Version 3'), ('Other OSI approved licence', 'Other OSI Approved License'), ('OEEL-1', 'Coffice Enterprise Edition License v1.0'), ('OPL-1', 'Coffice Proprietary License v1.0'), ('Other proprietary', 'Other Proprietary')], string='License', default='LGPL-3', readonly=True) menus_by_module = fields.Text(string='Menus', compute='_get_views', store=True) reports_by_module = fields.Text(string='Reports', compute='_get_views', store=True) views_by_module = fields.Text(string='Views', compute='_get_views', store=True) application = fields.Boolean('Application', readonly=True) icon = fields.Char('Icon URL') icon_image = fields.Binary(string='Icon', compute='_get_icon_image') to_buy = fields.Boolean('Coffice Enterprise Module', default=False) _sql_constraints = [ ('name_uniq', 'UNIQUE (name)', 'The name of the module must be unique!'), ] def unlink(self): if not self: return True for module in self: if module.state in ('installed', 'to upgrade', 'to remove', 'to install'): raise UserError( _('You are trying to remove a module that is installed or will be installed.' 
)) self.clear_caches() return super(Module, self).unlink() @staticmethod def _check_python_external_dependency(pydep): try: pkg_resources.get_distribution(pydep) except pkg_resources.DistributionNotFound as e: try: importlib.import_module(pydep) _logger.warning( "python external dependency %s should be replaced by it's PyPI package name", pydep) except ImportError: # backward compatibility attempt failed _logger.warning("DistributionNotFound: %s", e) raise Exception('Python library not installed: %s' % (pydep, )) except pkg_resources.VersionConflict as e: _logger.warning("VersionConflict: %s", e) raise Exception('Python library version conflict: %s' % (pydep, )) except Exception as e: _logger.warning("get_distribution(%s) failed: %s", pydep, e) raise Exception('Error finding python library %s' % (pydep, )) @staticmethod def _check_external_dependencies(terp): depends = terp.get('external_dependencies') if not depends: return for pydep in depends.get('python', []): Module._check_python_external_dependency(pydep) for binary in depends.get('bin', []): try: tools.find_in_path(binary) except IOError: raise Exception('Unable to find %r in path' % (binary, )) @classmethod def check_external_dependencies(cls, module_name, newstate='to install'): terp = cls.get_module_info(module_name) try: cls._check_external_dependencies(terp) except Exception as e: if newstate == 'to install': msg = _( 'Unable to install module "%s" because an external dependency is not met: %s' ) elif newstate == 'to upgrade': msg = _( 'Unable to upgrade module "%s" because an external dependency is not met: %s' ) else: msg = _( 'Unable to process module "%s" because an external dependency is not met: %s' ) raise UserError(msg % (module_name, e.args[0])) def _state_update(self, newstate, states_to_update, level=100): if level < 1: raise UserError(_('Recursion error in modules dependencies !')) # whether some modules are installed with demo data demo = False for module in self: # determine dependency 
modules to update/others update_mods, ready_mods = self.browse(), self.browse() for dep in module.dependencies_id: if dep.state == 'unknown': raise UserError( _("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system." ) % ( module.name, dep.name, )) if dep.depend_id.state == newstate: ready_mods += dep.depend_id else: update_mods += dep.depend_id # update dependency modules that require it, and determine demo for module update_demo = update_mods._state_update(newstate, states_to_update, level=level - 1) module_demo = module.demo or update_demo or any( mod.demo for mod in ready_mods) demo = demo or module_demo # check dependencies and update module itself self.check_external_dependencies(module.name, newstate) if module.state in states_to_update: module.write({'state': newstate, 'demo': module_demo}) return demo @assert_log_admin_access def button_install(self): # domain to select auto-installable (but not yet installed) modules auto_domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)] # determine whether an auto-install module must be installed: # - all its dependencies are installed or to be installed, # - at least one dependency is 'to install' install_states = frozenset(('installed', 'to install', 'to upgrade')) def must_install(module): states = { dep.state for dep in module.dependencies_id if dep.auto_install_required } return states <= install_states and 'to install' in states modules = self while modules: # Mark the given modules and their dependencies to be installed. modules._state_update('to install', ['uninstalled']) # Determine which auto-installable modules must be installed. 
modules = self.search(auto_domain).filtered(must_install) # the modules that are installed/to install/to upgrade install_mods = self.search([('state', 'in', list(install_states))]) # check individual exclusions install_names = {module.name for module in install_mods} for module in install_mods: for exclusion in module.exclusion_ids: if exclusion.name in install_names: msg = _('Modules "%s" and "%s" are incompatible.') raise UserError( msg % (module.shortdesc, exclusion.exclusion_id.shortdesc)) # check category exclusions def closure(module): todo = result = module while todo: result |= todo todo = todo.dependencies_id.depend_id return result exclusives = self.env['ir.module.category'].search([('exclusive', '=', True)]) for category in exclusives: # retrieve installed modules in category and sub-categories categories = category.search([('id', 'child_of', category.ids)]) modules = install_mods.filtered( lambda mod: mod.category_id in categories) # the installation is valid if all installed modules in categories # belong to the transitive dependencies of one of them if modules and not any(modules <= closure(module) for module in modules): msg = _( 'You are trying to install incompatible modules in category "%s":' ) labels = dict(self.fields_get(['state'])['state']['selection']) raise UserError("\n".join([msg % category.name] + [ "- %s (%s)" % (module.shortdesc, labels[module.state]) for module in modules ])) return dict(ACTION_DICT, name=_('Install')) @assert_log_admin_access def button_immediate_install(self): """ Installs the selected module(s) immediately and fully, returns the next res.config action to execute :returns: next res.config item to execute :rtype: dict[str, object] """ _logger.info('User #%d triggered module installation', self.env.uid) # We use here the request object (which is thread-local) as a kind of # "global" env because the env is not usable in the following use case. 
# When installing a Chart of Account, I would like to send the # allowed companies to configure it on the correct company. # Otherwise, the SUPERUSER won't be aware of that and will try to # configure the CoA on his own company, which makes no sense. if request: request.allowed_company_ids = self.env.companies.ids return self._button_immediate_function(type(self).button_install) @assert_log_admin_access def button_install_cancel(self): self.write({'state': 'uninstalled', 'demo': False}) return True @assert_log_admin_access def module_uninstall(self): """ Perform the various steps required to uninstall a module completely including the deletion of all database structures created by the module: tables, columns, constraints, etc. """ modules_to_remove = self.mapped('name') self.env['ir.model.data']._module_data_uninstall(modules_to_remove) # we deactivate prefetching to not try to read a column that has been deleted self.with_context(prefetch_fields=False).write({ 'state': 'uninstalled', 'latest_version': False }) return True def _remove_copied_views(self): """ Remove the copies of the views installed by the modules in `self`. Those copies do not have an external id so they will not be cleaned by `_module_data_uninstall`. This is why we rely on `key` instead. It is important to remove these copies because using them will crash if they rely on data that don't exist anymore if the module is removed. """ domain = expression.OR([[('key', '=like', m.name + '.%')] for m in self]) orphans = self.env['ir.ui.view'].with_context( **{ 'active_test': False, MODULE_UNINSTALL_FLAG: True }).search(domain) orphans.unlink() @api.returns('self') def downstream_dependencies(self, known_deps=None, exclude_states=('uninstalled', 'uninstallable', 'to remove')): """ Return the modules that directly or indirectly depend on the modules in `self`, and that satisfy the `exclude_states` filter. 
""" if not self: return self known_deps = known_deps or self.browse() query = """ SELECT DISTINCT m.id FROM ir_module_module_dependency d JOIN ir_module_module m ON (d.module_id=m.id) WHERE d.name IN (SELECT name from ir_module_module where id in %s) AND m.state NOT IN %s AND m.id NOT IN %s """ self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids))) new_deps = self.browse([row[0] for row in self._cr.fetchall()]) missing_mods = new_deps - known_deps known_deps |= new_deps if missing_mods: known_deps |= missing_mods.downstream_dependencies( known_deps, exclude_states) return known_deps @api.returns('self') def upstream_dependencies(self, known_deps=None, exclude_states=('installed', 'uninstallable', 'to remove')): """ Return the dependency tree of modules of the modules in `self`, and that satisfy the `exclude_states` filter. """ if not self: return self known_deps = known_deps or self.browse() query = """ SELECT DISTINCT m.id FROM ir_module_module_dependency d JOIN ir_module_module m ON (d.module_id=m.id) WHERE m.name IN (SELECT name from ir_module_module_dependency where module_id in %s) AND m.state NOT IN %s AND m.id NOT IN %s """ self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids))) new_deps = self.browse([row[0] for row in self._cr.fetchall()]) missing_mods = new_deps - known_deps known_deps |= new_deps if missing_mods: known_deps |= missing_mods.upstream_dependencies( known_deps, exclude_states) return known_deps def next(self): """ Return the action linked to an ir.actions.todo is there exists one that should be executed. 
Otherwise, redirect to /web """ Todos = self.env['ir.actions.todo'] _logger.info('getting next %s', Todos) active_todo = Todos.search([('state', '=', 'open')], limit=1) if active_todo: _logger.info('next action is "%s"', active_todo.name) return active_todo.action_launch() return { 'type': 'ir.actions.act_url', 'target': 'self', 'url': '/web', } def _button_immediate_function(self, function): try: # This is done because the installation/uninstallation/upgrade can modify a currently # running cron job and prevent it from finishing, and since the ir_cron table is locked # during execution, the lock won't be released until timeout. self._cr.execute("SELECT * FROM ir_cron FOR UPDATE NOWAIT") except psycopg2.OperationalError: raise UserError( _("The server is busy right now, module operations are not possible at" " this time, please try again later.")) function(self) self._cr.commit() api.Environment.reset() modules.registry.Registry.new(self._cr.dbname, update_module=True) self._cr.commit() env = api.Environment(self._cr, self._uid, self._context) # pylint: disable=next-method-called config = env['ir.module.module'].next() or {} if config.get('type') not in ('ir.actions.act_window_close', ): return config # reload the client; open the first available root menu menu = env['ir.ui.menu'].search([('parent_id', '=', False)])[:1] return { 'type': 'ir.actions.client', 'tag': 'reload', 'params': { 'menu_id': menu.id }, } @assert_log_admin_access def button_immediate_uninstall(self): """ Uninstall the selected module(s) immediately and fully, returns the next res.config action to execute """ _logger.info('User #%d triggered module uninstallation', self.env.uid) return self._button_immediate_function(type(self).button_uninstall) @assert_log_admin_access def button_uninstall(self): if 'base' in self.mapped('name'): raise UserError(_("The `base` module cannot be uninstalled")) deps = self.downstream_dependencies() (self + deps).write({'state': 'to remove'}) return 
        # NOTE(review): the expression below is the tail of a method whose
        # start lies outside this chunk; kept verbatim.
        dict(ACTION_DICT, name=_('Uninstall'))

    @assert_log_admin_access
    def button_uninstall_wizard(self):
        """ Launch the wizard to uninstall the given module. """
        return {
            'type': 'ir.actions.act_window',
            'target': 'new',
            'name': _('Uninstall module'),
            'view_mode': 'form',
            'res_model': 'base.module.uninstall',
            'context': {'default_module_id': self.id},
        }

    def button_uninstall_cancel(self):
        # Abort a pending uninstallation: put the module back to 'installed'.
        self.write({'state': 'installed'})
        return True

    @assert_log_admin_access
    def button_immediate_upgrade(self):
        """ Upgrade the selected module(s) immediately and fully,
        return the next res.config action to execute. """
        return self._button_immediate_function(type(self).button_upgrade)

    @assert_log_admin_access
    def button_upgrade(self):
        """ Schedule an upgrade of these modules and of every installed module
        that (transitively) depends on them; install missing dependencies. """
        Dependency = self.env['ir.module.module.dependency']
        self.update_list()

        # Worklist traversal: `todo` grows while we iterate, so an index-based
        # while loop is used instead of a plain for loop.
        todo = list(self)
        i = 0
        while i < len(todo):
            module = todo[i]
            i += 1
            if module.state not in ('installed', 'to upgrade'):
                raise UserError(
                    _("Can not upgrade module '%s'. It is not installed.") %
                    (module.name, ))
            self.check_external_dependencies(module.name, 'to upgrade')
            # Enqueue installed modules that depend on this one so they get
            # upgraded as well.
            for dep in Dependency.search([('name', '=', module.name)]):
                if dep.module_id.state == 'installed' and dep.module_id not in todo:
                    todo.append(dep.module_id)

        self.browse(module.id for module in todo).write({'state': 'to upgrade'})

        to_install = []
        for module in todo:
            for dep in module.dependencies_id:
                if dep.state == 'unknown':
                    raise UserError(
                        _('You try to upgrade the module %s that depends on the module: %s.\nBut this module is not available in your system.'
                          ) % (
                              module.name,
                              dep.name,
                          ))
                if dep.state == 'uninstalled':
                    to_install += self.search([('name', '=', dep.name)]).ids
        # Install any uninstalled dependencies before applying the upgrade.
        self.browse(to_install).button_install()
        return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))

    @assert_log_admin_access
    def button_upgrade_cancel(self):
        # Abort a pending upgrade: put the module back to 'installed'.
        self.write({'state': 'installed'})
        return True

    @staticmethod
    def get_values_from_terp(terp):
        """ Map a module manifest dict (terp) onto ir.module.module
        field values, applying defaults for missing keys. """
        return {
            'description': terp.get('description', ''),
            'shortdesc': terp.get('name', ''),
            'author': terp.get('author', 'Unknown'),
            'maintainer': terp.get('maintainer', False),
            'contributors': ', '.join(terp.get('contributors', [])) or False,
            'website': terp.get('website', ''),
            'license': terp.get('license', 'LGPL-3'),
            'sequence': terp.get('sequence', 100),
            'application': terp.get('application', False),
            'auto_install': terp.get('auto_install', False) is not False,
            'icon': terp.get('icon', False),
            'summary': terp.get('summary', ''),
            'url': terp.get('url') or terp.get('live_test_url', ''),
            'to_buy': False
        }

    @api.model
    def create(self, vals):
        """ Create the module record plus a noupdate ir.model.data entry so
        the record is protected from deletion on module updates. """
        new = super(Module, self).create(vals)
        module_metadata = {
            'name': 'module_%s' % vals['name'],
            'model': 'ir.module.module',
            'module': 'base',
            'res_id': new.id,
            'noupdate': True,
        }
        self.env['ir.model.data'].create(module_metadata)
        return new

    # update the list of available packages
    @assert_log_admin_access
    @api.model
    def update_list(self):
        """ Synchronize the ir.module.module records with the modules found
        on disk; returns [number updated, number added]. """
        res = [0, 0]  # [update, add]

        default_version = modules.adapt_version('1.0')
        known_mods = self.with_context(lang=None).search([])
        known_mods_names = {mod.name: mod for mod in known_mods}

        # iterate through detected modules and update/create them in db
        for mod_name in modules.get_modules():
            mod = known_mods_names.get(mod_name)
            terp = self.get_module_info(mod_name)
            values = self.get_values_from_terp(terp)

            if mod:
                updated_values = {}
                for key in values:
                    old = getattr(mod, key)
                    # Only write values that actually changed (and ignore
                    # falsy-to-falsy transitions).
                    if (old or values[key]) and values[key] != old:
                        updated_values[key] = values[key]
                if terp.get('installable', True) and mod.state == 'uninstallable':
                    updated_values['state'] = 'uninstalled'
                if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
                    res[0] += 1
                if updated_values:
                    mod.write(updated_values)
            else:
                mod_path = modules.get_module_path(mod_name)
                if not mod_path or not terp:
                    continue
                state = "uninstalled" if terp.get('installable', True) else "uninstallable"
                mod = self.create(dict(name=mod_name, state=state, **values))
                res[1] += 1

            mod._update_dependencies(terp.get('depends', []), terp.get('auto_install'))
            mod._update_exclusions(terp.get('excludes', []))
            mod._update_category(terp.get('category', 'Uncategorized'))

        return res

    @assert_log_admin_access
    def download(self, download=True):
        # Kept for API compatibility; downloading is handled elsewhere.
        return []

    @assert_log_admin_access
    @api.model
    def install_from_urls(self, urls):
        """ Download, extract and install the modules mapped in ``urls``
        ({module_name: download_url or falsy}); may restart the server. """
        if not self.env.user.has_group('base.group_system'):
            raise AccessDenied()

        # One-click install is opt-in - cfr Issue #15225
        ad_dir = tools.config.addons_data_dir
        if not os.access(ad_dir, os.W_OK):
            msg = (_("Automatic install of downloaded Apps is currently disabled.") + "\n\n" +
                   _("To enable it, make sure this directory exists and is writable on the server:") +
                   "\n%s" % ad_dir)
            _logger.warning(msg)
            raise UserError(msg)

        # NOTE(review): `urls` is this method's dict parameter, yet
        # `urls.url_parse` reads like the werkzeug `urls` module — verify the
        # module import is not shadowed by the parameter name.
        apps_server = urls.url_parse(self.get_apps_server())

        OPENERP = coffice.release.product_name.lower()
        tmp = tempfile.mkdtemp()
        _logger.debug('Install from url: %r', urls)
        try:
            # 1. Download & unzip missing modules
            for module_name, url in urls.items():
                if not url:
                    continue  # nothing to download, local version is already the last one
                up = urls.url_parse(url)
                # Refuse downloads that do not come from the configured apps
                # server (anti-spoofing check).
                if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
                    raise AccessDenied()
                try:
                    _logger.info('Downloading module `%s` from OpenERP Apps', module_name)
                    response = requests.get(url)
                    response.raise_for_status()
                    content = response.content
                except Exception:
                    _logger.exception('Failed to fetch module %s', module_name)
                    raise UserError(
                        _('The `%s` module appears to be unavailable at the moment, please try again later.'
                          ) % module_name)
                else:
                    zipfile.ZipFile(io.BytesIO(content)).extractall(tmp)
                    assert os.path.isdir(os.path.join(tmp, module_name))

            # 2a. Copy/Replace module source in addons path
            for module_name, url in urls.items():
                if module_name == OPENERP or not url:
                    continue  # OPENERP is special case, handled below, and no URL means local module
                module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
                bck = backup(module_path, False)
                _logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
                shutil.move(os.path.join(tmp, module_name), module_path)
                if bck:
                    shutil.rmtree(bck)

            # 2b. Copy/Replace server+base module source if downloaded
            if urls.get(OPENERP):
                # special case. it contains the server and the base module.
                # extract path is not the same
                base_path = os.path.dirname(modules.get_module_path('base'))

                # copy all modules in the SERVER/coffice/addons directory to the new "coffice" module (except base itself)
                for d in os.listdir(base_path):
                    if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
                        destdir = os.path.join(tmp, OPENERP, 'addons', d)  # XXX 'coffice' subdirectory ?
                        shutil.copytree(os.path.join(base_path, d), destdir)

                # then replace the server by the new "base" module
                server_dir = tools.config['root_path']  # XXX or dirname()
                bck = backup(server_dir)
                _logger.info('Copy downloaded module `coffice` to `%s`', server_dir)
                shutil.move(os.path.join(tmp, OPENERP), server_dir)
                #if bck:
                #    shutil.rmtree(bck)

            self.update_list()

            with_urls = [
                module_name for module_name, url in urls.items() if url
            ]
            downloaded = self.search([('name', 'in', with_urls)])
            installed = self.search([('id', 'in', downloaded.ids),
                                     ('state', '=', 'installed')])

            to_install = self.search([('name', 'in', list(urls)),
                                      ('state', '=', 'uninstalled')])
            post_install_action = to_install.button_immediate_install()

            if installed or to_install:
                # in this case, force server restart to reload python code...
                self._cr.commit()
                coffice.service.server.restart()
                return {
                    'type': 'ir.actions.client',
                    'tag': 'home',
                    'params': {'wait': True},
                }
            return post_install_action
        finally:
            shutil.rmtree(tmp)

    @api.model
    def get_apps_server(self):
        # Configurable apps-store endpoint, with a hard-coded fallback.
        return tools.config.get('apps_server', 'https://apps.coffice.com/apps')

    def _update_dependencies(self, depends=None, auto_install_requirements=()):
        """ Sync ir_module_module_dependency rows with the manifest's
        ``depends`` list via raw SQL (parameterized), then flag which
        dependencies are required for auto-install. """
        existing = set(dep.name for dep in self.dependencies_id)
        needed = set(depends or [])
        for dep in (needed - existing):
            self._cr.execute(
                'INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)',
                (self.id, dep))
        for dep in (existing - needed):
            self._cr.execute(
                'DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s',
                (self.id, dep))
        self._cr.execute(
            'UPDATE ir_module_module_dependency SET auto_install_required = (name = any(%s)) WHERE module_id = %s',
            (list(auto_install_requirements or ()), self.id))
        # Raw SQL bypasses the ORM cache, so invalidate it explicitly.
        self.invalidate_cache(['dependencies_id'], self.ids)

    def _update_exclusions(self, excludes=None):
        """ Sync ir_module_module_exclusion rows with the manifest's
        ``excludes`` list via raw SQL (parameterized). """
        existing = set(excl.name for excl in self.exclusion_ids)
        needed = set(excludes or [])
        for name in (needed - existing):
            self._cr.execute(
                'INSERT INTO ir_module_module_exclusion (module_id, name) VALUES (%s, %s)',
                (self.id, name))
        for name in (existing - needed):
            self._cr.execute(
                'DELETE FROM ir_module_module_exclusion WHERE module_id=%s AND name=%s',
                (self.id, name))
        # Raw SQL bypasses the ORM cache, so invalidate it explicitly.
        self.invalidate_cache(['exclusion_ids'], self.ids)

    def _update_category(self, category='Uncategorized'):
        """ Align the module's category with the '/'-separated path declared
        in its manifest, creating intermediate categories as needed. """
        current_category = self.category_id
        current_category_path = []
        while current_category:
            current_category_path.insert(0, current_category.name)
            current_category = current_category.parent_id

        categs = category.split('/')
        if categs != current_category_path:
            cat_id = modules.db.create_categories(self._cr, categs)
            self.write({'category_id': cat_id})

    def _update_translations(self, filter_lang=None):
        """ (Re)load translation terms for these modules, restricted to
        ``filter_lang`` (defaults to all installed languages). """
        if not filter_lang:
            langs = self.env['res.lang'].get_installed()
            filter_lang = [code for code, _ in langs]
        elif not isinstance(filter_lang, (list, tuple)):
            filter_lang = [filter_lang]

        update_mods = self.filtered(lambda r: r.state in ('installed', 'to install', 'to upgrade'))
        # Load terms in dependency order so overriding terms win.
        mod_dict = {
            mod.name: mod.dependencies_id.mapped('name')
            for mod in update_mods
        }
        mod_names = topological_sort(mod_dict)
        self.env['ir.translation']._load_module_terms(mod_names, filter_lang)

    def _check(self):
        # Warn (non-fatal) about modules missing an HTML description.
        for module in self:
            if not module.description_html:
                _logger.warning('module %s: description is empty !', module.name)

    @api.model
    @tools.ormcache()
    def _installed(self):
        """ Return the set of installed modules as a dictionary {name: id} """
        return {
            module.name: module.id
            for module in self.sudo().search([('state', '=', 'installed')])
        }
class BaseDocumentLayout(models.TransientModel):
    """ Customise the company document layout and display a live preview """
    _name = 'base.document.layout'
    _description = 'Company Document Layout'

    company_id = fields.Many2one('res.company', default=lambda self: self.env.company, required=True)

    # Layout settings are related (readonly=False) to the company, so saving
    # the wizard writes straight through to res.company.
    logo = fields.Binary(related='company_id.logo', readonly=False)
    preview_logo = fields.Binary(related='logo', string="Preview logo")
    report_header = fields.Text(related='company_id.report_header', readonly=False)
    report_footer = fields.Text(related='company_id.report_footer', readonly=False)

    paperformat_id = fields.Many2one(related='company_id.paperformat_id', readonly=False)
    external_report_layout_id = fields.Many2one(related='company_id.external_report_layout_id', readonly=False)

    font = fields.Selection(related='company_id.font', readonly=False)
    primary_color = fields.Char(related='company_id.primary_color', readonly=False)
    secondary_color = fields.Char(related='company_id.secondary_color', readonly=False)

    # True when the chosen colors differ from the ones extracted from the logo.
    custom_colors = fields.Boolean(compute="_compute_custom_colors", readonly=False)
    logo_primary_color = fields.Char(compute="_compute_logo_colors")
    logo_secondary_color = fields.Char(compute="_compute_logo_colors")

    report_layout_id = fields.Many2one('report.layout')
    preview = fields.Html(compute='_compute_preview')

    @api.depends('logo_primary_color', 'logo_secondary_color', 'primary_color', 'secondary_color')
    def _compute_custom_colors(self):
        """ Flag layouts whose colors no longer match the logo's dominant
        colors (case-insensitive hex comparison). """
        for wizard in self:
            logo_primary = wizard.logo_primary_color or ''
            logo_secondary = wizard.logo_secondary_color or ''
            # Force lower case on color to ensure that FF01AA == ff01aa
            wizard.custom_colors = (
                wizard.logo and wizard.primary_color and wizard.secondary_color
                and not (wizard.primary_color.lower() == logo_primary.lower()
                         and wizard.secondary_color.lower() == logo_secondary.lower()))

    @api.depends('logo')
    def _compute_logo_colors(self):
        """ Extract the two dominant colors of the logo. """
        for wizard in self:
            if wizard._context.get('bin_size'):
                # bin_size replaces binary content by its size; re-read the
                # actual image bytes before parsing colors.
                wizard_for_image = wizard.with_context(bin_size=False)
            else:
                wizard_for_image = wizard
            wizard.logo_primary_color, wizard.logo_secondary_color = wizard_for_image._parse_logo_colors()

    @api.depends('report_layout_id', 'logo', 'font', 'primary_color', 'secondary_color')
    def _compute_preview(self):
        """ compute a qweb based preview to display on the wizard """
        for wizard in self:
            if wizard.report_layout_id:
                ir_qweb = wizard.env['ir.qweb']
                wizard.preview = ir_qweb.render('base.layout_preview', {'company': wizard})
            else:
                wizard.preview = False

    @api.onchange('company_id')
    def _onchange_company_id(self):
        """ Reload every layout field from the newly selected company and
        pick a matching report layout. """
        for wizard in self:
            wizard.logo = wizard.company_id.logo
            wizard.report_header = wizard.company_id.report_header
            wizard.report_footer = wizard.company_id.report_footer
            wizard.paperformat_id = wizard.company_id.paperformat_id
            wizard.external_report_layout_id = wizard.company_id.external_report_layout_id
            wizard.font = wizard.company_id.font
            wizard.primary_color = wizard.company_id.primary_color
            wizard.secondary_color = wizard.company_id.secondary_color
            wizard_layout = wizard.env["report.layout"].search([
                ('view_id.key', '=', wizard.company_id.external_report_layout_id.key)
            ])
            # Fall back to the first available layout when the company's view
            # does not match any report.layout record.
            wizard.report_layout_id = wizard_layout or wizard_layout.search([], limit=1)

            if not wizard.primary_color:
                wizard.primary_color = wizard.logo_primary_color or DEFAULT_PRIMARY
            if not wizard.secondary_color:
                wizard.secondary_color = wizard.logo_secondary_color or DEFAULT_SECONDARY

    @api.onchange('custom_colors')
    def _onchange_custom_colors(self):
        # Unchecking "custom colors" snaps colors back to the logo's palette.
        for wizard in self:
            if wizard.logo and not wizard.custom_colors:
                wizard.primary_color = wizard.logo_primary_color or DEFAULT_PRIMARY
                wizard.secondary_color = wizard.logo_secondary_color or DEFAULT_SECONDARY

    @api.onchange('report_layout_id')
    def _onchange_report_layout_id(self):
        for wizard in self:
            wizard.external_report_layout_id = wizard.report_layout_id.view_id

    @api.onchange('logo')
    def _onchange_logo(self):
        for wizard in self:
            # It is admitted that if the user puts the original image back,
            # it won't change colors
            company = wizard.company_id
            # at that point wizard.logo has been assigned the value present in DB
            if wizard.logo == company.logo and company.primary_color and company.secondary_color:
                continue
            if wizard.logo_primary_color:
                wizard.primary_color = wizard.logo_primary_color
            if wizard.logo_secondary_color:
                wizard.secondary_color = wizard.logo_secondary_color

    def _parse_logo_colors(self, logo=None, white_threshold=225):
        """ Identifies dominant colors

        First resizes the original image to improve performance, then
        discards transparent colors and white-ish colors, then calls the
        averaging method twice to evaluate both primary and secondary colors.

        :param logo: alternate logo to process
        :param white_threshold: arbitrary value defining the maximum value
            a color can reach
        :return colors: hex values of primary and secondary colors
        """
        self.ensure_one()
        logo = logo or self.logo
        if not logo:
            return False, False

        # The "===" gives different base64 encoding a correct padding
        logo += b'===' if type(logo) == bytes else '==='
        try:
            # Catches exceptions caused by logo not being an image
            image = tools.image_fix_orientation(tools.base64_to_image(logo))
        except Exception:
            return False, False

        # Resize to a fixed 50px height to bound getcolors() work.
        base_w, base_h = image.size
        w = int(50 * base_w / base_h)
        h = 50

        # Converts to RGBA if no alpha detected
        image_converted = image.convert('RGBA') if 'A' not in image.getbands() else image
        image_resized = image_converted.resize((w, h))

        # Keep only opaque, non-white-ish pixels.
        colors = []
        for color in image_resized.getcolors(w * h):
            if not (color[1][0] > white_threshold
                    and color[1][1] > white_threshold
                    and color[1][2] > white_threshold) and color[1][3] > 0:
                colors.append(color)

        if not colors:  # May happen when the whole image is white
            return False, False
        primary, remaining = tools.average_dominant_color(colors)
        secondary = tools.average_dominant_color(remaining)[0] if len(remaining) > 0 else primary

        # Lightness and saturation are calculated here.
        # - If both colors have a similar lightness, the most colorful becomes primary
        # - When the difference in lightness is too great, the brightest color becomes primary
        l_primary = tools.get_lightness(primary)
        l_secondary = tools.get_lightness(secondary)
        if (l_primary < 0.2 and l_secondary < 0.2) or (l_primary >= 0.2 and l_secondary >= 0.2):
            s_primary = tools.get_saturation(primary)
            s_secondary = tools.get_saturation(secondary)
            if s_primary < s_secondary:
                primary, secondary = secondary, primary
        elif l_secondary > l_primary:
            primary, secondary = secondary, primary

        return tools.rgb_to_hex(primary), tools.rgb_to_hex(secondary)

    @api.model
    def action_open_base_document_layout(self, action_ref=None):
        """ Return the act_window action opening this wizard. """
        if not action_ref:
            action_ref = 'base.action_base_document_layout_configurator'
        return self.env.ref(action_ref).read()[0]

    def document_layout_save(self):
        # meant to be overridden
        return self.env.context.get('report_action') or {
            'type': 'ir.actions.act_window_close'
        }
class MrpWorkorder(models.Model):
    """ A single operation of a manufacturing order, executed on a work
    center. NOTE: this class continues past the end of this chunk; the
    trailing ``button_start`` body is kept verbatim as truncated. """
    _name = 'mrp.workorder'
    _description = 'Work Order'
    _inherit = ['mail.thread', 'mail.activity.mixin', 'mrp.abstract.workorder']

    def _read_group_workcenter_id(self, workcenters, domain, order):
        # group_expand hook: show all workcenters (or the default one from
        # context) as kanban columns, even those without workorders.
        workcenter_ids = self.env.context.get('default_workcenter_id')
        if not workcenter_ids:
            workcenter_ids = workcenters._search([], order=order, access_rights_uid=SUPERUSER_ID)
        return workcenters.browse(workcenter_ids)

    name = fields.Char('Work Order', required=True,
                       states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
    company_id = fields.Many2one('res.company', 'Company',
                                 default=lambda self: self.env.company,
                                 required=True, index=True, readonly=True)
    workcenter_id = fields.Many2one('mrp.workcenter', 'Work Center', required=True,
                                    states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
                                    group_expand='_read_group_workcenter_id', check_company=True)
    # NOTE(review): several related Selection fields below pass a bare string
    # as the first positional argument (normally the selection list) — verify
    # it is intended as the field label.
    working_state = fields.Selection('Workcenter Status',
                                     related='workcenter_id.working_state',
                                     readonly=False,
                                     help='Technical: used in views only')
    production_availability = fields.Selection('Stock Availability', readonly=True,
                                               related='production_id.reservation_state', store=True,
                                               help='Technical: used in views and domains only.')
    production_state = fields.Selection('Production State', readonly=True,
                                        related='production_id.state',
                                        help='Technical: used in views only.')
    qty_production = fields.Float('Original Production Quantity', readonly=True,
                                  related='production_id.product_qty')
    qty_remaining = fields.Float('Quantity To Be Produced',
                                 compute='_compute_qty_remaining',
                                 digits='Product Unit of Measure')
    qty_produced = fields.Float(
        'Quantity', default=0.0, readonly=True,
        digits='Product Unit of Measure',
        help="The number of products already handled by this work order")
    is_produced = fields.Boolean(string="Has Been Produced", compute='_compute_is_produced')
    state = fields.Selection([('pending', 'Waiting for another WO'),
                              ('ready', 'Ready'),
                              ('progress', 'In Progress'),
                              ('done', 'Finished'),
                              ('cancel', 'Cancelled')],
                             string='Status', default='pending')
    leave_id = fields.Many2one(
        'resource.calendar.leaves',
        help='Slot into workcenter calendar once planned',
        check_company=True)
    # Planned dates are stored on leave_id; see the comment above
    # _compute_dates_planned for why compute/inverse are needed.
    date_planned_start = fields.Datetime('Scheduled Date Start',
                                         compute='_compute_dates_planned',
                                         inverse='_set_dates_planned',
                                         states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
                                         store=True, tracking=True)
    date_planned_finished = fields.Datetime('Scheduled Date Finished',
                                            compute='_compute_dates_planned',
                                            inverse='_set_dates_planned',
                                            states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
                                            store=True, tracking=True)
    date_start = fields.Datetime('Effective Start Date',
                                 states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
    date_finished = fields.Datetime('Effective End Date',
                                    states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
    duration_expected = fields.Float('Expected Duration', digits=(16, 2),
                                     states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
                                     help="Expected duration (in minutes)")
    duration = fields.Float('Real Duration', compute='_compute_duration',
                            readonly=True, store=True)
    duration_unit = fields.Float('Duration Per Unit',
                                 compute='_compute_duration',
                                 readonly=True, store=True)
    duration_percent = fields.Integer('Duration Deviation (%)',
                                      compute='_compute_duration',
                                      group_operator="avg", readonly=True, store=True)
    progress = fields.Float('Progress Done (%)', digits=(16, 2),
                            compute='_compute_progress')
    operation_id = fields.Many2one('mrp.routing.workcenter', 'Operation',
                                   check_company=True)
    # Should be used differently as BoM can change in the meantime
    worksheet = fields.Binary('Worksheet', related='operation_id.worksheet',
                              readonly=True)
    worksheet_type = fields.Selection('Worksheet Type',
                                      related='operation_id.worksheet_type',
                                      readonly=True)
    worksheet_google_slide = fields.Char(
        'Worksheet URL', related='operation_id.worksheet_google_slide',
        readonly=True)
    move_raw_ids = fields.One2many('stock.move', 'workorder_id', 'Raw Moves',
                                   domain=[('raw_material_production_id', '!=', False),
                                           ('production_id', '=', False)])
    move_finished_ids = fields.One2many('stock.move', 'workorder_id', 'Finished Moves',
                                        domain=[('raw_material_production_id', '=', False),
                                                ('production_id', '!=', False)])
    move_line_ids = fields.One2many(
        'stock.move.line', 'workorder_id', 'Moves to Track',
        help="Inventory moves for which you must scan a lot number at this work order")
    finished_lot_id = fields.Many2one(
        'stock.production.lot', 'Lot/Serial Number',
        domain="[('id', 'in', allowed_lots_domain)]",
        states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
        check_company=True)
    time_ids = fields.One2many('mrp.workcenter.productivity', 'workorder_id')
    is_user_working = fields.Boolean(
        'Is the Current User Working',
        compute='_compute_working_users',
        help="Technical field indicating whether the current user is working. ")
    working_user_ids = fields.One2many('res.users',
                                       string='Working user on this work order.',
                                       compute='_compute_working_users')
    last_working_user_id = fields.One2many('res.users',
                                           string='Last user that worked on this work order.',
                                           compute='_compute_working_users')
    next_work_order_id = fields.Many2one('mrp.workorder', "Next Work Order",
                                         check_company=True)
    scrap_ids = fields.One2many('stock.scrap', 'workorder_id')
    scrap_count = fields.Integer(compute='_compute_scrap_move_count',
                                 string='Scrap Move')
    production_date = fields.Datetime(
        'Production Date', related='production_id.date_planned_start',
        store=True, readonly=False)
    color = fields.Integer('Color', compute='_compute_color')
    capacity = fields.Float(
        'Capacity', default=1.0,
        help="Number of pieces that can be produced in parallel.")
    raw_workorder_line_ids = fields.One2many('mrp.workorder.line',
                                             'raw_workorder_id',
                                             string='Components')
    finished_workorder_line_ids = fields.One2many('mrp.workorder.line',
                                                  'finished_workorder_id',
                                                  string='By-products')
    allowed_lots_domain = fields.One2many(
        comodel_name='stock.production.lot',
        compute="_compute_allowed_lots_domain")

    # Both `date_planned_start` and `date_planned_finished` are related fields on `leave_id`. Let's say
    # we slide a workorder on a gantt view, a single call to write is made with both
    # fields Changes. As the ORM doesn't batch the write on related fields and instead
    # makes multiple call, the constraint check_dates() is raised.
    # That's why the compute and set methods are needed. to ensure the dates are updated
    # in the same time.
    @api.depends('leave_id')
    def _compute_dates_planned(self):
        for workorder in self:
            workorder.date_planned_start = workorder.leave_id.date_from
            workorder.date_planned_finished = workorder.leave_id.date_to

    def _set_dates_planned(self):
        # Write both bounds on the calendar leave in a single write, so the
        # leave's date constraint sees consistent values.
        date_from = self[0].date_planned_start
        date_to = self[0].date_planned_finished
        self.mapped('leave_id').write({
            'date_from': date_from,
            'date_to': date_to,
        })

    @api.onchange('finished_lot_id')
    def _onchange_finished_lot_id(self):
        """When the user changes the lot being currently produced, suggest
        a quantity to produce consistent with the previous workorders. """
        previous_wo = self.env['mrp.workorder'].search([('next_work_order_id', '=', self.id)])
        if previous_wo:
            line = previous_wo.finished_workorder_line_ids.filtered(
                lambda line: line.product_id == self.product_id and line.lot_id == self.finished_lot_id)
            if line:
                self.qty_producing = line.qty_done

    @api.onchange('date_planned_finished')
    def _onchange_date_planned_finished(self):
        # Keep the expected duration (minutes) in sync with the planned span.
        if self.date_planned_start and self.date_planned_finished:
            diff = self.date_planned_finished - self.date_planned_start
            self.duration_expected = diff.total_seconds() / 60

    @api.depends(
        'production_id.workorder_ids.finished_workorder_line_ids',
        'production_id.workorder_ids.finished_workorder_line_ids.qty_done',
        'production_id.workorder_ids.finished_workorder_line_ids.lot_id')
    def _compute_allowed_lots_domain(self):
        """ Check if all the finished products has been assigned to a serial
        number or a lot in other workorders. If yes, restrict the selectable lot
        to the lot/sn used in other workorders.
        """
        productions = self.mapped('production_id')
        treated = self.browse()
        for production in productions:
            if production.product_id.tracking == 'none':
                continue

            rounding = production.product_uom_id.rounding
            finished_workorder_lines = production.workorder_ids.mapped(
                'finished_workorder_line_ids').filtered(
                    lambda wl: wl.product_id == production.product_id)
            qties_done_per_lot = defaultdict(list)
            for finished_workorder_line in finished_workorder_lines:
                # It is possible to have finished workorder lines without a lot (eg using the dummy
                # test type). Ignore them when computing the allowed lots.
                if finished_workorder_line.lot_id:
                    qties_done_per_lot[finished_workorder_line.lot_id.id].append(
                        finished_workorder_line.qty_done)

            qty_to_produce = production.product_qty
            allowed_lot_ids = self.env['stock.production.lot']
            # Per lot, workorders run in sequence, so the max qty_done across
            # workorders is the quantity actually produced for that lot.
            qty_produced = sum(
                [max(qty_dones) for qty_dones in qties_done_per_lot.values()])
            if float_compare(qty_produced, qty_to_produce, precision_rounding=rounding) < 0:
                # If we haven't produced enough, all lots are available
                allowed_lot_ids = self.env['stock.production.lot'].search([
                    ('product_id', '=', production.product_id.id),
                    ('company_id', '=', production.company_id.id),
                ])
            else:
                # If we produced enough, only the already produced lots are available
                allowed_lot_ids = self.env['stock.production.lot'].browse(
                    qties_done_per_lot.keys())
            workorders = production.workorder_ids.filtered(
                lambda wo: wo.state not in ('done', 'cancel'))
            for workorder in workorders:
                if workorder.product_tracking == 'serial':
                    # A serial number may only be produced once across the MO.
                    workorder.allowed_lots_domain = allowed_lot_ids - workorder.finished_workorder_line_ids.filtered(
                        lambda wl: wl.product_id == production.product_id
                    ).mapped('lot_id')
                else:
                    workorder.allowed_lots_domain = allowed_lot_ids
                treated |= workorder
        (self - treated).allowed_lots_domain = False

    def name_get(self):
        # Display as "<MO> - <product> - <operation>".
        return [(wo.id, "%s - %s - %s" % (wo.production_id.name, wo.product_id.name, wo.name))
                for wo in self]

    def unlink(self):
        # Removes references to workorder to avoid Validation Error
        (self.mapped('move_raw_ids') | self.mapped('move_finished_ids')).write(
            {'workorder_id': False})
        self.mapped('leave_id').unlink()
        return super(MrpWorkorder, self).unlink()

    @api.depends('production_id.product_qty', 'qty_produced')
    def _compute_is_produced(self):
        self.is_produced = False
        for order in self.filtered(lambda p: p.production_id):
            rounding = order.production_id.product_uom_id.rounding
            order.is_produced = float_compare(order.qty_produced,
                                              order.production_id.product_qty,
                                              precision_rounding=rounding) >= 0

    @api.depends('time_ids.duration', 'qty_produced')
    def _compute_duration(self):
        for order in self:
            order.duration = sum(order.time_ids.mapped('duration'))
            order.duration_unit = round(order.duration / max(order.qty_produced, 1), 2)  # rounding 2 because it is a time
            if order.duration_expected:
                order.duration_percent = 100 * (order.duration_expected - order.duration) / order.duration_expected
            else:
                order.duration_percent = 0

    @api.depends('duration', 'duration_expected', 'state')
    def _compute_progress(self):
        for order in self:
            if order.state == 'done':
                order.progress = 100
            elif order.duration_expected:
                order.progress = order.duration * 100 / order.duration_expected
            else:
                order.progress = 0

    def _compute_working_users(self):
        """ Checks whether the current user is working, all the users
        currently working and the last user that worked. """
        for order in self:
            order.working_user_ids = [
                (4, order.id) for order in order.time_ids.filtered(
                    lambda time: not time.date_end).sorted('date_start').mapped('user_id')
            ]
            if order.working_user_ids:
                order.last_working_user_id = order.working_user_ids[-1]
            elif order.time_ids:
                order.last_working_user_id = order.time_ids.sorted('date_end')[-1].user_id
            else:
                order.last_working_user_id = False
            # Current user is "working" when they have an open productive or
            # performance time entry on this workorder.
            if order.time_ids.filtered(
                    lambda x: (x.user_id.id == self.env.user.id)
                    and (not x.date_end)
                    and (x.loss_type in ('productive', 'performance'))):
                order.is_user_working = True
            else:
                order.is_user_working = False

    def _compute_scrap_move_count(self):
        # One read_group call instead of one count query per workorder.
        data = self.env['stock.scrap'].read_group(
            [('workorder_id', 'in', self.ids)], ['workorder_id'], ['workorder_id'])
        count_data = dict((item['workorder_id'][0], item['workorder_id_count'])
                          for item in data)
        for workorder in self:
            workorder.scrap_count = count_data.get(workorder.id, 0)

    @api.depends('date_planned_finished', 'production_id.date_planned_finished')
    def _compute_color(self):
        # Color 4 = late (planned to finish after the MO's deadline), 2 = on time.
        late_orders = self.filtered(
            lambda x: x.production_id.date_planned_finished
            and x.date_planned_finished
            and x.date_planned_finished > x.production_id.date_planned_finished)
        for order in late_orders:
            order.color = 4
        for order in (self - late_orders):
            order.color = 2

    @api.onchange('date_planned_start', 'duration_expected')
    def _onchange_date_planned_start(self):
        if self.date_planned_start and self.duration_expected:
            self.date_planned_finished = self.date_planned_start + relativedelta(
                minutes=self.duration_expected)

    def write(self, values):
        """ Guard workorder updates: forbid moving to another MO, forbid
        workcenter changes on running/done orders, validate planned dates
        and propagate them to the production order. """
        if 'production_id' in values:
            raise UserError(
                _('You cannot link this work order to another manufacturing order.'
                  ))
        if 'workcenter_id' in values:
            for workorder in self:
                if workorder.workcenter_id.id != values['workcenter_id']:
                    if workorder.state in ('progress', 'done', 'cancel'):
                        raise UserError(
                            _('You cannot change the workcenter of a work order that is in progress or done.'
                              ))
                    # Keep the planning slot on the new workcenter's resource.
                    workorder.leave_id.resource_id = self.env['mrp.workcenter'].browse(
                        values['workcenter_id']).resource_id
        # Time tracking is the only thing that may still be written on a
        # finished work order.
        if list(values.keys()) != ['time_ids'] and any(
                workorder.state == 'done' for workorder in self):
            raise UserError(_('You can not change the finished work order.'))
        if 'date_planned_start' in values or 'date_planned_finished' in values:
            for workorder in self:
                start_date = fields.Datetime.to_datetime(
                    values.get('date_planned_start')) or workorder.date_planned_start
                end_date = fields.Datetime.to_datetime(
                    values.get('date_planned_finished')) or workorder.date_planned_finished
                if start_date and end_date and start_date > end_date:
                    raise UserError(
                        _('The planned end date of the work order cannot be prior to the planned start date, please correct this to save the work order.'
                          ))
                # Update MO dates if the start date of the first WO or the
                # finished date of the last WO is update.
                if workorder == workorder.production_id.workorder_ids[0] and 'date_planned_start' in values:
                    workorder.production_id.with_context(force_date=True).write({
                        'date_planned_start':
                        fields.Datetime.to_datetime(values['date_planned_start'])
                    })
                if workorder == workorder.production_id.workorder_ids[-1] and 'date_planned_finished' in values:
                    workorder.production_id.with_context(force_date=True).write({
                        'date_planned_finished':
                        fields.Datetime.to_datetime(values['date_planned_finished'])
                    })
        return super(MrpWorkorder, self).write(values)

    def _generate_wo_lines(self):
        """ Generate workorder line """
        self.ensure_one()
        moves = (self.move_raw_ids | self.move_finished_ids).filtered(
            lambda move: move.state not in ('done', 'cancel'))
        for move in moves:
            qty_to_consume = self._prepare_component_quantity(move, self.qty_producing)
            line_values = self._generate_lines_values(move, qty_to_consume)
            self.env['mrp.workorder.line'].create(line_values)

    def _apply_update_workorder_lines(self):
        """ update existing line on the workorder. It could be trigger
        manually after a modification of qty_producing. """
        self.ensure_one()
        line_values = self._update_workorder_lines()
        self.env['mrp.workorder.line'].create(line_values['to_create'])
        if line_values['to_delete']:
            line_values['to_delete'].unlink()
        for line, vals in line_values['to_update'].items():
            line.write(vals)

    def _refresh_wo_lines(self):
        """ Modify exisiting workorder line in order to match the reservation
        on stock move line. The strategy is to remove the line that were not
        processed yet then call _generate_lines_values that recreate workorder
        line depending the reservation. """
        for workorder in self:
            raw_moves = workorder.move_raw_ids.filtered(
                lambda move: move.state not in ('done', 'cancel'))
            wl_to_unlink = self.env['mrp.workorder.line']
            for move in raw_moves:
                rounding = move.product_uom.rounding
                qty_already_consumed = 0.0
                workorder_lines = workorder.raw_workorder_line_ids.filtered(
                    lambda w: w.move_id == move)
                for wl in workorder_lines:
                    # Untouched lines (no qty_done) are dropped and rebuilt
                    # from the current reservation below.
                    if not wl.qty_done:
                        wl_to_unlink |= wl
                        continue
                    qty_already_consumed += wl.qty_done
                qty_to_consume = self._prepare_component_quantity(
                    move, workorder.qty_producing)
                wl_to_unlink.unlink()
                if float_compare(qty_to_consume, qty_already_consumed,
                                 precision_rounding=rounding) > 0:
                    line_values = workorder._generate_lines_values(
                        move, qty_to_consume - qty_already_consumed)
                    self.env['mrp.workorder.line'].create(line_values)

    def _defaults_from_finished_workorder_line(self, reference_lot_lines):
        """ Suggest a finished lot and qty_producing on self based on the
        finished lines of another workorder; returns True if a suggestion
        was applied. """
        for r_line in reference_lot_lines:
            # see which lot we could suggest and its related qty_producing
            if not r_line.lot_id:
                continue
            candidates = self.finished_workorder_line_ids.filtered(
                lambda line: line.lot_id == r_line.lot_id)
            rounding = self.product_uom_id.rounding
            if not candidates:
                self.write({
                    'finished_lot_id': r_line.lot_id.id,
                    'qty_producing': r_line.qty_done,
                })
                return True
            elif float_compare(candidates.qty_done, r_line.qty_done,
                               precision_rounding=rounding) < 0:
                self.write({
                    'finished_lot_id': r_line.lot_id.id,
                    'qty_producing': r_line.qty_done - candidates.qty_done,
                })
                return True
        return False

    def record_production(self):
        """ Register the currently produced quantity on this workorder,
        update stock moves/lots, chain to the next workorder, and finish
        the workorder when the full MO quantity is reached. """
        if not self:
            return True

        self.ensure_one()
        self._check_company()
        if float_compare(self.qty_producing, 0,
                         precision_rounding=self.product_uom_id.rounding) <= 0:
            raise UserError(
                _('Please set the quantity you are currently producing. It should be different from zero.'
                  ))

        # If last work order, then post lots used
        if not self.next_work_order_id:
            self._update_finished_move()

        # Transfer quantities from temporary to final move line or make them final
        self._update_moves()

        # Transfer lot (if present) and quantity produced to a finished workorder line
        if self.product_tracking != 'none':
            self._create_or_update_finished_line()

        # Update workorder quantity produced
        self.qty_produced += self.qty_producing

        # Suggest a finished lot on the next workorder
        if self.next_work_order_id and self.product_tracking != 'none' and (
                not self.next_work_order_id.finished_lot_id
                or self.next_work_order_id.finished_lot_id == self.finished_lot_id):
            self.next_work_order_id._defaults_from_finished_workorder_line(
                self.finished_workorder_line_ids)
            # As we may have changed the quantity to produce on the next workorder,
            # make sure to update its wokorder lines
            self.next_work_order_id._apply_update_workorder_lines()

        # One a piece is produced, you can launch the next work order
        self._start_nextworkorder()

        # Test if the production is done
        rounding = self.production_id.product_uom_id.rounding
        if float_compare(self.qty_produced,
                         self.production_id.product_qty,
                         precision_rounding=rounding) < 0:
            previous_wo = self.env['mrp.workorder']
            if self.product_tracking != 'none':
                previous_wo = self.env['mrp.workorder'].search([
                    ('next_work_order_id', '=', self.id)
                ])
            candidate_found_in_previous_wo = False
            if previous_wo:
                candidate_found_in_previous_wo = self._defaults_from_finished_workorder_line(
                    previous_wo.finished_workorder_line_ids)
            if not candidate_found_in_previous_wo:
                # self is the first workorder
                self.qty_producing = self.qty_remaining
                self.finished_lot_id = False
                if self.product_tracking == 'serial':
                    self.qty_producing = 1

            self._apply_update_workorder_lines()
        else:
            self.qty_producing = 0
            self.button_finish()
        return True

    def _get_byproduct_move_to_update(self):
        # By-product moves = finished moves that are not the MO's main product
        # and are still open.
        return self.production_id.move_finished_ids.filtered(
            lambda x: (x.product_id.id != self.production_id.product_id.id)
            and (x.state not in ('done', 'cancel')))

    def _create_or_update_finished_line(self):
        """ 1. Check that the final lot and the quantity producing is valid
        regarding other workorders of this production
        2. Save final lot and quantity producing to suggest on next workorder
        """
        self.ensure_one()
        final_lot_quantity = self.qty_production
        rounding = self.product_uom_id.rounding
        # Get the max quantity possible for current lot in other workorders
        for workorder in (self.production_id.workorder_ids - self):
            # We add the remaining quantity to the produced quantity for the
            # current lot. For 5 finished products: if in the first wo it
            # creates 4 lot A and 1 lot B and in the second it create 3 lot A
            # and it remains 2 units to product, it could produce 5 lot A.
            # In this case we select 4 since it would conflict with the first
            # workorder otherwise.
            line = workorder.finished_workorder_line_ids.filtered(
                lambda line: line.lot_id == self.finished_lot_id)
            line_without_lot = workorder.finished_workorder_line_ids.filtered(
                lambda line: line.product_id == workorder.product_id and not line.lot_id)
            quantity_remaining = workorder.qty_remaining + line_without_lot.qty_done
            quantity = line.qty_done + quantity_remaining
            if line and float_compare(quantity, final_lot_quantity,
                                      precision_rounding=rounding) <= 0:
                final_lot_quantity = quantity
            elif float_compare(quantity_remaining, final_lot_quantity,
                               precision_rounding=rounding) < 0:
                final_lot_quantity = quantity_remaining

        # final lot line for this lot on this workorder.
        current_lot_lines = self.finished_workorder_line_ids.filtered(
            lambda line: line.lot_id == self.finished_lot_id)

        # this lot has already been produced
        if float_compare(final_lot_quantity,
                         current_lot_lines.qty_done + self.qty_producing,
                         precision_rounding=rounding) < 0:
            raise UserError(
                _('You have produced %s %s of lot %s in the previous workorder. You are trying to produce %s in this one'
                  ) % (final_lot_quantity, self.product_id.uom_id.name,
                       self.finished_lot_id.name,
                       current_lot_lines.qty_done + self.qty_producing))

        # Update workorder line that regiter final lot created
        if not current_lot_lines:
            current_lot_lines = self.env['mrp.workorder.line'].create({
                'finished_workorder_id': self.id,
                'product_id': self.product_id.id,
                'lot_id': self.finished_lot_id.id,
                'qty_done': self.qty_producing,
            })
        else:
            current_lot_lines.qty_done += self.qty_producing

    def _start_nextworkorder(self):
        # Move the next workorder to 'ready' once this one has produced its
        # full quantity (batch 'no') or at least one batch (batch 'yes').
        rounding = self.product_id.uom_id.rounding
        if self.next_work_order_id.state == 'pending' and (
                (self.operation_id.batch == 'no' and
                 float_compare(self.qty_production, self.qty_produced,
                               precision_rounding=rounding) <= 0) or
                (self.operation_id.batch == 'yes' and
                 float_compare(self.operation_id.batch_size, self.qty_produced,
                               precision_rounding=rounding) <= 0)):
            self.next_work_order_id.state = 'ready'

    def button_start(self):
        # NOTE(review): this method is truncated at the end of this chunk;
        # its remainder lies outside the visible range.
        self.ensure_one()
        # As button_start is automatically called in the new view
        if self.state in ('done', 'cancel'):
            return True
        # Need a loss in case of the real time exceeding the expected
        timeline = self.env['mrp.workcenter.productivity']
        if self.duration < self.duration_expected:
            loss_id = self.env['mrp.workcenter.productivity.loss'].search(
                [('loss_type', '=', 'productive')], limit=1)
            if not len(loss_id):
                raise UserError(
                    _("You need to define at least one productivity loss in the category 'Productivity'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."
                      ))
        else:
            loss_id = self.env['mrp.workcenter.productivity.loss'].search(
                [('loss_type', '=', 'performance')], limit=1)
            if not len(loss_id):
                raise UserError(
                    _("You need to define at least one productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses.
)) if self.production_id.state != 'progress': self.production_id.write({ 'date_start': datetime.now(), }) timeline.create({ 'workorder_id': self.id, 'workcenter_id': self.workcenter_id.id, 'description': _('Time Tracking: ') + self.env.user.name, 'loss_id': loss_id[0].id, 'date_start': datetime.now(), 'user_id': self.env.user.id, # FIXME sle: can be inconsistent with company_id 'company_id': self.company_id.id, }) if self.state == 'progress': return True else: start_date = datetime.now() vals = { 'state': 'progress', 'date_start': start_date, 'date_planned_start': start_date, } if self.date_planned_finished < start_date: vals['date_planned_finished'] = start_date return self.write(vals) def button_finish(self): self.ensure_one() self.end_all() end_date = datetime.now() return self.write({ 'state': 'done', 'date_finished': end_date, 'date_planned_finished': end_date }) def end_previous(self, doall=False): """ @param: doall: This will close all open time lines on the open work orders when doall = True, otherwise only the one of the current user """ # TDE CLEANME timeline_obj = self.env['mrp.workcenter.productivity'] domain = [('workorder_id', 'in', self.ids), ('date_end', '=', False)] if not doall: domain.append(('user_id', '=', self.env.user.id)) not_productive_timelines = timeline_obj.browse() for timeline in timeline_obj.search(domain, limit=None if doall else 1): wo = timeline.workorder_id if wo.duration_expected <= wo.duration: if timeline.loss_type == 'productive': not_productive_timelines += timeline timeline.write({'date_end': fields.Datetime.now()}) else: maxdate = fields.Datetime.from_string( timeline.date_start) + relativedelta( minutes=wo.duration_expected - wo.duration) enddate = datetime.now() if maxdate > enddate: timeline.write({'date_end': enddate}) else: timeline.write({'date_end': maxdate}) not_productive_timelines += timeline.copy({ 'date_start': maxdate, 'date_end': enddate }) if not_productive_timelines: loss_id = 
self.env['mrp.workcenter.productivity.loss'].search( [('loss_type', '=', 'performance')], limit=1) if not len(loss_id): raise UserError( _("You need to define at least one unactive productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses." )) not_productive_timelines.write({'loss_id': loss_id.id}) return True def end_all(self): return self.end_previous(doall=True) def button_pending(self): self.end_previous() return True def button_unblock(self): for order in self: order.workcenter_id.unblock() return True def action_cancel(self): self.leave_id.unlink() return self.write({'state': 'cancel'}) def button_done(self): if any([x.state in ('done', 'cancel') for x in self]): raise UserError( _('A Manufacturing Order is already done or cancelled.')) self.end_all() end_date = datetime.now() return self.write({ 'state': 'done', 'date_finished': end_date, 'date_planned_finished': end_date, }) def button_scrap(self): self.ensure_one() return { 'name': _('Scrap'), 'view_mode': 'form', 'res_model': 'stock.scrap', 'view_id': self.env.ref('stock.stock_scrap_form_view2').id, 'type': 'ir.actions.act_window', 'context': { 'default_company_id': self.production_id.company_id.id, 'default_workorder_id': self.id, 'default_production_id': self.production_id.id, 'product_ids': (self.production_id.move_raw_ids.filtered( lambda x: x.state not in ('done', 'cancel')) | self.production_id.move_finished_ids.filtered( lambda x: x.state == 'done')).mapped('product_id').ids }, 'target': 'new', } def action_see_move_scrap(self): self.ensure_one() action = self.env.ref('stock.action_stock_scrap').read()[0] action['domain'] = [('workorder_id', '=', self.id)] return action @api.depends('qty_production', 'qty_produced') def _compute_qty_remaining(self): for wo in self: wo.qty_remaining = float_round( wo.qty_production - wo.qty_produced, precision_rounding=wo.production_id.product_uom_id.rounding)
class SnailmailLetter(models.Model):
    """A letter queued to be printed and sent by post via the IAP Snailmail
    service. Stores the recipient address snapshot, the PDF attachment and
    the send status/error reported by the service."""
    _name = 'snailmail.letter'
    _description = 'Snailmail Letter'

    user_id = fields.Many2one('res.users', 'Sent by')
    model = fields.Char('Model', required=True)
    res_id = fields.Integer('Document ID', required=True)
    partner_id = fields.Many2one('res.partner', string='Recipient', required=True)
    company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True,
                                 default=lambda self: self.env.company.id)
    report_template = fields.Many2one('ir.actions.report', 'Optional report to print and attach')

    attachment_id = fields.Many2one('ir.attachment', string='Attachment', ondelete='cascade')
    attachment_datas = fields.Binary('Document', related='attachment_id.datas')
    attachment_fname = fields.Char('Attachment Filename', related='attachment_id.name')
    color = fields.Boolean(string='Color', default=lambda self: self.env.company.snailmail_color)
    cover = fields.Boolean(string='Cover Page', default=lambda self: self.env.company.snailmail_cover)
    duplex = fields.Boolean(string='Both side', default=lambda self: self.env.company.snailmail_duplex)
    state = fields.Selection([
        ('pending', 'In Queue'),
        ('sent', 'Sent'),
        ('error', 'Error'),
        ('canceled', 'Canceled')
        ], 'Status', readonly=True, copy=False, default='pending', required=True,
        help="When a letter is created, the status is 'Pending'.\n"
             "If the letter is correctly sent, the status goes in 'Sent',\n"
             "If not, it will got in state 'Error' and the error message will be displayed in the field 'Error Message'.")
    error_code = fields.Selection([(err_code, err_code) for err_code in ERROR_CODES], string="Error")
    info_msg = fields.Char('Information')
    display_name = fields.Char('Display Name', compute="_compute_display_name")

    reference = fields.Char(string='Related Record', compute='_compute_reference', readonly=True, store=False)

    message_id = fields.Many2one('mail.message', string="Snailmail Status Message")

    # Snapshot of the recipient address at creation time.
    street = fields.Char('Street')
    street2 = fields.Char('Street2')
    zip = fields.Char('Zip')
    city = fields.Char('City')
    state_id = fields.Many2one("res.country.state", string='State')
    country_id = fields.Many2one('res.country', string='Country')

    @api.depends('reference', 'partner_id')
    def _compute_display_name(self):
        """Display name: '<attachment> - <partner>' when an attachment
        exists, otherwise just the partner name."""
        for letter in self:
            if letter.attachment_id:
                letter.display_name = "%s - %s" % (letter.attachment_id.name, letter.partner_id.name)
            else:
                letter.display_name = letter.partner_id.name

    @api.depends('model', 'res_id')
    def _compute_reference(self):
        """Reference in the usual 'model,res_id' external format."""
        for res in self:
            res.reference = "%s,%s" % (res.model, res.res_id)

    @api.model
    def create(self, vals):
        """Create the letter, post a 'sent by post' message in the source
        record's chatter, and snapshot the recipient partner's address."""
        msg_id = self.env[vals['model']].browse(vals['res_id']).message_post(
            body=_("Letter sent by post with Snailmail"),
            message_type='snailmail')
        partner_id = self.env['res.partner'].browse(vals['partner_id'])
        vals.update({
            'message_id': msg_id.id,
            'street': partner_id.street,
            'street2': partner_id.street2,
            'zip': partner_id.zip,
            'city': partner_id.city,
            'state_id': partner_id.state_id.id,
            'country_id': partner_id.country_id.id,
        })
        return super(SnailmailLetter, self).create(vals)

    def _fetch_attachment(self):
        """
        This method will check if we have any existent attachement matching the model
        and res_ids and create them if not found.

        :return: the ir.attachment record holding the letter PDF, or False
            when no report could be resolved.
        """
        self.ensure_one()
        obj = self.env[self.model].browse(self.res_id)
        if not self.attachment_id:
            report = self.report_template
            if not report:
                report_name = self.env.context.get('report_name')
                report = self.env['ir.actions.report']._get_report_from_name(report_name)
                if not report:
                    return False
                else:
                    self.write({'report_template': report.id})
            # report = self.env.ref('account.account_invoices')
            if report.print_report_name:
                report_name = safe_eval(report.print_report_name, {'object': obj})
            elif report.attachment:
                report_name = safe_eval(report.attachment, {'object': obj})
            else:
                report_name = 'Document'
            filename = "%s.%s" % (report_name, "pdf")
            # FIX: do not shadow the gettext `_` with the unused report type.
            pdf_bin, _unused_type = report.with_context(
                snailmail_layout=not self.cover).render_qweb_pdf(self.res_id)
            attachment = self.env['ir.attachment'].create({
                'name': filename,
                'datas': base64.b64encode(pdf_bin),
                'res_model': 'snailmail.letter',
                'res_id': self.id,
                'type': 'binary',  # override default_type from context, possibly meant for another model!
            })
            self.write({'attachment_id': attachment.id})
        return self.attachment_id

    def _count_pages_pdf(self, bin_pdf):
        """ Count the number of pages of the given pdf file.
            :param bin_pdf : binary content of the pdf file
        """
        pages = 0
        # FIX: raw bytes pattern -- b"...\s..." contains invalid escape
        # sequences that raise a DeprecationWarning (error in future Python).
        for match in re.compile(rb"/Count\s+(\d+)").finditer(bin_pdf):
            pages = int(match.group(1))
        return pages

    def _snailmail_create(self, route):
        """
        Create a dictionnary object to send to snailmail server.

        :param route: 'estimate' (price quote, 1 page per doc) or 'print'
            (attach the rendered PDF).
        :return: Dict in the form:
        {
            account_token: string,    //IAP Account token of the user
            documents: [{
                pages: int,
                pdf_bin: pdf file
                res_id: int (client-side res_id),
                res_model: char (client-side res_model),
                address: {
                    name: char, street: char, street2: char (OPTIONAL),
                    zip: int, city: char, state: char (state code (OPTIONAL)),
                    country_code: char (country code)
                }
                return_address: { same keys as address }
            }],
            options: {
                color: boolean (true if color, false if black-white),
                duplex: boolean (true if duplex, false otherwise),
                currency_name: char
            }
        }
        """
        account_token = self.env['iap.account'].get('snailmail').account_token
        dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
        documents = []
        # NOTE: a `batch = len(self) > 1` local existed here but was never
        # used ('batch' is hardcoded to True below); removed as dead code.
        for letter in self:
            document = {
                # generic informations to send
                'letter_id': letter.id,
                'res_model': letter.model,
                'res_id': letter.res_id,
                'contact_address': letter.partner_id.with_context(
                    snailmail_layout=True, show_address=True).name_get()[0][1],
                'address': {
                    'name': letter.partner_id.name,
                    'street': letter.partner_id.street,
                    'street2': letter.partner_id.street2,
                    'zip': letter.partner_id.zip,
                    'state': letter.partner_id.state_id.code if letter.partner_id.state_id else False,
                    'city': letter.partner_id.city,
                    'country_code': letter.partner_id.country_id.code
                },
                'return_address': {
                    'name': letter.company_id.partner_id.name,
                    'street': letter.company_id.partner_id.street,
                    'street2': letter.company_id.partner_id.street2,
                    'zip': letter.company_id.partner_id.zip,
                    'state': letter.company_id.partner_id.state_id.code if letter.company_id.partner_id.state_id else False,
                    'city': letter.company_id.partner_id.city,
                    'country_code': letter.company_id.partner_id.country_id.code,
                }
            }
            # Specific to each case:
            # If we are estimating the price: 1 object = 1 page
            # If we are printing -> attach the pdf
            if route == 'estimate':
                document.update(pages=1)
            else:
                # adding the web logo from the company for future possible customization
                document.update({
                    'company_logo': letter.company_id.logo_web and letter.company_id.logo_web.decode('utf-8') or False,
                })
                attachment = letter._fetch_attachment()
                if attachment:
                    document.update({
                        'pdf_bin': route == 'print' and attachment.datas.decode('utf-8'),
                        'pages': route == 'estimate' and self._count_pages_pdf(
                            base64.b64decode(attachment.datas)),
                    })
                else:
                    # No PDF could be produced: flag the letter and skip it.
                    letter.write({
                        'info_msg': 'The attachment could not be generated.',
                        'state': 'error',
                        'error_code': 'ATTACHMENT_ERROR'
                    })
                    continue
            if letter.company_id.external_report_layout_id == self.env.ref(
                    'l10n_de.external_layout_din5008', False):
                document.update({
                    'rightaddress': 0,
                })
            documents.append(document)

        return {
            'account_token': account_token,
            'dbuuid': dbuuid,
            'documents': documents,
            'options': {
                'color': self and self[0].color,
                'cover': self and self[0].cover,
                'duplex': self and self[0].duplex,
                'currency_name': 'EUR',
            },
            # this will not raise the InsufficientCreditError which is the behaviour we want for now
            'batch': True,
        }

    def _get_error_message(self, error):
        """Map a service error code to a translated, user-facing message.

        FIX: interpolation was done *inside* _() which defeats translation
        lookup; it now happens after translation. The unreachable
        `return error` after the exhaustive if/else chain was removed.
        """
        if error == 'CREDIT_ERROR':
            link = self.env['iap.account'].get_credits_url(service_name='snailmail')
            return _('You don\'t have enough credits to perform this operation.<br>Please go to your <a href=%s target="new">iap account</a>.') % link
        if error == 'TRIAL_ERROR':
            link = self.env['iap.account'].get_credits_url(service_name='snailmail', trial=True)
            return _('You don\'t have an IAP account registered for this service.<br>Please go to <a href=%s target="new">iap.coffice.com</a> to claim your free credits.') % link
        if error == 'NO_PRICE_AVAILABLE':
            return _('The country of the partner is not covered by Snailmail.')
        if error == 'MISSING_REQUIRED_FIELDS':
            return _('One or more required fields are empty.')
        if error == 'FORMAT_ERROR':
            return _('The attachment of the letter could not be sent. Please check its content and contact the support if the problem persists.')
        return _('An unknown error happened. Please contact the support.')

    def _snailmail_print(self, immediate=True):
        """Send letters with a valid address; flag the others in error."""
        valid_address_letters = self.filtered(lambda l: l._is_valid_address(l))
        invalid_address_letters = self - valid_address_letters
        invalid_address_letters._snailmail_print_invalid_address()
        if valid_address_letters and immediate:
            valid_address_letters._snailmail_print_valid_address()
            self.env.cr.commit()

    def _snailmail_print_invalid_address(self):
        """Mark letters as failed for incomplete recipient address and
        broadcast the status update."""
        for letter in self:
            letter.write({
                'state': 'error',
                'error_code': 'MISSING_REQUIRED_FIELDS',
                'info_msg': _('The address of the recipient is not complete')
            })
        self.send_snailmail_update()

    def _snailmail_print_valid_address(self):
        """
        get response
        {
            'request_code': RESPONSE_OK, # because we receive 200 if good or fail
            'total_cost': total_cost,
            'credit_error': credit_error,
            'request': {
                'documents': documents,
                'options': options
            }
        }
        """
        endpoint = self.env['ir.config_parameter'].sudo().get_param(
            'snailmail.endpoint', DEFAULT_ENDPOINT)
        params = self._snailmail_create('print')
        response = jsonrpc(endpoint + PRINT_ENDPOINT, params=params)
        for doc in response['request']['documents']:
            if doc.get('sent') and response['request_code'] == 200:
                # FIX: interpolate after _() so the message is translatable.
                note = _('The document was correctly sent by post.<br>The tracking id is %s') % doc['send_id']
                letter_data = {'info_msg': note, 'state': 'sent', 'error_code': False}
            else:
                error = doc['error'] if response['request_code'] == 200 else response['reason']
                note = _('An error occured when sending the document by post.<br>Error: %s') % self._get_error_message(error)
                letter_data = {
                    'info_msg': note,
                    'state': 'error',
                    'error_code': error if error in ERROR_CODES else 'UNKNOWN_ERROR'
                }
            letter = self.browse(doc['letter_id'])
            letter.write(letter_data)
        self.send_snailmail_update()

    def send_snailmail_update(self):
        """Notify each letter's author over the bus with its failure state."""
        notifications = []
        for letter in self:
            notifications.append([
                (self._cr.dbname, 'res.partner', letter.user_id.partner_id.id),
                {'type': 'snailmail_update', 'elements': letter._format_snailmail_failures()}
            ])
        self.env['bus.bus'].sendmany(notifications)

    def snailmail_print(self):
        self._snailmail_print()

    def cancel(self):
        """Cancel the letters and broadcast the status update."""
        self.write({'state': 'canceled', 'error_code': False})
        self.send_snailmail_update()

    @api.model
    def _snailmail_cron(self):
        """Cron entry: retry pending letters and the retriable error codes."""
        letters_send = self.search([
            '|',
            ('state', '=', 'pending'),
            '&',
            ('state', '=', 'error'),
            ('error_code', 'in', ['TRIAL_ERROR', 'CREDIT_ERROR', 'ATTACHMENT_ERROR', 'MISSING_REQUIRED_FIELDS'])
        ])
        letters_send._snailmail_print()

    @api.model
    def fetch_failed_letters(self):
        """Return the formatted failures of the current user's failed letters."""
        failed_letters = self.search([
            ('state', '=', 'error'),
            ('user_id.id', '=', self.env.user.id),
            ('res_id', '!=', 0),
            ('model', '!=', False)])
        return failed_letters._format_snailmail_failures()

    @api.model
    def _is_valid_address(self, record):
        """A record's address is valid when street, city, zip and country
        are all set."""
        record.ensure_one()
        required_keys = ['street', 'city', 'zip', 'country_id']
        return all(record[key] for key in required_keys)

    def _format_snailmail_failures(self):
        """
        A shorter message to notify a failure update
        """
        failures_infos = []
        for letter in self:
            info = {
                'message_id': letter.message_id.id,
                'record_name': letter.message_id.record_name,
                'model_name': self.env['ir.model']._get(letter.model).display_name,
                'uuid': letter.message_id.message_id,
                'res_id': letter.res_id,
                'model': letter.model,
                'last_message_date': letter.message_id.date,
                'module_icon': '/snailmail/static/img/snailmail_failure.png',
                'snailmail_status': letter.error_code if letter.state == 'error' else '',
                'snailmail_error': letter.state == 'error',
                'failure_type': 'snailmail',
            }
            failures_infos.append(info)
        return failures_infos
class RatingMixin(models.AbstractModel):
    """Mixin adding customer-rating support to any model: a one2many of
    ratings plus last-value / count / average computed fields and helpers
    to request, apply and aggregate ratings."""
    _name = 'rating.mixin'
    _description = "Rating Mixin"

    rating_ids = fields.One2many(
        'rating.rating', 'res_id', string='Rating',
        domain=lambda self: [('res_model', '=', self._name)], auto_join=True)
    rating_last_value = fields.Float(
        'Rating Last Value', compute='_compute_rating_last_value', compute_sudo=True, store=True)
    rating_last_feedback = fields.Text(
        'Rating Last Feedback', related='rating_ids.feedback')
    rating_last_image = fields.Binary(
        'Rating Last Image', related='rating_ids.rating_image')
    rating_count = fields.Integer('Rating count', compute="_compute_rating_stats")
    rating_avg = fields.Float("Rating Average", compute='_compute_rating_stats')

    @api.depends('rating_ids.rating')
    def _compute_rating_last_value(self):
        # Most recent rating value per record (search default order), 0 if none.
        for record in self:
            ratings = self.env['rating.rating'].search(
                [('res_model', '=', self._name), ('res_id', '=', record.id)],
                limit=1)
            record.rating_last_value = ratings and ratings.rating or 0

    @api.depends('rating_ids')
    def _compute_rating_stats(self):
        """ Compute avg and count in one query, as those fields will be used together most of the time. """
        domain = self._rating_domain()
        read_group_res = self.env['rating.rating'].read_group(
            domain, ['rating:avg'], groupby=['res_id'],
            lazy=False)  # force average on rating column
        mapping = {
            item['res_id']: {
                'rating_count': item['__count'],
                'rating_avg': item['rating']
            }
            for item in read_group_res
        }
        for record in self:
            record.rating_count = mapping.get(record.id, {}).get('rating_count', 0)
            record.rating_avg = mapping.get(record.id, {}).get('rating_avg', 0)

    def write(self, values):
        """ If the rated resource name is modified, we should update the rating res_name too.
            If the rated resource parent is changed we should update the parent_res_id too"""
        # norecompute: defer field recomputation while the related ratings are
        # being realigned with the record's new name/parent.
        with self.env.norecompute():
            result = super(RatingMixin, self).write(values)
            for record in self:
                if record._rec_name in values:  # set the res_name of ratings to be recomputed
                    res_name_field = self.env['rating.rating']._fields['res_name']
                    self.env.add_to_compute(res_name_field, record.rating_ids)
                if record._rating_get_parent_field_name() in values:
                    record.rating_ids.write({
                        'parent_res_id': record[record._rating_get_parent_field_name()].id
                    })
        return result

    def unlink(self):
        """ When removing a record, its rating should be deleted too. """
        record_ids = self.ids
        result = super(RatingMixin, self).unlink()
        self.env['rating.rating'].sudo().search([
            ('res_model', '=', self._name), ('res_id', 'in', record_ids)
        ]).unlink()
        return result

    def _rating_get_parent_field_name(self):
        """Return the parent relation field name
           Should return a Many2One"""
        return None

    def _rating_domain(self):
        """ Returns a normalized domain on rating.rating to select the records to
            include in count, avg, ... computation of current model.
        """
        return [
            '&', '&',
            ('res_model', '=', self._name),
            ('res_id', 'in', self.ids),
            ('consumed', '=', True)
        ]

    def rating_get_partner_id(self):
        # Rating author: the record's partner_id when the field exists and is
        # set; empty partner recordset otherwise.
        if hasattr(self, 'partner_id') and self.partner_id:
            return self.partner_id
        return self.env['res.partner']

    def rating_get_rated_partner_id(self):
        # Rated person: the partner behind the record's user_id, if any.
        if hasattr(self, 'user_id') and self.user_id.partner_id:
            return self.user_id.partner_id
        return self.env['res.partner']

    def rating_get_access_token(self, partner=None):
        """Return (creating it if needed) the access token of a not-yet-consumed
        rating for the given partner on this record."""
        if not partner:
            partner = self.rating_get_partner_id()
        rated_partner = self.rating_get_rated_partner_id()
        ratings = self.rating_ids.filtered(
            lambda x: x.partner_id.id == partner.id and not x.consumed)
        if not ratings:
            record_model_id = self.env['ir.model'].sudo().search(
                [('model', '=', self._name)], limit=1).id
            rating = self.env['rating.rating'].create({
                'partner_id': partner.id,
                'rated_partner_id': rated_partner.id,
                'res_model_id': record_model_id,
                'res_id': self.id
            })
        else:
            rating = ratings[0]
        return rating.access_token

    def rating_send_request(self, template, lang=False, subtype_id=False,
                            force_send=True, composition_mode='comment',
                            notif_layout=None):
        """ This method send rating request by email, using a template given
        in parameter.

        :param template: a mail.template record used to compute the message body;
        :param lang: optional lang; it can also be specified directly on the template itself in the lang field;
        :param subtype_id: optional subtype to use when creating the message; is a note by default to avoid spamming followers;
        :param force_send: whether to send the request directly or use the mail queue cron (preferred option);
        :param composition_mode: comment (message_post) or mass_mail (template.send_mail);
        :param notif_layout: layout used to encapsulate the content when sending email;
        """
        if lang:
            template = template.with_context(lang=lang)
        if subtype_id is False:
            subtype_id = self.env['ir.model.data'].xmlid_to_res_id(
                'mail.mt_note')
        if force_send:
            self = self.with_context(
                mail_notify_force_send=True
            )  # default value is True, should be set to false if not?
        for record in self:
            record.message_post_with_template(
                template.id,
                composition_mode=composition_mode,
                email_layout_xmlid=notif_layout if notif_layout is not None
                else 'mail.mail_notification_light',
                subtype_id=subtype_id)

    def rating_apply(self, rate, token=None, feedback=None, subtype=None):
        """ Apply a rating given a token. If the current model inherits from
        mail.thread mixin, a message is posted on its chatter.

        :param rate : the rating value to apply
        :type rate : float
        :param token : access token
        :param feedback : additional feedback
        :type feedback : string
        :param subtype : subtype for mail
        :type subtype : string
        :returns rating.rating record
        """
        Rating, rating = self.env['rating.rating'], None
        if token:
            rating = self.env['rating.rating'].search(
                [('access_token', '=', token)], limit=1)
        else:
            rating = Rating.search([('res_model', '=', self._name),
                                    ('res_id', '=', self.ids[0])], limit=1)
        if rating:
            rating.write({
                'rating': rate,
                'feedback': feedback,
                'consumed': True
            })
            if hasattr(self, 'message_post'):
                feedback = tools.plaintext2html(feedback or '')
                self.message_post(
                    body=
                    "<img src='/rating/static/src/img/rating_%s.png' alt=':%s/10' style='width:18px;height:18px;float:left;margin-right: 5px;'/>%s"
                    % (rate, rate, feedback),
                    subtype=subtype or "mail.mt_comment",
                    author_id=rating.partner_id and rating.partner_id.id
                    or None  # None will set the default author in mail_thread.py
                )
            # Auto-validate the kanban state on stages configured for it;
            # a rating of exactly 5 changes nothing (neither branch fires).
            if hasattr(self, 'stage_id') and self.stage_id and hasattr(
                    self.stage_id, 'auto_validation_kanban_state'
            ) and self.stage_id.auto_validation_kanban_state:
                if rating.rating > 5:
                    self.write({'kanban_state': 'done'})
                if rating.rating < 5:
                    self.write({'kanban_state': 'blocked'})
        return rating

    def rating_get_repartition(self, add_stats=False, domain=None):
        """ get the repartition of rating grade for the given res_ids.

        :param add_stats : flag to add stat to the result
        :type add_stats : boolean
        :param domain : optional extra domain of the rating to include/exclude in repartition
        :return dictionary
            if not add_stats, the dict is like
                - key is the rating value (integer)
                - value is the number of object (res_model, res_id) having the value
            otherwise, key is the value of the information (string) : either stat name (avg, total, ...) or
            'repartition' containing the same dict if add_stats was False.
        """
        base_domain = expression.AND(
            [self._rating_domain(), [('rating', '>=', 1)]])
        if domain:
            base_domain += domain
        data = self.env['rating.rating'].read_group(base_domain, ['rating'],
                                                    ['rating', 'res_id'])
        # init dict with all posible rate value, except 0 (no value for the rating)
        values = dict.fromkeys(range(1, 11), 0)
        values.update((d['rating'], d['rating_count']) for d in data)
        # add other stats
        if add_stats:
            rating_number = sum(values.values())
            result = {
                'repartition': values,
                'avg': sum(float(key * values[key]) for key in values) /
                       rating_number if rating_number > 0 else 0,
                'total': sum(it['rating_count'] for it in data),
            }
            return result
        return values

    def rating_get_grades(self, domain=None):
        """ get the repartition of rating grade for the given res_ids.

        :param domain : optional domain of the rating to include/exclude in grades computation
        :return dictionary where the key is the grade (great, okay, bad), and the value, the number of
            object (res_model, res_id) having the grade
            the grade are compute as
                0-30% : Bad
                31-69%: Okay
                70-100%: Great
        """
        data = self.rating_get_repartition(domain=domain)
        res = dict.fromkeys(['great', 'okay', 'bad'], 0)
        for key in data:
            if key >= RATING_LIMIT_SATISFIED:
                res['great'] += data[key]
            elif key > RATING_LIMIT_OK:
                res['okay'] += data[key]
            else:
                res['bad'] += data[key]
        return res

    def rating_get_stats(self, domain=None):
        """ get the statistics of the rating repartition

        :param domain : optional domain of the rating to include/exclude in statistic computation
        :return dictionary where
            - key is the name of the information (stat name)
            - value is statistic value : 'percent' contains the repartition in percentage,
              'avg' is the average rate and 'total' is the number of rating
        """
        data = self.rating_get_repartition(domain=domain, add_stats=True)
        result = {
            'avg': data['avg'],
            'total': data['total'],
            'percent': dict.fromkeys(range(1, 11), 0),
        }
        for rate in data['repartition']:
            result['percent'][rate] = (data['repartition'][rate] *
                                       100) / data['total'] if data['total'] > 0 else 0
        return result
class Rating(models.Model):
    """ A single customer rating attached to an arbitrary record
    (``res_model``/``res_id``), optionally linked to a parent document and to
    the mail.message that carried the rating request.
    """
    _name = "rating.rating"
    _description = "Rating"
    _order = 'write_date desc'
    _rec_name = 'res_name'
    _sql_constraints = [
        ('rating_range', 'check(rating >= 0 and rating <= 10)', 'Rating should be between 0 to 10'),
    ]

    @api.depends('res_model', 'res_id')
    def _compute_res_name(self):
        # display name of the rated record, falling back to "model/id" when
        # the record has no name (or no longer exists)
        for rating in self:
            name = self.env[rating.res_model].sudo().browse(rating.res_id).name_get()
            rating.res_name = name and name[0][1] or ('%s/%s') % (rating.res_model, rating.res_id)

    @api.model
    def _default_access_token(self):
        # random token used to authenticate anonymous rating submissions
        return uuid.uuid4().hex

    res_name = fields.Char(string='Resource name', compute='_compute_res_name', store=True, help="The name of the rated resource.")
    res_model_id = fields.Many2one('ir.model', 'Related Document Model', index=True, ondelete='cascade', help='Model of the followed resource')
    res_model = fields.Char(string='Document Model', related='res_model_id.model', store=True, index=True, readonly=True)
    res_id = fields.Integer(string='Document', required=True, help="Identifier of the rated object", index=True)
    parent_res_name = fields.Char('Parent Document Name', compute='_compute_parent_res_name', store=True)
    parent_res_model_id = fields.Many2one('ir.model', 'Parent Related Document Model', index=True, ondelete='cascade')
    parent_res_model = fields.Char('Parent Document Model', store=True, related='parent_res_model_id.model', index=True, readonly=False)
    parent_res_id = fields.Integer('Parent Document', index=True)
    rated_partner_id = fields.Many2one('res.partner', string="Rated person", help="Owner of the rated resource")
    partner_id = fields.Many2one('res.partner', string='Customer', help="Author of the rating")
    rating = fields.Float(string="Rating Number", group_operator="avg", default=0, help="Rating value: 0=Unhappy, 10=Happy")
    rating_image = fields.Binary('Image', compute='_compute_rating_image')
    rating_text = fields.Selection([
        ('satisfied', 'Satisfied'),
        ('not_satisfied', 'Not satisfied'),
        ('highly_dissatisfied', 'Highly dissatisfied'),
        ('no_rating', 'No Rating yet')], string='Rating', store=True, compute='_compute_rating_text', readonly=True)
    feedback = fields.Text('Comment', help="Reason of the rating")
    message_id = fields.Many2one(
        'mail.message', string="Linked message", index=True, ondelete='cascade',
        help="Associated message when posting a review. Mainly used in website addons.")
    access_token = fields.Char('Security Token', default=_default_access_token, help="Access token to set the rating of the value")
    consumed = fields.Boolean(string="Filled Rating", help="Enabled if the rating has been filled.")

    @api.depends('parent_res_model', 'parent_res_id')
    def _compute_parent_res_name(self):
        # same fallback scheme as _compute_res_name, but only when a parent is set
        for rating in self:
            name = False
            if rating.parent_res_model and rating.parent_res_id:
                name = self.env[rating.parent_res_model].sudo().browse(rating.parent_res_id).name_get()
                name = name and name[0][1] or ('%s/%s') % (rating.parent_res_model, rating.parent_res_id)
            rating.parent_res_name = name

    @api.depends('rating')
    def _compute_rating_image(self):
        # Due to some new widgets, we may have ratings different from 0/1/5/10 (e.g. slide.channel review)
        # Let us have some custom rounding while finding a better solution for images.
        for rating in self:
            rating_for_img = 0
            if rating.rating >= 8:
                rating_for_img = 10
            elif rating.rating > 3:
                rating_for_img = 5
            elif rating.rating >= 1:
                rating_for_img = 1
            try:
                image_path = get_resource_path('rating', 'static/src/img', 'rating_%s.png' % rating_for_img)
                # use a context manager so the file handle is always closed
                # (the previous code leaked the handle on every call)
                with open(image_path, 'rb') as image_file:
                    rating.rating_image = base64.b64encode(image_file.read())
            except (IOError, OSError):
                rating.rating_image = False

    @api.depends('rating')
    def _compute_rating_text(self):
        # map the numeric rating onto the coarse satisfaction buckets
        for rating in self:
            if rating.rating >= RATING_LIMIT_SATISFIED:
                rating.rating_text = 'satisfied'
            elif rating.rating > RATING_LIMIT_OK:
                rating.rating_text = 'not_satisfied'
            elif rating.rating >= RATING_LIMIT_MIN:
                rating.rating_text = 'highly_dissatisfied'
            else:
                rating.rating_text = 'no_rating'

    @api.model
    def create(self, values):
        # resolve the parent document before creating, so the stored
        # parent_res_model_id/parent_res_id are consistent from the start
        if values.get('res_model_id') and values.get('res_id'):
            values.update(self._find_parent_data(values))
        return super(Rating, self).create(values)

    def write(self, values):
        # keep parent data in sync when the rated document is changed
        if values.get('res_model_id') and values.get('res_id'):
            values.update(self._find_parent_data(values))
        return super(Rating, self).write(values)

    def _find_parent_data(self, values):
        """ Determine the parent res_model/res_id, based on the values to create or write """
        current_model_name = self.env['ir.model'].sudo().browse(values['res_model_id']).model
        current_record = self.env[current_model_name].browse(values['res_id'])
        data = {
            'parent_res_model_id': False,
            'parent_res_id': False,
        }
        # the rated model opts into parent tracking by implementing
        # _rating_get_parent_field_name (returns the m2o field name or falsy)
        if hasattr(current_record, '_rating_get_parent_field_name'):
            current_record_parent = current_record._rating_get_parent_field_name()
            if current_record_parent:
                parent_res_model = getattr(current_record, current_record_parent)
                data['parent_res_model_id'] = self.env['ir.model']._get(parent_res_model._name).id
                data['parent_res_id'] = parent_res_model.id
        return data

    def reset(self):
        # wipe the rating value and issue a fresh token so the same request
        # can be answered again
        for record in self:
            record.write({
                'rating': 0,
                'access_token': record._default_access_token(),
                'feedback': False,
                'consumed': False,
            })

    def action_open_rated_object(self):
        """ Open the form view of the rated document. """
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'res_model': self.res_model,
            'res_id': self.res_id,
            'views': [[False, 'form']]
        }
class IrUiMenu(models.Model):
    """ Menu items of the user interface, organized as a tree via
    ``parent_id`` (with ``_parent_store`` enabled) and filtered per-user
    through group membership and action access rights.
    """
    _name = 'ir.ui.menu'
    _description = 'Menu'
    _order = "sequence,id"
    _parent_store = True

    def __init__(self, *args, **kwargs):
        super(IrUiMenu, self).__init__(*args, **kwargs)
        # invalidate the visible-menus cache whenever model access rules change
        self.pool['ir.model.access'].register_cache_clearing_method(self._name, 'clear_caches')

    name = fields.Char(string='Menu', required=True, translate=True)
    active = fields.Boolean(default=True)
    sequence = fields.Integer(default=10)
    child_id = fields.One2many('ir.ui.menu', 'parent_id', string='Child IDs')
    parent_id = fields.Many2one('ir.ui.menu', string='Parent Menu', index=True, ondelete="restrict")
    parent_path = fields.Char(index=True)
    groups_id = fields.Many2many('res.groups', 'ir_ui_menu_group_rel', 'menu_id', 'gid', string='Groups',
                                 help="If you have groups, the visibility of this menu will be based on these groups. "
                                      "If this field is empty, Coffice will compute visibility based on the related object's read access.")
    complete_name = fields.Char(compute='_compute_complete_name', string='Full Path')
    web_icon = fields.Char(string='Web Icon File')
    action = fields.Reference(selection=[('ir.actions.report', 'ir.actions.report'),
                                         ('ir.actions.act_window', 'ir.actions.act_window'),
                                         ('ir.actions.act_url', 'ir.actions.act_url'),
                                         ('ir.actions.server', 'ir.actions.server'),
                                         ('ir.actions.client', 'ir.actions.client')])
    web_icon_data = fields.Binary(string='Web Icon Image', attachment=True)

    @api.depends('name', 'parent_id.complete_name')
    def _compute_complete_name(self):
        for menu in self:
            menu.complete_name = menu._get_full_name()

    def _get_full_name(self, level=6):
        """ Return the full name of ``self`` (up to a certain level). """
        if level <= 0:
            return '...'
        if self.parent_id:
            return self.parent_id._get_full_name(level - 1) + MENU_ITEM_SEPARATOR + (self.name or "")
        else:
            return self.name

    def read_image(self, path):
        """ Read the icon image for a ``module,path`` specification and return
            it base64-encoded, or False when the path is empty or unresolved.
        """
        if not path:
            return False
        path_info = path.split(',')
        icon_path = get_module_resource(path_info[0], path_info[1])
        icon_image = False
        if icon_path:
            with tools.file_open(icon_path, 'rb') as icon_file:
                # base64.encodestring() was deprecated since Python 3.1 and
                # removed in 3.9; encodebytes() is the identical replacement
                icon_image = base64.encodebytes(icon_file.read())
        return icon_image

    @api.constrains('parent_id')
    def _check_parent_id(self):
        if not self._check_recursion():
            raise ValidationError(_('Error! You cannot create recursive menus.'))

    @api.model
    @tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'debug')
    def _visible_menu_ids(self, debug=False):
        """ Return the ids of the menu items visible to the user. """
        # retrieve all menus, and determine which ones are visible
        context = {'ir.ui.menu.full_list': True}
        menus = self.with_context(context).search([])

        groups = self.env.user.groups_id
        if not debug:
            groups = groups - self.env.ref('base.group_no_one')
        # first discard all menus with groups the user does not have
        menus = menus.filtered(lambda menu: not menu.groups_id or menu.groups_id & groups)

        # take apart menus that have an action
        action_menus = menus.filtered(lambda m: m.action and m.action.exists())
        folder_menus = menus - action_menus
        visible = self.browse()

        # process action menus, check whether their action is allowed
        access = self.env['ir.model.access']
        MODEL_GETTER = {
            'ir.actions.act_window': lambda action: action.res_model,
            'ir.actions.report': lambda action: action.model,
            'ir.actions.server': lambda action: action.model_id.model,
        }
        for menu in action_menus:
            get_model = MODEL_GETTER.get(menu.action._name)
            if not get_model or not get_model(menu.action) or \
                    access.check(get_model(menu.action), 'read', False):
                # make menu visible, and its folder ancestors, too
                visible += menu
                menu = menu.parent_id
                while menu and menu in folder_menus and menu not in visible:
                    visible += menu
                    menu = menu.parent_id

        return set(visible.ids)

    @api.returns('self')
    def _filter_visible_menus(self):
        """ Filter `self` to only keep the menu items that should be visible in the
            menu hierarchy of the current user.
            Uses a cache for speeding up the computation.
        """
        visible_ids = self._visible_menu_ids(request.session.debug if request else False)
        return self.filtered(lambda menu: menu.id in visible_ids)

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        # fetch everything first (offset=0, limit=None) because visibility
        # filtering must happen before offset/limit are applied
        menu_ids = super(IrUiMenu, self)._search(args, offset=0, limit=None, order=order,
                                                 count=False, access_rights_uid=access_rights_uid)
        menus = self.browse(menu_ids)
        if menus:
            # menu filtering is done only on main menu tree, not other menu lists
            if not self._context.get('ir.ui.menu.full_list'):
                menus = menus._filter_visible_menus()
            if offset:
                menus = menus[offset:]
            if limit:
                menus = menus[:limit]
        return len(menus) if count else menus.ids

    def name_get(self):
        return [(menu.id, menu._get_full_name()) for menu in self]

    @api.model_create_multi
    def create(self, vals_list):
        self.clear_caches()
        for values in vals_list:
            if 'web_icon' in values:
                values['web_icon_data'] = self._compute_web_icon_data(values.get('web_icon'))
        return super(IrUiMenu, self).create(vals_list)

    def write(self, values):
        self.clear_caches()
        if 'web_icon' in values:
            values['web_icon_data'] = self._compute_web_icon_data(values.get('web_icon'))
        return super(IrUiMenu, self).write(values)

    def _compute_web_icon_data(self, web_icon):
        """ Returns the image associated to `web_icon`.
            `web_icon` can either be:
              - an image icon [module, path]
              - a built icon [icon_class, icon_color, background_color]
            and it only has to call `read_image` if it's an image.
        """
        if web_icon and len(web_icon.split(',')) == 2:
            return self.read_image(web_icon)

    def unlink(self):
        # Detach children and promote them to top-level, because it would be unwise to
        # cascade-delete submenus blindly. We also can't use ondelete=set null because
        # that is not supported when _parent_store is used (would silently corrupt it).
        # TODO: ideally we should move them under a generic "Orphans" menu somewhere?
        extra = {'ir.ui.menu.full_list': True, 'active_test': False}
        direct_children = self.with_context(**extra).search([('parent_id', 'in', self.ids)])
        direct_children.write({'parent_id': False})
        self.clear_caches()
        return super(IrUiMenu, self).unlink()

    def copy(self, default=None):
        # append or increment a "(n)" suffix so the duplicate gets a distinct name
        record = super(IrUiMenu, self).copy(default=default)
        match = NUMBER_PARENS.search(record.name)
        if match:
            next_num = int(match.group(1)) + 1
            record.name = NUMBER_PARENS.sub('(%d)' % next_num, record.name)
        else:
            record.name = record.name + '(1)'
        return record

    @api.model
    @api.returns('self')
    def get_user_roots(self):
        """ Return all root menu ids visible for the user.

        :return: the root menu ids
        :rtype: list(int)
        """
        return self.search([('parent_id', '=', False)])

    @api.model
    @tools.ormcache_context('self._uid', keys=('lang',))
    def load_menus_root(self):
        """ Load only the root menu items (used for the home/app switcher). """
        fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon_data']
        menu_roots = self.get_user_roots()
        menu_roots_data = menu_roots.read(fields) if menu_roots else []
        menu_root = {
            'id': False,
            'name': 'root',
            'parent_id': [-1, ''],
            'children': menu_roots_data,
            'all_menu_ids': menu_roots.ids,
        }

        menu_roots._set_menuitems_xmlids(menu_root)

        return menu_root

    @api.model
    @tools.ormcache_context('self._uid', 'debug', keys=('lang',))
    def load_menus(self, debug):
        """ Loads all menu items (all applications and their sub-menus).

        :return: the menu root
        :rtype: dict('children': menu_nodes)
        """
        fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon', 'web_icon_data']
        menu_roots = self.get_user_roots()
        menu_roots_data = menu_roots.read(fields) if menu_roots else []
        menu_root = {
            'id': False,
            'name': 'root',
            'parent_id': [-1, ''],
            'children': menu_roots_data,
            'all_menu_ids': menu_roots.ids,
        }
        if not menu_roots_data:
            return menu_root

        # menus are loaded fully unlike a regular tree view, cause there are a
        # limited number of items (752 when all 6.1 addons are installed)
        menus = self.search([('id', 'child_of', menu_roots.ids)])
        menu_items = menus.read(fields)

        # add roots at the end of the sequence, so that they will overwrite
        # equivalent menu items from full menu read when put into id:item
        # mapping, resulting in children being correctly set on the roots.
        menu_items.extend(menu_roots_data)
        menu_root['all_menu_ids'] = menus.ids  # includes menu_roots!

        # make a tree using parent_id
        menu_items_map = {menu_item["id"]: menu_item for menu_item in menu_items}
        for menu_item in menu_items:
            parent = menu_item['parent_id'] and menu_item['parent_id'][0]
            if parent in menu_items_map:
                menu_items_map[parent].setdefault('children', []).append(menu_item)

        # sort by sequence a tree using parent_id
        for menu_item in menu_items:
            menu_item.setdefault('children', []).sort(key=operator.itemgetter('sequence'))

        (menu_roots + menus)._set_menuitems_xmlids(menu_root)

        return menu_root

    def _set_menuitems_xmlids(self, menu_root):
        # annotate every node of the menu tree with its xmlid (empty string
        # for menus without external identifier)
        menuitems = self.env['ir.model.data'].sudo().search([
            ('res_id', 'in', self.ids),
            ('model', '=', 'ir.ui.menu')
        ])

        xmlids = {menu.res_id: menu.complete_name for menu in menuitems}

        def _set_xmlids(tree, xmlids):
            tree['xmlid'] = xmlids.get(tree['id'], '')
            if 'children' in tree:
                for child in tree['children']:
                    _set_xmlids(child, xmlids)

        _set_xmlids(menu_root, xmlids)
class Import(models.TransientModel):
    """ Wizard model backing the file-import UI: reads an uploaded file
    (CSV/ODS/XLS/XLSX), matches its columns against the target model's fields
    and produces previews and the data matrix to load.
    """
    _name = 'base_import.import'
    _description = 'Base Import'

    # allow imports to survive for 12h in case user is slow
    _transient_max_hours = 12.0

    res_model = fields.Char('Model')
    file = fields.Binary('File', help="File to check and/or import, raw binary (not base64)", attachment=False)
    file_name = fields.Char('File Name')
    file_type = fields.Char('File Type')

    @api.model
    def get_fields(self, model, depth=FIELDS_RECURSION_LIMIT):
        """ Recursively get fields for the provided model (through
        fields_get) and filter them according to importability

        The output format is a list of ``Field``, with ``Field`` defined as:

        .. class:: Field

            .. attribute:: id (str)

                A non-unique identifier for the field, used to compute
                the span of the ``required`` attribute: if multiple
                ``required`` fields have the same id, only one of them
                is necessary.

            .. attribute:: name (str)

                The field's logical (Coffice) name within the scope of
                its parent.

            .. attribute:: string (str)

                The field's human-readable name (``@string``)

            .. attribute:: required (bool)

                Whether the field is marked as required in the
                model. Clients must provide non-empty import values
                for all required fields or the import will error out.

            .. attribute:: fields (list(Field))

                The current field's subfields. The database and
                external identifiers for m2o and m2m fields; a
                filtered and transformed fields_get for o2m fields (to
                a variable depth defined by ``depth``).

                Fields with no sub-fields will have an empty list of
                sub-fields.

        :param str model: name of the model to get fields form
        :param int depth: depth of recursion into o2m fields
        """
        Model = self.env[model]
        # the pseudo-field "id" (external identifier) is always importable
        importable_fields = [{
            'id': 'id',
            'name': 'id',
            'string': _("External ID"),
            'required': False,
            'fields': [],
            'type': 'id',
        }]
        if not depth:
            return importable_fields
        model_fields = Model.fields_get()
        blacklist = models.MAGIC_COLUMNS + [Model.CONCURRENCY_CHECK_FIELD]
        for name, field in model_fields.items():
            if name in blacklist:
                continue
            # an empty string means the field is deprecated, @deprecated must
            # be absent or False to mean not-deprecated
            if field.get('deprecated', False) is not False:
                continue
            if field.get('readonly'):
                # readonly fields are only importable when some state makes
                # them writable (readonly=False in the states mapping)
                states = field.get('states')
                if not states:
                    continue
                # states = {state: [(attr, value), (attr2, value2)], state2:...}
                if not any(attr == 'readonly' and value is False
                           for attr, value in itertools.chain.from_iterable(states.values())):
                    continue
            field_value = {
                'id': name,
                'name': name,
                'string': field['string'],
                # Y U NO ALWAYS HAS REQUIRED
                'required': bool(field.get('required')),
                'fields': [],
                'type': field['type'],
            }

            if field['type'] in ('many2many', 'many2one'):
                field_value['fields'] = [
                    dict(field_value, name='id', string=_("External ID"), type='id'),
                    dict(field_value, name='.id', string=_("Database ID"), type='id'),
                ]
            elif field['type'] == 'one2many':
                field_value['fields'] = self.get_fields(field['relation'], depth=depth - 1)
                if self.user_has_groups('base.group_no_one'):
                    field_value['fields'].append({'id': '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': [], 'type': 'id'})

            importable_fields.append(field_value)

        # TODO: cache on model?
        return importable_fields

    def _read_file(self, options):
        """ Dispatch to specific method to read file content, according to its mimetype or file type

            :param options: dict of reading options (quoting, separator, ...)
        """
        self.ensure_one()
        # guess mimetype from file content
        mimetype = guess_mimetype(self.file or b'')
        (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(options)
            except Exception:
                # NOTE(review): Logger.warn is a deprecated alias of warning
                _logger.warn("Failed to read file '%s' (transient id %d) using guessed mimetype %s", self.file_name or '<unknown>', self.id, mimetype)

        # try reading with user-provided mimetype
        (file_extension, handler, req) = FILE_TYPE_DICT.get(self.file_type, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(options)
            except Exception:
                _logger.warn("Failed to read file '%s' (transient id %d) using user-provided mimetype %s", self.file_name or '<unknown>', self.id, self.file_type)

        # fallback on file extensions as mime types can be unreliable (e.g.
        # software setting incorrect mime types, or non-installed software
        # leading to browser not sending mime types)
        if self.file_name:
            p, ext = os.path.splitext(self.file_name)
            if ext in EXTENSIONS:
                try:
                    return getattr(self, '_read_' + ext[1:])(options)
                except Exception:
                    _logger.warn("Failed to read file '%s' (transient id %s) using file extension", self.file_name, self.id)

        if req:
            raise ImportError(_("Unable to load \"{extension}\" file: requires Python module \"{modname}\"").format(extension=file_extension, modname=req))
        raise ValueError(_("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX").format(self.file_type))

    def _read_xls(self, options):
        """ Read file content, using xlrd lib """
        book = xlrd.open_workbook(file_contents=self.file or b'')
        return self._read_xls_book(book)

    def _read_xls_book(self, book):
        """ Yield each non-empty row of the first sheet as a list of
            stringified cell values.
        """
        sheet = book.sheet_by_index(0)
        # emulate Sheet.get_rows for pre-0.9.4
        for rowx, row in enumerate(map(sheet.row, range(sheet.nrows)), 1):
            values = []
            for colx, cell in enumerate(row, 1):
                if cell.ctype is xlrd.XL_CELL_NUMBER:
                    # keep integral numbers without the trailing ".0"
                    is_float = cell.value % 1 != 0.0
                    values.append(str(cell.value) if is_float else str(int(cell.value)))
                elif cell.ctype is xlrd.XL_CELL_DATE:
                    is_datetime = cell.value % 1 != 0.0
                    # emulate xldate_as_datetime for pre-0.9.3
                    dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(cell.value, book.datemode))
                    values.append(dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT) if is_datetime else dt.strftime(DEFAULT_SERVER_DATE_FORMAT))
                elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
                    values.append(u'True' if cell.value else u'False')
                elif cell.ctype is xlrd.XL_CELL_ERROR:
                    raise ValueError(
                        _("Invalid cell value at row %(row)s, column %(col)s: %(cell_value)s") % {
                            'row': rowx,
                            'col': colx,
                            'cell_value': xlrd.error_text_from_code.get(cell.value, _("unknown error code %s") % cell.value)
                        })
                else:
                    values.append(cell.value)
            if any(x for x in values if x.strip()):
                yield values

    # use the same method for xlsx and xls files
    _read_xlsx = _read_xls

    def _read_ods(self, options):
        """ Read file content using ODSReader custom lib """
        doc = odf_ods_reader.ODSReader(file=io.BytesIO(self.file or b''))
        return (row for row in doc.getFirstSheet() if any(x for x in row if x.strip()))

    def _read_csv(self, options):
        """ Returns a CSV-parsed iterator of all non-empty lines in the file

            :throws csv.Error: if an error is detected during CSV parsing
        """
        csv_data = self.file or b''
        if not csv_data:
            return iter([])

        encoding = options.get('encoding')
        if not encoding:
            encoding = options['encoding'] = chardet.detect(csv_data)['encoding'].lower()
            # some versions of chardet (e.g. 2.3.0 but not 3.x) will return
            # utf-(16|32)(le|be), which for python means "ignore / don't strip
            # BOM". We don't want that, so rectify the encoding to non-marked
            # IFF the guessed encoding is LE/BE and csv_data starts with a BOM
            bom = BOM_MAP.get(encoding)
            if bom and csv_data.startswith(bom):
                encoding = options['encoding'] = encoding[:-2]

        if encoding != 'utf-8':
            # normalize everything to utf-8 before handing off to the csv reader
            csv_data = csv_data.decode(encoding).encode('utf-8')

        separator = options.get('separator')
        if not separator:
            # default for unspecified separator so user gets a message about
            # having to specify it
            separator = ','
            for candidate in (',', ';', '\t', ' ', '|', unicodedata.lookup('unit separator')):
                # pass through the CSV and check if all rows are the same
                # length & at least 2-wide assume it's the correct one
                it = pycompat.csv_reader(io.BytesIO(csv_data), quotechar=options['quoting'], delimiter=candidate)
                w = None
                for row in it:
                    width = len(row)
                    if w is None:
                        w = width
                    if width == 1 or width != w:
                        break  # next candidate
                else:  # nobreak
                    separator = options['separator'] = candidate
                    break

        csv_iterator = pycompat.csv_reader(io.BytesIO(csv_data), quotechar=options['quoting'], delimiter=separator)

        return (row for row in csv_iterator if any(x for x in row if x.strip()))

    @api.model
    def _try_match_column(self, preview_values, options):
        """ Returns the potential field types, based on the preview values, using heuristics

            :param preview_values: list of value for the column to determine
            :param options: parsing options
        """
        values = set(preview_values)
        # If all values are empty in preview than can be any field
        if values == {''}:
            return ['all']

        # If all values starts with __export__ this is probably an id
        if all(v.startswith('__export__') for v in values):
            return ['id', 'many2many', 'many2one', 'one2many']

        # If all values can be cast to int type is either id, float or monetary
        # Exception: if we only have 1 and 0, it can also be a boolean
        if all(v.isdigit() for v in values if v):
            field_type = ['id', 'integer', 'char', 'float', 'monetary', 'many2one', 'many2many', 'one2many']
            if {'0', '1', ''}.issuperset(values):
                field_type.append('boolean')
            return field_type

        # If all values are either True or False, type is boolean
        if all(val.lower() in ('true', 'false', 't', 'f', '') for val in preview_values):
            return ['boolean']

        # If all values can be cast to float, type is either float or monetary
        try:
            thousand_separator = decimal_separator = False
            for val in preview_values:
                val = val.strip()
                if not val:
                    continue
                # value might have the currency symbol left or right from the value
                val = self._remove_currency_symbol(val)
                if val:
                    if options.get('float_thousand_separator') and options.get('float_decimal_separator'):
                        val = val.replace(options['float_thousand_separator'], '').replace(options['float_decimal_separator'], '.')
                    # We are now sure that this is a float, but we still need to find the
                    # thousand and decimal separator
                    else:
                        if val.count('.') > 1:
                            options['float_thousand_separator'] = '.'
                            options['float_decimal_separator'] = ','
                        elif val.count(',') > 1:
                            options['float_thousand_separator'] = ','
                            options['float_decimal_separator'] = '.'
                        elif val.find('.') > val.find(','):
                            thousand_separator = ','
                            decimal_separator = '.'
                        elif val.find(',') > val.find('.'):
                            thousand_separator = '.'
                            decimal_separator = ','
                else:
                    # This is not a float so exit this try
                    float('a')
            if thousand_separator and not options.get('float_decimal_separator'):
                options['float_thousand_separator'] = thousand_separator
                options['float_decimal_separator'] = decimal_separator
            return ['float', 'monetary']
        except ValueError:
            pass

        results = self._try_match_date_time(preview_values, options)
        if results:
            return results

        return ['id', 'text', 'boolean', 'char', 'datetime', 'selection', 'many2one', 'one2many', 'many2many', 'html']

    def _try_match_date_time(self, preview_values, options):
        """ Return ['date', 'datetime'], ['datetime'] or [] depending on
            which (if any) pattern matches all preview values; stores the
            matched pattern back into ``options``.
        """
        # Or a date/datetime if it matches the pattern
        date_patterns = [options['date_format']] if options.get('date_format') else []
        user_date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format
        if user_date_format:
            try:
                to_re(user_date_format)
                date_patterns.append(user_date_format)
            except KeyError:
                # unsupported directive in the user's format; skip it
                pass
        date_patterns.extend(DATE_PATTERNS)
        match = check_patterns(date_patterns, preview_values)
        if match:
            options['date_format'] = match
            return ['date', 'datetime']

        datetime_patterns = [options['datetime_format']] if options.get('datetime_format') else []
        datetime_patterns.extend("%s %s" % (d, t) for d in date_patterns for t in TIME_PATTERNS)
        match = check_patterns(datetime_patterns, preview_values)
        if match:
            options['datetime_format'] = match
            return ['datetime']

        return []

    @api.model
    def _find_type_from_preview(self, options, preview):
        """ Return, for each column of the preview, the list of candidate
            field types computed by _try_match_column.
        """
        type_fields = []
        if preview:
            for column in range(0, len(preview[0])):
                preview_values = [value[column].strip() for value in preview]
                type_field = self._try_match_column(preview_values, options)
                type_fields.append(type_field)
        return type_fields

    def _match_header(self, header, fields, options):
        """ Attempts to match a given header to a field of the imported model.

        :param str header: header name from the CSV file
        :param fields:
        :param dict options:
        :returns: an empty list if the header couldn't be matched, or all the fields to traverse
        :rtype: list(Field)
        """
        string_match = None
        IrTranslation = self.env['ir.translation']
        for field in fields:
            # FIXME: should match all translations & original
            # TODO: use string distance (levenshtein? hamming?)
            if header.lower() == field['name'].lower():
                return [field]
            if header.lower() == field['string'].lower():
                # matching string are not reliable way because
                # strings have no unique constraint
                string_match = field
            translated_header = IrTranslation._get_source('ir.model.fields,field_description', 'model', self.env.lang, header).lower()
            if translated_header == field['string'].lower():
                string_match = field
        if string_match:
            # this behavior is only applied if there is no matching field['name']
            return [string_match]

        if '/' not in header:
            return []

        # relational field path
        traversal = []
        subfields = fields
        # Iteratively dive into fields tree
        for section in header.split('/'):
            # Strip section in case spaces are added around '/' for
            # readability of paths
            match = self._match_header(section.strip(), subfields, options)
            # Any match failure, exit
            if not match:
                return []
            # prep subfields for next iteration within match[0]
            field = match[0]
            subfields = field['fields']
            traversal.append(field)
        return traversal

    def _match_headers(self, rows, fields, options):
        """ Attempts to match the imported model's fields to the
            titles of the parsed CSV file, if the file is supposed to have
            headers.

            Will consume the first line of the ``rows`` iterator.

            Returns the list of headers and a dict mapping cell indices
            to key paths in the ``fields`` tree. If headers were not
            requested, both collections are empty.

            :param Iterator rows:
            :param dict fields:
            :param dict options:
            :rtype: (list(str), dict(int: list(str)))
        """
        if not options.get('headers'):
            return [], {}

        headers = next(rows, None)
        if not headers:
            return [], {}

        matches = {}
        # user-saved column->field mappings take precedence over heuristics
        mapping_records = self.env['base_import.mapping'].search_read([('res_model', '=', self.res_model)], ['column_name', 'field_name'])
        mapping_fields = {rec['column_name']: rec['field_name'] for rec in mapping_records}
        for index, header in enumerate(headers):
            match_field = []
            mapping_field_name = mapping_fields.get(header.lower())
            if mapping_field_name:
                match_field = mapping_field_name.split('/')
            if not match_field:
                match_field = [field['name'] for field in self._match_header(header, fields, options)]
            matches[index] = match_field or None
        return headers, matches

    def parse_preview(self, options, count=10):
        """ Generates a preview of the uploaded files, and performs
            fields-matching between the import's file data and the model's
            columns.

            If the headers are not requested (not options.headers),
            ``matches`` and ``headers`` are both ``False``.

            :param int count: number of preview lines to generate
            :param options: format-specific options.
                            CSV: {quoting, separator, headers}
            :type options: {str, str, str, bool}
            :returns: {fields, matches, headers, preview} | {error, preview}
            :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
        """
        self.ensure_one()
        fields = self.get_fields(self.res_model)
        try:
            rows = self._read_file(options)
            headers, matches = self._match_headers(rows, fields, options)
            # Match should have consumed the first row (iif headers), get
            # the ``count`` next rows for preview
            preview = list(itertools.islice(rows, count))
            assert preview, "file seems to have no content"
            header_types = self._find_type_from_preview(options, preview)
            if options.get('keep_matches') and len(options.get('fields', [])):
                # re-use the matches the user already confirmed in the UI
                matches = {}
                for index, match in enumerate(options.get('fields')):
                    if match:
                        matches[index] = match.split('/')

            if options.get('keep_matches'):
                advanced_mode = options.get('advanced')
            else:
                # Check is label contain relational field
                has_relational_header = any(len(models.fix_import_export_id_paths(col)) > 1 for col in headers)
                # Check is matches fields have relational field
                has_relational_match = any(len(match) > 1 for field, match in matches.items() if match)
                advanced_mode = has_relational_header or has_relational_match

            batch = False
            batch_cutoff = options.get('limit')
            if batch_cutoff:
                if count > batch_cutoff:
                    batch = len(preview) > batch_cutoff
                else:
                    # peek past the preview to see whether more rows remain
                    batch = bool(next(itertools.islice(rows, batch_cutoff - count, None), None))

            return {
                'fields': fields,
                'matches': matches or False,
                'headers': headers or False,
                'headers_type': header_types or False,
                'preview': preview,
                'options': options,
                'advanced_mode': advanced_mode,
                'debug': self.user_has_groups('base.group_no_one'),
                'batch': batch,
            }
        except Exception as error:
            # Due to lazy generators, UnicodeDecodeError (for
            # instance) may only be raised when serializing the
            # preview to a list in the return.
            _logger.debug("Error during parsing preview", exc_info=True)
            preview = None
            if self.file_type == 'text/csv' and self.file:
                preview = self.file[:ERROR_PREVIEW_BYTES].decode('iso-8859-1')
            return {
                'error': str(error),
                # iso-8859-1 ensures decoding will always succeed,
                # even if it yields non-printable characters. This is
                # in case of UnicodeDecodeError (or csv.Error
                # compounded with UnicodeDecodeError)
                'preview': preview,
            }

    @api.model
    def _convert_import_data(self, fields, options):
        """ Extracts the input BaseModel and fields list (with
            ``False``-y placeholders for fields to *not* import) into a
            format Model.import_data can use: a fields list without holes
            and the precisely matching data matrix

            :param list(str|bool): fields
            :returns: (data, fields)
            :rtype: (list(list(str)), list(str))
            :raises ValueError: in case the import data could not be converted
        """
        # Get indices for non-empty fields
        indices = [index for index, field in enumerate(fields) if field]
        if not indices:
            raise ValueError(_("You must configure at least one field to import"))
        # If only one index, itemgetter will return an atom rather
        # than a 1-tuple
        if len(indices) == 1:
            mapper = lambda row: [row[indices[0]]]
        else:
            mapper = operator.itemgetter(*indices)
        # Get only list of actually imported fields
        import_fields = [f for f in fields if f]

        rows_to_import = self._read_file(options)
        if options.get('headers'):
            # skip the header row
            rows_to_import = itertools.islice(rows_to_import, 1, None)
        data = [
            list(row) for row in map(mapper, rows_to_import)
            # don't try inserting completely empty rows (e.g. from
            # filtering out o2m fields)
            if any(row)
        ]

        # slicing needs to happen after filtering out empty rows as the
        # data offsets from load are post-filtering
        return data[options.get('skip'):], import_fields

    @api.model
    def _remove_currency_symbol(self, value):
        """ Strip a currency symbol (and accountants' parentheses-negation)
            from ``value``; return the numeric part as str, or False when
            ``value`` does not look like a currency-decorated number.
        """
        value = value.strip()
        negative = False
        # Careful that some countries use () for negative so replace it by - sign
        if value.startswith('(') and value.endswith(')'):
            value = value[1:-1]
            negative = True
        float_regex = re.compile(r'([+-]?[0-9.,]+)')
        split_value = [g for g in float_regex.split(value) if g]
        if len(split_value) > 2:
            # This is probably not a float
            return False
        if len(split_value) == 1:
            if float_regex.search(split_value[0]) is not None:
                return split_value[0] if not negative else '-' + split_value[0]
            return False
        else:
            # String has been split in 2, locate which index contains the float and which does not
            currency_index = 0
            if float_regex.search(split_value[0]) is not None:
                currency_index = 1
            # Check that currency exists
            currency = self.env['res.currency'].search([('symbol', '=', split_value[currency_index].strip())])
            if len(currency):
                return split_value[(currency_index + 1) % 2] if not negative else '-' + split_value[(currency_index + 1) % 2]
            # Otherwise it is not a float with a currency symbol
            return False

    @api.model
    def _parse_float_from_data(self, data, index, name, options):
        """ Normalize, in place, the float column ``index`` of ``data``:
            strips separators and currency symbols; raises ValueError on
            values that cannot be parsed.
        """
        for line in data:
            line[index] = line[index].strip()
            if not line[index]:
                continue
            thousand_separator, decimal_separator = self._infer_separators(line[index], options)
            line[index] = line[index].replace(thousand_separator, '').replace(decimal_separator, '.')
            old_value = line[index]
            line[index] = self._remove_currency_symbol(line[index])
            if line[index] is False:
                raise ValueError(_("Column %s contains incorrect values (value: %s)" % (name, old_value)))

    def _infer_separators(self, value, options):
        """ Try to infer the shape of the separators: if there are two
            different "non-numeric" characters in the number, the
            former/duplicated one would be grouping ("thousands" separator) and
            the latter would be the decimal separator. The decimal separator
            should furthermore be unique.
        """
        # can't use \p{Sc} using re so handroll it
        non_number = [
            # any character
            c for c in value
            # which is not a numeric decoration (() is used for negative
            # by accountants)
            if c not in '()-+'
            # which is not a digit or a currency symbol
            if unicodedata.category(c) not in ('Nd', 'Sc')
        ]

        counts = collections.Counter(non_number)
        # if we have two non-numbers *and* the last one has a count of 1,
        # we probably have grouping & decimal separators
        if len(counts) == 2 and counts[non_number[-1]] == 1:
            return [character for character, _count in counts.most_common()]

        # otherwise get whatever's in the options, or fallback to a default
        thousand_separator = options.get('float_thousand_separator', ' ')
        decimal_separator = options.get('float_decimal_separator', '.')
        return thousand_separator, decimal_separator

    def _parse_import_data(self, data, import_fields, options):
        """ Launch first call to _parse_import_data_recursive with an
            empty prefix. _parse_import_data_recursive will be run
            recursively for each relational field.
        """
        return self._parse_import_data_recursive(self.res_model, '', data, import_fields, options)

    def _parse_import_data_recursive(self, model, prefix, data, import_fields, options):
        # Get fields of type date/datetime
        all_fields = self.env[model].fields_get()
        for name, field in all_fields.items():
            name = prefix + name
            if field['type'] in ('date', 'datetime') and name in import_fields:
                index = import_fields.index(name)
                self._parse_date_from_data(data, index, name, field['type'], options)
            # Check if the field is in import_field and is a relational (followed by /)
            # Also verify that the field name exactly match the import_field at the correct level.
elif any(name + '/' in import_field and name == import_field.split('/')[prefix.count('/')] for import_field in import_fields): # Recursive call with the relational as new model and add the field name to the prefix self._parse_import_data_recursive(field['relation'], name + '/', data, import_fields, options) elif field['type'] in ('float', 'monetary') and name in import_fields: # Parse float, sometimes float values from file have currency symbol or () to denote a negative value # We should be able to manage both case index = import_fields.index(name) self._parse_float_from_data(data, index, name, options) elif field['type'] == 'binary' and field.get('attachment') and any( f in name for f in IMAGE_FIELDS) and name in import_fields: index = import_fields.index(name) with requests.Session() as session: session.stream = True for num, line in enumerate(data): if re.match( config.get("import_image_regex", DEFAULT_IMAGE_REGEX), line[index]): if not self.env.user._can_import_remote_urls(): raise AccessError( _("You can not import images via URL, check with your administrator or support for the reason." )) line[index] = self._import_image_by_url( line[index], session, name, num) else: try: base64.b64decode(line[index], validate=True) except binascii.Error: raise ValueError( _("Found invalid image data, images should be imported as either URLs or base64-encoded data." 
)) return data def _parse_date_from_data(self, data, index, name, field_type, options): dt = datetime.datetime fmt = fields.Date.to_string if field_type == 'date' else fields.Datetime.to_string d_fmt = options.get('date_format') dt_fmt = options.get('datetime_format') for num, line in enumerate(data): if not line[index]: continue v = line[index].strip() try: # first try parsing as a datetime if it's one if dt_fmt and field_type == 'datetime': try: line[index] = fmt(dt.strptime(v, dt_fmt)) continue except ValueError: pass # otherwise try parsing as a date whether it's a date # or datetime line[index] = fmt(dt.strptime(v, d_fmt)) except ValueError as e: raise ValueError( _("Column %s contains incorrect values. Error in line %d: %s" ) % (name, num + 1, e)) except Exception as e: raise ValueError( _("Error Parsing Date [%s:L%d]: %s") % (name, num + 1, e)) def _import_image_by_url(self, url, session, field, line_number): """ Imports an image by URL :param str url: the original field value :param requests.Session session: :param str field: name of the field (for logging/debugging) :param int line_number: 0-indexed line number within the imported file (for logging/debugging) :return: the replacement value :rtype: bytes """ maxsize = int( config.get("import_image_maxbytes", DEFAULT_IMAGE_MAXBYTES)) try: response = session.get(url, timeout=int( config.get("import_image_timeout", DEFAULT_IMAGE_TIMEOUT))) response.raise_for_status() if response.headers.get('Content-Length') and int( response.headers['Content-Length']) > maxsize: raise ValueError( _("File size exceeds configured maximum (%s bytes)") % maxsize) content = bytearray() for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE): content += chunk if len(content) > maxsize: raise ValueError( _("File size exceeds configured maximum (%s bytes)") % maxsize) image = Image.open(io.BytesIO(content)) w, h = image.size if w * h > 42e6: # Nokia Lumia 1020 photo resolution raise ValueError( u"Image size excessive, imported 
images must be smaller " u"than 42 million pixel") return base64.b64encode(content) except Exception as e: raise ValueError( _("Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s" ) % { 'url': url, 'field_name': field, 'line_number': line_number + 1, 'error': e }) def do(self, fields, columns, options, dryrun=False): """ Actual execution of the import :param fields: import mapping: maps each column to a field, ``False`` for the columns to ignore :type fields: list(str|bool) :param columns: columns label :type columns: list(str|bool) :param dict options: :param bool dryrun: performs all import operations (and validations) but rollbacks writes, allows getting as much errors as possible without the risk of clobbering the database. :returns: A list of errors. If the list is empty the import executed fully and correctly. If the list is non-empty it contains dicts with 3 keys ``type`` the type of error (``error|warning``); ``message`` the error message associated with the error (a string) and ``record`` the data which failed to import (or ``false`` if that data isn't available or provided) :rtype: dict(ids: list(int), messages: list({type, message, record})) """ self.ensure_one() self._cr.execute('SAVEPOINT import') try: data, import_fields = self._convert_import_data(fields, options) # Parse date and float field data = self._parse_import_data(data, import_fields, options) except ValueError as error: return { 'messages': [{ 'type': 'error', 'message': str(error), 'record': False, }] } _logger.info('importing %d rows...', len(data)) name_create_enabled_fields = options.pop('name_create_enabled_fields', {}) import_limit = options.pop('limit', None) model = self.env[self.res_model].with_context( import_file=True, name_create_enabled_fields=name_create_enabled_fields, _import_limit=import_limit) import_result = model.load(import_fields, data) _logger.info('done') # If transaction aborted, RELEASE SAVEPOINT is going to raise # an InternalError 
(ROLLBACK should work, maybe). Ignore that. # TODO: to handle multiple errors, create savepoint around # write and release it in case of write error (after # adding error to errors array) => can keep on trying to # import stuff, and rollback at the end if there is any # error in the results. try: if dryrun: self._cr.execute('ROLLBACK TO SAVEPOINT import') # cancel all changes done to the registry/ormcache self.pool.clear_caches() self.pool.reset_changes() else: self._cr.execute('RELEASE SAVEPOINT import') except psycopg2.InternalError: pass # Insert/Update mapping columns when import complete successfully if import_result['ids'] and options.get('headers'): BaseImportMapping = self.env['base_import.mapping'] for index, column_name in enumerate(columns): if column_name: # Update to latest selected field exist_records = BaseImportMapping.search([ ('res_model', '=', self.res_model), ('column_name', '=', column_name) ]) if exist_records: exist_records.write({'field_name': fields[index]}) else: BaseImportMapping.create({ 'res_model': self.res_model, 'column_name': column_name, 'field_name': fields[index] }) if 'name' in import_fields: index_of_name = import_fields.index('name') skipped = options.get('skip', 0) # pad front as data doesn't contain anythig for skipped lines r = import_result['name'] = [''] * skipped # only add names for the window being imported r.extend(x[index_of_name] for x in data[:import_limit]) # pad back (though that's probably not useful) r.extend([''] * (len(data) - (import_limit or 0))) else: import_result['name'] = [] skip = options.get('skip', 0) # convert load's internal nextrow to the imported file's if import_result[ 'nextrow']: # don't update if nextrow = 0 (= no nextrow) import_result['nextrow'] += skip return import_result
class Property(models.Model):
    """Storage for company-dependent field values.

    Each record holds the value of one company-dependent field, either for
    a specific resource (``res_id`` formatted as ``"<model>,<id>"``) or —
    when ``res_id`` is unset — as the company-wide default for new
    resources.  The actual value lives in the ``value_*`` column selected
    by ``type`` (see the TYPE2FIELD mapping defined elsewhere in this file).
    """
    _name = 'ir.property'
    _description = 'Company Property'

    name = fields.Char(index=True)
    res_id = fields.Char(
        string='Resource',
        index=True,
        help="If not set, acts as a default value for new resources",
    )
    company_id = fields.Many2one('res.company', string='Company', index=True)
    fields_id = fields.Many2one('ir.model.fields', string='Field',
                                ondelete='cascade', required=True, index=True)
    # One dedicated storage column per supported value type; ``type`` below
    # selects which one is read/written.
    value_float = fields.Float()
    value_integer = fields.Integer()
    value_text = fields.Text()  # will contain (char, text)
    value_binary = fields.Binary(attachment=False)
    value_reference = fields.Char()
    value_datetime = fields.Datetime()
    type = fields.Selection([
        ('char', 'Char'),
        ('float', 'Float'),
        ('boolean', 'Boolean'),
        ('integer', 'Integer'),
        ('text', 'Text'),
        ('binary', 'Binary'),
        ('many2one', 'Many2One'),
        ('date', 'Date'),
        ('datetime', 'DateTime'),
        ('selection', 'Selection'),
    ], required=True, default='many2one', index=True)

    def _update_values(self, values):
        """Normalize a create/write ``values`` dict in place.

        Pops the generic ``value`` key and stores it under the concrete
        ``value_*`` column matching the property type (taken from
        ``values``, from the first record of ``self``, or from the field's
        default, in that order).

        :param dict values: create/write values, possibly holding ``value``
        :returns: the same dict, with ``value`` moved to its typed column
        :raises UserError: if the resolved type has no storage column
        :raises ValueError: many2one value given as a bare id when the
            target model cannot be resolved (no ``fields_id``, no record)
        """
        if 'value' not in values:
            return values
        value = values.pop('value')

        prop = None
        type_ = values.get('type')
        if not type_:
            if self:
                prop = self[0]
                type_ = prop.type
            else:
                type_ = self._fields['type'].default(self)

        field = TYPE2FIELD.get(type_)
        if not field:
            raise UserError(_('Invalid type'))

        if field == 'value_reference':
            # many2one values are stored as the textual "<model>,<id>" pair
            if not value:
                value = False
            elif isinstance(value, models.BaseModel):
                value = '%s,%d' % (value._name, value.id)
            elif isinstance(value, int):
                # bare id: recover the comodel name from fields_id
                field_id = values.get('fields_id')
                if not field_id:
                    if not prop:
                        raise ValueError()
                    field_id = prop.fields_id
                else:
                    field_id = self.env['ir.model.fields'].browse(field_id)

                value = '%s,%d' % (field_id.sudo().relation, value)

        values[field] = value
        return values

    def write(self, values):
        """Write, invalidating the default-property cache when a company
        default (``res_id`` unset) is or becomes involved."""
        # if any of the records we're writing on has a res_id=False *or*
        # we're writing a res_id=False on any record
        default_set = False
        if self._ids:
            self.env.cr.execute(
                'SELECT EXISTS (SELECT 1 FROM ir_property WHERE id in %s AND res_id IS NULL)',
                [self._ids])
            # NOTE(review): a SELECT EXISTS always yields exactly one row, so
            # ``rowcount == 1`` is always true here and short-circuits the
            # ``any(...)`` part (which, if ever evaluated, would iterate the
            # dict's *keys* and fail on ``v.get``). Presumably the EXISTS
            # result itself (``cr.fetchone()[0]``) was meant to be read —
            # confirm against upstream before changing.
            default_set = self.env.cr.rowcount == 1 or any(
                v.get('res_id') is False for v in values)
        r = super(Property, self).write(self._update_values(values))
        if default_set:
            # DLE P44: test `test_27_company_dependent`
            # Easy solution, need to flush write when changing a property.
            # Maybe it would be better to be able to compute all impacted
            # cache values and update those instead.
            # Then clear_caches must be removed as well.
            self.flush()
            self.clear_caches()
        return r

    @api.model_create_multi
    def create(self, vals_list):
        """Create properties; invalidate the default-property cache when any
        new record is a company default (no ``res_id``)."""
        vals_list = [self._update_values(vals) for vals in vals_list]
        created_default = any(not v.get('res_id') for v in vals_list)
        r = super(Property, self).create(vals_list)
        if created_default:
            # DLE P44: test `test_27_company_dependent`
            self.flush()
            self.clear_caches()
        return r

    def unlink(self):
        """Unlink, invalidating the default-property cache when needed."""
        default_deleted = False
        if self._ids:
            self.env.cr.execute(
                'SELECT EXISTS (SELECT 1 FROM ir_property WHERE id in %s)',
                [self._ids])
            # NOTE(review): same pattern as write() — rowcount after a
            # SELECT EXISTS is always 1, so this flag is always set here;
            # the EXISTS boolean itself is never fetched. Confirm upstream.
            default_deleted = self.env.cr.rowcount == 1
        r = super().unlink()
        if default_deleted:
            self.clear_caches()
        return r

    def get_by_record(self):
        """Return the Python value stored on this (single) property,
        decoded according to ``self.type``."""
        self.ensure_one()
        if self.type in ('char', 'text', 'selection'):
            return self.value_text
        elif self.type == 'float':
            return self.value_float
        elif self.type == 'boolean':
            return bool(self.value_integer)
        elif self.type == 'integer':
            return self.value_integer
        elif self.type == 'binary':
            return self.value_binary
        elif self.type == 'many2one':
            if not self.value_reference:
                return False
            model, resource_id = self.value_reference.split(',')
            # .exists() filters out dangling references to deleted records
            return self.env[model].browse(int(resource_id)).exists()
        elif self.type == 'datetime':
            return self.value_datetime
        elif self.type == 'date':
            if not self.value_datetime:
                return False
            # dates are stored in the datetime column; truncate on the way out
            return fields.Date.to_string(
                fields.Datetime.from_string(self.value_datetime))
        return False

    @api.model
    def get(self, name, model, res_id=False):
        """Return the value of property ``name`` of ``model`` for ``res_id``.

        With a falsy ``res_id`` the (ormcache-cached) company default is
        returned instead.
        """
        if not res_id:
            t, v = self._get_default_property(name, model)
            if not v or t != 'many2one':
                return v
            # cached many2one values are (model, id) tuples; rebuild a record
            return self.env[v[0]].browse(v[1])

        p = self._get_property(name, model, res_id=res_id)
        if p:
            return p.get_by_record()
        return False

    # Only Property.get(res_id=False) — the company default — is cached;
    # caching per res_id would be sub-optimal (one entry per record).
    COMPANY_KEY = "self.env.context.get('force_company') or self.env.company.id"

    @ormcache(COMPANY_KEY, 'name', 'model')
    def _get_default_property(self, name, model):
        """Return ``(type, value)`` of the default property for ``name`` on
        ``model`` for the active company, or ``(None, False)`` if unset.

        many2one values are returned as ``(model_name, id)`` tuples so the
        ormcache never holds live recordsets.
        """
        prop = self._get_property(name, model, res_id=False)
        if not prop:
            return None, False
        v = prop.get_by_record()
        if prop.type != 'many2one':
            return prop.type, v
        return 'many2one', v and (v._name, v.id)

    def _get_property(self, name, model, res_id):
        """Return the single best-matching property record (possibly empty)."""
        domain = self._get_domain(name, model)
        if domain is not None:
            domain = [('res_id', '=', res_id)] + domain
            # make the search with company_id asc to make sure that properties
            # specific to a company are given first
            return self.search(domain, limit=1, order='company_id')
        return self.browse(())

    def _get_domain(self, prop_name, model):
        """Return the base search domain for ``prop_name`` on ``model``, or
        ``None`` when the field is not registered in ir_model_fields."""
        self._cr.execute(
            "SELECT id FROM ir_model_fields WHERE name=%s AND model=%s",
            (prop_name, model))
        res = self._cr.fetchone()
        if not res:
            return None
        company_id = self._context.get('force_company') or self.env.company.id
        return [('fields_id', '=', res[0]),
                ('company_id', 'in', [company_id, False])]

    @api.model
    def get_multi(self, name, model, ids):
        """ Read the property field `name` for the records of model `model`
            with the given `ids`, and return a dictionary mapping `ids` to
            their corresponding value.
        """
        if not ids:
            return {}

        field = self.env[model]._fields[name]
        field_id = self.env['ir.model.fields']._get(model, name).id
        company_id = (self._context.get('force_company')
                      or self.env.company.id)

        if field.type == 'many2one':
            comodel = self.env[field.comodel_name]
            # offsets past the "<model>," prefix inside the stored strings
            model_pos = len(model) + 2
            value_pos = len(comodel._name) + 2
            # retrieve values: both p.res_id and p.value_reference are
            # formatted as "<rec._name>,<rec.id>"; the purpose of the LEFT
            # JOIN is to return the value id if it exists, NULL otherwise
            query = """
                SELECT substr(p.res_id, %s)::integer, r.id
                FROM ir_property p
                LEFT JOIN {} r ON substr(p.value_reference, %s)::integer=r.id
                WHERE p.fields_id=%s
                    AND (p.company_id=%s OR p.company_id IS NULL)
                    AND (p.res_id IN %s OR p.res_id IS NULL)
                ORDER BY p.company_id NULLS FIRST
            """.format(comodel._table)
            params = [model_pos, value_pos, field_id, company_id]
            clean = comodel.browse

        elif field.type in TYPE2FIELD:
            model_pos = len(model) + 2
            # retrieve values: p.res_id is formatted as "<rec._name>,<rec.id>"
            query = """
                SELECT substr(p.res_id, %s)::integer, p.{}
                FROM ir_property p
                WHERE p.fields_id=%s
                    AND (p.company_id=%s OR p.company_id IS NULL)
                    AND (p.res_id IN %s OR p.res_id IS NULL)
                ORDER BY p.company_id NULLS FIRST
            """.format(TYPE2FIELD[field.type])
            params = [model_pos, field_id, company_id]
            clean = TYPE2CLEAN[field.type]

        else:
            return dict.fromkeys(ids, False)

        # retrieve values
        cr = self.env.cr
        result = {}
        refs = {"%s,%s" % (model, id) for id in ids}
        for sub_refs in cr.split_for_in_conditions(refs):
            cr.execute(query, params + [sub_refs])
            result.update(cr.fetchall())

        # remove default value, add missing values, and format them
        # (the res_id IS NULL row surfaces under key None thanks to the
        # ORDER BY ... NULLS FIRST, so specific rows overwrite it)
        default = result.pop(None, None)
        for id in ids:
            result[id] = clean(result.get(id, default))
        return result

    @api.model
    def set_multi(self, name, model, values, default_value=None):
        """ Assign the property field `name` for the records of model `model`
            with `values` (dictionary mapping record ids to their value).
            If the value for a given record is the same as the default value,
            the property entry will not be stored, to avoid bloating the
            database.
            If `default_value` is provided, that value will be used instead
            of the computed default value, to determine whether the value
            for a record should be stored or not.
        """
        def clean(value):
            # compare by id, not recordset identity
            return value.id if isinstance(value, models.BaseModel) else value

        if not values:
            return

        if default_value is None:
            domain = self._get_domain(name, model)
            if domain is None:
                raise Exception()
            # retrieve the default value for the field
            default_value = clean(self.get(name, model))

        # retrieve the properties corresponding to the given record ids
        self._cr.execute(
            "SELECT id FROM ir_model_fields WHERE name=%s AND model=%s",
            (name, model))
        field_id = self._cr.fetchone()[0]
        company_id = self.env.context.get(
            'force_company') or self.env.company.id
        refs = {('%s,%s' % (model, id)): id for id in values}
        props = self.search([
            ('fields_id', '=', field_id),
            ('company_id', '=', company_id),
            ('res_id', 'in', list(refs)),
        ])

        # modify existing properties
        for prop in props:
            id = refs.pop(prop.res_id)
            value = clean(values[id])
            if value == default_value:
                # avoid prop.unlink(), as it clears the record cache that can
                # contain the value of other properties to set on record!
                prop.check_access_rights('unlink')
                prop.check_access_rule('unlink')
                self._cr.execute("DELETE FROM ir_property WHERE id=%s",
                                 [prop.id])
            elif value != clean(prop.get_by_record()):
                prop.write({'value': value})

        # create new properties for records that do not have one yet
        vals_list = []
        for ref, id in refs.items():
            value = clean(values[id])
            if value != default_value:
                vals_list.append({
                    'fields_id': field_id,
                    'company_id': company_id,
                    'res_id': ref,
                    'name': name,
                    'value': value,
                    'type': self.env[model]._fields[name].type,
                })
        self.create(vals_list)

    @api.model
    def search_multi(self, name, model, operator, value):
        """ Return a domain for the records that match the given condition.
        """
        default_matches = False
        include_zero = False

        field = self.env[model]._fields[name]
        if field.type == 'many2one':
            comodel = field.comodel_name

            def makeref(value):
                # encode a target id into the stored "<model>,<id>" form
                return value and '%s,%s' % (comodel, value)

            if operator == "=":
                value = makeref(value)
                # if searching properties not set, search those not in those set
                if value is False:
                    default_matches = True
            elif operator in ('!=', '<=', '<', '>', '>='):
                value = makeref(value)
            elif operator in ('in', 'not in'):
                value = [makeref(v) for v in value]
            elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike',
                              'not ilike'):
                # most probably inefficient... but correct
                target = self.env[comodel]
                target_names = target.name_search(value,
                                                  operator=operator,
                                                  limit=None)
                target_ids = [n[0] for n in target_names]
                operator, value = 'in', [makeref(v) for v in target_ids]
        elif field.type in ('integer', 'float'):
            # No record is created in ir.property if the field's type is float
            # or integer with a value equal to 0. Then to match with the
            # records that are linked to a property field equal to 0, the
            # negation of the operator must be taken to compute the goods and
            # the domain returned to match the searched records is just the
            # opposite.
            if value == 0 and operator == '=':
                operator = '!='
                include_zero = True
            elif value <= 0 and operator == '>=':
                operator = '<'
                include_zero = True
            elif value < 0 and operator == '>':
                operator = '<='
                include_zero = True
            elif value >= 0 and operator == '<=':
                operator = '>'
                include_zero = True
            elif value > 0 and operator == '<':
                operator = '>='
                include_zero = True

        # retrieve the properties that match the condition
        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()
        props = self.search(domain +
                            [(TYPE2FIELD[field.type], operator, value)])

        # retrieve the records corresponding to the properties that match
        good_ids = []
        for prop in props:
            if prop.res_id:
                res_model, res_id = prop.res_id.split(',')
                good_ids.append(int(res_id))
            else:
                # a matching default property means unset records match too
                default_matches = True

        if include_zero:
            return [('id', 'not in', good_ids)]
        elif default_matches:
            # exclude all records with a property that does not match
            all_ids = []
            props = self.search(domain + [('res_id', '!=', False)])
            for prop in props:
                res_model, res_id = prop.res_id.split(',')
                all_ids.append(int(res_id))
            bad_ids = list(set(all_ids) - set(good_ids))
            return [('id', 'not in', bad_ids)]
        else:
            return [('id', 'in', good_ids)]
class BlogPost(models.Model):
    """A website blog post: publishable content with tags, teaser and
    mail-thread discussion, belonging to one blog."""
    _name = "blog.post"
    _description = "Blog Post"
    _inherit = [
        'mail.thread', 'website.seo.metadata', 'website.published.multi.mixin'
    ]
    _order = 'id DESC'
    _mail_post_access = 'read'

    def _compute_website_url(self):
        # URL shape: /blog/<blog-slug>/post/<post-slug>
        super(BlogPost, self)._compute_website_url()
        for blog_post in self:
            blog_post.website_url = "/blog/%s/post/%s" % (slug(
                blog_post.blog_id), slug(blog_post))

    def _default_content(self):
        # Placeholder body shown in a freshly created post (translated).
        return '''
            <p class="o_default_snippet_text">''' + _(
            "Start writing here...") + '''</p>
        '''

    name = fields.Char('Title', required=True, translate=True, default='')
    subtitle = fields.Char('Sub Title', translate=True)
    author_id = fields.Many2one('res.partner',
                                'Author',
                                default=lambda self: self.env.user.partner_id)
    active = fields.Boolean('Active', default=True)
    # JSON blob consumed by the website cover snippet
    cover_properties = fields.Text(
        'Cover Properties',
        default=
        '{"background-image": "none", "background-color": "oe_black", "opacity": "0.2", "resize_class": "cover_mid"}'
    )
    blog_id = fields.Many2one('blog.blog',
                              'Blog',
                              required=True,
                              ondelete='cascade')
    tag_ids = fields.Many2many('blog.tag', string='Tags')
    content = fields.Html('Content',
                          default=_default_content,
                          translate=html_translate,
                          sanitize=False)
    # teaser: manual text if provided, otherwise derived from content
    teaser = fields.Text('Teaser',
                         compute='_compute_teaser',
                         inverse='_set_teaser')
    teaser_manual = fields.Text(string='Teaser Content')

    # restrict the thread's website messages to visitor comments
    website_message_ids = fields.One2many(
        domain=lambda self: [('model', '=', self._name),
                             ('message_type', '=', 'comment')])

    # creation / update stuff
    create_date = fields.Datetime('Created on', index=True, readonly=True)
    published_date = fields.Datetime('Published Date')
    post_date = fields.Datetime(
        'Publishing date',
        compute='_compute_post_date',
        inverse='_set_post_date',
        store=True,
        help=
        "The blog post will be visible for your visitors as of this date on the website if it is set as published."
    )
    create_uid = fields.Many2one('res.users',
                                 'Created by',
                                 index=True,
                                 readonly=True)
    write_date = fields.Datetime('Last Updated on', index=True, readonly=True)
    write_uid = fields.Many2one('res.users',
                                'Last Contributor',
                                index=True,
                                readonly=True)
    author_avatar = fields.Binary(related='author_id.image_128',
                                  string="Avatar",
                                  readonly=False)
    visits = fields.Integer('No of Views', copy=False)
    website_id = fields.Many2one(related='blog_id.website_id', readonly=True)

    @api.depends('content', 'teaser_manual')
    def _compute_teaser(self):
        # Manual teaser wins; otherwise take the first 200 characters of the
        # content flattened to plain text.
        for blog_post in self:
            if blog_post.teaser_manual:
                blog_post.teaser = blog_post.teaser_manual
            else:
                content = html2plaintext(blog_post.content).replace('\n', ' ')
                blog_post.teaser = content[:200] + '...'

    def _set_teaser(self):
        # Writing the computed teaser persists it as the manual teaser.
        for blog_post in self:
            blog_post.teaser_manual = blog_post.teaser

    @api.depends('create_date', 'published_date')
    def _compute_post_date(self):
        # Displayed publication date: explicit published_date, else creation.
        for blog_post in self:
            if blog_post.published_date:
                blog_post.post_date = blog_post.published_date
            else:
                blog_post.post_date = blog_post.create_date

    def _set_post_date(self):
        for blog_post in self:
            blog_post.published_date = blog_post.post_date
            if not blog_post.published_date:
                # low-level _write keeps post_date in sync without re-running
                # this inverse
                blog_post._write(dict(post_date=blog_post.create_date)
                                 )  # dont trigger inverse function

    def _check_for_publication(self, vals):
        """Notify blog followers when posts get published.

        :param dict vals: the create/write values that triggered the check
        :returns: True if a publication notification was sent, else False
        """
        if vals.get('is_published'):
            for post in self:
                post.blog_id.message_post_with_view(
                    'website_blog.blog_post_template_new_post',
                    subject=post.name,
                    values={'post': post},
                    subtype_id=self.env['ir.model.data'].xmlid_to_res_id(
                        'website_blog.mt_blog_blog_published'))
            return True
        return False

    @api.model
    def create(self, vals):
        # mail_create_nolog: skip the automatic "created" log message
        post_id = super(BlogPost,
                        self.with_context(mail_create_nolog=True)).create(vals)
        post_id._check_for_publication(vals)
        return post_id

    def write(self, vals):
        """Write, stamping ``published_date`` when a post is (un)published
        without an explicit date, then fire publication notifications."""
        result = True
        for post in self:
            copy_vals = dict(vals)
            published_in_vals = set(
                vals.keys()) & {'is_published', 'website_published'}
            # only stamp a date if none was given and the current one is not
            # already in the past (i.e. the post was not published before)
            if (published_in_vals and 'published_date' not in vals and
                (not post.published_date
                 or post.published_date <= fields.Datetime.now())):
                copy_vals['published_date'] = vals[list(
                    published_in_vals)[0]] and fields.Datetime.now() or False
            # NOTE(review): this calls super with ``self`` (the whole
            # recordset) on every pass of the per-record loop, so each
            # iteration's copy_vals — including the per-post published_date —
            # is applied to ALL records; ``super(BlogPost, post)`` looks
            # intended. Confirm against upstream before changing.
            result &= super(BlogPost, self).write(copy_vals)
        self._check_for_publication(vals)
        return result

    def get_access_action(self, access_uid=None):
        """ Instead of the classic form view, redirect to the post on website
        directly if user is an employee or if the post is published. """
        self.ensure_one()
        user = access_uid and self.env['res.users'].sudo().browse(
            access_uid) or self.env.user
        # share users (portal/public) without a published post fall back to
        # the standard backend access action
        if user.share and not self.sudo().website_published:
            return super(BlogPost, self).get_access_action(access_uid)
        return {
            'type': 'ir.actions.act_url',
            'url': self.website_url,
            'target': 'self',
            'target_type': 'public',
            'res_id': self.id,
        }

    def _notify_get_groups(self):
        """ Add access button to everyone if the document is published. """
        groups = super(BlogPost, self)._notify_get_groups()
        if self.website_published:
            for group_name, group_method, group_data in groups:
                group_data['has_button_access'] = True
        return groups

    def _notify_record_by_inbox(self,
                                message,
                                recipients_data,
                                msg_vals=False,
                                **kwargs):
        """ Override to avoid keeping all notified recipients of a comment.
        We avoid tracking needaction on post comments. Only emails should be
        sufficient. """
        if msg_vals.get('message_type', message.message_type) == 'comment':
            return
        return super(BlogPost,
                     self)._notify_record_by_inbox(message,
                                                   recipients_data,
                                                   msg_vals=msg_vals,
                                                   **kwargs)

    def _default_website_meta(self):
        # Open Graph / Twitter card metadata for the post's website page.
        res = super(BlogPost, self)._default_website_meta()
        res['default_opengraph']['og:description'] = res['default_twitter'][
            'twitter:description'] = self.subtitle
        res['default_opengraph']['og:type'] = 'article'
        res['default_opengraph']['article:published_time'] = self.post_date
        res['default_opengraph']['article:modified_time'] = self.write_date
        res['default_opengraph']['article:tag'] = self.tag_ids.mapped('name')
        # [4:-1] strips the surrounding "url(" and ")" from the CSS value
        res['default_opengraph']['og:image'] = res['default_twitter'][
            'twitter:image'] = json.loads(self.cover_properties).get(
                'background-image', 'none')[4:-1]
        res['default_opengraph']['og:title'] = res['default_twitter'][
            'twitter:title'] = self.name
        res['default_meta_description'] = self.subtitle
        return res
class Company(models.Model):
    """Company record.

    Most identity/contact fields are related to (or synchronized with) the
    linked ``res.partner`` record; address fields are computed from the
    partner and written back through inverse methods.
    """
    _name = "res.company"
    _description = 'Companies'
    _order = 'sequence, name'

    def copy(self, default=None):
        """Forbid duplicating companies altogether.

        :raises UserError: always; a company must be created explicitly.
        """
        raise UserError(
            _('Duplicating a company is not allowed. Please create a new company instead.'
              ))

    def _get_logo(self):
        # Default for the `logo` field: the placeholder image shipped with
        # the `base` addon, base64-encoded as Binary fields expect.
        # Use a context manager so the file handle is closed (the previous
        # bare open().read() leaked the descriptor).
        logo_path = os.path.join(tools.config['root_path'], 'addons', 'base',
                                 'static', 'img', 'res_company_logo.png')
        with open(logo_path, 'rb') as logo_file:
            return base64.b64encode(logo_file.read())

    @api.model
    def _get_euro(self):
        # Heuristic: the currency whose rate is exactly 1 is the reference
        # currency of the rate table (historically EUR, hence the name).
        return self.env['res.currency.rate'].search([('rate', '=', 1)],
                                                    limit=1).currency_id

    @api.model
    def _get_user_currency(self):
        # Currency of the current user's company, falling back to the
        # reference currency when the company has none set.
        currency_id = self.env['res.users'].browse(
            self._uid).company_id.currency_id
        return currency_id or self._get_euro()

    def _get_default_favicon(self, original=False):
        """Return the default favicon, base64-encoded.

        :param original: if True, return web's stock ``favicon.ico``
            unmodified; otherwise tint its bottom pixel row with a random
            color so each company gets a visually distinct favicon.
        """
        img_path = get_resource_path('web', 'static/src/img/favicon.ico')
        with tools.file_open(img_path, 'rb') as f:
            if original:
                return base64.b64encode(f.read())
            # Modify the source image to add a colored bar on the bottom
            # This could seem overkill to modify the pixels 1 by 1, but
            # Pillow doesn't provide an easy way to do it, and this
            # is acceptable for a 16x16 image.
            color = (randrange(32, 224, 24), randrange(32, 224, 24),
                     randrange(32, 224, 24))
            original = Image.open(f)
            new_image = Image.new('RGBA', original.size)
            height = original.size[1]
            width = original.size[0]
            bar_size = 1
            for y in range(height):
                for x in range(width):
                    pixel = original.getpixel((x, y))
                    # Only the bottom `bar_size` row(s) get the random color.
                    if height - bar_size <= y + 1 <= height:
                        new_image.putpixel(
                            (x, y), (color[0], color[1], color[2], 255))
                    else:
                        new_image.putpixel(
                            (x, y), (pixel[0], pixel[1], pixel[2], pixel[3]))
            stream = io.BytesIO()
            new_image.save(stream, format="ICO")
            return base64.b64encode(stream.getvalue())

    name = fields.Char(related='partner_id.name',
                       string='Company Name',
                       required=True,
                       store=True,
                       readonly=False)
    sequence = fields.Integer(
        help='Used to order Companies in the company switcher', default=10)
    parent_id = fields.Many2one('res.company',
                                string='Parent Company',
                                index=True)
    child_ids = fields.One2many('res.company',
                                'parent_id',
                                string='Child Companies')
    partner_id = fields.Many2one('res.partner',
                                 string='Partner',
                                 required=True)
    report_header = fields.Text(
        string='Company Tagline',
        help=
        "Appears by default on the top right corner of your printed documents (report header)."
    )
    report_footer = fields.Text(
        string='Report Footer',
        translate=True,
        help="Footer text displayed at the bottom of all reports.")
    logo = fields.Binary(related='partner_id.image_1920',
                         default=_get_logo,
                         string="Company Logo",
                         readonly=False)
    # logo_web: do not store in attachments, since the image is retrieved in SQL for
    # performance reasons (see addons/web/controllers/main.py, Binary.company_logo)
    logo_web = fields.Binary(compute='_compute_logo_web',
                             store=True,
                             attachment=False)
    currency_id = fields.Many2one(
        'res.currency',
        string='Currency',
        required=True,
        default=lambda self: self._get_user_currency())
    user_ids = fields.Many2many('res.users',
                                'res_company_users_rel',
                                'cid',
                                'user_id',
                                string='Accepted Users')
    account_no = fields.Char(string='Account No.')
    # Address fields mirror the partner's address (compute + inverse pairs).
    street = fields.Char(compute='_compute_address', inverse='_inverse_street')
    street2 = fields.Char(compute='_compute_address',
                          inverse='_inverse_street2')
    zip = fields.Char(compute='_compute_address', inverse='_inverse_zip')
    city = fields.Char(compute='_compute_address', inverse='_inverse_city')
    state_id = fields.Many2one('res.country.state',
                               compute='_compute_address',
                               inverse='_inverse_state',
                               string="Fed. State")
    bank_ids = fields.One2many('res.partner.bank',
                               'company_id',
                               string='Bank Accounts',
                               help='Bank accounts related to this company')
    country_id = fields.Many2one('res.country',
                                 compute='_compute_address',
                                 inverse='_inverse_country',
                                 string="Country")
    email = fields.Char(related='partner_id.email', store=True, readonly=False)
    phone = fields.Char(related='partner_id.phone', store=True, readonly=False)
    website = fields.Char(related='partner_id.website', readonly=False)
    vat = fields.Char(related='partner_id.vat', string="Tax ID",
                      readonly=False)
    company_registry = fields.Char()
    paperformat_id = fields.Many2one(
        'report.paperformat',
        'Paper format',
        default=lambda self: self.env.ref('base.paperformat_euro',
                                          raise_if_not_found=False))
    external_report_layout_id = fields.Many2one('ir.ui.view',
                                                'Document Template')
    base_onboarding_company_state = fields.Selection(
        [('not_done', "Not done"), ('just_done', "Just done"),
         ('done', "Done")],
        string="State of the onboarding company step",
        default='not_done')
    favicon = fields.Binary(
        string="Company Favicon",
        help=
        "This field holds the image used to display a favicon for a given company.",
        default=_get_default_favicon)
    font = fields.Selection([("Lato", "Lato"), ("Roboto", "Roboto"),
                             ("Open_Sans", "Open Sans"),
                             ("Montserrat", "Montserrat"), ("Oswald", "Oswald"),
                             ("Raleway", "Raleway")],
                            default="Lato")
    primary_color = fields.Char()
    secondary_color = fields.Char()

    _sql_constraints = [('name_uniq', 'unique (name)',
                         'The company name must be unique !')]

    def init(self):
        # Backfill a default paper format on companies created before the
        # field existed (module install/upgrade path). The xmlid lookup is
        # loop-invariant, so resolve it once up front.
        paperformat_euro = self.env.ref('base.paperformat_euro', False)
        if paperformat_euro:
            for company in self.search([('paperformat_id', '=', False)]):
                company.write({'paperformat_id': paperformat_euro.id})
        sup = super(Company, self)
        if hasattr(sup, 'init'):
            sup.init()

    def _get_company_address_fields(self, partner):
        # Mapping used by _compute_address to copy the partner's address
        # onto the company record.
        return {
            'street': partner.street,
            'street2': partner.street2,
            'city': partner.city,
            'zip': partner.zip,
            'state_id': partner.state_id,
            'country_id': partner.country_id,
        }

    # TODO @api.depends(): currently now way to formulate the dependency on the
    # partner's contact address
    def _compute_address(self):
        for company in self.filtered(lambda company: company.partner_id):
            address_data = company.partner_id.sudo().address_get(
                adr_pref=['contact'])
            if address_data['contact']:
                partner = company.partner_id.browse(
                    address_data['contact']).sudo()
                company.update(company._get_company_address_fields(partner))

    # Inverse methods: push each edited address field back to the partner.
    def _inverse_street(self):
        for company in self:
            company.partner_id.street = company.street

    def _inverse_street2(self):
        for company in self:
            company.partner_id.street2 = company.street2

    def _inverse_zip(self):
        for company in self:
            company.partner_id.zip = company.zip

    def _inverse_city(self):
        for company in self:
            company.partner_id.city = company.city

    def _inverse_state(self):
        for company in self:
            company.partner_id.state_id = company.state_id

    def _inverse_country(self):
        for company in self:
            company.partner_id.country_id = company.country_id

    @api.depends('partner_id.image_1920')
    def _compute_logo_web(self):
        # Resized copy of the partner image (180px wide), stored in the
        # table itself for fast SQL retrieval (see the field definition).
        for company in self:
            company.logo_web = tools.image_process(
                company.partner_id.image_1920, size=(180, 0))

    @api.onchange('state_id')
    def _onchange_state(self):
        if self.state_id.country_id:
            self.country_id = self.state_id.country_id

    def on_change_country(self, country_id):
        # This function is called from account/models/chart_template.py, hence decorated with `multi`.
        self.ensure_one()
        currency_id = self._get_user_currency()
        if country_id:
            currency_id = self.env['res.country'].browse(
                country_id).currency_id
        return {'value': {'currency_id': currency_id.id}}

    @api.onchange('country_id')
    def _onchange_country_id_wrapper(self):
        # Restrict the selectable states to the chosen country and apply
        # the field values computed by on_change_country().
        res = {'domain': {'state_id': []}}
        if self.country_id:
            res['domain']['state_id'] = [('country_id', '=',
                                          self.country_id.id)]
        values = self.on_change_country(self.country_id.id)['value']
        for fname, value in values.items():
            setattr(self, fname, value)
        return res

    @api.model
    def _name_search(self,
                     name,
                     args=None,
                     operator='ilike',
                     limit=100,
                     name_get_uid=None):
        context = dict(self.env.context)
        newself = self
        if context.pop('user_preference', None):
            # We browse as superuser. Otherwise, the user would be able to
            # select only the currently visible companies (according to rules,
            # which are probably to allow to see the child companies) even if
            # she belongs to some other companies.
            companies = self.env.user.company_ids
            args = (args or []) + [('id', 'in', companies.ids)]
            newself = newself.sudo()
        return super(Company, newself.with_context(context))._name_search(
            name=name,
            args=args,
            operator=operator,
            limit=limit,
            name_get_uid=name_get_uid)

    @api.model
    @api.returns('self', lambda value: value.id)
    def _company_default_get(self, object=False, field=False):
        """ Returns the user's company - Deprecated """
        _logger.warning(
            _("The method '_company_default_get' on res.company is deprecated and shouldn't be used anymore"
              ))
        return self.env.company

    # deprecated, use clear_caches() instead
    def cache_restart(self):
        self.clear_caches()

    @api.model
    def create(self, vals):
        """Create a company; when only a name is given (no partner_id),
        also create the matching ``res.partner`` company record."""
        if not vals.get('favicon'):
            vals['favicon'] = self._get_default_favicon()
        if not vals.get('name') or vals.get('partner_id'):
            self.clear_caches()
            return super(Company, self).create(vals)
        partner = self.env['res.partner'].create({
            'name': vals['name'],
            'is_company': True,
            'image_1920': vals.get('logo'),
            'email': vals.get('email'),
            'phone': vals.get('phone'),
            'website': vals.get('website'),
            'vat': vals.get('vat'),
        })
        vals['partner_id'] = partner.id
        self.clear_caches()
        company = super(Company, self).create(vals)
        # The write is made on the user to set it automatically in the multi company group.
        self.env.user.write({'company_ids': [(4, company.id)]})
        # Make sure that the selected currency is enabled
        if vals.get('currency_id'):
            currency = self.env['res.currency'].browse(vals['currency_id'])
            if not currency.active:
                currency.write({'active': True})
        return company

    def write(self, values):
        self.clear_caches()
        # Make sure that the selected currency is enabled
        if values.get('currency_id'):
            currency = self.env['res.currency'].browse(values['currency_id'])
            if not currency.active:
                currency.write({'active': True})
        return super(Company, self).write(values)

    @api.constrains('parent_id')
    def _check_parent_id(self):
        if not self._check_recursion():
            raise ValidationError(_('You cannot create recursive companies.'))

    def open_company_edit_report(self):
        self.ensure_one()
        return self.env['res.config.settings'].open_company()

    def write_company_and_print_report(self):
        # Re-print the report identified in the context for the active
        # records (used by report-configuration flows).
        context = self.env.context
        report_name = context.get('default_report_name')
        active_ids = context.get('active_ids')
        active_model = context.get('active_model')
        if report_name and active_ids and active_model:
            docids = self.env[active_model].browse(active_ids)
            return (self.env['ir.actions.report'].search(
                [('report_name', '=', report_name)],
                limit=1).report_action(docids))

    @api.model
    def action_open_base_onboarding_company(self):
        """ Onboarding step for company basic information. """
        action = self.env.ref(
            'base.action_open_base_onboarding_company').read()[0]
        action['res_id'] = self.env.company.id
        return action

    def set_onboarding_step_done(self, step_name):
        # Transition a step from 'not_done' to 'just_done' (one-shot
        # animation state; later folded into 'done').
        if self[step_name] == 'not_done':
            self[step_name] = 'just_done'

    def get_and_update_onbarding_state(self, onboarding_state, steps_states):
        """ Needed to display onboarding animations only one time.

        Returns the previous value of each step (and of the panel state)
        while collapsing 'just_done' into 'done', so the client can play
        the "just done" animation exactly once.
        """
        old_values = {}
        all_done = True
        for step_state in steps_states:
            old_values[step_state] = self[step_state]
            if self[step_state] == 'just_done':
                self[step_state] = 'done'
            all_done = all_done and self[step_state] == 'done'
        if all_done:
            if self[onboarding_state] == 'not_done':
                # string `onboarding_state` instead of variable name is not an error
                old_values['onboarding_state'] = 'just_done'
            else:
                old_values['onboarding_state'] = 'done'
            self[onboarding_state] = 'done'
        return old_values

    def action_save_onboarding_company_step(self):
        # Having a street filled in is the completion criterion for the
        # "company data" onboarding step.
        if bool(self.street):
            self.set_onboarding_step_done('base_onboarding_company_state')

    @api.model
    def _get_main_company(self):
        # The canonical company: the `base.main_company` xmlid when it
        # still exists, otherwise the oldest company in the database.
        try:
            main_company = self.sudo().env.ref('base.main_company')
        except ValueError:
            main_company = self.env['res.company'].sudo().search([],
                                                                 limit=1,
                                                                 order="id")
        return main_company

    def update_scss(self):
        """ update the company scss stylesheet """
        scss_properties = []
        if self.primary_color:
            scss_properties.append('$o-company-primary-color:%s;' %
                                   self.primary_color)
        if self.secondary_color:
            scss_properties.append('$o-company-secondary-color:%s;' %
                                   self.secondary_color)
        if self.font:
            scss_properties.append('$o-company-font:%s;' % self.font)
        # Note: ''.join already yields "" for an empty list, so no extra
        # normalization is needed before encoding.
        scss_string = '\n'.join(scss_properties)
        scss_data = base64.b64encode((scss_string).encode('utf-8'))
        attachment = self.env['ir.attachment'].search([('name', '=',
                                                        'res.company.scss')])
        attachment.write({'datas': scss_data})
        return ''
class Country(models.Model):
    """Country reference data (ISO code, address layout, phone prefix)."""
    _name = 'res.country'
    _description = 'Country'
    _order = 'name'

    name = fields.Char(string='Country Name',
                       required=True,
                       translate=True,
                       help='The full name of the country.')
    code = fields.Char(
        string='Country Code',
        size=2,
        help=
        'The ISO country code in two chars. \nYou can use this field for quick search.'
    )
    address_format = fields.Text(
        string="Layout in Reports",
        help="Display format to use for addresses belonging to this country.\n\n"
        "You can use python-style string pattern with all the fields of the address "
        "(for example, use '%(street)s' to display the field 'street') plus"
        "\n%(state_name)s: the name of the state"
        "\n%(state_code)s: the code of the state"
        "\n%(country_name)s: the name of the country"
        "\n%(country_code)s: the code of the country",
        default=
        '%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s'
    )
    address_view_id = fields.Many2one(
        comodel_name='ir.ui.view',
        string="Input View",
        domain=[('model', '=', 'res.partner'), ('type', '=', 'form')],
        help=
        "Use this field if you want to replace the usual way to encode a complete address. "
        "Note that the address_format field is used to modify the way to display addresses "
        "(in reports for example), while this field is used to modify the input form for "
        "addresses.")
    currency_id = fields.Many2one('res.currency', string='Currency')
    image = fields.Binary(attachment=True)
    phone_code = fields.Integer(string='Country Calling Code')
    country_group_ids = fields.Many2many('res.country.group',
                                         'res_country_res_country_group_rel',
                                         'res_country_id',
                                         'res_country_group_id',
                                         string='Country Groups')
    state_ids = fields.One2many('res.country.state',
                                'country_id',
                                string='States')
    name_position = fields.Selection(
        [
            ('before', 'Before Address'),
            ('after', 'After Address'),
        ],
        string="Customer Name Position",
        default="before",
        help=
        "Determines where the customer/company name should be placed, i.e. after or before the address."
    )
    vat_label = fields.Char(
        string='Vat Label',
        translate=True,
        help="Use this field if you want to change vat label.")

    _sql_constraints = [('name_uniq', 'unique (name)',
                         'The name of the country must be unique !'),
                        ('code_uniq', 'unique (code)',
                         'The code of the country must be unique !')]

    # name_search delegates to the module-level `location_name_search`
    # helper (its matching behavior is defined where that helper lives).
    name_search = location_name_search

    @api.model_create_multi
    def create(self, vals_list):
        # ISO country codes are stored uppercase; normalize on the way in.
        for vals in vals_list:
            if vals.get('code'):
                vals['code'] = vals['code'].upper()
        return super(Country, self).create(vals_list)

    def write(self, vals):
        # Keep the country code uppercase on update as well.
        if vals.get('code'):
            vals['code'] = vals['code'].upper()
        return super(Country, self).write(vals)

    def get_address_fields(self):
        """Return the field names referenced by ``address_format``.

        E.g. the default format yields ``['street', 'street2', 'city',
        'state_code', 'zip', 'country_name']``. The pattern is anchored on
        the ``%(`` placeholder prefix so literal parenthesized text in a
        customized format is not mistaken for a field name (the previous
        ``r'\((.+?)\)'`` matched any parentheses).
        """
        self.ensure_one()
        return re.findall(r'%\((.+?)\)', self.address_format)