def load_test(module_name, idref, mode):
    """Run a module's test data file inside a disposable transaction.

    ``cr``, ``_load_data``, ``_test_logger`` and ``odoo`` come from the
    enclosing scope.  The transaction is committed first so that the
    unconditional rollback in ``finally`` only discards what the test
    file itself did.

    :param module_name: technical name of the module under test
    :param idref: mapping of already-resolved XML ids
    :param mode: load mode forwarded to ``_load_data``
    :return: ``True`` on success, ``False`` if the test file raised
    """
    cr.commit()
    try:
        _load_data(cr, module_name, idref, mode, 'test')
        return True
    except Exception:
        _test_logger.exception(
            'module %s: an exception occurred in a test', module_name)
        return False
    finally:
        cr.rollback()
        # avoid keeping stale xml_id, etc. in cache
        odoo.registry(cr.dbname).clear_caches()
def load_test(idref, mode):
    """Run the test data file of ``package`` inside a savepoint.

    ``cr``, ``load_data``, ``package``, ``report`` and ``_test_logger`` are
    taken from the enclosing scope.  A savepoint (rather than a full
    commit/rollback) keeps the surrounding module-loading transaction
    intact while still discarding everything the test file changed.

    :param idref: mapping of already-resolved XML ids
    :param mode: load mode forwarded to ``load_data``
    :return: ``True`` on success, ``False`` if the test file raised
    """
    cr.execute("SAVEPOINT load_test_data_file")
    try:
        load_data(cr, idref, mode, 'test', package, report)
        return True
    except Exception:
        _test_logger.exception(
            'module %s: an exception occurred in a test', package.name)
        return False
    finally:
        cr.execute("ROLLBACK TO SAVEPOINT load_test_data_file")
        # avoid keeping stale xml_id, etc. in cache
        odoo.registry(cr.dbname).clear_caches()
def load_test(module_name, idref, mode):
    """Run a module's test data file; keep or discard its changes
    depending on the ``test_commit`` server option.

    ``cr``, ``_load_data``, ``tools`` and ``_test_logger`` come from the
    enclosing scope.  The initial commit makes prior work permanent so the
    final rollback (when ``test_commit`` is off) only undoes the test file.

    :return: ``True`` on success, ``False`` if the test file raised
    """
    cr.commit()
    try:
        _load_data(cr, module_name, idref, mode, "test")
        return True
    except Exception:
        _test_logger.exception("module %s: an exception occurred in a test", module_name)
        return False
    finally:
        # with --test-commit, persist what the tests created; otherwise undo it
        if tools.config.options["test_commit"]:
            cr.commit()
        else:
            cr.rollback()
        # avoid keeping stale xml_id, etc. in cache
        odoo.registry(cr.dbname).clear_caches()
def LocalService(name):
    """ The odoo.netsvc.LocalService() function is deprecated. It still works
        in two cases: workflows and reports. For workflows, instead of using
        LocalService('workflow'), odoo.workflow should be used (better yet,
        methods on odoo.osv.orm.Model should be used). For reports,
        odoo.report.render_report() should be used (methods on the Model
        should be provided too in the future).

    :param name: service name, either ``'workflow'`` or ``'report.<model>'``
    :return: the workflow module, a report object, or ``None`` for any
             other name (implicit fall-through)
    """
    assert odoo.conf.deprecation.allow_local_service
    _logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)
    if name == 'workflow':
        return odoo.workflow
    if name.startswith('report.'):
        # look first in the in-memory report registry...
        report = odoo.report.interface.report_int._reports.get(name)
        if report:
            return report
        else:
            # ...then fall back to a database lookup on the current
            # thread's database, if one is bound to the thread
            dbname = getattr(threading.currentThread(), 'dbname', None)
            if dbname:
                registry = odoo.registry(dbname)
                with registry.cursor() as cr:
                    return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
def run_scheduler(self, use_new_cursor=False, company_id=False):
    """Call the scheduler in order to check the running procurements
    (super method), to check the minimum stock rules and the availability
    of moves. This function is intended to be run for all the companies at
    the same time, so we run functions as SUPERUSER to avoid intercompanies
    and access rights issues.

    :param use_new_cursor: when True, work on a dedicated cursor with
        per-batch commits (appropriate for batch/cron jobs only)
    :param company_id: optional company to restrict processing to
    :return: empty dict (kept for API compatibility)
    """
    super(ProcurementOrder, self).run_scheduler(use_new_cursor=use_new_cursor, company_id=company_id)
    try:
        if use_new_cursor:
            cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=cr))  # TDE FIXME
        # Minimum stock rules
        self.sudo()._procure_orderpoint_confirm(use_new_cursor=use_new_cursor, company_id=company_id)
        # Search all confirmed stock_moves and try to assign them
        confirmed_moves = self.env['stock.move'].search(
            [('state', '=', 'confirmed')], limit=None,
            order='priority desc, date_expected asc')
        # Py3 fix: xrange() no longer exists; batch assignment by 100 with range()
        for x in range(0, len(confirmed_moves.ids), 100):  # TDE CLEANME: muf muf
            self.env['stock.move'].browse(confirmed_moves.ids[x:x + 100]).action_assign()
            if use_new_cursor:
                self._cr.commit()
        if use_new_cursor:
            self._cr.commit()
    finally:
        if use_new_cursor:
            try:
                self._cr.close()
            except Exception:
                # best effort: the cursor may already be closed
                pass
    return {}
def _refresh_google_token_json(self, refresh_token, service):  # exchange_AUTHORIZATION vs Token (service = calendar)
    """Exchange a Google OAuth2 refresh token for a fresh access token.

    :param refresh_token: the stored long-lived refresh token
    :param service: google service suffix used in config keys (e.g. 'calendar')
    :return: decoded JSON response of the Google token endpoint
    :raises UserError: when the client id/secret are not configured
    """
    get_param = self.env['ir.config_parameter'].sudo().get_param
    client_id = get_param('google_%s_client_id' % (service,), default=False)
    client_secret = get_param('google_%s_client_secret' % (service,), default=False)
    if not client_id or not client_secret:
        raise UserError(_("The account for the Google service '%s' is not configured") % service)
    headers = {"content-type": "application/x-www-form-urlencoded"}
    data = {
        'refresh_token': refresh_token,
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'refresh_token',
    }
    try:
        dummy, response, dummy = self._do_request(GOOGLE_TOKEN_ENDPOINT, params=data, headers=headers, type='POST', preuri='')
        return response
    except requests.HTTPError as error:
        if error.response.status_code == 400:  # invalid grant
            # The refresh token is dead: wipe it so the user re-authorizes.
            # NOTE(review): this reads the HTTP ``request`` session and a
            # dedicated cursor — confirm this method is only reached inside a
            # web request context, otherwise ``request`` is unbound.
            with registry(request.session.db).cursor() as cur:
                self.env(cur)['res.users'].browse(self.env.uid).write({'google_%s_rtoken' % service: False})
        error_key = error.response.json().get("error", "nc")
        _logger.exception("Bad google request : %s !", error_key)
        error_msg = _("Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired [%s]") % error_key
        raise self.env['res.config.settings'].get_config_warning(error_msg)
def poll(self, dbname, channels, last, options=None, timeout=TIMEOUT):
    """Return bus notifications for *channels* newer than id *last*,
    blocking up to *timeout* seconds waiting for new ones.

    :param dbname: database to poll
    :param channels: channel identifiers to listen on
    :param last: id of the last notification the client already has
    :param options: extra options forwarded to ``bus.bus.poll``
    :param timeout: max seconds to wait when no notification is pending
    :return: list of notifications (possibly empty on timeout)
    """
    if options is None:
        options = {}
    # Dont hang ctrl-c for a poll request, we need to bypass private
    # attribute access because we dont know before starting the thread that
    # it will handle a longpolling request
    if not odoo.evented:
        current = threading.current_thread()
        # NOTE(review): '_Thread__daemonic' is the Python 2 name-mangled
        # attribute; on Python 3 the private attribute is '_daemonic' —
        # confirm which interpreter this targets.
        current._Thread__daemonic = True
        # rename the thread to avoid tests waiting for a longpolling
        current.setName("openerp.longpolling.request.%s" % current.ident)
    registry = odoo.registry(dbname)
    # immediatly returns if past notifications exist
    with registry.cursor() as cr:
        env = api.Environment(cr, SUPERUSER_ID, {})
        notifications = env['bus.bus'].poll(channels, last, options)
    # or wait for future ones
    if not notifications:
        event = self.Event()
        # register this waiter on every requested channel
        for channel in channels:
            self.channels.setdefault(hashable(channel), []).append(event)
        try:
            event.wait(timeout=timeout)
            with registry.cursor() as cr:
                env = api.Environment(cr, SUPERUSER_ID, {})
                notifications = env['bus.bus'].poll(channels, last, options, force_status=True)
        except Exception:
            # timeout
            pass
    return notifications
def run(self, qid, uid, set_state=True):
    """Execute one queued job using two dedicated cursors.

    ``crc`` (control) tracks the queue record's state with explicit commits
    that survive a failure of the job itself; ``crj`` (job) runs the actual
    method as user ``uid`` and is rolled back if the method raises.

    :param qid: id of the queue record to run
    :param uid: user id the job method executes as
    :param set_state: when True, flag the queue 'running' before starting
    """
    Env, reg = api.Environment, registry(self.env.cr.dbname)
    with Env.manage(), reg.cursor() as crc, reg.cursor() as crj:
        control_env = Env(crc, SUPERUSER_ID, {})
        job_env = Env(crj, uid, {})
        # Load queue in a dedicated environment, dedicated to update
        # queue and steps states with explicit commits, outside
        # the job transaction.
        queue = control_env[self._name].browse(qid)
        if set_state:
            queue.state = 'running'
        try:
            getattr(job_env[queue.model], queue.method)(
                queue, *safe_eval(queue.args))
        except Exception:
            # discard the half-done job work, record the failure, then give
            # the optional failure hook a chance to react
            crj.rollback()
            queue.write(
                {'state': 'failed',
                 'error_log': traceback.format_exc()})
            if queue.failed_method:
                getattr(job_env[queue.model], queue.failed_method)(
                    queue, *safe_eval(queue.args))
        else:
            # success path: persist both transactions before flagging done
            crc.commit()
            crj.commit()
            queue.write({'state': 'done'})
        finally:
            crc.commit()
            crj.commit()
def environment():
    """Yield an environment backed by a fresh cursor on the current test
    database; the cursor is committed and closed when the block exits."""
    reg = odoo.registry(common.get_db_name())
    with reg.cursor() as cursor:
        yield odoo.api.Environment(cursor, ADMIN_USER_ID, {})
def exp_render_report(db, uid, object, ids, datas=None, context=None):
    """Render a report and record its outcome in the ``self_reports`` table.

    Allocates a unique report id under ``self_id_protect``, renders the
    report on a fresh cursor, then stores result/format (or the deferred
    exception) in ``self_reports[id]``.

    Fixes vs. original: Python 3 ``except ... as`` syntax (the old comma
    form is a SyntaxError on Py3), and the cursor is now always closed in a
    ``finally`` (it was leaked before).

    :param db: database name to render against
    :param uid: user id performing the rendering
    :param object: report service name
    :param ids: record ids to render
    :param datas: optional report data dict
    :param context: optional context dict
    """
    if not datas:
        datas = {}
    if not context:
        context = {}
    # allocate a unique report id under the lock
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()
    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
    cr = odoo.registry(db).cursor()
    try:
        result, format = odoo.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = odoo.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception as exception:  # Py3 fix: was "except Exception, exception"
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self_reports[id]['exception'] = odoo.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = odoo.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
        self_reports[id]['state'] = True
    finally:
        cr.close()  # fix: the cursor was never closed (connection leak)
def check_local_translation_done(self):
    """Fetch letters translated on the external platform and push each
    translation into the matching correspondence record.

    Every letter is processed on its own cursor so one failure does not
    abort the whole batch; errors are logged and the loop continues.

    Fixes vs. original: removed the ``reload(sys)`` /
    ``sys.setdefaultencoding('UTF8')`` hack (Python 2-only; ``reload`` and
    ``setdefaultencoding`` do not exist as builtins on Python 3) and
    replaced ``e.message`` (removed in Python 3) with the exception itself.

    :return: always ``True``
    """
    tc = translate_connector.TranslateConnect()
    letters_to_update = tc.get_translated_letters()
    for letter in letters_to_update:
        try:
            with api.Environment.manage():
                with registry(self.env.cr.dbname).cursor() as new_cr:
                    # Create a new environment with new cursor database
                    new_env = api.Environment(new_cr, self.env.uid, self.env.context)
                    correspondence = self.with_env(new_env).browse(
                        letter["letter_odoo_id"])
                    logger.info(
                        ".....CHECK TRANSLATION FOR LETTER {}"
                        .format(correspondence.id)
                    )
                    correspondence.update_translation(
                        letter["target_lang"], letter["text"],
                        letter["translator"])
                    tc.update_translation_to_treated(letter["id"])
        except Exception as e:
            # Py3 fix: Exception.message no longer exists; log the exception
            logger.error(
                "Error fetching a translation on translation platform: {}"
                .format(e)
            )
    return True
def execute(db, uid, obj, method, *args, **kw):
    """Validate the method name and run it on a fresh cursor for *db*,
    logging when a public method improperly returns ``None``."""
    threading.current_thread().dbname = db
    with odoo.registry(db).cursor() as cr:
        check_method_name(method)
        result = execute_cr(cr, uid, obj, method, *args, **kw)
        if result is None:
            _logger.info('The method %s of the object %s can not return `None` !', method, obj)
        return result
def __getattr__(self, name):
    """Proxy attribute access to a lazily-opened real cursor.

    On first access, open a cursor on ``self.dbname`` and re-enter its
    context manager ``self._depth`` times so that nested ``with`` levels
    already entered on the proxy are mirrored on the real cursor.

    :param name: attribute looked up on the underlying cursor
    :return: the corresponding attribute of the real cursor
    """
    cr = self._cursor
    if cr is None:
        # local import — presumably to avoid a circular import at module
        # load time; TODO confirm
        from odoo import registry
        cr = self._cursor = registry(self.dbname).cursor()
        for _ in range(self._depth):
            cr.__enter__()
    return getattr(cr, name)
def environment():
    """Yield an environment on a fresh cursor for the current database.

    The cursor is committed right after the block body runs, then closed by
    the context manager."""
    with registry(common.get_db_name()).cursor() as new_cr:
        yield api.Environment(new_cr, SUPERUSER_ID, {})
        new_cr.commit()
def execute(self):
    """Apply the settings wizard: default values, group memberships,
    ``set_*`` hooks and module (un)installation, then ask the client to
    reload.

    :return: an action dict — the next configuration step, the module
             installation action, or a client 'reload' action
    :raises AccessError: when executed by a non-admin user
    """
    self.ensure_one()
    if not self.env.user._is_admin():
        raise AccessError(_("Only administrators can change the settings"))
    self = self.with_context(active_test=False)
    classified = self._get_classified_fields()
    # default values fields
    IrValues = self.env['ir.values'].sudo()
    for name, model, field in classified['default']:
        IrValues.set_default(model, field, self[name])
    # group fields: modify group / implied groups
    for name, groups, implied_group in classified['group']:
        if self[name]:
            groups.write({'implied_ids': [(4, implied_group.id)]})
        else:
            # unlink the implied group and kick its members out of it
            groups.write({'implied_ids': [(3, implied_group.id)]})
            implied_group.write({'users': [(3, user.id) for user in groups.mapped('users')]})
    # other fields: execute all methods that start with 'set_'
    for method in dir(self):
        if method.startswith('set_'):
            getattr(self, method)()
    # module fields: install/uninstall the selected modules
    to_install = []
    to_uninstall_modules = self.env['ir.module.module']
    lm = len('module_')
    for name, module in classified['module']:
        if self[name]:
            to_install.append((name[lm:], module))
        else:
            if module and module.state in ('installed', 'to upgrade'):
                to_uninstall_modules += module
    if to_uninstall_modules:
        to_uninstall_modules.button_immediate_uninstall()
    action = self._install_modules(to_install)
    if action:
        return action
    # After the uninstall/install calls, the self.pool is no longer valid.
    # So we reach into the RegistryManager directly.
    ResConfig = registry(self._cr.dbname)['res.config']
    config = ResConfig.browse(self._cr, self._uid, [], self._context).next() or {}
    if config.get('type') not in ('ir.actions.act_window_close',):
        return config
    # force client-side reload (update user menu and current view)
    return {
        'type': 'ir.actions.client',
        'tag': 'reload',
    }
def dispatch(method, params):
    """Authenticate a report RPC call and dispatch it to the matching
    ``exp_*`` handler in this module."""
    db, uid, passwd = params[0:3]
    threading.current_thread().uid = uid
    params = params[3:]
    if method not in ('report', 'report_get', 'render_report'):
        raise KeyError("Method not supported %s" % method)
    security.check(db, uid, passwd)
    registry = odoo.registry(db).check_signaling()
    handler = globals()['exp_' + method]
    result = handler(db, uid, *params)
    registry.signal_caches_change()
    return result
def poll(self, channels, last, options=None):
    """Record one res.users.log entry per internal user/IP/day, then
    delegate to the standard bus polling."""
    if request.env.user.has_group('base.group_user'):
        remote_ip = request.httprequest.remote_addr
        midnight = Datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        already_logged = request.env['res.users.log'].search_count([
            ('create_uid', '=', request.env.user.id),
            ('ip', '=', remote_ip),
            ('create_date', '>=', Datetime.to_string(midnight))])
        if not already_logged:
            # write through a dedicated cursor so the log line is kept even
            # if the request transaction is rolled back
            with registry(request.env.cr.dbname).cursor() as cr:
                env = Environment(cr, request.env.user.id, {})
                env['res.users.log'].create({'ip': remote_ip})
    return super(BusController, self).poll(channels, last, options=options)
def receive(self, req):
    """ End-point to receive mail from an external SMTP server. """
    payload = req.jsonrequest.get('databases')
    for dbname in payload:
        raw_message = base64.b64decode(payload[dbname])
        try:
            with registry(dbname).cursor() as cr:
                env = api.Environment(cr, SUPERUSER_ID, {})
                env['mail.thread'].message_process(None, raw_message)
        except psycopg2.Error:
            # database-level failure for this db: skip it, keep processing
            pass
    return True
def setUp(self):
    """Snapshot the model's field set (verified again at teardown) and run
    the test on a test-mode cursor."""
    reg = odoo.registry()
    snapshot = set(reg[self.MODEL]._fields)

    def check_registry():
        # the registry must be properly reset after the test
        assert set(reg[self.MODEL]._fields) == snapshot

    self.addCleanup(check_registry)

    super(TestCustomFields, self).setUp()

    # use a test cursor instead of a real cursor
    self.registry.enter_test_mode(self.cr)
    self.addCleanup(self.registry.leave_test_mode)
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None, smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None): """Low-level function for sending an email (deprecated). :deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead. :param email_from: A string used to fill the `From` header, if falsy, config['email_from'] is used instead. Also used for the `Reply-To` header if `reply_to` is not provided :param email_to: a sequence of addresses to send the mail to. """ # If not cr, get cr from current thread database local_cr = None if not cr: db_name = getattr(threading.currentThread(), 'dbname', None) if db_name: local_cr = cr = odoo.registry(db_name).cursor() else: raise Exception("No database cursor found, please pass one explicitly") # Send Email try: mail_server_pool = odoo.registry(cr.dbname)['ir.mail_server'] res = False # Pack Message into MIME Object email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to, attachments, message_id, references, openobject_id, subtype, headers=headers) res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None, smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password, smtp_encryption=('ssl' if ssl else None), smtp_debug=debug) except Exception: _logger.exception("tools.email_send failed to deliver email") return False finally: if local_cr: cr.close() return res
def setUp(self):
    """Open a registry, cursor and superuser environment for the test case
    and register a cleanup that rolls everything back."""
    self.registry = odoo.registry(get_db_name())
    # current transaction's cursor
    self.cr = self.cursor()
    self.uid = odoo.SUPERUSER_ID
    # :class:`~odoo.api.Environment` for the current test case
    self.env = api.Environment(self.cr, self.uid, {})

    def reset():
        # rollback and close the cursor, and reset the environments
        self.registry.clear_caches()
        self.env.reset()
        self.cr.rollback()
        self.cr.close()

    self.addCleanup(reset)
def oauth2callback(self, **kw):
    """ This route/function is called by Google when user Accept/Refuse the consent of Google """
    state = json.loads(kw['state'])
    dbname = state.get('d')
    service = state.get('s')
    url_return = state.get('f')
    with registry(dbname).cursor() as cr:
        auth_code = kw.get('code')
        if auth_code:
            # success: store all tokens for the requested google service
            request.env(cr, request.session.uid)['google.%s' % service].set_all_tokens(auth_code)
            return redirect(url_return)
        if kw.get('error'):
            return redirect("%s%s%s" % (url_return, "?error=", kw['error']))
        return redirect("%s%s" % (url_return, "?error=Unknown_error"))
def run_scheduler(self, use_new_cursor=False, company_id=False):
    """ Call the scheduler to check the procurement order. This is intented
    to be done for all existing companies at the same time, so we're
    running all the methods as SUPERUSER to avoid intercompany and access
    rights issues.

    @param use_new_cursor: if set, use a dedicated cursor and auto-commit
        after processing each procurement. This is appropriate for batch
        jobs only.
    @return: Dictionary of values
    """
    # NOTE(review): ProcurementSudo is bound before the optional cursor
    # swap below, so it keeps using the caller's cursor even when
    # use_new_cursor is set — confirm this is intended.
    ProcurementSudo = self.env["procurement.order"].sudo()
    try:
        if use_new_cursor:
            cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=cr))  # TDE FIXME
        # Run confirmed procurements
        procurements = ProcurementSudo.search(
            [("state", "=", "confirmed")]
            + (company_id and [("company_id", "=", company_id)] or []))
        while procurements:
            procurements.run(autocommit=use_new_cursor)
            if use_new_cursor:
                self.env.cr.commit()
            # next batch, excluding what was just processed
            procurements = ProcurementSudo.search(
                [("id", "not in", procurements.ids), ("state", "=", "confirmed")]
                + (company_id and [("company_id", "=", company_id)] or []))
        # Check done procurements
        procurements = ProcurementSudo.search(
            [("state", "=", "running")]
            + (company_id and [("company_id", "=", company_id)] or []))
        while procurements:
            procurements.check(autocommit=use_new_cursor)
            if use_new_cursor:
                self.env.cr.commit()
            procurements = ProcurementSudo.search(
                [("id", "not in", procurements.ids), ("state", "=", "running")]
                + (company_id and [("company_id", "=", company_id)] or []))
    finally:
        if use_new_cursor:
            try:
                self.env.cr.close()
            except Exception:
                # best effort: the cursor may already be closed
                pass
    return {}
def setUp(self):
    """Snapshot the model's field set (verified at teardown), enter test
    mode, and allow forced unlink of custom fields during the test."""
    reg = odoo.registry()
    snapshot = set(reg[self.MODEL]._fields)

    def check_registry():
        # the registry must be properly reset after the test
        assert set(reg[self.MODEL]._fields) == snapshot

    self.addCleanup(check_registry)

    super(TestCustomFields, self).setUp()

    # use a test cursor instead of a real cursor
    self.registry.enter_test_mode()
    self.addCleanup(self.registry.leave_test_mode)

    # do not reload the registry after removing a field
    self.env = self.env(context={'_force_unlink': True})
def shell(self, dbname):
    """Open an interactive console; when *dbname* is given, pre-bind `env`
    and `self` (the admin user) on a live cursor."""
    scope = {'openerp': odoo}
    with odoo.api.Environment.manage():
        if dbname:
            with odoo.registry(dbname).cursor() as cr:
                uid = odoo.SUPERUSER_ID
                # use the admin user's own context (lang, tz, ...)
                ctx = odoo.api.Environment(cr, uid, {})['res.users'].context_get()
                env = odoo.api.Environment(cr, uid, ctx)
                scope['env'] = env
                scope['self'] = env.user
                self.console(scope)
        else:
            self.console(scope)
def poll(self, dbname, channels, last, options=None, timeout=TIMEOUT):
    """Return pending bus notifications for *channels* newer than *last*,
    optionally blocking up to *timeout* seconds for new ones.

    :param dbname: database to poll
    :param channels: channel identifiers to listen on
    :param last: id of the last notification the client already has
    :param options: dict; 'peek' returns immediately without waiting
    :param timeout: max seconds to wait for a new notification
    :return: list of notifications, or a dict in peek mode
    """
    if options is None:
        options = {}
    # Dont hang ctrl-c for a poll request, we need to bypass private
    # attribute access because we dont know before starting the thread that
    # it will handle a longpolling request
    if not odoo.evented:
        current = threading.current_thread()
        current._daemonic = True
        # rename the thread to avoid tests waiting for a longpolling
        current.setName("openerp.longpolling.request.%s" % current.ident)
    registry = odoo.registry(dbname)
    # immediatly returns if past notifications exist
    with registry.cursor() as cr:
        env = api.Environment(cr, SUPERUSER_ID, {})
        notifications = env['bus.bus'].poll(channels, last, options)
    # immediatly returns in peek mode
    if options.get('peek'):
        return dict(notifications=notifications, channels=channels)
    # or wait for future ones
    if not notifications:
        if not self.started:
            # Lazy start of events listener
            self.start()
        event = self.Event()
        # register this waiter on every requested channel
        for channel in channels:
            self.channels.setdefault(hashable(channel), set()).add(event)
        try:
            event.wait(timeout=timeout)
            with registry.cursor() as cr:
                env = api.Environment(cr, SUPERUSER_ID, {})
                notifications = env['bus.bus'].poll(channels, last, options)
        except Exception:
            # timeout
            pass
        finally:
            # gc pointers to event
            for channel in channels:
                channel_events = self.channels.get(hashable(channel))
                if channel_events and event in channel_events:
                    channel_events.remove(event)
    return notifications
def thread_session_update_state(dbname, uid, context):
    """Mark a clouder.web.session record as failed, from a worker thread.

    Runs on its own cursor so the state change is committed even if the
    caller's transaction is rolled back.

    NOTE(review): ``session_id`` is not defined inside this function — it
    must come from an enclosing scope (this looks like a thread target
    defined inside another function); confirm, otherwise this raises
    NameError.
    """
    # Creating a separate cursor to commit errors
    # in case of exception thrown
    with odoo.api.Environment.manage():
        with odoo.registry(dbname).cursor() as new_cr:
            new_env = api.Environment(new_cr, uid, context)
            orm_clws = new_env['clouder.web.session']
            session = orm_clws.browse([session_id])[0]
            session.state = 'error'
            # Commit the change we just made
            new_env.cr.commit()
    # Return to avoid getting back to
    # the other instructions after this thread
    return
def run_scheduler(self, use_new_cursor=False, company_id=False):
    """Run the scheduler tasks (minimum stock rules, move availability) for
    all companies at once; tasks are executed as SUPERUSER to avoid
    inter-company and access-right issues."""
    try:
        if use_new_cursor:
            # batch mode: work on a dedicated cursor
            new_cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=new_cr))  # TDE FIXME
        self._run_scheduler_tasks(use_new_cursor=use_new_cursor, company_id=company_id)
    finally:
        if use_new_cursor:
            try:
                self._cr.close()
            except Exception:
                pass
    return {}
def _login(cls, db, login, password):
    """Standard login first; on failure, fall back to LDAP authentication
    for logins that do not exist locally (creating the user on success)."""
    try:
        return super(Users, cls)._login(db, login, password)
    except AccessDenied as denied:
        with registry(db).cursor() as cr:
            cr.execute("SELECT id FROM res_users WHERE lower(login)=%s", (login,))
            if cr.fetchone():
                # the login exists locally: the password really was wrong
                raise denied
            env = api.Environment(cr, SUPERUSER_ID, {})
            Ldap = env['res.company.ldap']
            for conf in Ldap._get_ldap_dicts():
                entry = Ldap._authenticate(conf, login, password)
                if entry:
                    return Ldap._get_or_create_user(conf, login, entry)
            raise denied
def dispatch(method, params):
    """Authenticate and run an object RPC call (execute / execute_kw)."""
    db, uid, passwd = params[0:3]
    # set uid tracker - cleaned up at the WSGI
    # dispatching phase in odoo.service.wsgi_server.application
    threading.current_thread().uid = uid
    params = params[3:]
    if method == 'obj_list':
        raise NameError("obj_list has been discontinued via RPC as of 6.0, please query ir.model directly!")
    if method not in ('execute', 'execute_kw'):
        raise NameError("Method not available %s" % method)
    security.check(db, uid, passwd)
    registry = odoo.registry(db).check_signaling()
    handler = globals()[method]
    result = handler(db, uid, *params)
    registry.signal_caches_change()
    return result
def getProcessBankStatementLine(self, limit=0, use_new_cursor=False):
    """Auto-process unreconciled bank statement lines.

    For each line without journal entries: try to reconcile it against the
    export-layout lines matching the folio extracted from the line's
    reference; failing that, assign an account from the transaction-code
    mapping and create a fast counterpart, with analytic tags resolved per
    transaction code.

    :param limit: time budget in minutes (0 = unlimited)
    :param use_new_cursor: when True, use a dedicated cursor with periodic
        commits (batch mode)
    :return: True when at least one line was processed, else False
    """
    if use_new_cursor:
        cr = registry(self._cr.dbname).cursor()
        self = self.with_env(self.env(cr=cr))
    statementLines = self.env['account.bank.statement.line']
    Account = self.env["account.account"]
    LayoutLine = self.env['bank.statement.export.layout.line']
    AccountMoveLine = self.env['account.move.line']
    Afiliation = self.env["account.bank.afiliation"]
    AccountAnalytic = self.env["account.analytic.account"]
    Tag = self.env["account.analytic.tag"]
    Codes = self.env['account.code.bank.statement']
    self._end_balance()
    if use_new_cursor:
        cr.commit()
    # transaction code -> account id mapping for this company/journal
    codigo = {}
    for codes_id in Codes.search([('company_id', '=', self.company_id.id),
                                  ('journal_id', '=', self.journal_id.id)]):
        for code in codes_id.code_line_ids:
            codigo[code.name] = code.account_id and code.account_id.id or False
            # codigo[ code.name.ljust(3, " ") ] = code.account_id and code.account_id.id or False
    extra_code = []
    ctx = dict(self._context, force_price_include=False)
    # NOTE(review): ctx is reset to {} inside the loop after a failed
    # reconciliation, so later iterations call process_reconciliation with
    # a different context than the first one — confirm this is intended.
    len_line_ids = len(
        self.line_ids.filtered(lambda l: not l.journal_entry_ids))
    _logger.info("01 ----------- Start statement process of %s lines" % (len_line_ids))
    counter = 0
    ret = False
    # time budget: 'limit' minutes expressed in milliseconds
    milliseconds = limit * 60 * 1000
    milliseconds_now = millis()
    for indx, st_line in enumerate(
            self.line_ids.filtered(lambda l: not l.journal_entry_ids)):
        if st_line.journal_entry_ids:
            continue
        if limit != 0:
            # stop processing once the time budget is exhausted
            milliseconds_now_02 = millis()
            milliseconds_tmp = (milliseconds_now_02 - milliseconds_now)
            if milliseconds_tmp >= milliseconds:
                break
        # the note is expected to look like "CODE|DESCRIPTION|..."
        transaccion = st_line.note.split("|")
        codigo_transaccion = transaccion and transaccion[0].strip() or ""
        concepto_transaccion = transaccion and transaccion[1].strip() or ""
        ref = st_line.ref
        if codigo_transaccion in ['T17'] and ref:
            ref = ref.replace('0000001', '')
            # drop the ref when a matching T22 line with same amount exists
            std_ids = statementLines.search_read(
                [('statement_id', '=', self.id),
                 ('name', '=', st_line.name),
                 ('ref', '=', st_line.ref),
                 ('note', 'like', 'T22|')],
                ['name', 'ref', 'note', 'amount'])
            _logger.info("---------- std_ids %s " % (std_ids))
            for std_id in std_ids:
                if abs(std_id['amount']) == abs(st_line.amount):
                    ref = ''
        if codigo_transaccion in ['T22']:
            ref = ''
        _logger.info("-------- codigo_transaccion %s %s " % (codigo_transaccion, ref))
        # folio used to match the export layout line; per-code extraction
        folioOdoo = ref and ref[:10] or ''
        if codigo_transaccion in ['T06']:
            folioOdoo = ref[7:17]
        if codigo_transaccion in ["P14"]:
            folioOdoo = ref.replace('REF:', '').replace('CIE:1', '').replace('CIE:0', '').strip()
        account_id = False
        _logger.info(
            "02 *********** COUNT: %s | Process Line %s/%s - CODE %s -%s" %
            (counter, indx, len_line_ids, codigo_transaccion, st_line.name))
        counter += 1
        # if counter > 1:
        #     break
        res = False
        for layoutline_id in LayoutLine.search_read(
                [('name', '=', folioOdoo)],
                fields=[
                    'id', 'name', 'cuenta_cargo', 'cuenta_abono',
                    'motivo_pago', 'referencia_numerica', 'layout_id',
                    'movel_line_ids', 'partner_id', 'importe'
                ]):
            movel_line_ids = layoutline_id.get(
                'movel_line_ids') and layoutline_id['movel_line_ids'] or []
            move_lines = AccountMoveLine.browse(movel_line_ids)
            # residual in the line's currency (falls back to company amount)
            line_residual = st_line.currency_id and st_line.amount_currency or st_line.amount
            line_currency = st_line.currency_id or st_line.journal_id.currency_id or st_line.company_id.currency_id
            amount_residual = move_lines and sum(
                aml.currency_id and aml.amount_residual_currency or aml.amount_residual
                for aml in move_lines)
            total_residual = amount_residual or 0.0
            balance = total_residual - line_residual
            open_balance_dicts = []
            counterpart_aml_dicts = []
            amount_total = abs(line_residual)
            payment_aml_rec = self.env['account.move.line']
            _logger.info(" TEST: Move IDS %s -%s " % (move_lines, st_line.name))
            for aml in move_lines:
                if aml.full_reconcile_id:
                    continue
                # Convertir a la moneda...
                # if aml.currency_id: break
                amount = aml.currency_id and aml.amount_residual_currency or aml.amount_residual
                if round(amount_total, 2) >= round(abs(amount), 2):
                    # move line fits entirely in the remaining amount
                    amount_total -= abs(amount)
                    counterpart_aml_dicts.append({
                        'name': aml.name if aml.name != '/' else aml.move_id.name,
                        'debit': amount < 0 and -amount or 0,
                        'credit': amount > 0 and amount or 0,
                        'move_line': aml,
                    })
                else:
                    # partial match: use the overall balance instead
                    counterpart_aml_dicts.append({
                        'name': aml.name if aml.name != '/' else aml.move_id.name,
                        'debit': balance < 0 and -balance or 0,
                        'credit': balance > 0 and balance or 0,
                        'move_line': aml,
                    })
            _logger.info("03 ----------- Start Reconcile %s - %s" % (counterpart_aml_dicts, st_line.name))
            res = st_line.with_context(ctx).process_reconciliation(
                counterpart_aml_dicts, payment_aml_rec, open_balance_dicts)
            if use_new_cursor:
                cr.commit()
            _logger.info("04 ----------- End Reconcile: %s - %s" % (res.name, st_line.name))
            if res:
                break
        if res:
            _logger.info("------RES %s -%s " % (codigo_transaccion, res))
            ret = True
            continue
        # no layout match: fall back to the transaction-code account mapping
        ctx = {}
        if (codigo_transaccion in codigo):
            # for account_id in Account.search_read([('code_alias', 'ilike', codigo[codigo_transaccion]),
            #                                        ('company_id', '=', st_line.company_id.id)], fields=["name"]):
            #     st_line.account_id = account_id.get("id")
            st_line.account_id = codigo[codigo_transaccion]
            # resolve analytic tag/account from the afiliation matching the
            # line's name
            for afiliation_id in Afiliation.search_read(
                    [("name", "ilike", st_line.name)],
                    fields=["name", "description"], limit=1):
                for tag_id in Tag.search_read(
                        [("afiliation_id", "=", afiliation_id.get("id"))],
                        fields=["name", "code"]):
                    analytic_id = AccountAnalytic.search([
                        ("code", "=", tag_id.get("code"))
                    ])
                    ctx = {
                        "tag_id": tag_id.get("id"),
                        "analytic_id": analytic_id and analytic_id.id or False,
                        "import_etl": True
                    }
            # per-code analytic fallbacks when no afiliation matched
            if not ctx and codigo_transaccion in ["Y01", "Y15"]:
                ctx = self.getAnalyticTagIdsTransactions(
                    concepto_transaccion, conreplace="CE662143")
            elif not ctx and codigo_transaccion in ["Y16"]:
                ctx = self.getAnalyticTagIdsTransactions(
                    concepto_transaccion, conreplace="CI")
            elif not ctx and codigo_transaccion in ["C72"]:
                concepto_transaccion_tmp = st_line.ref
                ctx = self.getAnalyticTagIdsTransactions(
                    concepto_transaccion_tmp, conreplace="CI")
            elif not ctx and codigo_transaccion in ["W01"]:
                if (st_line.ref.upper().find('EMPENO') >= 0) or (st_line.ref.upper().find('EXPREQ') >= 0):
                    concepto_transaccion_tmp = st_line.ref
                    ctx = self.getAnalyticTagIdsTransactions(
                        concepto_transaccion_tmp, conreplace="CI")
            if not ctx:
                ctx = {"import_etl": True}
            ret = True
            st_line.with_context(ctx).fast_counterpart_creation()
            _logger.info(
                "05 ----------- Codigo/Cuenta: %s/%s - ACCID:%s - CTX:%s" %
                (codigo_transaccion, codigo[codigo_transaccion],
                 codigo[codigo_transaccion], ctx))
            if use_new_cursor:
                cr.commit()
        _logger.info("06 ----------- END LINE")
    if use_new_cursor:
        cr.commit()
        cr.close()
    return ret
def _postprocess_restored_db(self, dbname):
    """ Do some postprocessing after DB restored from backup

        The reason for this method, is to ensure that yodoo_client is
        installed on database and if needed updated. Also, we check
        installed addons and compare them with thats are available on
        disk, and if needed run update for them.

    :param dbname: name of the freshly restored database
    """
    to_update_modules = set()
    to_install_modules = set()
    modules_in_db = {}
    # manifest info for every module available on disk
    modules_on_disk = {
        mod: odoo.modules.module.load_information_from_description_file(mod)
        for mod in odoo.modules.module.get_modules()
    }
    auto_install_addons = odoo.tools.config.get(
        'yodoo_auto_install_addons', '')
    auto_install_addons = [
        a.strip() for a in auto_install_addons.split(',') if a.strip()
    ]
    # read installed module versions over a raw connection: the registry
    # for the restored db may not be loaded yet
    with closing(db_connect(dbname).cursor()) as cr:
        cr.execute("""
            SELECT name, latest_version
            FROM ir_module_module
            WHERE state = 'installed';
        """)
        modules_in_db = dict(cr.fetchall())
    if 'yodoo_client' not in modules_in_db:
        _logger.info("yodoo_client not installed, adding to install list")
        to_install_modules.add('yodoo_client')
    elif modules_in_db['yodoo_client'] != get_yodoo_client_version():
        _logger.info(
            "yodoo_client not up to date (%s != %s), "
            "adding to update list",
            modules_in_db['yodoo_client'], get_yodoo_client_version())
        to_update_modules.add('yodoo_client')
    for module_name in auto_install_addons:
        if module_name not in modules_on_disk:
            continue
        if module_name not in modules_in_db:
            _logger.info(
                "Module %s is mentioned in auto_install_addons list, "
                "but is not installed in database. Installing.",
                module_name)
            to_install_modules.add(module_name)
    # queue updates for installed modules whose on-disk version differs
    for module_name, db_version in modules_in_db.items():
        if module_name not in modules_on_disk:
            continue
        if db_version != modules_on_disk[module_name]['version']:
            _logger.info(
                "Module %s is not up to data, adding to update list.",
                module_name)
            to_update_modules.add(module_name)
    if to_install_modules or to_update_modules:
        _logger.info(
            "There are addons to install %s and to update %s found.",
            tuple(to_install_modules), tuple(to_update_modules))
        with registry(dbname).cursor() as cr:
            env = api.Environment(cr, SUPERUSER_ID, context={})
            env['ir.module.module'].update_list()
            if to_install_modules:
                env['ir.module.module'].search([
                    ('name', 'in', list(to_install_modules)),
                    ('state', 'in', ('uninstalled', 'to_install'))
                ]).button_immediate_install()
            if to_update_modules:
                # NOTE(review): this domain reuses the install-state filter
                # ('uninstalled', 'to_install'); modules queued for update
                # are 'installed', so this search likely matches nothing —
                # confirm whether ('installed', 'to upgrade') was intended.
                env['ir.module.module'].search([
                    ('name', 'in', list(to_update_modules)),
                    ('state', 'in', ('uninstalled', 'to_install'))
                ]).button_immediate_upgrade()
def put(self, key, value, tag='/', expire=DEFAULT_EXPIRE_SECONDS, serialize=None, new_env=False, overwrite_existing=True):
    """Store (or, on key collision, update) a key/value pair in the cache.

    :param key: cache key
    :param value: value to store (JSON-encoded when serialize == 'json')
    :param tag: namespace tag, '/' by default
    :param expire: lifetime in seconds from now
    :param serialize: 'json' to serialize the value, None to store as-is
    :param new_env: when True, write through a dedicated cursor + explicit
        commit so the entry survives a rollback of the caller's transaction
    :param overwrite_existing: NOTE(review): never read in the body — on a
        unique-key clash the record is always updated; confirm intent
    :raises Exception: generic 'CACHE PUT ERROR' on unexpected failure
    """
    self.do_expiration()
    try:
        expire_time = (
            datetime.utcnow() + timedelta(seconds=expire)).strftime('%Y-%m-%d %H:%M:%S')
        # Check serialize option
        if serialize == 'json':
            value = json.dumps(value)
        if not new_env:
            # savepoint keeps a unique-violation from poisoning the
            # caller's transaction
            with mute_logger('odoo.sql_db'), self.env.cr.savepoint():
                self.create({
                    'key': key,
                    'tag': tag,
                    'value': value,
                    'expire': expire_time,
                })
            logger.debug('CACHE PUT KEY: %s VALUE: %s', key, value)
        else:
            with mute_logger('odoo.sql_db'), api.Environment.manage():
                with registry(self.env.cr.dbname).cursor() as new_cr:
                    env = api.Environment(new_cr, self.env.uid, self.env.context)
                    env['kv_cache.cache'].create({
                        'key': key,
                        'tag': tag,
                        'value': value,
                        'expire': expire_time,
                    })
                    env.cr.commit()
            logger.debug('CACHE NEW ENV PUT KEY: %s VALUE: %s', key, value)
    except psycopg2.IntegrityError as e:
        # duplicate (key, tag): switch from insert to update
        if e.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
            if not new_env:
                with self.env.cr.savepoint():
                    # Find duplicate record to update
                    cache_record = self.env['kv_cache.cache'].search([
                        ('key', '=', key),
                        ('tag', '=', tag)
                    ])
                    cache_record.update({
                        'key': key,
                        'tag': tag,
                        'value': value,
                        'expire': expire_time,
                    })
                logger.debug('CACHE UPDATE KEY: %s VALUE: %s', key, value)
            else:
                with api.Environment.manage():
                    with registry(self.env.cr.dbname).cursor() as new_cr:
                        env = api.Environment(new_cr, self.env.uid, self.env.context)
                        cache_record = env['kv_cache.cache'].search([
                            ('key', '=', key),
                            ('tag', '=', tag)
                        ])
                        cache_record.update({
                            'key': key,
                            'tag': tag,
                            'value': value,
                            'expire': expire_time,
                        })
                        env.cr.commit()
                logger.debug('CACHE NEW ENV UPDATE KEY: %s VALUE: %s', key, value)
    except Exception:
        logger.exception('[ODOO_ERROR] CACHE PUT ERROR')
        raise Exception('CACHE PUT ERROR')
def runjob(self, db, job_uuid, **kw):
    """Execute one enqueued job (HTTP endpoint used by the job runner).

    :param db: database name to bind the current session to
    :param job_uuid: uuid of the ``queue_job`` record to run
    :return: empty string (the runner only cares about the HTTP status)
    """
    http.request.session.db = db
    env = http.request.env(user=odoo.SUPERUSER_ID)

    def retry_postpone(job, message, seconds=None):
        # The current transaction may be aborted, so re-attach the job to a
        # fresh cursor before persisting the postponed state, and commit it
        # immediately so the state survives the failed main transaction.
        job.env.clear()
        with odoo.api.Environment.manage():
            with odoo.registry(job.env.cr.dbname).cursor() as new_cr:
                job.env = job.env(cr=new_cr)
                job.postpone(result=message, seconds=seconds)
                job.set_pending(reset_retry=False)
                job.store()
                new_cr.commit()

    # ensure the job to run is in the correct state and lock the record
    env.cr.execute(
        "SELECT state FROM queue_job WHERE uuid=%s AND state=%s FOR UPDATE",
        (job_uuid, ENQUEUED),
    )
    if not env.cr.fetchone():
        _logger.warn(
            "was requested to run job %s, but it does not exist, "
            "or is not in state %s",
            job_uuid,
            ENQUEUED,
        )
        return ""

    job = Job.load(env, job_uuid)
    assert job and job.state == ENQUEUED

    try:
        try:
            self._try_perform_job(env, job)
        except OperationalError as err:
            # Automatically retry the typical transaction serialization
            # errors
            if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                raise
            retry_postpone(job, tools.ustr(err.pgerror, errors="replace"),
                           seconds=PG_RETRY)
            _logger.debug("%s OperationalError, postponed", job)
    except NothingToDoJob as err:
        # The job decided there is nothing to do: mark done with its message
        # (or a default one) and persist.
        if str(err):
            msg = str(err)
        else:
            msg = _("Job interrupted and set to Done: nothing to do.")
        job.set_done(msg)
        job.store()
        env.cr.commit()
    except RetryableJobError as err:
        # delay the job later, requeue
        retry_postpone(job, str(err), seconds=err.seconds)
        _logger.debug("%s postponed", job)
    except (FailedJobError, Exception):
        # NOTE(review): (FailedJobError, Exception) is equivalent to bare
        # Exception; kept as-is for readability of intent.
        # Record the traceback on the job using a fresh cursor (the main
        # transaction is likely poisoned), then re-raise so the runner sees
        # the failure.
        buff = StringIO()
        traceback.print_exc(file=buff)
        _logger.error(buff.getvalue())
        job.env.clear()
        with odoo.api.Environment.manage():
            with odoo.registry(job.env.cr.dbname).cursor() as new_cr:
                job.env = job.env(cr=new_cr)
                job.set_failed(exc_info=buff.getvalue())
                job.store()
                new_cr.commit()
        raise

    return ""
def migrate(cr, version):
    """Migration hook: delegate to account's ``migrate_tags_on_taxes``."""
    from odoo.addons.account.models.chart_template import migrate_tags_on_taxes

    migrate_tags_on_taxes(cr, odoo.registry(cr.dbname))
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None, smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None): """Low-level function for sending an email (deprecated). :deprecate: since OpenERP 6.1, please use ir.mail_server.send_email() instead. :param email_from: A string used to fill the `From` header, if falsy, config['email_from'] is used instead. Also used for the `Reply-To` header if `reply_to` is not provided :param email_to: a sequence of addresses to send the mail to. """ # If not cr, get cr from current thread database local_cr = None if not cr: db_name = getattr(threading.currentThread(), 'dbname', None) if db_name: local_cr = cr = odoo.registry(db_name).cursor() else: raise Exception( "No database cursor found, please pass one explicitly") # Send Email try: mail_server_pool = odoo.registry(cr.dbname)['ir.mail_server'] res = False # Pack Message into MIME Object email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to, attachments, message_id, references, openobject_id, subtype, headers=headers) res = mail_server_pool.send_email( cr, uid or 1, email_msg, mail_server_id=None, smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password, smtp_encryption=('ssl' if ssl else None), smtp_debug=debug) except Exception: _logger.exception("tools.email_send failed to deliver email") return False finally: if local_cr: cr.close() return res
def migrate(cr, version):
    """Migration hook: delegate to account's
    ``migrate_set_tags_and_taxes_updatable`` for the ``l10n_hr`` chart."""
    from odoo.addons.account.models.chart_template import migrate_set_tags_and_taxes_updatable

    migrate_set_tags_and_taxes_updatable(cr, odoo.registry(cr.dbname), 'l10n_hr')
def _cron_fetch_and_build(self, hostname):
    """ This method have to be called from a dedicated cron created on
    each runbot instance.

    Runs the build scheduler loop for this host: cleans up sources, dbs and
    docker containers, then repeatedly schedules pending builds until the
    cron period elapses.

    :param hostname: fqdn of the host this cron run is intended for; the
        method is a no-op on any other host
    """
    # Each instance runs the same cron; only act on our own host.
    if hostname != fqdn():
        return 'Not for me'

    host = self.env['runbot.host']._get_current()
    host.set_psql_conn_count()
    host.last_start_loop = fields.Datetime.now()
    # Persist the loop-start marker before entering the long-running loop.
    self.env.cr.commit()
    start_time = time.time()

    # 1. source cleanup
    # -> Remove sources when no build is using them
    # (could be usefull to keep them for wakeup but we can checkout them again if not forced push)
    self.env['runbot.repo']._source_cleanup()

    # 2. db and log cleanup
    # -> Keep them as long as possible
    self.env['runbot.build']._local_cleanup()

    # 3. docker cleanup
    docker_ps_result = docker_ps()
    # Map build id -> container name for containers matching the dest format.
    containers = {
        int(dc.split('-', 1)[0]): dc
        for dc in docker_ps_result if dest_reg.match(dc)
    }
    if containers:
        # Stop containers whose build is already marked done.
        candidates = self.env['runbot.build'].search([
            ('id', 'in', list(containers.keys())),
            ('local_state', '=', 'done')
        ])
        for c in candidates:
            _logger.info(
                'container %s found running with build state done',
                containers[c.id])
            docker_stop(containers[c.id])
    ignored = {dc for dc in docker_ps_result if not dest_reg.match(dc)}
    if ignored:
        _logger.debug('docker (%s) not deleted because not dest format',
                      " ".join(list(ignored)))

    timeout = self._get_cron_period()
    icp = self.env['ir.config_parameter']
    update_frequency = int(
        icp.get_param('runbot.runbot_update_frequency', default=10))
    # Main scheduling loop: keep scheduling builds until the cron period
    # is exhausted, committing after each successful pass.
    while time.time() - start_time < timeout:
        repos = self.search([('mode', '!=', 'disabled')])
        try:
            repos._scheduler(host)
            host.last_success = fields.Datetime.now()
            self.env.cr.commit()
            # Drop caches and rebind self on a clean environment between
            # iterations.
            self.env.reset()
            self = self.env()[self._name]
            self._reload_nginx()
            time.sleep(update_frequency)
        except TransactionRollbackError:
            # can lead to psycopg2.InternalError'>: "current transaction is aborted, commands ignored until end of transaction block
            _logger.exception('Trying to rollback')
            self.env.cr.rollback()
            self.env.reset()
            # Random backoff to de-synchronize concurrent schedulers.
            time.sleep(random.uniform(0, 3))
        except Exception as e:
            with registry(self._cr.dbname).cursor(
            ) as cr:  # user another cursor since transaction will be rollbacked
                message = str(e)
                chost = host.with_env(self.env(cr=cr))
                # Count repeated identical failures; reset on a new message.
                if chost.last_exception == message:
                    chost.exception_count += 1
                else:
                    chost.with_env(self.env(cr=cr)).last_exception = str(e)
                    chost.exception_count = 1
            # Re-raise: the loop stops on unexpected errors.
            raise

    # Clean exit: clear any recorded failure state and stamp the loop end.
    if host.last_exception:
        host.last_exception = ""
        host.exception_count = 0
    host.last_end_loop = fields.Datetime.now()
def registry():
    """Return the model registry for the current test database."""
    db_name = common.get_db_name()
    return odoo.registry(db_name)
def _process_jobs(cls, db_name):
    """ Try to process all cron jobs.

    This selects in database all the jobs that should be processed. It then
    tries to lock each of them and, if it succeeds, run the cron job (if it
    doesn't succeed, it means the job was already locked to be taken care of
    by another thread) and return.

    :param db_name: name of the database whose jobs are processed
    :raise BadVersion: if the version is different from the worker's
    :raise BadModuleState: if modules are to install/upgrade/remove
    """
    db = odoo.sql_db.db_connect(db_name)
    threading.current_thread().dbname = db_name
    try:
        with db.cursor() as cr:
            # Make sure the database has the same version as the code of
            # base and that no module must be installed/upgraded/removed
            cr.execute(
                "SELECT latest_version FROM ir_module_module WHERE name=%s",
                ['base'])
            (version, ) = cr.fetchone()
            cr.execute(
                "SELECT COUNT(*) FROM ir_module_module WHERE state LIKE %s",
                ['to %'])
            (changes, ) = cr.fetchone()
            if not version or changes:
                raise BadModuleState()
            elif version != BASE_VERSION:
                raise BadVersion()
            # Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
            cr.execute("""SELECT * FROM ir_cron
                          WHERE numbercall != 0
                              AND active AND nextcall <= (now() at time zone 'UTC')
                          ORDER BY priority""")
            jobs = cr.dictfetchall()

        for job in jobs:
            # One dedicated cursor per job to hold the row lock for the
            # duration of the job's own transaction.
            lock_cr = db.cursor()
            try:
                # Try to grab an exclusive lock on the job row from within the task transaction
                # Restrict to the same conditions as for the search since the job may have already
                # been run by an other thread when cron is running in multi thread
                lock_cr.execute("""SELECT *
                                   FROM ir_cron
                                   WHERE numbercall != 0
                                      AND active
                                      AND nextcall <= (now() at time zone 'UTC')
                                      AND id=%s
                                   FOR UPDATE NOWAIT""",
                                (job['id'], ), log_exceptions=False)
                locked_job = lock_cr.fetchone()
                if not locked_job:
                    _logger.debug(
                        "Job `%s` already executed by another process/thread. skipping it",
                        job['name'])
                    continue
                # Got the lock on the job row, run its code
                _logger.debug('Starting job `%s`.', job['name'])
                job_cr = db.cursor()
                try:
                    registry = odoo.registry(db_name)
                    registry[cls._name]._process_job(job_cr, job, lock_cr)
                except Exception:
                    _logger.exception(
                        'Unexpected exception while processing cron job %r',
                        job)
                finally:
                    job_cr.close()
            # BUGFIX: was the Python-2-only `except psycopg2.OperationalError, e:`
            # which is a SyntaxError on Python 3; `as e` works on both.
            except psycopg2.OperationalError as e:
                if e.pgcode == '55P03':
                    # Class 55: Object not in prerequisite state; 55P03: lock_not_available
                    _logger.debug(
                        'Another process/thread is already busy executing job `%s`, skipping it.',
                        job['name'])
                    continue
                else:
                    # Unexpected OperationalError
                    raise
            finally:
                # we're exiting due to an exception while acquiring the lock
                lock_cr.close()
    finally:
        # NOTE(review): clear the thread-local set above so a pooled worker
        # thread does not keep a stale dbname — confirm against upstream.
        if hasattr(threading.current_thread(), 'dbname'):
            del threading.current_thread().dbname
def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=False):
    """ Create procurements based on orderpoints.

    :param bool use_new_cursor: if set, use a dedicated cursor and
        auto-commit after processing 1000 orderpoints. This is appropriate
        for batch jobs only.
    :param company_id: optional company id used to restrict the orderpoint
        search domain
    :return: empty dict
    """
    OrderPoint = self.env['stock.warehouse.orderpoint']
    domain = self._get_orderpoint_domain(company_id=company_id)
    # Fetch ids only (prefetch disabled); records are browsed in batches of
    # 1000 below to bound memory use and transaction size.
    orderpoints_noprefetch = OrderPoint.with_context(
        prefetch_fields=False).search(
            domain,
            order=self._procurement_from_orderpoint_get_order()).ids
    while orderpoints_noprefetch:
        if use_new_cursor:
            # Rebind self on a fresh cursor so each batch commits on its own.
            cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=cr))
        OrderPoint = self.env['stock.warehouse.orderpoint']
        Procurement = self.env['procurement.order']
        ProcurementAutorundefer = Procurement.with_context(
            procurement_autorun_defer=True)
        procurement_list = []

        orderpoints = OrderPoint.browse(orderpoints_noprefetch[:1000])
        orderpoints_noprefetch = orderpoints_noprefetch[1000:]

        # Calculate groups that can be executed together
        location_data = defaultdict(lambda: dict(
            products=self.env['product.product'],
            orderpoints=self.env['stock.warehouse.orderpoint'],
            groups=list()))
        for orderpoint in orderpoints:
            key = self._procurement_from_orderpoint_get_grouping_key(
                [orderpoint.id])
            location_data[key]['products'] += orderpoint.product_id
            location_data[key]['orderpoints'] += orderpoint
            location_data[key][
                'groups'] = self._procurement_from_orderpoint_get_groups(
                    [orderpoint.id])

        # NOTE(review): the loop variable shadows the dict it iterates; the
        # items() view is created before the first rebind, so this works,
        # but it is fragile — kept as-is.
        for location_id, location_data in location_data.items():
            location_orderpoints = location_data['orderpoints']
            product_context = dict(
                self._context,
                location=location_orderpoints[0].location_id.id)
            substract_quantity = location_orderpoints.subtract_procurements_from_orderpoints(
            )

            for group in location_data['groups']:
                # Narrow the availability window per group when provided.
                if group.get('from_date'):
                    product_context['from_date'] = group[
                        'from_date'].strftime(
                            DEFAULT_SERVER_DATETIME_FORMAT)
                if group['to_date']:
                    product_context['to_date'] = group['to_date'].strftime(
                        DEFAULT_SERVER_DATETIME_FORMAT)
                product_quantity = location_data['products'].with_context(
                    product_context)._product_available()
                for orderpoint in location_orderpoints:
                    try:
                        op_product_virtual = product_quantity[
                            orderpoint.product_id.id]['virtual_available']
                        if op_product_virtual is None:
                            continue
                        # Below the minimum: replenish up to the max (or min,
                        # whichever is larger), rounded up to qty_multiple.
                        if float_compare(op_product_virtual,
                                         orderpoint.product_min_qty,
                                         precision_rounding=orderpoint.
                                         product_uom.rounding) <= 0:
                            qty = max(orderpoint.product_min_qty,
                                      orderpoint.product_max_qty
                                      ) - op_product_virtual
                            remainder = orderpoint.qty_multiple > 0 and qty % orderpoint.qty_multiple or 0.0
                            if float_compare(remainder, 0.0,
                                             precision_rounding=orderpoint.
                                             product_uom.rounding) > 0:
                                qty += orderpoint.qty_multiple - remainder
                            if float_compare(qty, 0.0,
                                             precision_rounding=orderpoint.
                                             product_uom.rounding) < 0:
                                continue
                            # Subtract quantities already covered by pending
                            # procurements for this orderpoint.
                            qty -= substract_quantity[orderpoint.id]
                            qty_rounded = float_round(
                                qty,
                                precision_rounding=orderpoint.product_uom.
                                rounding)
                            if qty_rounded > 0:
                                new_procurement = ProcurementAutorundefer.create(
                                    orderpoint._prepare_procurement_values(
                                        qty_rounded,
                                        **group['procurement_values']))
                                procurement_list.append(new_procurement)
                                new_procurement.message_post_with_view(
                                    'mail.message_origin_link',
                                    values={
                                        'self': new_procurement,
                                        'origin': orderpoint
                                    },
                                    subtype_id=self.env.ref(
                                        'mail.mt_note').id)
                                self._procurement_from_orderpoint_post_process(
                                    [orderpoint.id])
                        if use_new_cursor:
                            cr.commit()
                    except OperationalError:
                        if use_new_cursor:
                            # Requeue this orderpoint for the next batch and
                            # keep going; without a dedicated cursor we must
                            # propagate.
                            orderpoints_noprefetch += [orderpoint.id]
                            cr.rollback()
                            continue
                        else:
                            raise

        try:
            # TDE CLEANME: use record set ?
            procurement_list.reverse()
            procurements = self.env['procurement.order']
            for p in procurement_list:
                procurements += p
            procurements.run()
            if use_new_cursor:
                cr.commit()
        except OperationalError:
            if use_new_cursor:
                cr.rollback()
                continue
            else:
                raise

        if use_new_cursor:
            cr.commit()
            cr.close()

    return {}
def load_modules(db, force_demo=False, status=None, update_module=False):
    """Load, install, upgrade and uninstall modules on database ``db``.

    OpenUpgrade variant: threads an ``upg_registry`` dict through the graph
    loaders so migration analysis can compare model registries.

    :param db: database connection (provides ``cursor()``)
    :param force_demo: when True, force loading of demo data
    :param status: deprecated/unused, kept for signature compatibility
    :param update_module: when True, process pending install/upgrade/remove
    :return: None, or a brand new registry when modules were uninstalled
        (the whole loading is restarted in that case)
    """
    initialize_sys_path()

    force = []
    if force_demo:
        force.append('demo')

    upg_registry = {}
    cr = db.cursor()
    try:
        if not odoo.modules.db.is_initialized(cr):
            # Brand new database: create base tables and mark everything
            # for initialization.
            _logger.info("init db")
            odoo.modules.db.initialize(cr)
            update_module = True
            # process auto-installed modules
            tools.config["init"]["all"] = 1
            tools.config['update']['all'] = 1
            if not tools.config['without_demo']:
                tools.config["demo"]['all'] = 1

        # This is a brand new registry, just created in
        # odoo.modules.registry.Registry.new().
        registry = odoo.registry(cr.dbname)
        env = api.Environment(cr, SUPERUSER_ID, {})

        if 'base' in tools.config['update'] or 'all' in tools.config['update']:
            cr.execute(
                "update ir_module_module set state=%s where name=%s and state=%s",
                ('to upgrade', 'base', 'installed'))

        # STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
        graph = odoo.modules.graph.Graph()
        graph.add_module(cr, 'base', force)
        if not graph:
            _logger.critical(
                'module base cannot be loaded! (hint: verify addons-path)')
            raise ImportError(
                'Module `base` cannot be loaded! (hint: verify addons-path)')

        # processed_modules: for cleanup step after install
        # loaded_modules: to avoid double loading
        report = registry._assertion_report
        loaded_modules, processed_modules = load_module_graph(
            cr, graph, status, perform_checks=update_module,
            report=report, upg_registry=upg_registry)

        load_lang = tools.config.pop('load_language')
        if load_lang or update_module:
            # some base models are used below, so make sure they are set up
            registry.setup_models(cr, partial=True)

        if load_lang:
            for lang in load_lang.split(','):
                tools.load_language(cr, lang)

        # STEP 2: Mark other modules to be loaded/updated
        if update_module:
            Module = env['ir.module.module']
            if ('base' in tools.config['init']) or ('base' in tools.config['update']):
                _logger.info('updating modules list')
                Module.update_list()

            _check_module_names(
                cr,
                itertools.chain(tools.config['init'].keys(),
                                tools.config['update'].keys()))

            module_names = [k for k, v in tools.config['init'].items() if v]
            if module_names:
                modules = Module.search([('state', '=', 'uninstalled'),
                                         ('name', 'in', module_names)])
                if modules:
                    modules.button_install()

            module_names = [k for k, v in tools.config['update'].items() if v]
            if module_names:
                # OpenUpgrade: in standard Odoo, '--update all' just means:
                # '--update base + upward (installed) dependencies. This breaks
                # the chain when new glue modules are encountered.
                # E.g. purchase in 8.0 depends on stock_account and report,
                # both of which are new. They may be installed, but purchase as
                # an upward dependency is not selected for upgrade.
                # Therefore, explicitely select all installed modules for
                # upgrading in OpenUpgrade in that case.
                domain = [('state', '=', 'installed')]
                if 'all' not in module_names:
                    domain.append(('name', 'in', module_names))
                modules = Module.search(domain)
                if modules:
                    modules.button_upgrade()

            cr.execute("update ir_module_module set state=%s where name=%s",
                       ('installed', 'base'))
            Module.invalidate_cache(['state'])

        # STEP 3: Load marked modules (skipping base which was done in STEP 1)
        # IMPORTANT: this is done in two parts, first loading all installed or
        #            partially installed modules (i.e. installed/to upgrade), to
        #            offer a consistent system to the second part: installing
        #            newly selected modules.
        #            We include the modules 'to remove' in the first step, because
        #            they are part of the "currently installed" modules. They will
        #            be dropped in STEP 6 later, before restarting the loading
        #            process.
        # IMPORTANT 2: We have to loop here until all relevant modules have been
        #              processed, because in some rare cases the dependencies have
        #              changed, and modules that depend on an uninstalled module
        #              will not be processed on the first pass.
        #              It's especially useful for migrations.
        previously_processed = -1
        while previously_processed < len(processed_modules):
            previously_processed = len(processed_modules)
            processed_modules += load_marked_modules(
                cr, graph, ['installed', 'to upgrade', 'to remove'],
                force, status, report, loaded_modules, update_module,
                upg_registry)
            if update_module:
                processed_modules += load_marked_modules(
                    cr, graph, ['to install'], force, status, report,
                    loaded_modules, update_module, upg_registry)

        registry.setup_models(cr)

        # STEP 3.5: execute migration end-scripts
        migrations = odoo.modules.migration.MigrationManager(cr, graph)
        for package in graph:
            migrations.migrate_module(package, 'end')

        # STEP 4: Finish and cleanup installations
        if processed_modules:
            # Warn about models without any access rule.
            cr.execute(
                """select model,name from ir_model
                   where id NOT IN (select distinct model_id from ir_model_access)"""
            )
            for (model, name) in cr.fetchall():
                if model in registry and not registry[
                        model]._abstract and not registry[model]._transient:
                    _logger.warning(
                        'The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,0,0,0',
                        model, model.replace('.', '_'),
                        model.replace('.', '_'), model.replace('.', '_'))

            # Temporary warning while we remove access rights on osv_memory objects, as they have
            # been replaced by owner-only access rights
            cr.execute(
                """select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id"""
            )
            for (model, name) in cr.fetchall():
                if model in registry and registry[model]._transient:
                    _logger.warning(
                        'The transient model %s (%s) should not have explicit access rules!',
                        model, name)

            cr.execute("SELECT model from ir_model")
            for (model, ) in cr.fetchall():
                if model in registry:
                    env[model]._check_removed_columns(log=True)
                elif _logger.isEnabledFor(
                        logging.INFO):  # more an info that a warning...
                    _logger.warning(
                        "Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)",
                        model)

            # Cleanup orphan records
            env['ir.model.data']._process_end(processed_modules)

        for kind in ('init', 'demo', 'update'):
            tools.config[kind] = {}
        cr.commit()

        # STEP 5: Uninstall modules to remove
        if update_module:
            # Remove records referenced from ir_model_data for modules to be
            # removed (and removed the references from ir_model_data).
            cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s",
                       ('to remove', ))
            modules_to_remove = dict(cr.fetchall())
            if modules_to_remove:
                # Run uninstall hooks in reverse dependency order.
                pkgs = reversed(
                    [p for p in graph if p.name in modules_to_remove])
                for pkg in pkgs:
                    uninstall_hook = pkg.info.get('uninstall_hook')
                    if uninstall_hook:
                        py_module = sys.modules['odoo.addons.%s' %
                                                (pkg.name, )]
                        getattr(py_module, uninstall_hook)(cr, registry)

                Module = env['ir.module.module']
                Module.browse(modules_to_remove.values()).module_uninstall()
                # Recursive reload, should only happen once, because there should be no
                # modules to remove next time
                cr.commit()
                _logger.info(
                    'Reloading registry once more after uninstalling modules')
                api.Environment.reset()
                return odoo.modules.registry.Registry.new(
                    cr.dbname, force_demo, status, update_module)

        # STEP 6: verify custom views on every model
        if update_module:
            View = env['ir.ui.view']
            for model in registry:
                try:
                    View._validate_custom_views(model)
                except Exception as e:
                    _logger.warning('invalid custom view(s) for model %s: %s',
                                    model, tools.ustr(e))

        if report.failures:
            _logger.error('At least one test failed when loading the modules.')
        else:
            _logger.info('Modules loaded.')

        # STEP 8: call _register_hook on every model
        for model in env.values():
            model._register_hook()

        # STEP 9: Run the post-install tests
        cr.commit()

        t0 = time.time()
        t0_sql = odoo.sql_db.sql_counter
        if odoo.tools.config['test_enable']:
            if update_module:
                cr.execute(
                    "SELECT name FROM ir_module_module WHERE state='installed' and name = ANY(%s)",
                    (processed_modules, ))
            else:
                cr.execute(
                    "SELECT name FROM ir_module_module WHERE state='installed'"
                )
            for module_name in cr.fetchall():
                report.record_result(
                    odoo.modules.module.run_unit_tests(
                        module_name[0], cr.dbname,
                        position=runs_post_install))
            _logger.log(25, "All post-tested in %.2fs, %s queries",
                        time.time() - t0,
                        odoo.sql_db.sql_counter - t0_sql)
    finally:
        cr.close()
def load_module_graph(cr, graph, status=None, perform_checks=True,
                      skip_modules=None, report=None, upg_registry=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

    OpenUpgrade variant: commits are suppressed so each module upgrade runs
    in one transaction, and a local model registry is compared against
    ``upg_registry`` for migration analysis.

    :param graph: graph of module nodes to load
    :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
    :param perform_checks: whether module descriptors should be checked for validity (prints warnings
                           for same cases)
    :param skip_modules: optional list of module names (packages) which have previously been loaded and can
        be skipped
    :param report: assertion report collecting test results
    :param upg_registry: OpenUpgrade dict accumulating per-module registries
    :return: list of modules that were installed or updated
    """
    def load_test(module_name, idref, mode):
        # Commit first so a failing test can be rolled back without losing
        # the module's own data.
        cr.commit()
        try:
            _load_data(cr, module_name, idref, mode, 'test')
            return True
        except Exception:
            _test_logger.exception(
                'module %s: an exception occurred in a test', module_name)
            return False
        finally:
            if tools.config.options['test_commit']:
                cr.commit()
            else:
                cr.rollback()
                # avoid keeping stale xml_id, etc. in cache
                odoo.registry(cr.dbname).clear_caches()

    def _get_files_of_kind(kind):
        # Map legacy manifest keys onto the current ones.
        if kind == 'demo':
            kind = ['demo_xml', 'demo']
        elif kind == 'data':
            kind = ['init_xml', 'update_xml', 'data']
        if isinstance(kind, str):
            kind = [kind]
        files = []
        for k in kind:
            for f in package.data[k]:
                files.append(f)
                if k.endswith('_xml') and not (k == 'init_xml'
                                               and not f.endswith('.xml')):
                    # init_xml, update_xml and demo_xml are deprecated except
                    # for the case of init_xml with yaml, csv and sql files as
                    # we can't specify noupdate for those file.
                    correct_key = 'demo' if k.count('demo') else 'data'
                    _logger.warning(
                        "module %s: key '%s' is deprecated in favor of '%s' for file '%s'.",
                        package.name, k, correct_key, f)
        return files

    def _load_data(cr, module_name, idref, mode, kind):
        """ kind: data, demo, test, init_xml, update_xml, demo_xml.

        noupdate is False, unless it is demo data or it is csv data in
        init mode.
        """
        try:
            if kind in ('demo', 'test'):
                threading.currentThread().testing = True
            for filename in _get_files_of_kind(kind):
                _logger.info("loading %s/%s", module_name, filename)
                noupdate = False
                if kind in ('demo', 'demo_xml') or (filename.endswith('.csv')
                                                    and kind in
                                                    ('init', 'init_xml')):
                    noupdate = True
                tools.convert_file(cr, module_name, filename, idref, mode,
                                   noupdate, kind, report)
        finally:
            if kind in ('demo', 'test'):
                threading.currentThread().testing = False

    if status is None:
        status = {}
    if skip_modules is None:
        skip_modules = []

    processed_modules = []
    loaded_modules = []
    registry = odoo.registry(cr.dbname)
    migrations = odoo.modules.migration.MigrationManager(cr, graph)
    module_count = len(graph)
    _logger.info('loading %d modules...', module_count)

    registry.clear_manual_fields()

    # suppress commits to have the upgrade of one module in just one transaction
    cr.commit_org = cr.commit
    cr.commit = lambda *args: None
    cr.rollback_org = cr.rollback
    cr.rollback = lambda *args: None
    fields.set_migration_cursor(cr)

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    t0_sql = odoo.sql_db.sql_counter
    for index, package in enumerate(graph, 1):
        module_name = package.name
        module_id = package.id

        if module_name in skip_modules or module_name in loaded_modules:
            continue

        _logger.debug('loading module %s (%d/%d)', module_name, index,
                      module_count)
        # Run 'pre' migration scripts before the module's Python is loaded.
        migrations.migrate_module(package, 'pre')
        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['odoo.addons.%s' % (module_name, )]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        model_names = registry.load(cr, package)

        loaded_modules.append(package.name)
        if hasattr(package, 'init') or hasattr(
                package, 'update') or package.state in ('to install',
                                                        'to upgrade'):
            registry.setup_models(cr, partial=True)
            # OpenUpgrade: rebuild the local registry based on the loaded models
            local_registry = {}
            env = api.Environment(cr, SUPERUSER_ID, {})
            for model in env.values():
                if not model._auto:
                    continue
                openupgrade_loading.log_model(model, local_registry)
            openupgrade_loading.compare_registries(cr, package.name,
                                                   upg_registry,
                                                   local_registry)
            registry.init_models(cr, model_names, {'module': package.name})

        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if hasattr(package, 'init') or hasattr(
                package, 'update') or package.state in ('to install',
                                                        'to upgrade'):
            env = api.Environment(cr, SUPERUSER_ID, {})
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_models() above.
            module = env['ir.module.module'].browse(module_id)

            if perform_checks:
                module.check()

            if package.state == 'to upgrade':
                # upgrading the module information
                module.write(module.get_values_from_terp(package.data))
            _load_data(cr, module_name, idref, mode, kind='data')
            has_demo = hasattr(package, 'demo') or (package.dbdemo
                                                    and package.state !=
                                                    'installed')
            if has_demo:
                _load_data(cr, module_name, idref, mode, kind='demo')
                cr.execute('update ir_module_module set demo=%s where id=%s',
                           (True, module_id))
                module.invalidate_cache(['demo'])

            # OpenUpgrade: add 'try' block for logging exceptions
            # as errors in post scripts seem to be dropped
            try:
                migrations.migrate_module(package, 'post')
            except Exception as exc:
                _logger.error(
                    'Error executing post migration script for module %s: %s',
                    package, exc)
                raise

            # Update translations for all installed languages
            overwrite = odoo.tools.config["overwrite_existing_translations"]
            module.with_context(overwrite=overwrite).update_translations()

            registry._init_modules.add(package.name)

            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            # validate all the views at a whole
            env['ir.ui.view']._validate_module_views(module_name)

            if has_demo:
                # launch tests only in demo mode, allowing tests to use demo data.
                if tools.config.options['test_enable']:
                    # Yamel test
                    report.record_result(load_test(module_name, idref, mode))
                    # Python tests
                    env['ir.http']._clear_routing_map(
                    )  # force routing map to be rebuilt
                    report.record_result(
                        odoo.modules.module.run_unit_tests(
                            module_name, cr.dbname))

            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            module.write({'state': 'installed', 'latest_version': ver})

            package.load_state = package.state
            package.load_version = package.installed_version
            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        registry._init_modules.add(package.name)
        # Real commit (cr.commit is a no-op while loading; see above).
        cr.commit_org()

        # OpenUpgrade edit start:
        # if there's a tests directory, run those if tests are enabled
        tests_dir = os.path.join(
            odoo.modules.module.get_module_path(package.name),
            'migrations',
            adapt_version(package.data['version']),
            'tests',
        )
        # check for an environment variable because we don't want to mess
        # with odoo's config.py, but we also don't want to run existing
        # tests
        if os.environ.get('OPENUPGRADE_TESTS') and os.path.exists(tests_dir):
            import unittest
            threading.currentThread().testing = True
            tests = unittest.defaultTestLoader.discover(
                tests_dir, top_level_dir=tests_dir)
            report.record_result(
                unittest.TextTestRunner(
                    verbosity=2,
                    stream=odoo.modules.module.TestStream(package.name),
                ).run(tests).wasSuccessful())
            threading.currentThread().testing = False
        # OpenUpgrade edit end

    _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph),
                time.time() - t0, odoo.sql_db.sql_counter - t0_sql)

    registry.clear_manual_fields()

    # Restore the real commit and flush everything loaded above.
    # NOTE(review): cr.rollback is not restored here — confirm intended.
    cr.commit = cr.commit_org
    cr.commit()
    fields.set_migration_cursor()

    return loaded_modules, processed_modules
def load_module_graph(cr, graph, status=None, perform_checks=True,
                      skip_modules=None, report=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

    :param graph: graph of module nodes to load
    :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
    :param perform_checks: whether module descriptors should be checked for validity (prints warnings
                           for same cases)
    :param skip_modules: optional list of module names (packages) which have previously been loaded and can
        be skipped
    :param report: assertion report collecting test results
    :return: list of modules that were installed or updated
    """
    def load_test(module_name, idref, mode):
        # Commit first so a failing test can be rolled back without losing
        # the module's own data.
        cr.commit()
        try:
            _load_data(cr, module_name, idref, mode, 'test')
            return True
        except Exception:
            _test_logger.exception(
                'module %s: an exception occurred in a test', module_name)
            return False
        finally:
            if tools.config.options['test_commit']:
                cr.commit()
            else:
                cr.rollback()
                # avoid keeping stale xml_id, etc. in cache
                odoo.registry(cr.dbname).clear_caches()

    def _get_files_of_kind(kind):
        # Map legacy manifest keys onto the current ones.
        if kind == 'demo':
            kind = ['demo_xml', 'demo']
        elif kind == 'data':
            kind = ['init_xml', 'update_xml', 'data']
        if isinstance(kind, str):
            kind = [kind]
        files = []
        for k in kind:
            for f in package.data[k]:
                files.append(f)
                if k.endswith('_xml') and not (k == 'init_xml'
                                               and not f.endswith('.xml')):
                    # init_xml, update_xml and demo_xml are deprecated except
                    # for the case of init_xml with csv and sql files as
                    # we can't specify noupdate for those file.
                    correct_key = 'demo' if k.count('demo') else 'data'
                    _logger.warning(
                        "module %s: key '%s' is deprecated in favor of '%s' for file '%s'.",
                        package.name, k, correct_key, f)
        return files

    def _load_data(cr, module_name, idref, mode, kind):
        """ kind: data, demo, test, init_xml, update_xml, demo_xml.

        noupdate is False, unless it is demo data or it is csv data in
        init mode.
        """
        try:
            if kind in ('demo', 'test'):
                threading.currentThread().testing = True
            for filename in _get_files_of_kind(kind):
                _logger.info("loading %s/%s", module_name, filename)
                noupdate = False
                if kind in ('demo', 'demo_xml') or (filename.endswith('.csv')
                                                    and kind in
                                                    ('init', 'init_xml')):
                    noupdate = True
                tools.convert_file(cr, module_name, filename, idref, mode,
                                   noupdate, kind, report)
        finally:
            if kind in ('demo', 'test'):
                threading.currentThread().testing = False

    processed_modules = []
    loaded_modules = []
    registry = odoo.registry(cr.dbname)
    migrations = odoo.modules.migration.MigrationManager(cr, graph)
    module_count = len(graph)
    _logger.info('loading %d modules...', module_count)

    registry.clear_caches()

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    t0_sql = odoo.sql_db.sql_counter
    for index, package in enumerate(graph, 1):
        module_name = package.name
        module_id = package.id

        if skip_modules and module_name in skip_modules:
            continue

        _logger.debug('loading module %s (%d/%d)', module_name, index,
                      module_count)
        # Run 'pre' migration scripts before the module's Python is loaded.
        migrations.migrate_module(package, 'pre')
        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['odoo.addons.%s' % (module_name, )]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        model_names = registry.load(cr, package)

        loaded_modules.append(package.name)
        if hasattr(package, 'init') or hasattr(
                package, 'update') or package.state in ('to install',
                                                        'to upgrade'):
            registry.setup_models(cr)
            registry.init_models(cr, model_names, {'module': package.name})
            cr.commit()

        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if hasattr(package, 'init') or hasattr(
                package, 'update') or package.state in ('to install',
                                                        'to upgrade'):
            env = api.Environment(cr, SUPERUSER_ID, {})
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_models() above.
            module = env['ir.module.module'].browse(module_id)

            if perform_checks:
                module._check()

            if package.state == 'to upgrade':
                # upgrading the module information
                module.write(module.get_values_from_terp(package.data))
            _load_data(cr, module_name, idref, mode, kind='data')
            has_demo = hasattr(package, 'demo') or (package.dbdemo
                                                    and package.state !=
                                                    'installed')
            if has_demo:
                _load_data(cr, module_name, idref, mode, kind='demo')
                cr.execute('update ir_module_module set demo=%s where id=%s',
                           (True, module_id))
                module.invalidate_cache(['demo'])

            migrations.migrate_module(package, 'post')

            # Update translations for all installed languages
            overwrite = odoo.tools.config["overwrite_existing_translations"]
            module.with_context(overwrite=overwrite)._update_translations()

            if package.name is not None:
                registry._init_modules.add(package.name)

            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            # validate all the views at a whole
            env['ir.ui.view']._validate_module_views(module_name)

            if has_demo:
                # launch tests only in demo mode, allowing tests to use demo data.
                if tools.config.options['test_enable']:
                    # Yamel test
                    report.record_result(load_test(module_name, idref, mode))
                    # Python tests
                    env['ir.http']._clear_routing_map(
                    )  # force routing map to be rebuilt
                    report.record_result(
                        odoo.modules.module.run_unit_tests(
                            module_name, cr.dbname))
                    # tests may have reset the environment
                    env = api.Environment(cr, SUPERUSER_ID, {})
                    module = env['ir.module.module'].browse(module_id)

            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            module.write({'state': 'installed', 'latest_version': ver})

            package.load_state = package.state
            package.load_version = package.installed_version
            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        if package.name is not None:
            registry._init_modules.add(package.name)
        cr.commit()

    _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph),
                time.time() - t0, odoo.sql_db.sql_counter - t0_sql)

    registry.clear_caches()

    cr.commit()

    return loaded_modules, processed_modules
def assert_record_is_created(self, db_name, model_name, search_domain):
    """Assert that at least one ``model_name`` record matching
    ``search_domain`` exists in database ``db_name``."""
    connection = odoo.sql_db.db_connect(db_name)
    # pick up registry invalidations signalled by other processes first
    odoo.registry(db_name).check_signaling()
    with odoo.api.Environment.manage(), connection.cursor() as cr:
        env = odoo.api.Environment(cr, SUPERUSER_ID, {})
        matching = env[model_name].search(search_domain)
        return self.assertTrue(matching)
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None, models_to_check=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

       :param graph: graph of module nodes to load
       :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
       :param perform_checks: whether module descriptors should be checked for validity (prints warnings for same cases)
       :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
       :return: list of modules that were installed or updated
    """
    def load_test(idref, mode):
        # use a savepoint so a failed/aborted test file does not poison the
        # enclosing transaction
        cr.execute("SAVEPOINT load_test_data_file")
        try:
            load_data(cr, idref, mode, 'test', package, report)
            return True
        except Exception:
            _test_logger.exception(
                'module %s: an exception occurred in a test', package.name)
            return False
        finally:
            cr.execute("ROLLBACK TO SAVEPOINT load_test_data_file")
            # avoid keeping stale xml_id, etc. in cache
            odoo.registry(cr.dbname).clear_caches()

    if models_to_check is None:
        models_to_check = set()

    processed_modules = []
    loaded_modules = []
    registry = odoo.registry(cr.dbname)
    migrations = odoo.modules.migration.MigrationManager(cr, graph)
    module_count = len(graph)
    _logger.info('loading %d modules...', module_count)

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    t0_sql = odoo.sql_db.sql_counter

    # models whose schema was (re)initialized during this load
    models_updated = set()

    for index, package in enumerate(graph, 1):
        module_name = package.name
        module_id = package.id

        if skip_modules and module_name in skip_modules:
            continue

        _logger.debug('loading module %s (%d/%d)', module_name, index, module_count)

        needs_update = (
            hasattr(package, "init")
            or hasattr(package, "update")
            or package.state in ("to install", "to upgrade")
        )
        if needs_update:
            if package.name != 'base':
                registry.setup_models(cr)
            migrations.migrate_module(package, 'pre')

        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['odoo.addons.%s' % (module_name,)]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        model_names = registry.load(cr, package)

        loaded_modules.append(package.name)
        if needs_update:
            models_updated |= set(model_names)
            models_to_check -= set(model_names)
            registry.setup_models(cr)
            registry.init_models(cr, model_names, {'module': package.name})
        elif package.state != 'to remove':
            # The current module has simply been loaded. The models extended by this module
            # and for which we updated the schema, must have their schema checked again.
            # This is because the extension may have changed the model,
            # e.g. adding required=True to an existing field, but the schema has not been
            # updated by this module because it's not marked as 'to upgrade/to install'.
            models_to_check |= set(model_names) & models_updated

        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if needs_update:
            env = api.Environment(cr, SUPERUSER_ID, {})
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_models() above.
            module = env['ir.module.module'].browse(module_id)

            if perform_checks:
                module._check()

            if package.state == 'to upgrade':
                # upgrading the module information
                module.write(module.get_values_from_terp(package.data))
            load_data(cr, idref, mode, kind='data', package=package, report=report)
            demo_loaded = package.dbdemo = load_demo(cr, package, idref, mode, report)
            cr.execute('update ir_module_module set demo=%s where id=%s', (demo_loaded, module_id))
            module.invalidate_cache(['demo'])

            migrations.migrate_module(package, 'post')

            # Update translations for all installed languages
            overwrite = odoo.tools.config["overwrite_existing_translations"]
            module.with_context(overwrite=overwrite)._update_translations()

            if package.name is not None:
                registry._init_modules.add(package.name)

            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            if mode == 'update':
                # validate the views that have not been checked yet
                env['ir.ui.view']._validate_module_views(module_name)

            # need to commit any modification the module's installation or
            # update made to the schema or data so the tests can run
            # (separately in their own transaction)
            cr.commit()
            if tools.config.options['test_enable']:
                report.record_result(load_test(idref, mode))

                # Python tests
                env['ir.http']._clear_routing_map()     # force routing map to be rebuilt
                report.record_result(odoo.modules.module.run_unit_tests(module_name))
                # tests may have reset the environment
                env = api.Environment(cr, SUPERUSER_ID, {})
                module = env['ir.module.module'].browse(module_id)

            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            module.write({'state': 'installed', 'latest_version': ver})

            package.load_state = package.state
            package.load_version = package.installed_version
            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        if package.name is not None:
            registry._init_modules.add(package.name)

    _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, odoo.sql_db.sql_counter - t0_sql)

    return loaded_modules, processed_modules
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None, models_to_check=None, upg_registry=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

       OpenUpgrade variant: commits are suppressed on the cursor so that each
       module's upgrade runs in a single transaction; ``cr.commit_org`` keeps
       the real commit for the explicit per-module commits below.

       :param graph: graph of module nodes to load
       :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
       :param perform_checks: whether module descriptors should be checked for validity (prints warnings for same cases)
       :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
       :return: list of modules that were installed or updated
    """
    def load_test(idref, mode):
        # savepoint keeps a failing test file from aborting the transaction
        cr.execute("SAVEPOINT load_test_data_file")
        try:
            load_data(cr, idref, mode, 'test', package, report)
            return True
        except Exception:
            _test_logger.exception(
                'module %s: an exception occurred in a test', package.name)
            return False
        finally:
            cr.execute("ROLLBACK TO SAVEPOINT load_test_data_file")
            # avoid keeping stale xml_id, etc. in cache
            odoo.registry(cr.dbname).clear_caches()

    if skip_modules is None:
        skip_modules = []

    if models_to_check is None:
        models_to_check = set()

    processed_modules = []
    loaded_modules = []
    registry = odoo.registry(cr.dbname)
    migrations = odoo.modules.migration.MigrationManager(cr, graph)
    module_count = len(graph)
    _logger.info('loading %d modules...', module_count)

    # suppress commits to have the upgrade of one module in just one transaction
    cr.commit_org = cr.commit
    cr.commit = lambda *args: None
    cr.rollback_org = cr.rollback
    cr.rollback = lambda *args: None

    # Delayed import to prevent import loop
    from odoo.fields import set_migration_cursor
    set_migration_cursor(cr)

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    t0_sql = odoo.sql_db.sql_counter

    # models whose schema was (re)initialized during this load
    models_updated = set()

    for index, package in enumerate(graph, 1):
        module_name = package.name
        module_id = package.id

        if module_name in skip_modules or module_name in loaded_modules:
            continue

        _logger.debug('loading module %s (%d/%d)', module_name, index, module_count)

        needs_update = (
            hasattr(package, "init")
            or hasattr(package, "update")
            or package.state in ("to install", "to upgrade")
        )
        if needs_update:
            if package.name != 'base':
                registry.setup_models(cr)
            migrations.migrate_module(package, 'pre')

        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['odoo.addons.%s' % (module_name,)]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        model_names = registry.load(cr, package)

        loaded_modules.append(package.name)
        if needs_update:
            models_updated |= set(model_names)
            models_to_check -= set(model_names)
            registry.setup_models(cr)

            # OpenUpgrade: rebuild the local registry based on the loaded models
            local_registry = {}
            env = api.Environment(cr, SUPERUSER_ID, {})
            for model in env.values():
                if not model._auto:
                    continue
                openupgrade_loading.log_model(model, local_registry)
            openupgrade_loading.compare_registries(
                cr, package.name, upg_registry, local_registry)
            # OpenUpgrade end

            registry.init_models(cr, model_names, {'module': package.name})
        elif package.state != 'to remove':
            # The current module has simply been loaded. The models extended by this module
            # and for which we updated the schema, must have their schema checked again.
            # This is because the extension may have changed the model,
            # e.g. adding required=True to an existing field, but the schema has not been
            # updated by this module because it's not marked as 'to upgrade/to install'.
            models_to_check |= set(model_names) & models_updated

        idref = {}

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        if needs_update:
            env = api.Environment(cr, SUPERUSER_ID, {})
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_models() above.
            module = env['ir.module.module'].browse(module_id)

            if perform_checks:
                module._check()

            if package.state == 'to upgrade':
                # upgrading the module information
                module.write(module.get_values_from_terp(package.data))
            load_data(cr, idref, mode, kind='data', package=package, report=report)
            demo_loaded = package.dbdemo = load_demo(cr, package, idref, mode, report)
            cr.execute('update ir_module_module set demo=%s where id=%s', (demo_loaded, module_id))
            module.invalidate_cache(['demo'])

            # OpenUpgrade: add 'try' block for logging exceptions
            # as errors in post scripts seem to be dropped
            try:
                migrations.migrate_module(package, 'post')
            except Exception as exc:
                _logger.error(
                    'Error executing post migration script for module %s: %s',
                    package, exc)
                raise

            # Update translations for all installed languages
            overwrite = odoo.tools.config["overwrite_existing_translations"]
            module.with_context(overwrite=overwrite)._update_translations()

            if package.name is not None:
                registry._init_modules.add(package.name)

            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            if mode == 'update':
                # validate the views that have not been checked yet
                env['ir.ui.view']._validate_module_views(module_name)

            # need to commit any modification the module's installation or
            # update made to the schema or data so the tests can run
            # (separately in their own transaction)
            # OpenUpgrade: commit after processing every module as well, for
            # easier debugging and continuing an interrupted migration
            cr.commit_org()

            if demo_loaded:
                # launch tests only in demo mode, allowing tests to use demo data.
                if tools.config.options['test_enable']:
                    # Yamel test
                    report.record_result(load_test(idref, mode))

                    # Python tests
                    env['ir.http']._clear_routing_map()     # force routing map to be rebuilt
                    report.record_result(odoo.modules.module.run_unit_tests(module_name, cr.dbname))
                    # tests may have reset the environment
                    env = api.Environment(cr, SUPERUSER_ID, {})
                    module = env['ir.module.module'].browse(module_id)

            # OpenUpgrade: run tests
            if os.environ.get('OPENUPGRADE_TESTS') and package.name is not None:
                # Load tests in <module>/migrations/tests and enable standard tags if necessary
                prefix = '.migrations'
                test_tags = tools.config['test_tags']
                if not test_tags:
                    tools.config['test_tags'] = '+standard'
                report.record_result(
                    odoo.modules.module.run_unit_tests(
                        module_name, cr.dbname, openupgrade_prefix=prefix))
                tools.config['test_tags'] = test_tags

            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            module.write({'state': 'installed', 'latest_version': ver})
            # OpenUpgrade: commit module_n state and version immediatly
            # to avoid invalid database state if module_n+1 raises an
            # exception
            cr.commit_org()

            package.load_state = package.state
            package.load_version = package.installed_version
            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)

        if package.name is not None:
            registry._init_modules.add(package.name)

    _logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, odoo.sql_db.sql_counter - t0_sql)

    # Openupgrade: restore commit method and unset migration cursor
    cr.commit = cr.commit_org
    cr.commit()
    set_migration_cursor()

    return loaded_modules, processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
    """Load, and if requested install/upgrade, all modules of database ``db``.

    Orchestrates the whole registry build: base first, then all marked
    modules, then cleanup, uninstallation and validation steps.
    """
    initialize_sys_path()

    force = []
    if force_demo:
        force.append('demo')

    # models whose schema must be re-verified at the end (see STEP 5.5)
    models_to_check = set()

    with db.cursor() as cr:
        if not odoo.modules.db.is_initialized(cr):
            if not update_module:
                _logger.error("Database %s not initialized, you can force it with `-i base`", cr.dbname)
                return
            _logger.info("init db")
            odoo.modules.db.initialize(cr)
            update_module = True    # process auto-installed modules
            tools.config["init"]["all"] = 1
            tools.config['update']['all'] = 1
            if not tools.config['without_demo']:
                tools.config["demo"]['all'] = 1

        # This is a brand new registry, just created in
        # odoo.modules.registry.Registry.new().
        registry = odoo.registry(cr.dbname)

        if 'base' in tools.config['update'] or 'all' in tools.config['update']:
            cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))

        # STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
        graph = odoo.modules.graph.Graph()
        graph.add_module(cr, 'base', force)
        if not graph:
            _logger.critical('module base cannot be loaded! (hint: verify addons-path)')
            raise ImportError('Module `base` cannot be loaded! (hint: verify addons-path)')

        # processed_modules: for cleanup step after install
        # loaded_modules: to avoid double loading
        report = registry._assertion_report
        loaded_modules, processed_modules = load_module_graph(
            cr, graph, status, perform_checks=update_module,
            report=report, models_to_check=models_to_check)

        load_lang = tools.config.pop('load_language')
        if load_lang or update_module:
            # some base models are used below, so make sure they are set up
            registry.setup_models(cr)

        if load_lang:
            for lang in load_lang.split(','):
                tools.load_language(cr, lang)

        # STEP 2: Mark other modules to be loaded/updated
        if update_module:
            env = api.Environment(cr, SUPERUSER_ID, {})
            Module = env['ir.module.module']
            _logger.info('updating modules list')
            Module.update_list()

            _check_module_names(cr, itertools.chain(tools.config['init'], tools.config['update']))

            module_names = [k for k, v in tools.config['init'].items() if v]
            if module_names:
                modules = Module.search([('state', '=', 'uninstalled'), ('name', 'in', module_names)])
                if modules:
                    modules.button_install()

            module_names = [k for k, v in tools.config['update'].items() if v]
            if module_names:
                modules = Module.search([('state', '=', 'installed'), ('name', 'in', module_names)])
                if modules:
                    modules.button_upgrade()

            cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
            Module.invalidate_cache(['state'])

        # STEP 3: Load marked modules (skipping base which was done in STEP 1)
        # IMPORTANT: this is done in two parts, first loading all installed or
        #            partially installed modules (i.e. installed/to upgrade), to
        #            offer a consistent system to the second part: installing
        #            newly selected modules.
        #            We include the modules 'to remove' in the first step, because
        #            they are part of the "currently installed" modules. They will
        #            be dropped in STEP 6 later, before restarting the loading
        #            process.
        # IMPORTANT 2: We have to loop here until all relevant modules have been
        #              processed, because in some rare cases the dependencies have
        #              changed, and modules that depend on an uninstalled module
        #              will not be processed on the first pass.
        #              It's especially useful for migrations.
        previously_processed = -1
        while previously_processed < len(processed_modules):
            previously_processed = len(processed_modules)
            processed_modules += load_marked_modules(
                cr, graph, ['installed', 'to upgrade', 'to remove'],
                force, status, report, loaded_modules, update_module, models_to_check)
            if update_module:
                processed_modules += load_marked_modules(
                    cr, graph, ['to install'],
                    force, status, report, loaded_modules, update_module, models_to_check)

        registry.loaded = True
        registry.setup_models(cr)

        # STEP 3.5: execute migration end-scripts
        migrations = odoo.modules.migration.MigrationManager(cr, graph)
        for package in graph:
            migrations.migrate_module(package, 'end')

        # STEP 4: Finish and cleanup installations
        if processed_modules:
            env = api.Environment(cr, SUPERUSER_ID, {})
            # warn about concrete models lacking any access rule
            cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
            for (model, name) in cr.fetchall():
                if model in registry and not registry[model]._abstract and not registry[model]._transient:
                    _logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,base.group_user,1,0,0,0',
                                    model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))

            # Temporary warning while we remove access rights on osv_memory objects, as they have
            # been replaced by owner-only access rights
            cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
            for (model, name) in cr.fetchall():
                if model in registry and registry[model]._transient:
                    _logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)

            cr.execute("SELECT model from ir_model")
            for (model,) in cr.fetchall():
                if model in registry:
                    env[model]._check_removed_columns(log=True)
                elif _logger.isEnabledFor(logging.INFO):    # more an info that a warning...
                    _logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)

            # Cleanup orphan records
            env['ir.model.data']._process_end(processed_modules)

        for kind in ('init', 'demo', 'update'):
            tools.config[kind] = {}

        # STEP 5: Uninstall modules to remove
        if update_module:
            # Remove records referenced from ir_model_data for modules to be
            # removed (and removed the references from ir_model_data).
            cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s", ('to remove',))
            modules_to_remove = dict(cr.fetchall())
            if modules_to_remove:
                env = api.Environment(cr, SUPERUSER_ID, {})
                # uninstall in reverse dependency order
                pkgs = reversed([p for p in graph if p.name in modules_to_remove])
                for pkg in pkgs:
                    uninstall_hook = pkg.info.get('uninstall_hook')
                    if uninstall_hook:
                        py_module = sys.modules['odoo.addons.%s' % (pkg.name,)]
                        getattr(py_module, uninstall_hook)(cr, registry)

                Module = env['ir.module.module']
                Module.browse(modules_to_remove.values()).module_uninstall()
                # Recursive reload, should only happen once, because there should be no
                # modules to remove next time
                cr.commit()
                _logger.info('Reloading registry once more after uninstalling modules')
                api.Environment.reset()
                registry = odoo.modules.registry.Registry.new(cr.dbname, force_demo, status, update_module)
                registry.check_tables_exist(cr)
                cr.commit()
                return registry

        # STEP 5.5: Verify extended fields on every model
        # This will fix the schema of all models in a situation such as:
        #   - module A is loaded and defines model M;
        #   - module B is installed/upgraded and extends model M;
        #   - module C is loaded and extends model M;
        #   - module B and C depend on A but not on each other;
        # The changes introduced by module C are not taken into account by the upgrade of B.
        if models_to_check:
            registry.init_models(cr, list(models_to_check), {'models_to_check': True})

        # STEP 6: verify custom views on every model
        if update_module:
            env = api.Environment(cr, SUPERUSER_ID, {})
            View = env['ir.ui.view']
            for model in registry:
                try:
                    View._validate_custom_views(model)
                except Exception as e:
                    _logger.warning('invalid custom view(s) for model %s: %s', model, tools.ustr(e))

        if report.failures:
            _logger.error('At least one test failed when loading the modules.')
        else:
            _logger.info('Modules loaded.')

        # STEP 8: call _register_hook on every model
        env = api.Environment(cr, SUPERUSER_ID, {})
        for model in env.values():
            model._register_hook()

        # STEP 9: save installed/updated modules for post-install tests
        registry.updated_modules += processed_modules
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None, models_to_check=None):
    """Migrates+Updates or Installs all module nodes from ``graph``

       :param graph: graph of module nodes to load
       :param status: deprecated parameter, unused, left to avoid changing signature in 8.0
       :param perform_checks: whether module descriptors should be checked for validity (prints warnings for same cases)
       :param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
       :return: list of modules that were installed or updated
    """
    if models_to_check is None:
        models_to_check = set()

    processed_modules = []
    loaded_modules = []
    registry = odoo.registry(cr.dbname)
    migrations = odoo.modules.migration.MigrationManager(cr, graph)
    module_count = len(graph)
    _logger.info('loading %d modules...', module_count)

    # register, instantiate and initialize models for each modules
    t0 = time.time()
    # per-loading query counters: other-cursor queries vs this cursor's queries
    loading_extra_query_count = odoo.sql_db.sql_counter
    loading_cursor_query_count = cr.sql_log_count

    # models whose schema was (re)initialized during this load
    models_updated = set()

    for index, package in enumerate(graph, 1):
        module_name = package.name
        module_id = package.id

        if skip_modules and module_name in skip_modules:
            continue

        module_t0 = time.time()
        module_cursor_query_count = cr.sql_log_count
        module_extra_query_count = odoo.sql_db.sql_counter

        needs_update = (
            hasattr(package, "init")
            or hasattr(package, "update")
            or package.state in ("to install", "to upgrade")
        )
        module_log_level = logging.DEBUG
        if needs_update:
            module_log_level = logging.INFO
        _logger.log(module_log_level, 'Loading module %s (%d/%d)', module_name, index, module_count)

        if needs_update:
            if package.name != 'base':
                registry.setup_models(cr)
            migrations.migrate_module(package, 'pre')
            if package.name != 'base':
                # flush pending ORM writes before importing the module's code
                env = api.Environment(cr, SUPERUSER_ID, {})
                env['base'].flush()

        load_openerp_module(package.name)

        new_install = package.state == 'to install'
        if new_install:
            py_module = sys.modules['odoo.addons.%s' % (module_name,)]
            pre_init = package.info.get('pre_init_hook')
            if pre_init:
                getattr(py_module, pre_init)(cr)

        model_names = registry.load(cr, package)

        mode = 'update'
        if hasattr(package, 'init') or package.state == 'to install':
            mode = 'init'

        loaded_modules.append(package.name)
        if needs_update:
            models_updated |= set(model_names)
            models_to_check -= set(model_names)
            registry.setup_models(cr)
            registry.init_models(cr, model_names, {'module': package.name}, new_install)
        elif package.state != 'to remove':
            # The current module has simply been loaded. The models extended by this module
            # and for which we updated the schema, must have their schema checked again.
            # This is because the extension may have changed the model,
            # e.g. adding required=True to an existing field, but the schema has not been
            # updated by this module because it's not marked as 'to upgrade/to install'.
            models_to_check |= set(model_names) & models_updated

        idref = {}

        if needs_update:
            env = api.Environment(cr, SUPERUSER_ID, {})
            # Can't put this line out of the loop: ir.module.module will be
            # registered by init_models() above.
            module = env['ir.module.module'].browse(module_id)

            if perform_checks:
                module._check()

            if package.state == 'to upgrade':
                # upgrading the module information
                module.write(module.get_values_from_terp(package.data))
            load_data(cr, idref, mode, kind='data', package=package)
            demo_loaded = package.dbdemo = load_demo(cr, package, idref, mode)
            cr.execute('update ir_module_module set demo=%s where id=%s', (demo_loaded, module_id))
            module.invalidate_cache(['demo'])

            migrations.migrate_module(package, 'post')

            # Update translations for all installed languages
            overwrite = odoo.tools.config["overwrite_existing_translations"]
            module._update_translations(overwrite=overwrite)

        if package.name is not None:
            registry._init_modules.add(package.name)

        if needs_update:
            if new_install:
                post_init = package.info.get('post_init_hook')
                if post_init:
                    getattr(py_module, post_init)(cr, registry)

            if mode == 'update':
                # validate the views that have not been checked yet
                env['ir.ui.view']._validate_module_views(module_name)

            # need to commit any modification the module's installation or
            # update made to the schema or data so the tests can run
            # (separately in their own transaction)
            cr.commit()

            concrete_models = [model for model in model_names if not registry[model]._abstract]
            if concrete_models:
                cr.execute("""
                    SELECT model FROM ir_model 
                    WHERE id NOT IN (SELECT DISTINCT model_id FROM ir_model_access) AND model IN %s
                """, [tuple(concrete_models)])
                models = [model for [model] in cr.fetchall()]
                if models:
                    lines = [
                        f"The models {models} have no access rules in module {module_name}, consider adding some, like:",
                        "id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink"
                    ]
                    for model in models:
                        xmlid = model.replace('.', '_')
                        lines.append(f"{module_name}.access_{xmlid},access_{xmlid},{module_name}.model_{xmlid},base.group_user,1,0,0,0")
                    _logger.warning('\n'.join(lines))

        updating = tools.config.options['init'] or tools.config.options['update']
        test_time = test_queries = 0
        test_results = None
        if tools.config.options['test_enable'] and (needs_update or not updating):
            env = api.Environment(cr, SUPERUSER_ID, {})
            loader = odoo.tests.loader
            suite = loader.make_suite([module_name], 'at_install')
            if suite.countTestCases():
                if not needs_update:
                    registry.setup_models(cr)
                # Python tests
                env['ir.http']._clear_routing_map()     # force routing map to be rebuilt
                tests_t0, tests_q0 = time.time(), odoo.sql_db.sql_counter
                test_results = loader.run_suite(suite, module_name)
                report.update(test_results)
                test_time = time.time() - tests_t0
                test_queries = odoo.sql_db.sql_counter - tests_q0

                # tests may have reset the environment
                env = api.Environment(cr, SUPERUSER_ID, {})
                module = env['ir.module.module'].browse(module_id)

        if needs_update:
            processed_modules.append(package.name)

            ver = adapt_version(package.data['version'])
            # Set new modules and dependencies
            module.write({'state': 'installed', 'latest_version': ver})

            package.load_state = package.state
            package.load_version = package.installed_version
            package.state = 'installed'
            for kind in ('init', 'demo', 'update'):
                if hasattr(package, kind):
                    delattr(package, kind)
            module.flush()

        extra_queries = odoo.sql_db.sql_counter - module_extra_query_count - test_queries
        extras = []
        if test_queries:
            extras.append(f'+{test_queries} test')
        if extra_queries:
            extras.append(f'+{extra_queries} other')
        _logger.log(
            module_log_level, "Module %s loaded in %.2fs%s, %s queries%s",
            module_name, time.time() - module_t0,
            f' (incl. {test_time:.2f}s test)' if test_time else '',
            cr.sql_log_count - module_cursor_query_count,
            f' ({", ".join(extras)})' if extras else '')
        if test_results and not test_results.wasSuccessful():
            _logger.error(
                "Module %s: %d failures, %d errors of %d tests",
                module_name, len(test_results.failures),
                len(test_results.errors), test_results.testsRun)

    _logger.runbot("%s modules loaded in %.2fs, %s queries (+%s extra)",
                   len(graph),
                   time.time() - t0,
                   cr.sql_log_count - loading_cursor_query_count,
                   odoo.sql_db.sql_counter - loading_extra_query_count)  # extra queries: testes, notify, any other closed cursor

    return loaded_modules, processed_modules
def wrapper(___dbname, *args, **kwargs):
    """ Wraps around OSV functions and normalises a few exceptions.

    Retries on typical transaction serialization errors and turns SQL
    integrity errors into translated ``ValidationError``s.
    """
    dbname = ___dbname      # NOTE: this forbid to use "___dbname" as arguments in http routes

    def tr(src, ttype):
        # We try to do the same as the _(), but without the frame
        # inspection, since we aready are wrapping an osv function
        # trans_obj = self.get('ir.translation') cannot work yet :(
        ctx = {}
        if not kwargs:
            if args and isinstance(args[-1], dict):
                ctx = args[-1]
        elif isinstance(kwargs, dict):
            if 'context' in kwargs:
                ctx = kwargs['context']
            elif 'kwargs' in kwargs:
                # http entry points such as call_kw()
                ctx = kwargs['kwargs'].get('context')

        uid = 1
        if args and isinstance(args[0], pycompat.integer_types):
            uid = args[0]

        lang = ctx and ctx.get('lang')
        if not (lang or hasattr(src, '__call__')):
            return src

        # We open a *new* cursor here, one reason is that failed SQL
        # queries (as in IntegrityError) will invalidate the current one.
        cr = False

        if hasattr(src, '__call__'):
            # callable. We need to find the right parameters to call
            # the  orm._sql_message(self, cr, uid, ids, context) function,
            # or we skip..
            # our signature is f(registry, dbname [,uid, obj, method, args])
            try:
                if args and len(args) > 1:
                    # TODO self doesn't exist, but was already wrong before (it was not a registry but just the object_service.
                    obj = self.get(args[1])
                    if len(args) > 3 and isinstance(args[3], (pycompat.integer_types, list)):
                        ids = args[3]
                    else:
                        ids = []
                cr = odoo.sql_db.db_connect(dbname).cursor()
                return src(obj, cr, uid, ids, context=(ctx or {}))
            except Exception:
                pass
            finally:
                if cr:
                    cr.close()

            return False  # so that the original SQL error will
                          # be returned, it is the best we have.

        try:
            cr = odoo.sql_db.db_connect(dbname).cursor()
            res = translate(cr, name=False, source_type=ttype, lang=lang, source=src)
            if res:
                return res
            else:
                return src
        finally:
            if cr:
                cr.close()

    def _(src):
        return tr(src, 'code')

    tries = 0
    while True:
        try:
            if odoo.registry(dbname)._init and not odoo.tools.config['test_enable']:
                raise odoo.exceptions.Warning('Currently, this database is not fully loaded and can not be used.')
            return f(dbname, *args, **kwargs)
        except (OperationalError, QWebException) as e:
            if isinstance(e, QWebException):
                cause = e.qweb.get('cause')
                if isinstance(cause, OperationalError):
                    e = cause
                else:
                    raise
            # Automatically retry the typical transaction serialization errors
            if e.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
                raise
            if tries >= MAX_TRIES_ON_CONCURRENCY_FAILURE:
                _logger.info("%s, maximum number of tries reached" % errorcodes.lookup(e.pgcode))
                raise
            # exponential backoff with random jitter before retrying
            wait_time = random.uniform(0.0, 2 ** tries)
            tries += 1
            _logger.info("%s, retry %d/%d in %.04f sec..." % (errorcodes.lookup(e.pgcode), tries, MAX_TRIES_ON_CONCURRENCY_FAILURE, wait_time))
            time.sleep(wait_time)
        except IntegrityError as inst:
            registry = odoo.registry(dbname)
            # BUGFIX: exceptions are not subscriptable on Python 3;
            # inst[0] raised TypeError there. inst.args[0] is equivalent
            # on both Python 2 and 3.
            for key in registry._sql_error.keys():
                if key in inst.args[0]:
                    raise ValidationError(tr(registry._sql_error[key], 'sql_constraint') or inst.args[0])
            if inst.pgcode in (errorcodes.NOT_NULL_VIOLATION, errorcodes.FOREIGN_KEY_VIOLATION, errorcodes.RESTRICT_VIOLATION):
                msg = _('The operation cannot be completed, probably due to the following:\n- deletion: you may be trying to delete a record while other records still reference it\n- creation/update: a mandatory field is not correctly set')
                _logger.debug("IntegrityError", exc_info=True)
                try:
                    # best-effort extraction of the offending table name from
                    # the (possibly localized) PostgreSQL error text
                    errortxt = inst.pgerror.replace('«', '"').replace('»', '"')
                    if '"public".' in errortxt:
                        context = errortxt.split('"public".')[1]
                        model_name = table = context.split('"')[1]
                    else:
                        last_quote_end = errortxt.rfind('"')
                        last_quote_begin = errortxt.rfind('"', 0, last_quote_end)
                        model_name = table = errortxt[last_quote_begin + 1:last_quote_end].strip()
                    model = table.replace("_", ".")
                    if model in registry:
                        model_class = registry[model]
                        model_name = model_class._description or model_class._name
                    msg += _('\n\n[object with reference: %s - %s]') % (model_name, model)
                except Exception:
                    pass
                raise ValidationError(msg)
            else:
                raise ValidationError(inst.args[0])
def setUpClass(cls):
    """Expose a registry, an open cursor and a superuser environment on the class."""
    db_name = get_db_name()
    cls.registry = odoo.registry(db_name)
    cursor = cls.registry.cursor()
    cls.cr = cursor
    cls.uid = odoo.SUPERUSER_ID
    cls.env = api.Environment(cursor, cls.uid, {})
def setUpClass(cls):
    """Set up the shared registry, cursor and superuser environment for the class."""
    super(SingleTransactionCase, cls).setUpClass()
    reg = odoo.registry(get_db_name())
    cursor = reg.cursor()
    cls.registry = reg
    cls.cr = cursor
    cls.env = api.Environment(cursor, odoo.SUPERUSER_ID, {})
def _acquire_job(cls, db_name):
    # TODO remove 'check' argument from addons/base_automation/base_automation.py
    """ Try to process one cron job.

    This selects in database all the jobs that should be processed. It then
    tries to lock each of them and, if it succeeds, run the cron job (if it
    doesn't succeed, it means the job was already locked to be taken care of
    by another thread) and return.

    If a job was processed, returns True, otherwise returns False.

    :param str db_name: name of the database whose cron jobs are polled
    """
    db = odoo.sql_db.db_connect(db_name)
    # Store the db name on the current thread; other code (e.g. LocalService)
    # looks it up via threading.currentThread().dbname.
    threading.current_thread().dbname = db_name
    jobs = []
    try:
        with db.cursor() as cr:
            # Make sure the database we poll has the same version as the code of base
            cr.execute("SELECT 1 FROM ir_module_module WHERE name=%s AND latest_version=%s", ('base', BASE_VERSION))
            if cr.fetchone():
                # Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
                cr.execute("""SELECT * FROM ir_cron WHERE numbercall != 0 AND active AND nextcall <= (now() at time zone 'UTC') ORDER BY priority""")
                jobs = cr.dictfetchall()
            else:
                _logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION)
    except psycopg2.ProgrammingError as e:
        if e.pgcode == '42P01':
            # Class 42 — Syntax Error or Access Rule Violation; 42P01: undefined_table
            # The table ir_cron does not exist; this is probably not an OpenERP database.
            _logger.warning('Tried to poll an undefined table on database %s.', db_name)
        else:
            raise
    except Exception:
        # best-effort poll: any other failure is logged and the job list stays empty
        _logger.warning('Exception in cron:', exc_info=True)

    for job in jobs:
        # One dedicated cursor per job so the row lock lives exactly as long
        # as the job's processing.
        lock_cr = db.cursor()
        try:
            # Try to grab an exclusive lock on the job row from within the task transaction
            # Restrict to the same conditions as for the search since the job may have already
            # been run by an other thread when cron is running in multi thread
            lock_cr.execute("""SELECT * FROM ir_cron WHERE numbercall != 0 AND active AND nextcall <= (now() at time zone 'UTC') AND id=%s FOR UPDATE NOWAIT""",
                            (job['id'],), log_exceptions=False)
            locked_job = lock_cr.fetchone()
            if not locked_job:
                _logger.debug("Job `%s` already executed by another process/thread. skipping it", job['cron_name'])
                continue
            # Got the lock on the job row, run its code
            _logger.debug('Starting job `%s`.', job['cron_name'])
            # Separate cursor for the job body so its commits/rollbacks do not
            # touch the transaction holding the row lock.
            job_cr = db.cursor()
            try:
                registry = odoo.registry(db_name)
                registry[cls._name]._process_job(job_cr, job, lock_cr)
            except Exception:
                _logger.exception('Unexpected exception while processing cron job %r', job)
            finally:
                job_cr.close()
        except psycopg2.OperationalError as e:
            if e.pgcode == '55P03':
                # Class 55: Object not in prerequisite state; 55P03: lock_not_available
                _logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['cron_name'])
                continue
            else:
                # Unexpected OperationalError
                raise
        finally:
            # we're exiting due to an exception while acquiring the lock
            lock_cr.close()

    if hasattr(threading.current_thread(), 'dbname'):  # cron job could have removed it as side-effect
        del threading.current_thread().dbname
def _auto_cache_data(self, table, ids, sigle_cache=False):
    """Cache record data of ``table`` for ``ids``, either in Redis or in an
    xlsx file (one JSON-serialized record per worksheet row).

    :param str table: SQL table name; translated to a model name by replacing
        underscores with dots (e.g. ``res_partner`` -> ``res.partner``)
    :param list ids: database ids of the records to cache
    :param bool sigle_cache: (sic — kept for interface compatibility) True when
        called back for a single record; existing Redis keys for ``ids`` are
        invalidated before re-caching
    :return: empty dict
    """
    use_redis, redis_db_pos = self.get_config()
    if use_redis and sigle_cache:
        # Drop stale cache entries before rebuilding them.
        for id in ids:
            redis_db_pos.delete("{}:{}".format(table, id))
    _logger.info('query %s with start ID %s and End ID %s' % (table, min(ids), max(ids)))
    with api.Environment.manage():
        # Dedicated cursor: this long-running job must not share the caller's
        # transaction.
        new_cr = registry(self._cr.dbname).cursor()
        self = self.with_env(self.env(cr=new_cr))
        start_time = time.time()
        # table name -> model name (res_partner -> res.partner)
        module_name = '.'.join(table.split('_'))
        model_fields = self.env[module_name].fields_get()
        fields_load = []  # plain stored columns, read directly via SQL
        fields_orm = []   # stored relational fields, read via ORM read()
        for k, v in model_fields.iteritems():
            if v['type'] in ['many2many', 'many2one'] and v['store'] == True:
                fields_orm.append(k)
            if v['type'] not in ['one2many', 'binary', 'monetary', 'many2many'] and v['store'] == True:
                fields_load.append(k)
        if table != 'product_product':
            # NOTE(review): SQL is built by string formatting; ids are assumed
            # to be integers coming from trusted code — confirm callers.
            sql_select = "SELECT id "
            for i in fields_load:
                sql_select += ', {}'.format(i)
            sql_from = "FROM {} ".format(table)
            if len(ids) == 1:
                sql_where = "WHERE id = {}".format(ids[0])
            else:
                sql_where = "WHERE id in {}".format(tuple(ids))
            sql_query = "{} {} {}".format(sql_select, sql_from, sql_where)
            if table == 'res_partner':
                # BUG FIX: the original evaluated this concatenation without
                # assigning it, so the customer/active filter was never applied.
                sql_query += " AND customer is True AND active is True"
            self.env.cr.execute(sql_query)
            records = self.env.cr.dictfetchall()
            if not use_redis:
                path_file = self.get_data_path(table + '_' + str(max(ids)) + '.xlsx')
                workbook = xlsxwriter.Workbook(path_file)
                worksheet = workbook.add_worksheet()
                row = 0
            # BUG FIX: iterate over a copy — the body removes missing records
            # from ``records``, and removing from the list being iterated
            # silently skips the following element.
            for record in list(records):
                sub_records = self.env[module_name].browse(record['id']).read(fields_orm)
                if sub_records:
                    record.update(sub_records[0])
                    if not use_redis:
                        worksheet.write(row, 0, json.dumps(record))
                        row += 1
                    else:
                        redis_db_pos.set("{}:{}".format(table, record['id']), json.dumps(record))
                else:
                    _logger.error('Record not found: %s' % record['id'])
                    if not use_redis:
                        records.remove(record)
            if not use_redis:
                workbook.close()
                # Keep a secondary copy of the generated workbook.
                shutil.copy(path_file, self.get_data_path(table + '_' + str(max(ids)) + '_copy' + '.xlsx'))
            _logger.info('stored : %s total %s rows, need a times %s' % (module_name, len(records), (time.time() - start_time)))
        if table == 'product_product':
            # Products join product_template and additionally carry per-pricelist
            # prices and available quantity.
            price_lists = self.env['product.pricelist'].search([])
            sql_select = "SELECT p.id "
            for i in fields_load:
                sql_select += ', p.{}'.format(i)
            model_fields = self.env['product.template'].fields_get()
            fields_load = []
            for k, v in model_fields.iteritems():
                if k != 'id':
                    if v['type'] in ['many2many', 'many2one'] and v['store'] == True:
                        fields_orm.append(k)
                    if v['type'] not in ['one2many', 'binary', 'monetary', 'many2many'] and v['store'] == True:
                        fields_load.append(k)
            for i in fields_load:
                sql_select += ', pt.{}'.format(i)
            sql_from = " FROM product_product AS p, product_template AS pt "
            sql_where = " WHERE p.product_tmpl_id = pt.id AND pt.sale_ok is True AND pt.available_in_pos is True AND pt.active is True "
            if len(ids) == 1:
                sql_where += " AND p.id = {}".format(ids[0])
            else:
                sql_where += " AND p.id in {}".format(tuple(ids))
            sql_query = sql_select + sql_from + sql_where
            _logger.info(sql_query)
            self.env.cr.execute(sql_query)
            records = self.env.cr.dictfetchall()
            if not use_redis:
                path_file = self.get_data_path(table + '_' + str(max(ids)) + '.xlsx')
                workbook = xlsxwriter.Workbook(path_file)
                worksheet = workbook.add_worksheet()
                row = 0
            # BUG FIX: iterate over a copy (same removal-while-iterating issue
            # as above).
            for record in list(records):
                product = self.env[module_name].browse(record['id'])
                sub_records = product.read(fields_orm)
                if sub_records:
                    sub_records[0]['price_with_pricelist'] = {}
                    sub_records[0].update({"qty_available": product.qty_available})
                    for price_list in price_lists:
                        product_with_context = self.env[module_name].browse(record['id']).with_context(
                            pricelist=price_list.id, quantity=1,
                            date_order=fields.Datetime.now())
                        sub_records[0]['price_with_pricelist'][price_list.id] = product_with_context.price
                    record.update(sub_records[0])
                    record['display_name'] = record['name']
                    record['price'] = record['list_price']
                    if not use_redis:
                        worksheet.write(row, 0, json.dumps(record))
                        row += 1
                    else:
                        redis_db_pos.set("{}:{}".format(table, record['id']), json.dumps(record))
                else:
                    _logger.error('Record not found: %s' % record['id'])
                    if not use_redis:
                        records.remove(record)
            if not use_redis:
                workbook.close()
                shutil.copy(path_file, self.get_data_path(table + '_' + str(max(ids)) + '_copy' + '.xlsx'))
            _logger.info(
                'stored : {} total {} rows, need a times {}'.format(
                    module_name, len(records), (time.time() - start_time)))
        # self._cr is the dedicated cursor opened above (after with_env).
        self._cr.close()
    return {}
def client_db_configure_mail(self, incoming, outgoing, db=None,
                             test_and_confirm=False, **params):
    # pylint: disable=too-many-locals, too-many-branches
    """ Configure mail servers for database

        :param dict incoming: dict with config of incoming mail server
        :param dict outgoing: dict with config of outgoing mail server
        :param bool test_and_confirm: if set to True, test if odoo can use
            specified mail servers
        :return: 200 OK if everything is ok.
                 in case of errors, 500 code will be returned

        Required params for incoming mail server:
            - host
            - user
            - password

        Required params for outgoing mail server:
            - host
            - user
            - password
    """
    test_and_confirm = str2bool(test_and_confirm)
    incoming = json.loads(incoming)
    outgoing = json.loads(outgoing)
    incoming_data = {
        'name': 'Yodoo Incoming Mail',
        'server_type': 'imap',
        'is_ssl': True,
        'port': 993,
        'server': incoming['host'],
        'user': incoming['user'],
        'password': incoming['password'],
        'active': incoming.get('active', True),
        'state': 'draft',
    }
    outgoing_data = {
        'name': 'Yodoo Outgoing Mail',
        'smtp_encryption': 'starttls',
        'smtp_port': 587,
        'smtp_host': outgoing['host'],
        'smtp_user': outgoing['user'],
        'smtp_pass': outgoing['password'],
        'active': outgoing.get('active', True),
    }
    with registry(db).cursor() as cr:
        env = api.Environment(cr, SUPERUSER_ID, context={})

        # Create or update the incoming (IMAP) server; a stable xml_id keeps
        # subsequent calls updating the same record.
        incoming_srv = env.ref('yodoo_client.yodoo_incoming_mail',
                               raise_if_not_found=False)
        if incoming_srv:
            incoming_srv.write(incoming_data)
        else:
            incoming_srv = env['fetchmail.server'].create(incoming_data)
            env['ir.model.data'].create({
                'name': 'yodoo_incoming_mail',
                'module': 'yodoo_client',
                'model': incoming_srv._name,
                'res_id': incoming_srv.id,
                'noupdate': True,
            })
        if test_and_confirm:
            incoming_srv.button_confirm_login()
            if incoming_srv.state != 'done':
                raise werkzeug.exceptions.InternalServerError(
                    "Cannot configure incoming mail server")

        # Create or update the outgoing (SMTP) server.
        outgoing_srv = env.ref('yodoo_client.yodoo_outgoing_mail',
                               raise_if_not_found=False)
        if outgoing_srv:
            outgoing_srv.write(outgoing_data)
        else:
            catchall_domain = outgoing['user'].split('@')
            if len(catchall_domain) > 1:
                # Use the domain part of the SMTP login as catchall domain and
                # rewrite the root user's partner email accordingly.
                catchall_domain = catchall_domain[1]
                res_users = env['res.users'].sudo().with_context(
                    active_test=False)
                res_users.browse(SUPERUSER_ID).partner_id.write(
                    {'email': 'odoobot@%s' % catchall_domain})
                env['ir.config_parameter'].sudo().set_param(
                    'mail.catchall.domain', catchall_domain)
            # Deactivate pre-existing outgoing servers so the new one wins.
            env['ir.mail_server'].search([('active', '=', True)
                                          ]).write({'active': False})
            outgoing_srv = env['ir.mail_server'].create(outgoing_data)
            env['ir.model.data'].create({
                'name': 'yodoo_outgoing_mail',
                'module': 'yodoo_client',
                'model': outgoing_srv._name,
                'res_id': outgoing_srv.id,
                'noupdate': True,
            })
        if test_and_confirm:
            # BUG FIX: pre-bind smtp so the finally block cannot raise
            # NameError when connect() itself fails (the original relied on a
            # broad except swallowing that NameError).
            smtp = None
            try:
                smtp = outgoing_srv.connect(mail_server_id=outgoing_srv.id)
            except Exception:
                _logger.error("Cannot configure outgoing mail server",
                              exc_info=True)
                raise werkzeug.exceptions.InternalServerError(
                    "Cannot configure outgoing mail server")
            finally:
                try:
                    if smtp:
                        smtp.quit()
                except Exception:  # pylint: disable=except-pass
                    # ignored, just a consequence of the previous exception
                    pass
    return http.Response('OK', status=200)
def count_database(self, database):
    """Open a fresh superuser environment and run ``count_env`` on it.

    NOTE(review): the ``database`` argument is ignored — the registry is
    opened on ``config['db_name']`` instead; confirm this is intended.
    """
    reg = odoo.registry(config['db_name'])
    with reg.cursor() as cursor:
        environment = odoo.api.Environment(cursor, odoo.SUPERUSER_ID, {})
        self.count_env(environment)
def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=False):
    """ Create procurements based on orderpoints.
    :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
        1000 orderpoints.
        This is appropriate for batch jobs only.
    :param company_id: optional company id restricting which orderpoints are processed
    """
    if company_id and self.env.company.id != company_id:
        # To ensure that the company_id is taken into account for
        # all the processes triggered by this method
        # i.e. If a PO is generated by the run of the procurements the
        # sequence to use is the one for the specified company not the
        # one of the user's company
        self = self.with_context(company_id=company_id, force_company=company_id)
    OrderPoint = self.env['stock.warehouse.orderpoint']
    domain = self._get_orderpoint_domain(company_id=company_id)
    # Search without prefetch: only the ids are needed up front; records are
    # browsed in slices of 1000 below.
    orderpoints_noprefetch = OrderPoint.with_context(
        prefetch_fields=False).search(
            domain, order=self._procurement_from_orderpoint_get_order()).ids
    while orderpoints_noprefetch:
        if use_new_cursor:
            # Fresh cursor per batch so each slice of work can be committed
            # (or rolled back) independently.
            cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=cr))
        OrderPoint = self.env['stock.warehouse.orderpoint']

        orderpoints = OrderPoint.browse(orderpoints_noprefetch[:1000])
        orderpoints_noprefetch = orderpoints_noprefetch[1000:]

        # Calculate groups that can be executed together
        location_data = OrderedDict()

        def makedefault():
            # Default bucket: empty recordsets plus an empty group list.
            return {
                'products': self.env['product.product'],
                'orderpoints': self.env['stock.warehouse.orderpoint'],
                'groups': []
            }

        for orderpoint in orderpoints:
            key = self._procurement_from_orderpoint_get_grouping_key(
                [orderpoint.id])
            if not location_data.get(key):
                location_data[key] = makedefault()
            location_data[key]['products'] += orderpoint.product_id
            location_data[key]['orderpoints'] += orderpoint
            location_data[key][
                'groups'] = self._procurement_from_orderpoint_get_groups(
                    [orderpoint.id])

        for location_id, location_res in location_data.items():
            location_orderpoints = location_res['orderpoints']
            product_context = dict(
                self._context,
                location=location_orderpoints[0].location_id.id)
            substract_quantity = location_orderpoints._quantity_in_progress()

            for group in location_res['groups']:
                if group.get('from_date'):
                    product_context['from_date'] = group[
                        'from_date'].strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                if group['to_date']:
                    product_context['to_date'] = group['to_date'].strftime(
                        DEFAULT_SERVER_DATETIME_FORMAT)
                product_quantity = location_res['products'].with_context(
                    product_context)._product_available()
                for orderpoint in location_orderpoints:
                    try:
                        op_product_virtual = product_quantity[
                            orderpoint.product_id.id]['virtual_available']
                        if op_product_virtual is None:
                            continue
                        # Replenish only when forecasted quantity dropped to
                        # (or below) the orderpoint minimum.
                        if float_compare(op_product_virtual,
                                         orderpoint.product_min_qty,
                                         precision_rounding=orderpoint.
                                         product_uom.rounding) <= 0:
                            qty = max(orderpoint.product_min_qty,
                                      orderpoint.product_max_qty
                                      ) - op_product_virtual
                            # Round the quantity up to a multiple of
                            # qty_multiple (when configured).
                            remainder = orderpoint.qty_multiple > 0 and qty % orderpoint.qty_multiple or 0.0
                            if float_compare(remainder, 0.0,
                                             precision_rounding=orderpoint.
                                             product_uom.rounding) > 0:
                                qty += orderpoint.qty_multiple - remainder
                            if float_compare(qty, 0.0,
                                             precision_rounding=orderpoint.
                                             product_uom.rounding) < 0:
                                continue
                            # Deduct quantities already being procured.
                            qty -= substract_quantity[orderpoint.id]
                            qty_rounded = float_round(
                                qty,
                                precision_rounding=orderpoint.product_uom.
                                rounding)
                            if qty_rounded > 0:
                                values = orderpoint._prepare_procurement_values(
                                    qty_rounded, **group['procurement_values'])
                                try:
                                    # Savepoint: a failed run must not abort
                                    # the rest of the batch.
                                    with self._cr.savepoint():
                                        #TODO: make it batch
                                        self.env['procurement.group'].run([
                                            self.env['procurement.group'].
                                            Procurement(
                                                orderpoint.product_id,
                                                qty_rounded,
                                                orderpoint.product_uom,
                                                orderpoint.location_id,
                                                orderpoint.name,
                                                orderpoint.name,
                                                orderpoint.company_id, values)
                                        ])
                                except UserError as error:
                                    self.env[
                                        'stock.rule']._log_next_activity(
                                            orderpoint.product_id, error.name)
                                self._procurement_from_orderpoint_post_process(
                                    [orderpoint.id])
                                if use_new_cursor:
                                    cr.commit()
                    except OperationalError:
                        if use_new_cursor:
                            # Concurrency failure: queue this orderpoint for a
                            # later retry and roll back the batch cursor.
                            orderpoints_noprefetch += [orderpoint.id]
                            cr.rollback()
                            continue
                        else:
                            raise
        try:
            if use_new_cursor:
                cr.commit()
        except OperationalError:
            if use_new_cursor:
                cr.rollback()
                continue
            else:
                raise
        if use_new_cursor:
            cr.commit()
            cr.close()
    return {}
def check(db, uid, passwd):
    """Delegate credential verification to the ``res.users`` model of *db*."""
    return odoo.registry(db)['res.users'].check(db, uid, passwd)
def send_notifications():
    """Send the queued mails on a fresh cursor/environment for *dbname*."""
    with api.Environment.manage(), registry(dbname).cursor() as cr:
        environment = api.Environment(cr, SUPERUSER_ID, _context)
        environment['mail.mail'].browse(email_ids).send()
def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=None,
                                raise_user_error=True):
    """ Create procurements based on orderpoints.
    :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
        1000 orderpoints.
        This is appropriate for batch jobs only.
    :param company_id: optional company to run under (via ``with_company``)
    :param bool raise_user_error: forwarded to ``procurement.group.run``
    """
    self = self.with_company(company_id)
    orderpoints_noprefetch = self.read(['id'])
    orderpoints_noprefetch = [
        orderpoint['id'] for orderpoint in orderpoints_noprefetch
    ]

    for orderpoints_batch in split_every(1000, orderpoints_noprefetch):
        if use_new_cursor:
            # Fresh cursor per batch so each slice can be committed on its own.
            cr = registry(self._cr.dbname).cursor()
            self = self.with_env(self.env(cr=cr))
        orderpoints_batch = self.env['stock.warehouse.orderpoint'].browse(
            orderpoints_batch)
        orderpoints_exceptions = []
        # Retry loop: on ProcurementException the failing orderpoints are
        # removed from the batch and the remainder is run again.
        while orderpoints_batch:
            procurements = []
            for orderpoint in orderpoints_batch:
                # Only orderpoints with a strictly positive quantity to order.
                if float_compare(orderpoint.qty_to_order, 0.0,
                                 precision_rounding=orderpoint.product_uom.
                                 rounding) == 1:
                    date = datetime.combine(orderpoint.lead_days_date,
                                            time.min)
                    values = orderpoint._prepare_procurement_values(date=date)
                    procurements.append(
                        self.env['procurement.group'].Procurement(
                            orderpoint.product_id, orderpoint.qty_to_order,
                            orderpoint.product_uom, orderpoint.location_id,
                            orderpoint.name, orderpoint.name,
                            orderpoint.company_id, values))

            try:
                # Savepoint: a failed run leaves previous work intact.
                with self.env.cr.savepoint():
                    self.env['procurement.group'].with_context(
                        from_orderpoint=True).run(
                            procurements,
                            raise_user_error=raise_user_error)
            except ProcurementException as errors:
                for procurement, error_msg in errors.procurement_exceptions:
                    orderpoints_exceptions += [
                        (procurement.values.get('orderpoint_id'), error_msg)
                    ]
                failed_orderpoints = self.env[
                    'stock.warehouse.orderpoint'].concat(
                        *[o[0] for o in orderpoints_exceptions])
                if not failed_orderpoints:
                    # Cannot identify the culprits: give up on this batch to
                    # avoid an infinite retry loop.
                    _logger.error('Unable to process orderpoints')
                    break
                orderpoints_batch -= failed_orderpoints
            except OperationalError:
                if use_new_cursor:
                    cr.rollback()
                    continue
                else:
                    raise
            else:
                orderpoints_batch._post_process_scheduler()
                break

        # Log an activity on product template for failed orderpoints.
        for orderpoint, error_msg in orderpoints_exceptions:
            existing_activity = self.env['mail.activity'].search([
                ('res_id', '=',
                 orderpoint.product_id.product_tmpl_id.id),
                ('res_model_id', '=',
                 self.env.ref('product.model_product_template').id),
                ('note', '=', error_msg)
            ])
            if not existing_activity:
                orderpoint.product_id.product_tmpl_id.activity_schedule(
                    'mail.mail_activity_data_warning',
                    note=error_msg,
                    user_id=orderpoint.product_id.responsible_id.id
                    or SUPERUSER_ID,
                )

        if use_new_cursor:
            cr.commit()
            cr.close()
    return {}