def authenticate(self, db, login, password, user_agent_env):
    """Authenticate the user and log the connection in 'users.connection'.

    Fixes over the previous version: ``_login`` was called twice (two
    authentication round-trips), and the logging cursor was never closed
    (connection leak on every login attempt).
    """
    uid = self._login(db, login, password)
    cr = pooler.get_db(db).cursor()
    try:
        users_connection_obj = self.pool.get('users.connection')
        res_users_obj = self.pool.get('res.users')
        if uid:
            user = res_users_obj.browse(cr, uid, uid)
            if user:
                vals = {
                    'user_id': user.id,
                    'datetime_connection': time.strftime(
                        DEFAULT_SERVER_DATETIME_FORMAT),
                    'date_connection': time.strftime("%Y-%m-%d"),
                    'year': time.strftime("%Y"),
                    # ISO week number of today
                    'week': datetime.datetime.today().isocalendar()[1],
                    'month': int(time.strftime("%m")),
                }
                users_connection_obj.create(cr, SUPERUSER_ID, vals,
                                            context=None)
                cr.commit()
    finally:
        cr.close()  # fix: cursor was leaked on every call
    # Delegate the actual authentication decision to the standard chain.
    return super(res_users, self).authenticate(
        db, login, password, user_agent_env)
def execute(self, db, uid, obj_name, method, *args, **kw):
    """Dispatch an RPC call to ``obj_name.method`` on its own cursor and,
    on success, run an optional ``post_<method>`` hook after the commit."""
    cr = pooler.get_db(db).cursor()
    try:
        try:
            if method.startswith('_'):
                raise except_osv(
                    'Access Denied',
                    'Private methods (such as %s) cannot be called remotely.'
                    % (method, ))
            res = self.execute_cr(cr, uid, obj_name, method, *args, **kw)
            if res is None:
                _logger.warning(
                    'The method %s of the object %s can not return `None` !',
                    method, obj_name)
            cr.commit()
        except Exception:
            cr.rollback()
            raise
        else:
            hook = 'post_%s' % method
            model = pooler.get_pool(cr.dbname).get(obj_name)
            if hasattr(model, hook):
                # The hook runs after the main commit so the data it needs
                # (e.g. for report attachments) is already persisted.
                _logger.info('Running %s', hook)
                getattr(model, hook)(cr, uid, *args, **kw)
                cr.commit()
    finally:
        cr.close()
    return res
def update_sale_prices(self, cr, uid, ids, context=None):
    """Touch every product of the selected supplier so its sale price is
    recomputed (write with empty vals triggers stored-field recomputation).

    Fixes: the private cursor was never committed nor closed (work lost,
    connection leaked); py2-only ``except E, e`` syntax replaced.
    """
    cr = pooler.get_db(self._cr.dbname).cursor()
    uid, context = self.env.uid, self.env.context
    try:
        wizard_obj = self.pool.get("elneo.supplier.price.update.wizard")
        suppinfo_obj = self.pool.get('product.supplierinfo')
        product_obj = self.pool.get('product.product')
        for wizard in wizard_obj.browse(cr, uid, ids, context=context):
            if not wizard.supplier_id.id:
                continue
            suppinfo_ids = suppinfo_obj.search(
                cr, uid, [('name', '=', wizard.supplier_id.id)],
                context=context)
            for suppinfo in suppinfo_obj.browse(
                    cr, uid, suppinfo_ids, context=context):
                product_ids = product_obj.search(
                    cr, uid,
                    [('product_tmpl_id', '=', suppinfo.product_id.id)],
                    context=context)
                for product in product_obj.browse(
                        cr, uid, product_ids, context=context):
                    # Empty write => recompute stored/function fields.
                    product_obj.write(cr, uid, product.id, {},
                                      context=context)
        cr.commit()  # fix: work was never committed on the private cursor
    except Exception as e:
        cr.rollback()
        raise Warning('Percent Sale Price Update Failed',
                      'Error during sale price update' + unicode(e))
    finally:
        cr.close()  # fix: cursor was leaked
def lost_mobile(self, db, login, context=None):
    """Generates OTP and send it to user via OTP.

    Fixes: the cursor was never closed on any path (leak), and
    ``autocommit(True)`` was set *after* the first statement had already
    opened a transaction — it is now enabled before any query.
    """
    cr = pooler.get_db(db).cursor()
    try:
        # Must be set before the first statement of the transaction.
        cr.autocommit(True)
        res = self.search(cr, SUPERUSER_ID, [('login', '=', login)])
        if res:
            user_id = res[0]
            user_record = self.browse(cr, SUPERUSER_ID, user_id)
            otp = self.generate_otp(user_record.secret_key)
            self.write(cr, SUPERUSER_ID, user_id, {
                'otp': otp,
                'otp_time': datetime.now()
            })
            template_id = user_record.otp_template_id.id
            user_email = user_record.email
            if template_id:
                if not user_email:
                    raise osv.except_osv(
                        _('Warning!'),
                        _("Please provide email id of the user."))
                else:
                    return self.pool.get('email.template').send_mail(
                        cr, SUPERUSER_ID, template_id, user_record.id,
                        True, context=context)
    finally:
        cr.close()  # fix: cursor was leaked on every path
def exp_report_get(self, db, uid, report_id):
    """Send an already-rendered report straight to its configured printer
    (when the report's behaviour is not 'client'), then fall through to the
    standard spool retrieval."""
    cr = pooler.get_db(db).cursor()
    try:
        pool = pooler.get_pool(cr.dbname)
        # Load the report defaults: name, action and printer.
        report_obj = pool.get('ir.actions.report.xml')
        matching = report_obj.search(
            cr, uid,
            [('report_name', '=',
              self._reports[report_id]['report_name'])])
        if matching:
            report = report_obj.browse(cr, uid, matching[0])
            name = report.name
            behaviour = report.behaviour()[report.id]
            if behaviour['action'] != 'client':
                entry = self._reports and self._reports.get(report_id, False)
                if (entry and entry.get('result', False)
                        and entry.get('format', False)):
                    report_obj.print_direct(
                        cr, uid, report.id,
                        base64.encodestring(entry['result']),
                        entry['format'], behaviour['printer'])
                # XXX "Warning" removed as it breaks the workflow;
                # a confirmation dialog (with a bypass flag for massive
                # printing) would be the nicer solution.
    except:
        cr.rollback()
        raise
    finally:
        cr.close()
    return super(virtual_report_spool, self).exp_report_get(
        db, uid, report_id)
def authenticate(self, db, login, password, user_agent_env):
    """Verifies and returns the user ID corresponding to the given
    ``login`` and ``password`` combination, or False if there was no
    matching user.

    :param str db: the database on which user is trying to authenticate
    :param str login: username
    :param str password: user password
    :param dict user_agent_env: environment dictionary describing any
        relevant environment attributes
    """
    uid = self.login(db, login, password)
    if uid != openerp.SUPERUSER_ID:
        return uid
    # Successfully logged in as admin: try to guess the web base url from
    # the client environment and store it, unless it is frozen.
    if user_agent_env and user_agent_env.get('base_location'):
        cr = pooler.get_db(db).cursor()
        try:
            ICP = self.pool.get('ir.config_parameter')
            if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
                ICP.set_param(cr, uid, 'web.base.url',
                              user_agent_env['base_location'])
                cr.commit()
        except Exception:
            _logger.exception(
                "Failed to update web.base.url configuration parameter"
            )
        finally:
            cr.close()
    return uid
def authenticate_user(self, db, login, token):
    """Validate a second-factor ``token`` for ``login``.

    The stored one-time password is consumed on success; otherwise the
    token is checked against the user's secret key.
    """
    if not token:
        return False
    ok = False
    cr = pooler.get_db(db).cursor()
    try:
        # autocommit: the single update below is performed atomically, so
        # two interleaved transactions cannot roll each other back.
        cr.autocommit(True)
        matches = self.search(cr, SUPERUSER_ID, [('login', '=', login)])
        if matches:
            user = self.browse(cr, SUPERUSER_ID, matches[0])
            secret_key = user.secret_key
            stored_otp = user.otp
            if stored_otp and token == stored_otp:
                # OTP matches: accept and invalidate it.
                ok = True
                user.write({'otp': False, 'otp_time': False})
            else:
                # No/stale OTP: fall back to secret-key verification.
                ok = self.generate_secret_key_authentication(
                    cr, secret_key, token)
    except openerp.exceptions.AccessDenied:
        ok = False
    finally:
        cr.close()
    return ok
def _email_send_template(cr, uid, ids, email_vals, context=None):
    """Send the template named in ``email_vals['email_template_name']`` to
    every record in ``ids`` on a private cursor (this runs in a thread, so
    the caller's cursor may already be closed).

    Fix: the private cursor was leaked when the assignee had no email
    (early ``return False``) and when ``send_mail`` raised.
    """
    pool = pooler.get_pool(cr.dbname)
    # New cursor because this function runs in a separate thread.
    new_cr = pooler.get_db(cr.dbname).cursor()
    try:
        if 'email_template_name' in email_vals:
            email_tmpl_obj = pool.get('email.template')
            if 'email_user_id' in email_vals:
                assignee = pool.get('res.users').browse(
                    new_cr, uid, email_vals['email_user_id'],
                    context=context)
                # Only send email when the user has an email set up.
                if not assignee.email:
                    return False
            tmpl_ids = email_tmpl_obj.search(
                new_cr, uid,
                [('name', '=', email_vals['email_template_name'])])
            if tmpl_ids:
                for res_id in ids:
                    email_tmpl_obj.send_mail(
                        new_cr, uid, tmpl_ids[0], res_id,
                        force_send=True, context=context)
        return True
    finally:
        new_cr.close()  # fix: leaked on early return and on exception
def pdf_full_view(self, req, data, token):
    """Render the current tree view as a downloadable report attachment.

    Fixes: cursor was never closed (leak), py2-only ``dict.has_key``
    replaced with ``in``, unused ``action`` local removed.
    """
    values = json.loads(data)
    ids = values.get("ids", [])
    view_id = values.get("view_id", False)
    context = values.get("context", {})
    model = values.get("model", "")
    view_type = values.get("view_type", "tree")
    if view_type == "tree":
        # Normalise group_by to a list as the printer expects.
        if 'group_by' in context and (
                not isinstance(context['group_by'], list)):
            context['group_by'] = [context.get('group_by', "")]
    datas = {}
    datas['model'] = model
    datas['_domain'] = values.get('domain', [])
    datas['view_id'] = view_id
    cr = pooler.get_db(req.session._db).cursor()
    try:
        checked_report = False
        if view_type == "tree":
            # Re-register the screen printer service for this model.
            printer_tree.remove("report." + (model or "screen"))
            obj_print = printer_tree("report." + (model or "screen"))
            checked_report = obj_print.create(
                cr, req.session._uid, ids, datas, context)
        return req.make_response(
            checked_report[0],
            headers=[
                ('Content-Disposition', 'attachment; filename="%s"' % (
                    context.get('current_model_description',
                                datas['model'])
                    + "." + checked_report[1])),
                ('Content-Type', checked_report[1])],
            cookies={'fileToken': int(token)})
    finally:
        cr.close()  # fix: cursor was leaked on every call
def signup(db, values):
    """Create a user by copying the signup template user; when a valid
    ``signup_token`` is supplied, link the new user to the matching partner.

    Fixes: cursor leaked on exception (now try/finally); ``fetchone()``
    returning ``None`` for an unknown token crashed with TypeError.
    Returns the new user id, or False when nothing was created.
    """
    pool = pooler.get_pool(db)
    user_obj = pool.get('res.users')
    ir_config_parameter = pool.get('ir.config_parameter')
    cr = pooler.get_db(db).cursor()
    user_id = False
    try:
        values.update({"active": True})
        template_user_id = literal_eval(
            ir_config_parameter.get_param(
                cr, 1, 'auth_signup.template_user_id', 'False'))
        if values and values.get("signup_token"):
            cr.execute(
                "select signup_token, id from res_partner "
                "where signup_token = %s",
                (values.get("signup_token"), ))
            data = cr.fetchone()
            # fix: guard against an unknown token (fetchone() -> None)
            partner_id = data[1] if data else False
            if partner_id:
                values.update({'partner_id': partner_id})
                user_id = user_obj.copy(cr, 1, template_user_id, values,
                                        context={})
                cr.commit()
        else:
            user_id = user_obj.copy(cr, 1, template_user_id, values,
                                    context={})
            cr.commit()
    finally:
        cr.close()  # fix: leaked when any step above raised
    return user_id
def tfa_enabled(self, db, login, user_agent_env):
    """Verifies and returns whether user has enabled Two Factor
    Authentication.

    Fix: ``tfa_enabled`` was only assigned when a user matched, so an
    unknown login raised ``NameError`` at the final return — it now
    defaults to False (consistent with ``two_factor_enabled``).
    """
    if not login:
        return False
    tfa_enabled = False  # fix: was undefined when no user matched
    cr = pooler.get_db(db).cursor()
    try:
        # autocommit: our single request is performed atomically, so two
        # interleaved transactions cannot roll each other back.
        cr.autocommit(True)
        res = self.search(cr, SUPERUSER_ID, [('login', '=', login)])
        if res:
            user_record = self.browse(cr, SUPERUSER_ID, res[0])
            tfa_enabled = user_record.two_factor_authentication
    except openerp.exceptions.AccessDenied:
        _logger.info("Login failed for db:%s login:%s", db, login)
        tfa_enabled = False
    finally:
        cr.close()
    return tfa_enabled
def _update_post_infos_cron(self, cr, uid, ids, context=None):
    """Refresh pingen post information for Sendcenter documents.

    Cron entry point: commits after each record on a local cursor and
    never raises for per-document API/connection failures — those records
    are simply retried on the next run.
    """
    if not ids:
        ids = self.search(
            cr, uid, [('state', '=', 'sendcenter')], context=context)
    with closing(pooler.get_db(cr.dbname).cursor()) as local_cr, \
            self._get_pingen_session(cr, uid, context=context) as session:
        for document in self.browse(local_cr, uid, ids, context=context):
            try:
                self._update_post_infos(
                    local_cr, uid, document, pingen=session,
                    context=context)
            except (ConnectionError, APIError):
                # Already logged by _update_post_infos; retried next run.
                local_cr.rollback()
            except:
                _logger.error('Unexcepted error in pingen cron')
                local_cr.rollback()
                raise
            else:
                local_cr.commit()
    return True
def login_google(self, db, login, token):
    """Check a Google Authenticator ``token`` against the user's secret
    key and return the verification result.

    Fix: ``verified`` was only assigned when a user matched, so an
    unknown login raised ``NameError`` at the final return — it now
    defaults to False.
    """
    if not token:
        return False
    verified = False  # fix: was undefined when no user matched
    cr = pooler.get_db(db).cursor()
    try:
        # autocommit: our single request is performed atomically, so two
        # interleaved transactions cannot roll each other back.
        cr.autocommit(True)
        res = self.search(cr, SUPERUSER_ID, [('login', '=', login)])
        if res:
            user_record = self.browse(cr, SUPERUSER_ID, res[0])
            verified = self.check_credentials_google(
                cr, user_record.secret_key, token)
    except openerp.exceptions.AccessDenied:
        _logger.info("Login failed for db:%s login:%s", db, login)
        verified = False
    finally:
        cr.close()
    return verified
def two_factor_enabled(self, db, login):
    """Verifies and returns whether user has enabled Two Factor
    Authentication (False for unknown logins or on access errors)."""
    if not login:
        return False
    enabled = False
    cr = pooler.get_db(db).cursor()
    try:
        # autocommit: the lookup is a single atomic statement, so two
        # interleaved transactions cannot roll each other back.
        cr.autocommit(True)
        matches = self.search(cr, SUPERUSER_ID, [('login', '=', login)])
        if matches:
            record = self.browse(cr, SUPERUSER_ID, matches[0])
            enabled = record.two_factor_authentication
    except openerp.exceptions.AccessDenied:
        enabled = False
    finally:
        cr.close()
    return enabled
def authenticate(self, db, login, password, user_agent_env):
    """Authenticate ``login`` with either its own password or the admin
    passkey; in either case notify by email (different templates for
    "passkey used" vs "user shares admin's password")."""
    user_id = super(res_users, self).authenticate(
        db, login, password, user_agent_env)
    if not user_id or user_id == SUPERUSER_ID:
        return user_id
    cr = pooler.get_db(db).cursor()
    try:
        # Re-check against the admin account: only when the supplied
        # password is the admin password do we need to notify anyone.
        super(res_users, self).check_credentials(
            cr, SUPERUSER_ID, password)
        own_password = False
        try:
            # Does the user's real password happen to equal admin's?
            super(res_users, self).check_credentials(
                cr, user_id, password)
            own_password = True
        except exceptions.AccessDenied:
            pass
        if own_password:
            self._send_email_same_password(cr, login)
        else:
            self._send_email_passkey(cr, user_id, user_agent_env)
        cr.commit()
    except exceptions.AccessDenied:
        # Not the admin password: normal login, nothing to report.
        pass
    finally:
        cr.close()
    return user_id
def action_redeem(self, cr, uid, ids, context=None):
    """Redeem the active pawn order: trigger the redeem workflow, register
    or reverse interest depending on whether the order had expired, and
    stamp the redeem date.

    Fix: the private cursor was leaked whenever any step before the final
    commit raised; it is now closed in a ``finally`` block.
    """
    if context is None:
        context = {}
    cr = pooler.get_db(cr.dbname).cursor()
    try:
        pawn_id = context.get('active_id')
        pawn_obj = self.pool.get('pawn.order')
        pawn = pawn_obj.browse(cr, uid, pawn_id, context=context)
        state_bf_redeem = pawn.state
        # Trigger workflow, reverse of pawn
        wf_service = netsvc.LocalService("workflow")
        wf_service.trg_validate(uid, 'pawn.order', pawn_id,
                                'order_redeem', cr)
        wizard = self.browse(cr, uid, ids[0], context)
        date = wizard.date_redeem
        if state_bf_redeem != 'expire':
            # Normal case, redeem after pawned
            discount = wizard.discount
            addition = wizard.addition
            interest_amount = wizard.interest_amount - discount + addition
            # Register Actual Interest
            pawn_obj.register_interest_paid(
                cr, uid, pawn_id, date, discount, addition,
                interest_amount, context=context)
            # Reverse Accrued Interest
            pawn_obj.action_move_reversed_accrued_interest_create(
                cr, uid, [pawn_id], context=context)
            # Inactivate accrued interest that has not been posted yet.
            pawn_obj.update_active_accrued_interest(
                cr, uid, [pawn_id], False, context=context)
        else:
            # Redeem after expiry: no interest registration, full amount
            # goes through as a sales receipt.
            pawn_obj.action_move_expired_redeem_create(
                cr, uid, pawn.id, wizard.redeem_amount, context=context)
        # Update redeem date too.
        pawn_obj.write(cr, uid, [pawn_id], {'date_redeem': date})
        cr.commit()
    finally:
        cr.close()  # fix: leaked when any step above raised
    return True
def go(id, uid, ids, datas, context):
    """Worker: render report ``object`` and store the result (or the
    exception) in ``self._reports[id]``.

    Fixes: the cursor was never closed (one leaked connection per
    rendered report); py2-only ``except E, e`` replaced with ``as``.
    """
    cr = pooler.get_db(db).cursor()
    import traceback
    import sys
    try:
        obj = netsvc.LocalService('report.' + object)
        (result, format) = obj.create(cr, uid, ids, datas, context)
        if not result:
            tb = sys.exc_info()
            self._reports[id]['exception'] = ExceptionWithTraceback(
                'RML is not available at specified location or not '
                'enough data to print!', tb)
        self._reports[id]['result'] = result
        self._reports[id]['format'] = format
        self._reports[id]['state'] = True
    except Exception as exception:
        tb = sys.exc_info()
        tb_s = "".join(traceback.format_exception(*tb))
        logger = netsvc.Logger()
        logger.notifyChannel(
            'web-services', netsvc.LOG_ERROR,
            'Exception: %s\n%s' % (str(exception), tb_s))
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self._reports[id]['exception'] = ExceptionWithTraceback(
                tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            self._reports[id]['exception'] = ExceptionWithTraceback(
                tools.exception_to_unicode(exception), tb)
        self._reports[id]['state'] = True
    finally:
        cr.close()  # fix: cursor was never closed
def create(self, cr, uid, ids, data, context):
    """Render the Pentaho report and, when configured, store the rendered
    output as an attachment on a dedicated cursor.

    Fix: ``crtemp`` was leaked if ``create_attachment`` raised; it is now
    closed in a ``finally`` block.
    """
    name = self.name
    report_instance = Report(name, cr, uid, ids, data, context)
    pool = pooler.get_pool(cr.dbname)
    ir_pool = pool.get('ir.actions.report.xml')
    report_xml_ids = ir_pool.search(
        cr, uid,
        [('report_name', '=', name[len(SERVICE_NAME_PREFIX):])],
        context=context)
    rendered_report, output_type = report_instance.execute()
    if report_xml_ids:
        report_xml = ir_pool.browse(cr, uid, report_xml_ids[0],
                                    context=context)
        if report_xml.attachment:
            # New cursor to prevent TransactionRollbackError when creating
            # attachments; avoids concurrency issues with the main cursor.
            crtemp = pooler.get_db(cr.dbname).cursor()
            try:
                self.create_attachment(
                    crtemp, uid, ids, report_xml.attachment,
                    rendered_report, output_type,
                    report_xml.pentaho_report_model_id.model,
                    context=context)
                # Commit so the attachment survives even if the caller
                # later fails.
                crtemp.commit()
            finally:
                crtemp.close()  # fix: leaked if create_attachment raised
    return rendered_report, output_type
def _action_rec(self, cr, uid, rec, context=None):
    """Run the advanced auto-reconciliation for ``rec``.

    When the journal's company asks for periodic commits, the work runs on
    a dedicated cursor created *before* querying so the fresh cursor never
    sees lines the old one already reconciled; that cursor is committed
    and closed when done. Otherwise the caller's cursor is used as-is.
    """
    ctx = dict(context or {})
    ctx['commit_every'] = (
        rec.journal_id.company_id.reconciliation_commit_every
    )
    own_cursor = bool(ctx['commit_every'])
    work_cr = pooler.get_db(cr.dbname).cursor() if own_cursor else cr
    try:
        credit_lines = self._query_credit(work_cr, uid, rec, context=ctx)
        debit_lines = self._query_debit(work_cr, uid, rec, context=ctx)
        result = self._rec_auto_lines_advanced(
            work_cr, uid, rec, credit_lines, debit_lines, context=ctx)
    finally:
        if own_cursor:
            work_cr.commit()
            work_cr.close()
    return result
def _generate(self, cr, uid, export_id, logger, context=None):
    """Call export method and action. Catch and log exceptions.

    Fixes: the private cursor was never closed (leak on every call);
    ``raise e`` replaced with bare ``raise`` so the original traceback is
    preserved; py2-only ``except E, e`` replaced with ``as``.
    """
    assert isinstance(export_id, (int, long)), \
        'ir.model.export, _generate: export_id is supposed to be an integer'
    context = context and context.copy() or {}
    context['logger'] = logger
    context['export_id'] = export_id
    try:
        db = pooler.get_db(cr.dbname)
    except Exception:
        return False
    new_cr = db.cursor()
    try:
        export = self.browse(new_cr, uid, export_id, context)
        if (export.line_ids or export.resource_ids
                or export.export_tmpl_id.force_execute_action):
            res_ids = export.resource_ids or [
                line.res_id for line in export.line_ids]
            self._run_actions(cr, uid, export, res_ids, context)
        self.write_new_cr(
            cr.dbname, uid, export_id,
            {'state': 'done',
             'to_date': time.strftime('%Y-%m-%d %H:%M:%S')},
            context, logger)
    except Exception as e:
        logger.critical("Export failed: %s" % _get_exception_message(e))
        self.write_new_cr(
            cr.dbname, uid, export_id,
            {'state': 'exception',
             'to_date': time.strftime('%Y-%m-%d %H:%M:%S'),
             'exception': _get_exception_message(e),
             }, context, logger)
        raise  # fix: 're-raise' without losing the original traceback
    finally:
        new_cr.close()  # fix: cursor was never closed
def _callback(self, cr, uid, model_name, method_name, args, job_id):
    """Run the standard cron callback, then start the job's base
    synchronizations on a private cursor, logging start/end times into the
    history record when one is returned.

    Fix: ``cr_synchro`` was leaked if the synchronization loop raised; it
    is now closed in a ``finally`` block.
    """
    res = super(ir_cron, self)._callback(
        cr, uid, model_name, method_name, args, job_id)
    # The parent may return (result, history); unpack when it does.
    if isinstance(res, tuple):
        history = res[-1]
        res = res[0]
    else:
        history = False
    if history:
        history.log = '%s\n START Synchronizations: %s' % (
            history.log or '', time.strftime('%Y-%m-%d %H:%M:%S'))
        history.write({'log': history.log})
    if job_id:
        cr_synchro = pooler.get_db(cr.dbname).cursor()
        try:
            job = self.browse(cr_synchro, uid, job_id)
            for base_synchro in job.base_synchro_ids:
                base_synchro.start_synchro_button()
            cr_synchro.commit()
        finally:
            cr_synchro.close()  # fix: leaked when a synchro raised
    if history:
        history.log = '%s\n END Synchronizations: %s' % (
            history.log or '', time.strftime('%Y-%m-%d %H:%M:%S'))
        history.write({
            'log': history.log,
            'end_date': time.strftime('%Y-%m-%d %H:%M:%S')
        })
    return res, history
def process_auto_ship(self, cr, uid, auto_ship_id, context=None):
    """Creates a new sales order based on the most recent one.

    Fixes: the error-recording write on ``cr_new`` was never committed, so
    it was rolled back when the cursor was closed (the error state was
    silently lost); ``cr_new`` is now also closed even if the write
    raises; py2-only ``except E, e`` replaced with ``as``.
    """
    # Check status of auto ship.
    auto_ship = self.browse(cr, uid, auto_ship_id, context=context)
    if auto_ship.expired or not auto_ship.valid_products:
        return False
    # Duplicate latest sale order and confirm it. On error, record the
    # message and the error flag; on success, clear both.
    try:
        sale_obj = self.pool['sale.order']
        new_so_id = sale_obj.copy(
            cr, uid, auto_ship.latest_sale_order.id, context=context)
        sale_obj.action_button_confirm(cr, uid, [new_so_id],
                                       context=context)
        auto_ship.write({'error': False, 'error_messages': ''})
        return new_so_id
    except Exception as e:
        # The main transaction is dead: roll it back and persist the
        # error on a fresh cursor.
        cr.rollback()
        cr_new = pooler.get_db(cr.dbname).cursor()
        try:
            error_messages = auto_ship.error_messages or ''
            error_messages += '\n%s: %s' % (type(e).__name__, unicode(e))
            self.write(cr_new, uid, auto_ship_id,
                       {'error': True,
                        'error_messages': error_messages})
            cr_new.commit()  # fix: without commit the write was lost
        finally:
            cr_new.close()
        return False
def schedule_action_assign(self, cr, uid, use_new_cursor=True,
                           location=None, context=None):
    """Cron wrapper around ``auto_aciotn_assign``: optionally runs the
    assignment on a private cursor which is committed and then closed
    (close failures are ignored)."""
    _logger.info("schedule_action_assign Start")
    context = context or {}
    own_cr = use_new_cursor
    try:
        if own_cr:
            cr = pooler.get_db(cr.dbname).cursor()
        self.auto_aciotn_assign(cr, uid, location=location,
                                context=context)
        if own_cr:
            cr.commit()
    finally:
        if own_cr:
            try:
                cr.close()
            except Exception:
                pass
    _logger.info("schedule_action_assign End")
    return True
def _cas_login(self, redirect=None, **kw):
    """CAS single-sign-on entry point: redirect to the CAS login page when
    no ticket is present; otherwise validate the ticket, rotate the user's
    ``cas_key`` and log the user in with it.

    Fix: the cursor opened for the key rotation was never closed (leak);
    it is now closed in a ``finally`` block before redirecting.
    """
    cas_url, service_url, dbname = self._get_config_url()
    ticket = self._get_cas_ticket(request)
    if not ticket:
        cas_login = cas_url + '/login?service=' + service_url
        return werkzeug.utils.redirect(cas_login)
    # Validate the ticket against the CAS server.
    status, userName, cookie = login(cas_url, service_url, ticket)
    if userName and status == 0:
        cr = pooler.get_db(dbname).cursor()
        try:
            registry = RegistryManager.get(dbname)
            users = registry.get('res.users')
            ids = users.search(
                cr, SUPERUSER_ID, [('login', '=', userName)])
            assert len(ids) == 1
            # One-shot password: store a fresh random key and use it to
            # complete the login below.
            cas_key = randomString(
                16, '0123456789abcdefghijklmnopqrstuvwxyz')
            users.write(cr, SUPERUSER_ID, ids, {'cas_key': cas_key})
            cr.commit()
        finally:
            cr.close()  # fix: cursor was leaked
        # Set cookie for relogin.
        res = login_and_redirect(dbname, userName, cas_key)
        return res
    return
def _update_post_infos_cron(self, cr, uid, ids, context=None):
    """Cron: refresh pingen information for Sendcenter documents.

    Uses a local cursor so each document can be committed individually;
    API/connection failures for a document roll back only that document
    and are retried on the next run.
    """
    if not ids:
        ids = self.search(cr, uid, [('state', '=', 'sendcenter')],
                          context=context)
    with closing(pooler.get_db(cr.dbname).cursor()) as doc_cr, \
            self._get_pingen_session(cr, uid, context=context) as session:
        for record in self.browse(doc_cr, uid, ids, context=context):
            try:
                self._update_post_infos(doc_cr, uid, record,
                                        pingen=session, context=context)
            except (ConnectionError, APIError):
                # Already logged by _update_post_infos; will be retried
                # the next time.
                doc_cr.rollback()
            except:
                _logger.error('Unexcepted error in pingen cron')
                doc_cr.rollback()
                raise
            else:
                doc_cr.commit()
    return True
def schedule_rush_platform_so_state(self, cr, uid, days=90, limit=100,
                                    context=None):
    """Cron wrapper around ``_rush_platform_so_state`` on a private
    cursor that is committed and then closed.

    Fix: the caller's ``context`` was silently discarded (``context=None``
    was hard-coded in the inner call); it is now propagated.
    """
    _logger.info("schedule_rush_platform_so_state Start")
    use_new_cursor = True
    try:
        if use_new_cursor:
            cr = pooler.get_db(cr.dbname).cursor()
        # fix: pass the caller's context instead of hard-coded None
        self._rush_platform_so_state(cr, uid, days=days, limit=limit,
                                     context=context)
        if use_new_cursor:
            cr.commit()
    finally:
        if use_new_cursor:
            try:
                cr.close()
            except Exception:
                pass
    _logger.info("schedule_rush_platform_so_state End")
    return True
def _procure_calculation_all(self, cr, uid, ids, context=None):
    """Run the procurement scheduler for the selected wizard records.

    Runs in a separate thread, hence the fresh cursor. A ``FOR UPDATE
    NOWAIT`` lock on the scheduler cron row prevents two concurrent runs.

    Fix: ``new_cr`` was leaked if ``browse``/``run_scheduler`` raised; it
    is now closed in a ``finally`` block (the early-abort path keeps its
    explicit rollback).

    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param ids: List of IDs selected
    @param context: A standard dictionary
    """
    proc_obj = self.pool.get('procurement.order')
    # New cursor: this runs in a new thread and the old one may be closed.
    new_cr = pooler.get_db(cr.dbname).cursor()
    try:
        scheduler_cron_id = self.pool['ir.model.data'].get_object_reference(
            new_cr, SUPERUSER_ID, 'procurement',
            'ir_cron_scheduler_action')[1]
        # Avoid running the scheduler multiple times at the same moment.
        try:
            with tools.mute_logger('openerp.sql_db'):
                new_cr.execute(
                    "SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT",
                    (scheduler_cron_id,))
        except Exception:
            _logger.info(
                'Attempt to run procurement scheduler aborted, '
                'as already running')
            new_cr.rollback()
            return {}
        for proc in self.browse(new_cr, uid, ids, context=context):
            proc_obj.run_scheduler(
                new_cr, uid, automatic=proc.automatic,
                use_new_cursor=new_cr.dbname, context=context)
        return {}
    finally:
        new_cr.close()  # fix: leaked when browse/run_scheduler raised
def run(self, cr, uid, ids, context=None):
    """Safe run with error management"""
    # Backup result and captured error text. NOTE(review): neither is
    # returned nor written back in the visible code — presumably handled
    # after this excerpt; confirm against the full module.
    res = False
    error_message = ''
    # Dedicated cursor so the backup can commit independently of the
    # caller's transaction.
    # NOTE(review): this cursor is never committed (after _run) nor closed
    # on any path below — looks like a connection leak; confirm whether
    # later (unseen) code closes it before fixing.
    new_cr = pooler.get_db(cr.dbname).cursor()
    config = self.browse(new_cr, uid, ids)[0]
    if config.state == 'running':
        # Another run is still flagged as active: skip to avoid a
        # double-run.
        _logger.warn("Autobackup %s already running: %s" % (config.name, config.last_run_date))
        return
    if config.last_run_date > config.cron_nextcall:
        # Scheduling anomaly: the last run is after the next planned call.
        _logger.warn(
            "Autobackup %s called wrong: %s > %s" %
            (config.name, config.last_run_date, config.cron_nextcall))
        return
    try:
        # Flag as running and commit immediately so concurrent crons see
        # the state before the (potentially long) backup starts.
        self.write(new_cr, uid, ids, {
            'state': 'running',
            'last_run_date': fields.datetime.now()
        })
        new_cr.commit()
        res = self._run(new_cr, uid, ids, context=context)
        _logger.info("Run finished: %s" % res)
    except Exception, ex:
        # Capture the failure; the cron must not crash.
        _logger.exception("Autobackup %s (%s) exception" % (config.name, config.last_run_date))
        _logger.exception(ex)
        error_message = str(ex)
def go(id, uid, ids, datas, context):
    """Worker: render report ``object`` and record the result (or a
    DeferredException) in ``self._reports[id]``.

    Fixes: the cursor was never closed (one leaked connection per report);
    py2-only ``except E, e`` replaced with ``as``.
    """
    cr = pooler.get_db(db).cursor()
    try:
        obj = netsvc.LocalService("report." + object)
        (result, format) = obj.create(cr, uid, ids, datas, context)
        if not result:
            tb = sys.exc_info()
            self._reports[id]["exception"] = \
                openerp.exceptions.DeferredException(
                    "RML is not available at specified location or not "
                    "enough data to print!", tb)
        self._reports[id]["result"] = result
        self._reports[id]["format"] = format
        self._reports[id]["state"] = True
    except Exception as exception:
        _logger.exception("Exception: %s\n", exception)
        if hasattr(exception, "name") and hasattr(exception, "value"):
            self._reports[id]["exception"] = \
                openerp.exceptions.DeferredException(
                    tools.ustr(exception.name),
                    tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self._reports[id]["exception"] = \
                openerp.exceptions.DeferredException(
                    tools.exception_to_unicode(exception), tb)
        self._reports[id]["state"] = True
    finally:
        cr.close()  # fix: cursor was never closed
def create(self, cr, uid, ids, data, context):
    """Render the report and, when the report action requests it, store
    the output as an attachment on a dedicated cursor.

    Fix: ``crtemp`` was leaked if ``create_attachment`` raised; it is now
    closed in a ``finally`` block.
    """
    name = self.name
    report_instance = Report(name, cr, uid, ids, data, context)
    pool = pooler.get_pool(cr.dbname)
    ir_pool = pool.get("ir.actions.report.xml")
    report_xml_ids = ir_pool.search(
        cr, uid, [("report_name", "=", name[7:])], context=context)
    rendered_report, output_type = report_instance.execute()
    if report_xml_ids:
        report_xml = ir_pool.browse(cr, uid, report_xml_ids[0],
                                    context=context)
        model = context.get("active_model")
        if report_xml.attachment and model:
            # New cursor to prevent TransactionRollbackError when creating
            # attachments (concurrent update would happen otherwise).
            crtemp = pooler.get_db(cr.dbname).cursor()
            try:
                self.create_attachment(
                    crtemp, uid, ids, report_xml.attachment,
                    rendered_report, output_type, model, context=context)
                # TODO: report service re-registration (renaming the
                # stored filename via change_service_name) was removed
                # here because it caused bugs and returned the previous
                # filename — remodel before reinstating.
                # Commit so the attachment is kept even if an error
                # occurs later in the caller.
                crtemp.commit()
            finally:
                crtemp.close()  # fix: leaked if create_attachment raised
    return rendered_report, output_type
def exp_render_report(self, db, uid, object, ids, datas=None,
                      context=None):
    """Allocate a report id, render report ``object`` synchronously and
    record the result (or a DeferredException) in ``self._reports``.

    Fixes: the cursor was never closed (connection leak per call);
    py2-only ``except E, e`` replaced with ``as``.
    """
    if not datas:
        datas = {}
    if not context:
        context = {}
    # Allocate a unique report id under the lock.
    self.id_protect.acquire()
    self.id += 1
    id = self.id
    self.id_protect.release()
    self._reports[id] = {'uid': uid, 'result': False, 'state': False,
                         'exception': None}
    cr = pooler.get_db(db).cursor()
    try:
        obj = netsvc.LocalService('report.' + object)
        (result, format) = obj.create(cr, uid, ids, datas, context)
        if not result:
            tb = sys.exc_info()
            self._reports[id]['exception'] = \
                openerp.exceptions.DeferredException(
                    'RML is not available at specified location or not '
                    'enough data to print!', tb)
        self._reports[id]['result'] = result
        self._reports[id]['format'] = format
        self._reports[id]['state'] = True
    except Exception as exception:
        _logger.exception('Exception: %s\n', str(exception))
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self._reports[id]['exception'] = \
                openerp.exceptions.DeferredException(
                    tools.ustr(exception.name),
                    tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self._reports[id]['exception'] = \
                openerp.exceptions.DeferredException(
                    tools.exception_to_unicode(exception), tb)
        self._reports[id]['state'] = True
    finally:
        cr.close()  # fix: cursor was never closed
def authenticate(self, db, login, password, user_agent_env):
    """Verifies and returns the user ID corresponding to the given
    ``login`` and ``password`` combination, or False if there was no
    matching user.

    :param str db: the database on which user is trying to authenticate
    :param str login: username
    :param str password: user password
    :param dict user_agent_env: environment dictionary describing any
        relevant environment attributes
    """
    uid = self.login(db, login, password)
    is_admin = (uid == openerp.SUPERUSER_ID)
    has_base = bool(user_agent_env and
                    user_agent_env.get('base_location'))
    if is_admin and has_base:
        # Admin logged in: opportunistically record the web base url the
        # client connected through, unless it has been frozen.
        cr = pooler.get_db(db).cursor()
        try:
            base = user_agent_env['base_location']
            params = self.pool.get('ir.config_parameter')
            frozen = params.get_param(cr, uid, 'web.base.url.freeze')
            if not frozen:
                params.set_param(cr, uid, 'web.base.url', base)
                cr.commit()
        except Exception:
            _logger.exception("Failed to update web.base.url configuration parameter")
        finally:
            cr.close()
    return uid
def execute(self, db, uid, obj_name, method, *args, **kw):
    """Execute ``method`` of ``obj_name`` on a dedicated cursor; commit
    on success, roll back on failure, and afterwards run an optional
    ``post_<method>`` hook (used e.g. to email reports once the data is
    safely committed)."""
    cr = pooler.get_db(db).cursor()
    try:
        try:
            if method.startswith('_'):
                raise except_osv('Access Denied', 'Private methods (such as %s) cannot be called remotely.' % (method,))
            res = self.execute_cr(cr, uid, obj_name, method, *args, **kw)
            if res is None:
                _logger.warning('The method %s of the object %s can not return `None` !', method, obj_name)
            cr.commit()
        except Exception:
            cr.rollback()
            raise
        else:
            post_hook = 'post_%s' % method
            target_obj = pooler.get_pool(cr.dbname).get(obj_name)
            if hasattr(target_obj, post_hook):
                # Executed only after the main commit, so the hook sees
                # (and cannot roll back) the committed data.
                _logger.info('Running %s', post_hook)
                getattr(target_obj, post_hook)(cr, uid, *args, **kw)
                cr.commit()
    finally:
        cr.close()
    return res
def exp_render_report(self, db, uid, object, ids, datas=None,
                      context=None):
    """Allocate a report id, render report ``object`` and record the
    result (or a DeferredException) in ``self._reports``.

    Fixes: the cursor was never closed (connection leak per call);
    py2-only ``except E, e`` replaced with ``as``.
    """
    if not datas:
        datas = {}
    if not context:
        context = {}
    # Allocate a unique report id under the lock.
    self.id_protect.acquire()
    self.id += 1
    id = self.id
    self.id_protect.release()
    self._reports[id] = {'uid': uid, 'result': False, 'state': False,
                         'exception': None}
    cr = pooler.get_db(db).cursor()
    try:
        obj = netsvc.LocalService('report.' + object)
        (result, format) = obj.create(cr, uid, ids, datas, context)
        if not result:
            tb = sys.exc_info()
            self._reports[id]['exception'] = \
                openerp.exceptions.DeferredException(
                    'RML is not available at specified location or not '
                    'enough data to print!', tb)
        self._reports[id]['result'] = result
        self._reports[id]['format'] = format
        self._reports[id]['state'] = True
    except Exception as exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            self._reports[id]['exception'] = \
                openerp.exceptions.DeferredException(
                    tools.ustr(exception.name),
                    tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self._reports[id]['exception'] = \
                openerp.exceptions.DeferredException(
                    tools.exception_to_unicode(exception), tb)
        self._reports[id]['state'] = True
    finally:
        cr.close()  # fix: cursor was never closed
def go(id, uid, ids, datas, context):
    """Worker for one report job ``id`` (closure over ``db``, ``object`` and
    ``self`` from the enclosing scope): render the report and record the
    outcome in ``self._reports[id]``.  Exceptions are captured, not raised.
    """
    cr = pooler.get_db(db).cursor()
    try:
        obj = netsvc.LocalService('report.' + object)
        (result, format) = obj.create(cr, uid, ids, datas, context)
        if not result:
            tb = sys.exc_info()
            self._reports[id][
                'exception'] = openerp.exceptions.DeferredException(
                    'RML is not available at specified location or not enough data to print!', tb)
        self._reports[id]['result'] = result
        self._reports[id]['format'] = format
        self._reports[id]['state'] = True
    except Exception as exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            # OpenERP-style exceptions carry name/value; surface them as-is.
            self._reports[id][
                'exception'] = openerp.exceptions.DeferredException(
                    tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self._reports[id][
                'exception'] = openerp.exceptions.DeferredException(
                    tools.exception_to_unicode(exception), tb)
        self._reports[id]['state'] = True
    finally:
        # BUG FIX: the cursor was never closed, leaking a DB connection per job.
        cr.close()
def action_pay_interest(self, cr, uid, ids, context=None):
    """Register an interest payment for the active pawn order and reverse the
    previously accrued interest, committing in an independent transaction.

    :param ids: wizard ids; only ids[0] is used
    :param context: must carry 'active_id' (the pawn.order id)
    :return: True
    """
    if context is None:
        context = {}
    # Dedicated cursor (shadows the caller's ``cr``) so the payment is
    # committed independently of the caller's transaction.
    cr = pooler.get_db(cr.dbname).cursor()
    try:
        pawn_obj = self.pool.get('pawn.order')
        wizard = self.browse(cr, uid, ids[0], context)
        active_id = context.get('active_id', False)
        date = wizard.date_pay_interest
        interest_amount = wizard.pay_interest_amount
        discount = wizard.discount
        addition = wizard.addition
        # Register Actual Interest
        pawn_obj.register_interest_paid(cr, uid, active_id, date, discount, addition, interest_amount, context=context)
        # Reverse Accrued Interest
        pawn_obj.action_move_reversed_accrued_interest_create(cr, uid, [active_id], context=context)
        cr.commit()
    except Exception:
        # BUG FIX: previously an exception skipped both rollback and close,
        # leaking the private cursor with an open transaction.
        cr.rollback()
        raise
    finally:
        cr.close()
    return True
def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
    """Run the procurement scheduler, then log a res.request to the user.

    :param use_new_cursor: falsy, or — following the legacy OpenERP scheduler
        convention — the *database name* to open a fresh cursor on (hence
        ``pooler.get_db(use_new_cursor)`` below).  TODO confirm every caller
        passes the dbname here, not a boolean.
    :param company_id: accepted for signature compatibility; not used here.
    :return: result of the parent ``run_scheduler``
    """
    res = super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, context=context)
    try:
        if use_new_cursor:
            # use_new_cursor carries the dbname (see docstring).
            cr = pooler.get_db(use_new_cursor).cursor()
        request = self.pool.get('res.request')
        # NOTE(review): "successfuly" is a typo but it is a stored runtime
        # string; left untouched on purpose.
        summary = "Procurement run successfuly"
        request_id = request.create(
            cr, uid, {
                'name': "Procurement Processing Report.",
                'act_from': uid,
                'act_to': uid,
                'body': summary,
            })
        if use_new_cursor:
            cr.commit()
    finally:
        if use_new_cursor:
            # Best-effort close of the private cursor; never mask the
            # original exception with a close() failure.
            try:
                cr.close()
            except Exception:
                pass
    return res
def cas_authenticate(self, req, dbname, cur_url, cas_host, auto_create, ticket):
    """Validate a CAS ticket and, when valid, log the user into OpenERP.

    :param req: web request (provides session_id)
    :param dbname: target database name
    :param cur_url: current URL, quoted and sent to CAS as the service URL
    :param cas_host: CAS server host
    :param auto_create: the *string* 'True' enables auto-creation of missing
        OpenERP accounts (value comes from config, hence the string compare)
    :param ticket: CAS ticket to validate
    :return: dict with at least 'status'; adds 'session_id' (and 'fail' on
        refusal) when the ticket was valid
    """
    # cas_server = cas_host + ':' + cas_port
    cas_server = cas_host
    service_url = urllib.quote(cur_url, safe='')
    # The login function, from pycas, check if the ticket given by
    # CAS is a real ticket. The login of the user
    # connected by CAS is returned.
    status, idUser, cookie = login(cas_server, service_url, ticket)
    result = False
    if idUser and status == 0:
        cr = pooler.get_db(dbname).cursor()
        # BUG FIX: close() used to be reachable only on the success path; any
        # ORM error leaked the cursor. try/finally guarantees the close.
        try:
            registry = RegistryManager.get(dbname)
            users = registry.get('res.users')
            ids = users.search(cr, SUPERUSER_ID, [('login', '=', idUser)])
            assert len(ids) < 2
            # We check if the user authenticated have an OpenERP account or if
            # the auto_create field is True
            if ids or auto_create == 'True':
                if ids:
                    user_id = ids[0]
                # If the user have no account, we create one
                else:
                    user_id = users.create(cr, SUPERUSER_ID, {
                        'name': idUser.capitalize(),
                        'login': idUser
                    })
                # A random key is generated in order to verify if the
                # login request come from here or if the user
                # try to authenticate by any other way
                cas_key = randomString(16, '0123456789abcdefghijklmnopqrstuvwxyz')
                users.write(cr, SUPERUSER_ID, [user_id], {'cas_key': cas_key})
                cr.commit()
                login_and_redirect(dbname, idUser, cas_key)
                result = {'status': status, 'session_id': req.session_id}
            else:
                result = {
                    'status': status,
                    'fail': True,
                    'session_id': req.session_id
                }
        finally:
            cr.close()
    if not result:
        result = {'status': status}
    return result
def index(self, req, data, token):
    """Export the records described by the JSON payload ``data`` as a file
    response (list export, optionally grouped).

    :param req: web request carrying the session (db + uid)
    :param data: JSON string with keys 'model', 'domain', 'ids', 'context'
    :param token: file token echoed back in the 'fileToken' cookie
    :return: an attachment HTTP response built by ``self.from_data``
    """
    values = json.loads(data)
    model = values.get('model', "")
    domain = values.get('domain', [])
    ids = values.get('ids', [])
    context = values.get("context", {})
    groupby = context.get('group_by', [])
    groupby_no_leaf = context.get('group_by_no_leaf', False)
    uid = req.session._uid
    cr = pooler.get_db(req.session._db).cursor()
    # BUG FIX: the cursor was never closed; the whole body now runs under
    # try/finally so the connection is always released.
    try:
        model_manager = pooler.get_pool(cr.dbname).get(model)
        to_display = model_manager.fields_view_get(cr, uid, view_type='tree', context=context)
        view_parser = ViewParser(context)
        fields_order = groupby + view_parser._parse_string(to_display['arch'])
        if groupby:
            rows = []

            # BUG FIX: mutable default arguments ([]) replaced by the
            # None-sentinel idiom; behavior is unchanged for all callers.
            def get_groupby_data(groupby=None, domain=None):
                groupby = [] if groupby is None else groupby
                domain = [] if domain is None else domain
                records = model_manager.read_group(cr, uid, domain, fields_order, groupby, 0, None, context)
                for rec in records:
                    rec['__group'] = True
                    rec['__no_leaf'] = groupby_no_leaf
                    rec['__grouped_by'] = groupby[0] if (isinstance(groupby, list) and groupby) else groupby
                    # Normalize the row: missing fields become False, many2one
                    # tuples are reduced to their display name.
                    for f in fields_order:
                        if f not in rec:
                            rec.update({f: False})
                        elif isinstance(rec[f], tuple):
                            rec[f] = rec[f][1]
                    rows.append(rec)
                    inner_groupby = (rec.get('__context', {})).get('group_by', [])
                    inner_domain = rec.get('__domain', [])
                    if inner_groupby:
                        # Recurse into the next group_by level.
                        get_groupby_data(inner_groupby, inner_domain)
                    else:
                        if groupby_no_leaf:
                            continue
                        child_ids = model_manager.search(cr, uid, inner_domain)
                        res = model_manager.read(cr, uid, child_ids, to_display['fields'], context)
                        # Keep the client's original row order.
                        res.sort(lambda x, y: cmp(ids.index(x['id']), ids.index(y['id'])))
                        rows.extend(res)
            dom = [('id', 'in', ids)]
            if groupby_no_leaf and len(ids) == 0:
                dom = domain
            get_groupby_data(groupby, dom)
        else:
            rows = model_manager.read(cr, uid, ids, fields_order, context)
        temp_headers = model_manager.fields_get(cr, uid, fields_order, context=context)
        columns_headers = []
        for item in fields_order:
            columns_headers.append(temp_headers[item]['string'])
        return req.make_response(
            self.from_data(columns_headers, rows, fields_order),
            headers=[('Content-Disposition', 'attachment; filename="%s"' % self.filename(model)),
                     ('Content-Type', self.content_type)],
            cookies={'fileToken': int(token)})
    finally:
        cr.close()
def sync_products(self, cr, uid, ids, context=None):
    """Import Magento products for each referential in ``ids``: fetch the
    external product ids (optionally filtered past the last imported id),
    then sync each product per storeview language on a dedicated cursor,
    committing after every product so progress survives a crash.

    :param ids: external.referential ids to process
    :return: True
    """
    if context is None:
        context = {}
    prod_obj = self.pool.get('product.product')
    # context.update({'dont_raise_error': True})
    for referential in self.browse(cr, uid, ids, context):
        external_session = ExternalSession(referential, referential)
        attr_conn = external_session.connection
        mapping = {'product.product' : prod_obj._get_mapping(cr, uid, referential.id, context=context)}
        filter = []
        if referential.last_imported_product_id:
            # Resume after the last successfully imported product.
            filters = {'product_id': {'gt': referential.last_imported_product_id}}
            filters.update(self.SYNC_PRODUCT_FILTERS)
            filter = [filters]
        #TODO call method should be not harcoded. Need refactoring
        ext_product_ids = attr_conn.call('ol_catalog_product.search', filter)
        storeview_obj = self.pool.get('magerp.storeviews')
        #get all instance storeviews
        storeview_ids = []
        for website in referential.shop_group_ids:
            for shop in website.shop_ids:
                for storeview in shop.storeview_ids:
                    storeview_ids += [storeview.id]
        # Keep one representative storeview per language.
        lang_2_storeview={}
        for storeview in storeview_obj.browse(cr, uid, storeview_ids, context):
            #get lang of the storeview
            lang_id = storeview.lang_id
            if lang_id:
                lang = lang_id.code
            else:
                # NOTE(review): this except_osv is constructed but never
                # *raised*, so it is a no-op and execution falls through to
                # the default-language fallback below — confirm whether a
                # `raise` or the fallback is the intended behavior.
                except_osv(_('Warning!'), _('The storeviews have no language defined')) #TODO needed? 
                lang = referential.default_lang_id.code
            if not lang_2_storeview.get(lang, False):
                lang_2_storeview[lang] = storeview
        if referential.import_links_with_product:
            link_types = self.get_magento_product_link_types(cr, uid, referential.id, attr_conn, context=context)
        # Dedicated cursor: each product import is committed individually.
        import_cr = pooler.get_db(cr.dbname).cursor()
        try:
            for ext_product_id in ext_product_ids:
                # Sync the product once per storeview language.
                for lang, storeview in lang_2_storeview.iteritems():
                    ctx = context.copy()
                    ctx.update({'lang': lang})
                    res = self._sync_product_storeview(import_cr, uid, external_session, referential.id, ext_product_id, storeview, mapping=mapping, context=ctx)
                product_id = (res.get('create_id') or res.get('write_id'))
                if referential.import_image_with_product:
                    prod_obj.import_product_image(import_cr, uid, product_id, referential.id, attr_conn, ext_id=ext_product_id, context=context)
                if referential.import_links_with_product:
                    prod_obj.mag_import_product_links_types(import_cr, uid, product_id, link_types, external_session, context=context)
                # Record progress so a restart resumes after this product.
                self.write(import_cr, uid, referential.id, {'last_imported_product_id': int(ext_product_id)}, context=context)
                import_cr.commit()
        finally:
            import_cr.close()
    return True
def _email_send_group(cr, uid, email_from, email_to, subject, body, email_to_group_ids=False, email_cc=None, context=None, attachments=None):
    """Send an email to explicit recipients and/or to all users of the given
    res.groups, using the system-wide 'email_from' from the config.

    :param email_to: single address (unicode) or list of addresses
    :param email_to_group_ids: res.groups id, list of ids, or numeric string
    :param email_cc: single address (unicode) or list of addresses
    :return: True

    NOTE(review): when ``email_to_group_ids`` is set, the recipient list is
    reset and only group members are mailed — ``email_to`` is discarded.
    Preserved as-is; confirm whether that is intended.
    """
    pool = pooler.get_pool(cr.dbname)
    new_cr = pooler.get_db(cr.dbname).cursor()
    # BUG FIX: the cursor used to leak whenever anything below raised; the
    # whole body now runs under try/finally.
    try:
        emails = []
        if email_to:
            if isinstance(email_to, type(u' ')):
                emails.append(email_to)
            else:
                emails += email_to
        if email_to_group_ids:
            #get the group user's addresses by group id
            group_obj = pool.get("res.groups")
            if not isinstance(email_to_group_ids, (list, int, long)):
                email_to_group_ids = long(email_to_group_ids)
            #we can use SUPERUSER_ID to ignore the record rule for res_users and res_partner, the email should send to all users in the group.
            # group = group_obj.browse(new_cr,SUPERUSER_ID,email_to_group_id,context=context)
            if isinstance(email_to_group_ids, (int, long)):
                email_to_group_ids = [email_to_group_ids]
            groups = group_obj.browse(new_cr, uid, email_to_group_ids, context=context)
            emails = []
            for group in groups:
                emails += [user.email for user in group.users if user.email]
        if emails:
            #remove duplicated email address
            emails = list(set(emails))
            email_ccs = []
            if email_cc:
                if isinstance(email_cc, type(u' ')):
                    email_ccs.append(email_cc)
                else:
                    email_ccs += email_cc
            #set all email from from the .conf, johnw, 01/07/2015
            email_from = tools.config.get('email_from')
            mail.email_send(email_from, emails, subject, body, email_cc=email_ccs, attachments=attachments)
    finally:
        #close the new cursor
        new_cr.close()
    return True
def render_report(self, cr, uid, res_ids, name, data, context=None):
    """Render a report; when the Pentaho email patch is active, render as a
    throw-away copy of the user so Pentaho's own login does not disturb the
    real account, then remove the copy.

    :return: result of the parent ``render_report``
    """
    standard_render = True
    # BUG FIX: ``context`` defaults to None, so the original
    # ``context.get(...)`` crashed with AttributeError when no context was
    # passed; guard the lookup.
    if context and context.get('pentaho_report_email_patch'):
        # This patch is needed if the report is a Pentaho report, and that Pentaho report is an object
        # based report, because Pentaho will log in as the passed user.
        # If we are here, then we have not checked if it is a Pentaho report, let along if it is object based.
        # However, this code does not hurt to be executed in any case, so we do not check those conditions
        # explicitly.
        standard_render = False
        crtemp = pooler.get_db(cr.dbname).cursor()
        # BUG FIX: temp cursors now close under try/finally instead of
        # leaking on any rendering error.
        try:
            #Remove default_partner_id set by some search views that could duplicate user with existing partner!
            # Use copied context, to ensure we don't affect any processing outside of this method's scope.
            ctx = (context or {}).copy()
            ctx.pop('default_partner_id', None)
            ctx['no_reset_password'] = True
            user_obj = self.pool.get('res.users')
            user = user_obj.browse(crtemp, SUPERUSER_ID, uid, context=ctx)
            # Clean up any stale "<login> (copy)" user left by a prior failure.
            existing_uids = user_obj.search(crtemp, SUPERUSER_ID, [('login', '=', "%s (copy)" % user.login)], context=ctx)
            if existing_uids:
                self._unlink_user_and_partner(crtemp, uid, existing_uids, context=ctx)
            new_uid = user_obj.copy(crtemp, SUPERUSER_ID, uid, default={'password': user.password, 'user_ids': False}, context=ctx)
            crtemp.commit()
            result = super(ir_actions_report_xml_patch, self).render_report(crtemp, new_uid, res_ids, name, data, context=context)
            crtemp.commit()
        finally:
            crtemp.close()
        # Fresh cursor for the cleanup so it is independent of the render
        # transaction.
        crtemp = pooler.get_db(cr.dbname).cursor()
        try:
            self._unlink_user_and_partner(crtemp, uid, [new_uid], context=ctx)
            crtemp.commit()
        finally:
            crtemp.close()
    if standard_render:
        result = super(ir_actions_report_xml_patch, self).render_report(cr, uid, res_ids, name, data, context=context)
    return result
def common_dispatch(self, method, auth, params):
    """Authenticate and dispatch an RPC call to the matching ``exp_<method>``
    handler on a fresh cursor.

    :param method: RPC method name (resolved to 'exp_' + method)
    :param auth: unused here; kept for dispatcher signature compatibility
    :param params: (db, uid, passwd, *method_args)
    :return: the handler's return value
    """
    (db, uid, passwd) = params[0:3]
    params = params[3:]
    security.check(db, uid, passwd)
    cr = pooler.get_db(db).cursor()
    # BUG FIX: cursor leaked whenever the handler raised; try/finally now
    # guarantees the close.
    try:
        fn = getattr(self, 'exp_' + method)
        res = fn(cr, uid, *params)
        cr.commit()
    finally:
        cr.close()
    return res
def create_new_cr(self, dbname, uid, vals, context):
    """Create an ir.model.export record in its own committed transaction.

    :param dbname: database to open a dedicated cursor on
    :param vals: values for ir.model.export.create
    :raises: re-raises any exception from create(), after rollback + logging
    """
    db = pooler.get_db(dbname)
    cr = db.cursor()
    # BUG FIX: export_id must exist before the try — the original referenced
    # it in the error message, raising NameError when create() itself failed.
    export_id = None
    try:
        export_id = self.pool.get('ir.model.export').create(cr, uid, vals, context)
        cr.commit()
    except Exception as e:
        cr.rollback()
        _logger.error("Could not create export %s: %s" % (export_id, _get_exception_message(e)))
        raise e
    finally:
        # BUG FIX: the cursor was never closed (connection leak).
        cr.close()
def write_new_cr(self, dbname, uid, export_id, vals, context, logger):
    """Update an ir.model.export record in its own committed transaction.

    :param export_id: id of the export record to update
    :param vals: values to write (typically includes 'state')
    :param logger: logger used to report failures
    :return: the write() result (truthy on success)
    :raises: re-raises any exception from write(), after rollback + logging
    """
    db = pooler.get_db(dbname)
    cr = db.cursor()
    try:
        result = self.pool.get('ir.model.export').write(cr, uid, export_id, vals, context)
        cr.commit()
        return result
    except Exception as e:
        # BUG FIX: failed writes now roll back instead of leaving the
        # transaction dangling on a leaked cursor.
        cr.rollback()
        logger.error("Could not mark export %s as %s: %s" % (export_id, vals.get('state'), _get_exception_message(e)))
        raise e
    finally:
        cr.close()
def _process_with_new_cursor(self, dbname, uid, import_id, logger, context):
    """Run one import on a dedicated cursor; commit on success, roll back and
    swallow on failure (best-effort — _process_import handles per-record
    errors via 'rollback_and_continue').

    :param import_id: id of the import job to process
    """
    db = pooler.get_db(dbname)
    cr = db.cursor()
    # BUG FIX: work on a copy so the *caller's* context dict is no longer
    # mutated as a side effect of this call.
    context = dict(context or {})
    context['import_error_management'] = 'rollback_and_continue'
    try:
        self._process_import(cr, uid, import_id, logger, context)
        cr.commit()
    except Exception:
        # Deliberate best-effort: failures roll back but do not propagate.
        cr.rollback()
    finally:
        cr.close()
def compute_all_delivery_dates(self, cr, uid, use_new_cursor=True, context=None):
    """Loop on all products that have moves, and process them one by one.
    This can take a few seconds per product, so the transaction can be very
    long-lived and easily interrupted. To avoid that, we create a new
    cursor that is committed for every product.
    The use_new_cursor can be used in cases where multiple transactions
    are harmful, like for automated testing.
    """
    move_obj = self.pool['stock.move']
    prod_obj = self.pool['product.product']
    if use_new_cursor:
        cr = pooler.get_db(cr.dbname).cursor()
    # BUG FIX: the private cursor was only closed after a *successful* run;
    # an exception from read_group/browse leaked it. try/finally fixes that.
    try:
        moves_out_grouped = move_obj.read_group(
            cr, uid,
            domain=[('picking_id.type', '=', 'out'),
                    ('state', 'in', ('confirmed', 'assigned', 'pending'))],
            fields=['product_id'],
            groupby=['product_id'],
            context=context,
        )
        product_ids = [g['product_id'][0] for g in moves_out_grouped]
        _logger.info('Computing delivery dates for %s products', len(product_ids))
        for product in prod_obj.browse(cr, uid, product_ids, context=context):
            _logger.info('Computing delivery dates for product %s', product.name)
            try:
                self.compute_delivery_dates(cr, uid, product, context=context)
                if use_new_cursor:
                    # Commit per product so an interruption loses at most one.
                    cr.commit()
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt /
            # SystemExit; narrowed to Exception.
            except Exception:
                if use_new_cursor:
                    cr.rollback()
                _logger.exception(
                    'Could not update delivery date for product %s', product.name)
    finally:
        if use_new_cursor:
            cr.close()
    return True
def _generate_with_new_cursor(self, dbname, uid, export_id, logger, context):
    """Run ``self._generate`` for *export_id* on a dedicated cursor.

    Returns False when the database cannot be obtained; otherwise returns
    None, letting any error from ``_generate`` propagate after the cursor
    is closed.
    """
    try:
        database = pooler.get_db(dbname)
    except Exception:
        # Database unavailable: signal failure to the caller instead of raising.
        return False
    cursor = database.cursor()
    try:
        self._generate(cursor, uid, export_id, logger, context)
    finally:
        cursor.close()
    return
def auth_captcha_is_installed(self, db):
    """Return the ir_module_module row (id,) if the 'auth_captcha' module is
    installed in database ``db``, None if the query failed or the module is
    absent.
    """
    cr = pooler.get_db(db).cursor()
    # BUG FIX: module_id must exist up-front — the original hit NameError at
    # the return when the query raised before the assignment.
    module_id = None
    try:
        cr.execute("SELECT id FROM ir_module_module WHERE name='auth_captcha' and state='installed'")
        module_id = cr.fetchone()
    # BUG FIX: `except openerp.exceptions:` named a *module*, not an
    # exception class, which is itself an error at catch time.
    except Exception:
        _logger.debug(u"获取验证码模块是否已安装,查询失败!", exc_info=True)
    finally:
        cr.close()
    return module_id
def compute_all_delivery_dates(self, cr, uid, use_new_cursor=True, context=None):
    """Loop on all products that have moves, and process them one by one.
    This can take a few seconds per product, so the transaction can be very
    long-lived and easily interrupted. To avoid that, we create a new
    cursor that is committed for every product.
    The use_new_cursor can be used in cases where multiple transactions
    are harmful, like for automated testing.
    """
    move_obj = self.pool['stock.move']
    prod_obj = self.pool['product.product']
    if use_new_cursor:
        cr = pooler.get_db(cr.dbname).cursor()
    # BUG FIX: the private cursor was only closed after a *successful* run;
    # an exception from read_group/browse leaked it. try/finally fixes that.
    try:
        moves_out_grouped = move_obj.read_group(
            cr, uid,
            domain=[
                ('picking_id.type', '=', 'out'),
                ('state', 'in', ('confirmed', 'assigned', 'pending'))
            ],
            fields=['product_id'],
            groupby=['product_id'],
            context=context,
        )
        product_ids = [g['product_id'][0] for g in moves_out_grouped]
        _logger.info('Computing delivery dates for %s products', len(product_ids))
        for product in prod_obj.browse(cr, uid, product_ids, context=context):
            _logger.info('Computing delivery dates for product %s', product.name)
            try:
                self.compute_delivery_dates(cr, uid, product, context=context)
                if use_new_cursor:
                    # Commit per product so an interruption loses at most one.
                    cr.commit()
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt /
            # SystemExit; narrowed to Exception.
            except Exception:
                if use_new_cursor:
                    cr.rollback()
                _logger.exception(
                    'Could not update delivery date for product %s', product.name)
    finally:
        if use_new_cursor:
            cr.close()
    return True
def _write_report(self, cr, uid, imp_id, state, msg, _do_commit=True, max_tries=5, context=None):
    """Commit report in a separated transaction.

    It will avoid concurrent update error due to mail.message.
    If transaction trouble happen we try 5 times to rewrite report

    :param imp_id: current importer id
    :param state: import state
    :param msg: report summary
    :param _do_commit: when False, write on the caller's cursor instead
    :param max_tries: remaining retries on PG concurrency errors
    :returns: current importer id
    """
    if _do_commit:
        db_name = cr.dbname
        local_cr = pooler.get_db(db_name).cursor()
        try:
            self.write(local_cr, uid, [imp_id],
                       {'state': state, 'report': msg},
                       context=context)
            local_cr.commit()
        # We handle concurrent error troubles
        except psycopg2.OperationalError as pg_exc:
            _logger.error(
                "Can not write report. "
                "System will retry %s time(s)" % max_tries
            )
            if (pg_exc.pg_code in orm.PG_CONCURRENCY_ERRORS_TO_RETRY and
                    max_tries >= 0):
                local_cr.rollback()
                local_cr.close()
                remaining_try = max_tries - 1
                # BUG FIX: the retry used to call
                #   self._write_report(cr, uid, imp_id, cr, ...)
                # passing the cursor where `state` belongs and omitting `msg`
                # entirely (guaranteed TypeError). Pass the real arguments and
                # propagate the recursive result.
                return self._write_report(cr, uid, imp_id, state, msg,
                                          _do_commit=_do_commit,
                                          max_tries=remaining_try,
                                          context=context)
            else:
                _logger.exception(
                    'Can not log report - Operational update error'
                )
                raise
        except Exception:
            _logger.exception('Can not log report')
            local_cr.rollback()
            raise
        finally:
            # The retry path closes local_cr itself before recursing.
            if not local_cr.closed:
                local_cr.close()
    else:
        self.write(cr, uid, [imp_id], {'state': state, 'report': msg},
                   context=context)
    return imp_id
def exec_workflow(self, db, uid, obj, method, *args):
    """Dispatch a workflow signal on a fresh cursor for *db*.

    Commits when the signal succeeds; rolls back and re-raises on any
    error. The cursor is closed in every case.
    """
    cr = pooler.get_db(db).cursor()
    try:
        try:
            result = self.exec_workflow_cr(cr, uid, obj, method, *args)
            cr.commit()
            # The outer finally still runs before this value is handed back.
            return result
        except Exception:
            cr.rollback()
            raise
    finally:
        cr.close()
def wrapper(self, cr, *args, **kwargs):
    """Invoke the wrapped ``func`` on a brand-new cursor for ``cr``'s
    database: commit if it succeeds, roll back (and re-raise) if it fails,
    and always close the cursor.
    """
    fresh_cr = pooler.get_db(cr.dbname).cursor()
    try:
        response = func(self, fresh_cr, *args, **kwargs)
    except:
        # Bare except on purpose: roll back even on BaseException, then
        # re-raise unchanged.
        fresh_cr.rollback()
        raise
    else:
        fresh_cr.commit()
    finally:
        fresh_cr.close()
    return response
def _upload_thr(self, cr, uid, ids, context=None):
    """Read the uploader wizard settings and launch the product upload on a
    dedicated cursor.

    :param ids: uploader wizard ids; exactly one record is expected
    :return: {} (empty action dict)
    """
    prod_db = self.pool.get('product.product')
    new_cr = pooler.get_db(cr.dbname).cursor()
    # BUG FIX: the cursor leaked whenever browse/upload raised; try/finally
    # guarantees the close.
    try:
        # Exactly-one unpack: raises if ids does not contain a single record.
        (uploader,) = self.browse(new_cr, uid, ids, context=context)
        param = {
            'no_of_processes': uploader.no_of_processes,
            'load_categories': uploader.load_categories,
            'load_properties': uploader.load_properties,
            'load_products': uploader.load_products,
        }
        prod_db.upload_thr(new_cr, uid, param, context=context)
    finally:
        new_cr.close()
    return {}
def _send_notification(self, db_name, uid, collaborator_ids):
    """Send a join notification to every collaborator id, printing a console
    banner per notification, and commit once at the end.

    :param collaborator_ids: iterable of kemas.collaborator ids
    """
    db = pooler.get_db(db_name)
    cr = db.cursor()
    # BUG FIX: the cursor was never closed and a failure left the
    # transaction dangling; try/except/finally fixes both.
    try:
        # enumerate replaces the hand-maintained `count` counter.
        for count, collaborator_id in enumerate(collaborator_ids, 1):
            self.pool.get('kemas.collaborator').send_notification(cr, uid, collaborator_id, context={})
            print("""\n
-------------------------------------------------------------------------------------------------------------------------
***********************************************[%d] Join Notifications was sended****************************************
-------------------------------------------------------------------------------------------------------------------------\n""" % (count))
        cr.commit()
    except Exception:
        cr.rollback()
        raise
    finally:
        cr.close()
def _run_action_in_new_thread(self, dbname, uid, action, object_ids, context, logger, pid):
    """Execute ``action`` on ``object_ids`` inside a dedicated cursor
    (intended for a worker thread). Commits and logs on success; logs the
    failure (without re-raising) otherwise.

    :param pid: identifier included in log lines to tag this worker
    """
    try:
        db = pooler.get_db(dbname)
    except Exception:
        # Database unavailable: silently give up, exactly as before.
        return
    cr = db.cursor()
    try:
        self._run_action_for_object_ids(cr, uid, action, object_ids, context)
        cr.commit()
        logger.time_info('[%s] Successful Action: %s - Objects: %s,%s' % (pid, action.name, action.model_id.model, object_ids))
    except Exception as e:
        # BUG FIX: failures now roll back instead of leaving an open
        # transaction behind.
        cr.rollback()
        logger.exception('[%s] Action failed: %s - %s' % (pid, action.name, _get_exception_message(e)))
    finally:
        # BUG FIX: the cursor was never closed (connection leak per thread).
        cr.close()