def process_move_student_requests():
    """Process every queued bofh_move_student request.

    Builds the module-global fnr2move_student mapping (fnr -> list of
    (account_id, request_id, requestee_id)), runs the AutoStud callbacks
    to place each student on a proper disk, and finally re-queues any
    accounts that could not be placed as bofh_move_user requests
    targeting the pending disk.

    Reads module globals: db, const, logger, studconfig_file,
    studieprogs_file, emne_info_file, ou_perspective, student_info_file,
    default_spread.  Sets module globals: fnr2move_student, autostud.
    """
    global fnr2move_student, autostud
    br = BofhdRequests(db, const)
    rows = br.get_requests(operation=const.bofh_move_student)
    if not rows:
        return
    logger.debug("Preparing autostud framework")
    autostud = AutoStud.AutoStud(db, logger, debug=False,
                                 cfg_file=studconfig_file,
                                 studieprogs_file=studieprogs_file,
                                 emne_info_file=emne_info_file,
                                 ou_perspective=ou_perspective)

    # Fetch each person's national id (fodselsnummer) + account_id
    fnr2move_student = {}
    account = Utils.Factory.get('Account')(db)
    person = Utils.Factory.get('Person')(db)
    for r in rows:
        if not is_valid_request(r['request_id']):
            continue
        account.clear()
        account.find(r['entity_id'])
        person.clear()
        person.find(account.owner_id)
        fnr = person.get_external_id(id_type=const.externalid_fodselsnr,
                                     source_system=const.system_fs)
        if not fnr:
            # No fnr registered from FS: not a student we can place,
            # so the request is dropped entirely.
            logger.warn("Not student fnr for: %i" % account.entity_id)
            br.delete_request(request_id=r['request_id'])
            db.commit()
            continue
        fnr = fnr[0]['external_id']
        fnr2move_student.setdefault(fnr, []).append(
            (int(account.entity_id),
             int(r['request_id']),
             int(r['requestee_id'])))
    logger.debug("Starting callbacks to find: %s" % fnr2move_student)
    # move_student_callback removes entries from fnr2move_student as it
    # manages to place them; whatever remains afterwards is unresolved.
    autostud.start_student_callbacks(student_info_file,
                                     move_student_callback)

    # Move remaining users to pending disk
    disk = Utils.Factory.get('Disk')(db)
    disk.find_by_path(cereconf.AUTOSTUD_PENDING_DISK)
    logger.debug(str(fnr2move_student.values()))
    for tmp_stud in fnr2move_student.values():
        for account_id, request_id, requestee_id in tmp_stud:
            logger.debug("Sending %s to pending disk" % repr(account_id))
            # Replace the move_student request with a move_user request
            # to the pending disk; committed per account.
            br.delete_request(request_id=request_id)
            br.add_request(requestee_id, br.batch_time,
                           const.bofh_move_user, account_id,
                           disk.entity_id,
                           state_data=int(default_spread))
            db.commit()
def release_guest(self, guest, operator_id):
    """Detach a guest account from its temporary owner.

    Looks the guest account up by name, verifies that it really is an
    owned guest, clears the owner trait, replaces the release
    quarantine with one starting today, scrambles the password,
    refreshes group memberships and queues archival of the home
    directory.  The account becomes available for new allocations once
    the release quarantine expires.

    @param guest: uname of guest account
    @type guest: str

    @param operator_id: entity id of operator
    @type operator_id: int
    """
    account = Factory.get('Account')(self.db)
    account.find_by_name(guest)

    owner_trait = account.get_trait(self.co.trait_uio_guest_owner)
    if owner_trait is None:
        raise GuestAccountException("%s is not a guest" % guest)
    if owner_trait['target_id'] is None:
        raise GuestAccountException("%s is already available" % guest)

    # Clearing target_id in the owner trait marks the guest as unowned.
    account.populate_trait(self.co.trait_uio_guest_owner, target_id=None)
    self.logger.debug("Removed owner_id in owner_trait for %s" % guest)

    # Drop the quarantine that _alloc_guest installed and set a fresh
    # one that takes effect immediately.
    if account.get_entity_quarantine(self.co.quarantine_guest_release):
        account.delete_entity_quarantine(self.co.quarantine_guest_release)
    account.add_entity_quarantine(self.co.quarantine_guest_release,
                                  operator_id, "Guest user released",
                                  start=DateTime.today())
    self.logger.debug("%s is now in release_quarantine" % guest)

    # Scramble the password so the previous owner loses access.
    account.set_password(account.make_passwd(guest))
    account.write_db()

    self.update_group_memberships(account.entity_id)
    self.logger.debug("Updating group memberships for %s" % guest)

    # Queue archival of the home directory; a fresh directory will be
    # created once archival has been done.
    requests = BofhdRequests(self.db, self.co)
    requests.add_request(operator_id, requests.now,
                         self.co.bofh_archive_user,
                         account.entity_id, None,
                         state_data=int(self.co.spread_uio_nis_user))
    self.logger.debug("Added archive_user request for %s" % guest)
def _UiO_order_cyrus_action(self, action, destination, state_data=None):
    """Queue a BofhdRequest of type `action` for this entity's mailbox.

    Pending requests whose operation conflicts with `action` are
    removed first, so only the newly-registered request remains.
    """
    requests = BofhdRequests(self._db, self.const)

    # Drop every already-registered request that would conflict with
    # the one we are about to add.
    for conflicting_op in requests.get_conflicts(action):
        for req in requests.get_requests(entity_id=self.entity_id,
                                         operation=conflicting_op):
            self.logger.info("Removing BofhdRequest #%d: %r",
                             req['request_id'], req)
            requests.delete_request(request_id=req['request_id'])

    # Attribute the request to whoever made the change if the ChangeLog
    # module knows; otherwise None is the best we can do.
    requestor = getattr(self._db, 'change_by', None)

    # Register the BofhdRequest to create the mailbox.
    requests.add_request(requestor, requests.now, action,
                         self.entity_id, destination,
                         state_data=state_data)
def process_account(account, delete=False, bofhdreq=False):
    """Deactivate the given account.

    :param Cerebrum.Account: The account that should get deactivated.

    :param bool delete:
        If True, the account will be totally deleted instead of just
        deactivated.

    :param bool bofhdreq:
        If True, the account will be given to BofhdRequest for further
        processing. It will then not be deactivated by this script.

    :rtype: bool
    :returns: If the account really got deactivated/deleted.
    """
    if account.is_deleted():
        logger.debug2("Account %s already deleted", account.account_name)
        return False
    logger.info('Deactivating account: %s (%s)', account.account_name,
                account.entity_id)
    if delete:
        logger.info("Terminating account: %s", account.account_name)
        account.terminate()
        # FIX: previously fell through and returned None even though the
        # docstring promises a bool.
        return True
    elif bofhdreq:
        logger.debug("Send to BofhdRequest: %s", account.account_name)
        br = BofhdRequests(database, constants)
        try:
            reqid = br.add_request(operator_id, when=br.now,
                                   op_code=constants.bofh_delete_user,
                                   entity_id=account.entity_id,
                                   destination_id=None)
            logger.debug("BofhdRequest-Id: %s", reqid)
        except Errors.CerebrumError as e:
            # A CerebrumError is thrown if there exists some move_user for the
            # same user...
            logger.warn("Couldn't delete %s: %s", account.account_name, e)
            return False
        # FIX: the successful hand-over also returned None before; report
        # success to the caller.
        return True
    else:
        account.deactivate()
        return True
def _UiO_order_cyrus_action(self, action, destination, state_data=None):
    """Register a BofhdRequest of type `action` for this entity.

    Any pending requests whose operation conflicts with `action` are
    removed before the new request is registered, so only one of the
    mutually exclusive operations stays queued.
    """
    br = BofhdRequests(self._db, self.const)
    # If there are any registered BofhdRequests for this account
    # that would conflict with 'action', remove them.
    for anti_action in br.get_conflicts(action):
        for r in br.get_requests(entity_id=self.entity_id,
                                 operation=anti_action):
            self.logger.info("Removing BofhdRequest #%d: %r",
                             r['request_id'], r)
            br.delete_request(request_id=r['request_id'])
    # If the ChangeLog module knows who the user requesting this
    # change is, use that knowledge.  Otherwise, set requestor to
    # None; it's the best we can do.
    requestor = getattr(self._db, 'change_by', None)
    # Register a BofhdRequest to create the mailbox.  The request id is
    # not needed here, so the return value is deliberately ignored
    # (the sibling implementation of this method does the same).
    br.add_request(requestor, br.now, action, self.entity_id,
                   destination, state_data=state_data)
def process_account(account, delete=False, bofhdreq=False):
    """Deactivate the given account.

    :param Cerebrum.Account: The account that should get deactivated.

    :param bool delete:
        If True, the account will be totally deleted instead of just
        deactivated.

    :param bool bofhdreq:
        If True, the account will be given to BofhdRequest for further
        processing. It will then not be deactivated by this script.

    :rtype: bool
    :returns: If the account really got deactivated/deleted.
    """
    if account.is_deleted():
        logger.debug2("Account %s already deleted", account.account_name)
        return False
    logger.info('Deactivating account: %s (%s)', account.account_name,
                account.entity_id)
    if delete:
        logger.info("Terminating account: %s", account.account_name)
        account.terminate()
        return True
    elif bofhdreq:
        logger.debug("Send to BofhdRequest: %s", account.account_name)
        br = BofhdRequests(database, constants)
        try:
            reqid = br.add_request(operator_id, when=br.now,
                                   op_code=constants.bofh_delete_user,
                                   entity_id=account.entity_id,
                                   destination_id=None)
            logger.debug("BofhdRequest-Id: %s", reqid)
        # FIX: modern `as e` syntax, consistent with the sibling
        # implementation of this function elsewhere in the codebase.
        except Errors.CerebrumError as e:
            # A CerebrumError is thrown if there exists some move_user for the
            # same user...
            logger.warn("Couldn't delete %s: %s", account.account_name, e)
            return False
        return True
    else:
        # FIX: this default branch was missing, so calling the function
        # without flags silently did nothing despite the docstring; this
        # mirrors the sibling implementation.
        account.deactivate()
        return True
class Quarantine2Request(EvtHandler):
    """Mirror quarantine changes into bofhd_request records.

    Whenever a quarantine is added, updated or deleted, a
    bofh_quarantine_refresh request is scheduled on each relevant date
    (start_date, end_date and disable_until) so the quarantine state is
    re-evaluated at exactly the right moments.
    """

    def __init__(self):
        self.br = BofhdRequests(db, const)
        self.eq = EntityQuarantine(db)

    def get_triggers(self):
        # Change-log event types this handler subscribes to.
        return ("quarantine_add", "quarantine_mod", "quarantine_del")

    def _get_quarantine(self, entity_id, q_type):
        # Fetch the first matching quarantine row for the entity, or
        # None when either the entity or the quarantine is gone.
        self.eq.clear()
        try:
            self.eq.find(entity_id)
        except Errors.NotFoundError:
            return None
        rows = self.eq.get_entity_quarantine(q_type)
        if rows:
            return rows[0]
        return None

    def notify_quarantine_add(self, evt, params):
        # Schedule a bofh_quarantine_refresh on every date carried by
        # the newly added quarantine.
        quarantine = self._get_quarantine(evt['subject_entity'],
                                          params['q_type'])
        if not quarantine:
            return True
        for key in ('start_date', 'end_date', 'disable_until'):
            when = quarantine[key]
            if when is not None:
                self.br.add_request(None, when,
                                    const.bofh_quarantine_refresh,
                                    evt['subject_entity'], None,
                                    state_data=int(params['q_type']))
        db.commit()
        return True

    def notify_quarantine_mod(self, evt, params):
        # Only disable_until is affected by quarantine_mod at present;
        # refresh on that date and also immediately.
        quarantine = self._get_quarantine(evt['subject_entity'],
                                          params['q_type'])
        if not quarantine:
            return True
        if quarantine['disable_until']:
            self.br.add_request(None, quarantine['disable_until'],
                                const.bofh_quarantine_refresh,
                                evt['subject_entity'], None,
                                state_data=int(params['q_type']))
        self.br.add_request(None, self.br.now,
                            const.bofh_quarantine_refresh,
                            evt['subject_entity'], None,
                            state_data=int(params['q_type']))
        db.commit()
        return True

    def notify_quarantine_del(self, evt, params):
        # Pending refresh requests for this entity/quarantine pair are
        # obsolete once the quarantine is removed: drop them, then
        # schedule one final refresh right away.
        pending = self.br.get_requests(
            entity_id=evt['subject_entity'],
            operation=int(const.bofh_quarantine_refresh))
        for row in pending:
            if int(row['state_data']) == int(params['q_type']):
                self.br.delete_request(request_id=row['request_id'])
        self.br.add_request(None, self.br.now,
                            const.bofh_quarantine_refresh,
                            evt['subject_entity'], None,
                            state_data=int(params['q_type']))
        db.commit()
        return True
def sympa_remove_list(self, operator, run_host, listname, force_yes_no):
    """ Remove a sympa list from cerebrum.

    @type force_yes_no: str
    @param force_yes_no:
        Yes/no answer controlling whether a bofhd request should be
        issued. This may come in handy, if we want to delete a sympa
        list from Cerebrum only and not issue any requests. misc
        cancel_request would have worked too, but it's better to merge
        this into one command.
    """
    force_request = self._is_yes(force_yes_no)

    # Check that the command exec host is sane
    if run_host not in cereconf.SYMPA_RUN_HOSTS:
        raise CerebrumError("run-host '%s' for list '%s' is not valid"
                            % (run_host, listname))

    et, ea = self._get_email_target_and_address(listname)
    self.ba.can_email_list_delete(operator.get_entity_id(), ea)

    if et.email_target_type != self.const.email_target_Sympa:
        raise CerebrumError(
            "'%s' is not a sympa list (type: %s)"
            % (listname, self.const.EmailTarget(et.email_target_type)))

    epat = Email.EmailPrimaryAddressTarget(self.db)
    list_id = ea.entity_id
    # Now, there are *many* ETs/EAs associated with one sympa list. We
    # have to wipe them all out.
    if not self._validate_sympa_list(listname):
        # FIX: the list name was previously passed as a second argument
        # to CerebrumError instead of being %-interpolated into the
        # message.
        raise CerebrumError("Illegal sympa list name: '%s'" % listname)

    deleted_EA = self.email_info(operator, listname)
    # needed for pattern interpolation below (these are actually used)
    local_part, domain = self._split_email_address(listname)
    for pattern, pipe_destination in self._sympa_addr2alias:
        # NB: the patterns reference local_part/domain via locals(), so
        # those local names must not be renamed.
        address = pattern % locals()
        # For each address, find the target, and remove all email
        # addresses for that target (there may be many addresses for the
        # same target).
        try:
            ea.clear()
            ea.find_by_address(address)
            et.clear()
            et.find(ea.get_target_id())
            epat.clear()
            try:
                epat.find(et.entity_id)
            except Errors.NotFoundError:
                pass
            else:
                epat.delete()
            # Wipe all addresses...
            for row in et.get_addresses():
                addr = '%(local_part)s@%(domain)s' % row
                ea.clear()
                ea.find_by_address(addr)
                ea.delete()
            et.delete()
        except Errors.NotFoundError:
            pass

    if cereconf.INSTITUTION_DOMAIN_NAME == 'uio.no':
        self._report_deleted_EA(deleted_EA)
    if not force_request:
        return {'listname': listname, 'request': False}

    br = BofhdRequests(self.db, self.const)
    state = {'run_host': run_host, 'listname': listname}
    br.add_request(
        operator.get_entity_id(),
        # IVR 2008-08-04 +1 hour to allow changes to spread to
        # LDAP. This way we'll have a nice SMTP-error, rather
        # than a confusing error burp from sympa.
        DateTime.now() + DateTime.DateTimeDelta(0, 1),
        self.const.bofh_sympa_remove,
        list_id, None, state_data=pickle.dumps(state))

    return {'listname': listname, 'request': True}
def sympa_create_list(self, operator, run_host, delivery_host,
                      listname, admins, list_profile,
                      list_description, yes_no_force="No"):
    """ Create a sympa list in Cerebrum and on the sympa server(s).

    Registers all the necessary cerebrum information and make a bofhd
    request for the actual list creation.
    """
    # Check that the profile is legal
    if list_profile not in cereconf.SYMPA_PROFILES:
        raise CerebrumError("Profile %s for sympa list %s is not valid"
                            % (list_profile, listname))

    # Check that the command exec host is sane
    if run_host not in cereconf.SYMPA_RUN_HOSTS:
        raise CerebrumError("run-host '%s' for list '%s' is not valid"
                            % (run_host, listname))

    metachars = "'\"$&()*;<>?[\\]`{|}~\n"

    def has_meta(s1, s2=metachars):
        """Check if any char of s1 is in s2"""
        for c in s1:
            if c in s2:
                return True
        return False

    # Sympa list creation command will be passed through multiple
    # exec/shells. Better be restrictive.
    # (idiom: any() with a generator instead of `True in [list-comp]`)
    if any(has_meta(x) for x in
           (run_host, delivery_host, listname, admins, list_profile,
            list_description)):
        raise CerebrumError(
            "Illegal metacharacter in list parameter. Allowed: '%s'"
            % metachars)

    delivery_host = self._get_email_server(delivery_host)
    force = self._is_yes(yes_no_force)
    self._create_sympa_list(operator, listname, delivery_host,
                            force=force)

    # Now make a bofhd request to create the list itself
    admin_list = list()
    for item in admins.split(","):
        # it's a user name. That username must exist in Cerebrum
        if "@" not in item:
            self._get_account(item)
            # TODO: Not good, this is in use by UIA
            item = item + "@ulrik.uio.no"
        admin_list.append(item)

    # Make the request.
    lp, dom = self._split_email_address(listname)
    ed = self._get_email_domain_from_str(dom)
    ea = Email.EmailAddress(self.db)
    ea.clear()
    ea.find_by_local_part_and_domain(lp, ed.entity_id)
    list_id = ea.entity_id

    # IVR 2008-08-01 TBD: this is a bit ugly. We need to pass several
    # arguments to p_b_r, but we cannot really store them anywhere :(
    # The idea is then to take a small dict, pickle it, shove into
    # state_data, unpickle in p_b_r and be on our merry way. It is at
    # the very best suboptimal.
    state = {
        # IVR 2008-08-01 FIXME: non-fqdn? force? check?
        "runhost": run_host,
        "admins": admin_list,
        "profile": list_profile,
        "description": list_description,
    }
    br = BofhdRequests(self.db, self.const)

    # IVR 2009-04-17 +30 minute delay to allow changes to spread to
    # LDAP. The postmasters are nagging for that delay. All questions
    # should be directed to them (this is similar to delaying a delete
    # request).
    br.add_request(operator.get_entity_id(),
                   DateTime.now() + DateTime.DateTimeDelta(0, 0, 30),
                   self.const.bofh_sympa_create, list_id, ea.entity_id,
                   state_data=pickle.dumps(state))
    return {'listname': listname}
class MoveStudentProcessor(object):
    """Processes queued bofh_move_student requests via AutoStud.

    For every pending request the student's national id (fnr) is looked
    up, the autostud framework decides on a target disk, and accounts
    for which no disk can be determined are parked on the pending disk.
    """

    def __init__(self, db, co, ou_perspective, emne_info_file,
                 studconfig_file, studieprogs_file, default_spread=None):
        self.db = db
        self.co = co
        self.br = BofhdRequests(self.db, self.co)
        self.default_spread = default_spread
        logger.debug("Preparing autostud framework")
        self.autostud = AutoStud.AutoStud(self.db,
                                          logger.getChild('autostud'),
                                          debug=False,
                                          cfg_file=studconfig_file,
                                          studieprogs_file=studieprogs_file,
                                          emne_info_file=emne_info_file,
                                          ou_perspective=ou_perspective)

    def process_requests(self, student_info_file):
        # Entry point: handle every queued bofh_move_student request.
        rows = self.br.get_requests(operation=self.co.bofh_move_student)
        if not rows:
            return
        # Set self.fnr2move_student
        self.set_fnr2move_student(rows)
        logger.debug("Starting callbacks to find: %s"
                     % self.fnr2move_student)
        # move_student_callback removes entries it manages to place;
        # what remains afterwards goes to the pending disk.
        self.autostud.start_student_callbacks(student_info_file,
                                              self.move_student_callback)
        self.move_remaining_users()

    def move_remaining_users(self):
        # Move remaining users to pending disk
        disk = Factory.get('Disk')(self.db)
        disk.find_by_path(cereconf.AUTOSTUD_PENDING_DISK)
        logger.debug(str(self.fnr2move_student.values()))
        for tmp_stud in self.fnr2move_student.values():
            for account_id, request_id, requestee_id in tmp_stud:
                logger.debug("Sending %s to pending disk"
                             % repr(account_id))
                # Replace the move_student request with a move_user
                # request targeting the pending disk.
                self.br.delete_request(request_id=request_id)
                self.br.add_request(requestee_id, self.br.batch_time,
                                    self.co.bofh_move_user, account_id,
                                    disk.entity_id,
                                    state_data=int(self.default_spread))
                self.db.commit()

    def set_fnr2move_student(self, rows):
        # Fetch each person's national id (fodselsnummer) + account_id.
        # Maps fnr -> list of (account_id, request_id, requestee_id).
        self.fnr2move_student = {}
        account = Factory.get('Account')(self.db)
        person = Factory.get('Person')(self.db)
        for r in rows:
            if not is_valid_request(self.br, r['request_id']):
                continue
            account.clear()
            account.find(r['entity_id'])
            person.clear()
            person.find(account.owner_id)
            fnr = person.get_external_id(
                id_type=self.co.externalid_fodselsnr,
                source_system=self.co.system_fs
            )
            if not fnr:
                # No fnr from FS: cannot treat as student; drop request.
                logger.warn("Not student fnr for: %i" % account.entity_id)
                self.br.delete_request(request_id=r['request_id'])
                self.db.commit()
                continue
            fnr = fnr[0]['external_id']
            self.fnr2move_student.setdefault(fnr, []).append(
                (int(account.entity_id),
                 int(r['request_id']),
                 int(r['requestee_id'])))

    def move_student_callback(self, person_info):
        """We will only move the student if it has a valid fnr from FS,
        and it is not currently on a student disk.

        If the new homedir cannot be determined, user will be moved to
        a pending disk.  process_students moves users from this disk as
        soon as a proper disk can be determined.

        Currently we only operate on the disk whose spread is
        default_spread"""
        fnr = "%06d%05d" % (int(person_info['fodselsdato']),
                            int(person_info['personnr']))
        logger.debug("Callback for %s" % fnr)
        try:
            # Validate the national id checksum before doing anything.
            fodselsnr.personnr_ok(fnr)
        except Exception, e:
            logger.exception(e)
            return
        if fnr not in self.fnr2move_student:
            return
        account = Factory.get('Account')(self.db)
        group = Factory.get('Group')(self.db)
        for account_id, request_id, requestee_id in \
                self.fnr2move_student.get(fnr, []):
            account.clear()
            account.find(account_id)
            # Direct group memberships feed the profile matcher.
            groups = list(int(x["group_id"]) for x in
                          group.search(member_id=account_id,
                                       indirect_members=False))
            try:
                profile = self.autostud.get_profile(
                    person_info, member_groups=groups)
                logger.debug(profile.matcher.debug_dump())
            except AutostudError, msg:
                # No profile: leave the request so the account goes to
                # the pending disk instead.
                logger.debug("Error getting profile, using pending: %s"
                             % msg)
                continue
            disks = self.determine_disks(account, request_id, profile,
                                         fnr)
            logger.debug(str((fnr, account_id, disks)))
            if disks:
                logger.debug("Destination %s" % repr(disks))
                # Placed: remove from the pending bookkeeping and queue
                # the actual move_user request(s).
                del (self.fnr2move_student[fnr])
                for disk, spread in disks:
                    self.br.delete_request(request_id=request_id)
                    self.br.add_request(requestee_id, self.br.batch_time,
                                        self.co.bofh_move_user,
                                        account_id, disk,
                                        state_data=spread)
                    self.db.commit()
def move_student_callback(person_info):
    """We will only move the student if it has a valid fnr from FS, and
    it is not currently on a student disk.

    If the new homedir cannot be determined, user will be moved to a
    pending disk.  process_students moves users from this disk as soon
    as a proper disk can be determined.

    Currently we only operate on the disk whose spread is
    default_spread"""

    fnr = fodselsnr.personnr_ok("%06d%05d" % (int(person_info['fodselsdato']),
                                              int(person_info['personnr'])))
    if fnr not in fnr2move_student:
        return
    logger.debug("Callback for %s" % fnr)
    account = Utils.Factory.get('Account')(db)
    group = Utils.Factory.get('Group')(db)
    br = BofhdRequests(db, const)
    for account_id, request_id, requestee_id in fnr2move_student.get(fnr, []):
        account.clear()
        account.find(account_id)
        # Direct group memberships feed the profile matcher.
        groups = list(int(x["group_id"]) for x in
                      group.search(member_id=account_id,
                                   indirect_members=False))
        try:
            profile = autostud.get_profile(person_info,
                                           member_groups=groups)
            logger.debug(profile.matcher.debug_dump())
        except AutostudError, msg:
            # No matching profile: leave the request so the account is
            # sent to the pending disk instead.
            logger.debug("Error getting profile, using pending: %s" % msg)
            continue

        # Determine disk
        disks = []
        spreads = [int(s) for s in profile.get_spreads()]
        try:
            for d_spread in profile.get_disk_spreads():
                if d_spread != default_spread:
                    # TBD: How can all spreads be taken into account?
                    continue
                if d_spread in spreads:
                    try:
                        ah = account.get_home(d_spread)
                        homedir_id = ah['homedir_id']
                        current_disk_id = ah['disk_id']
                    except Errors.NotFoundError:
                        homedir_id, current_disk_id = None, None
                    if autostud.disk_tool.get_diskdef_by_diskid(
                            int(current_disk_id)):
                        logger.debug("Already on a student disk")
                        br.delete_request(request_id=request_id)
                        db.commit()
                        # actually, we remove a bit too much data from
                        # the below dict, but remaining data will be
                        # rebuilt on next run.
                        del(fnr2move_student[fnr])
                        # NextAccount jumps out of both loops for this
                        # account (see except below).
                        raise NextAccount
                    try:
                        new_disk = profile.get_disk(d_spread,
                                                    current_disk_id,
                                                    do_check_move_ok=False)
                        if new_disk == current_disk_id:
                            continue
                        disks.append((new_disk, d_spread))
                        if (autostud.disk_tool.using_disk_kvote and
                                homedir_id is not None):
                            from Cerebrum.modules.no.uio import DiskQuota
                            disk_quota_obj = DiskQuota.DiskQuota(db)
                            try:
                                cur_quota = disk_quota_obj.get_quota(
                                    homedir_id)
                            except Errors.NotFoundError:
                                cur_quota = None
                            quota = profile.get_disk_kvote(new_disk)
                            # Only touch the quota when it differs.
                            if (cur_quota is None or
                                    cur_quota['quota'] != int(quota)):
                                disk_quota_obj.set_quota(homedir_id,
                                                         quota=int(quota))
                    except AutostudError, msg:
                        # Will end up on pending (since we only use one
                        # spread)
                        logger.debug("Error getting disk: %s" % msg)
                        break
        except NextAccount:
            pass  # Stupid python don't have labeled breaks
        logger.debug(str((fnr, account_id, disks)))
        if disks:
            logger.debug("Destination %s" % repr(disks))
            # Placed: remove bookkeeping entry and queue move_user.
            del(fnr2move_student[fnr])
            for disk, spread in disks:
                br.delete_request(request_id=request_id)
                br.add_request(requestee_id, br.batch_time,
                               const.bofh_move_user,
                               account_id, disk, state_data=spread)
                db.commit()
def email_move_child(host, r):
    """Move a user's IMAP mail and SIEVE filters to a new e-mail server.

    Runs as a worker for one bofhd request: copies the mailbox with
    imapsync (via ssh to `host`), syncs SIEVE filters, updates the
    e-mail target's server in Cerebrum, and finally queues deletion of
    the mailbox on the old server.

    :param host: host to run the imapsync command from (via ssh)
    :param r: the bofhd_request db-row being processed
    """
    local_db = Utils.Factory.get('Database')()
    local_co = Utils.Factory.get('Constants')(local_db)
    r_id = r['request_id']
    if not is_valid_request(r_id, local_db=local_db, local_co=local_co):
        return
    if dependency_pending(r['state_data'], local_db=local_db,
                          local_co=local_co):
        logger.debug("Request '%d' still has deps: '%s'.", r_id,
                     r['state_data'])
        return
    try:
        acc = get_account(r['entity_id'], local_db=local_db)
    except Errors.NotFoundError:
        logger.error("email_move: user %d not found", r['entity_id'])
        return
    old_server = get_email_server(r['entity_id'], local_db=local_db)
    new_server = Email.EmailServer(local_db)
    new_server.find(r['destination_id'])
    if old_server.entity_id == new_server.entity_id:
        logger.error("Trying to move %s from " % acc.account_name +
                     "and to the same server! Deleting request")
        br = BofhdRequests(local_db, local_co)
        br.delete_request(request_id=r_id)
        local_db.commit()
        return
    if not email_delivery_stopped(acc.account_name):
        logger.debug("E-mail delivery not stopped for %s",
                     acc.account_name)
        return

    logger.debug("User being moved: '%s'.", acc.account_name)
    reqlock = RequestLockHandler()
    if not reqlock.grab(r_id):
        return
    # Disable quota while copying so the move doesn't fail
    cyrus_set_quota(acc.entity_id, 0, host=new_server, local_db=local_db)

    # Call the script
    cmd = [SSH_CMD, "cerebrum@%s" % host, cereconf.IMAPSYNC_SCRIPT,
           '--user1', acc.account_name, '--host1', old_server.name,
           '--user2', acc.account_name, '--host2', new_server.name,
           '--authusing', cereconf.CYRUS_ADMIN,
           '--passfile1', '/etc/cyrus.pw',
           '--useheader', 'Message-ID',
           '--regexmess', 's/\\0/ /g',
           '--ssl', '--subscribe', '--nofoldersizes']
    # FIX: 'capturestderr' is a popen2-module argument, not a valid
    # subprocess.Popen keyword -- passing it raised TypeError.  stderr
    # is already captured through stderr=subprocess.PIPE.
    proc = subprocess.Popen(cmd, bufsize=10240, close_fds=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    pid = proc.pid
    logger.debug("Called cmd(%d): '%s'", pid, cmd)
    proc.stdin.close()
    # Stolen from Utils.py:spawn_and_log_output()
    descriptor = {proc.stdout: logger.debug,
                  proc.stderr: logger.info}
    while descriptor:
        # select() is called for _every_ line, since we can't inspect
        # the buffering in Python's file object. This works OK since
        # select() will return "readable" for an unread EOF, and
        # Python won't read the EOF until the buffers are exhausted.
        ready, x, x = select(descriptor.keys(), [], [])
        for fd in ready:
            line = fd.readline()
            if line == '':
                fd.close()
                del descriptor[fd]
            else:
                descriptor[fd]("[%d] %s" % (pid, line.rstrip()))
    status = proc.wait()
    if status == EXIT_SUCCESS:
        logger.debug("[%d] Completed successfully", pid)
    elif os.WIFSIGNALED(status):
        # The process was killed by a signal.
        sig = os.WTERMSIG(status)
        logger.warning('[%d] Command "%r" was killed by signal %d',
                       pid, cmd, sig)
        return
    else:
        # The process exited with an exit status
        sig = os.WSTOPSIG(status)
        logger.warning("[%d] Return value was %d from command %r",
                       pid, sig, cmd)
        return

    # Need move SIEVE filters as well
    cmd = [cereconf.MANAGESIEVE_SCRIPT,
           '-v', '-a', cereconf.CYRUS_ADMIN, '-p', pwfile,
           acc.account_name, old_server.name, new_server.name]
    if Utils.spawn_and_log_output(
            cmd, connect_to=[old_server.name, new_server.name]) != 0:
        logger.warning('%s: managesieve_sync failed!', acc.account_name)
        return
    logger.info('%s: managesieve_sync completed successfully',
                acc.account_name)

    # The move was successful, update the user's server
    # Now set the correct quota.
    hq = get_email_hardquota(acc.entity_id, local_db=local_db)
    cyrus_set_quota(acc.entity_id, hq, host=new_server, local_db=local_db)
    et = Email.EmailTarget(local_db)
    et.find_by_target_entity(acc.entity_id)
    et.email_server_id = new_server.entity_id
    et.write_db()
    # We need to delete this request before adding the
    # delete to avoid triggering the conflicting request
    # test.
    br = BofhdRequests(local_db, local_co)
    br.delete_request(request_id=r_id)
    local_db.commit()
    br.add_request(r['requestee_id'], r['run_at'],
                   local_co.bofh_email_delete,
                   r['entity_id'], old_server.entity_id)
    local_db.commit()
    logger.info("%s: move_email success.", acc.account_name)
    reqlock.release()