def get_messages_for_cleaning(user_email=None, process_id=None):
    """Return the batches of message ids to clean for one user.

    Logs in over IMAP with the stored credentials of the CleanUserProcess,
    lists the not-yet-migrated messages with attachments matching the
    process search criteria, and records the total in a sharded counter.

    Args:
        user_email: source account to log into.
        process_id: id of the CleanUserProcess driving this run.

    Returns:
        A list containing a single list of message ids, or an empty list
        (in which case the process is marked FINISHED).
    """
    clean_process = CleanUserProcess.get_by_id(process_id)
    imap = IMAPHelper()
    imap.login(email=user_email, password=clean_process.source_password)
    msg_ids = imap.list_messages(
        criteria=clean_process.search_criteria,
        only_with_attachments=True,
        not_migrated=True)
    imap.close()

    if msg_ids:
        counter.load_and_increment_counter(
            'cleaning_%s_total_count' % user_email,
            delta=len(msg_ids), namespace=str(process_id))
        # chunkify: due to the migration API 1QPS limit
        # should this optimization be used?
        # n = min(constants.USER_CONNECTION_LIMIT, len(msg_ids))
        # return [msg_ids[i::n] for i in xrange(n)]
        return [msg_ids]
    else:
        # Nothing to clean: still record a zero total so progress reporting
        # works, and mark the process finished right away.
        counter.load_and_increment_counter(
            'cleaning_%s_total_count' % user_email,
            delta=0, namespace=str(process_id))
        # Reuse the entity fetched above instead of a second datastore get.
        clean_process.status = constants.FINISHED
        clean_process.put()
        return []
def _record_user_error(user_email, error):
    """Append an error description to the user's ProcessedUser record,
    creating the record with zeroed counters if it does not exist yet."""
    processed_user = ProcessedUser.get_by_id(user_email)
    if not processed_user:
        processed_user = ProcessedUser(id=user_email, ok_count=0,
                                       error_count=0, total_count=list(),
                                       error_description=list())
    processed_user.error_description.append(error.message)
    processed_user.put()


def get_messages(user_email=None, tag=None, process_id=None):
    """List the user's trash messages for migration, in connection-sized chunks.

    Logs in via 2-legged OAuth, optionally creates the label *tag*, and
    lists messages in the trash. Any failure is recorded on the user's
    ProcessedUser entity and an empty list is returned.

    Args:
        user_email: account to log into.
        tag: optional label name to create before listing.
        process_id: namespace used for the progress counter.

    Returns:
        A list of message-id chunks (one per allowed connection), or [].
    """
    imap = None
    msg_ids = []
    try:
        imap = IMAPHelper()
        imap.oauth1_2lo_login(user_email=user_email)
        try:
            if tag:
                logging.info('Creating label [%s]', tag)
                imap.create_label(tag)
            msg_ids = imap.list_messages(only_from_trash=True)
        except Exception as e:
            logging.exception('Error creating label or retrieving messages for '
                              'user [%s]', user_email)
            # BUG FIX: was ProcessedUser.get_by_id(email) — `email` is
            # undefined here and raised a NameError that masked the
            # original failure. Use user_email.
            _record_user_error(user_email, e)
            return []
    except Exception as e:
        logging.exception('Authentication or connection problem for user '
                          '[%s]', user_email)
        _record_user_error(user_email, e)
        return []
    finally:
        if imap:
            imap.close()

    # Assuming IMAP connection was OK
    if msg_ids:
        counter.load_and_increment_counter('%s_total_count' % user_email,
                                           delta=len(msg_ids),
                                           namespace=str(process_id))
        return chunkify(msg_ids, num_chunks=constants.USER_CONNECTION_LIMIT)
    else:
        counter.load_and_increment_counter('%s_total_count' % user_email,
                                           delta=0,
                                           namespace=str(process_id))
        return []
def delayed_delete_message(msg_id=None, process_id=None, retries=0):
    """Delete one source message once its migrated copy is confirmed.

    Deferred task: it only deletes *msg_id* after (a) the message process is
    marked MIGRATED and (b) a copy labeled Migrated-Migrados with the same
    subject is visible in the mailbox. Otherwise it re-defers itself with
    exponential backoff (60 * 2**retries seconds) up to
    constants.MAX_RETRIES, then gives up with an error log.

    On success it marks the CleanMessageProcess FINISHED and, when every
    message of the process is done, marks the CleanUserProcess FINISHED and
    stamps its progress/latest_activity (in America/Bogota local time).
    """
    process = CleanUserProcess.get_by_id(process_id)
    criteria = process.search_criteria
    # NOTE(review): .get() may return None if no matching entity exists;
    # the attribute access below would then raise — confirm callers always
    # create the CleanMessageProcess first.
    msg_process = CleanMessageProcess.query(ndb.AND(
        CleanMessageProcess.msg_id == msg_id,
        CleanMessageProcess.clean_process_id == process_id)
    ).get()

    if msg_process.status != constants.MIGRATED:
        if retries < constants.MAX_RETRIES:
            deferred.defer(delayed_delete_message, msg_id=msg_id,
                           process_id=process_id, retries=retries + 1,
                           _countdown=60 * 2 ** retries,
                           _queue="elimination")
        else:
            logging.error("Couldn't delete msg %s for user %s"
                          % (msg_id, process.source_email))
        return

    imap = IMAPHelper()
    imap.login(process.source_email, process.source_password)
    imap.select()
    # Close the IMAP connection on every exit path below (the original
    # leaked it on the early-return retry paths).
    try:
        # Look for the migrated email, if it doesn't exist yet
        # retry later
        try:
            subject = imap.get_subject(msg_id=msg_id)
        except Exception as e:
            if retries < constants.MAX_RETRIES:
                # Consistency fix: route this retry through the
                # "elimination" queue like every other retry here.
                deferred.defer(delayed_delete_message, msg_id=msg_id,
                               process_id=process_id, retries=retries + 1,
                               _countdown=60 * 2 ** retries,
                               _queue="elimination")
            else:
                logging.error("Couldn't delete msg %s for user %s, error %s"
                              % (msg_id, process.source_email, e.message))
            return

        messages = imap.list_messages(
            criteria="subject:(%s) label:Migrated-Migrados" % subject)
        if len(messages) < 1:
            if retries < constants.MAX_RETRIES:
                deferred.defer(delayed_delete_message, msg_id=msg_id,
                               process_id=process_id, retries=retries + 1,
                               _countdown=60 * 2 ** retries,
                               _queue="elimination")
            else:
                logging.error("Couldn't delete msg %s for user %s"
                              % (msg_id, process.source_email))
            return

        imap.delete_message(msg_id=msg_id, criteria=criteria)
    finally:
        imap.close()

    msg_process.status = constants.FINISHED
    msg_process.put()

    # Tally progress across all messages of this cleaning process.
    all_done = True
    all_cleaning_messages = CleanMessageProcess.query(
        CleanMessageProcess.clean_process_id == process_id
    ).fetch()
    progress = 0
    for message in all_cleaning_messages:
        if not message.status == constants.FINISHED:
            all_done = False
        else:
            progress += 1

    if all_done:
        process.status = constants.FINISHED
    utc_now = datetime.datetime.utcnow()
    local_tz = pytz.timezone('America/Bogota')
    tz_offset = local_tz.utcoffset(utc_now)
    now = utc_now + tz_offset
    process.progress = progress
    process.latest_activity = "%s" % now
    process.put()