def get(self):
    """Increment the "visitor" counter and write out the current count.

    Fixes two defects in the original handler: a bare ``except:`` that
    swallowed every exception type, and a reference to an undefined name
    ``error`` (which would itself raise ``NameError`` inside the handler).
    """
    name = "visitor"
    try:
        counter.load_and_increment_counter(name)
    except Exception as e:
        # Best effort: log the failure but still report the stored count.
        logging.info(repr(e))
    self.response.out.write('Visitor: ' + str(counter.load_and_get_count(name)))
def post(self):
    """Handle the admin form: bump or drop a periodic counter, then
    redirect back to the counter admin page carrying the submitted values.
    """
    namespace = self.request.get('namespace')
    period_type = self.request.get('period_type')
    period_types = self.request.get('period_types').replace(" ", "")
    period = self.request.get('period')
    name = self.request.get('counter_name')
    delta = self.request.get('delta')
    requested_action = self.request.get('type')

    # Dispatch on the button label; unknown values fall through and only
    # the redirect happens (same as the original if/elif chain).
    action = None
    if requested_action == "Increment Counter":
        action = counter.load_and_increment_counter
    elif requested_action == "Decrement Counter":
        action = counter.load_and_decrement_counter
    if action is not None:
        action(name=name,
               period=period,
               period_types=period_types.split(","),
               namespace=namespace,
               delta=long(delta))

    target = ("/livecount/counter_admin?namespace=" + namespace +
              "&period_type=" + period_type +
              "&period_types=" + period_types +
              "&period=" + period +
              "&counter_name=" + name +
              "&delta=" + delta)
    logging.info("Redirecting to: " + target)
    self.redirect(target)
def get_messages_for_cleaning(user_email=None, process_id=None):
    """Return the chunks of IMAP message ids still needing cleaning.

    Logs in with the stored source password, lists candidate messages
    (with attachments, not yet migrated), and records the total in a
    per-process counter.  Returns ``[msg_ids]`` (a single chunk) when
    there is work, or ``[]`` after marking the process finished.

    Fixes: the IMAP connection leaked when ``list_messages`` raised
    (now closed in ``finally``), and a redundant second datastore get of
    the same ``CleanUserProcess`` entity is removed.
    """
    clean_process = CleanUserProcess.get_by_id(process_id)
    imap = IMAPHelper()
    try:
        imap.login(email=user_email, password=clean_process.source_password)
        msg_ids = imap.list_messages(criteria=clean_process.search_criteria,
                                     only_with_attachments=True,
                                     not_migrated=True)
    finally:
        imap.close()

    # delta=len(msg_ids) covers both the "work found" and "nothing left"
    # cases (the original issued the same call with delta=0 in the else).
    counter.load_and_increment_counter(
        'cleaning_%s_total_count' % user_email,
        delta=len(msg_ids), namespace=str(process_id))

    if msg_ids:
        # chunkify: due to the migration API 1QPS limit
        # should this optimization be used?
        # n = min(constants.USER_CONNECTION_LIMIT, len(msg_ids))
        # return [msg_ids[i::n] for i in xrange(n)]
        return [msg_ids]

    # Nothing to clean: mark the process finished, reusing the entity we
    # already fetched instead of issuing a second get_by_id.
    clean_process.status = constants.FINISHED
    clean_process.put()
    return []
def run(self, message_process_id=None, user_process_id=None,
        move_process_id=None):
    """Attempt to move one message; on the final retry, persist the
    failure on the MoveMessageProcess entity and bump the error counter.
    """
    move_process = MoveProcess.get_by_id(move_process_id)
    user_process = MoveUserProcess.get_by_id(user_process_id)
    try:
        move_message(user_process=user_process,
                     message_process_id=message_process_id,
                     label=move_process.tag)
    except Exception as e:
        retries_left = self.current_attempt < self.max_attempts
        if retries_left:
            # Re-raise so the task queue retries this work item.
            logging.exception(
                'Failed retrieving a messagee id [%s] for [%s], '
                'try again...', message_process_id,
                user_process.user_email)
            raise e
        # Out of attempts: record the permanent failure.
        logging.exception(
            'Failed definitely moving the message id [%s] for user [%s] messages',
            message_process_id, user_process.user_email)
        message_process = MoveMessageProcess.get_by_id(message_process_id)
        message_process.status = constants.FAILED
        message_process.error_description = e.message
        message_process.put()
        counter.load_and_increment_counter(
            '%s_%s_error_counter' % (user_process.user_email,
                                     user_process.key.id()))
def get_messages(user_email=None, tag=None, process_id=None):
    """List trash messages for *user_email*, optionally creating *tag*.

    Returns chunked message ids for processing, or ``[]`` on error
    (after appending the error to the user's ProcessedUser record) or
    when there are no messages.

    Bug fix: the inner error handler called
    ``ProcessedUser.get_by_id(email)`` with the undefined name ``email``
    (NameError); it now uses ``user_email`` like the outer handler.  The
    duplicated error-recording code is extracted to ``_record_user_error``.
    """
    imap = None
    msg_ids = []
    try:
        imap = IMAPHelper()
        imap.oauth1_2lo_login(user_email=user_email)
        try:
            if tag:
                logging.info('Creating label [%s]', tag)
                imap.create_label(tag)
            msg_ids = imap.list_messages(only_from_trash=True)
        except Exception as e:
            logging.exception('Error creating label or retrieving messages for '
                              'user [%s]', user_email)
            _record_user_error(user_email, e)
            return []
    except Exception as e:
        logging.exception('Authentication or connection problem for user '
                          '[%s]', user_email)
        _record_user_error(user_email, e)
        return []
    finally:
        if imap:
            imap.close()

    # Assuming IMAP connection was OK.  delta=len(msg_ids) also covers
    # the empty case (original issued the same call with delta=0).
    counter.load_and_increment_counter('%s_total_count' % user_email,
                                       delta=len(msg_ids),
                                       namespace=str(process_id))
    if msg_ids:
        return chunkify(msg_ids, num_chunks=constants.USER_CONNECTION_LIMIT)
    return []


def _record_user_error(user_email, e):
    """Append *e*'s message to the user's ProcessedUser record, creating
    the record with zeroed counters if it does not exist yet."""
    processed_user = ProcessedUser.get_by_id(user_email)
    if not processed_user:
        processed_user = ProcessedUser(id=user_email, ok_count=0,
                                       error_count=0, total_count=list(),
                                       error_description=list())
    processed_user.error_description.append(e.message)
    processed_user.put()
def post(self):
    """Increment or decrement a simple (non-periodic) counter and
    redirect back to the counter admin page.

    Cleanup: removed the dead commented-out ``counter_list`` tracking
    code and the now-useless ``global counter_list`` declaration (it was
    never assigned); renamed the local ``type`` to avoid shadowing the
    builtin.
    """
    counter_name = self.request.get('counter')
    namespace = self.request.get('namespace')
    delta = self.request.get('delta')
    action_type = self.request.get('type')
    if action_type == "Increment Counter":
        counter.load_and_increment_counter(counter_name, long(delta),
                                           namespace=namespace)
    elif action_type == "Decrement Counter":
        counter.load_and_decrement_counter(counter_name, long(delta),
                                           namespace=namespace)
    self.redirect("/livecount/counter_admin?namespace=" + namespace +
                  "&counter_name=" + counter_name + "&delta=" + delta)
def post(self):
    """Apply the requested periodic-counter operation and bounce back to
    the admin page, echoing every submitted form field in the URL."""
    namespace = self.request.get('namespace')
    period_type = self.request.get('period_type')
    period_types = self.request.get('period_types').replace(" ", "")
    period = self.request.get('period')
    name = self.request.get('counter_name')
    delta = self.request.get('delta')
    op = self.request.get('type')

    if op == "Increment Counter":
        counter.load_and_increment_counter(name=name,
                                           period=period,
                                           period_types=period_types.split(","),
                                           namespace=namespace,
                                           delta=long(delta))
    elif op == "Decrement Counter":
        counter.load_and_decrement_counter(name=name,
                                           period=period,
                                           period_types=period_types.split(","),
                                           namespace=namespace,
                                           delta=long(delta))

    # Build the redirect target once and reuse it for the log line.
    query = ("namespace=" + namespace +
             "&period_type=" + period_type +
             "&period_types=" + period_types +
             "&period=" + period +
             "&counter_name=" + name +
             "&delta=" + delta)
    destination = "/livecount/counter_admin?" + query
    logging.info("Redirecting to: " + destination)
    self.redirect(destination)
def count_view(name, period=None, period_types=None, namespace='default',
               delta=1, batch_size=223):
    """Record a view on counter *name*.

    *period* defaults to now; *period_types* defaults to the standard
    hour/day/week/month rollups.  Mutable defaults are resolved inside
    the body, never in the signature.
    """
    when = datetime.now() if period is None else period
    rollups = period_types
    if rollups is None:
        rollups = [PeriodType.HOUR, PeriodType.DAY,
                   PeriodType.WEEK, PeriodType.MONTH]
    counter.load_and_increment_counter(name,
                                       period=when,
                                       period_types=rollups,
                                       namespace=namespace,
                                       delta=delta,
                                       batch_size=batch_size)
def advanced_event_counts(type, device, url, version):
    """Bump per-event, per-device and per-domain counters for one event.

    Bug fix: the original called ``extract_feed_domain(story_url)`` with
    the undefined name ``story_url`` — the parameter is ``url`` — so the
    function raised NameError on every call.  Also dropped a stray
    semicolon and dead commented-out logging.
    """
    batch_size = None
    if type == 'impression':
        # Impressions are high volume; batch the counter writes.
        batch_size = 10
    domain = extract_feed_domain(url)
    namespace = "website_" + version
    counter.load_and_increment_counter(type, 1, namespace, batch_size)
    counter.load_and_increment_counter(type + "__" + device, 1, namespace,
                                       batch_size)
    counter.load_and_increment_counter(type + "__" + device + "__" + domain,
                                       1, namespace, batch_size)
def advanced_count(name):
    """Increment *name* in the "tweet" namespace with daily and weekly
    period rollups, stamped with the current time."""
    rollups = [PeriodType.DAY, PeriodType.WEEK]
    counter.load_and_increment_counter(name,
                                       datetime.now(),
                                       period_types=rollups,
                                       namespace="tweet",
                                       delta=1)
def count(name):
    """Increment the counter called *name* using all library defaults."""
    counter.load_and_increment_counter(name=name)
def count_view(name, period=None, period_types=None, namespace='default',
               delta=1, batch_size=223):
    """Record a view event on counter *name*.

    Defaults: *period* is the current time, *period_types* is the full
    hour/day/week/month set.
    """
    if period is None:
        period = datetime.now()
    if period_types is None:
        period_types = [PeriodType.HOUR, PeriodType.DAY,
                        PeriodType.WEEK, PeriodType.MONTH]
    options = {
        'period': period,
        'period_types': period_types,
        'namespace': namespace,
        'delta': delta,
        'batch_size': batch_size,
    }
    counter.load_and_increment_counter(name, **options)
def count_event(event):
    """Increment the counter named after *event* by one."""
    step = 1
    counter.load_and_increment_counter(event, step)
def clean_messages(user_email=None, password=None, chunk_ids=list(),
                   retry_count=0, process_id=None):
    """Clean one chunk of messages: archive attachments to Drive and
    re-insert stripped copies via the migration API.

    On the first per-message failure, the remaining (uncleaned) ids are
    re-deferred — with ``retry_count + 1`` while retries remain, or with
    the failing id dropped once ``MAX_CLEAN_RETRIES`` is exhausted — and
    the loop breaks.  The process is marked FINISHED when the chunk is
    (almost) fully handled.

    NOTE(review): the ``password`` parameter is never read (the stored
    ``process.source_password`` is used instead) — confirm with callers.
    NOTE(review): ``chunk_ids=list()`` is a shared mutable default.
    NOTE(review): if ``CleanUserProcess.get_by_id`` raises before
    ``imap`` is assigned, the ``finally`` block raises NameError.
    """
    cleaned_successfully = []
    remaining = []
    # Empty chunk: nothing to do, mark the whole process finished.
    if len(chunk_ids) <= 0:
        process = CleanUserProcess.get_by_id(process_id)
        process.status = constants.FINISHED
        process.put()
        return True
    try:
        process = CleanUserProcess.get_by_id(process_id)
        imap = IMAPHelper()
        imap.login(email=user_email, password=process.source_password)
        imap.select()
        domain_name = user_email.split('@')[1]
        primary_domain = PrimaryDomain.get_or_create(
            domain_name)
        # Ensure the Drive destination exists:
        # ATTACHMENT_FOLDER/<user_email>/
        try:
            drive = DriveHelper(credentials_json=primary_domain.credentials,
                                admin_email=primary_domain.admin_email,
                                refresh_token=primary_domain.refresh_token)
            folder = drive.get_folder(constants.ATTACHMENT_FOLDER)
            if not folder:
                folder = drive.create_folder(constants.ATTACHMENT_FOLDER)
            sub_folder = drive.get_folder(user_email)
            if not sub_folder:
                sub_folder = drive.create_folder(user_email,
                                                 [{'id': folder['id']}])
        except Exception as e:
            logging.error(
                "Couldn't authenticate drive for user %s" % user_email)
            raise e
        # Migration API client, authenticated with the same credentials.
        try:
            migration = MigrationHelper(
                credentials_json=primary_domain.credentials,
                refresh_token=primary_domain.refresh_token)
        except Exception as e:
            logging.error(
                "Couldn't authenticate migration api for user %s" % user_email)
            raise e
        for message_id in chunk_ids:
            try:
                result = clean_message(msg_id=message_id, imap=imap,
                                       drive=drive, migration=migration,
                                       folder_id=sub_folder['id'],
                                       user_email=user_email,
                                       process_id=process_id)
                if result:
                    counter.load_and_increment_counter(
                        'cleaning_%s_ok_count' % (user_email),
                        namespace=str(process_id))
                    cleaned_successfully.append(message_id)
                else:
                    # clean_message reported failure without raising.
                    counter.load_and_increment_counter(
                        'cleaning_%s_error_count' % user_email,
                        namespace=str(process_id))
                    logging.error(
                        'Error cleaning message ID [%s] for user [%s]: [%s] ',
                        message_id, user_email, result)
            except Exception as e:
                logging.exception(
                    'Failed cleaning individual message ID [%s] for user [%s]',
                    message_id, user_email)
                remaining = []
                if retry_count < constants.MAX_CLEAN_RETRIES:
                    # Retry everything not yet cleaned, including the
                    # message that just failed.
                    for chunk_msg in chunk_ids:
                        if chunk_msg not in cleaned_successfully:
                            remaining.append(chunk_msg)
                    logging.info(
                        'Scheduling [%s] remaining cleaning messages for user [%s]',
                        len(remaining), user_email)
                    deferred.defer(clean_messages, user_email=user_email,
                                   chunk_ids=remaining,
                                   process_id=process_id,
                                   retry_count=retry_count + 1)
                else:
                    # Out of retries: drop the failing message, count it
                    # as an error, and reschedule the rest.
                    for chunk_msg in chunk_ids:
                        if message_id == chunk_msg:
                            continue
                        if chunk_msg not in cleaned_successfully:
                            remaining.append(chunk_msg)
                    logging.info(
                        'Giving up cleaning message [%s] for '
                        'user [%s]', message_id, user_email)
                    counter.load_and_increment_counter(
                        'cleaning_%s_error_count' % user_email, delta=1,
                        namespace=str(process_id))
                    deferred.defer(clean_messages, user_email=user_email,
                                   chunk_ids=remaining,
                                   process_id=process_id)
                # Stop processing this chunk; the deferral owns the rest.
                break
    except Exception as e:
        logging.exception('Failed cleaning messages chunk')
        raise e
    finally:
        if imap:
            imap.close()
        # Heuristic completion check: small chunk, or nearly all cleaned.
        if len(chunk_ids) < 10 or (len(cleaned_successfully) + 10 >
                                   len(chunk_ids)):
            process.status = constants.FINISHED
            process.put()
def move_messages(user_email=None, tag=None, chunk_ids=list(),
                  process_id=None, retry_count=0, chunk_sizes=None):
    """Copy ranges of trash messages into *tag* for *user_email*.

    Each element of *chunk_ids* is a list of contiguous message ids,
    copied as one IMAP range (``first:last``); *chunk_sizes* gives each
    range's message count for counter deltas.  On a per-chunk exception
    the not-yet-moved chunks are re-deferred with ``retry_count + 1``,
    unless three retries have passed with no progress, in which case the
    remainder is counted as errors and dropped.

    NOTE(review): ``chunk_ids=list()`` is a shared mutable default.
    """
    moved_successfully = []
    imap = None
    number_moved_successfully = 0
    number_moved_unsuccessfully = 0
    if len(chunk_ids) <= 0:
        return True
    try:
        imap = IMAPHelper()
        imap.oauth1_2lo_login(user_email=user_email)
        imap.select(only_from_trash=True)
        for i, chunk in enumerate(chunk_ids):
            chunk_size = chunk_sizes[i]
            try:
                # One IMAP COPY for the whole contiguous id range.
                result, data = imap.copy_message(
                    msg_id="%s:%s" % (chunk[0], chunk[-1]),
                    destination_label=tag
                )
                if result == 'OK':
                    counter.load_and_increment_counter(
                        '%s_ok_count' % (user_email),
                        namespace=str(process_id), delta=chunk_size)
                    moved_successfully.extend(chunk)
                    number_moved_successfully += chunk_size
                else:
                    # Server answered, but not OK: count the whole range
                    # as errors and keep going with the next chunk.
                    counter.load_and_increment_counter(
                        '%s_error_count' % user_email,
                        namespace=str(process_id), delta=chunk_size)
                    number_moved_unsuccessfully += chunk_size
                    logging.error(
                        'Error moving message IDs [%s-%s] for user [%s]: '
                        'Result [%s] data [%s]', chunk[0], chunk[-1],
                        user_email, result, data)
            except Exception as e:
                logging.exception(
                    'Failed moving message range IDs [%s-%s] for user [%s]',
                    chunk[0], chunk[-1], user_email)
                # Collect every chunk not yet confirmed moved (including
                # the one that just failed) plus its size.
                remaining = []
                remaining_chunk_sizes = []
                number_moved_unsuccessfully += chunk_size
                for j, original_chunk in enumerate(chunk_ids):
                    if original_chunk not in moved_successfully:
                        remaining.append(original_chunk)
                        remaining_chunk_sizes.append(chunk_sizes[j])
                # Keep retrying if messages are being moved
                if retry_count >= 3 and len(moved_successfully) == 0:
                    logging.error('Giving up with remaining [%s] messages for '
                                  'user [%s]', number_moved_unsuccessfully,
                                  user_email)
                    counter.load_and_increment_counter(
                        '%s_error_count' % user_email,
                        delta=number_moved_unsuccessfully,
                        namespace=str(process_id))
                else:
                    logging.info(
                        'Scheduling [%s] remaining messages for user [%s]',
                        number_moved_unsuccessfully, user_email)
                    deferred.defer(move_messages, user_email=user_email,
                                   tag=tag, chunk_ids=remaining,
                                   process_id=process_id,
                                   retry_count=retry_count + 1,
                                   chunk_sizes=remaining_chunk_sizes)
                # The deferral (or the give-up) owns the rest of the work.
                break
    except Exception as e:
        # Login/select failed before any chunk work: count the entire
        # batch as errors.
        logging.exception('Authentication, connection or select problem for '
                          'user [%s]', user_email)
        counter.load_and_increment_counter(
            '%s_error_count' % user_email, delta=len(chunk_ids),
            namespace=str(process_id))
    finally:
        logging.info(
            'Succesfully moved [%s] messages for user [%s] in this task',
            number_moved_successfully, user_email)
        if imap:
            imap.close()
def get(self):
    """Increment the "visitor" counter, logging (never raising) failures.

    Consistency fix: uses the ``except Exception as e`` form employed
    throughout the rest of the file instead of the legacy Python-2-only
    ``except Exception, e`` comma syntax.
    """
    try:
        name = "visitor"
        counter.load_and_increment_counter(name)
    except Exception as e:
        # Counting is best effort; the request must not fail over it.
        logging.info(repr(e))