def get_timeout_dict(self):
    if REPORT_CPU_TIME:
        cpu_start = quota.get_request_cpu_usage()
    timeout_dict_str = memcache.get(key=KEY_TIMEOUT_DICT, namespace=self.namespace)
    if isinstance(timeout_dict_str, dict):
        timeout_dict = timeout_dict_str
    elif not timeout_dict_str:
        timeout_dict = {}
    else:
        def list_to_dict(src_list):
            def pairwise(iterable):
                itnext = iter(iterable).next
                while True:
                    yield itnext(), unpack_db_timer(itnext())
            return dict(pairwise(src_list))
        _list = re.split(u'\uffff|\ufffe', timeout_dict_str)
        log(u'GAE_Timer().get_timeout_dict(): total number of timers=%d'
            % (len(_list) / 2))
        timeout_dict = list_to_dict(_list)
    if not timeout_dict:
        timeout_dict = {}
    if REPORT_CPU_TIME:
        cpu_end = quota.get_request_cpu_usage()
        log(u'get_timeout_dict(): %d megacycles' % (cpu_end - cpu_start))
    return timeout_dict
def get(self):
    start = quota.get_request_cpu_usage()
    entry_id = int(self.request.get('entry_id'))
    entries = Blognone_News.all()
    entries.filter('entryID =', entry_id)
    # Get the comments for this entry.
    CommentSpider = Comment_Spider('http://www.blognone.com/news/' + str(entry_id))
    entry = entries.get()
    entry_json = {
        'id': entry.key().id(),
        'title': entry.title,
        'description': entry.description,
        'comment': CommentSpider.Comments,
        'url': entry.url,
        'author': entry.author
    }
    self.response.out.write(json.dumps(entry_json))
    end = quota.get_request_cpu_usage()
    logging.info("Get_Entry() cost %d megacycles." % (end - start))
def get(self):
    start = quota.get_request_cpu_usage()
    # Get the entries newer than the last fetched one and convert the
    # Blognone news from the datastore to JSON.
    last_fetched_id = int(self.request.get('last_fetched_id'))
    if last_fetched_id == -1:
        entries = Blognone_News.all()
    else:
        entries = Blognone_News.all()
        entries.filter('entryID >', last_fetched_id)
    all_news = []
    for entry in entries:
        temp = {
            'id': entry.key().id(),
            'entry_id': entry.entryID,
            'title': entry.title,
            'short_description': entry.description[0:34],
            'url': entry.url,
            'thumbnailURL': entry.thumbnailURL,
            'author': entry.author
        }
        all_news.append(temp)
    self.response.out.write(json.dumps(all_news))
    end = quota.get_request_cpu_usage()
    logging.info("Browse_Json() cost %d megacycles." % (end - start))
def repl_func(*args, **kwargs):
    start = quota.get_request_cpu_usage()
    ret = func(*args, **kwargs)
    end = quota.get_request_cpu_usage()
    name = func.__name__
    if len(args) > 0 and hasattr(args[0], '__class__'):
        name = "%s.%s" % (args[0].__class__.__name__, name)
    logging.debug("%s method cost %d megacycles." % (name, end - start))
    return ret
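# repl_func above reads like the inner wrapper of a profiling decorator.
# A minimal sketch of the assumed enclosing decorator (the name
# `log_megacycles` is hypothetical, not from the original source):
import functools
import logging
from google.appengine.api import quota

def log_megacycles(func):
    @functools.wraps(func)
    def repl_func(*args, **kwargs):
        start = quota.get_request_cpu_usage()
        ret = func(*args, **kwargs)
        end = quota.get_request_cpu_usage()
        logging.debug("%s cost %d megacycles." % (func.__name__, end - start))
        return ret
    return repl_func

# Usage: put @log_megacycles above any function or handler method.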
def set_timeout_dict(self, timeout_dict):
    if REPORT_CPU_TIME:
        cpu_start = quota.get_request_cpu_usage()
    _list = timeout_dict.items()
    timeout_dict_str = u'\uffff'.join(
        [u'%s\ufffe%s' % (timerid, pack_db_timer(db_timer))
         for (timerid, db_timer) in _list])
    memcache.set(key=KEY_TIMEOUT_DICT, value=timeout_dict_str, time=0,
                 namespace=self.namespace)
    log(u'GAE_Timer().set_timeout_dict(): total number of timers=%d' % len(_list))
    if REPORT_CPU_TIME:
        cpu_end = quota.get_request_cpu_usage()
        log(u'set_timeout_dict()_string: %d megacycles' % (cpu_end - cpu_start))
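# get_timeout_dict()/set_timeout_dict() assume pack_db_timer()/
# unpack_db_timer() helpers that round-trip one timer record through a
# single unicode string. The field layout below is purely illustrative
# (DBTimer and its fields are hypothetical; the real format is not shown
# in these snippets):
def pack_db_timer(db_timer):
    # Join the record's fields with a separator that cannot occur in them.
    return u'\u0000'.join([db_timer.url, db_timer.payload,
                           unicode(db_timer.interval)])

def unpack_db_timer(db_timer_str):
    (url, payload, interval) = db_timer_str.split(u'\u0000')
    return DBTimer(url=url, payload=payload, interval=int(interval))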
def get(self):
    start = quota.get_request_cpu_usage()
    # Delete all of the Blognone news entries.
    entries = db.GqlQuery("SELECT * FROM Blognone_News ORDER BY date DESC")
    for entry in entries:
        self.response.out.write('Deleting ' + entry.title + '<br />')
        entry.delete()
    end = quota.get_request_cpu_usage()
    logging.info("Delete() cost %d megacycles." % (end - start))
def get_last_status(self, timerid):
    if REPORT_CPU_TIME:
        cpu_start = quota.get_request_cpu_usage()
    last_status_str = memcache.get(key=KEY_STATUS_BASE + str(timerid),
                                   namespace=self.namespace)
    if isinstance(last_status_str, basestring):
        (last_timeout, last_result) = last_status_str.split(u'\u0000')
        last_status = dict(last_timeout=last_timeout, last_result=last_result)
    else:
        last_status = last_status_str
    if not last_status:
        last_status = dict(last_timeout=u'', last_result=u'')
    if REPORT_CPU_TIME:
        cpu_end = quota.get_request_cpu_usage()
        log(u'get_last_status(): %d megacycles' % (cpu_end - cpu_start))
    return last_status
def get(self):
    start = quota.get_request_cpu_usage()
    # Get the Blognone news from the datastore.
    entries = db.GqlQuery("SELECT * FROM Blognone_News ORDER BY date DESC")
    template_values = {
        'entries': entries
    }
    path = os.path.join(os.path.dirname(__file__), 'browse.html')
    self.response.out.write(template.render(path, template_values))
    end = quota.get_request_cpu_usage()
    logging.info("Browse() cost %d megacycles." % (end - start))
def kapowAPILiveRPC_v3(destination, price, startDate, endDate, info_type, response):
    startkapowAPILiveRPC_v3 = quota.get_request_cpu_usage()
    url = None
    if info_type == "flights":
        url = get_flights(destination)
    if info_type == "hotels":
        url = get_hotels_request_url(destination, startDate, endDate)
    if info_type == "city-break":
        url = get_citybreak(destination)
    rpc = urlfetch.create_rpc(100)
    rpc.callback = create_callback_ajax_v3(rpc, destination, price, startDate,
                                           endDate, response)
    urlfetch.make_fetch_call(rpc, url, "GET")
    rpc.wait()
    endkapowAPILiveRPC_v3 = quota.get_request_cpu_usage()
    logging.info("kapowAPILiveRPC_v3() : cost %d megacycles." %
                 (endkapowAPILiveRPC_v3 - startkapowAPILiveRPC_v3))
def json(self):
    """Return a JSON-ifyable representation of the pertinent data.

    This is for FirePython/FireLogger so we must limit the volume by
    omitting stack traces and environment.  Also, times and megacycles
    are converted to integers representing milliseconds.
    """
    traces = []
    with self._lock:
        for t in self.traces:
            d = {'start': t.start_offset_milliseconds(),
                 'call': t.service_call_name(),
                 'request': t.request_data_summary(),
                 'response': t.response_data_summary(),
                 'duration': t.duration_milliseconds(),
                 'api': mcycles_to_msecs(t.api_mcycles()),
                 }
            traces.append(d)
    data = {
        'start': int(self.start_timestamp * 1000),
        'duration': int((self.end_timestamp - self.start_timestamp) * 1000),
        'overhead': int(self.overhead * 1000),
        'traces': traces,
    }
    if hasattr(quota, 'get_request_cpu_usage'):
        data['cpu'] = mcycles_to_msecs(quota.get_request_cpu_usage())
    return data
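# The json() variants in this section assume an mcycles_to_msecs()
# helper. A plausible sketch built on the documented
# quota.megacycles_to_cpu_seconds() conversion (only the helper's name
# appears in the snippets; this body is an assumption):
from google.appengine.api import quota

def mcycles_to_msecs(mcycles):
    # Convert megacycles to integer milliseconds of equivalent CPU time.
    return int(quota.megacycles_to_cpu_seconds(mcycles) * 1000)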
def json(self):
    """Return a JSON-ifyable representation of the pertinent data.

    This is for FirePython/FireLogger so we must limit the volume by
    omitting stack traces and environment.  Also, times and megacycles
    are converted to integers representing milliseconds.
    """
    traces = []
    for t in self.traces:
        d = {
            "start": t.start_offset_milliseconds(),
            "call": t.service_call_name(),
            "request": t.request_data_summary(),
            "response": t.response_data_summary(),
            "duration": t.duration_milliseconds(),
            "api": mcycles_to_msecs(t.api_mcycles()),
        }
        traces.append(d)
    return {
        "start": int(self.start_timestamp * 1000),
        "duration": int((self.end_timestamp - self.start_timestamp) * 1000),
        "cpu": mcycles_to_msecs(quota.get_request_cpu_usage()),
        "overhead": int(self.overhead * 1000),
        "traces": traces,
    }
def deactivate_tracepoint(name):
    global tracepoints
    global active_tracepoint

    if name != '__other__':
        if active_tracepoint is None:
            logging.warning(
                'Not deactivating profiling tracepoint "%s" because it was '
                'not activated properly' % (name))
            return
        if active_tracepoint != name:
            logging.warning(
                'Not deactivating profiling tracepoint "%s" because another '
                'tracepoint is active: %s' % (name, active_tracepoint))
            return
    else:
        if active_tracepoint is not None:
            logging.warning(
                'Profiling end, but the tracepoint "%s" is still active. '
                'Forcibly deactivating it!' % (active_tracepoint))
            deactivate_tracepoint(active_tracepoint)

    # If we got here, then it should be safe to deactivate the tracepoint.
    if name != '__other__':
        active_tracepoint = None
    tracepoints[name]['clock_timer'].stop()
    tracepoints[name]['api_timer'].set_end(quota.get_request_api_cpu_usage())
    tracepoints[name]['cpu_timer'].set_end(quota.get_request_cpu_usage())
    for t in ('clock', 'api', 'cpu'):
        value = tracepoints[name]['%s_timer' % (t)].get_and_clear()
        tracepoints[name]['%s_usage' % (t)] += value
        if t == 'api':
            # The API CPU usage is counted towards the total CPU usage too!
            tracepoints[name]['cpu_usage'] += value
def wrapper(*args, **kwargs):
    from google.appengine.api import quota
    start_cpu = quota.get_request_cpu_usage()
    start_api = quota.get_request_api_cpu_usage()
    my_parent = _tlocal.parent
    start = time.time()
    _tlocal.parent = start
    try:
        return target(*args, **kwargs)
    finally:
        _tlocal.parent = my_parent
        end = time.time()
        end_cpu = quota.get_request_cpu_usage()
        end_api = quota.get_request_api_cpu_usage()
        logging.info(
            '*** USAGE TRACING ***: {"function": "%s.%s", "cpu": %s, '
            '"api": %s, "elapsed": %s, "start": %f, "parent": %s}' % (
                target.__module__, target.__name__,
                int(round(quota.megacycles_to_cpu_seconds(end_cpu - start_cpu) * 1000)),
                int(round(quota.megacycles_to_cpu_seconds(end_api - start_api) * 1000)),
                int(round((end - start) * 1000)),
                start,
                "%f" % my_parent if my_parent else "null"))
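# wrapper() above is the inner function of a tracing decorator that
# threads a parent timestamp through a thread-local so the log can
# reconstruct the call tree from start/parent timestamps. A minimal
# sketch of the assumed surrounding module state (`trace_usage` and
# `_tlocal` are reconstructed here, not confirmed by the source; the
# quota logging is omitted for brevity):
import functools
import threading
import time

# One parent slot per thread; nested traced calls restore it on exit.
_tlocal = threading.local()
_tlocal.parent = None

def trace_usage(target):
    @functools.wraps(target)
    def wrapper(*args, **kwargs):
        my_parent = _tlocal.parent
        _tlocal.parent = time.time()
        try:
            return target(*args, **kwargs)
        finally:
            _tlocal.parent = my_parent
    return wrapper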
def get_summary_proto(self):
    """Return a protobuf representing a summary of this recorder."""
    summary = datamodel_pb.RequestStatProto()
    summary.set_start_timestamp_milliseconds(int(self.start_timestamp * 1000))
    method = self.http_method()
    if method != 'GET':
        summary.set_http_method(method)
    path = self.http_path()
    if path != '/':
        summary.set_http_path(path)
    query = self.http_query()
    if query:
        summary.set_http_query(query)
    status = int(self.http_status)
    if status != 200:
        summary.set_http_status(status)
    duration = int(1000 * (self.end_timestamp - self.start_timestamp))
    summary.set_duration_milliseconds(duration)
    api_mcycles = self.get_total_api_mcycles()
    if api_mcycles:
        summary.set_api_mcycles(api_mcycles)
    summary.set_processor_mcycles(quota.get_request_cpu_usage())
    summary.set_overhead_walltime_milliseconds(int(self.overhead * 1000))
    rpc_stats = self.get_rpcstats().items()
    rpc_stats.sort(key=lambda x: (-x[1], x[0]))
    for key, value in rpc_stats:
        x = summary.add_rpc_stats()
        x.set_service_call_name(key)
        x.set_total_amount_of_calls(value)
    return summary
def get(self):
    if self.repo:
        # To reuse the cursor from the previous task, we need to apply
        # exactly the same filter.  So we use the utcnow previously used
        # instead of the current time.
        utcnow = self.params.utcnow or utils.get_utcnow()
        max_entry_date = (
            utcnow -
            datetime.timedelta(seconds=CleanUpInTestMode.DELETION_AGE_SECONDS))
        query = model.Person.all_in_repo(self.repo)
        query.filter('entry_date <=', max_entry_date)
        if self.params.cursor:
            query.with_cursor(self.params.cursor)
        # Use query.get() instead of "for person in query".  With a
        # for-loop, query.cursor() points to an unexpected position.
        person = query.get()
        # When the repository is no longer in test mode, abort the deletion.
        while person and self.in_test_mode(self.repo):
            person.delete_related_entities(delete_self=True)
            if quota.get_request_cpu_usage() > CPU_MEGACYCLES_PER_REQUEST:
                # Stop before running into the hard limit on CPU time per
                # request, to avoid aborting in the middle of an operation.
                # Add the task back in, restarting at the current spot:
                self.schedule_next_task(query, utcnow)
                break
            person = query.get()
    else:
        for repo in model.Repo.list():
            if self.in_test_mode(repo):
                self.add_task_for_repo(repo, self.task_name(), self.ACTION)
def __call__(self, environ, start_response):
    # Don't record if the request is to clio itself, or the config says no.
    if (environ["PATH_INFO"] == config.QUEUE_URL
            or environ["PATH_INFO"].startswith(config.BASE_URL)
            or not config.should_record(environ)):
        return self.application(environ, start_response)
    request = webob.Request(environ)
    start_time = time.time()
    response = request.get_response(self.application)
    elapsed = int((time.time() - start_time) * 1000)
    status_code, status_text = response.status.split(" ", 1)
    record = model.RequestRecord(
        method=request.method,
        path=request.path_qs,
        request_headers=_stringifyHeaders(request.headers),
        status_code=int(status_code),
        status_text=status_text,
        response_headers=_stringifyHeaders(response.headers),
        wall_time=elapsed,
        cpu_time=quota.get_request_cpu_usage(),
        random=random.random(),
    )
    prospective_search.match(record,
                             result_relative_url=config.QUEUE_URL,
                             result_task_queue=config.QUEUE_NAME)
    return response(environ, start_response)
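# __call__ above belongs to a WSGI middleware class (clio's recorder).
# A minimal sketch of the assumed class skeleton and wiring; the name
# `LoggingMiddleware` is a guess for illustration:
class LoggingMiddleware(object):
    """Records request/response metadata for every wrapped request."""

    def __init__(self, application):
        # The wrapped WSGI application whose requests get recorded.
        self.application = application

    # __call__ is defined as above.

# Wiring, e.g. in the WSGI entry point:
#   app = LoggingMiddleware(my_wsgi_app)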
def post(self):
    startAjaxRequestQuota = quota.get_request_cpu_usage()
    # Get the config properties.
    config_properties = configparsers.loadPropertyFile('config')
    destination = self.request.get("destination")
    startDateRaw = self.request.get("startDate")
    ratingRaw = self.request.get("rating")
    numberOfNightsRaw = self.request.get("nights")
    rating = None
    startDate = None
    logging.debug(startDateRaw)
    if ratingRaw is not None:
        rating = True
    if startDateRaw is not None and startDateRaw != '':
        startDate = startDateRaw.split('-')
        try:
            dateTime = datetime.datetime(int(startDate[0]), int(startDate[1]),
                                         int(startDate[2]))
            startDate = dateTime
            endDateTimeDelta = datetime.timedelta(days=int(numberOfNightsRaw))
            endDate = startDate + endDateTimeDelta
        except ValueError, e:
            logging.error(e)
            logging.error("AjaxAPIHandler_v3 : Invalid date values or date format")
def __init__(self):
    self.clock_timer = lib.profiler.core.Timer()
    self.api_timer = lib.profiler.core.Timer()
    self.cpu_timer = lib.profiler.core.Timer()
    self.clock_timer.start()
    self.api_timer.set_begin(quota.get_request_api_cpu_usage())
    self.cpu_timer.set_begin(quota.get_request_cpu_usage())
def pre_hook(service, call, request, response):
    global clock_timer
    global api_timer
    global cpu_timer
    assert service == "datastore_v3"
    clock_timer.start()
    api_timer.set_begin(quota.get_request_api_cpu_usage())
    cpu_timer.set_begin(quota.get_request_cpu_usage())
def get(self): """Presents Active and Incomplete resources.""" start = quota.get_request_cpu_usage() complete_resources = Resource().all().filter('status =', 'Active') incomplete_resources = Resource().all().filter('status =', 'Incomplete') excluded_resources = Resource().all().filter('status =', 'Excluded') deleted_resources = Resource().all().filter('status =', 'Deleted') template_values = { 'complete_resources': complete_resources, 'incomplete_resources': incomplete_resources, 'excluded_resources': excluded_resources, 'deleted_resources': deleted_resources, } end = quota.get_request_cpu_usage() logging.info('get request cost %d megacycles.' % (end - start)) path = os.path.join(os.path.dirname(__file__), 'wikistatus.html') self.response.out.write(template.render(path, template_values))
def run_count(make_query, update_counter, counter, cpu_megacycles):
    """Scans the entities matching a query for a limited amount of CPU time."""
    cpu_limit = quota.get_request_cpu_usage() + cpu_megacycles
    while quota.get_request_cpu_usage() < cpu_limit:
        # Get the next batch of entities.
        query = make_query()
        if counter.last_key:
            query = query.filter('__key__ >', db.Key(counter.last_key))
        entities = query.order('__key__').fetch(FETCH_LIMIT)
        if not entities:
            counter.last_key = ''
            break
        # Pass the entities to the counting function.
        for entity in entities:
            update_counter(counter, entity)
        # Remember where we left off.
        counter.last_key = str(entities[-1].key())
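# A hypothetical caller for run_count(): count Person entities in
# CPU-budgeted slices, resuming from counter.last_key across task
# invocations. The Person model and the 1000-megacycle budget are
# assumptions for illustration:
def count_persons(counter):
    def make_query():
        return model.Person.all()

    def update_counter(counter, entity):
        counter.count += 1

    # Scan until roughly 1000 more megacycles have been consumed;
    # counter.last_key records where the next slice should resume.
    run_count(make_query, update_counter, counter, 1000)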
def test_clean_up_in_test_mode_multi_tasks(self):
    """Test the clean up in test mode when it is broken into multiple tasks."""
    tasks.CleanUpInTestMode.DELETION_AGE_SECONDS = 2 * 3600  # 2 hours
    utcnow = datetime.datetime(2010, 1, 1, 7, 0, 0)
    set_utcnow_for_test(utcnow)
    self.mox = mox.Mox()
    cleanup = self.initialize_handler(tasks.CleanUpInTestMode())

    # Simulate add_task_for_repo() because it doesn't work in unit tests.
    def add_task_for_repo(repo, task_name, action, **kwargs):
        test_handler.initialize_handler(cleanup, action, repo=repo, params=kwargs)
        cleanup.get()

    self.mox.StubOutWithMock(cleanup, 'add_task_for_repo')
    (cleanup.add_task_for_repo(
        'haiti', mox.IsA(str), mox.IsA(str),
        utcnow=str(calendar.timegm(utcnow.utctimetuple())),
        cursor=mox.IsA(str), queue_name=mox.IsA(str))
     .WithSideEffects(add_task_for_repo).MultipleTimes())

    # Always pretend that we have consumed more CPU than the threshold,
    # so that it creates a new task for each entry.
    self.mox.StubOutWithMock(quota, 'get_request_cpu_usage')
    quota.get_request_cpu_usage().MultipleTimes().AndReturn(
        tasks.CPU_MEGACYCLES_PER_REQUEST + 1)
    self.mox.ReplayAll()

    config.set(test_mode=True, repo='haiti')
    # This should run multiple tasks and finally delete all records.
    cleanup.get()
    assert db.get(self.key_p1) is None
    assert db.get(self.key_p2) is None

    self.mox.UnsetStubs()
    self.mox.VerifyAll()
def post(self): """Updates the Status of the selected resource.""" start = quota.get_request_cpu_usage() wikiurl = self.request.get('wiki_url') action = self.request.get('action') if action == 'Update': status = self.request.get('status') resource = Resource().all().filter('wikiurl =', wikiurl).get() resource.status = status resource.put() if action == 'WikiSync': wikiurl_encoded = wikiurl.encode('utf-8') syncResource(wikiurl_encoded) if action == 'FusionSync': wikiurl_encoded = wikiurl.encode('utf-8') updateFusionTableRow(wikiurl_encoded) end = quota.get_request_cpu_usage() logging.info('post request cost %d megacycles.' % (end - start)) self.redirect('/admin/wikistatus')
def get(self):
    if not jt.auth.auth(self):
        jt.auth.denied(self)
        return
    viewLat = float(self.request.get('viewLat'))
    viewLon = float(self.request.get('viewLon'))
    physicalLat = float(self.request.get('physicalLat'))
    physicalLon = float(self.request.get('physicalLon'))

    start = quota.get_request_cpu_usage()
    viewBox = jtLocation.getRangeBoxFromCoordinate(
        viewLat, viewLon, jt.gamesettings.tagViewRadius)
    pickupBox = jtLocation.getRangeBoxFromCoordinate(
        physicalLat, physicalLon, jt.gamesettings.tagPickupRadius)
    end = quota.get_request_cpu_usage()
    logging.info('rangebox cost: %d MegaCycles' % (end - start))

    start = quota.get_request_cpu_usage()
    viewQuery = db.GqlQuery(
        "SELECT * FROM Tag WHERE currentCoordinate >= :1 "
        "AND currentCoordinate <= :2 AND deleted = False "
        "AND pickedUp = False AND hasReachedDestination = False",
        db.GeoPt(lat=viewBox.minLat, lon=viewBox.minLon),
        db.GeoPt(lat=viewBox.maxLat, lon=viewBox.maxLon))
    end = quota.get_request_cpu_usage()
    logging.info('query cost: %d MegaCycles' % (end - start))

    # Copy into a list only because it's unclear whether viewQuery can be
    # modified in place.
    tagList = []
    start = quota.get_request_cpu_usage()
    # Filter for the ability to pick up.
    for tag in viewQuery:
        if pickupBox.containsCoordinate(tag.currentCoordinate.lat,
                                        tag.currentCoordinate.lon):
            tag.withinPickupRange = True
        tagList.append(tag)
    end = quota.get_request_cpu_usage()
    logging.info('filter cost: %d MegaCycles' % (end - start))

    result = jt.modelhelper.JsonQueryUtil.toArray('tags', tagList)
    self.response.out.write(result)
def activate():
    initial_api_ms = quota.megacycles_to_cpu_seconds(quota.get_request_api_cpu_usage())
    initial_cpu_ms = quota.megacycles_to_cpu_seconds(quota.get_request_cpu_usage())
    global tracepoints
    _zero_timers()
    activate_tracepoint('__other__')
    if initial_cpu_ms > 1 or initial_api_ms > 1:
        # We were either not activate()'d at the very beginning, or
        # App Engine did some trick...
        logging.warning(
            'Request profiling: Initial CPU/API counters are not zero: '
            '%.1f/%.1f' % (initial_cpu_ms, initial_api_ms))
    apiproxy_stub_map.apiproxy.GetPreCallHooks().Push('request_profiler', _pre_hook)
    apiproxy_stub_map.apiproxy.GetPostCallHooks().Append('request_profiler', _post_hook)
def get(self):
    start = quota.get_request_cpu_usage()
    # Get the Blognone news from the datastore and convert it to JSON.
    entries = db.GqlQuery("SELECT * FROM Blognone_News ORDER BY date DESC")
    all_news = []
    for entry in entries:
        temp = {
            'id': entry.key().id(),
            'entry_id': entry.entryID,
            'title': entry.title,
            'short_description': entry.description[0:34],
            'url': entry.url,
            'thumbnailURL': entry.thumbnailURL,
            'author': entry.author
        }
        all_news.append(temp)
    self.response.out.write(json.dumps(all_news))
    end = quota.get_request_cpu_usage()
    logging.info("Browse_Json() cost %d megacycles." % (end - start))
def get(self):
    query = model.Person.past_due_records()
    for person in query:
        if quota.get_request_cpu_usage() > CPU_MEGACYCLES_PER_REQUEST:
            # Stop before running into the hard limit on CPU time per
            # request, to avoid aborting in the middle of an operation.
            # TODO(kpy): Figure out whether to queue another task here.
            # Is it safe for two tasks to run in parallel over the same
            # set of records returned by the query?
            break
        person.put_expiry_flags()
        if (person.expiry_date and
                utils.get_utcnow() - person.expiry_date > EXPIRED_TTL):
            person.wipe_contents()
def get(self):
    foo = quota.get_request_cpu_usage()
    time = datetime.datetime.now()
    user = users.get_current_user()
    if not user:
        navbar = ('<p>Welcome, <a href="%s">sign in</a> to customize your '
                  'experience.</p>' % users.create_login_url(self.request.path))
        tz_form = ''
    else:
        prefs = models.get_userprefs(user.user_id())
        time += datetime.timedelta(0, 0, 0, 0, 0, prefs.tz_offset)
        navbar = ('<p>Welcome %s, <a href="%s">sign out</a>.</p>'
                  % (user.email(), users.create_logout_url(self.request.path)))
        tz_form = """
            <form action="/prefs" method="post">
                TZ Offset from UTC (can be negative):
                <input name="tz_offset" id="tz_offset" type="text" size="4"
                       value="%d" />
                <input type="submit" value="Set!" />
            </form>
        """ % prefs.tz_offset
    bar = quota.get_request_cpu_usage()
    footer = ("APPLICATION_ID=%s, CURRENT_VERSION_ID=%s, AUTH_DOMAIN=%s, "
              "SERVER_SOFTWARE=%s, begin=%s, end=%s" % (
                  os.environ['APPLICATION_ID'],
                  os.environ['CURRENT_VERSION_ID'],
                  os.environ['AUTH_DOMAIN'],
                  os.environ['SERVER_SOFTWARE'],
                  foo, bar))
    self.response.headers['Content-Type'] = 'text/html'
    self.response.out.write(
        '%s<hr><p>The time is: %s ...</p><hr>%s<hr>©2011<hr>%s'
        % (navbar, str(time), tz_form, footer))
def run_count(make_query, update_counter, counter):
    """Scans the entities matching a query for a limited amount of CPU time."""
    while quota.get_request_cpu_usage() < CPU_MEGACYCLES_PER_REQUEST:
        # Get the next batch of entities.
        query = make_query()
        if counter.last_key:
            query = query.filter('__key__ >', db.Key(counter.last_key))
        entities = query.order('__key__').fetch(FETCH_LIMIT)
        if not entities:
            counter.last_key = ''
            break
        # Pass the entities to the counting function.
        for entity in entities:
            update_counter(counter, entity)
        # Remember where we left off.
        counter.last_key = str(entities[-1].key())
def activate_tracepoint(name):
    global tracepoints
    global active_tracepoint

    if active_tracepoint is not None:
        logging.warning(
            'Not activating profiling tracepoint "%s" because another '
            'tracepoint is active: %s' % (name, active_tracepoint))
        return
    if name != '__other__':
        active_tracepoint = name

    tracepoints.setdefault(name, {
        'clock_usage': 0,
        'api_usage': 0,
        'cpu_usage': 0,
        'clock_timer': lib.profiler.core.Timer(),
        'api_timer': lib.profiler.core.Timer(),
        'cpu_timer': lib.profiler.core.Timer(),
    })
    tracepoints[name]['clock_timer'].start()
    tracepoints[name]['api_timer'].set_begin(quota.get_request_api_cpu_usage())
    tracepoints[name]['cpu_timer'].set_begin(quota.get_request_cpu_usage())
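# activate_tracepoint()/deactivate_tracepoint() assume a
# lib.profiler.core.Timer with start()/stop(), set_begin()/set_end(),
# and get_and_clear(). A minimal sketch of a compatible implementation
# (an assumption; the real class is not shown in these snippets):
import time

class Timer(object):
    def __init__(self):
        self._begin = None
        self._end = None

    def start(self):
        # For wall-clock use: record the current time as the begin mark.
        self.set_begin(time.time())

    def stop(self):
        self.set_end(time.time())

    def set_begin(self, value):
        # For API/CPU use: the caller passes a megacycle counter reading.
        self._begin = value

    def set_end(self, value):
        self._end = value

    def get_and_clear(self):
        # Return the accumulated delta and reset for the next interval.
        delta = (self._end - self._begin) if self._begin is not None else 0
        self._begin = self._end = None
        return delta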
def get(self):
    if self.repo:
        query = self.query()
        if self.params.cursor:
            query.with_cursor(self.params.cursor)
        for person in query:
            if quota.get_request_cpu_usage() > CPU_MEGACYCLES_PER_REQUEST:
                # Stop before running into the hard limit on CPU time per
                # request, to avoid aborting in the middle of an operation.
                # Add the task back in, restarting at the current spot:
                self.schedule_next_task(query)
                break
            was_expired = person.is_expired
            person.put_expiry_flags()
            if (utils.get_utcnow() - person.get_effective_expiry_date()
                    > EXPIRED_TTL):
                person.wipe_contents()
            else:
                # Treat this as a regular deletion.
                if person.is_expired and not was_expired:
                    delete.delete_person(self, person)
    else:
        for repo in model.Repo.list():
            self.add_task_for_repo(repo, self.task_name(), self.ACTION)
def clientlog(text):
    if 'dg' in params and params['dg'] == "t":
        from google.appengine.api import quota
        print callback + ".info(" + text + ", " + str(
            quota.get_request_cpu_usage()) + ");"