def _log_request(call, kind, keys_only, count, response_ByteSize, request_ByteSize, cost, timers):
    global requests
    #global clock_timer
    #global api_timer
    #global cpu_timer
    clock_timer = timers.clock_timer
    api_timer = timers.api_timer
    cpu_timer = timers.cpu_timer
    if count is None:
        count = -1
    if request_ByteSize is None:
        request_ByteSize = -1  # -1 flags an unknown size, consistent with the other fields
    if response_ByteSize is None:
        response_ByteSize = -1
    if cost is not None:
        cost = str(cost).replace('\n', ', ') + '\n '
    requests.append({
        'call' : call,
        'kind' : kind,
        'keys_only' : keys_only,
        'entities_count' : count,
        'sent_bytes' : request_ByteSize,
        'received_bytes' : response_ByteSize,
        'clock_ms' : clock_timer.get_and_clear() * 1000,
        'api_ms' : quota.megacycles_to_cpu_seconds(api_timer.get_and_clear()) * 1000,
        'cpu_ms' : quota.megacycles_to_cpu_seconds(cpu_timer.get_and_clear()) * 1000,
        'cost' : cost,
    })

def _log_request(call, kind, keys_only, count, response_ByteSize, request_ByteSize, cost):
    global requests
    global clock_timer
    global api_timer
    global cpu_timer

    if count is None:
        count = -1
    if request_ByteSize is None:
        request_ByteSize = -1  # -1 flags an unknown size, consistent with the other fields
    if response_ByteSize is None:
        response_ByteSize = -1
    if cost is not None:
        cost = str(cost).replace("\n", ", ") + "\n "
    requests.append(
        {
            "call": call,
            "kind": kind,
            "keys_only": keys_only,
            "entities_count": count,
            "sent_bytes": request_ByteSize,
            "received_bytes": response_ByteSize,
            "clock_ms": clock_timer.get_and_clear() * 1000,
            "api_ms": quota.megacycles_to_cpu_seconds(api_timer.get_and_clear()) * 1000,
            "cpu_ms": quota.megacycles_to_cpu_seconds(cpu_timer.get_and_clear()) * 1000,
            "cost": cost,
        }
    )

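# Both _log_request variants above call get_and_clear() on timer objects
# that are not shown in this excerpt. The sketch below is a hypothetical
# Timer implementing the implied interface, assuming a simple
# accumulate-and-reset counter: get_and_clear() returns the value gathered
# so far and resets it, so each logged request only accounts for activity
# since the previous log entry.
class Timer(object):
    def __init__(self):
        self.value = 0.0

    def add(self, amount):
        # Accumulate elapsed clock time (seconds) or megacycles.
        self.value += amount

    def get_and_clear(self):
        value = self.value
        self.value = 0.0
        return value
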
def show_summary():
    global tracepoints
    global uptime

    ms_sub = {
        'clock_ms' : [],
        'api_ms' : [],
        'cpu_ms' : [],
    }
    t_to_visual = {
        'clock_ms' : 'ms',
        'api_ms' : 'api_cpu_ms',
        'cpu_ms' : 'cpu_ms',
    }

    deactivate_tracepoint('__other__')  # and populate the statistics for '__other__'

    for subsystem in tracepoints.keys():
        tracepoints[subsystem]['clock_ms'] = tracepoints[subsystem]['clock_usage'] * 1000
        tracepoints[subsystem]['api_ms'] = quota.megacycles_to_cpu_seconds(tracepoints[subsystem]['api_usage']) * 1000
        tracepoints[subsystem]['cpu_ms'] = quota.megacycles_to_cpu_seconds(tracepoints[subsystem]['cpu_usage']) * 1000

    summary_data = tracepoints['__other__']

    subsystems = tracepoints.keys()
    subsystems.sort()  # keep the visualize order
    for subsystem in subsystems:
        if subsystem == '__other__':
            continue  # this is the total counter
        source_data = tracepoints[subsystem]
        for t in ms_sub.keys():
            if summary_data[t] != 0:
                percentage = (source_data[t] / summary_data[t]) * 100
            else:
                percentage = 0
            ms_sub[t].append((subsystem, percentage))

    line = ''
    fmt = '%s = %3.0f%%'
    for t in ('clock_ms', 'cpu_ms', 'api_ms'):  # list to keep the visualize order
        info = []
        total_ms = '%-10s = %7.2f' % (t_to_visual[t], summary_data[t])
        other_percentage = 100
        for subsystem in ms_sub[t]:
            percentage = subsystem[1]
            other_percentage -= percentage
            info.append(fmt % (subsystem[0], percentage))
        info.append(fmt % ('other', other_percentage))
        line += '\n%s (%s)' % (total_ms, ', '.join(info))

    uptime += 1
    logging.info('Request summary (uptime=%d, ID=%s:%s : %s @ %s):%s' % (
        uptime,
        os.environ.get('REQUEST_ID_HASH'),
        os.environ.get('CURRENT_VERSION_ID'),
        os.environ.get('SERVER_SOFTWARE'),
        os.environ.get('DATACENTER'),
        line
    ))

def activate():
    # Snapshot the counters that should still be (close to) zero at request
    # start; CPU usage feeds the CPU counter, API usage feeds the API one,
    # and both are converted to milliseconds like everywhere else here.
    initial_cpu_ms = quota.megacycles_to_cpu_seconds(quota.get_request_cpu_usage()) * 1000
    initial_api_ms = quota.megacycles_to_cpu_seconds(quota.get_request_api_cpu_usage()) * 1000

    global tracepoints
    _zero_timers()
    activate_tracepoint('__other__')

    if initial_cpu_ms > 1 or initial_api_ms > 1:
        # we were either not activate()'d at the very beginning, or App Engine did some trick...
        logging.warning('Request profiling: Initial CPU/API counters are not zero: %.1f/%.1f' % (
            initial_cpu_ms, initial_api_ms))

    apiproxy_stub_map.apiproxy.GetPreCallHooks().Push('request_profiler', _pre_hook)
    apiproxy_stub_map.apiproxy.GetPostCallHooks().Append('request_profiler', _post_hook)

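# activate() registers _pre_hook and _post_hook, which are not shown in this
# excerpt. The placeholders below are a minimal sketch of their shape: the
# post-call signature matches _log_api_post_call further below, while the
# pre-call signature is an assumption based on what apiproxy_stub_map passes
# to pre-call hooks. The bodies are hypothetical, not the original code.
def _pre_hook(service, call, request, response):
    # Placeholder: a real implementation would start the per-RPC timers here.
    pass

def _post_hook(service, call, request, response, rpc, error):
    # Placeholder: a real implementation would stop the timers and feed
    # _log_request with the RPC's byte sizes and cost here.
    pass
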
def wrapper(*args, **kwargs):
    from google.appengine.api import quota
    start_cpu = quota.get_request_cpu_usage()
    start_api = quota.get_request_api_cpu_usage()
    my_parent = _tlocal.parent
    start = time.time()
    _tlocal.parent = start
    try:
        return target(*args, **kwargs)
    finally:
        _tlocal.parent = my_parent
        end = time.time()
        end_cpu = quota.get_request_cpu_usage()
        end_api = quota.get_request_api_cpu_usage()
        logging.info("""*** USAGE TRACING ***: {"function": "%s.%s", "cpu": %s, "api": %s, "elapsed": %s, "start": %f, "parent": %s}""" % (
            target.__module__, target.__name__,
            int(round(quota.megacycles_to_cpu_seconds(end_cpu - start_cpu) * 1000)),
            int(round(quota.megacycles_to_cpu_seconds(end_api - start_api) * 1000)),
            int(round((end - start) * 1000)),
            start,
            "%f" % my_parent if my_parent else "null"))

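# wrapper references `target` and `_tlocal`, which are defined outside the
# snippet above. Below is a minimal sketch of the enclosing decorator;
# the decorator name trace_usage and the thread-local setup are assumptions,
# since only `target` and `_tlocal.parent` are implied by the original.
import threading
from functools import wraps

class _Local(threading.local):
    parent = None  # class attribute acts as the per-thread default

_tlocal = _Local()

def trace_usage(target):
    """Hypothetical decorator whose inner function is the wrapper above."""
    @wraps(target)
    def wrapper(*args, **kwargs):
        # ... body as shown above; stubbed here to keep the sketch short ...
        return target(*args, **kwargs)
    return wrapper

# Usage: decorating a function makes every call emit one "*** USAGE
# TRACING ***" log line with CPU, API, and wall-clock milliseconds; nested
# decorated calls report their parent's start timestamp.
@trace_usage
def expensive_operation():
    pass
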
def _log_api_post_call(service, call, request, response, rpc, error):
    try:
        if service == 'datastore_v3' and call in ('Put', 'Touch', 'Delete', 'Commit'):
            cost = response.cost()
            cost_info = ' idx_writes=%d entity_writes=%d entity_bytes=%d' % (
                cost.index_writes(), cost.entity_writes(), cost.entity_write_bytes())
        else:
            cost_info = ''
        # convert megacycles to milliseconds to match the 'api_cpu_ms' label
        logging.info('RPC(post) %s.%s %.3fapi_cpu_ms%s',
                     service, call,
                     quota.megacycles_to_cpu_seconds(rpc.cpu_usage_mcycles) * 1000,
                     cost_info)
    except Exception as e:
        logging.exception(e)

def mcycles_to_seconds(mcycles):
    """Helper function to convert megacycles to seconds."""
    if mcycles is None:
        return 0
    return quota.megacycles_to_cpu_seconds(mcycles)

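# For reference, a roughly equivalent standalone conversion, assuming the
# fixed 1200-megacycles-per-second reference CPU the Python 2 SDK's quota
# module used internally; that constant is an assumption about SDK
# internals, so verify it against your SDK version before relying on it.
def mcycles_to_seconds_standalone(mcycles):
    if mcycles is None:
        return 0
    return mcycles / 1200.0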