def home():
    tracer = app.config['TRACER']
    tracer.start_span(name='home')
    log("home dir", "root")
    result = "Tracing requests"
    tracer.end_span()
    return 'Text summarizer service. Please use /summarize'
def health():
    tracer = app.config['TRACER']
    tracer.start_span(name='health')
    log("alive", "health_check")
    result = "Tracing requests"
    tracer.end_span()
    return 'alive', 200
def predict():
    project_id = os.environ['GCLOUD_PROJECT']
    tracer = app.config['TRACER']
    tracer.start_span(name='summarize')
    text = request.get_data()
    topic = request.args.get('topic')
    # if not topic:
    #     return jsonify({"error": "no topic present"}), 400
    if not text.strip():
        return jsonify({"error": "empty text"}), 400
    summary, accuracy = model.summarize(text, topic)
    health()
    log(str(summary), "summary")
    js = json.dumps({
        "summary": summary,
        "accuracy": accuracy,
        "text": str(text)
    })
    result = "Tracing requests"
    tracer.end_span()
    return jsonify(js)
def dump():
    tracer = app.config['TRACER']
    tracer.start_span(name='dump')
    url = "http://db-dump/dump/" + trace_id
    result = requests.get(url)
    tracer.end_span()
    return result.content
def write():
    tracer = app.config['TRACER']
    tracer.start_span(name='write')
    url = "http://db-write/write/" + trace_id
    result = requests.get(url)
    tracer.end_span()
    return result.content
def read(name):
    tracer = app.config['TRACER']
    tracer.start_span(name='read')
    url = "http://db-read/read/" + trace_id + "/" + name
    result = requests.get(url)
    tracer.end_span()
    return result.content
def index():
    tracer = app.config['TRACER']
    tracer.start_span(name='index')
    result = "Tracing requests"
    tracer.end_span()
    return result
def index():
    tracer.start_span(name='index')
    # Add up to 1 sec delay, weighted toward zero
    time.sleep(random.random() ** 2)
    result = "Tracing requests"
    tracer.end_span()
    return result
def dump(trace_id):
    conn = connect()
    tracer = initialize_tracer(project_id, trace_id)
    app.config['TRACER'] = tracer
    tracer.start_span(name='database')
    db_content = select(conn)
    tracer.end_span()
    db_content_to_print = format(db_content)
    return render_page(db_content_to_print)
def index():
    tracer = app.config['TRACER']
    tracer.start_span(name='index')
    # Add up to 1 sec delay, weighted toward zero
    time.sleep(random.random() ** 2)
    result = "Tracing requests"
    tracer.end_span()
    return result
def insert(trace_id):
    conn = connect()
    tracer = initialize_tracer(project_id, trace_id)
    app.config['TRACER'] = tracer
    table = os.environ['TABLE']
    name = names[randint(0, 14)]
    surname = surnames[randint(0, 14)]
    tracer.start_span(name='database')
    db_content = write(conn, table, name, surname)
    tracer.end_span()
    db_content_to_print = format(db_content)
    return render_page(db_content_to_print)
def get_gke_cpu(project_id_glob, start, end, secs):
    total = 'total_cpu'
    with tracer.start_span(name=f"{app_name}:get_gke_cpu") as outer_trace_span:
        with outer_trace_span.span(name=f"get_projects({project_id_glob})") as get_proj_span:
            projects = get_projects(project_id_glob)
        resp = {total: 0}
        thread_results = {}
        with outer_trace_span.span(name="get_metrics()") as get_metrics_span:
            with ThreadPoolExecutor(max_workers=int(MAX_WORKERS)) as executor:
                for project_id in projects:
                    thread_results[project_id] = executor.submit(
                        get_avg_cpu_cores, os.environ['MON_PROJECT_ID'],
                        project_id, start, end, int(secs))
                futures.wait(thread_results.values(), return_when=futures.ALL_COMPLETED)
            for project_id in thread_results:
                # .result() re-raises any exception raised inside the worker thread
                try:
                    thread_results[project_id] = thread_results[project_id].result()
                    if thread_results[project_id]:
                        resp[total] += thread_results[project_id]
                        resp[project_id] = thread_results[project_id]
                except Exception as ex:
                    error_reporting_client.report_exception()
                    thread_results[project_id] = ex
                    resp[project_id] = str(ex)
    return resp
async def middleware(request, handler):
    if handler.__name__ == "health_check":
        return await handler(request)
    span = None
    tracer = None
    try:
        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_headers(request.headers)
        tracer = initialize_tracer(project_id, span_context, propagator)
        span = tracer.start_span()
        span.name = handler.__name__
        tracer.add_attribute_to_current_span(HTTP_HOST, request.host)
        tracer.add_attribute_to_current_span(HTTP_METHOD, request.method)
        tracer.add_attribute_to_current_span(HTTP_PATH, request.path)
        tracer.add_attribute_to_current_span(HTTP_URL, str(request.url))
        request["trace_header"] = propagator.to_headers(span_context)
    except:  # NOQA
        logging.exception("Could not initialize the tracer")
    try:
        response = await handler(request)
        if tracer:
            tracer.add_attribute_to_current_span(HTTP_STATUS_CODE, response.status)
        return response
    finally:
        if tracer:
            tracer.end_span()
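# initialize_tracer() is called by several handlers above (and by middleware())
# but is not defined in this listing. A minimal sketch, assuming the OpenCensus
# Stackdriver exporter and the GoogleCloudFormatPropagator used in middleware();
# the sampler and exporter options here are assumptions, not the original
# configuration.
def initialize_tracer(project_id, span_context=None, propagator=None):
    from opencensus.ext.stackdriver.trace_exporter import StackdriverExporter
    from opencensus.trace import samplers
    from opencensus.trace.propagation import google_cloud_format
    from opencensus.trace.span_context import SpanContext
    from opencensus.trace.tracer import Tracer

    # dump(trace_id)/insert(trace_id) above pass a raw trace id string, while
    # middleware() passes a full SpanContext; accept either form here.
    if isinstance(span_context, str):
        span_context = SpanContext(trace_id=span_context)

    exporter = StackdriverExporter(project_id=project_id)
    return Tracer(
        exporter=exporter,
        sampler=samplers.AlwaysOnSampler(),
        span_context=span_context,
        propagator=propagator or google_cloud_format.GoogleCloudFormatPropagator(),
    )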
def trace(service_color):
    tracer = app.config['TRACER']
    tracer.start_span(name='trace')
    headers = {}
    ## For Propagation test ##
    # Call service 'green' from service 'blue'
    if os.environ['SERVICE_NAME'] == 'blue':
        for header in TRACE_HEADERS_TO_PROPAGATE:
            if header in request.headers:
                headers[header] = request.headers[header]
        ret = requests.get("http://localhost:9000/trace/green", headers=headers)
    # Call service 'red' from service 'green'
    elif os.environ['SERVICE_NAME'] == 'green':
        for header in TRACE_HEADERS_TO_PROPAGATE:
            if header in request.headers:
                headers[header] = request.headers[header]
        ret = requests.get("http://localhost:9000/trace/red", headers=headers)
    result = render_page()
    tracer.end_span()
    return result
def log(message, log_name):
    tracer = app.config['TRACER']
    tracer.start_span(name='log')
    # [START logging_quickstart]
    # Imports the Google Cloud client library
    from google.cloud import logging

    # Instantiates a client
    logging_client = logging.Client()

    # Selects the log to write to
    logger = logging_client.logger(str(log_name))

    # Writes the log entry
    logger.log_text(message)

    print('Logged: {}'.format(message))
    # [END logging_quickstart]
    result = "Tracing requests"
    tracer.end_span()
def get_avg_cpu_cores(project_id, GKE_project_id, start_time, end_time, alignment_period_seconds):
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    start = datetime.datetime.strptime(start_time, '%Y-%m-%d_%H:%M:%S')
    end = datetime.datetime.strptime(end_time, '%Y-%m-%d_%H:%M:%S')
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": int(end.timestamp())},
            "start_time": {"seconds": int(start.timestamp())},
        }
    )
    aggregation = monitoring_v3.Aggregation(
        {
            "alignment_period": {"seconds": alignment_period_seconds},
            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_SUM,
        }
    )
    cpu_cores = 0
    with tracer.start_span(name=f"{app_name} get {GKE_project_id}'s metrics") as trace_span:
        results = client.list_time_series(
            request={
                "name": project_name,
                "filter": 'metric.type = "kubernetes.io/node/cpu/total_cores" AND resource.type="k8s_node" AND project= ' + GKE_project_id,
                "interval": interval,
                "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
                "aggregation": aggregation,
            }
        )
        total = 0.0
        for result in results:
            logger.log_text(f"data points collected: {len(result.points)}",
                            severity=LOG_SEVERITY_DEBUG)
            for point in result.points:
                total += point.value.double_value
            cpu_cores += total / len(result.points)
    return cpu_cores
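# get_gke_cpu() and get_avg_cpu_cores() above rely on module-level names
# (tracer, logger, error_reporting_client, MAX_WORKERS, app_name,
# LOG_SEVERITY_DEBUG, futures/ThreadPoolExecutor, monitoring_v3) that are not
# part of this listing. A minimal sketch of that setup, assuming Cloud
# Monitoring, Cloud Logging and Error Reporting clients plus an OpenCensus
# tracer; the concrete values of app_name and MAX_WORKERS are assumptions.
import datetime
import os
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor

from google.cloud import error_reporting, monitoring_v3
from google.cloud import logging as cloud_logging
from opencensus.trace.tracer import Tracer

app_name = "gke-cpu-aggregator"                    # assumed service name used in span names
MAX_WORKERS = os.environ.get("MAX_WORKERS", "10")  # worker threads for get_gke_cpu()
LOG_SEVERITY_DEBUG = "DEBUG"                       # severity label passed to logger.log_text()

logger = cloud_logging.Client().logger(app_name)
error_reporting_client = error_reporting.Client()
tracer = Tracer()                                  # exporter/sampler configuration omitted in this sketch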
def service(service_color):
    tracer = app.config['TRACER']
    tracer.start_span(name='service')
    result = render_page()
    tracer.end_span()
    return result
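# Most of the Flask handlers above read the tracer from app.config['TRACER'].
# A minimal sketch of how the summarizer service (home/health/predict) might
# wire that up, assuming the hypothetical initialize_tracer() sketched earlier;
# the route paths and HTTP methods are assumptions, not the original routing.
from flask import Flask

app = Flask(__name__)
app.config['TRACER'] = initialize_tracer(os.environ['GCLOUD_PROJECT'])

app.add_url_rule('/', 'home', home)
app.add_url_rule('/health', 'health', health)
app.add_url_rule('/summarize', 'predict', predict, methods=['POST'])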