def _get_wiki_url(_url, q):
    """Fetch a wiki page for topic ``q`` (or a random hot topic) and record the result.

    Returns a dict with 'statusCode' and 'body'; any failure (including the
    simulated one) is captured in 'body' rather than raised.
    """
    hot_topics = [
        'cholas', 'cheras', 'pandyas', 'pallavas', 'sangam_era', 'kural'
    ]
    resp = {"statusCode": 400}
    # Keep only the last path component; fall back to a random hot topic.
    topic = q.split('/')[-1] if q else q
    if not topic:
        topic = random.choice(hot_topics)
    try:
        random_sleep()
        if _trigger_exception():
            # Simulated-failure path: annotate the trace before raising.
            xray_recorder.put_annotation("SIMULATED_ERRORS", "True")
            xray_recorder.begin_subsegment("BRITTLE_LEGACY_APP")
            xray_recorder.current_subsegment().put_annotation(
                "MANUALLY_TRIGGERRED_IN_SUBSEGMENT", "True")
            xray_recorder.end_subsegment()
            raise Exception("RANDOM_ERROR: Simulate Mystique Failure")
        page = requests.get(f'{_url}/{topic}', params={})
        # resp is mutated below; the metadata entry holds the same reference.
        xray_recorder.put_metadata('RESPONSE', resp)
        resp["statusCode"] = page.status_code
        payload = page.json()
        resp["body"] = json.dumps({"message": payload["body"]["message"]})
        _ddb_put_item(resp)
    except Exception as e:
        resp["body"] = json.dumps({"message": str(e)})
    return resp
async def run_task(task, sqs_msg):
    """Decode a task definition and run its execution and TTL-update coroutines.

    Parameters
    ----------
    task : dict
        Task descriptor; must contain 'task_id'.
    sqs_msg :
        The originating SQS message, forwarded to the execution coroutine.

    Returns
    -------
    bool
        Always True on completion.
    """
    global execution_is_completed_flag
    xray_recorder.begin_segment('run_task')
    logging.info("Running Task: {}".format(task))
    xray_recorder.begin_subsegment('encoding')
    bin_protobuf = prepare_arguments_for_execution(task)
    # Fixed typo: was 'tast_str'.
    task_str = bin_protobuf.decode("utf-8")
    task_def = json.loads(task_str)
    submit_pre_agent_measurements(task)
    task_id = task["task_id"]
    fname_stdout = "./stdout-{task_id}.log".format(task_id=task_id)
    fname_stderr = "./stderr-{task_id}.log".format(task_id=task_id)
    # Context managers guarantee the log files are closed even when
    # one of the gathered tasks raises (previously they leaked on error).
    # NOTE(review): the handles are not passed to the lambda-execution
    # variant below — opening them preserves the original side effect.
    with open(fname_stdout, "w") as f_stdout, open(fname_stderr, "w") as f_stderr:
        xray_recorder.end_subsegment()
        execution_is_completed_flag = 0
        task_execution = asyncio.create_task(
            do_task_local_lambda_execution_thread(perf_tracker_post, task,
                                                  sqs_msg, task_def))
        task_ttl_update = asyncio.create_task(do_ttl_updates_thread(task))
        await asyncio.gather(task_execution, task_ttl_update)
    xray_recorder.end_segment()
    logging.info("Finished Task: {}".format(task))
    return True
async def do_task_local_execution_thread(perf_tracker, task, sqs_msg, task_def,
                                         f_stdout, f_stderr, fname_stdout):
    """Run the mock compute engine as a subprocess and poll it to completion.

    Sets the global completion flag and reports results once the child exits.
    Returns the subprocess return code.
    """
    global execution_is_completed_flag
    xray_recorder.begin_subsegment('sub-process-1')
    worker_args = task_def["worker_arguments"]
    cmd = [
        "./mock_compute_engine",
        worker_args[0],
        worker_args[1],
        worker_args[2],
    ]
    print(cmd)
    proc = subprocess.Popen(cmd, stdout=f_stdout, stderr=f_stderr, shell=False)
    # Poll without blocking the event loop; sleep between checks.
    retcode = proc.poll()
    while retcode is None:
        await asyncio.sleep(work_proc_status_pull_interval_sec)
        retcode = proc.poll()
    # Child exited: flag completion and publish its results.
    execution_is_completed_flag = 1
    process_subprocess_completion(perf_tracker, task, sqs_msg, fname_stdout)
    xray_recorder.end_subsegment()
    return retcode
def lambda_handler(event, context):
    """Look up a symbol's buying advice in the ChaosTrader DynamoDB table.

    Returns an API Gateway proxy response: 200 with {"is_good_buy": ...}
    when the symbol exists, otherwise 404 with an empty body.
    """
    query_params = event.get('queryStringParameters', {})
    symbol = query_params.get('symbol', None)
    xray_recorder.begin_subsegment('dynamo-call')
    dynamo = boto3.client('dynamodb')
    record = dynamo.get_item(TableName='ChaosTrader',
                             Key={'symbol': {'S': symbol}})
    xray_recorder.end_subsegment()
    cors_headers = {"Access-Control-Allow-Origin": "*"}
    item = record.get('Item')
    if not item:
        return {
            "isBase64Encoded": False,
            "statusCode": 404,
            "body": json.dumps({}),
            "headers": cors_headers
        }
    is_good_buy = item.get('is_good_buy', {}).get('BOOL', None)
    return {
        "isBase64Encoded": False,
        "statusCode": 200,
        "body": json.dumps({"is_good_buy": is_good_buy}),
        "headers": cors_headers
    }
def handler(event, context):
    """Parse Kinesis records into per-type log dicts and save both sets to S3.

    Records that fail JSON parsing are skipped; records missing required
    fields are collected into failed_dict by parse_payload_to_log_dict.
    """
    raw_records = event["Records"]
    logger.debug(raw_records)
    log_dict = dict()
    failed_dict = dict()
    xray_recorder.begin_subsegment("parse")
    for payload in kinesis.parse_records(raw_records):
        try:
            payload_parsed = json.loads(payload)
        except json.JSONDecodeError:
            logger.debug(f"Ignoring non-JSON data: {payload}")
            continue
        baikonur_logging.parse_payload_to_log_dict(
            payload_parsed,
            log_dict,
            failed_dict,
            LOG_TYPE_FIELD,
            LOG_TIMESTAMP_FIELD,
            LOG_ID_FIELD,
            LOG_TYPE_UNKNOWN_PREFIX,
            LOG_TYPE_FIELD_WHITELIST,
            timestamp_required=True,
        )
    xray_recorder.end_subsegment()
    # BUG FIX: the valid-data upload previously passed failed_dict,
    # so good records were never saved and failures were saved twice.
    baikonur_logging.save_json_logs_to_s3(s3_client, log_dict,
                                          "Valid log data")
    baikonur_logging.save_json_logs_to_s3(
        s3_client, failed_dict,
        "One or more necessary fields are unavailable")
def extract_dd_trace_context(event):
    """
    Extract Datadog trace context from the Lambda `event` object.

    Write the context to a global `dd_trace_context`, so the trace
    can be continued on the outgoing requests with the context injected.

    Save the context to an X-Ray subsegment's metadata field, so the X-Ray
    trace can be converted to a Datadog trace in the Datadog backend with
    the correct context.
    """
    global dd_trace_context
    headers = event.get('headers', {})
    trace_id = headers.get(TraceHeader.TRACE_ID)
    parent_id = headers.get(TraceHeader.PARENT_ID)
    sampling_priority = headers.get(TraceHeader.SAMPLING_PRIORITY)
    if not (trace_id and parent_id and sampling_priority):
        # AWS Lambda runtime caches global variables between invocations:
        # reset so the previous invocation's context is never reused.
        dd_trace_context = {}
        return
    dd_trace_context = {
        'trace-id': trace_id,
        'parent-id': parent_id,
        'sampling-priority': sampling_priority,
    }
    xray_recorder.begin_subsegment(XraySubsegment.NAME)
    xray_recorder.current_subsegment().put_metadata(
        XraySubsegment.KEY,
        dd_trace_context,
        XraySubsegment.NAMESPACE,
    )
    xray_recorder.end_subsegment()
def upload_final_results(results):
    """Upload a local results file from /tmp to the output S3 bucket.

    Parameters
    ----------
    results: string, required
        Name of the local file with final results

    Raises
    ------
    botocore.exceptions.ClientError
        Re-raised after annotating the trace when the upload fails.
    """
    dest_key = 'lambda-etl-refarch/output/{}'.format(results)
    xray_recorder.begin_subsegment('## upload_final_results')
    subsegment = xray_recorder.current_subsegment()
    subsegment.put_metadata(
        'filename',
        f's3://{output_bucket}/lambda-etl-refarch/output/{results}')
    results_path = os.path.join('/tmp', results)
    # Push to the target bucket; annotate success/failure either way.
    try:
        response = s3.upload_file(results_path, output_bucket, dest_key)
    except botocore.exceptions.ClientError as e:
        subsegment.put_annotation('FINAL_RESULTS_UPLOAD', 'FAILURE')
        log.error(f'Unable to upload final results: {results}')
        log.debug(e)
        raise
    else:
        log.info(
            f"Uploaded final results to s3://{output_bucket}/lambda-etl-refarch/output/{results}"
        )
        subsegment.put_annotation('FINAL_RESULTS_UPLOAD', 'SUCCESS')
    xray_recorder.end_subsegment()
def load_prerequisites(ctx, object_list):
    """Load prerequisites for each named object in ctx, one X-Ray subsegment each.

    Parameters
    ----------
    ctx : mapping
        Maps object names to objects exposing get_prerequisites().
    object_list : iterable of str
        Names to load, in order.
    """
    for o in object_list:
        xray_recorder.begin_subsegment("prereq:%s" % o)
        try:
            log.debug(f"Loading prerequisite '{o}'...")
            ctx[o].get_prerequisites()
        finally:
            # BUG FIX: previously the subsegment leaked if
            # get_prerequisites() raised; always close it.
            xray_recorder.end_subsegment()
    log.debug(f"End prerequisite loading...")
def stuff(key):
    """Flask cache endpoint backed by S3 with ROT13 'encryption'.

    PUT stores rot13(body) under cache/<key> (201); GET retrieves and
    decodes it, mapping a missing key to 204; DELETE removes it (204);
    any other method returns 406.
    """
    if request.method == 'PUT':
        xray_recorder.begin_subsegment('encryption')
        data = codecs.encode(request.data.decode('utf-8'), 'rot_13')
        xray_recorder.end_subsegment()
        app.logger.info('Setting \"{}\"=\"{}\"'.format(key, data))
        s3_client.Object(bucket, 'cache/' + key).put(Body=data)
        body = json.dumps({'status': 'OK'})
        status = 201
    elif request.method == 'DELETE':
        obj = s3_client.Object(bucket, 'cache/' + key)
        obj.delete()
        body = ''
        status = 204
    elif request.method == 'GET':
        try:
            app.logger.info('Getting \"{}\"'.format(key))
            obj = s3_client.Object(bucket, 'cache/' + key)
            body = obj.get()['Body'].read().decode('utf-8')
            xray_recorder.begin_subsegment('decryption')
            body = codecs.decode(body, 'rot_13')
            xray_recorder.end_subsegment()
            status = 200
        except ClientError as ex:
            # BUG FIX: logger.exception('Error', ex) passed the exception as
            # a %-format argument with no placeholder, which triggers a
            # logging formatting error; give it an explicit placeholder.
            app.logger.exception('Error: %s', ex)
            if ex.response['Error']['Code'] == 'NoSuchKey':
                status = 204
                body = ''
            else:
                raise ex
    else:
        body = json.dumps({'status': 'Method Not Supported'})
        status = 406
    app.logger.info('Status {}, Body \"{}\"'.format(status, body))
    return make_response(body, status)
def download_intermediate_results(filename):
    """Download a file from S3

    Parameters
    ----------
    filename: string, required
        Name of the file in S3 source bucket (OpenAQ)

    Returns
    -------
    processed_file: string
        Local path to downloaded file
    """
    xray_recorder.begin_subsegment('## download_data_file')
    subsegment = xray_recorder.current_subsegment()
    # NOTE(review): '(unknown)' looks like a redacted placeholder for the
    # real key — confirm against the original source before relying on it.
    subsegment.put_metadata('filename', f's3://{output_bucket}/(unknown)')
    try:
        processed_file = os.path.join('/tmp', os.path.basename(filename))
        s3.download_file(output_bucket, filename, processed_file)
        subsegment.put_annotation('DATA_DOWNLOAD', 'SUCCESS')
    except botocore.exceptions.ClientError as e:
        subsegment.put_annotation('DATA_DOWNLOAD', 'FAILURE')
        # BUG FIX: message typo 'rsult' -> 'result'.
        log.error(f'Unable to download result file: (unknown)')
        log.debug(e)
        raise
    xray_recorder.end_subsegment()
    return processed_file
def get_text(url):
    """
    Read a news article from `url` with the Newspaper library, strip some
    boilerplate phrases, and run Newspaper's nlp() pass over it.

    Returns a dict with 'text', 'header', 'summary', 'keywords' and 'image';
    on any download/parse failure 'text' is '-1' and the rest are empty.
    """
    logger.info(f'getText: initialised Article and ntlk')
    try:
        print("getText: Initialising Article...")
        xray_recorder.begin_subsegment('getText: init article')
        article = Article(url)
        xray_recorder.end_subsegment()

        print("getText: Downloading Article...")
        xray_recorder.begin_subsegment('getText: '
                                       'download article')
        article.download()
        xray_recorder.end_subsegment()

        print("getText: Parsing Article...")
        xray_recorder.begin_subsegment('getText: parse article')
        article.parse()
        xray_recorder.end_subsegment()
    except Exception as e:
        print("getText: Exception: " + str(e))
        return {
            'text': '-1',
            'header': '',
            'summary': '',
            'keywords': '',
            'image': ''
        }

    xray_recorder.begin_subsegment('getText: nlp article')
    article.nlp()
    xray_recorder.end_subsegment()

    # Strip known boilerplate fragments; order matters for the Getty variants.
    boilerplate = (
        "\n\n",
        "Image copyright Getty Images ",
        "Image copyright Getty Images/Reuters",
        "Image caption ",
        "Media playback is unsupported on your device ",
        "Media caption ",
    )
    text = article.text
    for fragment in boilerplate:
        text = text.replace(fragment, "")

    return {
        'text': text,
        'header': article.title,
        'summary': article.summary,
        'keywords': article.keywords,
        'image': article.top_image
    }
def kinesis_put(log_records: list):
    """Put log records onto the target Kinesis stream in batches of 500.

    Throttled records are retried; records that fail with InternalFailure
    are collected and returned.

    Returns
    -------
    list
        Records that permanently failed (InternalFailure).
    """
    xray_recorder.begin_subsegment("kinesis put records")
    retry_list = []
    failed_list = []
    # Each PutRecords request can support up to 500 records
    # see: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records
    for batch_index, batch in enumerate(split_list(log_records, 500)):
        records = []
        for record in batch:
            data_blob = json.dumps(record).encode('utf-8')
            # Partition key max length is 256 chars; 20 random chars is plenty.
            partition_key: str = ''.join(
                random.choices(RANDOM_ALPHANUMERICAL, k=20))
            records.append({
                'Data': data_blob,
                'PartitionKey': partition_key,
            })
        logger.debug(records)
        retry_count = 0
        while len(records) > 0:
            subsegment = xray_recorder.begin_subsegment(
                f"put records batch {batch_index} retry {retry_count}")
            response = kinesis_client.put_records(
                Records=records,
                StreamName=TARGET_STREAM_NAME,
            )
            subsegment.put_annotation("records", len(records))
            subsegment.put_annotation("failed", response['FailedRecordCount'])
            if response['FailedRecordCount'] == 0:
                # BUG FIX: a second end_subsegment() here used to close the
                # outer "kinesis put records" subsegment prematurely.
                xray_recorder.end_subsegment()
                break
            retry_count += 1
            # BUG FIX: annotate before closing the subsegment (previously the
            # annotation was added after end_subsegment()).
            subsegment.put_annotation("failed_records",
                                      response['FailedRecordCount'])
            for index, record in enumerate(response['Records']):
                if 'ErrorCode' in record:
                    if record['ErrorCode'] == 'ProvisionedThroughputExceededException':
                        retry_list.append(records[index])
                    elif record['ErrorCode'] == 'InternalFailure':
                        failed_list.append(records[index])
            records = retry_list
            retry_list = []
            xray_recorder.end_subsegment()
            # BUG FIX: previously tested retry_list right after clearing it
            # (always false); back off when there is anything left to retry.
            if len(records) > 0:
                logger.info("Waiting 1 second for capacity")
                time.sleep(1)
    xray_recorder.end_subsegment()
    return failed_list
def index():
    """Fetch the app secret inside an 'SSM' subsegment, then probe example.com."""
    xray_recorder.begin_subsegment('SSM')
    secret = get_secret()
    xray_recorder.end_subsegment()
    status = requests.get('http://example.com').status_code
    return f"Secret {secret}, Status: {status}"
def query_by_partition_and_sort_key(self, partition_key, partition_value,
                                    sort_key, sort_value):
    """Query the table for items where partition_key == partition_value
    and sort_key >= sort_value, inside an X-Ray 'query' subsegment.

    Returns the list of matching items (or None if the response has none).
    """
    xray_recorder.begin_subsegment('query')
    response = self.db_table.query(
        KeyConditionExpression=Key(partition_key).eq(partition_value)
        & Key(sort_key).gte(sort_value))
    # BUG FIX: end_subsegment('query') passed the subsegment name as the
    # end_time argument; end_subsegment takes no name.
    xray_recorder.end_subsegment()
    return response.get('Items')
def get_secret():
    """Retrieve the seat-map provider secret from AWS Secrets Manager.

    Returns the secret string, the base64-decoded binary secret, or "" when
    the ClientError carried an unhandled error code.

    NOTE(review): the original control flow was garbled; reconstructed to the
    canonical AWS Secrets Manager sample (try/except/else) — confirm.
    """
    secret_name = "starAlliances/dev/seatMap/provider"
    region_name = "us-east-1"

    try:
        xray_recorder.begin_subsegment('init_client')
        # Create a Secrets Manager client
        session = boto3.session.Session()
        client = session.client(
            service_name='secretsmanager',
            region_name=region_name
        )
        xray_recorder.current_subsegment().put_annotation('init_client', 'done')
    finally:
        # Always close the subsegment, even if client creation failed.
        xray_recorder.end_subsegment()

    # In this sample we only handle the specific exceptions for the 'GetSecretValue' API.
    # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
    # We rethrow the exception by default.
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        if e.response['Error']['Code'] == 'DecryptionFailureException':
            # Secrets Manager can't decrypt the protected secret text using the provided KMS key.
            raise e
        elif e.response['Error']['Code'] == 'InternalServiceErrorException':
            # An error occurred on the server side.
            raise e
        elif e.response['Error']['Code'] == 'InvalidParameterException':
            # You provided an invalid value for a parameter.
            raise e
        elif e.response['Error']['Code'] == 'InvalidRequestException':
            # You provided a parameter value that is not valid for the current state of the resource.
            raise e
        elif e.response['Error']['Code'] == 'ResourceNotFoundException':
            # We can't find the resource that you asked for.
            raise e
    else:
        # Decrypts secret using the associated KMS CMK.
        # Depending on whether the secret is a string or binary, one of these fields will be populated.
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
            return secret
        else:
            decoded_binary_secret = base64.b64decode(
                get_secret_value_response['SecretBinary'])
            return decoded_binary_secret

    # Your code goes here.
    return ""
def lambda_handler(event, context):
    """Compute buying advice for `symbol` and persist it.

    A symbol of 'warm' is a warm-up request: sleep to hold the sandbox,
    then proceed as usual.
    """
    symbol = event.get('symbol', None)
    if symbol == 'warm':
        # Warm-up invocation keeps the Lambda sandbox alive.
        time.sleep(10)
    xray_recorder.begin_subsegment('compute-duration')
    advice = get_buying_advice(symbol)
    xray_recorder.end_subsegment()
    store_symbol_details(symbol, advice)
def main_handler(event, context):
    """Run the main entry point under RLT, then persist aggregated KV data."""
    log.debug("Handler start.")
    result = RLT(lambda args, kwargs, r: True, main_handler_entrypoint, event,
                 context)
    # Persist all aggregated data
    xray_recorder.begin_subsegment("main_handler_entrypoint.persist_aggregates")
    KVTable.persist_aggregates()
    xray_recorder.end_subsegment()
    log.log(log.NOTICE, "Normal end.")
    return result
def hello_world():
    """GET linuxacademy.com, store the response body in DynamoDB, and greet."""
    page = requests.get("https://linuxacademy.com")
    logger.debug(page.text)
    xray_recorder.begin_subsegment('DynamoDB PutItem')
    table.put_item(Item={'key': str(uuid.uuid1()), 'response': page.text})
    xray_recorder.end_subsegment()
    return 'Hello, World: %s' % page.url
def handler(event, context, metrics):
    """Download a tweet's image, run Rekognition moderation, then index faces.

    Returns a result dict: {'result': 'Moderated'} when moderation labels are
    found, {'result': 'Succeed', ...} with serialized face data when faces are
    indexed, otherwise {'result': 'Fail', ...}.
    """
    try:
        # Fetch the raw image bytes referenced by the event.
        r = requests.get(event["image_url"], allow_redirects=True)
        attributes=[]
        attributes.append("DEFAULT")
        attributes.append("ALL")
        xray_recorder.begin_subsegment('## Moderation')
        mod_response = rek.detect_moderation_labels(
            Image={
                'Bytes': r.content
            },
            MinConfidence=50
        )
        xray_recorder.end_subsegment()
        if len(mod_response["ModerationLabels"]) != 0:
            # Image was flagged: emit CloudWatch embedded metrics and stop.
            metrics.set_namespace('TwitterRekognition')
            metrics.put_dimensions({"step": "Rekognition"})
            metrics.put_metric("ImagesModerated", 1, "Count")
            metrics.set_property("RequestId", context.aws_request_id)
            metrics.set_property("LambdaName", context.function_name)
            metrics.set_property("Labels", mod_response["ModerationLabels"])
            return {'result': 'Moderated' }
        xray_recorder.begin_subsegment('## IndexFaces')
        rek_response = rek.index_faces(
            Image={"Bytes": r.content},
            CollectionId=CollectionId,
            DetectionAttributes=attributes
        )
        xray_recorder.end_subsegment()
        if 'FaceRecords' in rek_response:
            # Package face records with the originating tweet details.
            hdata = {}
            hdata['image_url'] = str(event["image_url"])
            hdata['full_text'] = event['full_text']
            hdata['facerecords'] = rek_response["FaceRecords"]
            hdata['collectionname'] = CollectionId
            hdata['guidstr'] = event["guidstr"]
            faces_count = len(rek_response["FaceRecords"])
            return {'result': 'Succeed', 'count': str(faces_count),
                    'data': json.dumps(hdata)}
        else:
            logger.error('Unable to rekognize any face')
            return {'result': 'Fail', 'msg': 'Unable to rekognize face'}
    except Exception as e:
        # Broad catch: any failure (network, Rekognition, missing keys)
        # is reported back in the result dict rather than raised.
        logger.error(str(e))
        # Reckoginition Collection Initialize()
        return {'result': 'Fail', 'msg': str(e)}
def handler(event, context):
    """Run an Athena query described by the event inside a traced subsegment.

    Returns {'result': True} unconditionally; query results are only logged.
    """
    xray_recorder.begin_subsegment('handler')
    xray_recorder.current_subsegment().put_annotation('Lambda',
                                                      context.function_name)
    res = fetchall_athena(event["query"], event["type"])
    logger.info(res)
    # AddToDdbStat(res)
    # BUG FIX: the subsegment was never ended before returning, leaking it
    # on the recorder's stack.
    xray_recorder.end_subsegment()
    return {'result': True}
def index():
    """Fetch, format, store and broadcast weather data; returns the formatted JSON."""
    patch_all()
    try:
        raw = fetch_weather_data()
        formatted_data = format_data(raw)
        xray_recorder.begin_subsegment('mysub')
        # Parse once and reuse for both the DB write and the SNS notification.
        payload = json.loads(formatted_data)['data']
        update_database(payload)
        notify_sns(payload)
        xray_recorder.end_subsegment()
        return formatted_data
    except Exception as e:
        # Best-effort endpoint: log the failure and fall through (returns None).
        print(e)
def store_symbol_details(symbol, is_good_buy):
    """Persist a symbol's buying advice to the ChaosTrader DynamoDB table."""
    # Only client creation is inside the subsegment; the put_item call
    # happens after it is closed, matching the original trace shape.
    xray_recorder.begin_subsegment('dynamo-call')
    dynamo = boto3.client('dynamodb')
    xray_recorder.end_subsegment()
    item = {
        'symbol': {'S': symbol},
        'is_good_buy': {'BOOL': is_good_buy},
    }
    dynamo.put_item(TableName='ChaosTrader', Item=item)
def handler(event, context):
    """URL-shortener entry point: redirect submitted URLs, else render HTML.

    Returns an API Gateway response: a 301 redirect when the path resolves
    to a stored URL, otherwise a rendered 200 HTML page.
    """
    # get requestor http headers
    seg = xray_recorder.begin_subsegment('dynamo-session')
    ip = str(event['headers']['X-Forwarded-For']).split(',')[0]
    ua = str(event['headers']['User-Agent'])
    # print request headers in cloudwatch for debug purposes
    print('IP: ' + str(event['headers']['X-Forwarded-For']))
    xray_recorder.end_subsegment()

    # clean the given url path and print debug
    xray_recorder.begin_subsegment('path-find')
    pa = event['path']
    xray_recorder.current_subsegment().put_annotation('clientip', ip)
    xray_recorder.current_subsegment().put_annotation('useragent', ua)
    xray_recorder.current_subsegment().put_annotation('urlpath', pa)
    # check whether a tag, category, url or no path argument was given
    npa, tag = check_path(pa)
    #parse_debug(event)
    xray_recorder.end_subsegment()

    xray_recorder.begin_subsegment('html-parse')
    try:
        # if a url was submitted, redirect to it with a 301
        if npa == 'redir':
            print('301 ***', str(event['headers']['User-Agent']),
                  str(event['headers']['Host']), pa, '', tag)
            xray_recorder.current_subsegment().put_annotation('statuscode', '301')
            return {'statusCode': '301', 'headers': {'Location': tag}}
        # else parse the html page
        else:
            url = check_ua(ua)
            # NOTE(review): `d` is not defined in this function — presumably a
            # module-level resource; confirm against the full file.
            html = parse_html(d, npa, tag, url)
            print('200 ***', str(event['headers']['User-Agent']),
                  str(event['headers']['Host']), pa, url, tag)
            xray_recorder.current_subsegment().put_annotation('statuscode', '200')
            return {
                'statusCode': '200',
                'body': str(html),
                'headers': {
                    'Content-Type': 'text/html',
                    'charset': 'utf-8'
                }
            }
    finally:
        # BUG FIX: end_subsegment() was placed after both returns and never
        # executed; finally guarantees 'html-parse' is closed.
        xray_recorder.end_subsegment()
def dynamodb_table_scan(client, table_name, max_size=32 * 1024 * 1024):
    """Scan a DynamoDB table and return its items as flattened dicts.

    Parameters
    ----------
    client : boto3 DynamoDB client
    table_name : str
        Table to scan (consistent read).
    max_size : int
        Approximate byte budget for returned items; -1 disables truncation.

    Returns
    -------
    list of dict
        Items with the DynamoDB type wrapper removed; records whose
        'ExpirationTime' has passed are skipped.
    """
    xray_recorder.begin_subsegment("misc.dynamodb_table_scan")
    try:
        items = []
        items_size = []
        size = 0
        paginator = client.get_paginator('scan')
        response_iterator = paginator.paginate(TableName=table_name,
                                               ConsistentRead=True)
        for response in response_iterator:
            if "Items" not in response:
                # BUG FIX: previously referenced self.table_name inside a
                # plain function, which would raise NameError here.
                raise Exception("Failed to scan table '%s'!" % table_name)
            # Flatten the structure to make it more useable
            for i in response["Items"]:
                item = {}
                for k in i:
                    item[k] = i[k][list(i[k].keys())[0]]
                if "Key" in item and "Value" in item:
                    items_size.append({
                        "Key": item["Key"],
                        "Size": len(item["Value"])
                    })
                # Do not manage expired records
                if "ExpirationTime" in item:
                    expiration_time = int(item["ExpirationTime"])
                    if seconds_from_epoch_utc() > expiration_time:
                        continue
                if max_size != -1:
                    item_size = 0
                    for k in item:
                        item_size += len(item[k])
                    if size + item_size > max_size:
                        # Truncate too big DynamoDB table.
                        # NOTE(review): this only breaks out of the current
                        # page; the paginator keeps fetching — confirm intent.
                        break
                    else:
                        size += item_size
                items.append(item)
        log.log(
            log.NOTICE,
            f"DynamoDB: Table scan of '{table_name}' returned %d items (bytes={size})."
            % len(items))
        if log.getEffectiveLevel() == log.DEBUG:
            log.debug(f"Biggest items for table {table_name}:")
            sorted_items = sorted(items_size,
                                  key=lambda item: item["Size"],
                                  reverse=True)
            for i in sorted_items[:10]:
                log.debug(f"  Item: {i}")
    finally:
        # Close the subsegment even when the scan raises.
        xray_recorder.end_subsegment()
    return items
def handler(event, context):
    """Delete a selfie report and its newest emotion CSV from S3, then
    re-invoke the Athena dispatcher Lambda asynchronously.

    Returns {'result': True, ...} with the deleted keys, or
    {'result': False, 'msg': ...} on any failure.
    """
    try:
        xray_recorder.begin_subsegment('handler')
        # NOTE(review): this subsegment is never explicitly ended in this
        # function — confirm whether the surrounding runtime closes it.
        xray_recorder.current_subsegment().put_annotation(
            'Lambda', context.function_name)
        #logger.info(event)
        if 'body' not in event:
            logger.error('Missing parameters')
            return {'result': False, 'msg': 'Missing parameters'}
        body = json.loads(event['body'])
        logger.info(body)
        # Remove the report JSON for this selfie id.
        s3.Object(s3_bucket, 'selfie-reports/' + body['id'] + '.json').delete()
        logger.info('DELETED: selfie-reports/' + body['id'] + '.json')
        # Get the lastest csv for deletion
        get_last_modified = lambda obj: int(obj['LastModified'].strftime('%s'))
        objs = client.list_objects_v2(
            Bucket=s3_bucket,
            Prefix='selfie-ath-results/' + body['emotion'])['Contents']
        file_list = [
            obj['Key']
            for obj in sorted(objs, key=get_last_modified, reverse=True)
        ]
        # Delete only the newest non-metadata CSV, then stop.
        for csv_file in file_list:
            if "metadata" in csv_file:
                continue
            s3.Object(s3_bucket, csv_file).delete()
            logger.info('DELETED: ' + csv_file)
            break
        # NOTE(review): `payload` is not defined in this function — presumably
        # a module-level dict; confirm, otherwise this raises NameError (which
        # the broad except below would swallow into a 'result': False reply).
        payload["type"] = "delselfie"
        response = client.invoke(FunctionName=AthDispatcherLambdaName,
                                 InvocationType='Event',
                                 Payload=json.dumps(payload))
        logger.info(response)
        return {
            'result': True,
            'selfie': 'selfie-reports/' + body['id'] + '.json',
            'csv': 'selfie-ath-results/' + body['emotion'] + '/' + csv_file
        }
    except Exception as e:
        logger.error('Something went wrong: ' + str(e))
        return {'result': False, 'msg': str(e)}
def get_ssh_certificate_authority(private_key, password=None):
    """
    Returns the proper SSHCertificateAuthority instance based off the private_key type.
    :param private_key: ASCII bytes of an SSH compatible Private Key (e.g., PEM or SSH Protocol 2 Private Key).
    It should be encrypted with a password, but that is not required.
    :param password: ASCII bytes of the Password to decrypt the Private Key, if it is encrypted.  Which it should be.
    :return: An SSHCertificateAuthority instance.
    """
    xray_recorder.begin_subsegment('ssh_decrypt')
    try:
        if private_key.decode('ascii').startswith(
                SSHCertificateAuthorityPrivateKeyType.RSA):
            return RSACertificateAuthority(private_key, password)
        raise TypeError("Unsupported CA Private Key Type")
    finally:
        # BUG FIX: end_subsegment() was placed after the return/raise and was
        # unreachable, leaking the subsegment; finally guarantees closure.
        xray_recorder.end_subsegment()
def lambda_handler(event, context):
    """Exchange the caller's API key for Metaflow credentials, then delegate.

    On success, credentials are exported into the environment and the request
    is forwarded to the (lazily imported) api module; otherwise returns 403.
    """
    x_api_key = event['headers']['x-api-key']
    response = requests.get(
        CREDENTIALS_API_URL,
        headers={'x-api-key': x_api_key}
    )
    if response.status_code != requests.codes.ok:
        print("Unable to locate credentials")
        return {
            'statusCode': 403,
            'body': json.dumps("Could not obtain Metaflow credentials")
        }
    metaflow_creds = response.json()
    print(metaflow_creds)
    # Export each credential into the environment for the Metaflow client.
    for key in metaflow_creds.keys():
        os.environ[key] = metaflow_creds[key]
    os.environ['USERNAME'] = '******'
    # Deferred import: loading Metaflow is slow, so it is traced explicitly.
    xray_recorder.begin_subsegment('load_metaflow')
    import api
    xray_recorder.end_subsegment()
    return api.handle_request(event, context)
def lambda_handler(event, context):
    """
    AWS Lambda handler

    This method is invoked by the API Gateway: /Prod/first/{proxy+} endpoint.
    """
    # NOTE(review): this file appears to be a cookiecutter template — the
    # '{{ cookiecutter... }}' expressions below are rendered into plain
    # strings before the file becomes valid Python; confirm before running.
    xray_subsegment = xray_recorder.current_subsegment()
    xray_subsegment.put_annotation(
        'application',
        '{{ cookiecutter.project_name.lower().replace(' ',' - ') }}')
    # Record the full incoming event in the trace metadata for debugging.
    xray_subsegment.put_metadata(
        'event', event,
        '{{ cookiecutter.project_name.lower().replace(' ',' - ') }}')
    try:
        subsegment = xray_recorder.begin_subsegment('message')
        # Build a random demo payload to show tracing of generated data.
        message = {
            'Id': uuid.uuid4().hex,
            'Count': random.random() * 100,
        }
        subsegment.put_metadata(
            'message', message,
            '{{ cookiecutter.project_name.lower().replace(' ',' - ') }}')
        xray_recorder.end_subsegment()
        return {"statusCode": 200, "body": json.dumps(message)}
    except Exception as err:  # pragma: no cover
        logger.error(str(err))
        traceback.print_exc()
        raise err
def hello(event, context):
    """Serverless demo handler: exercise numpy inside a custom X-Ray subsegment."""
    body = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event
    }
    # The function segment is created automatically by the runtime; open a
    # subsegment just for the numpy work.
    subsegment = xray_recorder.begin_subsegment('numpy_subsegment')
    # Build and display a small random-access matrix.
    matrix = np.arange(50).reshape(10, 5)
    print("Your numpy array:")
    print(matrix)
    big_array()
    # Record what this invocation did on the subsegment.
    subsegment.put_annotation('event_key', 'numpy')
    subsegment.put_metadata('event-data', event, 'data-received')
    xray_recorder.end_subsegment()
    return {"statusCode": 200, "body": json.dumps(body)}
def handler(event, context):
    """Trace two S3 downloads (one valid, one nonexistent) under one segment.

    Demonstrates X-Ray capturing both a successful and a failing S3 call.
    Returns a fixed success message string.
    """
    # Extract out S3 object details
    valid_bucket = event['valid_bucket_name']
    valid_key = event['valid_key']
    nonexistent_bucket = event['nonexistent_bucket']
    nonexistent_key = event['nonexistent_key']

    # Create a segment
    xray_recorder.begin_segment('s3trace')

    # Copy valid S3 object locally to /tmp
    # BUG FIX: Python 2 `print` statements converted to Python 3 calls
    # (the rest of this file already uses Python 3 syntax).
    valid_subsegment = xray_recorder.begin_subsegment('valid')
    local_file = local_directory + '/' + valid_key
    print('Copying valid object s3://{}/{} to {}...'.format(
        valid_bucket, valid_key, local_file))
    try:
        s3.Bucket(valid_bucket).download_file(valid_key,
                                              '{}'.format(local_file))
    except Exception as e:
        print('Error: {}'.format(str(e)))
    xray_recorder.end_subsegment()

    # Copy invalid S3 object locally to /tmp
    # BUG FIX: was begin_segment('invalid'), which started a brand-new
    # segment instead of nesting a subsegment under 's3trace' and left the
    # following end_subsegment() unbalanced.
    invalid_subsegment = xray_recorder.begin_subsegment('invalid')
    local_file = local_directory + '/' + nonexistent_key
    print('Copying invalid object s3://{}/{} to {}...'.format(
        nonexistent_bucket, nonexistent_key, local_file))
    try:
        s3.Bucket(nonexistent_bucket).download_file(nonexistent_key,
                                                    '{}'.format(local_file))
    except Exception as e:
        print('Error: {}'.format(str(e)))
    xray_recorder.end_subsegment()

    # End segment
    xray_recorder.end_segment()
    return '{"message": "X-Ray worked"}'
def subsegment(self, name):
    """Context-manager generator wrapping the enclosed code in a named
    X-Ray subsegment.

    Exceptions raised inside the block are attached to the subsegment
    (with a truncated stack trace) and re-raised; the subsegment is always
    closed with the current wall-clock time.
    """
    seg = xray_recorder.begin_subsegment(name)
    try:
        yield seg
    except Exception as exc:
        trace = traceback.extract_stack(limit=xray_recorder.max_trace_back)
        seg.add_exception(exc, trace)
        raise
    finally:
        xray_recorder.end_subsegment(time.time())
def __enter__(self):
    """Begin X-Ray tracing for a policy execution.

    Lazily creates the xray client on first entry and points the emitter at
    it. Inside Lambda a subsegment is opened (the runtime owns the segment);
    otherwise a new sampled segment is started. The policy name, resource
    type and (when configured) account id are recorded as annotations.
    """
    if self.client is None:
        # First use: build the client from the context's session factory and
        # share it with the emitter so both send through the same client.
        self.client = self.ctx.session_factory(assume=False).client('xray')
    self.emitter.client = self.client
    if self.in_lambda:
        # Lambda already provides the facade segment; nest under it.
        self.segment = xray_recorder.begin_subsegment(self.service_name)
    else:
        self.segment = xray_recorder.begin_segment(
            self.service_name, sampling=True)
    p = self.ctx.policy
    xray_recorder.put_annotation('policy', p.name)
    xray_recorder.put_annotation('resource', p.resource_type)
    if self.ctx.options.account_id:
        xray_recorder.put_annotation('account', self.ctx.options.account_id)
    # NOTE(review): no explicit return, so `with ... as x` binds None —
    # confirm callers use self.segment rather than the as-target.