def lambda_handler(event, context):
    """Producer entry point: parse access logs for the given apps and emit a
    `parsed_timestamp` metric.

    Args:
        event: Lambda invocation payload; consumed by `init_producer` to
            determine which apps to parse.
        context: Lambda context object; consumed by `init_lambda`.

    Returns:
        The identifier string returned by the producer plugin.

    Raises:
        Exception: any failure is re-raised with the full formatted traceback
            as the message (matching the sibling handlers in this project).
    """
    # NOTE(review): the sibling handlers appear to import traceback at module
    # top; confirm and hoist this import when merging.
    import traceback

    try:
        mytime, lambda_name, env_vars = init_lambda(context)
        apps = init_producer(event)
        print(f"apps_to_parse: {apps}, size: {len(apps)}")
        stage = env_vars["stage"]
        parsed_data_bucket = env_vars["parsed_data_bucket"]
        # Align the replay timestamp to the start of the minute.
        mytime = mytime.set_seconds_to_zero()
        access_logs_bucket = sr_config["access_logs_bucket"]
        producer = sr_plugins.load("producer")
        ddb_items, identifier = producer.main(
            mytime=mytime,
            bucket_w_logs=access_logs_bucket,
            apps=apps,
            parsed_data_bucket=parsed_data_bucket,
        )
        metric = {
            "name": "parsed_timestamp",
            "stage": stage,
            "lambda_name": lambda_name,
            # NOTE(review): "app"/"identifier" here differ from the other
            # producer variant ("app": "all", "identifier": identifier) —
            # preserved as-is; confirm which dimensioning is intended.
            "app": identifier,
            "identifier": "oss",
            "mytime": mytime,
            "val": mytime.epoch,
        }
        emit_metrics(metric)
    except Exception:
        # Mirror the sibling handlers: re-raise with the formatted traceback
        # as the message so the full stack shows up in the Lambda error.
        raise Exception(traceback.format_exc())
    return identifier
def lambda_handler(event, context):
    """Consumer-worker entry point: send this worker's batch of requests and
    emit request/timeout/exception counts as metrics.

    Returns the number of requests this worker was assigned; any failure is
    re-raised with the full formatted traceback as the message.
    """
    try:
        mytime, lambda_name, env_vars = lambda_init.init_lambda(
            context, print_time=False
        )
        stage = env_vars["stage"]
        (
            app,
            load,
            identifier,
            delay_random,
            delay_per_req,
            headers,
        ) = init_consumer_worker(event)

        request_count = len(load)

        # Fire the requests for this worker's slice of the load.
        futures, num_timeouts, num_exceptions = send_requests_worker(
            load, delay_per_req, delay_random, headers
        )

        # Dimensions shared by every metric emitted for this invocation.
        base_metric = {
            "stage": stage,
            "lambda_name": lambda_name,
            "app": app,
            "identifier": identifier,
            "mytime": mytime,
            "resolution": 1,
        }
        emit_metrics(base_metric, request_count, num_timeouts, num_exceptions)

        print(
            f"app: {app}, env: {identifier} # reqs: {request_count}, "
            f"# timeouts: {num_timeouts}, # exceptions: {num_exceptions}"
        )
    except Exception:
        raise Exception(traceback.format_exc())
    return request_count
def lambda_handler(event, context):
    """Producer entry point: run the producer plugin over the access logs for
    the requested apps, then emit a `parsed_timestamp` metric.

    Returns the identifier produced by the producer plugin; any failure is
    re-raised with the full formatted traceback as the message.
    """
    try:
        mytime, lambda_name, env_vars = init_lambda(context)
        apps = init_producer(event)
        print(f"apps_to_parse: {apps}, size: {len(apps)}")

        stage = env_vars["stage"]
        parsed_data_bucket = env_vars["parsed_data_bucket"]

        # Align the replay timestamp to the start of the minute.
        mytime = mytime.set_seconds_to_zero()

        producer_plugin = sr_plugins.load("producer")
        ddb_items, identifier = producer_plugin.main(
            mytime=mytime,
            bucket_w_logs=sr_config["access_logs_bucket"],
            apps=apps,
            parsed_data_bucket=parsed_data_bucket,
        )

        emit_metrics(
            {
                "name": "parsed_timestamp",
                "stage": stage,
                "lambda_name": lambda_name,
                "app": "all",
                "identifier": identifier,
                "mytime": mytime,
                "val": mytime.epoch,
            }
        )
    except Exception:
        raise Exception(traceback.format_exc())
    return identifier
def lambda_handler(event, context):
    """Consumer-slave entry point: send the batch of requests assigned to this
    invocation and record request/timeout/exception counts.

    Returns the number of requests handled; any failure is re-raised with the
    full formatted traceback as the message.
    """
    try:
        mytime, lambda_name, env_vars = lambda_init.init_lambda(
            context, print_time=False
        )
        stage = env_vars["stage"]

        init = init_consumer_slave(event)
        app, load, identifier, delay_random, delay_per_req, headers = init

        # Fire this slave's slice of the load.
        futs, timeouts, exceptions = send_requests_slave(
            load, delay_per_req, delay_random, headers
        )
        num_reqs_val = len(load)

        # Shared dimensions for the metrics emitted below.
        base_metric = {
            "stage": stage,
            "lambda_name": lambda_name,
            "app": app,
            "identifier": identifier,
            "mytime": mytime,
            "resolution": 1,
        }
        emit_metrics(base_metric, num_reqs_val, timeouts, exceptions)

        summary = (
            f"app: {app}, env: {identifier} # reqs: {num_reqs_val}, "
            f"# timeouts: {timeouts}, # exceptions: {exceptions}"
        )
        print(summary)
    except Exception:
        trace = traceback.format_exc()
        raise Exception(trace)
    return num_reqs_val
def lambda_handler(event, context):
    """Consumer-master entry point: resolve the parsed-log S3 key for this
    replay tick, fetch and filter the URL set, emit count metrics, and fan the
    load out to worker lambdas.

    Example event passed from orchestrator-past Lambda:
    consumer_event = {
        'app': app.name,
        'env_to_test': app.env_to_test,
        'cur_timestamp': app.cur_timestamp,
        'rate': app.rate,
        'parent_lambda': lambda_name,
        'child_lambda': consumer_master_past_lambda_name,
        'headers': headers,
    }

    Returns the app name; any failure is re-raised with the full formatted
    traceback as the message.
    """
    try:
        mytime, lambda_name, env_vars = init_lambda(context)
        stage = env_vars["stage"]
        app, identifier, cur_timestamp, rate, headers, filters, base_url, baseline = init_consumer_master(
            event
        )
        parsed_data_bucket = env_vars["parsed_data_bucket"]
        # Ask the replay_mode plugin which S3 key holds the parsed data for
        # this app at the timestamp currently being replayed.
        replay_mode = sr_plugins.load("replay_mode")
        kwargs = {
            "lambda_start_time": mytime,
            "app_name": app,
            "app_cur_timestamp": cur_timestamp,
        }
        s3_parsed_data_key = replay_mode.main(**kwargs)
        print(f"s3://{parsed_data_bucket}/{s3_parsed_data_key}")
        # Fetch from S3 the URLs to send for this load test
        load = s3.fetch_from_s3(key=s3_parsed_data_key, bucket=parsed_data_bucket)
        num_reqs_pre_filter = len(load)
        # Transform the URLs based on the test params and filters
        load = loader_main(
            load=load, rate=rate, baseline=baseline, base_url=base_url, filters=filters
        )
        num_reqs_after_filter = len(load)
        # Init base metric dict; pre/post-filter counts are passed alongside
        # so both can be reported against these dimensions.
        base_metric = {
            "stage": stage,
            "lambda_name": lambda_name,
            "app": app,
            "identifier": identifier,
            "mytime": mytime,
        }
        emit_metrics(base_metric, num_reqs_pre_filter, num_reqs_after_filter)
        # Fan the filtered load out to consumer-worker lambdas.
        invoke_worker_lambdas(
            load=load,
            app=app,
            identifier=identifier,
            parent_lambda=lambda_name,
            headers=headers,
        )
    except Exception as e:
        # Re-raise with the formatted traceback as the message so the full
        # stack is visible in the Lambda error output.
        trace = traceback.format_exc()
        raise Exception(trace)
    return app
def lambda_handler(event, context):
    """Orchestrator entry point: compute the current replay step for each app
    and invoke the consumer-master lambda per app, then emit replay metrics.

    Returns a 2-tuple of JSON strings: (current test params, last
    consumer_event sent). Any failure is re-raised with the full formatted
    traceback as the message.
    """
    try:
        """
        First gather the necessary test params, init App objects, filters
        and compute the current step
        After, then send (app, env_to_test, cur_timestamp, rate) to consumer-master
        consumer-master will then fetch data set (set of URLs) from S3,
        then pass it to multiple consumer-workers
        each consumer-worker will then send out requests to test environment
        (each worker handles up to 100 requests)
        """
        mytime, lambda_name, env_vars = lambda_init.init_lambda(context)
        stage = env_vars["stage"]
        consumer_master_past_lambda = env_vars["consumer_master_past_name"]

        apps, test_params = init_apps_from_test_params(event)
        filters = init_filters()

        # Current replay step, derived from the lambda's start time.
        step = generate_step_from_mytime(mytime)
        print("step:", step)

        # Move each app's replay cursor forward to the current step.
        for app in apps:
            advance_app_timestamp(app, step)

        consumer_event = {}

        # Invoke the consumer-master lambda for each app in apps
        for app in apps:
            headers = Headers(
                shadowreader_type="past", stage=stage, app=app, step=step
            ).headers

            consumer_event = {
                "app": app.name,
                "identifier": app.identifier,
                "base_url": app.base_url,
                "cur_timestamp": app.cur_timestamp,
                "rate": app.rate,
                "baseline": app.baseline,
                "parent_lambda": lambda_name,
                "child_lambda": consumer_master_past_lambda,
                "headers": headers,
                "filters": filters,
            }
            invoke_func(consumer_event, func=consumer_master_past_lambda)

        # Log only when at least one app was actually dispatched; note that
        # consumer_event still holds the payload of the LAST app at this point.
        if apps and consumer_event:
            print_to_logs(consumer_event, apps)

        # Collect metrics and put metrics into CW
        metrics = []
        for app in apps:
            # This is the timestamp (in epoch time) that is being replayed
            # by the load test.
            metric = {
                "name": "replayed_timestamp",
                "stage": stage,
                "lambda_name": lambda_name,
                "app": app.name,
                "identifier": app.identifier,
                "mytime": mytime,
                "val": app.cur_timestamp,
            }
            metrics.append(metric)

        # Metric emission is optional — only runs if the plugin is installed.
        if sr_plugins.exists("metrics"):
            metric_emitter = sr_plugins.load("metrics")
            for metric in metrics:
                metric_emitter.main(metric)

        cur_params = {"apps": apps, "filters": filters, "test_params": test_params}

        # Optionally hand the full test configuration to an emitter plugin.
        if sr_plugins.exists("test_params_emitter"):
            params_emitter = sr_plugins.load("test_params_emitter")
            params_emitter.main(
                cur_params,
                lambda_name,
                mytime,
                stage,
                env_vars,
                sr_config,
                sr_plugins._sr_plugins,
            )
    except Exception as e:
        # Re-raise with the formatted traceback as the message so the full
        # stack is visible in the Lambda error output.
        trace = traceback.format_exc()
        raise Exception(trace)

    # default=str stringifies non-JSON-serializable values (App objects, times).
    return json.dumps(cur_params, default=str), json.dumps(consumer_event, default=str)