Example 1
from requests_futures.sessions import FuturesSession

def send_requests_slave(load: list, delay: float, random_delay: bool,
                        headers: dict):
    """
    Start sending out requests, given load (URLs), delay (delay to insert between requests) and headers (User-Agent
    for the requests)
    """
    if len(load) == 0:
        return 0, 0, 0  # return futs, timeouts, exceptions

    if sr_plugins.exists("circuit_breaker"):
        cb = sr_plugins.load("circuit_breaker")
        first_url = load[0]["url"]
        cb.main(url=first_url)

    session = FuturesSession(max_workers=5)

    if sr_plugins.exists("slave_headers"):
        slave_headers = sr_plugins.load("slave_headers")
    else:
        slave_headers = None

    futs = _send_futs_slave(
        session,
        load,
        delay=delay,
        random_delay=random_delay,
        headers=headers,
        slave_headers=slave_headers,
    )
    futs, timeouts, exceptions = _collect_futs_slave(futs)

    return futs, timeouts, exceptions
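
_send_futs_slave and _collect_futs_slave are not shown in these examples. As a rough illustration of the collection step, here is a minimal sketch assuming requests-futures semantics; every name and detail below is hypothetical, not the project's actual helper.

from requests.exceptions import Timeout

def _collect_futs_slave(futs: list):
    """ Hypothetical sketch: resolve each future and tally timeouts and
        other exceptions separately (matching the 3-tuple returned above) """
    completed, timeouts, exceptions = 0, 0, 0
    for fut in futs:
        try:
            fut.result()  # block until this request finishes
            completed += 1
        except Timeout:
            timeouts += 1
        except Exception:
            exceptions += 1
    return completed, timeouts, exceptions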
Example 2
def lambda_handler(event, context):
    mytime, lambda_name, env_vars = init_lambda(context)

    apps = init_producer(event)

    print(f"apps_to_parse: {apps}, size: {len(apps)}")

    stage = env_vars["stage"]
    parsed_data_bucket = env_vars["parsed_data_bucket"]

    mytime = mytime.set_seconds_to_zero()

    access_logs_bucket = sr_config["access_logs_bucket"]

    producer = sr_plugins.load("producer")
    ddb_items, identifier = producer.main(
        mytime=mytime,
        bucket_w_logs=access_logs_bucket,
        apps=apps,
        parsed_data_bucket=parsed_data_bucket,
    )

    metric = {
        "name": "parsed_timestamp",
        "stage": stage,
        "lambda_name": lambda_name,
        "app": identifier,
        "identifier": "oss",
        "mytime": mytime,
        "val": mytime.epoch,
    }
    emit_metrics(metric)

    return identifier
Example 3
from collections import ChainMap

def emit_metrics(base_metric: dict, num_reqs_val: int, timeouts: int,
                 exceptions: int):
    """ Generate dicts to pass to metric emitter plugin """

    # Put on CW the number of requests sent
    num_reqs = {'name': 'num_requests', 'val': num_reqs_val}
    num_reqs = ChainMap(num_reqs, base_metric)

    num_reqs_all = {'name': 'num_requests', 'val': num_reqs_val, 'app': 'all'}
    num_reqs_all = ChainMap(num_reqs_all, base_metric)

    metrics = [num_reqs, num_reqs_all]

    # Put on CW the number of requests that timed out or errored out
    if timeouts > 0:
        num_timeouts = {'name': 'timeouts', 'val': timeouts}
        num_timeouts = ChainMap(num_timeouts, base_metric)
        metrics.append(num_timeouts)

    if exceptions > 0:
        num_exceptions = {'name': 'exceptions', 'val': exceptions}
        num_exceptions = ChainMap(num_exceptions, base_metric)
        metrics.append(num_exceptions)

    if sr_plugins.exists('metrics'):
        metric_emitter = sr_plugins.load('metrics')
        for metric in metrics:
            metric_emitter.main(metric)
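
The ChainMap here layers each metric-specific dict over the shared base_metric without copying it: lookups check the first mapping, then fall through to the base. A quick stdlib illustration:

from collections import ChainMap

base_metric = {"stage": "prod", "app": "web", "val": 0}
metric = ChainMap({"name": "timeouts", "val": 3}, base_metric)

metric["name"]   # 'timeouts' -- from the first mapping
metric["val"]    # 3          -- the first mapping shadows base_metric's 0
metric["stage"]  # 'prod'     -- falls through to base_metric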
Example 4
def emit_metrics(base_metric: dict, num_reqs_pre_filter_val: int,
                 num_reqs_after_filter: int):
    """ Generate dicts to pass to metric emitter plugin """
    num_reqs_pre_filter = {
        "name": "num_requests_pre_filter",
        "val": num_reqs_pre_filter_val,
    }
    num_reqs_pre_filter = ChainMap(num_reqs_pre_filter, base_metric)

    num_reqs_pre_filter_all = {
        "app": "all",
        "name": "num_requests_pre_filter",
        "val": num_reqs_pre_filter_val,
    }
    num_reqs_pre_filter_all = ChainMap(num_reqs_pre_filter_all, base_metric)

    num_reqs = {"name": "num_requests", "val": num_reqs_after_filter}
    num_reqs = ChainMap(num_reqs, base_metric)

    num_reqs_all = {
        "app": "all",
        "name": "num_requests",
        "val": num_reqs_after_filter
    }
    num_reqs_all = ChainMap(num_reqs_all, base_metric)

    metrics = [
        num_reqs_pre_filter, num_reqs_pre_filter_all, num_reqs, num_reqs_all
    ]

    if sr_plugins.exists("metrics"):
        metric_emitter = sr_plugins.load("metrics")
        for metric in metrics:
            metric_emitter.main(metric)
Example 5
def emit_metrics(base_metric: dict, num_reqs_val: int, timeouts: int,
                 exceptions: int):
    """ Generate dicts to pass to metric emitter plugin """

    # Put on CW the number of requests sent
    num_reqs = {"name": "num_requests", "val": num_reqs_val}
    num_reqs = ChainMap(num_reqs, base_metric)

    num_reqs_all = {"name": "num_requests", "val": num_reqs_val, "app": "all"}
    num_reqs_all = ChainMap(num_reqs_all, base_metric)

    metrics = [num_reqs, num_reqs_all]

    # Put on CW the number of requests that timed out or errored out
    if timeouts > 0:
        num_timeouts = {"name": "timeouts", "val": timeouts}
        num_timeouts = ChainMap(num_timeouts, base_metric)
        metrics.append(num_timeouts)

    if exceptions > 0:
        num_exceptions = {"name": "exceptions", "val": exceptions}
        num_exceptions = ChainMap(num_exceptions, base_metric)
        metrics.append(num_exceptions)

    if sr_plugins.exists("metrics"):
        metric_emitter = sr_plugins.load("metrics")
        for metric in metrics:
            metric_emitter.main(metric)
Example 6
def emit_metrics(
    base_metric: dict, num_reqs_pre_filter_val: int, num_reqs_after_filter: int
):
    """ Generate dicts to pass to metric emitter plugin """
    num_reqs_pre_filter = {
        "name": "num_requests_pre_filter",
        "val": num_reqs_pre_filter_val,
    }
    num_reqs_pre_filter = ChainMap(num_reqs_pre_filter, base_metric)

    num_reqs_pre_filter_all = {
        "app": "all",
        "name": "num_requests_pre_filter",
        "val": num_reqs_pre_filter_val,
    }
    num_reqs_pre_filter_all = ChainMap(num_reqs_pre_filter_all, base_metric)

    num_reqs = {"name": "num_requests", "val": num_reqs_after_filter}
    num_reqs = ChainMap(num_reqs, base_metric)

    num_reqs_all = {"app": "all", "name": "num_requests", "val": num_reqs_after_filter}
    num_reqs_all = ChainMap(num_reqs_all, base_metric)

    # If debug is on, send extra metrics to CloudWatch.
    # If debug is off, only send the metric for the total number of requests sent.
    if sr_config["debug"]:
        metrics = [num_reqs_pre_filter, num_reqs_pre_filter_all, num_reqs, num_reqs_all]
    else:
        metrics = [num_reqs]

    if sr_plugins.exists("metrics"):
        metric_emitter = sr_plugins.load("metrics")
        for metric in metrics:
            metric_emitter.main(metric)
Example 7
def emit_metrics(base_metric: dict, num_reqs_val: int, timeouts: int, exceptions: int):
    """ Generate dicts to pass to metric emitter plugin """

    # Put on CW the number of requests sent
    num_reqs = {"name": "num_requests", "val": num_reqs_val}
    num_reqs = ChainMap(num_reqs, base_metric)

    num_reqs_all = {"name": "num_requests", "val": num_reqs_val, "app": "all"}
    num_reqs_all = ChainMap(num_reqs_all, base_metric)

    metrics = [num_reqs, num_reqs_all]

    # Put on CW the number of requests that timed out or errored out
    if timeouts > 0:
        num_timeouts = {"name": "timeouts", "val": timeouts}
        num_timeouts = ChainMap(num_timeouts, base_metric)
        metrics.append(num_timeouts)

    if exceptions > 0:
        num_exceptions = {"name": "exceptions", "val": exceptions}
        num_exceptions = ChainMap(num_exceptions, base_metric)
        metrics.append(num_exceptions)

    # If debug is on, send request rate metrics for each worker lambda.
    # Warning: if the total number of requests is high, there can be
    # 100s of worker lambdas sending custom CW metrics every minute.
    if sr_plugins.exists("metrics") and sr_config['debug']:
        metric_emitter = sr_plugins.load("metrics")
        for metric in metrics:
            metric_emitter.main(metric)
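
Both this variant and Example 6 key off sr_config["debug"]: Example 6 uses it to choose which metrics to send, while this version gates emission entirely. A purely illustrative sr_config excerpt (the keys appear in these examples; the values are invented):

# Illustrative only -- sr_config is the project's loaded config; these
# values are invented for the sketch.
sr_config = {
    "debug": False,                          # gates the extra CW metrics
    "access_logs_bucket": "my-access-logs",  # used by the producer example
}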
Example 8
def emit_metrics(base_metric: dict, num_reqs_pre_filter_val: int,
                 num_reqs_after_filter: int):
    """ Generate dicts to pass to metric emitter plugin """
    num_reqs_pre_filter = {
        'name': 'num_requests_pre_filter',
        'val': num_reqs_pre_filter_val
    }
    num_reqs_pre_filter = ChainMap(num_reqs_pre_filter, base_metric)

    num_reqs_pre_filter_all = {
        'app': 'all',
        'name': 'num_requests_pre_filter',
        'val': num_reqs_pre_filter_val
    }
    num_reqs_pre_filter_all = ChainMap(num_reqs_pre_filter_all, base_metric)

    num_reqs = {'name': 'num_requests', 'val': num_reqs_after_filter}
    num_reqs = ChainMap(num_reqs, base_metric)

    num_reqs_all = {
        'app': 'all',
        'name': 'num_requests',
        'val': num_reqs_after_filter
    }
    num_reqs_all = ChainMap(num_reqs_all, base_metric)

    metrics = [
        num_reqs_pre_filter, num_reqs_pre_filter_all, num_reqs, num_reqs_all
    ]

    if sr_plugins.exists('metrics'):
        metric_emitter = sr_plugins.load('metrics')
        for metric in metrics:
            metric_emitter.main(metric)
Example 9
import json
from os import getenv

def init_producer(lambda_event: dict = None) -> list:
    if lambda_event and 'apps_to_parse' in lambda_event:
        apps = lambda_event['apps_to_parse']
    else:
        apps = getenv('apps_to_parse', '[]')
        apps = json.loads(apps)

    if sr_plugins.exists('test_params_validator'):
        validator = sr_plugins.load('test_params_validator')
        validator.main(apps=apps)

    return apps
Example 10
def init_producer(lambda_event: dict = None) -> list:
    if lambda_event and "apps_to_parse" in lambda_event:
        apps = lambda_event["apps_to_parse"]
    else:
        apps = getenv("apps_to_parse", "[]")
        apps = json.loads(apps)

    if sr_plugins.exists("test_params_validator"):
        validator = sr_plugins.load("test_params_validator")
        validator.main(apps=apps)

    return apps
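
Note the two input paths above: an explicit apps_to_parse key on the Lambda event wins; otherwise the list is read from a JSON-encoded environment variable. A usage sketch, assuming no test_params_validator plugin is registered:

# Direct invocation: the event carries the list as-is.
init_producer({"apps_to_parse": ["app1", "app2"]})  # -> ["app1", "app2"]

# No event: the list comes from the environment, JSON-encoded,
# e.g. os.environ["apps_to_parse"] = '["app1"]'
init_producer()  # -> ["app1"]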
Example 11
def lambda_handler(event, context):
    try:
        mytime, lambda_name, env_vars = init_lambda(context)

        apps = init_producer(event)

        print(f'apps_to_parse: {apps}, size: {len(apps)}')

        stage = env_vars['stage']
        parsed_data_bucket = env_vars['parsed_data_bucket']

        mytime = mytime.set_seconds_to_zero()

        access_logs_bucket = sr_config['access_logs_bucket']

        producer = sr_plugins.load('producer')
        ddb_items, identifier = producer.main(
            mytime=mytime,
            bucket_w_logs=access_logs_bucket,
            apps=apps,
            parsed_data_bucket=parsed_data_bucket)

        metric = {
            'name': 'parsed_timestamp',
            'stage': stage,
            'lambda_name': lambda_name,
            'app': 'all',
            'identifier': identifier,
            'mytime': mytime,
            'val': mytime.epoch
        }
        emit_metrics(metric)

    except Exception as e:
        trace = traceback.format_exc()
        raise Exception(trace) from e

    return identifier
Example 12
def emit_metrics(metric: dict):
    """ Generate dicts to pass to metric emitter plugin """

    if sr_plugins.exists("metrics"):
        metric_emitter = sr_plugins.load("metrics")
        metric_emitter.main(metric)
Example 13
def lambda_handler(event, context):
    """
    Example event passed from orchestrator-past Lambda
        consumer_event = {
        'app': app.name,
        'env_to_test': app.env_to_test,
        'cur_timestamp': app.cur_timestamp,
        'rate': app.rate,
        'parent_lambda': lambda_name,
        'child_lambda': consumer_master_past_lambda_name,
        'headers': headers,
    }
    """
    try:
        mytime, lambda_name, env_vars = init_lambda(context)
        stage = env_vars["stage"]

        (
            app,
            identifier,
            cur_timestamp,
            rate,
            headers,
            filters,
            base_url,
            baseline,
        ) = init_consumer_master(event)

        parsed_data_bucket = env_vars["parsed_data_bucket"]

        replay_mode = sr_plugins.load("replay_mode")
        kwargs = {
            "lambda_start_time": mytime,
            "app_name": app,
            "app_cur_timestamp": cur_timestamp,
        }
        s3_parsed_data_key = replay_mode.main(**kwargs)

        print(f"s3://{parsed_data_bucket}/{s3_parsed_data_key}")

        # Fetch from S3 the URLs to send for this load test
        load = s3.fetch_from_s3(key=s3_parsed_data_key,
                                bucket=parsed_data_bucket)

        num_reqs_pre_filter = len(load)

        # Transform the URLs based on the test params and filters
        load = loader_main(load=load,
                           rate=rate,
                           baseline=baseline,
                           base_url=base_url,
                           filters=filters)

        num_reqs_after_filter = len(load)

        # Init base metric dict
        base_metric = {
            "stage": stage,
            "lambda_name": lambda_name,
            "app": app,
            "identifier": identifier,
            "mytime": mytime,
        }

        emit_metrics(base_metric, num_reqs_pre_filter, num_reqs_after_filter)

        invoke_worker_lambdas(
            load=load,
            app=app,
            identifier=identifier,
            parent_lambda=lambda_name,
            headers=headers,
        )

    except Exception as e:
        trace = traceback.format_exc()
        raise Exception(trace) from e

    return app
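
invoke_worker_lambdas is not shown. Given the orchestrator docstring in Example 15 (each worker handles up to 100 requests), a hypothetical sketch of the fan-out might look like this; the batch size, payload shape, and target function name are all assumptions:

def invoke_worker_lambdas(*, load: list, app: str, identifier: str,
                          parent_lambda: str, headers: dict):
    """ Hypothetical sketch: chunk the load into batches of <= 100 URLs
        and invoke one worker lambda per batch """
    batch_size = 100  # assumed from the "up to 100 requests" note
    for i in range(0, len(load), batch_size):
        payload = {
            "load": load[i:i + batch_size],
            "app": app,
            "identifier": identifier,
            "parent_lambda": parent_lambda,
            "headers": headers,
        }
        invoke_func(payload, func="consumer-worker-past")  # name assumed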
Example 14
import math

def loader_main(
    *, load: list, rate: float, baseline: int, base_url: str, filters: dict
) -> list:
    """ Given a target rate (an integer percentage value) and a list of URIs (load)
        Transform them to a list of URLs
        Optional filters can filter out certain requests based on its attributes
    """
    num_original_reqs = len(load)

    # Given a rate and original load size, calculate the target load size
    num_targeted_reqs = _calculate_target_load(
        num_original_reqs, rate=rate, baseline=baseline
    )

    # Increase or decrease the load to the calculated target load size
    if baseline:
        print(f"rate: target={num_targeted_reqs} / baseline={baseline} = {rate}%")
    else:
        print(f"rate: target={num_targeted_reqs} / orig={num_original_reqs} = {rate}%")

    try:
        # If the apply_filter key exists, is set to True, and there is a
        # filter plugin, filter out certain URLs according to the test params
        if (
            "apply_filter" in filters
            and filters["apply_filter"]
            and sr_plugins.exists("load_filter")
        ):
            filter_plugin = sr_plugins.load("load_filter")
            load = filter_plugin.main(load=load, filters=filters)

    except Exception as e:
        print(type(e), e)
        print("# load:", len(load))
        print("filters:", filters)
        raise Exception(f"Error applying filters in filter_load(): {type(e)}, {e}")

    if len(load) == 0:
        return load

    print(f"load_after_filter={len(load)}")
    if num_targeted_reqs > len(load):
        # Duplicate the load until it is at least the target size
        ratio = math.ceil(num_targeted_reqs / len(load))
        load = load * ratio
    load = _loader(load, num_targeted_reqs, num_original_reqs)
    print(f"load_after_loader={len(load)}")

    # Pass in URIs to plugin to transform into URLs
    if sr_plugins.exists("loader_middleware"):
        midware = sr_plugins.load("loader_middleware")
        midware_input = {
            "load": load,
            "rate": rate,
            "base_url": base_url,
            "filters": filters,
            "baseline": baseline,
        }
        load = midware.main(**midware_input)

    print(f"load_after_transform={len(load)}")
    return load
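
_calculate_target_load is not shown, but the print statements above imply target / baseline = rate% when a baseline is set, and target / orig = rate% otherwise. A hypothetical reconstruction under that assumption (not the project's actual helper):

import math

def _calculate_target_load(num_original_reqs: int, *, rate: float,
                           baseline: int) -> int:
    """ Hypothetical sketch: scale the baseline (if set) or the original
        load size by the rate percentage """
    denominator = baseline if baseline else num_original_reqs
    return math.ceil(denominator * rate / 100)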
Example 15
def lambda_handler(event, context):
    try:
        """
        First gather the necessary test params, init App objects, filters and compute the current step
        After,
        then send (app, env_to_test, cur_timestamp, rate) to consumer-master
        consumer-master will then fetch data set (set of URLs) from S3, then pass it to multiple consumer-workers
        each consumer-worker will then send out requests to test environment (each worker handles up to 100 requests)
        """

        mytime, lambda_name, env_vars = lambda_init.init_lambda(context)
        stage = env_vars["stage"]
        consumer_master_past_lambda = env_vars["consumer_master_past_name"]

        apps, test_params = init_apps_from_test_params(event)
        filters = init_filters()

        step = generate_step_from_mytime(mytime)

        print("step:", step)
        for app in apps:
            advance_app_timestamp(app, step)

        consumer_event = {}

        # Invoke the consumer-master lambda for each app in apps
        for app in apps:
            headers = Headers(
                shadowreader_type="past", stage=stage, app=app, step=step
            ).headers

            consumer_event = {
                "app": app.name,
                "identifier": app.identifier,
                "base_url": app.base_url,
                "cur_timestamp": app.cur_timestamp,
                "rate": app.rate,
                "baseline": app.baseline,
                "parent_lambda": lambda_name,
                "child_lambda": consumer_master_past_lambda,
                "headers": headers,
                "filters": filters,
            }
            invoke_func(consumer_event, func=consumer_master_past_lambda)

        if apps and consumer_event:
            print_to_logs(consumer_event, apps)

        # Collect metrics and put metrics into CW
        metrics = []
        for app in apps:
            # This is the timestamp (in epoch time) that is being replayed
            # by the load test.
            metric = {
                "name": "replayed_timestamp",
                "stage": stage,
                "lambda_name": lambda_name,
                "app": app.name,
                "identifier": app.identifier,
                "mytime": mytime,
                "val": app.cur_timestamp,
            }
            metrics.append(metric)

        if sr_plugins.exists("metrics"):
            metric_emitter = sr_plugins.load("metrics")
            for metric in metrics:
                metric_emitter.main(metric)

        cur_params = {"apps": apps, "filters": filters, "test_params": test_params}

        if sr_plugins.exists("test_params_emitter"):
            params_emitter = sr_plugins.load("test_params_emitter")
            params_emitter.main(
                cur_params,
                lambda_name,
                mytime,
                stage,
                env_vars,
                sr_config,
                sr_plugins._sr_plugins,
            )

    except Exception as e:
        trace = traceback.format_exc()
        raise Exception(trace) from e

    return json.dumps(cur_params, default=str), json.dumps(consumer_event, default=str)
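
One detail worth noting in the return statement: default=str makes json.dumps fall back to str() for values it cannot serialize natively (the App objects and mytime timestamp here), instead of raising TypeError. A quick stdlib illustration:

import json
from datetime import datetime

# Without default=str this call would raise TypeError, since datetime
# is not JSON serializable; with it, str() is used as a fallback.
json.dumps({"mytime": datetime(2024, 1, 1)}, default=str)
# -> '{"mytime": "2024-01-01 00:00:00"}'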
Example 16
def _validate_params(**kwargs):
    if sr_plugins.exists("test_params_validator"):
        validator = sr_plugins.load("test_params_validator")
        validator.main(**kwargs)