import itertools
from datetime import timedelta

import shakedown
from dcos import marathon  # assumed client import; adjust to the suite's actual tooling

# Project-local helpers such as app_def(), exponential_decay(), cluster_info(),
# available_resources(), ensure_mom_version(), marathon_on_marathon(),
# incremental_steps() and linear_step_function() are expected to be provided
# by the test suite's shared utilities.


def test_incremental_apps_per_group_scale():
    """ Try to reach the maximum number of apps. We start with batches of apps
        in a group and decay the batch size.
    """
    cluster_info()
    print(available_resources())

    client = marathon.create_client()

    batch_size_for = exponential_decay(start=500, decay=0.3)
    for step in itertools.count(start=0):
        batch_size = batch_size_for(step)
        shakedown.echo("Add {} apps".format(batch_size))

        group_id = "/batch-{0:0>3}".format(step)
        app_ids = ("app-{0:0>4}".format(i) for i in range(batch_size))
        app_definitions = [app_def(app_id) for app_id in app_ids]
        next_batch = {
            "apps": app_definitions,
            "dependencies": [],
            "id": group_id
        }

        client.create_group(next_batch)
        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
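# The helpers used above (exponential_decay, app_def) come from the suite's
# shared test utilities and are not shown in this module. The definitions
# below are minimal stand-ins, NOT the project's implementations, sketched
# purely from how the tests here use them: exponential_decay(start, decay)
# must return a callable mapping a step number to a shrinking batch size, and
# app_def(app_id) must return a small Marathon app definition.
import math


def _sketch_exponential_decay(start, decay):
    """Sketch: batch size shrinks exponentially with the step number."""
    return lambda step: max(1, int(start * math.exp(-decay * step)))


def _sketch_app_def(app_id):
    """Sketch: a tiny do-nothing app, mirroring the inline definition used in
       the Marathon-on-Marathon test_incremental_scale() further down.
    """
    return {
        "id": app_id,
        "instances": 1,
        "cmd": "for (( ; ; )); do sleep 100000000; done",
        "cpus": 0.001,
        "mem": 8,
        "disk": 0
    }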
def test_incremental_group_nesting():
    """ Scale depth of nested groups. Again we grow fast at the beginning and
        then slow the growth.
    """
    client = marathon.create_client()

    batch_size_for = exponential_decay(start=5, decay=0.1)
    depth = 0
    for step in itertools.count(start=0):
        batch_size = batch_size_for(step)
        depth += batch_size
        shakedown.echo("Create a group with a nesting of {}".format(depth))

        group_ids = ("group-{0:0>3}".format(g) for g in range(depth))
        nested_groups = '/'.join(group_ids)

        # Note: We always deploy into the same nested groups.
        app_id = '/{0}/app-1'.format(nested_groups)
        client.add_app(app_def(app_id))

        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
def test_incremental_groups_scale():
    """ Scale number of groups. """
    client = marathon.create_client()

    batch_size_for = exponential_decay(start=40, decay=0.01)
    total = 0
    for step in itertools.count(start=0):
        batch_size = batch_size_for(step)
        total += batch_size
        shakedown.echo("Add {} groups totaling {}".format(batch_size, total))

        group_ids = ("/group-{0:0>4}".format(step * batch_size + i)
                     for i in range(batch_size))
        app_ids = ("{}/app-1".format(g) for g in group_ids)
        app_definitions = [app_def(app_id) for app_id in app_ids]

        # There is no app id. We simply PUT /v2/apps to create groups in
        # batches.
        client.update_app('', app_definitions)
        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
def test_incremental_apps_per_group_scale():
    """ Try to reach the maximum number of apps. We start with batches of apps
        in a group and decay the batch size.
    """
    client = marathon.create_client()

    batch_size_for = exponential_decay(start=500, decay=0.3)
    for step in itertools.count(start=0):
        batch_size = batch_size_for(step)
        shakedown.echo("Add {} apps".format(batch_size))

        group_id = "/batch-{0:0>3}".format(step)
        app_ids = ("app-{0:0>4}".format(i) for i in range(batch_size))
        app_definitions = [app_def(app_id) for app_id in app_ids]
        next_batch = {
            "apps": app_definitions,
            "dependencies": [],
            "id": group_id
        }

        client.create_group(next_batch)
        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
def test_incremental_scale():
    """ Scale the instances of an app in steps until the first error, e.g. a
        timeout, is reached.
    """
    client = marathon.create_client()
    client.add_app(app_def("cap-app"))

    for new_size in incremental_steps(linear_step_function(step_size=1000)):
        shakedown.echo("Scaling to {}".format(new_size))
        shakedown.deployment_wait(
            app_id='cap-app', timeout=timedelta(minutes=10).total_seconds())

        client.scale_app('/cap-app', new_size)
        shakedown.deployment_wait(
            app_id='cap-app', timeout=timedelta(minutes=10).total_seconds())

        shakedown.echo("done.")
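# linear_step_function() and incremental_steps() also come from the shared
# test utilities. The definitions below are a minimal sketch of the behaviour
# the test above relies on, not the project's implementation: the generator
# yields 1000, 2000, 3000, ... until the surrounding loop fails or times out.
def _sketch_linear_step_function(step_size=1000):
    """Sketch: map a 1-based step number to a target instance count."""
    return lambda step: step * step_size


def _sketch_incremental_steps(step_function):
    """Sketch: yield ever larger targets produced by step_function."""
    for step in itertools.count(start=1):
        yield step_function(step)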
def test_incremental_app_scale():
    """ Scale the number of apps in steps until the first error, e.g. a
        timeout, is reached. The apps are created in the root group.
    """
    client = marathon.create_client()
    client.remove_group('/')

    for step in itertools.count(start=1):
        shakedown.echo("Add new apps")

        app_id = "app-{0:0>4}".format(step)
        client.add_app(app_def(app_id))

        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
def test_incremental_scale():
    """ Scale the instances of an app on Marathon-on-Marathon in steps until
        the first error, e.g. a timeout, is reached.
    """
    ensure_mom_version('1.4.0-RC7')
    cluster_info()
    print(available_resources())

    app_def = {
        "id": "cap-app",
        "instances": 1,
        "cmd": "for (( ; ; )); do sleep 100000000; done",
        "cpus": 0.001,
        "mem": 8,
        "disk": 0,
        "backoffFactor": 1.0,
        "backoffSeconds": 0,
    }

    with marathon_on_marathon():
        # shakedown.delete_app_wait('/cap-app')
        client = marathon.create_client()
        client.add_app(app_def)

        for new_size in incremental_steps(
                linear_step_function(step_size=1000)):
            shakedown.echo("Scaling to {}".format(new_size))
            shakedown.deployment_wait(
                app_id='cap-app',
                timeout=timedelta(minutes=10).total_seconds())

            client.scale_app('/cap-app', new_size)
            shakedown.deployment_wait(
                app_id='cap-app',
                timeout=timedelta(minutes=10).total_seconds())

            shakedown.echo("done.")