# Example 1
def test_scale_app_in_group_then_group():
    """Scale one app inside a group, then scale the whole group.

    Verifies the two operations compose: scaling the group multiplies each
    app's *current* instance count, including the app scaled individually.
    """
    with marathon_on_marathon():
        client = marathon.create_client()
        try:
            # Best-effort cleanup of a leftover group from a previous run;
            # failure (e.g. group absent) is expected and ignored.
            client.remove_group('/test-group', True)
            shakedown.deployment_wait()
        except Exception:
            pass

        client.create_group(group())
        shakedown.deployment_wait()

        group_apps = client.get_group('/test-group/sleep')
        apps = group_apps['apps']
        assert len(apps) == 2
        tasks1 = client.get_tasks('/test-group/sleep/goodnight')
        tasks2 = client.get_tasks('/test-group/sleep/goodnight2')
        assert len(tasks1) == 1
        assert len(tasks2) == 1

        # Scale just one app in the group.
        client.scale_app('/test-group/sleep/goodnight', 2)
        shakedown.deployment_wait()
        tasks1 = client.get_tasks('/test-group/sleep/goodnight')
        tasks2 = client.get_tasks('/test-group/sleep/goodnight2')
        assert len(tasks1) == 2
        assert len(tasks2) == 1

        # Scale the group after 1 app in the group was scaled: every app's
        # instance count doubles (2 -> 4 and 1 -> 2).
        client.scale_group('/test-group/sleep', 2)
        shakedown.deployment_wait()
        time.sleep(1)
        tasks1 = client.get_tasks('/test-group/sleep/goodnight')
        tasks2 = client.get_tasks('/test-group/sleep/goodnight2')
        assert len(tasks1) == 4
        assert len(tasks2) == 2
def test_pinned_task_does_not_scale_to_unpinned_host():
    """ Tests when a task lands on a pinned node (and barely fits) when asked to
        scale past the resources of that node will not scale.
    """
    app_def = app('pinned')
    host = ip_other_than_mom()
    pin_to_host(app_def, host)
    # Barely fits: only 1 instance can fit on the node.
    app_def['cpus'] = 3.5
    with marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(app_def)
        shakedown.deployment_wait()

        client.scale_app('pinned', 2)
        # Typical deployments are sub 3 secs; after 5 the scale-up should
        # still be stuck waiting for a matching resource offer.
        time.sleep(5)
        deployments = client.get_deployments()
        tasks = client.get_tasks('/pinned')

        # Still deploying: the second instance never launched.
        assert len(deployments) == 1
        assert len(tasks) == 1
def test_pinned_task_scales_on_host_only():
    """Scaling a host-pinned app must place every new instance on that node."""
    pinned_app = app('pinned')
    target_host = ip_other_than_mom()
    pin_to_host(pinned_app, target_host)

    with marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(pinned_app)
        shakedown.deployment_wait()

        # The single initial task must already sit on the pinned node.
        tasks = client.get_tasks('/pinned')
        assert len(tasks) == 1
        assert tasks[0]['host'] == target_host

        client.scale_app('pinned', 10)
        shakedown.deployment_wait()

        # All ten instances end up on the same pinned node.
        tasks = client.get_tasks('/pinned')
        assert len(tasks) == 10
        assert all(task['host'] == target_host for task in tasks)
def test_bad_uri():
    """Marathon must surface a task failure when an app's fetch URI is not
    fetchable."""
    app_id = uuid.uuid4().hex
    app_def = app(app_id)
    # Point the fetcher at an artifact that does not exist.
    app_def['fetch'] = [{
      "uri": "http://mesosphere.io/missing-artifact"
    }]

    with marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(app_def)

        # Poll until the failure message appears (up to 10s, every 1s).
        @retrying.retry(wait_fixed=1000, stop_max_delay=10000)
        def check_failure_message():
            failure_message = client.get_app(app_id)['lastTaskFailure']['message']
            assert "Failed to fetch all URIs for container" in failure_message

        check_failure_message()
def test_docker_dns_mapping():
    """ Tests that a running docker task is accessible from DNS.
    """
    app_id = uuid.uuid4().hex
    with marathon_on_marathon():
        client = marathon.create_client()
        app_json = app_docker(app_id)
        client.add_app(app_json)
        shakedown.deployment_wait()

        # Negative control: a bogus DNS name must not resolve/ping.
        bad_cmd = 'ping -c 1 docker-test.marathon-user.mesos-bad'
        status, _ = shakedown.run_command_on_master(bad_cmd)
        assert not status

        # Positive check, retried while DNS propagates (up to 10s).
        @retrying.retry(stop_max_delay=10000)
        def check_dns():
            cmd = 'ping -c 1 {}.marathon-user.mesos'.format(app_id)
            wait_for_dns('{}.marathon-user.mesos'.format(app_id))
            status, _ = shakedown.run_command_on_master(cmd)
            assert status

        # BUG FIX: check_dns was defined but never invoked, so the positive
        # DNS assertion never ran.
        check_dns()
# Example 6
def test_incremental_scale():
    """
    Scale instances of app in steps until the first error, e.g. a timeout, is
    reached.
    """
    ensure_mom_version('1.4.0-RC7')

    cluster_info()
    print(available_resources())

    app_def = {
      "id": "cap-app",
      "instances": 1,
      # Busy shell loop that sleeps forever; the task never exits on its own.
      "cmd": "for (( ; ; )); do sleep 100000000; done",
      "cpus": 0.001,
      "mem": 8,
      "disk": 0,
      "backoffFactor": 1.0,
      "backoffSeconds": 0,
    }

    with marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(app_def)

        # Deployments at large instance counts can be slow; allow 10 minutes.
        deploy_timeout = timedelta(minutes=10).total_seconds()

        for new_size in incremental_steps(linear_step_function(step_size=1000)):
            shakedown.echo("Scaling to {}".format(new_size))
            # Wait for the previous deployment (or the initial launch) first.
            shakedown.deployment_wait(app_id='cap-app', timeout=deploy_timeout)

            # Scale to the next step size and wait for it to settle.
            client.scale_app('/cap-app', new_size)
            shakedown.deployment_wait(app_id='cap-app', timeout=deploy_timeout)
            shakedown.echo("done.")
# Example 7
def test_launch_container_with_persistent_volume():
    """ Tests launching a task with PV.  It will write to a file in the PV.
        The app is killed and restarted and we can still read from the PV.
    """
    with marathon_on_marathon():
        app_def = persistent_volume_app()
        app_id = app_def['id']
        client = marathon.create_client()
        client.add_app(app_def)
        shakedown.deployment_wait()

        # The first run appends one 'hello' line to the file in the PV.
        _assert_pv_content(client, app_id, 'hello\n')

        client.restart_app(app_id)
        shakedown.deployment_wait()

        # The restarted task reuses the same volume and appends a second line.
        _assert_pv_content(client, app_id, 'hello\nhello\n')


def _assert_pv_content(client, app_id, expected):
    """Curl the single task of app_id and assert the PV file's content."""
    tasks = client.get_tasks(app_id)
    assert len(tasks) == 1

    port = tasks[0]['ports'][0]
    host = tasks[0]['host']
    cmd = "curl {}:{}/data/foo".format(host, port)
    run, data = shakedown.run_command_on_master(cmd)

    assert run, "{} did not succeed".format(cmd)
    assert data == expected, "'{}' was not equal to {!r}".format(data, expected)
# Example 8
def test_launch_mesos_mom_graceperiod():
    """ Test the 'taskKillGracePeriodSeconds' in a MoM environment.  Read more details
        on this test in `test_root_marathon.py::test_launch_mesos_root_marathon_graceperiod`
    """
    app_id = uuid.uuid4().hex
    default_graceperiod = 3
    graceperiod = 20

    # Build an app that ignores SIGTERM (test.py) so the grace period governs
    # how long the task lingers after a kill request.
    app_def = app_mesos(app_id)
    app_def['taskKillGracePeriodSeconds'] = graceperiod
    app_def['fetch'] = [{"uri": "https://downloads.mesosphere.com/testing/test.py"}]
    app_def['cmd'] = '/opt/mesosphere/bin/python test.py'

    with marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(app_def)
        shakedown.deployment_wait()

        assert shakedown.get_service_task('marathon-user', app_id) is not None

        # Request the kill by scaling to zero.
        client.scale_app(app_id, 0)
        assert shakedown.get_service_task('marathon-user', app_id) is not None

        # The task must survive past the default grace period...
        time.sleep(default_graceperiod + 1)
        assert shakedown.get_service_task('marathon-user', app_id) is not None

        # ...but be gone once the configured grace period has elapsed.
        time.sleep(graceperiod)
        assert shakedown.get_service_task('marathon-user', app_id) is None
# Example 9
def test_mom_when_disconnected_from_zk():
    """ Launch an app from MoM.  Then knock out access to zk from the MoM.
        Verify the task is still good.
    """
    app_def = app('zk-failure')
    host = ip_other_than_mom()
    pin_to_host(app_def, host)
    with marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(app_def)
        shakedown.deployment_wait()
        tasks = client.get_tasks('/zk-failure')
        original_task_id = tasks[0]['id']

        with shakedown.iptable_rules(host):
            block_port(host, 2181)  # ZooKeeper client port
            #  time of the zk block
            time.sleep(10)

        # after access to zk is restored.
        @retrying.retry(wait_fixed=1000, stop_max_delay=3000)
        def check_task_is_back():
            tasks = client.get_tasks('/zk-failure')
            # BUG FIX: this was a bare comparison with no effect; assert so
            # the retry actually verifies the task was not replaced.
            assert tasks[0]['id'] == original_task_id

        # BUG FIX: the checker was defined but never invoked.
        check_task_is_back()
# Example 10
def teardown_module(module):
    """Remove every group (and thereby all apps) from the MoM once this
    module's tests have finished."""
    with marathon_on_marathon():
        marathon.create_client().remove_group("/", True)
        shakedown.deployment_wait()
# Example 11
def setup_module(module):
    # Ensure a Marathon-on-Marathon instance exists and is reachable before
    # any test in this module runs, then start from a clean slate.
    common.ensure_mom()
    common.wait_for_marathon_up('marathon-user')
    common.cluster_info()
    with marathon_on_marathon():
        clear_marathon()
# Example 12
def setup_function(function):
    # Wait for the MoM endpoint to be responsive, then remove any apps left
    # over from a previous test so each test starts clean.
    shakedown.wait_for_service_endpoint('marathon-user')
    with marathon_on_marathon():
        delete_all_apps_wait()
# Example 13
def teardown_module(module):
    # Remove all apps from the MoM after this module's tests complete.
    with marathon_on_marathon():
        delete_all_apps_wait()