Example #1
def vip_apps(cluster):
    vip1 = '6.6.6.1:6661'
    test_app1, _ = get_test_app()
    test_app1['portDefinitions'][0]['labels'] = {'VIP_0': vip1}
    test_app2, _ = get_test_app()
    test_app2['portDefinitions'][0]['labels'] = {'VIP_0': 'foobarbaz:5432'}
    vip2 = 'foobarbaz.marathon.l4lb.thisdcos.directory:5432'
    with cluster.marathon.deploy_and_cleanup(test_app1):
        with cluster.marathon.deploy_and_cleanup(test_app2):
            yield ((test_app1, vip1), (test_app2, vip2))
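
A note on the vip2 value above: a named VIP label of the form 'name:port' is exposed by dcos-l4lb under a generated DNS name, while an address VIP such as '6.6.6.1:6661' is used as-is. A minimal sketch of that mapping (l4lb_addr is a hypothetical helper, not part of the test suite):

def l4lb_addr(vip_label, framework='marathon'):
    # Named VIP 'name:port' -> '<name>.<framework>.l4lb.thisdcos.directory:<port>'.
    # Address VIPs such as '6.6.6.1:6661' are reachable at that address directly.
    name, port = vip_label.split(':')
    return '{}.{}.l4lb.thisdcos.directory:{}'.format(name, framework, port)

assert l4lb_addr('foobarbaz:5432') == 'foobarbaz.marathon.l4lb.thisdcos.directory:5432'
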
Example #2
def test_if_minuteman_routes_to_vip(cluster):
    """Test if we are able to connect to a task with a vip using minuteman.
    """
    origin_app, origin_uuid = get_test_app()
    origin_app['portDefinitions'][0]['labels'] = {'VIP_0': '1.2.3.4:5000'}
    with cluster.marathon.deploy_and_cleanup(origin_app):
        proxy_app, proxy_uuid = get_test_app()
        with cluster.marathon.deploy_and_cleanup(proxy_app) as service_points:
            cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://1.2.3.4:5000/ping'
            ensure_routable(cmd, service_points)()
Example #3
def test_if_minuteman_routes_to_vip(cluster):
    """Test if we are able to connect to a task with a vip using minuteman.
    """
    origin_app, origin_uuid = get_test_app()
    origin_app['portDefinitions'][0]['labels'] = {'VIP_0': '1.2.3.4:5000'}
    with cluster.marathon.deploy_and_cleanup(origin_app):
        proxy_app, proxy_uuid = get_test_app()
        with cluster.marathon.deploy_and_cleanup(proxy_app) as service_points:
            cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://1.2.3.4:5000/ping'
            ensure_routable(cmd, service_points)()
Example #4
def vip_apps(dcos_api_session):
    vip1 = '6.6.6.1:6661'
    test_app1, _ = get_test_app(vip=vip1)
    name = 'myvipapp'
    port = 5432
    test_app2, _ = get_test_app(vip='{}:{}'.format(name, port))
    vip2 = '{}.marathon.l4lb.thisdcos.directory:{}'.format(name, port)
    with dcos_api_session.marathon.deploy_and_cleanup(test_app1):
        with dcos_api_session.marathon.deploy_and_cleanup(test_app2):
            yield ((test_app1, vip1), (test_app2, vip2))
Example #5
def vip_apps(dcos_api_session):
    vip1 = '6.6.6.1:6661'
    test_app1, _ = get_test_app()
    test_app1['portDefinitions'][0]['labels'] = {
        'VIP_0': vip1}
    test_app2, _ = get_test_app()
    test_app2['portDefinitions'][0]['labels'] = {
        'VIP_0': 'foobarbaz:5432'}
    vip2 = 'foobarbaz.marathon.l4lb.thisdcos.directory:5432'
    with dcos_api_session.marathon.deploy_and_cleanup(test_app1):
        with dcos_api_session.marathon.deploy_and_cleanup(test_app2):
            yield ((test_app1, vip1), (test_app2, vip2))
Example #6
def test_if_minuteman_routes_to_named_vip(dcos_api_session):
    """Test if we are able to connect to a task with a named vip using minuteman.
    """

    origin_app, origin_uuid = get_test_app()
    origin_app['portDefinitions'][0]['labels'] = {'VIP_0': 'foo:5000'}
    with dcos_api_session.marathon.deploy_and_cleanup(origin_app):
        proxy_app, proxy_uuid = get_test_app()
        with dcos_api_session.marathon.deploy_and_cleanup(
                proxy_app) as service_points:
            cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://foo.marathon.l4lb.thisdcos.directory:5000/ping'
            ensure_routable(cmd, service_points)()
Example #7
def octarine_runner(dcos_api_session, mode, uuid, uri, bind_port=None):
    log.info("Running octarine(mode={}, uuid={}, uri={}".format(mode, uuid, uri))

    octarine = "/opt/mesosphere/bin/octarine"

    bind_port_str = ""
    if bind_port is not None:
        bind_port_str = "-bindPort {}".format(bind_port)

    server_cmd = "{} -mode {} {} {}".format(octarine, mode, bind_port_str, uuid)
    log.info("Server: {}".format(server_cmd))

    proxy = ('http://127.0.0.1:$({} --client --port {})'.format(octarine, uuid))
    curl_cmd = '''"$(curl --fail --proxy {} {})"'''.format(proxy, uri)
    expected_output = '''"$(printf "{\\n    \\"pong\\": true\\n}")"'''
    check_cmd = """sh -c '[ {} = {} ]'""".format(curl_cmd, expected_output)
    log.info("Check: {}".format(check_cmd))

    app, uuid = get_test_app()
    app['requirePorts'] = True
    app['cmd'] = server_cmd
    app['healthChecks'] = [{
        "protocol": "COMMAND",
        "command": {"value": check_cmd},
        'gracePeriodSeconds': 5,
        'intervalSeconds': 10,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 30
    }]

    with dcos_api_session.marathon.deploy_and_cleanup(app):
        pass
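
The nested quoting in check_cmd is easy to misread. A quick sketch with purely illustrative inputs (the uuid and uri below are made up) shows what the assembled health check command looks like:

octarine = "/opt/mesosphere/bin/octarine"
uuid = "abc123"                              # made-up uuid
uri = "myapp.marathon.mesos:12345/ping"      # made-up uri
proxy = 'http://127.0.0.1:$({} --client --port {})'.format(octarine, uuid)
curl_cmd = '''"$(curl --fail --proxy {} {})"'''.format(proxy, uri)
expected_output = '''"$(printf "{\\n    \\"pong\\": true\\n}")"'''
check_cmd = """sh -c '[ {} = {} ]'""".format(curl_cmd, expected_output)
print(check_cmd)
# sh -c '[ "$(curl --fail --proxy http://127.0.0.1:$(/opt/mesosphere/bin/octarine --client --port abc123) myapp.marathon.mesos:12345/ping)" = "$(printf "{\n    \"pong\": true\n}")" ]'
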
Example #8
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(cluster):
    """Marathon app deployment integration test using the Mesos Containerizer

    This test verifies that a Marathon app using the Mesos containerizer with
    a Docker image can be deployed.

    This is done by assigning a unique UUID to each app and passing it to the
    Docker container as an env variable. After successful deployment, a
    "GET /test_uuid" request is issued to the app. The test succeeds if the
    returned UUID matches the one assigned to the test.

    When port mapping is available (MESOS-4777), this test should be updated to
    reflect that.
    """
    app, test_uuid = get_test_app()
    app['container'] = {
        'type': 'MESOS',
        'docker': {
            # TODO(cmaloney): Switch to an alpine image with glibc inside.
            'image': 'debian:jessie'
        },
        'volumes': [{
            'containerPath': '/opt/mesosphere',
            'hostPath': '/opt/mesosphere',
            'mode': 'RO'
        }]
    }
    cluster.marathon.deploy_test_app_and_check(app, test_uuid)
Example #9
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(cluster):
    """Marathon app deployment integration test using the Mesos Containerizer

    This test verifies that a Marathon app using the Mesos containerizer with
    a Docker image can be deployed.

    This is done by assigning a unique UUID to each app and passing it to the
    Docker container as an env variable. After successful deployment, a
    "GET /test_uuid" request is issued to the app. The test succeeds if the
    returned UUID matches the one assigned to the test.

    When port mapping is available (MESOS-4777), this test should be updated to
    reflect that.
    """
    app, test_uuid = get_test_app()
    app['container'] = {
        'type': 'MESOS',
        'docker': {
            # TODO(cmaloney): Switch to an alpine image with glibc inside.
            'image': 'debian:jessie'
        },
        'volumes': [{
            'containerPath': '/opt/mesosphere',
            'hostPath': '/opt/mesosphere',
            'mode': 'RO'
        }]
    }
    cluster.marathon.deploy_test_app_and_check(app, test_uuid)
Example #10
def test_if_ucr_app_can_be_deployed_with_image_whiteout(dcos_api_session):
    """Marathon app deployment integration test using the Mesos Containerizer.

    This test verifies that a marathon ucr app can execute a docker image
    with whiteout files. Whiteouts are files with a special meaning for
    the layered filesystem. For more details, please see:
    https://github.com/docker/docker/blob/master/pkg/archive/whiteouts.go

    Please note that the image 'mesosphere/whiteout:test' is built on top
    of 'alpine' which is used for UCR whiteout support testing. See:
    https://hub.docker.com/r/mesosphere/whiteout/
    """
    app, test_uuid = get_test_app()
    app['container'] = {
        'type': 'MESOS',
        'docker': {
            'image': 'mesosphere/whiteout:test'
        }
    }
    app['cmd'] = 'while true; do sleep 1; done'
    app['healthChecks'] = [{
        'protocol': 'COMMAND',
        'command': {'value': 'test ! -f /dir1/file1 && test ! -f /dir1/dir2/file2 && test -f /dir1/dir2/file3'},
        'gracePeriodSeconds': 5,
        'intervalSeconds': 10,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 3,
    }]
    with dcos_api_session.marathon.deploy_and_cleanup(app):
        # Trivial app: if it deploys, there is nothing else to check
        pass
Example #11
def test_if_ucr_app_can_be_deployed_with_image_digest(dcos_api_session):
    """Marathon app deployment integration test using the Mesos Containerizer.

    This test verifies that a marathon ucr app can execute a docker image
    by digest.
    """
    app, test_uuid = get_test_app()
    app['container'] = {
        'type': 'MESOS',
        'docker': {
            'image': 'library/alpine@sha256:9f08005dff552038f0ad2f46b8e65ff3d25641747d3912e3ea8da6785046561a'
        }
    }
    app['cmd'] = 'while true; do sleep 1; done'
    app['healthChecks'] = [{
        'protocol': 'COMMAND',
        'command': {'value': 'test -d $MESOS_SANDBOX'},
        'gracePeriodSeconds': 5,
        'intervalSeconds': 10,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 3,
    }]
    with dcos_api_session.marathon.deploy_and_cleanup(app):
        # Trivial app: if it deploys, there is nothing else to check
        pass
Example #12
def test_if_ucr_app_runs_in_new_pid_namespace(dcos_api_session):
    # We run a marathon app instead of a metronome job because metronome
    # doesn't support running docker images with the UCR. We need this
    # functionality in order to test that the pid namespace isolator
    # is functioning correctly.
    app, test_uuid = get_test_app(container_type=Container.MESOS)

    ps_output_file = 'ps_output'
    app['cmd'] = 'ps ax -o pid= > {}; sleep 1000'.format(ps_output_file)

    with dcos_api_session.marathon.deploy_and_cleanup(app, check_health=False):
        marathon_framework_id = dcos_api_session.marathon.get(
            '/v2/info').json()['frameworkId']
        app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(
            app['id'])).json()['tasks'][0]

        # There is a short delay between the `app_task` starting and it writing
        # its output to the `ps_output_file`. Because of this, we wait up to 10
        # seconds for this file to appear before throwing an exception.
        @retrying.retry(wait_fixed=1000, stop_max_delay=10000)
        def get_ps_output():
            return dcos_api_session.mesos_sandbox_file(app_task['slaveId'],
                                                       marathon_framework_id,
                                                       app_task['id'],
                                                       ps_output_file)

        assert len(
            get_ps_output().split()
        ) <= 4, 'UCR app has more than 4 processes running in its pid namespace'
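
The `<= 4` bound reflects what should be visible inside the task's own pid namespace: roughly a container init process, the shell running `cmd`, `sleep`, and `ps` itself. A toy illustration of the check, with made-up pid output:

sample_ps_output = '    1\n    6\n    7\n    8\n'   # hypothetical `ps ax -o pid=` output inside the namespace
assert len(sample_ps_output.split()) <= 4
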
Example #13
def test_service_discovery_mesos_overlay(dcos_api_session):
    app_definition, test_uuid = get_test_app(
        container_type=Container.MESOS,
        host_port=9080,
        healthcheck_protocol=Healthcheck.MESOS_HTTP,
        network=Network.USER)

    assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay])
Example #14
def test_if_ucr_app_can_be_deployed(dcos_api_session, healthcheck):
    """Marathon app inside ucr deployment integration test.

    Verifies that a marathon docker app inside of a ucr container can be
    deployed and accessed as expected.
    """
    dcos_api_session.marathon.deploy_test_app_and_check(*get_test_app(
        container_type=Container.MESOS, healthcheck_protocol=healthcheck))
Example #15
def vip_app(container: Container, network: Network, host: str, vip: str):
    # A host port is only explicitly assigned for the USER network, because
    # marathon cannot assign one automatically in that case
    if network in [Network.HOST, Network.BRIDGE]:
        # both of these cases will rely on marathon to assign ports
        return get_test_app(network=network,
                            host_constraint=host,
                            vip=vip,
                            container_type=container)
    elif network == Network.USER:
        return get_test_app(network=network,
                            host_port=unused_port(Network.USER),
                            host_constraint=host,
                            vip=vip,
                            container_type=container)
    else:
        raise AssertionError('Unexpected network: {}'.format(network.value))
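
A usage sketch for the helper above, assuming this module's Container and Network enums are in scope; the host and VIP values are made up:

host_app, host_uuid = vip_app(Container.MESOS, Network.HOST, host='10.0.1.5', vip='1.2.3.4:7000')
user_app, user_uuid = vip_app(Container.DOCKER, Network.USER, host='10.0.1.5', vip='myvip:7000')
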
Example #16
def test_octarine(dcos_api_session, timeout=30):
    # This app binds to port 80. This is only required by the http (not srv)
    # transparent mode test. In transparent mode, we use ".mydcos.directory"
    # to go to localhost; the port attached there is only used to determine
    # which port to send traffic to on localhost. Once the request reaches
    # the proxy, that port is ignored and a request is made to port 80.

    app, uuid = get_test_app()
    app['acceptedResourceRoles'] = ["slave_public"]
    app['portDefinitions'][0]["port"] = 80
    app['requirePorts'] = True

    with dcos_api_session.marathon.deploy_and_cleanup(app) as service_points:
        port_number = service_points[0].port
        # Without requirePorts set, the app would not actually have been given port 80
        assert port_number == app['portDefinitions'][0]["port"]

        app_name = app["id"].strip("/")
        port_name = app['portDefinitions'][0]["name"]
        port_protocol = app['portDefinitions'][0]["protocol"]

        srv = "_{}._{}._{}.marathon.mesos".format(port_name, app_name,
                                                  port_protocol)
        addr = "{}.marathon.mesos".format(app_name)
        transparent_suffix = ".mydcos.directory"

        standard_mode = "standard"
        transparent_mode = "transparent"

        t_addr_bind = 2508
        t_srv_bind = 2509

        standard_addr = "{}:{}/ping".format(addr, port_number)
        standard_srv = "{}/ping".format(srv)
        transparent_addr = "{}{}:{}/ping".format(addr, transparent_suffix,
                                                 t_addr_bind)
        transparent_srv = "{}{}:{}/ping".format(srv, transparent_suffix,
                                                t_srv_bind)

        # The uuids are different between runs so that they don't have a
        # chance of colliding. They shouldn't anyways, but just to be safe.
        octarine_runner(dcos_api_session, standard_mode, uuid + "1",
                        standard_addr)
        octarine_runner(dcos_api_session, standard_mode, uuid + "2",
                        standard_srv)
        octarine_runner(dcos_api_session,
                        transparent_mode,
                        uuid + "3",
                        transparent_addr,
                        bind_port=t_addr_bind)
        octarine_runner(dcos_api_session,
                        transparent_mode,
                        uuid + "4",
                        transparent_srv,
                        bind_port=t_srv_bind)
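
To make the DNS names above concrete, this is what they expand to for a hypothetical app with id '/myapp', a port definition named 'http', and the tcp protocol (values purely illustrative):

app_name, port_name, port_protocol = 'myapp', 'http', 'tcp'
srv = "_{}._{}._{}.marathon.mesos".format(port_name, app_name, port_protocol)
addr = "{}.marathon.mesos".format(app_name)
print(srv)                                                     # _http._myapp._tcp.marathon.mesos
print(addr)                                                    # myapp.marathon.mesos
print("{}{}:{}/ping".format(addr, ".mydcos.directory", 2508))  # myapp.marathon.mesos.mydcos.directory:2508/ping
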
Example #17
def test_if_docker_app_can_be_deployed(dcos_api_session):
    """Marathon app inside docker deployment integration test.

    Verifies that a marathon app inside of a docker daemon container can be
    deployed and accessed as expected.
    """
    dcos_api_session.marathon.deploy_test_app_and_check(
        *get_test_app(network=Network.BRIDGE,
                      container_type=Container.DOCKER,
                      container_port=9080))
Example #18
def test_files_api(dcos_api_session):
    app, test_uuid = get_test_app()

    with dcos_api_session.marathon.deploy_and_cleanup(app):
        marathon_framework_id = dcos_api_session.marathon.get('/v2/info').json()['frameworkId']
        app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks'][0]

        for required_sandbox_file in ('stdout', 'stderr'):
            content = dcos_api_session.mesos_sandbox_file(
                app_task['slaveId'], marathon_framework_id, app_task['id'], required_sandbox_file)

            assert content, 'File {} should not be empty'.format(required_sandbox_file)
Example #19
def mesos_vip_app(num, network, host, vip, ucr=False):
    app = None
    uuid = None
    port = backend_port_st + num
    if ucr is False:
        app, uuid = get_test_app()
    else:
        app, uuid = get_test_app_in_ucr()
    app['id'] = '/viptest/' + app['id']
    app['mem'] = 16
    app['cpus'] = 0.01
    # define a health check that works with all the network options
    app['healthChecks'] = [{
        'protocol': 'MESOS_HTTP',
        'path': '/ping',
        'gracePeriodSeconds': 5,
        'intervalSeconds': 10,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 3,
    }]
    assert network != 'BRIDGE'
    if network == 'HOST':
        app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
                     '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0'
        app['portDefinitions'] = [{
            'protocol': 'tcp',
            'port': 0
        }]
        if vip is not None:
            app['portDefinitions'][0]['labels'] = {'VIP_0': vip}
        app['healthChecks'][0]['portIndex'] = 0
    if network == 'USER':
        app['ipAddress'] = {
            'discovery': {
                'ports': [{
                    'protocol': 'tcp',
                    'name': 'test',
                    'number': port,
                }]
            }
        }
        app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
                     '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py {}'.format(port)
        app['ipAddress']['networkName'] = 'dcos'
        if vip is not None:
            app['ipAddress']['discovery']['ports'][0]['labels'] = {'VIP_0': vip}
        app['healthChecks'][0]['port'] = port
        app['portDefinitions'] = []
    app['constraints'] = [['hostname', 'CLUSTER', host]]
    log.info('app: {}'.format(json.dumps(app)))
    return app, uuid
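
A usage sketch for the helper above (backend_port_st is a module-level constant; the host and VIP values are made up):

host_app, host_uuid = mesos_vip_app(0, 'HOST', '10.0.2.7', '1.2.3.4:9000')
user_app, user_uuid = mesos_vip_app(1, 'USER', '10.0.2.7', 'myvip:9000', ucr=True)
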
Example #20
def mesos_vip_app(num, network, host, vip, ucr=False):
    app = None
    uuid = None
    port = backend_port_st + num
    if ucr is False:
        app, uuid = get_test_app()
    else:
        app, uuid = get_test_app_in_ucr()
    app['id'] = '/viptest/' + app['id']
    app['mem'] = 16
    app['cpus'] = 0.01
    # define a health check that works with all the network options
    app['healthChecks'] = [{
        'protocol': 'MESOS_HTTP',
        'path': '/ping',
        'gracePeriodSeconds': 5,
        'intervalSeconds': 10,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 3,
    }]
    assert network != 'BRIDGE'
    if network == 'HOST':
        app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
                     '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0'
        app['portDefinitions'] = [{
            'protocol': 'tcp',
            'port': 0
        }]
        if vip is not None:
            app['portDefinitions'][0]['labels'] = {'VIP_0': vip}
        app['healthChecks'][0]['portIndex'] = 0
    if network == 'USER':
        app['ipAddress'] = {
            'discovery': {
                'ports': [{
                    'protocol': 'tcp',
                    'name': 'test',
                    'number': port,
                }]
            }
        }
        app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
                     '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py {}'.format(port)
        app['ipAddress']['networkName'] = 'dcos'
        if vip is not None:
            app['ipAddress']['discovery']['ports'][0]['labels'] = {'VIP_0': vip}
        app['healthChecks'][0]['port'] = port
        app['portDefinitions'] = []
    app['constraints'] = [['hostname', 'CLUSTER', host]]
    log.info('app: {}'.format(json.dumps(app)))
    return app, uuid
Example #21
def test_ip_per_container_with_named_vip(cluster):
    """Test if we are able to connect to a task with ip-per-container mode using named vip
    """
    origin_app, test_uuid = get_test_app_in_docker(ip_per_container=True)
    origin_app['container']['docker']['portMappings'][0]['labels'] = {'VIP_0': 'foo:6000'}
    origin_app['healthChecks'][0]['port'] = origin_app['container']['docker']['portMappings'][0]['containerPort']
    del origin_app['container']['docker']['portMappings'][0]['hostPort']
    del origin_app['healthChecks'][0]['portIndex']

    with cluster.marathon.deploy_and_cleanup(origin_app):
        proxy_app, proxy_uuid = get_test_app()
        with cluster.marathon.deploy_and_cleanup(proxy_app) as service_points:
            cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://foo.marathon.l4lb.thisdcos.directory:6000/ping'
            ensure_routable(cmd, service_points)()
Example #22
def test_ip_per_container_with_named_vip(cluster):
    """Test if we are able to connect to a task with ip-per-container mode using named vip
    """
    origin_app, test_uuid = get_test_app_in_docker(ip_per_container=True)
    origin_app['container']['docker']['portMappings'][0]['labels'] = {'VIP_0': 'foo:6000'}
    origin_app['healthChecks'][0]['port'] = origin_app['container']['docker']['portMappings'][0]['containerPort']
    del origin_app['container']['docker']['portMappings'][0]['hostPort']
    del origin_app['healthChecks'][0]['portIndex']

    with cluster.marathon.deploy_and_cleanup(origin_app):
        proxy_app, proxy_uuid = get_test_app()
        with cluster.marathon.deploy_and_cleanup(proxy_app) as service_points:
            cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://foo.marathon.l4lb.thisdcos.directory:6000/ping'
            ensure_routable(cmd, service_points)()
Example #23
def test_l4lb(dcos_api_session):
    """Test l4lb is load balancing between all the backends
       * create 5 apps using the same VIP
       * get uuid from the VIP in parallel from many threads
       * verify that 5 uuids have been returned
       * only testing if all 5 are hit at least once
    """
    numapps = 5
    numthreads = numapps * 4
    apps = []
    rvs = deque()
    with contextlib.ExitStack() as stack:
        for _ in range(numapps):
            origin_app, origin_uuid = get_test_app()
            # same vip for all the apps
            origin_app['portDefinitions'][0]['labels'] = {
                'VIP_0': '/l4lbtest:5000'
            }
            apps.append(origin_app)
            sp = stack.enter_context(
                dcos_api_session.marathon.deploy_and_cleanup(origin_app))
            # make sure that the service point responds
            geturl('http://{}:{}/ping'.format(sp[0].host, sp[0].port))
            # make sure that the VIP is responding too
            geturl(
                'http://l4lbtest.marathon.l4lb.thisdcos.directory:5000/ping')

        # make sure L4LB is actually doing some load balancing by making
        # many requests in parallel.
        def thread_request():
            # deque is thread safe
            rvs.append(
                geturl(
                    'http://l4lbtest.marathon.l4lb.thisdcos.directory:5000/test_uuid'
                ))

        threads = [
            threading.Thread(target=thread_request)
            for i in range(0, numthreads)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    expected_uuids = [a['id'].split('-')[2] for a in apps]
    received_uuids = [r['test_uuid'] for r in rvs if r is not None]
    assert len(set(expected_uuids)) == numapps
    assert len(set(received_uuids)) == numapps
    assert set(expected_uuids) == set(received_uuids)
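
The expected_uuids line assumes get_test_app() produces app ids of the form '/integration-test-<uuid>' with a dash-free uuid, so that the third '-'-separated field is the uuid. A toy check with a made-up id:

app_id = '/integration-test-8f0e7d5cdad24e67b1e4734e23b98c44'   # made-up id in that format
assert app_id.split('-')[2] == '8f0e7d5cdad24e67b1e4734e23b98c44'
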
Example #24
def test_ip_per_container(dcos_api_session):
    """Test if we are able to connect to a task running in ip-per-container mode.
    """
    # Launch the test_server in ip-per-container mode (user network)
    app_definition, test_uuid = get_test_app(container_type=Container.DOCKER, network=Network.USER, host_port=9080)

    assert len(dcos_api_session.slaves) >= 2, 'IP Per Container tests require 2 private agents to work'

    app_definition['instances'] = 2
    app_definition['constraints'] = [['hostname', 'UNIQUE']]

    with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=True) as service_points:
        app_port = app_definition['container']['docker']['portMappings'][0]['containerPort']
        cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}:{}/ping'.format(service_points[1].ip, app_port)
        ensure_routable(cmd, service_points[0].host, service_points[0].port)
Example #25
def test_if_marathon_app_can_be_deployed(cluster):
    """Marathon app deployment integration test

    This test verifies that a Marathon app can be deployed, and that the service
    points returned by Marathon indeed point to the app that was deployed.

    The application being deployed is a simple HTTP server written in Python.
    Please check test_server.py for more details.

    This is done by assigning a unique UUID to each app and passing it to the
    Docker container as an env variable. After successful deployment, a
    "GET /test_uuid" request is issued to the app. The test succeeds if the
    returned UUID matches the one assigned to the test.
    """
    cluster.marathon.deploy_test_app_and_check(*get_test_app())
Example #26
def test_if_marathon_app_can_be_deployed(cluster):
    """Marathon app deployment integration test

    This test verifies that a Marathon app can be deployed, and that the service
    points returned by Marathon indeed point to the app that was deployed.

    The application being deployed is a simple HTTP server written in Python.
    Please check test_server.py for more details.

    This is done by assigning a unique UUID to each app and passing it to the
    Docker container as an env variable. After successful deployment, a
    "GET /test_uuid" request is issued to the app. The test succeeds if the
    returned UUID matches the one assigned to the test.
    """
    cluster.marathon.deploy_test_app_and_check(*get_test_app())
Example #27
def test_files_api(dcos_api_session):
    app, test_uuid = get_test_app()

    with dcos_api_session.marathon.deploy_and_cleanup(app):
        marathon_framework_id = dcos_api_session.marathon.get(
            '/v2/info').json()['frameworkId']
        app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(
            app['id'])).json()['tasks'][0]

        for required_sandbox_file in ('stdout', 'stderr'):
            content = dcos_api_session.mesos_sandbox_file(
                app_task['slaveId'], marathon_framework_id, app_task['id'],
                required_sandbox_file)

            assert content, 'File {} should not be empty'.format(
                required_sandbox_file)
Example #28
def test_if_marathon_app_can_be_deployed_with_mesos_containerizer(
        dcos_api_session):
    """Marathon app deployment integration test using the Mesos Containerizer

    This test verifies that a Marathon app using the Mesos containerizer with
    a Docker image can be deployed.

    This is done by assigning a unique UUID to each app and passing it to the
    Docker container as an env variable. After successful deployment, a
    "GET /test_uuid" request is issued to the app. The test succeeds if the
    returned UUID matches the one assigned to the test.

    When port mapping is available (MESOS-4777), this test should be updated to
    reflect that.
    """
    app, test_uuid = get_test_app(container_type=Container.MESOS)
    dcos_api_session.marathon.deploy_test_app_and_check(app, test_uuid)
Example #29
def test_octarine(dcos_api_session, timeout=30):
    # This app binds to port 80. This is only required by the http (not srv)
    # transparent mode test. In transparent mode, we use ".mydcos.directory"
    # to go to localhost; the port attached there is only used to determine
    # which port to send traffic to on localhost. Once the request reaches
    # the proxy, that port is ignored and a request is made to port 80.

    app, uuid = get_test_app()
    app['acceptedResourceRoles'] = ["slave_public"]
    app['portDefinitions'][0]["port"] = 80
    app['requirePorts'] = True

    with dcos_api_session.marathon.deploy_and_cleanup(app) as service_points:
        port_number = service_points[0].port
        # Without requirePorts set, the app would not actually have been given port 80
        assert port_number == app['portDefinitions'][0]["port"]

        app_name = app["id"].strip("/")
        port_name = app['portDefinitions'][0]["name"]
        port_protocol = app['portDefinitions'][0]["protocol"]

        srv = "_{}._{}._{}.marathon.mesos".format(port_name, app_name, port_protocol)
        addr = "{}.marathon.mesos".format(app_name)
        transparent_suffix = ".mydcos.directory"

        standard_mode = "standard"
        transparent_mode = "transparent"

        t_addr_bind = 2508
        t_srv_bind = 2509

        standard_addr = "{}:{}/ping".format(addr, port_number)
        standard_srv = "{}/ping".format(srv)
        transparent_addr = "{}{}:{}/ping".format(addr, transparent_suffix, t_addr_bind)
        transparent_srv = "{}{}:{}/ping".format(srv, transparent_suffix, t_srv_bind)

        # The uuids are different between runs so that they don't have a
        # chance of colliding. They shouldn't anyways, but just to be safe.
        octarine_runner(dcos_api_session, standard_mode, uuid + "1", standard_addr)
        octarine_runner(dcos_api_session, standard_mode, uuid + "2", standard_srv)
        octarine_runner(dcos_api_session, transparent_mode, uuid + "3", transparent_addr, bind_port=t_addr_bind)
        octarine_runner(dcos_api_session, transparent_mode, uuid + "4", transparent_srv, bind_port=t_srv_bind)
Example #30
def test_l4lb(dcos_api_session):
    """Test l4lb is load balancing between all the backends
       * create 5 apps using the same VIP
       * get uuid from the VIP in parallel from many threads
       * verify that 5 uuids have been returned
       * only testing if all 5 are hit at least once
    """
    numapps = 5
    numthreads = numapps * 4
    apps = []
    rvs = deque()
    with contextlib.ExitStack() as stack:
        for _ in range(numapps):
            origin_app, origin_uuid = get_test_app()
            # same vip for all the apps
            origin_app['portDefinitions'][0]['labels'] = {'VIP_0': '/l4lbtest:5000'}
            apps.append(origin_app)
            sp = stack.enter_context(dcos_api_session.marathon.deploy_and_cleanup(origin_app))
            # make sure that the service point responds
            geturl('http://{}:{}/ping'.format(sp[0].host, sp[0].port))
            # make sure that the VIP is responding too
            geturl('http://l4lbtest.marathon.l4lb.thisdcos.directory:5000/ping')

        # make sure L4LB is actually doing some load balancing by making
        # many requests in parallel.
        def thread_request():
            # deque is thread safe
            rvs.append(geturl('http://l4lbtest.marathon.l4lb.thisdcos.directory:5000/test_uuid'))

        threads = [threading.Thread(target=thread_request) for i in range(0, numthreads)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    expected_uuids = [a['id'].split('-')[2] for a in apps]
    received_uuids = [r['test_uuid'] for r in rvs if r is not None]
    assert len(set(expected_uuids)) == numapps
    assert len(set(received_uuids)) == numapps
    assert set(expected_uuids) == set(received_uuids)
Example #31
def test_if_search_is_working(dcos_api_session):
    """Test if custom set search is working.

    Verifies that a marathon app running on the dcos_api_session can resolve names
    using the "search" domain the dcos_api_session was launched with (if any). It
    also tests that absolute lookups still work, and that lookups for names that
    are not sub-domains of the search domain fail properly.

    The application being deployed is a simple http server written in python.
    Please check test_server.py for more details.
    """
    # Launch the app
    app_definition, test_uuid = get_test_app()
    with dcos_api_session.marathon.deploy_and_cleanup(
            app_definition) as service_points:
        # Get the status
        r = requests.get('http://{}:{}/dns_search'.format(
            service_points[0].host, service_points[0].port))
        if r.status_code != 200:
            msg = "Test server replied with non-200 reply: '{0} {1}. "
            msg += "Detailed explanation of the problem: {2}"
            pytest.fail(msg.format(r.status_code, r.reason, r.text))

        r_data = r.json()

        # Make sure we hit the app we expected
        assert r_data['test_uuid'] == test_uuid

        expected_error = {'error': '[Errno -2] Name or service not known'}

        # Check that result matches expectations for this dcos_api_session
        if expanded_config['dns_search']:
            assert r_data['search_hit_leader'] in dcos_api_session.masters
            assert r_data['always_hit_leader'] in dcos_api_session.masters
            assert r_data['always_miss'] == expected_error
        else:  # No dns search, search hit should miss.
            assert r_data['search_hit_leader'] == expected_error
            assert r_data['always_hit_leader'] in dcos_api_session.masters
            assert r_data['always_miss'] == expected_error
Example #32
def test_if_search_is_working(dcos_api_session):
    """Test if custom set search is working.

    Verifies that a marathon app running on the dcos_api_session can resolve names using
    the "search" domain the dcos_api_session was launched with (if any). It also tests
    that absolute lookups still work, and that lookups for names that aren't
    sub-domains of the search domain fail properly.

    The application being deployed is a simple http server written in python.
    Please check test_server.py for more details.
    """
    # Launch the app
    app_definition, test_uuid = get_test_app()
    with dcos_api_session.marathon.deploy_and_cleanup(app_definition) as service_points:
        # Get the status
        r = requests.get('http://{}:{}/dns_search'.format(service_points[0].host,
                                                          service_points[0].port))
        if r.status_code != 200:
            msg = "Test server replied with non-200 reply: '{0} {1}. "
            msg += "Detailed explanation of the problem: {2}"
            pytest.fail(msg.format(r.status_code, r.reason, r.text))

        r_data = r.json()

        # Make sure we hit the app we expected
        assert r_data['test_uuid'] == test_uuid

        expected_error = {'error': '[Errno -2] Name or service not known'}

        # Check that result matches expectations for this dcos_api_session
        if expanded_config['dns_search']:
            assert r_data['search_hit_leader'] in dcos_api_session.masters
            assert r_data['always_hit_leader'] in dcos_api_session.masters
            assert r_data['always_miss'] == expected_error
        else:  # No dns search, search hit should miss.
            assert r_data['search_hit_leader'] == expected_error
            assert r_data['always_hit_leader'] in dcos_api_session.masters
            assert r_data['always_miss'] == expected_error
Example #33
def octarine_runner(dcos_api_session, mode, uuid, uri, bind_port=None):
    log.info("Running octarine(mode={}, uuid={}, uri={}".format(
        mode, uuid, uri))

    octarine = "/opt/mesosphere/bin/octarine"

    bind_port_str = ""
    if bind_port is not None:
        bind_port_str = "-bindPort {}".format(bind_port)

    server_cmd = "{} -mode {} {} {}".format(octarine, mode, bind_port_str,
                                            uuid)
    log.info("Server: {}".format(server_cmd))

    proxy = ('http://127.0.0.1:$({} --client --port {})'.format(
        octarine, uuid))
    curl_cmd = '''"$(curl --fail --proxy {} {})"'''.format(proxy, uri)
    expected_output = '''"$(printf "{\\n    \\"pong\\": true\\n}")"'''
    check_cmd = """sh -c '[ {} = {} ]'""".format(curl_cmd, expected_output)
    log.info("Check: {}".format(check_cmd))

    app, uuid = get_test_app()
    app['requirePorts'] = True
    app['cmd'] = server_cmd
    app['healthChecks'] = [{
        "protocol": "COMMAND",
        "command": {
            "value": check_cmd
        },
        'gracePeriodSeconds': 5,
        'intervalSeconds': 10,
        'timeoutSeconds': 10,
        'maxConsecutiveFailures': 30
    }]

    with dcos_api_session.marathon.deploy_and_cleanup(app):
        pass
Example #34
def test_if_ucr_app_runs_in_new_pid_namespace(dcos_api_session):
    # We run a marathon app instead of a metronome job because metronome
    # doesn't support running docker images with the UCR. We need this
    # functionality in order to test that the pid namespace isolator
    # is functioning correctly.
    app, test_uuid = get_test_app(container_type=Container.MESOS)

    ps_output_file = 'ps_output'
    app['cmd'] = 'ps ax -o pid= > {}; sleep 1000'.format(ps_output_file)

    with dcos_api_session.marathon.deploy_and_cleanup(app, check_health=False):
        marathon_framework_id = dcos_api_session.marathon.get(
            '/v2/info').json()['frameworkId']
        app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(
            app['id'])).json()['tasks'][0]

        content = dcos_api_session.mesos_sandbox_file(app_task['slaveId'],
                                                      marathon_framework_id,
                                                      app_task['id'],
                                                      ps_output_file)

        assert len(
            content.split()
        ) <= 4, 'UCR app has more than 4 processes running in its pid namespace'
Example #35
def test_if_marathon_app_can_be_debugged(dcos_api_session):
    # Launch a basic marathon app (no image), so we can debug into it!
    # Cannot use deploy_test_app_and_check because we must attach to a running app/task/container.
    app, test_uuid = get_test_app()
    app_id = 'integration-test-{}'.format(test_uuid)
    with dcos_api_session.marathon.deploy_and_cleanup(app):
        # Fetch the mesos master state once the task is running
        master_ip = dcos_api_session.masters[0]
        r = dcos_api_session.get('/state', host=master_ip, port=5050)
        assert r.status_code == 200
        state = r.json()

        # Find the agent_id and container_id from master state
        container_id = None
        agent_id = None
        for framework in state['frameworks']:
            for task in framework['tasks']:
                if app_id in task['id']:
                    container_id = task['statuses'][0]['container_status']['container_id']['value']
                    agent_id = task['slave_id']
        assert container_id is not None, 'Container ID not found for instance of app_id {}'.format(app_id)
        assert agent_id is not None, 'Agent ID not found for instance of app_id {}'.format(app_id)

        # Find hostname and URL from agent_id
        agent_hostname = None
        for agent in state['slaves']:
            if agent['id'] == agent_id:
                agent_hostname = agent['hostname']
        assert agent_hostname is not None, 'Agent hostname not found for agent_id {}'.format(agent_id)
        logging.debug('Located %s with containerID %s on agent %s', app_id, container_id, agent_hostname)

        def _post_agent(url, headers, json=None, data=None, stream=False):
            r = dcos_api_session.post(
                url,
                host=agent_hostname,
                port=5051,
                headers=headers,
                json=json,
                data=data,
                stream=stream)
            assert r.status_code == 200
            return r

        # Prepare nested container id data
        nested_container_id = {
            'value': 'debug-%s' % str(uuid.uuid4()),
            'parent': {'value': '%s' % container_id}}

        # Launch debug session and attach to output stream of debug container
        output_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/recordio',
            'Message-Accept': 'application/json'
        }
        lncs_data = {
            'type': 'LAUNCH_NESTED_CONTAINER_SESSION',
            'launch_nested_container_session': {
                'command': {'value': 'cat'},
                'container_id': nested_container_id}}
        launch_output = _post_agent('/api/v1', output_headers, json=lncs_data, stream=True)

        # Attach to output stream of nested container
        attach_out_data = {
            'type': 'ATTACH_CONTAINER_OUTPUT',
            'attach_container_output': {'container_id': nested_container_id}}
        attached_output = _post_agent('/api/v1', output_headers, json=attach_out_data, stream=True)

        # Attach to input stream of debug container and stream a message
        input_headers = {
            'Content-Type': 'application/recordio',
            'Message-Content-Type': 'application/json',
            'Accept': 'application/json',
            'Transfer-Encoding': 'chunked'
        }
        _post_agent('/api/v1', input_headers, data=input_streamer(nested_container_id))

        # Verify the streamed output from the launch session
        meowed = False
        decoder = Decoder(lambda s: json.loads(s.decode("UTF-8")))
        for chunk in launch_output.iter_content():
            for r in decoder.decode(chunk):
                if r['type'] == 'DATA':
                    logging.debug('Extracted data chunk: %s', r['data'])
                    assert r['data']['data'] == 'meow', 'Output did not match expected'
                    meowed = True
        assert meowed, 'Read launch output without seeing meow.'

        meowed = False
        # Verify the message from the attached output stream
        for chunk in attached_output.iter_content():
            for r in decoder.decode(chunk):
                if r['type'] == 'DATA':
                    logging.debug('Extracted data chunk: %s', r['data'])
                    assert r['data']['data'] == 'meow', 'Output did not match expected'
                    meowed = True
        assert meowed, 'Read output stream without seeing meow.'
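
The launch/attach calls above stream RecordIO-framed JSON messages: each record is the payload length in decimal, a newline, then that many bytes. The sketch below only illustrates that framing; it is not the Decoder or input_streamer helpers used by the test:

import json

def recordio_encode(message):
    # RecordIO framing: b"<length>\n<payload>"
    payload = json.dumps(message).encode('utf-8')
    return str(len(payload)).encode('utf-8') + b'\n' + payload

print(recordio_encode({'type': 'HEARTBEAT'}))   # b'21\n{"type": "HEARTBEAT"}'
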
Example #36
def test_if_marathon_app_can_be_debugged(cluster):
    # Launch a basic marathon app (no image), so we can debug into it!
    # Cannot use deploy_test_app_and_check because we must attach to a running app/task/container.
    app, test_uuid = get_test_app()
    app_id = 'integration-test-{}'.format(test_uuid)
    with cluster.marathon.deploy_and_cleanup(app):
        # Fetch the mesos master state once the task is running
        master_state_url = 'http://{}:{}/state'.format(cluster.masters[0],
                                                       5050)
        r = requests.get(master_state_url)
        logging.debug('Got %s with request for %s. Response: \n%s',
                      r.status_code, master_state_url, r.text)
        assert r.status_code == 200
        state = r.json()

        # Find the agent_id and container_id from master state
        container_id = None
        agent_id = None
        for framework in state['frameworks']:
            for task in framework['tasks']:
                if app_id in task['id']:
                    container_id = task['statuses'][0]['container_status'][
                        'container_id']['value']
                    agent_id = task['slave_id']
        assert container_id is not None, 'Container ID not found for instance of app_id {}'.format(
            app_id)
        assert agent_id is not None, 'Agent ID not found for instance of app_id {}'.format(
            app_id)

        # Find hostname and URL from agent_id
        agent_hostname = None
        for agent in state['slaves']:
            if agent['id'] == agent_id:
                agent_hostname = agent['hostname']
        assert agent_hostname is not None, 'Agent hostname not found for agent_id {}'.format(
            agent_id)
        agent_v1_url = 'http://{}:{}/api/v1'.format(agent_hostname, 5051)
        logging.debug('Located %s with containerID %s on agent %s', app_id,
                      container_id, agent_hostname)

        # Prepare nested container id data
        nested_container_id = {
            'value': 'debug-%s' % str(uuid.uuid4()),
            'parent': {
                'value': '%s' % container_id
            }
        }

        # Launch debug session and attach to output stream of debug container
        output_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json+recordio',
            'Connection': 'keep-alive'
        }
        lncs_data = {
            'type': 'LAUNCH_NESTED_CONTAINER_SESSION',
            'launch_nested_container_session': {
                'command': {
                    'value': 'cat'
                },
                'container_id': nested_container_id
            }
        }
        launch_output = post(agent_v1_url,
                             output_headers,
                             json=lncs_data,
                             stream=True)

        # Attach to output stream of nested container
        attach_out_data = {
            'type': 'ATTACH_CONTAINER_OUTPUT',
            'attach_container_output': {
                'container_id': nested_container_id
            }
        }
        attached_output = post(agent_v1_url,
                               output_headers,
                               json=attach_out_data,
                               stream=True)

        # Attach to input stream of debug container and stream a message
        input_headers = {
            'Content-Type': 'application/json+recordio',
            'Accept': 'application/json',
            'Connection': 'keep-alive',
            'Transfer-Encoding': 'chunked'
        }
        post(agent_v1_url,
             input_headers,
             data=input_streamer(nested_container_id))

        # Verify the streamed output from the launch session
        meowed = False
        decoder = Decoder(lambda s: json.loads(s.decode("UTF-8")))
        for chunk in launch_output.iter_content():
            for r in decoder.decode(chunk):
                if r['type'] == 'DATA':
                    logging.debug('Extracted data chunk: %s', r['data'])
                    assert r['data'][
                        'data'] == 'meow', 'Output did not match expected'
                    meowed = True
        assert meowed, 'Read launch output without seeing meow.'

        meowed = False
        # Verify the message from the attached output stream
        for chunk in attached_output.iter_content():
            for r in decoder.decode(chunk):
                if r['type'] == 'DATA':
                    logging.debug('Extracted data chunk: %s', r['data'])
                    assert r['data'][
                        'data'] == 'meow', 'Output did not match expected'
                    meowed = True
        assert meowed, 'Read output stream without seeing meow.'
Example #37
def test_service_discovery_docker_overlay(dcos_api_session):
    app_definition, test_uuid = get_test_app(container_type=Container.DOCKER,
                                             network=Network.USER,
                                             host_port=9080)
    del app_definition['container']['docker']['portMappings'][0]['hostPort']
    assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay])
Example #38
def _service_discovery_test(dcos_api_session, docker_network_bridge):
    """Service discovery integration test

    This test verifies that service discovery works by comparing marathon data
    with information from mesos-dns and from the containers themselves.

    This is achieved by deploying an application to marathon with two instances
    and the ["hostname", "UNIQUE"] constraint set. This should result in the
    containers being deployed to two different slaves.

    The application being deployed is a simple http server written in python.
    Please check test_server.py for more details.

    The next step is comparing the service points provided by marathon with those
    reported by mesos-dns. The tricky part here is that it may take some time for
    mesos-dns to catch up with changes in the dcos_api_session.

    Finally, one of the service points is verified as seen by the other container.

                        +------------------------+   +------------------------+
                        |          Slave 1       |   |         Slave 2        |
                        |                        |   |                        |
                        | +--------------------+ |   | +--------------------+ |
    +--------------+    | |                    | |   | |                    | |
    |              |    | |   App instance A   +------>+   App instance B   | |
    |   TC Agent   +<---->+                    | |   | |                    | |
    |              |    | |   "test server"    +<------+    "reflector"     | |
    +--------------+    | |                    | |   | |                    | |
                        | +--------------------+ |   | +--------------------+ |
                        +------------------------+   +------------------------+

    Code running on the TC agent connects to one of the containers (let's call it
    the "test server") and makes a POST request with the IP and PORT service point
    of the second container (let's call it the "reflector") as parameters. The
    test server in turn connects to the other container and makes a "GET /reflect"
    request. The reflector responds with the test server's IP as seen by it and
    the test UUID provided to it by Marathon. This data is then returned to the
    TC agent in response to the POST request issued earlier.

    The test succeeds if the test UUIDs of the test server, the reflector, and the
    test itself all match, and the IP of the test server matches the service point
    of that container as reported by Marathon.
    """

    # TODO(cmaloney): For non docker network bridge we should just do a mesos container.
    if docker_network_bridge:
        app_definition, test_uuid = get_test_app(
            container_type=Container.DOCKER,
            network=Network.BRIDGE,
            container_port=2020,
            host_port=9080)
    else:
        app_definition, test_uuid = get_test_app(
            container_type=Container.DOCKER)

    app_definition['instances'] = 2

    assert len(
        dcos_api_session.slaves) >= 2, "Test requires a minimum of two agents"

    app_definition["constraints"] = [
        ["hostname", "UNIQUE"],
    ]

    with dcos_api_session.marathon.deploy_and_cleanup(
            app_definition) as service_points:
        # Verify if Mesos-DNS agrees with Marathon:
        @retrying.retry(wait_fixed=1000,
                        stop_max_delay=DNS_ENTRY_UPDATE_TIMEOUT * 1000,
                        retry_on_result=lambda ret: ret is None,
                        retry_on_exception=lambda x: False)
        def _pool_for_mesos_dns():
            r = dcos_api_session.get(
                '/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(
                    app_definition['id'].lstrip('/')))
            assert r.status_code == 200

            r_data = r.json()
            if r_data == [{
                    'host': '',
                    'port': '',
                    'service': '',
                    'ip': ''
            }] or len(r_data) < len(service_points):
                logging.info("Waiting for Mesos-DNS to update entries")
                return None
            else:
                logging.info("Mesos-DNS entries have been updated!")
                return r_data

        try:
            r_data = _pool_for_mesos_dns()
        except retrying.RetryError:
            msg = "Mesos DNS has failed to update entries in {} seconds."
            pytest.fail(msg.format(DNS_ENTRY_UPDATE_TIMEOUT))

        marathon_provided_servicepoints = sorted(
            (x.host, x.port) for x in service_points)
        mesosdns_provided_servicepoints = sorted(
            (x['ip'], int(x['port'])) for x in r_data)
        assert marathon_provided_servicepoints == mesosdns_provided_servicepoints

        # Verify if containers themselves confirm what Marathon says:
        payload = {
            "reflector_ip": service_points[1].host,
            "reflector_port": service_points[1].port
        }
        r = requests.post(
            'http://{}:{}/your_ip'.format(service_points[0].host,
                                          service_points[0].port), payload)
        if r.status_code != 200:
            msg = "Test server replied with non-200 reply: '{status_code} {reason}. "
            msg += "Detailed explanation of the problem: {text}"
            pytest.fail(
                msg.format(status_code=r.status_code,
                           reason=r.reason,
                           text=r.text))

        r_data = r.json()
        assert r_data['reflector_uuid'] == test_uuid
        assert r_data['test_uuid'] == test_uuid
        if len(dcos_api_session.slaves) >= 2:
            # When len(slaves)==1, we are connecting through docker-proxy using
            # docker0 interface ip. This makes this assertion useless, so we skip
            # it and rely on matching test uuid between containers only.
            assert r_data['my_ip'] == service_points[0].host
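
For reference, the Mesos-DNS services endpoint polled above is keyed by a name of the form '_<app>._tcp.marathon.mesos'; a quick sketch of how that lookup path is built for a hypothetical app id:

app_id = '/integration-test-8f0e7d5cdad24e67b1e4734e23b98c44'   # made-up id
path = '/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(app_id.lstrip('/'))
print(path)   # /mesos_dns/v1/services/_integration-test-8f0e7d5cdad24e67b1e4734e23b98c44._tcp.marathon.mesos
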
Example #39
def test_service_discovery_mesos_host(dcos_api_session):
    app_definition, test_uuid = get_test_app(
        container_type=Container.MESOS, healthcheck_protocol=Healthcheck.HTTP)

    assert_service_discovery(dcos_api_session, app_definition, [DNSHost])
Example #40
def test_service_discovery_docker_host(dcos_api_session):
    app_definition, test_uuid = get_test_app(container_type=Container.DOCKER,
                                             network=Network.HOST)
    assert_service_discovery(dcos_api_session, app_definition, [DNSHost])
Example #41
def test_service_discovery_docker_overlay_port_mapping(dcos_api_session):
    app_definition, test_uuid = get_test_app(container_type=Container.DOCKER,
                                             network=Network.USER,
                                             host_port=9080)
    assert_service_discovery(dcos_api_session, app_definition,
                             [DNSOverlay, DNSPortMap])
Example #42
def test_service_discovery_docker_bridge(dcos_api_session):
    app_definition, test_uuid = get_test_app(container_type=Container.DOCKER,
                                             network=Network.BRIDGE,
                                             container_port=2020,
                                             host_port=9080)
    assert_service_discovery(dcos_api_session, app_definition, [DNSPortMap])