def vip_test(dcos_api_session, r):
    """Deploy an origin app that advertises a VIP and a proxy app, then
    verify the VIP address is routable from inside the proxy container.

    ``r`` carries the parameters for one VIP test case: ``vip``, ``vipaddr``,
    ``vipnet``, ``proxynet`` and ``samehost``.
    """
    r.log('START')
    agents = list(dcos_api_session.slaves)
    # make sure we can reproduce: seed with the VIP so the same test case
    # always picks the same agents.
    random.seed(r.vip)
    random.shuffle(agents)
    host1 = agents[0]
    host2 = agents[0]
    if not r.samehost:
        # Placing origin and proxy on different agents needs at least two.
        assert len(agents) >= 2, 'test requires at least 2 private agents'
        host2 = agents[1]
    log.debug('host1 is: {}'.format(host1))
    log.debug('host2 is: {}'.format(host2))
    origin_app, app_uuid = get_test_app_in_docker()
    origin_app['container']['docker']['portMappings'][0]['labels'] = {'VIP_0': r.vip}
    origin_app['container']['docker']['network'] = r.vipnet
    origin_app['mem'] = 0
    origin_app['cpu'] = 0
    if r.vipnet == 'USER':
        origin_app['ipAddress'] = {'networkName': 'dcos'}
    if r.vipnet == 'HOST':
        # HOST networking: the server must bind the Marathon-assigned $PORT0,
        # and the VIP label moves from the port mapping to portDefinitions.
        origin_app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
            '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0'
        origin_app['container']['docker']['portMappings'][0]['hostPort'] = 0
        origin_app['container']['docker']['portMappings'][0]['containerPort'] = 0
        origin_app['container']['docker']['portMappings'][0]['labels'] = {}
        origin_app['portDefinitions'] = [{'labels': {'VIP_0': r.vip}}]
    origin_app['constraints'] = [['hostname', 'CLUSTER', host1]]
    proxy_app = get_test_app_in_docker()[0]
    proxy_app['container']['docker']['network'] = r.proxynet
    proxy_app['constraints'] = [['hostname', 'CLUSTER', host2]]
    proxy_app['mem'] = 0
    proxy_app['cpu'] = 0
    if r.proxynet == 'USER':
        proxy_app['ipAddress'] = {'networkName': 'dcos'}
    if r.proxynet == 'HOST':
        proxy_app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
            '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0'
        proxy_app['container']['docker']['portMappings'][0]['hostPort'] = 0
        proxy_app['container']['docker']['portMappings'][0]['containerPort'] = 0
    returned_uuid = None
    with contextlib.ExitStack() as stack:
        stack.enter_context(
            dcos_api_session.marathon.deploy_and_cleanup(origin_app, timeout=timeout))
        sp = stack.enter_context(
            dcos_api_session.marathon.deploy_and_cleanup(proxy_app, timeout=timeout))
        # Curl the VIP address from inside the proxy container; the origin
        # app serves its test UUID at /test_uuid.
        cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}/test_uuid'.format(r.vipaddr)
        returned_uuid = ensure_routable(cmd, sp)
        log.debug('returned_uuid is: {}'.format(returned_uuid))
    assert returned_uuid is not None
    assert returned_uuid['test_uuid'] == app_uuid
    r.log('PASSED')
def test_service_discovery_docker_host(dcos_api_session):
    """Service discovery for a Docker app using HOST networking."""
    app_definition, _test_uuid = get_test_app_in_docker()
    docker_conf = app_definition['container']['docker']
    docker_conf['network'] = 'HOST'
    # HOST networking has no port mappings; the app binds $PORT0 instead.
    del docker_conf['portMappings']
    app_definition = replace_marathon_cmd_port(app_definition, "$PORT0")
    assert_service_discovery(dcos_api_session, app_definition, [DNSHost])
def test_service_discovery_docker_host(dcos_api_session):
    """DNS records for a HOST-networked Docker app resolve correctly."""
    app, _ = get_test_app_in_docker()
    app['container']['docker']['network'] = 'HOST'
    # HOST mode: drop the port mapping and serve on $PORT0 instead.
    del app['container']['docker']['portMappings']
    app = replace_marathon_cmd_port(app, "$PORT0")
    assert_service_discovery(dcos_api_session, app, [DNSHost])
def docker_vip_app(network, host, vip):
    """Return an ``(app, uuid)`` pair for a Docker test-server app pinned to
    ``host`` on the given Marathon ``network`` mode, optionally advertising
    ``vip`` via a VIP_0 label.
    """
    # docker app definition defines its own healthchecks
    app, uuid = get_test_app_in_docker()
    app['id'] = '/viptest/' + app['id']
    docker = app['container']['docker']
    docker['network'] = network
    app['mem'] = 16
    app['cpu'] = 0.01
    if network == 'HOST':
        # Host networking: bind the Marathon-assigned $PORT0; the VIP is
        # advertised through portDefinitions rather than a port mapping.
        app['cmd'] = ('/opt/mesosphere/bin/dcos-shell python '
                      '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0')
        del docker['portMappings']
        if vip is not None:
            app['portDefinitions'] = [{'labels': {'VIP_0': vip}}]
    else:
        app['cmd'] = ('/opt/mesosphere/bin/dcos-shell python '
                      '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py 9080')
        mapping = {
            'hostPort': 0,
            'containerPort': 9080,
            'protocol': 'tcp',
            'name': 'test',
            'labels': {},
        }
        if vip is not None:
            mapping['labels'] = {'VIP_0': vip}
        docker['portMappings'] = [mapping]
        if network == 'USER':
            app['ipAddress'] = {'networkName': 'dcos'}
    app['constraints'] = [['hostname', 'CLUSTER', host]]
    return app, uuid
def test_if_docker_app_can_be_deployed(cluster):
    """Marathon-in-Docker deployment integration test.

    Checks that a Marathon app running inside a Docker container can be
    deployed and then reached as expected.
    """
    app, uuid = get_test_app_in_docker(ip_per_container=False)
    cluster.marathon.deploy_test_app_and_check(app, uuid)
def test_service_discovery_docker_overlay_port_mapping(dcos_api_session):
    """Service discovery for a Docker app on the overlay with a host port mapping."""
    app_definition, _ = get_test_app_in_docker()
    app_definition['container']['docker']['network'] = 'USER'
    app_definition['ipAddress'] = {'networkName': 'dcos'}
    # Reachable both by overlay address and by the mapped host port.
    assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay, DNSPortMap])
def test_if_docker_app_can_be_deployed(cluster):
    """Deploy a Docker-containerized Marathon app and verify it is reachable."""
    app_and_uuid = get_test_app_in_docker(ip_per_container=False)
    cluster.marathon.deploy_test_app_and_check(*app_and_uuid)
def test_service_discovery_docker_overlay(dcos_api_session):
    """Service discovery for a Docker app on the 'dcos' overlay network."""
    app_definition, _ = get_test_app_in_docker()
    app_definition['container']['docker']['network'] = 'USER'
    app_definition['ipAddress'] = {'networkName': 'dcos'}
    # No host port: the task is reachable only via its overlay address.
    del app_definition['container']['docker']['portMappings'][0]['hostPort']
    assert_service_discovery(dcos_api_session, app_definition, [DNSOverlay])
def docker_vip_app(network, host, vip):
    """Build a Docker test-server app definition constrained to ``host``.

    The app uses Marathon network mode ``network``; when ``vip`` is not
    None it is advertised through a VIP_0 label. Returns ``(app, uuid)``.
    """
    # docker app definition defines its own healthchecks
    app, uuid = get_test_app_in_docker()
    app['id'] = '/viptest/' + app['id']
    app['container']['docker']['network'] = network
    app['mem'] = 16
    app['cpu'] = 0.01
    if network == 'HOST':
        # In HOST mode there is no port mapping; the server binds $PORT0
        # and the VIP (if any) lives in portDefinitions.
        app['cmd'] = ('/opt/mesosphere/bin/dcos-shell python '
                      '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0')
        del app['container']['docker']['portMappings']
        if vip is not None:
            app['portDefinitions'] = [{'labels': {'VIP_0': vip}}]
    else:
        app['cmd'] = ('/opt/mesosphere/bin/dcos-shell python '
                      '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py 9080')
        labels = {} if vip is None else {'VIP_0': vip}
        app['container']['docker']['portMappings'] = [{
            'hostPort': 0,
            'containerPort': 9080,
            'protocol': 'tcp',
            'name': 'test',
            'labels': labels,
        }]
        if network == 'USER':
            app['ipAddress'] = {'networkName': 'dcos'}
    app['constraints'] = [['hostname', 'CLUSTER', host]]
    return app, uuid
def test_ip_per_container_with_named_vip(cluster):
    """Connect to an ip-per-container task through its named VIP."""
    origin_app, _ = get_test_app_in_docker(ip_per_container=True)
    port_mapping = origin_app['container']['docker']['portMappings'][0]
    port_mapping['labels'] = {'VIP_0': 'foo:6000'}
    # With no host port, health-check the container port directly.
    origin_app['healthChecks'][0]['port'] = port_mapping['containerPort']
    del port_mapping['hostPort']
    del origin_app['healthChecks'][0]['portIndex']
    with cluster.marathon.deploy_and_cleanup(origin_app):
        proxy_app, _ = get_test_app()
        with cluster.marathon.deploy_and_cleanup(proxy_app) as service_points:
            cmd = ('/opt/mesosphere/bin/curl -s -f -m 5 '
                   'http://foo.marathon.l4lb.thisdcos.directory:6000/ping')
            ensure_routable(cmd, service_points)()
def test_ip_per_container_with_named_vip(cluster):
    """Reach an ip-per-container task via its named VIP (foo:6000)."""
    origin_app, _ = get_test_app_in_docker(ip_per_container=True)
    mappings = origin_app['container']['docker']['portMappings']
    mappings[0]['labels'] = {'VIP_0': 'foo:6000'}
    health = origin_app['healthChecks'][0]
    # No host port is allocated, so the health check targets the
    # container port rather than a port index.
    health['port'] = mappings[0]['containerPort']
    del mappings[0]['hostPort']
    del health['portIndex']
    with cluster.marathon.deploy_and_cleanup(origin_app):
        proxy_app = get_test_app()[0]
        with cluster.marathon.deploy_and_cleanup(proxy_app) as service_points:
            cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://foo.marathon.l4lb.thisdcos.directory:6000/ping'
            ensure_routable(cmd, service_points)()
def test_ip_per_container(dcos_api_session):
    """Verify direct connectivity to a task running in ip-per-container mode."""
    # Launch the test_server in ip-per-container mode, one instance per agent.
    app_definition, _ = get_test_app_in_docker(ip_per_container=True)
    assert len(dcos_api_session.slaves) >= 2, "IP Per Container tests require 2 private agents to work"
    app_definition['instances'] = 2
    app_definition['constraints'] = [['hostname', 'UNIQUE']]
    with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=True) as service_points:
        container_port = app_definition['container']['docker']['portMappings'][0]['containerPort']
        target = 'http://{}:{}/ping'.format(service_points[1].ip, container_port)
        ensure_routable('/opt/mesosphere/bin/curl -s -f -m 5 ' + target, service_points)
def test_ip_per_container(dcos_api_session):
    """Check that an ip-per-container task is reachable on its container IP."""
    # Launch the test_server in ip-per-container mode.
    app_definition, _ = get_test_app_in_docker(ip_per_container=True)
    # Two instances on distinct agents so the probe crosses agent boundaries.
    assert len(dcos_api_session.slaves) >= 2, 'IP Per Container tests require 2 private agents to work'
    app_definition['instances'] = 2
    app_definition['constraints'] = [['hostname', 'UNIQUE']]
    with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=True) as service_points:
        port = app_definition['container']['docker']['portMappings'][0]['containerPort']
        cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}:{}/ping'.format(service_points[1].ip, port)
        # Run the probe from the first service point against the second.
        ensure_routable(cmd, service_points[0].host, service_points[0].port)
def test_service_discovery_docker_bridge(dcos_api_session):
    """Service discovery for a Docker app using BRIDGE networking."""
    app_definition, _ = get_test_app_in_docker()
    app_definition['container']['docker']['network'] = 'BRIDGE'
    assert_service_discovery(dcos_api_session, app_definition, [DNSPortMap])
def test_service_discovery_docker_bridge(dcos_api_session):
    """DNS entries for a bridge-networked Docker app resolve via the port mapping."""
    app, _ = get_test_app_in_docker()
    app['container']['docker']['network'] = 'BRIDGE'
    assert_service_discovery(dcos_api_session, app, [DNSPortMap])
def _service_discovery_test(cluster, docker_network_bridge):
    """Service discovery integration test.

    Deploys two instances of a simple python http test server (see
    test_server.py) with a ["hostname", "UNIQUE"] constraint, so they land
    on two different slaves, then cross-checks three views of the service:

    1. the service points Marathon advertises;
    2. the entries Mesos-DNS eventually publishes for the app — Mesos-DNS
       may lag cluster changes, so this is polled with a timeout;
    3. the containers' own view: the test agent POSTs the second
       container's (ip, port) to the first container (the "test server"),
       which then calls "GET /reflect" on the second (the "reflector").
       The reflector reports the test server's IP as it sees it plus the
       session UUID Marathon provided to it.

    The test succeeds when the UUIDs of the test server, the reflector and
    this test all match, and the test server's IP as seen by the reflector
    equals the service point Marathon reported for it.
    """
    # TODO(cmaloney): For non docker network bridge we should just do a mesos container.
    app_definition, test_uuid = get_test_app_in_docker(ip_per_container=False)

    if not docker_network_bridge:
        # TODO(cmaloney): This is very hacky to make PORT0 on the end instead of 9080...
        app_definition['cmd'] = app_definition['cmd'][:-4] + '$PORT0'
        app_definition['container']['docker']['network'] = 'HOST'
        del app_definition['container']['docker']['portMappings']
        app_definition['portDefinitions'] = [{
            "protocol": "tcp",
            "port": 0,
            "name": "test"
        }]

    app_definition['instances'] = 2
    assert len(cluster.slaves) >= 2, "Test requires a minimum of two agents"
    app_definition["constraints"] = [["hostname", "UNIQUE"], ]

    with cluster.marathon.deploy_and_cleanup(app_definition) as service_points:
        # Verify if Mesos-DNS agrees with Marathon:
        @retrying.retry(wait_fixed=1000,
                        stop_max_delay=MESOS_DNS_ENTRY_UPDATE_TIMEOUT * 1000,
                        retry_on_result=lambda ret: ret is None,
                        retry_on_exception=lambda x: False)
        def _poll_for_mesos_dns():
            r = cluster.get('/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(
                app_definition['id'].lstrip('/')))
            assert r.status_code == 200
            entries = r.json()
            # Mesos-DNS returns a single empty placeholder record until it
            # has caught up with the cluster.
            placeholder = [{'host': '', 'port': '', 'service': '', 'ip': ''}]
            if entries == placeholder or len(entries) < len(service_points):
                logging.info("Waiting for Mesos-DNS to update entries")
                return None
            logging.info("Mesos-DNS entries have been updated!")
            return entries

        try:
            r_data = _poll_for_mesos_dns()
        except retrying.RetryError:
            pytest.fail("Mesos DNS has failed to update entries in {} seconds.".format(
                MESOS_DNS_ENTRY_UPDATE_TIMEOUT))

        marathon_provided_servicepoints = sorted((x.host, x.port) for x in service_points)
        mesosdns_provided_servicepoints = sorted((x['ip'], int(x['port'])) for x in r_data)
        assert marathon_provided_servicepoints == mesosdns_provided_servicepoints

        # Verify if containers themselves confirm what Marathon says:
        payload = {"reflector_ip": service_points[1].host,
                   "reflector_port": service_points[1].port}
        r = requests.post('http://{}:{}/your_ip'.format(
            service_points[0].host, service_points[0].port), payload)
        if r.status_code != 200:
            msg = "Test server replied with non-200 reply: '{status_code} {reason}. "
            msg += "Detailed explanation of the problem: {text}"
            pytest.fail(msg.format(status_code=r.status_code, reason=r.reason, text=r.text))

        r_data = r.json()
        assert r_data['reflector_uuid'] == test_uuid
        assert r_data['test_uuid'] == test_uuid
        if len(cluster.slaves) >= 2:
            # When len(slaves)==1, we are connecting through docker-proxy using
            # docker0 interface ip. This makes this assertion useless, so we skip
            # it and rely on matching test uuid between containers only.
            assert r_data['my_ip'] == service_points[0].host
def vip_test(dcos_api_session, r):
    """Deploy an origin app advertising a VIP plus a proxy app, and assert
    that the VIP address is routable from inside the proxy container.

    ``r`` holds one VIP test case: ``vip``, ``vipaddr``, ``vipnet``,
    ``proxynet`` and ``samehost``.
    """
    r.log('START')
    agents = list(dcos_api_session.slaves)
    # make sure we can reproduce: seeding with the VIP makes agent
    # selection deterministic per test case.
    random.seed(r.vip)
    random.shuffle(agents)
    host1 = agents[0]
    host2 = agents[0]
    if not r.samehost:
        # Two distinct agents are needed when the apps must not share a host.
        assert len(agents) >= 2, 'test requires at least 2 private agents'
        host2 = agents[1]
    log.debug('host1 is: {}'.format(host1))
    log.debug('host2 is: {}'.format(host2))
    origin_app, app_uuid = get_test_app_in_docker()
    origin_app['container']['docker']['portMappings'][0]['labels'] = {'VIP_0': r.vip}
    origin_app['container']['docker']['network'] = r.vipnet
    origin_app['mem'] = 0
    origin_app['cpu'] = 0
    if r.vipnet == 'USER':
        origin_app['ipAddress'] = {'networkName': 'dcos'}
    if r.vipnet == 'HOST':
        # HOST networking: bind the Marathon-assigned $PORT0 and move the
        # VIP label from the port mapping to portDefinitions.
        origin_app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
            '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0'
        origin_app['container']['docker']['portMappings'][0]['hostPort'] = 0
        origin_app['container']['docker']['portMappings'][0]['containerPort'] = 0
        origin_app['container']['docker']['portMappings'][0]['labels'] = {}
        origin_app['portDefinitions'] = [{'labels': {'VIP_0': r.vip}}]
    origin_app['constraints'] = [['hostname', 'CLUSTER', host1]]
    proxy_app = get_test_app_in_docker()[0]
    proxy_app['container']['docker']['network'] = r.proxynet
    proxy_app['constraints'] = [['hostname', 'CLUSTER', host2]]
    proxy_app['mem'] = 0
    proxy_app['cpu'] = 0
    if r.proxynet == 'USER':
        proxy_app['ipAddress'] = {'networkName': 'dcos'}
    if r.proxynet == 'HOST':
        proxy_app['cmd'] = '/opt/mesosphere/bin/dcos-shell python '\
            '/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py $PORT0'
        proxy_app['container']['docker']['portMappings'][0]['hostPort'] = 0
        proxy_app['container']['docker']['portMappings'][0]['containerPort'] = 0
    returned_uuid = None
    with contextlib.ExitStack() as stack:
        stack.enter_context(
            dcos_api_session.marathon.deploy_and_cleanup(origin_app, timeout=timeout))
        sp = stack.enter_context(
            dcos_api_session.marathon.deploy_and_cleanup(proxy_app, timeout=timeout))
        # Curl the VIP from inside the proxy; the origin app serves its
        # test UUID at /test_uuid.
        cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}/test_uuid'.format(r.vipaddr)
        returned_uuid = ensure_routable(cmd, sp)
        log.debug('returned_uuid is: {}'.format(returned_uuid))
    assert returned_uuid is not None
    assert returned_uuid['test_uuid'] == app_uuid
    r.log('PASSED')
def _service_discovery_test(dcos_api_session, docker_network_bridge):
    """Service discovery integration test.

    Launches two instances of a simple python http test server (see
    test_server.py) with a ["hostname", "UNIQUE"] constraint so each lands
    on a different slave, and then compares three views of the service:

    1. the service points Marathon reports;
    2. the records Mesos-DNS publishes for the app — since Mesos-DNS can
       take a while to catch up with changes in the dcos_api_session, the
       records are polled with a timeout;
    3. the containers' own view: the test agent POSTs the second
       container's (ip, port) to the first one (the "test server"), which
       issues "GET /reflect" against the second (the "reflector"). The
       reflector answers with the test server's IP as seen by it and the
       session UUID Marathon provided.

    The test succeeds when the test server, reflector and test UUIDs all
    match and the test server's IP matches the service point Marathon
    reported for that container.
    """
    # TODO(cmaloney): For non docker network bridge we should just do a mesos container.
    app_definition, test_uuid = get_test_app_in_docker(ip_per_container=False)

    if not docker_network_bridge:
        # TODO(cmaloney): This is very hacky to make PORT0 on the end instead of 9080...
        app_definition['cmd'] = app_definition['cmd'][:-4] + '$PORT0'
        app_definition['container']['docker']['network'] = 'HOST'
        del app_definition['container']['docker']['portMappings']
        app_definition['portDefinitions'] = [{"protocol": "tcp", "port": 0, "name": "test"}]

    app_definition['instances'] = 2
    assert len(dcos_api_session.slaves) >= 2, "Test requires a minimum of two agents"
    app_definition["constraints"] = [["hostname", "UNIQUE"], ]

    with dcos_api_session.marathon.deploy_and_cleanup(app_definition) as service_points:
        # Verify if Mesos-DNS agrees with Marathon:
        @retrying.retry(wait_fixed=1000,
                        stop_max_delay=DNS_ENTRY_UPDATE_TIMEOUT * 1000,
                        retry_on_result=lambda ret: ret is None,
                        retry_on_exception=lambda x: False)
        def _poll_mesos_dns():
            r = dcos_api_session.get('/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(
                app_definition['id'].lstrip('/')))
            assert r.status_code == 200
            dns_entries = r.json()
            # A lone all-empty record means Mesos-DNS has not caught up yet.
            if (dns_entries == [{'host': '', 'port': '', 'service': '', 'ip': ''}] or
                    len(dns_entries) < len(service_points)):
                logging.info("Waiting for Mesos-DNS to update entries")
                return None
            logging.info("Mesos-DNS entries have been updated!")
            return dns_entries

        try:
            r_data = _poll_mesos_dns()
        except retrying.RetryError:
            pytest.fail("Mesos DNS has failed to update entries in {} seconds.".format(
                DNS_ENTRY_UPDATE_TIMEOUT))

        marathon_provided_servicepoints = sorted((x.host, x.port) for x in service_points)
        mesosdns_provided_servicepoints = sorted((x['ip'], int(x['port'])) for x in r_data)
        assert marathon_provided_servicepoints == mesosdns_provided_servicepoints

        # Verify if containers themselves confirm what Marathon says:
        payload = {"reflector_ip": service_points[1].host,
                   "reflector_port": service_points[1].port}
        r = requests.post('http://{}:{}/your_ip'.format(
            service_points[0].host, service_points[0].port), payload)
        if r.status_code != 200:
            msg = "Test server replied with non-200 reply: '{status_code} {reason}. "
            msg += "Detailed explanation of the problem: {text}"
            pytest.fail(msg.format(status_code=r.status_code, reason=r.reason, text=r.text))

        r_data = r.json()
        assert r_data['reflector_uuid'] == test_uuid
        assert r_data['test_uuid'] == test_uuid
        if len(dcos_api_session.slaves) >= 2:
            # When len(slaves)==1, we are connecting through docker-proxy using
            # docker0 interface ip. This makes this assertion useless, so we skip
            # it and rely on matching test uuid between containers only.
            assert r_data['my_ip'] == service_points[0].host