def test_multi_tenancy_deployment(self):
    """Run the multi-tenancy setup and bootstrap 2 devices to 2 different
    tenants, then deploy to each device; the second tenant's deployment is
    expected to fail (``fail: True``).
    """
    auth.reset_auth_token()

    # one entry per tenant: credentials, the client container to start, and
    # whether the subsequent deployment should fail
    users = [
        {
            "email": "*****@*****.**",
            "password": "******",
            "username": "******",
            "container": "mender-client-deployment-1",
            "fail": False
        },
        {
            "email": "*****@*****.**",
            "password": "******",
            "username": "******",
            "container": "mender-client-deployment-2",
            "fail": True
        }
    ]

    # create each tenant, start a client against its tenant token, and
    # accept the device into that tenant
    for user in users:
        auth.new_tenant(user["username"], user["email"], user["password"])
        t = auth.current_tenant["tenant_token"]
        new_tenant_client(user["container"], t)
        adm.accept_devices(1)

    for user in users:
        # NOTE(review): new_tenant is called again for an already-created
        # tenant; presumably it just switches the API session to that tenant
        # (cf. auth.set_tenant used elsewhere in this file) -- confirm it
        # does not attempt to re-create the tenant.
        auth.new_tenant(user["username"], user["email"], user["password"])
        # each tenant must see exactly its own single device
        assert len(inv.get_devices()) == 1
        self.perform_update(mender_client_container=user["container"],
                            fail=user["fail"])
def test_original_deployments_persisted(self):
    """Verify that deployments provisioned before an upgrade survived it.

    Checks, against the pre-provisioned fixture state stored on ``self``:
    the accepted-device count, the success/failure statistics of the
    provisioned deployment, that failed device deployments still carry
    their logs, and that exactly one finished deployment referencing the
    provisioned artifact remains.
    """
    import ast

    auth.reset_auth_token()
    auth.get_auth_token()

    # wait for 10 devices to be available
    devices = adm.get_devices_status("accepted", 10)
    # self.provisioned_devices is stored as a literal string; parse it with
    # ast.literal_eval instead of eval, which would execute arbitrary code
    provisioned_devices = ast.literal_eval(self.provisioned_devices)

    # check that devices and provisioned_devices are the same
    assert len(devices) == provisioned_devices
    # not sure what else I can do here, the device admission changed from 1.0 to master

    # fixture provisions 7 successful and 3 failed device deployments
    assert deploy.get_statistics(self.provisioned_deployment_id)["success"] == 7
    assert deploy.get_statistics(self.provisioned_deployment_id)["failure"] == 3

    # check failures still contain logs
    for device_deployment in deploy.get_deployment_overview(
            self.provisioned_deployment_id):
        if device_deployment["status"] == "failure":
            assert "damn" in deploy.get_logs(device_deployment["id"],
                                             self.provisioned_deployment_id)

    deployments_in_progress = deploy.get_status("inprogress")
    deployments_pending = deploy.get_status("pending")
    deployments_finished = deploy.get_status("finished")

    assert len(deployments_in_progress) == 0
    assert len(deployments_pending) == 0
    assert len(deployments_finished) == 1

    assert self.provisioned_artifact_id in str(deployments_finished)
def test_multi_tenancy_deployment_aborting(self):
    """Bootstrap one device to a fresh tenant, start a deployment, abort
    it, and verify both the backend statistics and the client's own log
    reflect the abort."""
    auth.reset_auth_token()

    users = [{
        "email": "*****@*****.**",
        "password": "******",
        "username": "******",
        "container": "mender-client-deployment-aborting-1",
    }]

    # create the tenant, start its client, accept the device
    for user in users:
        auth.new_tenant(user["username"], user["email"], user["password"])
        tenant_token = auth.current_tenant["tenant_token"]
        new_tenant_client(user["container"], tenant_token)
        adm.accept_devices(1)

    # start a deployment, abort it immediately, then check backend and client
    for user in users:
        deployment_id, _ = common_update_procedure(
            install_image=conftest.get_valid_image())
        deploy.abort(deployment_id)
        deploy.check_expected_statistics(deployment_id, "aborted", 1)
        execute(self.mender_log_contains_aborted_string,
                hosts=get_mender_client_by_container_name(user["container"]))
def standard_setup_one_client():
    """Bring up the standard single-client setup (device not bootstrapped),
    reusing the running environment when it already matches."""
    already_running = setup_type() == ST_OneClient
    if already_running:
        return

    restart_docker_compose()
    auth.reset_auth_token()

    set_setup_type(ST_OneClient)
def setup_set_client_number_bootstrapped(clients):
    """Scale the mender-client service to ``clients`` instances, wait for
    SSH, and accept all of them; the cached setup type is invalidated."""
    docker_compose_cmd("scale mender-client={}".format(clients))
    ssh_is_opened()

    auth.reset_auth_token()
    adm.accept_devices(clients)

    # arbitrary client count: no named setup type applies any more
    set_setup_type(None)
def standard_setup_one_client_bootstrapped():
    """Bring up the standard setup with a single, accepted client, skipping
    the restart when the environment is already in this state."""
    if setup_type() == ST_OneClientBootstrapped:
        return

    restart_docker_compose()
    auth.reset_auth_token()

    # a single client comes up; admit it
    adm.accept_devices(1)

    set_setup_type(ST_OneClientBootstrapped)
def standard_setup_two_clients_bootstrapped():
    """Bring up the standard setup with two accepted clients, skipping the
    restart when the environment is already in this state."""
    if setup_type() == ST_TwoClientsBootstrapped:
        return

    # scale to two client containers on restart
    restart_docker_compose(2)
    auth.reset_auth_token()
    adm.accept_devices(2)

    set_setup_type(ST_TwoClientsBootstrapped)
def test_multi_tenancy_deployment_s3(self):
    """Deploy to a device of a fresh tenant with S3 artifact storage.

    After the update, verify the stored S3 object: its key has the
    multi-tenant format ``<tenant_id>/<artifact_id>``, it is tagged with
    ``tenant_id``, and deleting the artifact through the API also removes
    the object from the bucket.
    """

    def verify_object_id_and_tagging():
        from boto3 import client

        tenant = auth.get_tenant_id()
        conn = client('s3')

        artifacts = deploy.get_artifacts()
        assert len(artifacts) == 1
        artifact_id = artifacts[0]["id"]

        # multi-tenant objects are stored under "<tenant_id>/<artifact_id>"
        expected_object = "%s/%s" % (tenant, artifact_id)

        # verify object ID of proper MT format; fail loudly if no object for
        # this tenant exists instead of silently skipping the check
        found = False
        for key in conn.list_objects(
                Bucket='mender-artifacts-int-testing-us')['Contents']:
            if key['Key'].startswith(tenant):
                assert key['Key'] == expected_object
                found = True
        assert found, "artifact object for tenant not found in s3"

        # verify tagging is working
        tags = conn.get_object_tagging(
            Bucket='mender-artifacts-int-testing-us',
            Key=expected_object)["TagSet"][0]
        assert tags["Value"] == tenant
        assert tags["Key"] == "tenant_id"

        # Delete artifact and make sure it's really gone
        # (a stray list_objects call whose result was discarded was removed)
        conn.delete_object(Bucket="mender-artifacts-int-testing-us",
                           Key=expected_object)
        deploy.delete_artifact(artifact_id)
        for key in conn.list_objects(
                Bucket='mender-artifacts-int-testing-us').get('Contents', []):
            if key['Key'].startswith(tenant):
                pytest.fail("failed to delete artifact from s3")

    auth.reset_auth_token()

    users = [{
        "email": "*****@*****.**",
        "password": "******",
        "username": "******",
        "container": "mender-client-mt-s3",
    }]

    for user in users:
        auth.new_tenant(user["username"], user["email"], user["password"])
        t = auth.current_tenant["tenant_token"]
        new_tenant_client(user["container"], t)
        adm.accept_devices(1)
        self.perform_update(mender_client_container=user["container"])
        verify_object_id_and_tagging()
def standard_setup_one_client(request):
    """Bring up the standard single-client setup.

    The running environment is reused when it already matches, unless the
    fixture was parametrized with ``"force_new"`` (or not parametrized at
    all), in which case a restart is forced.
    """
    param = getattr(request, 'param', False)
    may_reuse = param and param != "force_new"
    if may_reuse and setup_type() == ST_OneClient:
        return

    restart_docker_compose()
    auth.reset_auth_token()

    set_setup_type(ST_OneClient)
def standard_setup_with_signed_artifact_client(request):
    # Bring up the signed-artifact-client composition from scratch and accept
    # its single device.
    # NOTE(review): unlike the guarded variant of this fixture, `request` is
    # unused here and a matching running setup is never reused -- confirm
    # this unconditional restart is intended.
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f ../extra/signed-artifact-client-testing/docker-compose.signed-client.yml up -d")
    ssh_is_opened()
    auth.reset_auth_token()
    # NOTE(review): this variant admits via auth_v2 while the sibling variant
    # uses adm.accept_devices -- verify which admission API this tree expects.
    auth_v2.accept_devices(1)
    set_setup_type(ST_SignedClient)
def standard_setup_with_signed_artifact_client(request):
    """Bring up the signed-artifact-client composition and accept its single
    device, reusing a matching running setup unless the fixture was
    parametrized with ``"force_new"`` (or not parametrized at all)."""
    param = getattr(request, 'param', False)
    if param and param != "force_new" and setup_type() == ST_SignedClient:
        return

    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f ../extra/signed-artifact-client-testing/docker-compose.signed-client.yml up -d")
    ssh_is_opened()

    auth.reset_auth_token()
    adm.accept_devices(1)

    set_setup_type(ST_SignedClient)
def test_token_validity(self):
    """ verify that only devices with valid tokens can bootstrap
        successfully to a multitenancy setup """

    wrong_token = "wrong-token"

    def wait_until_bootstrap_attempt():
        # fabric fan-out pattern: when called on the control host
        # (env.host_string unset), re-execute this function on each client
        if not env.host_string:
            return execute(wait_until_bootstrap_attempt,
                           hosts=get_mender_clients())
        ssh_is_opened()

        # poll the client journal for the bootstrap attempt, with a
        # shrinking back-off (20/i seconds; integer division under py2)
        for i in range(1, 20):
            with settings(hide('everything'), warn_only=True):
                out = run('journalctl -u mender | grep "bootstrapped -> authorize-wait"')
                if out.succeeded:
                    return True
            time.sleep(20 / i)
        return False

    def set_correct_tenant_token(token):
        # same fabric fan-out pattern as above
        if not env.host_string:
            return execute(set_correct_tenant_token, token,
                           hosts=get_mender_clients())
        # swap the placeholder token for the real one and restart the client
        run("sed -i 's/%s/%s/g' /etc/mender/mender.conf" % (wrong_token, token))
        run("systemctl restart mender")

    auth.reset_auth_token()
    # NOTE(review): new_tenant is called with 2 args here but 3 elsewhere in
    # this file -- presumably differing helper versions; confirm signature.
    auth.new_tenant("*****@*****.**", "hunter2hunter2")
    token = auth.current_tenant["tenant_token"]

    # create a new client with an incorrect token set
    new_tenant_client("mender-client", wrong_token)

    if wait_until_bootstrap_attempt():
        # the device keeps retrying with the wrong token; it must never
        # appear in the admission list
        for _ in range(5):
            time.sleep(5)
            adm.get_devices(expected_devices=0)  # make sure device not seen
    else:
        pytest.fail("failed to bootstrap device")

    # setting the correct token makes the client visible to the backend
    set_correct_tenant_token(token)
    adm.get_devices(expected_devices=1)
def standard_setup_with_short_lived_token():
    # Full restart of the composition with the expired-token override file so
    # the backend issues short-lived auth tokens; accepts the single client.
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f ../docker-compose.yml \
                        -f ../docker-compose.client.yml \
                        -f ../docker-compose.storage.minio.yml \
                        -f ../docker-compose.storage.s3.yml up -d" if False else "-f ../docker-compose.yml \
                        -f ../docker-compose.client.yml \
                        -f ../docker-compose.storage.minio.yml \
                        -f ../docker-compose.testing.yml \
                        -f ../extra/expired-token-testing/docker-compose.short-token.yml up -d",
                       use_common_files=False)
    ssh_is_opened()
    auth.reset_auth_token()
    adm.accept_devices(1)
    set_setup_type(ST_ShortLivedAuthToken)
def standard_setup_one_client_bootstrapped_with_s3():
    # Full restart of the composition with both minio and AWS S3 storage
    # overrides; accepts the single client.
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f ../docker-compose.yml \
                        -f ../docker-compose.client.yml \
                        -f ../docker-compose.testing.yml \
                        -f ../docker-compose.storage.minio.yml \
                        -f ../docker-compose.storage.s3.yml up -d",
                       use_common_files=False)
    # tail service logs in the background for the duration of the run
    docker_compose_cmd("logs -f &")
    ssh_is_opened()
    auth.reset_auth_token()
    adm.accept_devices(1)
    set_setup_type(ST_OneClientsBootstrapped_AWS_S3)
def standard_setup_one_client_bootstrapped_with_s3():
    # Guarded variant: reuse the environment when it is already in the S3
    # setup state, otherwise restart with the travis-testing S3 override.
    if setup_type() == ST_OneClientsBootstrapped_AWS_S3:
        return

    stop_docker_compose()
    # NOTE(review): the sibling variant of this fixture calls
    # reset_mender_api() after stop_docker_compose() -- confirm whether its
    # omission here is intentional.
    docker_compose_cmd("-f ../docker-compose.client.yml \
                        -f ../docker-compose.storage.s3.yml \
                        -f ../docker-compose.yml \
                        -f ../extra/travis-testing/s3.yml up -d",
                       use_common_files=False)
    # tail service logs in the background for the duration of the run
    docker_compose_cmd("logs -f &")
    ssh_is_opened()
    auth.reset_auth_token()
    adm.accept_devices(1)
    set_setup_type(ST_OneClientsBootstrapped_AWS_S3)
def setup_failover():
    """ Setup with two servers and one client.
        First server (A) behaves as usual, whereas the second server (B)
        should not expect any clients. Client is initially set up against
        server A. In docker all microservices for B have a suffix "-2".
    """
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f ../docker-compose.yml \
                        -f ../docker-compose.client.yml \
                        -f ../docker-compose.storage.minio.yml \
                        -f ../docker-compose.testing.yml \
                        -f ../extra/failover-testing/docker-compose.failover-server.yml up -d",
                       use_common_files=False)
    ssh_is_opened()
    auth.reset_auth_token()
    # only server A admits the client; server B expects none
    adm.accept_devices(1)
    set_setup_type(ST_Failover)
def test_multi_tenancy_setup(self):
    """ Simply make sure we are able to run the multi tenancy setup and
        bootstrap 2 different devices to different tenants """
    auth.reset_auth_token()

    # one entry per tenant: credentials plus the client container to start
    users = [
        {
            "email": "*****@*****.**",
            "password": "******",
            "container": "mender-client"
        },
        {
            "email": "*****@*****.**",
            "password": "******",
            "container": "client2"
        },
    ]

    # create each tenant, start a client against its token, accept its device
    for user in users:
        auth.new_tenant(user["email"], user["password"])
        t = auth.current_tenant["tenant_token"]
        new_tenant_client(user["container"], t)
        # NOTE(review): the following print/sleep/print look like leftover
        # debug code -- a 1000 s sleep dominates the test runtime; candidates
        # for removal once confirmed they are not masking a startup race.
        print "sleeping"
        time.sleep(1000)
        adm.accept_devices(1)
        print adm.get_devices()

    self.perform_update()

    # deploy to each device
    for user in users:
        # switch the API session to this tenant
        auth.set_tenant(user["email"], user["password"])
        t = auth.current_tenant["tenant_token"]
        # NOTE(review): despite the comment above, this loop only re-accepts
        # the device and never deploys; t is also unused -- confirm intent.
        adm.accept_devices(1)