def mount_container_filesystem():
    """Run an nginx container, mount its filesystem on the host and verify
    the stock configuration file and default welcome page are present.

    Requires module-level ``IMAGE_NAME`` naming an nginx image.
    """
    import logging
    from conu import DockerBackend
    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(IMAGE_NAME)

    # run nginx container
    container = image.run_via_binary()

    # mount container filesystem
    with container.mount() as fs:
        # check presence of nginx configuration file
        assert fs.file_is_present('/etc/nginx/nginx.conf')
        # check presence of default nginx page
        index_path = '/usr/share/nginx/html/index.html'
        assert fs.file_is_present(index_path)
        # and its text -- reuse index_path instead of repeating the literal
        index_text = fs.read_file(index_path)
        assert '<h1>Welcome to nginx!</h1>' in index_text
        print(index_text)

    print('Success!')

    # cleanup
    container.delete(force=True)
def run_container(local_dir):
    """
    serve path `local_dir` using the python http webserver in a docker container

    :param local_dir: str, path to the directory, it should exist
    :return: instance of DockerContainer
    """
    fedora_repo = "registry.fedoraproject.org/fedora"
    fedora_tag = "27"

    # we'll run our container using docker engine
    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(fedora_repo, tag=fedora_tag)

    # make sure the image is available locally; pull only when it's missing
    try:
        image.get_metadata()
    except Exception:
        image.pull()

    # helper class to create `docker run ...` -- we want test the same
    # experience as our users
    builder = DockerRunBuilder(
        # the command to run in a container
        command=["python3", "-m", "http.server", "--bind", "0.0.0.0",
                 "%d" % port],
        # additional options passed to `run` command
        additional_opts=["-v", "%s:/webroot" % local_dir, "-w", "/webroot"],
    )
    # let's run the container (in the background)
    return image.run_via_binary(run_command_instance=builder)
def __init__(self, section: str, **kwargs):
    """Initialize the Docker-Swarm-backed container manager.

    :param section: str, configuration section name passed to the parent
    :param kwargs: forwarded verbatim to the parent constructor
    """
    super().__init__(section, **kwargs)
    if self.LOG.name != LoggerConst.DEFAULT_NAME:
        # `Logger.warn` is a deprecated alias of `warning`; also insert the
        # missing space between the two concatenated sentences.
        self.LOG.warning(
            "Using Docker Swarm as container manager. "
            "This is not recommended for distributed environments")
    self.docker_api = self.docker_compass.get_api()
    self.docker_backend = DockerBackend(logging_level=logging.ERROR)
def docker_backend():
    """
    pytest fixture which mimics context manager: it provides new instance of
    DockerBackend and cleans after it once it's used; sample usage:

    ::

        def test_my_container(docker_backend):
            image = docker_backend.ImageClass("fedora", tag="27")

    :return: instance of DockerBackend
    """
    # Use the backend's own context manager instead of invoking the dunder
    # __enter__ and the private _clean() by hand; this matches the other
    # docker_backend fixture in this codebase and guarantees cleanup.
    with DockerBackend(logging_level=logging.DEBUG) as backend:
        yield backend
def ipsilon_container(docker_backend: conu.DockerBackend,
                      docker_network: dict) -> conu.DockerContainer:
    """
    Fixture preparing and yielding an Ipsilon container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network ID (fixture).

    Yields:
        The Ipsilon container.
    """
    # Define the container and start it
    ipsilon_image = docker_backend.ImageClass("bodhi-ci-integration-ipsilon")
    container = ipsilon_image.run_via_api()
    container.start()
    network_aliases = ["ipsilon", "ipsilon.ci", "id.dev.fedoraproject.org"]
    docker_backend.d.connect_container_to_network(
        container.get_id(), docker_network["Id"], aliases=network_aliases)
    # we need to wait for the broker to start listening
    container.wait_for_port(80, timeout=30)
    yield container
    container.kill()
    container.delete()
def test_oc_s2i_remote(self):
    """s2i-build a remote git source (django-ex) and wait for its service."""
    api_key = get_oc_api_token()
    with OpenshiftBackend(api_key=api_key,
                          logging_level=logging.DEBUG) as openshift_backend:
        openshift_backend.get_status()
        with DockerBackend(logging_level=logging.DEBUG) as docker_be:
            python_image = docker_be.ImageClass(CENTOS_PYTHON_3)
            login_to_registry(OC_CLUSTER_USER, token=api_key)
            app_name = openshift_backend.create_new_app_from_source(
                python_image,
                source="https://github.com/openshift/django-ex.git",
                project=MY_PROJECT)
            try:
                openshift_backend.wait_for_service(
                    app_name=app_name,
                    port=8080,
                    expected_output='Welcome to your Django application on OpenShift',
                    timeout=300)
            finally:
                # always collect logs and tear the app down
                openshift_backend.get_logs(app_name)
                openshift_backend.clean_project(app_name)
def _get_swarm_endpoints(self):
    """Return the list of endpoint addresses for this service.

    In "open sea" mode the swarm containers are inspected for the service's
    network address; otherwise a configured host/port (or nothing) is used.
    """
    if self.open_sea:
        from conu import DockerBackend  # lazy import
        endpoints = []
        containers = DockerBackend(
            logging_level=logging.ERROR).list_containers()
        for cont in containers:
            details = cont.inspect()
            labels = details.get('Config', {}).get('Labels', {})
            if labels.get('com.docker.swarm.service.name', '') != self.service_name:
                continue
            address = (details.get('NetworkSettings', {})
                       .get('Networks', {})
                       .get(DockerConst.NETWORK, {})
                       .get('IPAddress', None))
            if address:
                endpoints.append(address)
    elif self.port is not None:
        endpoints = [self.host]
    else:
        # TODO use router address
        endpoints = []
    return endpoints
def test_deploy_image(self):
    """Deploy a MariaDB image to OpenShift and wait for the service."""
    api_key = get_oc_api_token()
    with OpenshiftBackend(api_key=api_key,
                          logging_level=logging.DEBUG) as openshift_backend:
        with DockerBackend(logging_level=logging.DEBUG) as docker_be:
            # builder image
            mariadb_image = docker_be.ImageClass(CENTOS_MARIADB_10_2)
            # docker login inside OpenShift internal registry
            login_to_registry(OC_CLUSTER_USER, token=api_key)
            # create new app from remote source in OpenShift cluster
            app_name = openshift_backend.deploy_image(
                mariadb_image,
                oc_new_app_args=["--env", "MYSQL_ROOT_PASSWORD=test"],
                project=MY_PROJECT)
            try:
                # wait until service is ready to accept requests
                openshift_backend.wait_for_service(
                    app_name=app_name, port=3306, timeout=300)
                assert openshift_backend.all_pods_are_ready(app_name)
            finally:
                openshift_backend.get_logs(app_name)
                openshift_backend.clean_project(app_name)
def test_oc_s2i_local(self):
    """s2i-build a local standalone WSGI app and wait for its service."""
    api_key = get_oc_api_token()
    with OpenshiftBackend(api_key=api_key,
                          logging_level=logging.DEBUG) as openshift_backend:
        openshift_backend.get_status()
        with DockerBackend(logging_level=logging.DEBUG) as docker_be:
            python_image = docker_be.ImageClass(CENTOS_PYTHON_3)
            login_to_registry(OC_CLUSTER_USER, token=api_key)
            app_name = openshift_backend.create_new_app_from_source(
                python_image,
                source="examples/openshift/standalone-test-app",
                project=MY_PROJECT)
            try:
                openshift_backend.wait_for_service(
                    app_name=app_name,
                    port=8080,
                    expected_output="Hello World from standalone WSGI application!",
                    timeout=300)
            finally:
                # always collect logs and tear the app down
                openshift_backend.get_logs(app_name)
                openshift_backend.clean_project(app_name)
def basics():
    """Start a container from IMAGE_NAME, assert it runs, then tear it down."""
    import logging
    from conu import DockerBackend

    # prepare backend and image
    docker_be = DockerBackend(logging_level=logging.DEBUG)
    nginx_image = docker_be.ImageClass(IMAGE_NAME)

    # run container
    container = nginx_image.run_via_binary()
    assert container.is_running()
    print('Success!')

    # cleanup
    container.stop()
    container.delete()
def test_list_containers():
    """list_containers() must include a container we just started."""
    with DockerBackend() as backend:
        # sanity: listing works even before we add anything
        assert len(backend.list_containers()) >= 0
        image = backend.ImageClass(
            FEDORA_MINIMAL_REPOSITORY,
            tag=FEDORA_MINIMAL_REPOSITORY_TAG,
            pull_policy=DockerImagePullPolicy.NEVER)
        run_builder = DockerRunBuilder(
            command=["sleep", "1"],
            additional_opts=["-e", "FOO=BAR", "-p", "1234"])
        container = image.run_via_binary(run_command_instance=run_builder)
        try:
            container_list = backend.list_containers()
            assert len(container_list) >= 1
            cont_under_test = [
                c for c in container_list
                if c.metadata.identifier == container.get_id()
            ][0]
            assert cont_under_test.metadata.image
            # TODO: implement parsing docker_client.containers metadata
            # assert cont_under_test.metadata.command
            # assert cont_under_test.metadata.env_variables == {"FOO": "BAR"}
            # assert cont_under_test.metadata.exposed_ports == ["1234"]
            # assert cont_under_test.get_IPv4s()
        finally:
            container.delete(force=True)
def ipsilon_container(docker_backend: conu.DockerBackend,
                      docker_network: dict) -> conu.DockerContainer:
    """
    Fixture preparing and yielding an Ipsilon container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network ID (fixture).

    Yields:
        The Ipsilon container.
    """
    # Define the container and start it
    # NOTE(review): the parameter is annotated dict but .get_id() is called,
    # so it looks like a network object -- confirm against the fixture.
    image = docker_backend.ImageClass("bodhi-ci-integration-ipsilon")
    run_opts = ["--rm", "--name", "ipsilon",
                "--network", docker_network.get_id()]
    for alias in ("ipsilon", "ipsilon.ci", "id.dev.fedoraproject.org"):
        run_opts += ["--network-alias", alias]
    container = image.run_via_binary(additional_opts=run_opts)
    container.start()
    # we need to wait for the broker to start listening
    container.wait_for_port(80, timeout=30)
    yield container
    stop_and_delete(container)
def test_container_metadata():
    """Verify that container metadata mirrors the `docker run` options."""
    with DockerBackend(logging_level=10) as backend:
        image = backend.ImageClass(FEDORA_MINIMAL_REPOSITORY,
                                   tag=FEDORA_MINIMAL_REPOSITORY_TAG)
        run_opts = [
            '-i', '-t',
            '--name', 'my_container',
            '-p', '1234:12345',
            '-p', '123:12345',
            '-p', '8080',
            '--hostname', 'my_hostname',
            '-e', 'ENV1=my_env',
            '-e', 'ASD=',
            '-e', 'A=B=C=D',
            '-e', 'XYZ',
            '-l', 'testlabel1=testvalue1',
        ]
        cont = image.run_via_binary(
            DockerRunBuilder(command=["cat"], additional_opts=run_opts))
        try:
            meta = cont.get_metadata()
            assert meta.command == ["cat"]
            assert meta.name == "my_container"
            # env parsing, including empty values and values containing '='
            assert meta.env_variables["ENV1"] == "my_env"
            assert meta.env_variables["ASD"] == ""
            assert meta.env_variables["A"] == "B=C=D"
            assert meta.hostname == "my_hostname"
            # a variable passed without '=' must not appear at all
            assert "XYZ" not in list(meta.env_variables.keys())
            assert '12345/tcp' in meta.port_mappings
            assert meta.port_mappings['12345/tcp'] == [1234, 123]
            assert '8080/tcp' in meta.port_mappings
            assert meta.exposed_ports == ["12345/tcp", "8080/tcp"]
            assert meta.labels["testlabel1"] == "testvalue1"
            assert meta.status == ContainerStatus.RUNNING
        finally:
            cont.delete(force=True)
def test_webserver():
    """Serve a temp directory from a container and fetch it over HTTP."""
    # let's setup the directory to serve first
    served_dir = os.path.join("/tmp", "shiny-%s" % random_str())
    with DockerBackend(logging_level=logging.DEBUG) as backend:
        # helper class to create and initialize the dir -- will be removed
        # once we leave the context manager
        with Directory(served_dir, mode=0o0700):
            # let's put some file in it
            with open(os.path.join(served_dir, "candle"), "w") as fd:
                fd.write("You no take candle!")
            container = run_container(backend, served_dir)
            try:
                # we need to wait for the webserver to start serving
                container.wait_for_port(port)
                # GET on /
                http_response = container.http_request(path="/", port=port)
                assert http_response.ok
                assert '<a href="candle">candle</a>' in \
                    http_response.content.decode("utf-8")
                # now GETting the file
                assert 'You no take candle!' in container.http_request(
                    path="/candle", port=port).content.decode("utf-8")
            finally:
                container.kill()
                container.delete()
def test_pull_never():
    """With pull policy NEVER an absent image must stay absent."""
    with DockerBackend() as backend:
        # precondition: the image is not available locally
        with pytest.raises(docker.errors.DockerException):
            get_client().inspect_image("docker.io/library/busybox:1.25.1")
        busybox = backend.ImageClass(
            "docker.io/library/busybox", tag="1.25.1",
            pull_policy=DockerImagePullPolicy.NEVER)
        assert not busybox.is_present()
def test_oc_s2i_local_mariadb(self):
    """s2i-build a local MariaDB extension and wait for its service."""
    api_key = get_oc_api_token()
    with OpenshiftBackend(api_key=api_key,
                          logging_level=logging.DEBUG) as openshift_backend:
        openshift_backend.get_status()
        with DockerBackend(logging_level=logging.DEBUG) as docker_be:
            mariadb_image = docker_be.ImageClass(CENTOS_MARIADB_10_2)
            login_to_registry(OC_CLUSTER_USER, token=api_key)
            # database credentials handed to the app via --env pairs
            env_args = []
            for pair in ("MYSQL_ROOT_PASSWORD=test",
                         "MYSQL_OPERATIONS_USER=test1",
                         "MYSQL_OPERATIONS_PASSWORD=test1",
                         "MYSQL_DATABASE=testdb",
                         "MYSQL_USER=user1",
                         "MYSQL_PASSWORD=user1"):
                env_args += ["--env", pair]
            app_name = openshift_backend.create_new_app_from_source(
                mariadb_image,
                oc_new_app_args=env_args,
                source="examples/openshift/extend-mariadb-image",
                project=MY_PROJECT)
            openshift_backend.get_status()
            try:
                openshift_backend.wait_for_service(
                    app_name=app_name, port=3306, timeout=300)
                assert openshift_backend.all_pods_are_ready(app_name)
            finally:
                openshift_backend.get_logs(app_name)
                openshift_backend.clean_project(app_name)
def test_list_deployments(self):
    """A freshly created Deployment must appear in list_deployments()."""
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()
        with DockerBackend() as docker_be:
            pg_image = docker_be.ImageClass("centos/postgresql-10-centos7")
            pg_metadata = pg_image.get_metadata()
            # set up env variables
            pg_metadata.env_variables.update({
                "POSTGRESQL_USER": "******",
                "POSTGRESQL_PASSWORD": "******",
                "POSTGRESQL_DATABASE": "db",
            })
            db_labels = {"app": "postgres"}
            db_deployment = Deployment(name="database",
                                       selector=db_labels,
                                       labels=db_labels,
                                       image_metadata=pg_metadata,
                                       namespace=namespace,
                                       create_in_cluster=True)
            try:
                db_deployment.wait(200)
                assert db_deployment.all_pods_ready()
                assert any(db_deployment.name == d.name
                           for d in k8s_backend.list_deployments())
            finally:
                db_deployment.delete()
                k8s_backend.delete_namespace(namespace)
def test_database_deployment():
    """Deploy postgres with a Service and wait for all pods to be ready."""
    with K8sBackend() as k8s_backend:
        namespace = k8s_backend.create_namespace()
        with DockerBackend() as docker_be:
            pg_image = docker_be.ImageClass("centos/postgresql-10-centos7")
            pg_metadata = pg_image.get_metadata()
            # set up env variables
            pg_metadata.env_variables.update({
                "POSTGRESQL_USER": "******",
                "POSTGRESQL_PASSWORD": "******",
                "POSTGRESQL_DATABASE": "db",
            })
            db_labels = {"app": "postgres"}
            db_service = Service(name="database",
                                 ports=["5432"],
                                 selector=db_labels,
                                 namespace=namespace,
                                 create_in_cluster=True)
            db_deployment = Deployment(name="database",
                                       selector=db_labels,
                                       labels=db_labels,
                                       image_metadata=pg_metadata,
                                       namespace=namespace,
                                       create_in_cluster=True)
            try:
                db_deployment.wait(200)
                assert db_deployment.all_pods_ready()
            finally:
                db_deployment.delete()
                db_service.delete()
                k8s_backend.delete_namespace(namespace)
def test_run_via_api():
    """Start a container through the API with many run options and inspect it."""
    with DockerBackend() as backend:
        image = backend.ImageClass(FEDORA_MINIMAL_REPOSITORY,
                                   tag=FEDORA_MINIMAL_REPOSITORY_TAG)
        builder = DockerRunBuilder(
            command=['sleep', '10'],
            additional_opts=[
                '-l', 'hello=there',
                '-l', 'oh=noo',
                '--name', 'test',
                '-d',
                '--hostname', 'my_hostname',
                '--rm',
                '--memory', '1G',
                '--workdir', '/tmp',
                '--env', 'ENV1=my_env',
                '-p', '123:12345',
                '--cap-add', 'MKNOD',
                '--cap-add', 'SYS_ADMIN',
                '--cap-drop', 'SYS_ADMIN',
                '--dns', 'www.example.com',
                '--volume', '/tmp:/tmp',
                '--no-healthcheck',
            ])
        cont = image.run_via_api(builder.get_parameters())
        try:
            assert "Config" in cont.inspect()
            assert cont.get_id() == str(cont)
            assert repr(cont)
            assert isinstance(cont.get_id(), string_types)
        finally:
            cont.delete(force=True)
def test_conu():
    """
    Function tests memcached container with conu
    """
    backend = DockerBackend(logging_level=logging.DEBUG)
    i = backend.ImageClass("docker.io/modularitycontainers/memcached")
    i.pull()
    rb = conu.DockerRunBuilder(command=["/files/memcached.sh"])
    c = i.run_via_binary(rb)
    try:
        assert c.is_running()
        c.wait_for_port(11211)
        # talk to memcached over telnet and store one key
        session = pexpect.spawn("telnet %s 11211 " % c.get_IPv4s()[0])
        session.sendline('set Test 0 100 10')
        session.sendline('JournalDev')
        assert session.expect('STORED') == 0
        session.sendline('quit')
    finally:
        # the original leaked the container (never stopped/deleted, and an
        # assertion failure left it running forever) -- always clean up
        c.delete(force=True)
def rabbitmq_container(docker_backend: conu.DockerBackend,
                       docker_network: str) -> conu.DockerContainer:
    """
    Fixture preparing and yielding a RabbitMQ container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network ID (fixture).

    Yields:
        The RabbitMQ container.
    """
    # Define the container and start it
    rabbitmq_image = docker_backend.ImageClass("bodhi-ci-integration-rabbitmq")
    container = rabbitmq_image.run_via_api()
    container.start()
    docker_backend.d.connect_container_to_network(
        container.get_id(), docker_network["Id"],
        aliases=["rabbitmq", "rabbitmq.ci"])
    # we need to wait for the broker to start listening
    container.wait_for_port(5672, timeout=30)
    # wait until the embedded consumer is connected
    for _attempt in range(15):
        if _consumer_is_connected(container, "dumper"):
            break
        print("Consumer not connected yet, retrying")
        time.sleep(1)
    else:
        raise RuntimeError(
            "The Fedora Messaging consumer did not connect in time")
    yield container
    container.kill()
    container.delete()
def _get_target_instance(target_name):
    """
    Get the Container/Image instance for the given name.
    (Container is the first choice.)

    :param target_name: str
    :return: Container/Image
    """
    with DockerBackend(logging_level=logging.NOTSET) as backend:
        # containers take precedence over images
        try:
            return backend.ContainerClass(image=None, container_id=target_name)
        except NotFound:
            pass
        # fall back to an image; honour an optional ":tag" suffix
        name_split = target_name.split(':')
        if len(name_split) == 2:
            repo, tag = name_split
            image = backend.ImageClass(
                repository=repo, tag=tag,
                pull_policy=DockerImagePullPolicy.NEVER)
        else:
            image = backend.ImageClass(
                repository=target_name,
                pull_policy=DockerImagePullPolicy.NEVER)
        if image.is_present():
            return image
        return None
def self_cleanup():
    """Demonstrate `--rm`: the container disappears as soon as it stops."""
    import logging
    import pytest
    from conu import DockerBackend, DockerRunBuilder

    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(IMAGE_NAME)

    # alternative of docker run --rm nginx
    container = image.run_via_binary(DockerRunBuilder(additional_opts=['--rm']))
    assert container.is_running()

    # check container is removed when stopped
    container.stop()
    with pytest.raises(Exception):
        container.inspect()
def test_list_images():
    """list_images() must expose digest metadata for a known image id."""
    with DockerBackend() as backend:
        image_list = backend.list_images()
        assert len(image_list) > 0
        wanted_id = "756d8881fb18271a1d55f6ee7e355aaf38fb2973f5fbb0416cf5de628624318b"
        image_under_test = [
            img for img in image_list
            if img.metadata.identifier == wanted_id
        ][0]
        assert image_under_test.metadata.digest
        assert image_under_test.metadata.repo_digests
def test_pull_always():
    """Pull policy ALWAYS must fetch the image on instantiation."""
    with DockerBackend() as backend:
        busybox = backend.ImageClass(
            "docker.io/library/busybox", tag="1.25.1",
            pull_policy=DockerImagePullPolicy.ALWAYS)
        try:
            assert busybox.is_present()
        finally:
            busybox.rmi(force=True)
def execute_container_testing_cmd():
    """Run `which nginx` inside the image to verify nginx is installed."""
    with DockerBackend() as backend:
        image = backend.ImageClass("nginix_rpw", "1.0.0")
        run_cmd = DockerRunBuilder(command=["which", "nginx"])
        our_container = image.run_via_binary(run_cmd)
        # exit code 0 from `which` means the binary exists on PATH
        assert our_container.exit_code() == 0, "command not found"
        print("******* ngnix was installed in container *******************8")
def docker_backend():
    """Fixture yielding a Conu Docker backend.

    Yields:
        conu.DockerBackend: The Docker backend.
    """
    # Redefined to set the scope
    with DockerBackend(logging_level=logging.DEBUG) as docker_be:
        yield docker_be
def test_pull_if_not_present():
    """The default pull policy must fetch an image that is absent locally."""
    with DockerBackend() as backend:
        # precondition: the image is not available locally
        with pytest.raises(docker.errors.DockerException):
            get_client().inspect_image("docker.io/library/busybox:1.25.1")
        busybox = backend.ImageClass("docker.io/library/busybox", tag="1.25.1")
        try:
            assert busybox.is_present()
        finally:
            busybox.rmi(force=True)
def check_output():
    """Run `echo` in a container and verify the captured log output."""
    import logging
    from conu import DockerBackend, DockerRunBuilder

    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(IMAGE_NAME)

    # run own command in container
    message = 'Hello DevConf.cz 2018!'
    container = image.run_via_binary(
        DockerRunBuilder(command=['echo', message]))

    # check it went ok -- echo appends a trailing newline
    assert container.logs().decode('utf-8') == message + '\n'
    print('Success!')

    # cleanup
    container.delete(force=True)
def check_localhost_port():
    """Publish a container port and verify it is reachable on localhost."""
    import logging
    import time
    from conu import DockerBackend, check_port

    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(IMAGE_NAME)

    # publish 8080 port
    container = image.run_via_binary(additional_opts=['-p', '8080:8080'])
    time.sleep(2)  # give the service a moment to start listening

    # check it is published correctly
    check_port(host='localhost', port=8080)
    print('Success!')

    # cleanup
    container.delete(force=True)