Ejemplo n.º 1
0
def run_container(local_dir, port=8765):
    """
    Serve path `local_dir` using the python http webserver in a docker container.

    :param local_dir: str, path to the directory, it should exist
    :param port: int, TCP port the http server binds to inside the container
        (default mirrors the module-level ``port`` constant; previously this
        function read a global, which hid the dependency)
    :return: instance of DockerContainer
    """
    image_name = "registry.fedoraproject.org/fedora"
    image_tag = "27"

    # we'll run our container using docker engine
    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(image_name, tag=image_tag)

    # is the image present locally? if not, pull it
    try:
        image.get_metadata()
    except Exception:
        image.pull()

    # helper class to create `docker run ...` -- we want to test the same
    # experience as our users
    b = DockerRunBuilder(
        # the command to run in a container
        command=[
            "python3", "-m", "http.server", "--bind", "0.0.0.0",
            "%d" % port
        ],
        # additional options passed to `run` command: bind-mount the served
        # directory into the container and make it the working directory
        additional_opts=["-v",
                         "%s:/webroot" % local_dir, "-w", "/webroot"])
    # let's run the container (in the background)
    container = image.run_via_binary(run_command_instance=b)
    return container
Ejemplo n.º 2
0
def rabbitmq_container(docker_backend: conu.DockerBackend,
                       docker_network: dict) -> conu.DockerContainer:
    """
    Fixture preparing and yielding a RabbitMQ container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network description (fixture); only its
            "Id" key is used here (was annotated ``str``, but the body
            indexes it as a mapping).

    Yields:
        The RabbitMQ container.

    Raises:
        RuntimeError: If the embedded Fedora Messaging consumer does not
            connect within the retry window.
    """
    # Define the container and start it
    image_name = "bodhi-ci-integration-rabbitmq"
    image = docker_backend.ImageClass(image_name)
    container = image.run_via_api()
    try:
        container.start()
        docker_backend.d.connect_container_to_network(
            container.get_id(),
            docker_network["Id"],
            aliases=["rabbitmq", "rabbitmq.ci"])
        # we need to wait for the broker to start listening
        container.wait_for_port(5672, timeout=30)
        # wait until the embedded consumer is connected
        for i in range(15):
            if _consumer_is_connected(container, "dumper"):
                break
            print("Consumer not connected yet, retrying")
            time.sleep(1)
        else:
            raise RuntimeError(
                "The Fedora Messaging consumer did not connect in time")
        yield container
    finally:
        # Always clean up, even when setup fails or the generator is closed,
        # so the container does not leak between test runs.
        container.kill()
        container.delete()
Ejemplo n.º 3
0
def ipsilon_container(docker_backend: conu.DockerBackend,
                      docker_network: dict) -> conu.DockerContainer:
    """
    Fixture preparing and yielding an Ipsilon container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network (fixture).
            NOTE(review): annotated as dict but ``.get_id()`` is called on
            it — confirm the fixture's actual type.

    Yields:
        The Ipsilon container.
    """
    # Assemble the `docker run` options: a throwaway container attached to
    # the test network under several host aliases.
    run_opts = ["--rm", "--name", "ipsilon",
                "--network", docker_network.get_id()]
    for alias in ("ipsilon", "ipsilon.ci", "id.dev.fedoraproject.org"):
        run_opts.extend(["--network-alias", alias])

    image = docker_backend.ImageClass("bodhi-ci-integration-ipsilon")
    container = image.run_via_binary(additional_opts=run_opts)
    container.start()
    # block until the container starts answering on its HTTP port
    container.wait_for_port(80, timeout=30)
    yield container
    stop_and_delete(container)
Ejemplo n.º 4
0
def ipsilon_container(docker_backend: conu.DockerBackend,
                      docker_network: dict) -> conu.DockerContainer:
    """
    Fixture preparing and yielding an Ipsilon container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network description (fixture); only its
            "Id" key is used here.

    Yields:
        The Ipsilon container.
    """
    # Define the container and start it
    image_name = "bodhi-ci-integration-ipsilon"
    image = docker_backend.ImageClass(image_name)
    container = image.run_via_api()
    try:
        container.start()
        docker_backend.d.connect_container_to_network(
            container.get_id(),
            docker_network["Id"],
            aliases=["ipsilon", "ipsilon.ci", "id.dev.fedoraproject.org"])
        # wait for the web server to start listening (port 80 is Ipsilon's
        # HTTP endpoint, not a message broker)
        container.wait_for_port(80, timeout=30)
        yield container
    finally:
        # Always clean up, even when setup fails or the generator is closed,
        # so the container does not leak between test runs.
        container.kill()
        container.delete()
Ejemplo n.º 5
0
def mount_container_filesystem():
    import logging
    from conu import DockerBackend

    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass(IMAGE_NAME)

    # start an nginx container whose filesystem we will inspect
    container = image.run_via_binary()

    # expose the container's filesystem on the host
    with container.mount() as fs:
        # nginx must ship its main configuration file ...
        assert fs.file_is_present('/etc/nginx/nginx.conf')

        # ... the default landing page must be there ...
        index_path = '/usr/share/nginx/html/index.html'
        assert fs.file_is_present(index_path)

        # ... and it must contain the expected welcome heading
        index_text = fs.read_file('/usr/share/nginx/html/index.html')
        assert '<h1>Welcome to nginx!</h1>' in index_text
        print(index_text)

    print('Success!')

    # tear the container down
    container.delete(force=True)
def test_conu():
    """
    Function tests memcached container with conu
    """
    backend = DockerBackend(logging_level=logging.DEBUG)
    image = backend.ImageClass("docker.io/modularitycontainers/memcached")
    image.pull()
    builder = conu.DockerRunBuilder(command=["/files/memcached.sh"])
    container = image.run_via_binary(builder)
    assert container.is_running()
    container.wait_for_port(11211)
    # drive the memcached text protocol over telnet via pexpect
    session = pexpect.spawn("telnet %s 11211 " % container.get_IPv4s()[0])
    session.sendline('set Test 0 100 10')
    session.sendline('JournalDev')
    # memcached acknowledges a successful store with STORED
    assert session.expect('STORED') == 0
    session.sendline('quit')
Ejemplo n.º 7
0
def basics():
    import logging
    from conu import DockerBackend

    # set up the backend and grab a handle on the image
    docker_backend = DockerBackend(logging_level=logging.DEBUG)
    nginx_image = docker_backend.ImageClass(IMAGE_NAME)

    # launch the container and verify it came up
    nginx_container = nginx_image.run_via_binary()
    assert nginx_container.is_running()
    print('Success!')

    # tear the container down
    nginx_container.stop()
    nginx_container.delete()
Ejemplo n.º 8
0
def self_cleanup():
    import logging
    import pytest
    from conu import DockerBackend, DockerRunBuilder

    docker_backend = DockerBackend(logging_level=logging.DEBUG)
    image = docker_backend.ImageClass(IMAGE_NAME)

    # the equivalent of `docker run --rm nginx`
    opts = DockerRunBuilder(additional_opts=['--rm'])
    container = image.run_via_binary(opts)
    assert container.is_running()

    # after stopping, `--rm` removes the container, so inspecting it fails
    container.stop()
    with pytest.raises(Exception):
        container.inspect()
Ejemplo n.º 9
0
def check_localhost_port():
    import logging
    import time
    from conu import DockerBackend, check_port

    docker_backend = DockerBackend(logging_level=logging.DEBUG)
    image = docker_backend.ImageClass(IMAGE_NAME)

    # publish container port 8080 on the host, then give it a moment
    container = image.run_via_binary(additional_opts=['-p', '8080:8080'])
    time.sleep(2)

    # the published port must be reachable on localhost
    check_port(host='localhost', port=8080)
    print('Success!')

    # tear the container down
    container.delete(force=True)
Ejemplo n.º 10
0
def check_output():
    import logging
    from conu import DockerBackend, DockerRunBuilder

    docker_backend = DockerBackend(logging_level=logging.DEBUG)
    image = docker_backend.ImageClass(IMAGE_NAME)

    # echo a custom message from inside the container
    message = 'Hello DevConf.cz 2018!'
    builder = DockerRunBuilder(command=['echo', message])
    container = image.run_via_binary(builder)

    # the container log must be exactly the message plus echo's newline
    assert container.logs().decode('utf-8') == message + '\n'
    print('Success!')

    # tear the container down
    container.delete(force=True)
Ejemplo n.º 11
0
def rabbitmq_container(docker_backend: conu.DockerBackend,
                       docker_network: str) -> conu.DockerContainer:
    """
    Fixture preparing and yielding a RabbitMQ container.

    Args:
        docker_backend: The Docker backend (fixture).
        docker_network: The Docker network (fixture).
            NOTE(review): annotated as str but ``.get_id()`` is called on
            it — confirm the fixture's actual type.

    Yields:
        The RabbitMQ container.

    Raises:
        RuntimeError: If the embedded Fedora Messaging consumer does not
            connect within the retry window.
    """
    # Define the container and start it
    image_name = "bodhi-ci-integration-rabbitmq"
    image = docker_backend.ImageClass(image_name)
    run_opts = [
        "--rm",
        "--name",
        "rabbitmq",
        "--network",
        docker_network.get_id(),
        "--network-alias",
        "rabbitmq",
        "--network-alias",
        "rabbitmq.ci",
    ]
    container = image.run_via_binary(additional_opts=run_opts)
    try:
        container.start()
        # we need to wait for the broker to start listening
        container.wait_for_port(5672, timeout=30)
        # wait until the embedded consumer is connected
        for i in range(60):
            if _consumer_is_connected(container, "dumper"):
                break
            print("Consumer not connected yet, retrying")
            time.sleep(1)
        else:
            raise RuntimeError(
                "The Fedora Messaging consumer did not connect in time")
        yield container
    finally:
        # Always clean up, even when setup fails or the generator is closed,
        # so the container does not leak between test runs.
        stop_and_delete(container)
Ejemplo n.º 12
0
def check_port():
    import logging
    from conu import DockerBackend

    docker_backend = DockerBackend(logging_level=logging.DEBUG)
    image = docker_backend.ImageClass(IMAGE_NAME)

    # start the container and block until the web server answers
    http_port = 80
    container = image.run_via_binary()
    container.wait_for_port(http_port)

    # issue an HTTP request ourselves and verify it succeeded
    http_response = container.http_request(port=http_port)
    assert http_response.ok

    # the default nginx welcome page should be served
    assert '<h1>Welcome to nginx!</h1>' in http_response.text
    print('Success!')

    # tear the container down
    container.delete(force=True)
Ejemplo n.º 13
0
#!/usr/bin/python3
# Script: serve the current directory over HTTP from inside a Fedora 27
# container, building the `docker run` invocation with conu's
# DockerRunBuilder so the test mirrors a real user's command line.

import logging

# NOTE(review): random_str and Directory are imported but not used in this
# visible portion — possibly used later in the original script; confirm.
from conu import DockerRunBuilder, DockerBackend, random_str, Directory

# our webserver will be accessible on this port
port = 8765

# we'll utilize this container image
image_name = "registry.fedoraproject.org/fedora"
image_tag = "27"

# we'll run our container using docker engine
backend = DockerBackend(logging_level=logging.DEBUG)
image = backend.ImageClass(image_name, tag=image_tag)

# is the image present? if not, pull it
try:
    image.get_metadata()
except Exception:
    image.pull()

# helper class to create `docker run ...` command -- we want to test the same
# experience as our users
b = DockerRunBuilder(
    # the command to run in a container
    command=["python3", "-m", "http.server", "--bind", "0.0.0.0", "%d" % port],
)
# let's run the container (in the background)
container = image.run_via_binary(run_command_instance=b)
Ejemplo n.º 14
0
class SwarmCaptain(Captain):
    """Container manager that drives Docker Swarm through the Docker API and conu.

    One-off runs go through conu (``run``/``run_via_binary``); replicated
    deployments go through the Docker API's service objects (``deploy``).
    """

    compass_cls = SwarmCompass

    # unit-conversion factors used when translating resource specs
    _CPU_RATE = 10**9  # vCores to nanoCores
    _MEM_RATE = 1024 * 1024  # MB to bytes
    _SEC_RATE = 10**9  # seconds to nanoseconds

    def __init__(self, section: str, **kwargs):

        super().__init__(section, **kwargs)

        if self.LOG.name != LoggerConst.DEFAULT_NAME:
            # note the trailing space: without it the two string literals
            # concatenate into "manager.This"
            self.LOG.warn(
                "Using Docker Swarm as container manager. "
                "This is not recommended for distributed environments")

        self.docker_api = self.docker_compass.get_api()
        self.docker_backend = DockerBackend(logging_level=logging.ERROR)

    def run(self,
            img: ImageSpec,
            env_vars,
            mounts,
            cargos,
            ports,
            cmd: list,
            name: str,
            foreground=False):
        """Run a one-off container and return it (watching it if foreground)."""

        self.make_name_available(name)
        # load every cargo volume before starting the container
        # (plain loop: the previous list comprehension was used purely
        # for its side effects)
        for cargo in cargos:
            self.load_vol(cargo, name)
        image = self.docker_backend.ImageClass(img.repo, tag=img.tag)

        additional_opts = \
            self.conu_ports(ports) + \
            self.conu_env_vars(env_vars) + \
            self.conu_name(name) + \
            self.conu_resources()

        kwargs = self.cleaner(
            dict(command=cmd,
                 volumes=self.conu_mounts(mounts) + self.conu_vols(cargos),
                 additional_opts=additional_opts,
                 popen_params=dict(stdout=self.LOG.file_handle,
                                   stderr=self.LOG.file_handle)
                 if self.LOG.background else None))

        if foreground:
            cont = image.run_via_binary_in_foreground(**kwargs)
            self.watch_cont(cont)
        else:
            cont = image.run_via_binary(**kwargs)

        return cont

    def deploy(self,
               img: ImageSpec,
               env_vars,
               mounts,
               cargos,
               ports,
               cmd: list,
               name: str,
               tasks: int = 1,
               allow_probe=False):
        """Create or update a replicated Swarm service for the given image."""

        for cargo in cargos:
            self.load_vol(cargo, name)
        self.assert_network()
        depl = self.find_depl(name)

        kwargs = self.cleaner(
            dict(name=name,
                 endpoint_spec=dict(Ports=self.swarm_ports(ports)),
                 networks=[DockerConst.NETWORK],
                 mode=ServiceMode('replicated', replicas=tasks),
                 task_template=TaskTemplate(
                     force_update=5,
                     resources=self.swarm_resources(),
                     container_spec=ContainerSpec(
                         command=cmd,
                         image=img.target,
                         env=env_vars,
                         mounts=mounts + [v.mount for v in cargos],
                         healthcheck=self.swarm_healthcheck(allow_probe)))))

        if depl is None:
            self.LOG.debug(
                "Creating container service '{}' with kwargs:".format(name))
            self.LOG.debug(kwargs)
            return self.docker_api.create_service(**kwargs)
        else:
            # updating requires the current service id and version index
            kwargs.update(service=depl['ID'], version=depl['Version']['Index'])
            self.LOG.debug(
                "Updating container service '{}' with kwargs:".format(name))
            self.LOG.debug(kwargs)
            return self.docker_api.update_service(**kwargs)

    def dispose_run(self, name: str):
        """Remove the named one-off container; return True if removed."""

        cont = self.find_cont(name)

        if cont is not None:
            return self.rm_cont(cont)
        else:
            return False

    def dispose_deploy(self, name: str):
        """Remove the named Swarm service; return True on success."""

        return self.rm_depl(name)

    @patient
    def rm_vol(self, cargo: Cargo, ignore=False):
        """Force-remove the volume behind *cargo*, retrying via @patient."""

        # mapped cargos are host paths, not Docker volumes — nothing to remove
        if isinstance(cargo, MappedCargo):
            return False

        try:
            self.docker_api.remove_volume(name=cargo.name, force=True)
            return True
        except DockerAPIError as e:
            if ignore:
                self.LOG.error(e)
                return False
            else:
                msg = "Waiting up to {} seconds for removal of volume {}".format(
                    self.timeout, cargo.name)
                raise PatientError(wait_callback=lambda: self.LOG.info(msg),
                                   original_exception=e)

    def rm_cont(self, x: DockerContainer):
        """Force-delete a container; return True on success, False on API error."""

        try:
            x.delete(force=True)
        except DockerAPIError as e:
            self.LOG.error(e)
            return False
        else:
            return True

    def rm_depl(self, name: str):
        """Remove a Swarm service by name; return True on success."""

        try:
            self.docker_api.remove_service(name)
        except Exception as e:
            self.LOG.error(e)
            return False
        else:
            return True

    def list_cont_or_pod_ids(self):
        """Return the ids of all containers known to the Docker backend."""

        return [
            cont.get_id() for cont in self.docker_backend.list_containers()
        ]

    def find_cont(self, name):
        """Find a container by name, or None."""

        return self._find_sth(what='containers',
                              method=self.docker_backend.list_containers,
                              name=name)

    def find_depl(self, name):
        """Find a Swarm service by name, or None."""

        return self._find_sth(what='deployments',
                              method=self.docker_api.services,
                              filters=dict(name=name),
                              key=lambda _: True,
                              name=name)

    def find_net(self):
        """Find the project's Docker network, or None."""

        return self._find_sth(what='networks',
                              method=self.docker_api.networks,
                              names=[DockerConst.NETWORK],
                              key=lambda _: True,
                              name=DockerConst.NETWORK)

    def make_name_available(self, name):
        """Remove any existing container holding *name* so it can be reused."""

        existing = self.find_cont(name)

        if existing is not None:
            self.LOG.warn("Removing old container '{}'".format(name))
            self.rm_cont(existing)

    def watch_cont(self, container: DockerContainer):
        """Block until the container stops, flagging interruption on Ctrl-C."""

        try:
            while container.is_running():
                time.sleep(1)
        except (KeyboardInterrupt, InterruptedError):
            self.interrupted = True

    def wait_for_net(self):
        """Poll until the Docker network exists, up to self.timeout seconds."""

        for _ in range(self.timeout):
            if self.find_net() is None:
                time.sleep(1)
            else:
                break
        else:
            raise NhaDockerError("Timed out waiting for Docker network")

    def assert_network(self):
        """Create the overlay network if it does not exist yet."""

        if self.find_net() is not None:
            return

        kwargs = dict(
            name=DockerConst.NETWORK,
            driver="overlay",
            attachable=False,
            scope="global",
            ingress=False
        )  # standard properties for a network that is meant to be used only by Swarm services

        self.LOG.info("Creating Docker network")
        self.LOG.debug(kwargs)
        self.docker_api.create_network(**kwargs)
        self.wait_for_net()

    def assert_vol(self, cargo: Cargo):
        """Create the volume for *cargo* if absent; return True if created."""

        vols = self.docker_api.volumes(dict(name=cargo.name))

        if len(vols) == 0:
            self.docker_api.create_volume(name=cargo.name)

            return True
        elif len(vols) == 1:
            return False
        else:
            # bug fix: the original `return NotImplementedError()` handed the
            # exception instance back to the caller instead of raising it
            raise NotImplementedError()

    def load_vol(self, cargo: Cargo, mule_alias: str = None):
        """Deploy *cargo*'s contents into its volume via a helper ("mule") container.

        Returns True when contents were loaded, False when there was nothing
        to do; re-raises any error after removing the half-loaded volume.
        """

        work_path, mule, error = None, None, None

        if not isinstance(cargo, MappedCargo):
            self.assert_vol(cargo)

        if isinstance(cargo, EmptyCargo) or len(cargo.contents) == 0:
            return False

        try:
            self.LOG.debug("Loading volume '{}'".format(cargo.name))
            mule = self.get_mule(cargo, mule_alias)
            self.clear_mule(mule)
            work_path = Workpath.get_tmp()
            kwargs = dict(include_heavy_cargos=True) if isinstance(
                cargo, SharedCargo) else {}
            cargo.deploy(work_path, **kwargs)

            for file_name in os.listdir(work_path):
                self.copy_to(src=work_path.join(file_name),
                             dest=DockerConst.STG_MOUNT,
                             cont=mule)

        except Exception as e:
            error = e
        else:
            return True
        finally:
            # dispose of the scratch dir and the mule container in any case;
            # on failure also drop the (possibly partial) volume and re-raise
            if work_path is not None:
                work_path.dispose()
            if mule is not None:
                self.rm_cont(mule)
            if error is not None:
                self.rm_vol(cargo, ignore=True)
                raise error

    def get_mule(self, cargo: Cargo, mule_alias: str = None):
        """Start and return a helper container with *cargo*'s volume mounted."""

        repo, tag = DockerConst.MULE_IMG.split(':')
        image = self.docker_backend.ImageClass(repo, tag=tag)
        name = self.mule_name(mule_alias)

        kwargs = dict(additional_opts=self.conu_name(name),
                      command=DockerConst.MULE_CMD,
                      volumes=[(cargo.name, DockerConst.STG_MOUNT, 'rw')])

        return image.run_via_binary(**kwargs)

    def clear_mule(self, mule: DockerContainer):
        """Delete every file currently inside the mule's staging mount."""

        ls_output = mule.execute('ls {}'.format(DockerConst.STG_MOUNT))

        if not ls_output:
            return

        for file_name in ls_output[0].strip().decode('utf-8').split('\n'):
            mule.execute('rm -rf {}/{}'.format(DockerConst.STG_MOUNT,
                                               file_name))

    def copy_to(self, src: str, dest: str, cont: DockerContainer):
        """Copy a host path into a container."""

        cont.copy_to(src=src, dest=dest)

    def _exec_in_cont(self, cont: DockerContainer, cmd: str):
        """Run a whitespace-split command inside the container, blocking."""

        cont.execute(cmd.split(' '), blocking=True)

    def conu_vols(self, vols: List[Cargo]):
        """Convert cargo mount strings into conu volume tuples."""

        return [tuple(v.mount.split(':')) for v in vols]

    def conu_mounts(self, mounts: List[str]):
        """Convert "src:dest[:mode]" strings into conu mount tuples."""

        return [tuple(m.split(':')) for m in mounts]

    def conu_ports(self, ports: list):
        """Render port specs as `-p` CLI options."""

        ports_opt = []

        for p in ports:
            ports_opt += ['-p', p]

        return ports_opt

    def conu_env_vars(self, env_vars: dict):
        """Render environment variables as `-e` CLI options."""

        env_vars_opt = []

        for e in dict_to_kv_list(env_vars):
            env_vars_opt += ['-e', e]

        return env_vars_opt

    def conu_name(self, name: str = None):
        """Render the `--name` CLI option, or [] when no name is given."""

        if name is None:
            return []
        else:
            return ['--name', name]

    def swarm_ports(self, ports):
        """Translate "published:target" port strings into Swarm port specs.

        Single-port entries are skipped: exposure without publishing is not
        necessary in swarm mode.
        """

        port_specs = []

        for p in ports:
            if ':' in p:
                p_from, p_to = p.split(':')
                port_specs.append(
                    dict(PublishedPort=int(p_from),
                         TargetPort=int(p_to),
                         Protocol='tcp'))
            else:
                continue  # single port exposure is not necessary in swarm mode

        return port_specs

    def swarm_resources(self):
        """Build a Swarm Resources object from self.resources, or None."""

        if self.resources is None:
            return None
        else:
            if self.resources.get('enable_gpu', False):
                return Resources(
                    cpu_limit=int(self.resources['limits']['cpu'] *
                                  self._CPU_RATE),
                    mem_limit=self.resources['limits']['memory'] *
                    self._MEM_RATE,
                    cpu_reservation=int(self.resources['requests']['cpu'] *
                                        self._CPU_RATE),
                    mem_reservation=self.resources['requests']['memory'] *
                    self._MEM_RATE,
                    generic_resources={'gpu': 1})
            else:
                return Resources(
                    cpu_limit=int(self.resources['limits']['cpu'] *
                                  self._CPU_RATE),
                    mem_limit=self.resources['limits']['memory'] *
                    self._MEM_RATE,
                    cpu_reservation=int(self.resources['requests']['cpu'] *
                                        self._CPU_RATE),
                    mem_reservation=self.resources['requests']['memory'] *
                    self._MEM_RATE)

    def conu_resources(self):
        """Render self.resources as `docker run` resource CLI options."""

        if self.resources is None:
            return []
        else:
            res = [
                '--cpus',
                str(self.resources['limits']['cpu']), '--memory-reservation',
                '{}m'.format(self.resources['requests']['memory'])
            ]

            if self.resources.get('enable_gpu', False):
                res = res + ['--gpus', 'all']

            return res

    def swarm_healthcheck(self, allow_probe=False):
        """Build a Swarm Healthcheck from self.healthcheck, or None when disabled."""

        if allow_probe and self.healthcheck['enabled']:
            return Healthcheck(
                test=["CMD", "curl", "-f", "http://localhost:8080/health"],
                interval=self.healthcheck['interval'] * self._SEC_RATE,
                timeout=self.healthcheck['timeout'] * self._SEC_RATE,
                retries=self.healthcheck['retries'],
                start_period=self.healthcheck['start_period'] * self._SEC_RATE)
        else:
            return None