Example #1
class docker_driver():
    def __init__(self, socket, image, launch_cmd, hostname):
        self.socket = socket
        self.image = image
        self.hostname = hostname
        self.launch_cmd = launch_cmd
        self.make_connection()

    def make_connection(self):
        self.connection = Client(self.socket)

    def launch_container(self):
        self.container_id = self.connection.create_container(
            image=self.image, tty=True, hostname=self.hostname)['Id']
        self.connection.start(self.container_id)
        exec_id = self.connection.exec_create(self.container_id,
                                              self.launch_cmd)['Id']
        self.connection.exec_start(exec_id, tty=True)
        self.container_data = self.connection.inspect_container(
            self.container_id)
        return {
            "id": self.container_id,
            "ip": self.container_data['NetworkSettings']['Networks']['bridge']['IPAddress']
        }

    def teardown_container(self):
        self.connection.stop(self.container_id)
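
A minimal usage sketch for the driver above, assuming docker-py's old Client API; the socket path, image, command and hostname are placeholders, not part of the original example.

from docker import Client  # docker_driver.make_connection() expects this name at module level

driver = docker_driver('unix://var/run/docker.sock', 'ubuntu:16.04',
                       '/bin/sleep 3600', 'example-host')  # placeholder values
info = driver.launch_container()
print(info['id'], info['ip'])
driver.teardown_container()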
Example #2
def main():
    pod_name = os.environ.get("POD_NAME", None)

    if not pod_name:
        print 'make sure POD_NAME is injected to the container as an env variable'
        sys.exit(1)

    cli = Client(base_url='unix://var/run/docker.sock')

    containers = cli.containers(filters={'label': 'io.kubernetes.pod.name='+pod_name})

    container_id = None
    for container in containers:
        if container['Labels']['io.kubernetes.container.name'] == 'nginx':
            container_id = container['Id']
            break
        # if container['Labels']['io.kubernetes.container.name'] not in ('cdsmon-collectd', 'POD'):
        #     container_id = container['Id']
        #     break

    if not container_id:
        print 'cannot pick container id automatically'
        sys.exit(1)

    cmd = cli.exec_create(container=container_id, cmd='hostname')
    print cli.exec_start(cmd)
Example #3
 def runCommand(self, container, cmd, stream=True):
     cli = Client(base_url='unix://var/run/docker.sock')
     ex = cli.exec_create(container=container, cmd=cmd)
     if stream:
         for result in cli.exec_start(exec_id=ex["Id"], stream=True):
             print(result)
         return cli.exec_inspect(exec_id=ex["Id"])['ExitCode']
     else:
         return cli.exec_start(exec_id=ex["Id"])
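
The streaming branch above follows a common docker-py pattern: start the exec with stream=True, drain the generator, then read the exit code with exec_inspect. A standalone sketch of that pattern, with a placeholder container name:

from docker import Client

cli = Client(base_url='unix://var/run/docker.sock')
ex = cli.exec_create(container='my-container', cmd='ls /')  # 'my-container' is a placeholder
for chunk in cli.exec_start(exec_id=ex['Id'], stream=True):
    print(chunk)
print(cli.exec_inspect(exec_id=ex['Id'])['ExitCode'])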
Example #4
def write_to_ct(port, service_name, project_name, username):
    machines = database_update.get_machines()
    for machine in machines:
        cli = Client(base_url=machine, version=config.c_version)
        tt = cli.exec_create(
            container='nginx',
            cmd=
            '/bin/bash -c \"cd /etc/consul-templates && sh refresh.sh %s %s %s %s\"'
            % (port, service_name, project_name, username))
        cli.exec_start(exec_id=tt, detach=True)
Example #5
def delete_from_ct(project_name, username):
    machines = database_update.get_machines()
    for machine in machines:
        cli = Client(base_url=machine, version=config.c_version)
        tt = cli.exec_create(
            container='nginx',
            cmd=
            '/bin/bash -c \"cd /etc/consul-templates && bash delete.sh %s-%s\"'
            % (project_name, username))
        cli.exec_start(exec_id=tt, detach=True)
Example #6
class Docker:
    def __init__(self,
                 client_url='unix://var/run/docker.sock',
                 client_version='auto',
                 timeout=50,
                 client_tls=False):
        self.client = Client(base_url=client_url,
                             version=client_version,
                             tls=client_tls)

        self.all_containers = self.client.containers()

        self.attached_container = self.all_containers[0]['Id']

    def container_has_file(self, file_name, path="/"):
        has_file = False

        bash_command = "find %s -maxdepth 1 -type f" % path

        exec_id = self.client.exec_create(self.attached_container,
                                          bash_command)

        files_in_container = self.client.exec_start(exec_id['Id'])

        files_in_container = files_in_container.split('\n')

        for i in files_in_container:
            if file_name in i.split('/'):
                has_file = True

        return has_file

    def container_has_dir(self, dir_name, path="/"):
        has_dir = False

        bash_command = "find %s -maxdepth 1 -type d" % path

        exec_id = self.client.exec_create(self.attached_container,
                                          bash_command)

        dirs_in_container = self.client.exec_start(exec_id['Id'])

        dirs_in_container = dirs_in_container.split('\n')

        for i in dirs_in_container:
            if dir_name in i.split('/'):
                has_dir = True

        return has_dir

    def attach_container(self, id):
        self.attached_container = id
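
A usage sketch for the helper above; it assumes the daemon is reachable on the default socket, at least one container is running, and uses placeholder file and path names:

docker = Docker()
print(docker.container_has_file('passwd', path='/etc'))  # 'passwd' and '/etc' are placeholders
print(docker.container_has_dir('etc', path='/'))
docker.attach_container(docker.all_containers[0]['Id'])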
Example #7
def run():
    code = request.form.get("code")
    runner = request.form.get("runner")

    if not code:
        return "you should send a code!", 500

    f = open("code", "w")
    f.write(code)
    f.close()
    d = Client(base_url=DOCKER_ADDRESS)
    test_connection = d.ping()
    if test_connection != "OK":
        return "docker unavaileble", 500
    container = d.create_container(image=IMAGE_NAME, command="tail -f /dev/null", detach=True)
    response = d.start(container)
    with tarfile.open("code.tar.gz", "w:gz") as tar:
        tar.add(f.name, arcname=os.path.basename(f.name))
    t = open("code.tar.gz", "rb")
    works = d.put_archive(container=container["Id"], path="/root", data=t)
    t.close()
    if not works:
        return "Can't create file in container", 500
    run = RUNNERS.get(runner, None)
    if not run:
        return "Invalid runner", 500
    exe = d.exec_create(container=container["Id"], cmd="{0} /root/code".format(run))
    gen = d.exec_start(exec_id=exe["Id"], stream=True)
    return Response(gen, mimetype="text/plain")
Example #8
class docker_driver(container_base):
    
    def make_connection(self):
        self.connection = Client(self.socket)

    def launch_container(self):
        self.container_id = self.connection.create_container(image=self.image, tty=True, hostname=self.hostname)['Id']
        self.connection.start(self.container_id)
        exec_id = self.connection.exec_create(self.container_id, self.launch_cmd)['Id']
        self.connection.exec_start(exec_id, tty=True)
        self.container_data = self.connection.inspect_container(self.container_id)
        return {"id": self.container_id,
                "ip": self.container_data['NetworkSettings']['Networks']['bridge']['IPAddress']}

    def teardown_container(self):
        self.connection.stop(self.container_id)
Example #9
    def barman_check(self):
        """Connect to the Barman Docker object and check configuration. Error out on failure."""

        docker_client = Client(base_url=self.url)

        try:
            # TODO: verify that the barman container is running
            docker_client.inspect_container(self.container)
        except Exception:
            print('stats err failed when inspecting the container.')
            sys.exit(1)

        try:
            exec_id = docker_client.exec_create(self.container, self.command)
            response = docker_client.exec_start(exec_id)
        except Exception:
            print('stats err failed to execute barman check command in the container.')
            sys.exit(1)

        failed = False
        for line in response.splitlines()[1:]:
            check, value = line.strip().split(': ', 1)
            slug = check.lower().replace(' ', '_').replace('-', '_')
            print('metric {} string {}'.format(slug, value))
            if value.startswith('FAILED'):
                failed = True
        if failed:
            print('status err failure in barman check')
            sys.exit(1)
        print('status ok all checks passed')
Example #10
class Container:
    def __init__(self, name):
        self.name = name

        self.docker = Client()

    def start(self):
        return self.docker.start(self.name)

    def stop(self):
        return self.docker.stop(self.name)

    def execute(self, command):
        data_obj = data.Data()

        cmd = self.docker.exec_create(
            self.name, '/bin/sh -c "' + data_obj.get_env_vars_string(True) +
            ' && ' + command + '"')
        cmd_id = cmd['Id']

        for line in self.docker.exec_start(cmd_id, stream=True):
            yield (line.decode('ascii'))

    def restart(self):
        return self.docker.restart(self.name)

    def status(self):
        return self.docker.status(self.name)

    def destroy(self):
        return self.docker.remove_container(self.name)
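
execute() above is a generator, so output is yielded line by line while the exec runs. A usage sketch, assuming a container named "web" exists and that the data module from the original project is importable:

c = Container('web')  # placeholder container name
c.start()
for line in c.execute('uname -a'):
    print(line)
c.stop()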
Example #11
def system_db_dump_docker(list_dump):
    #   Dump the database schemas (for backup)
    from docker import Client
    c = Client(base_url='unix://var/run/docker.sock')
    # commandid = c.exec_create('postgres', '/usr/bin/pg_dump -h localhost -U estation -F p -a --column-inserts -f
    # /data/static_data/db_dump/estationdb_products_2020-04-21-15:21:33.sql -n products estationdb')

    logger.debug("Entering routine %s" % 'system_db_dump')
    now = datetime.datetime.now()
    # Use db_dump
    dump_dir = es_constants.es2globals['db_dump_dir']
    db_dump = es_constants.es2globals['db_dump_exec']
    status = False
    if len(list_dump) > 0:
        for dump_schema in list_dump:
            # Check if there is one dump for the current day
            existing_dumps = glob.glob(dump_dir + os.path.sep + 'estationdb_' +
                                       dump_schema + '_*')
            match_name = '.*estationdb_' + dump_schema + '_' + now.strftime(
                "%Y-%m-%d-") + '.*.sql'
            matches = [s for s in existing_dumps if re.match(match_name, s)]
            if len(matches) == 0:
                dump_file = dump_dir + os.path.sep + 'estationdb_' + dump_schema + '_' + now.strftime(
                    "%Y-%m-%d-%H:%M:%S") + '.sql'
                command = db_dump + ' -h localhost' + \
                                    ' -U estation' + \
                                    ' -F p -a --column-inserts' + \
                                    ' -f ' + dump_file + \
                                    ' -n ' + dump_schema + ' estationdb'

                logger.info('Command is: %s' % command)
                commandid = c.exec_create('postgres', command)
                status = c.exec_start(commandid)
                # status = + os.system(shlex_quote(command))
        return status
Example #12
class DockerClient(object):
    def __init__(self):
        self.client = Client(base_url='unix://var/run/docker.sock')

    def build_images(self, dockerfile: str, tag: str):
        with open(dockerfile) as file:
            dkfile = BytesIO(file.read().encode('utf-8'))
        response = [line for line in self.client.build(
            fileobj=dkfile, rm=True, tag=tag)]
        return response

    def run_container(self, image, mem_limit=None, volume_binds: list = None, command=None):
        container = self.client.create_container(image=image,
                                                 host_config=self.client.create_host_config(
                                                     binds=volume_binds, mem_limit=mem_limit), command=command)
        self.client.start(container)
        try:
            self.client.wait(container, timeout=3)
        except ReadTimeout:
            print('time out')
        self.client.stop(container)
        self.client.remove_container(container)

    def exec_container(self, container, cmd):
        container_id = self.client.exec_create(container, cmd)['Id']
        return self.client.exec_start(container_id)
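
A usage sketch for the wrapper above; the Dockerfile path, tag, memory limit and command are placeholders:

dc = DockerClient()
dc.build_images('Dockerfile', tag='example:latest')          # placeholder Dockerfile and tag
dc.run_container('example:latest', mem_limit='256m', command='sleep 1')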
Example #13
def node_exit_handler(addr):
    collection = db.containers
    containers = collection.find()
    resource_shares = {'high':{'cpu_shares' : 1000, 'mem_limit' : '600m'}, 'medium':{'cpu_shares' : 100, 'mem_limit' : '400m'}, 'low':{'cpu_shares' : 10, 'mem_limit' : '200m'}}
    for container in containers:
        if container['host_ip'] == addr and container['checkpointed'] == "true":
            host_ip = choose_least_loaded(container['privelege_level'])
            cli = Client(base_url=host_ip+":2375")
            cli.pull(repository=registry+"/"+container['source_image'], tag=container['username']+"_"+container['container_name'], stream=False)
            #Create
            image_name = registry+"/"+container['source_image']+":"+container['username']+"_"+container['container_name']
            privelege_level = container['privelege_level']
            portlist = []
            portmap = {}
            if container['source_image'] == "tomcat":
                portlist.append(22)
                portmap[22] = container['ssh_port']

            host_config = cli.create_host_config(mem_limit=resource_shares[privelege_level]['mem_limit'], port_bindings=portmap)
            container_new = cli.create_container(image=image_name,cpu_shares=resource_shares[privelege_level]['cpu_shares'],host_config=host_config,ports=portlist)

            original_load = host_collection.find({"ip":host_ip})[0][privelege_level]
            host_collection.update_one({"ip":host_ip},{"$set":{privelege_level:original_load+1}})
            collection.update_one({"container_id":container['container_id']},{"$set":{"host_ip":host_ip}})
            collection.update_one({"container_id":container['container_id']},{"$set":{"container_id":container_new['Id']}})
            #Start
            if container['status'] == "Started":
                container_id = container_new['Id']
                response = cli.start(container=container_id)
                executor = cli.exec_create(container=container_id,cmd="bash service ssh start")
                response = cli.exec_start(executor.get('Id'))
    print "Failure handler called"
Example #14
def docker_abs_net_io(container_id):
    """
    Network traffic of all network interfaces within the controller.

    :param container_id: The full ID of the docker container.
    :type container_id: ``str``
    :return: Returns the absolute network I/O till container startup, in bytes. The return dict also contains the
        system time.
    :rtype: ``dict``
    """
    c = Client()
    command = c.exec_create(container_id, 'ifconfig')
    ifconfig = c.exec_start(command['Id'])
    sys_time = int(time.time() * 1000000000)

    in_bytes = 0
    m = re.findall('RX bytes:(\d+)', str(ifconfig))
    if m:
        for number in m:
            in_bytes += int(number)
    else:
        in_bytes = None

    out_bytes = 0
    m = re.findall('TX bytes:(\d+)', str(ifconfig))
    if m:
        for number in m:
            out_bytes += int(number)
    else:
        out_bytes = None

    return {'NET_in': in_bytes, 'NET_out': out_bytes, 'NET_systime': sys_time}
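
A usage sketch for the function above; the container ID is a placeholder, and the target container must produce the legacy "RX bytes:/TX bytes:" ifconfig output the regular expressions expect:

container_id = 'replace-with-a-full-container-id'  # placeholder
stats = docker_abs_net_io(container_id)
print(stats['NET_in'], stats['NET_out'], stats['NET_systime'])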
Example #15
    def exec_command(cls, command, project, service, timeout_seconds=60):
        """Executes the command in the project and service container running under
        docker-compose.
        Throws ContainerUnavailableError if the retry limit is reached.

        Args:
            command: The command to be executed in the container
            project: Name of the project the container is hosting
            service: Name of the service that the container is hosting
            timeout_seconds: Retry time limit to wait for containers to start
                             Default in seconds: 60
        """
        docker_client = Client(version='auto')
        container_id = cls._get_container_id(project, service, docker_client, timeout_seconds)
        exec_id = docker_client.exec_create(container_id, command)['Id']
        docker_client.exec_start(exec_id)
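
A hypothetical call to the classmethod above; the enclosing class is not shown in the example, so ComposeHelper and the project/service names are invented placeholders:

ComposeHelper.exec_command('ls /tmp', project='myproject', service='web', timeout_seconds=30)  # hypothetical class and values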
Example #16
def remove_attachments(odoo_config):
    """Remove attachments folder
    Args:
        odoo_config (dict): Odoo configuration
    """
    if 'odoo_container' in odoo_config:
        env_vars = get_docker_env(odoo_config.get('odoo_container'))
        odoo_config_file = env_vars.get('ODOO_CONFIG_FILE')
        cli = Client()
        try:
            res = cli.copy(odoo_config.get('odoo_container'), odoo_config_file)
        except docker.errors.APIError as error:
            if "Could not find the file" in error.message:
                logger.error("Odoo config file is not in the path '%s'", odoo_config_file)
            else:
                logger.error("Could not get the config file '%s'", error.message)
            return None

        for i in res.data.split('\n'):
            if i.strip().startswith("data_dir"):
                data_dir = i.split("=")[1].strip()
                break
        fs_name = os.path.join(data_dir, "filestore", odoo_config.get('database'))
        exec_id = cli.exec_create(odoo_config.get('odoo_container'), "rm -r {}".format(fs_name))
        res = cli.exec_start(exec_id.get('Id'))
        if res:
            logger.info("Removing previous filestore returned '%s'", res)
    else:
        fs_name = os.path.join(odoo_config.get('data_dir'),
                               'filestore',
                               odoo_config.get('database'))
        shutil.rmtree(fs_name)
Example #17
    def barman_check(self):
        """Connect to the Barman Docker object and check configuration. Error out on failure."""

        docker_client = Client(base_url=self.url)

        try:
            # TODO: verify that the barman container is running
            docker_client.inspect_container(self.container)
        except Exception:
            print('stats err failed when inspecting the container.')
            sys.exit(1)

        try:
            exec_id = docker_client.exec_create(self.container, self.command)
            response = docker_client.exec_start(exec_id)
        except Exception:
            print(
                'stats err failed to execute barman check command in the container.'
            )
            sys.exit(1)

        failed = False
        for line in response.splitlines()[1:]:
            check, value = line.strip().split(': ', 1)
            slug = check.lower().replace(' ', '_').replace('-', '_')
            print('metric {} string {}'.format(slug, value))
            if value.startswith('FAILED'):
                failed = True
        if failed:
            print('status err failure in barman check')
            sys.exit(1)
        print('status ok all checks passed')
Example #18
class Container:
    def __init__(self, name):
        self.name = name

        self.docker = Client()

    def start(self):
        return self.docker.start(self.name)

    def stop(self):
        return self.docker.stop(self.name)

    def execute(self, command):
        data_obj = data.Data()

        cmd = self.docker.exec_create(self.name, '/bin/sh -c "' + data_obj.get_env_vars_string(True) + ' && ' + command + '"')
        cmd_id = cmd['Id']

        for line in self.docker.exec_start(cmd_id, stream=True):
            yield(line.decode('ascii'))

    def restart(self):
        return self.docker.restart(self.name)

    def status(self):
        return self.docker.status(self.name)

    def destroy(self):
        return self.docker.remove_container(self.name)
Example #19
    def exec_command(cls, command, project, service, timeout_seconds=60):
        """Executes the command in the project and service container running under
        docker-compose.
        Throws ContainerUnavailableError if the retry limit is reached.

        Args:
            command: The command to be executed in the container
            project: Name of the project the container is hosting
            service: Name of the service that the container is hosting
            timeout_seconds: Retry time limit to wait for containers to start
                             Default in seconds: 60
        """
        docker_client = Client(version='auto')
        container_id = cls._get_container_id(project, service, docker_client,
                                             timeout_seconds)
        exec_id = docker_client.exec_create(container_id, command)['Id']
        docker_client.exec_start(exec_id)
Example #20
 def local(self, cmd, capture=False, stream=False, detach=False):
     if stream:
         dckr = Client(timeout=120, version='auto')
         i = dckr.exec_create(container=self.docker_name(), cmd=cmd)
         return dckr.exec_start(i['Id'], tty=True, stream=stream, detach=detach)
     else:
         flag = '-d' if detach else ''
         return local('docker exec {0} {1} {2}'.format(flag, self.docker_name(), cmd), capture)
Example #21
 def local(self, cmd, capture=False, stream=False, detach=False):
     if stream:
         dckr = Client(timeout=120, version="auto")
         i = dckr.exec_create(container=self.docker_name(), cmd=cmd)
         return dckr.exec_start(i["Id"], tty=True, stream=stream, detach=detach)
     else:
         flag = "-d" if detach else ""
         return local("docker exec {0} {1} {2}".format(flag, self.docker_name(), cmd), capture)
Example #22
 def local(self, cmd, capture=False, stream=False, detach=False, tty=True):
     if stream:
         dckr = Client(timeout=120, version='auto')
         i = dckr.exec_create(container=self.docker_name(), cmd=cmd)
         return dckr.exec_start(i['Id'], tty=tty, stream=stream, detach=detach)
     else:
         flag = '-d' if detach else ''
         return local('docker exec {0} {1} {2}'.format(flag, self.docker_name(), cmd), capture)
Example #23
class Docker:
    def __init__(self, client_url='unix://var/run/docker.sock', client_version='auto', timeout=50, client_tls=False):
        self.client = Client(base_url=client_url, version=client_version, tls=client_tls)

        self.all_containers = self.client.containers()

        self.attached_container = self.all_containers[0]['Id']

    def container_has_file(self, file_name, path="/"):
        has_file = False

        bash_command = "find %s -maxdepth 1 -type f" % path

        exec_id = self.client.exec_create(self.attached_container, bash_command)

        files_in_container = self.client.exec_start(exec_id['Id'])

        files_in_container = files_in_container.split('\n')

        for i in files_in_container:
            if file_name in i.split('/'):
                has_file = True

        return has_file

    def container_has_dir(self, dir_name, path="/"):
        has_dir = False

        bash_command = "find %s -maxdepth 1 -type d" % path

        exec_id = self.client.exec_create(self.attached_container, bash_command)

        dirs_in_container = self.client.exec_start(exec_id['Id'])

        dirs_in_container = dirs_in_container.split('\n')

        for i in dirs_in_container:
            if dir_name in i.split('/'):
                has_dir = True

        return has_dir

    def attach_container(self, id):
        self.attached_container = id
Example #24
class DockerDB:
    """
    https://docker-py.readthedocs.org/en/latest/api/
    Version mismatch: Edit file /usr/lib/python2.7/site-packages/docker/constants.py
        DEFAULT_DOCKER_API_VERSION = '1.15'

    """

    def import_db(self, chapters):
        if chapters:
            if self.story_name in self.apps_api_story_list:
                if isinstance(chapters, str):
                    self.insert_chapter(chapters)
                if isinstance(chapters, list):
                    self.insert_chapters(chapters)
            else:
                DockerDB(self.story_name).insert_story()

    def __init__(self, story_name):
        self.client = Client(base_url='unix://var/run/docker.sock')
        self.story_dir = IMG_DIR + '/' + story_name
        self.python_bin = DOCKER_PYTHON_BIN
        self.python_app = DOCKER_PYTHON_APP
        self.story_name = story_name

        if HOST_NAME == 'livemanga':
            self.apps_api_story_list = abstract_docker_api.apps_api_get_stories()
        else:
            self.apps_api_story_list = dict()

    def test_connection(self):
        cmd = (self.python_bin, '--version')
        self.run(" ".join(cmd))

    def run(self, cmd):
        """
        :param cmd : python_bin + python_app + action + option
        :return:
        """
        try:
            p = self.client.exec_create(container=CONTAINER_ID, cmd=cmd)
            print self.client.exec_start(p)
        except Exception, e:
            print 'DockerDB: ', e
Example #25
 def exec_on_ctn(self, cmd, capture=True, stream=False, detach=False):
     name = self.docker_name()
     if stream:
         # This needs root permission.
         dcli = Client(timeout=120, version='auto')
         i = dcli.exec_create(container=name, cmd=cmd)
         return dcli.exec_start(i['Id'], tty=True,
                                stream=stream, detach=detach)
     else:
         flag = '-d' if detach else ''
         return self.dcexec('docker exec {0} {1} {2}'.format(
             flag, name, cmd), capture=capture)
Example #26
 def exec_on_ctn(self, cmd, capture=True, stream=False, detach=False):
     name = self.docker_name()
     if stream:
         # This needs root permission.
         dcli = Client(timeout=120, version='auto')
         i = dcli.exec_create(container=name, cmd=cmd)
         return dcli.exec_start(i['Id'], tty=True,
                                stream=stream, detach=detach)
     else:
         flag = '-d' if detach else ''
         return self.dcexec('docker exec {0} {1} {2}'.format(
             flag, name, cmd), capture=capture)
Example #27
def get_group(name):
    x = []
    cmd = 'getent group "{0}"'.format(name)
    cli = Client(base_url='unix://tmp/docker.sock')
    id = cli.inspect_container('/samba')['Id']
    proc = cli.exec_create(id, cmd)
    print('DEBUG', file=sys.stdout)
    for i in cli.exec_start(proc['Id'],
                            stream=False).decode('utf-8').split(sep='\n'):
        x.append(i)
    print('Cmd:', cmd, file=sys.stdout)
    print('Answer:', x, file=sys.stdout)
    return str(''.join(x)).split(sep=':')[2]
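
A usage sketch for the function above; it assumes a container named /samba is running and that the group exists inside it:

gid = get_group('users')  # 'users' is a placeholder group name
print(gid)                # numeric GID parsed from the getent output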
Example #28
class DockerExec(object):
    def __init__(self):
        self.cl = Client(base_url="unix://var/run/docker.sock")

    def execute(self, container, command, detach=False, interactive=False, tty=False):
        try:
            exec_id = self.cl.exec_create(container, command, True, True, tty)
            print exec_id
            ret = self.cl.exec_start(exec_id["Id"], detach, tty, False)
            print ret
            ins = self.cl.exec_inspect(exec_id["Id"])
            print ins["ExitCode"]
        except docker.errors.APIError as ex:
            raise
Example #29
class DockerManager:
    def __init__(self):
        try:
            self.cli = Client(base_url="tcp://192.168.0.2:2376")
        except Exception as e:
            print "Falhou %s" % e

    def createContainer(self, nome, hn):
        try:
            container = self.cli.create_container(name=nome,
                                                  hostname=hn,
                                                  image="debian",
                                                  detach=True,
                                                  stdin_open=True,
                                                  tty=True)
            self.cli.start(container=container.get("Id"))
            return container
        except Exception as e:
            print "Falhou ao criar o container: ", e

    def listContainers(self):
        try:
            for c in self.cli.containers(all=True):
                rede = c.get("NetworkSettings").get("Networks").keys()[0]
                ip = c.get("NetworkSettings").get("Networks").get(rede).get(
                    "IPAddress")
                print c.get(
                    "Names"
                )[0], " rede: ", rede, "IP: ", ip, "Status: ", c.get("Status")
                print "Removendo o container ", c.get("Names")
        except Exception as e:
            print "Falhou ao listar os containers: ", e

    def inspectContainer(self, container_id):
        try:
            container = self.cli.inspect_container(container_id)
            print container
        except Exception as e:
            print "Falhou ao criar o container: ", e

    def execContainer(self, container_id, command):
        try:
            exec_id = self.cli.exec_create(container_id, command)
            print exec_id
            log = self.cli.exec_start(exec_id.get("Id"))
            print log
        except Exception as e:
            print "Falhou ao criar o container: ", e
Example #30
class Docker:
    def __init__(self):
        config = ConfigParser.ConfigParser()
        config.read(
            os.path.dirname(os.path.abspath(__file__)) + '/../config.cfg')
        self.client = Client(base_url='tcp://%s:2376' %
                             config.get('docker', 'server'))

    def listar_containers(self):
        for c in self.client.containers():
            print c

    def criar_container(self,
                        nome='novo',
                        imagem='ubuntu',
                        comando='/bin/bash'):
        container = self.client.create_container(
            image=imagem,
            command=comando,
            name=nome,
            stdin_open=True,
            tty=True,
            detach=True,
            ports=[80],
            host_config=self.client.create_host_config(port_bindings={80: 80}))
        return container

    def iniciar_container(self, id):
        self.client.start(container=id)
        print 'Container iniciado'

    def parar_container(self, id):
        self.client.stop(container=id)
        print 'Container parado.'

    def rem_container(self, id):
        self.client.stop(container=id)
        self.client.remove_container(container=id)
        print 'Container removido.'

    def exec_comando(self, id, comando):
        exec_id = self.client.exec_create(container=id, cmd=comando)
        resultado = self.client.exec_start(exec_id)
        return resultado

    def inspec_container(self, id):
        container = self.client.inspect_container(container=id)
        return container
Example #31
class TestConnection(unittest.TestCase):

    def setUp(self):
        # logging.basicConfig(level=logging.DEBUG)
        self.docker = Client(base_url='unix://var/run/docker.sock')
        host_config = self.docker.create_host_config(publish_all_ports=True)
        self.container = self.docker.create_container("jrabbit/taskd", name="taskc_test", host_config=host_config)
        self.docker.start(self.container["Id"])
        our_exec = self.docker.exec_create(self.container["Id"], "taskd add user Public test_user")
        self.tc = TaskdConnection()
        o = self.docker.exec_start(our_exec['Id'])
        # print o
        self.tc.uuid = o.split('\n')[0].split()[-1]
        # print self.tc.uuid
        self.tc.server = "localhost"
        c = self.docker.inspect_container("taskc_test")
        
        self.tc.port = int(c['NetworkSettings']['Ports']['53589/tcp'][0]['HostPort'])
        # self.tc.uuid = os.getenv("TEST_UUID")
        self.tc.group = "Public"
        self.tc.username = "******"
        self.tc.client_cert = "taskc/fixture/pki/client.cert.pem"
        self.tc.client_key = "taskc/fixture/pki/client.key.pem"
        self.tc.cacert_file = "taskc/fixture/pki/ca.cert.pem"
        time.sleep(2)
    def test_connect(self):

        self.tc._connect()
        # print self.tc.conn.getpeername()
        self.assertEqual(self.tc.conn.getpeername(), ('127.0.0.1', self.tc.port))
        # make sure we're on TLS v2 per spec
        self.assertEqual(self.tc.conn.context.protocol, 2)
        self.tc.conn.close()
        # from IPython import embed
        # embed()

    def test_put(self):
        assert self.tc.uuid
        self.tc.put("")
        tasks = """{"description":"hang up posters","entry":"20141130T081652Z","status":"pending","uuid":"0037aa92-45e5-44a6-8f34-2f92989f173a"}
{"description":"make pb ramen","entry":"20141130T081700Z","status":"pending","uuid":"dd9b71db-f51c-4026-9e46-bb099df8dd3f"}
{"description":"fold clothes","entry":"20141130T081709Z","status":"pending","uuid":"d0f53865-2f01-42a8-9f9e-3652c63f216d"}"""
        resp = self.tc.put(tasks)
        self.assertEqual(resp.status_code, 200)
        # might not be correct depends on state of taskd
    def tearDown(self):
        self.docker.remove_container(self.container['Id'], force=True)
Example #32
class Docker:
    def __init__(self):
        self.client = Client(base_url="tcp://192.168.0.2:2376")

    def listar_containers(self):
        for c in self.client.containers(all=True):
            print c

    def criar_container(self,
                        nome="novosdad",
                        imagem="ubuntu",
                        comando="/bin/bash"):

        container = self.client.create_container(
            image=imagem,
            command=comando,
            name=nome,
            stdin_open=True,
            tty=True,
            detach=True,
            ports=[80, 80],
            host_config=self.client.create_host_config(port_bindings={80: 80}))

        return container

    def iniciar_container(self, id):
        self.client.start(container=id)
        print "Container iniciado!"

    def parar_container(self, id):
        self.client.stop(container=id)
        print "Container parado!"

    def remove_container(self, id):
        self.client.stop(container=id)
        self.client.remove_container(container=id)
        print "Container removido!"

    def executar_comando(self, id, comando):
        exec_id = self.client.exec_create(container=id, cmd=comando)
        resultado = self.client.exec_start(exec_id)
        return resultado

    def inspecionar_container(self, id):
        container = self.client.inspect_container(container=id)
        return container
Example #33
class DockerModule:
    def __init__(self):
        try:
            self.client = Client("tcp://127.0.0.1:2376")
            print "Conectou!"
        except Exception as e:
            print "Falhou ao conectar no docker: ",e

    def list_containers(self):
        containers = self.client.containers(all=True)
        return containers

    def stop_container(self,id):
        res = self.client.stop(container=id)
        print res
        return res

    def start_container(self,id):
        res = self.client.start(container=id)
        return res

    def create_container(self,**kwargs):
        ports = kwargs.get("port").split(":")
        print ports
        host_config = self.client.create_host_config(port_bindings={ports[1]:ports[0]})
        print host_config
        res = self.client.create_container(name=kwargs.get("name"),
                                           image=kwargs.get("image"),
                                           command=kwargs.get("command"),
                                           ports=[ports[1]],
                                           host_config=host_config,
                                           stdin_open=True,
                                           detach=True,
                                           tty=True)
        return res


    def execute_command(self,id,cmd):
        res = self.client.exec_create(container=id,cmd=cmd)
        res = self.client.exec_start(res)
        return res

    def delete_container(self,container):
        res = self.stop_container(container)
        res = self.client.remove_container(container=container)
        return "Container removed successful!!!"
Example #34
class DockerAPI(object):
    def __init__(self, cid, base_url="unix://var/run/docker.sock"):
        super(DockerAPI, self).__init__()
        self.cid = cid
        self.docker = DockerClient(base_url=base_url)
        # 'ping' docker in order to raise error if docker is down
        self.docker.info()

    def inspect(self):
        return self.docker.inspect_container(self.cid)

    def top(self):
        return self.docker.top(self.cid)

    def exec_cmd(self, cmd, detach=True):
        eid = self.docker.exec_create(self.cid, cmd)['Id']
        return self.docker.exec_start(eid, detach=detach)
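
A usage sketch for the wrapper above; DockerClient is the docker-py Client imported under that name, as in the example, and the container ID is a placeholder:

api = DockerAPI('my-container')  # placeholder container name or ID
print(api.top())
print(api.exec_cmd('uname -a', detach=False))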
Example #35
class DockerAPI(object):
    def __init__(self, cid, base_url="unix://var/run/docker.sock"):
        super(DockerAPI, self).__init__()
        self.cid = cid
        self.docker = DockerClient(base_url=base_url)
        # 'ping' docker in order to raise error if docker is down
        self.docker.info()

    def inspect(self):
        return self.docker.inspect_container(self.cid)

    def top(self):
        return self.docker.top(self.cid)

    def exec_cmd(self, cmd, detach=True):
        eid = self.docker.exec_create(self.cid, cmd)['Id']
        return self.docker.exec_start(eid, detach=detach)
Example #36
class Docker:
    def __init__(self):
        self.client = Client(base_url='tcp://192.168.0.2:2376')

    def listar_containers(self):
        containers = self.client.containers(all=True)
        return containers

    def criar_container(self,
                        nome='novo',
                        imagem='ubuntu',
                        comando='/bin/bash'):
        container = self.client.create_container(
            image=imagem,
            command=comando,
            name=nome,
            stdin_open=True,
            tty=True,
            detach=True,
            ports=[80],
            host_config=self.client.create_host_config(port_bindings={80: 80}))
        return container

    def iniciar_container(self, id):
        self.client.start(container=id)
        print 'Container iniciado'

    def parar_container(self, id):
        self.client.stop(container=id)
        print 'Container parado.'

    def rem_container(self, id):
        self.client.stop(container=id)
        self.client.remove_container(container=id)
        print 'Container removido.'

    def exec_comando(self, id, comando):
        exec_id = self.client.exec_create(container=id, cmd=comando)
        resultado = self.client.exec_start(exec_id)
        return resultado

    def inspec_container(self, id):
        container = self.client.inspect_container(container=id)
        return container
Example #37
class DockerExec(object):
    def __init__(self):
        self.cl = Client(base_url='unix://var/run/docker.sock')

    def execute(self,
                container,
                command,
                detach=False,
                interactive=False,
                tty=False):
        try:
            exec_id = self.cl.exec_create(container, command, True, True, tty)
            print exec_id
            ret = self.cl.exec_start(exec_id['Id'], detach, tty, False)
            print ret
            ins = self.cl.exec_inspect(exec_id['Id'])
            print ins['ExitCode']
        except docker.errors.APIError as ex:
            raise
Example #38
class DockerManager:
    def __init__(self):
        try:
            self.cli = Client(base_url="tcp://192.168.0.2:2376")            
        except Exception as e:
            print "Falhou %s"%e

    def createContainer(self,nome,hn):
        try:
            container = self.cli.create_container(name=nome,hostname=hn,image="debian",
                                            detach=True,stdin_open=True,tty=True)
            self.cli.start(container=container.get("Id"))
            return container
        except Exception as e:
            print "Falhou ao criar o container: ",e

    def listContainers(self):
        try:
            for c in self.cli.containers(all=True):
                rede = c.get("NetworkSettings").get("Networks").keys()[0]
                ip = c.get("NetworkSettings").get("Networks").get(rede).get("IPAddress")
                print c.get("Names")[0]," rede: ",rede,"IP: ",ip,"Status: ",c.get("Status")
                print "Removendo o container ",c.get("Names")
        except Exception as e:
            print "Falhou ao listar os containers: ",e

    def inspectContainer(self,container_id):
        try:
            container = self.cli.inspect_container(container_id)
            print container
        except Exception as e:
            print "Falhou ao criar o container: ",e

    def execContainer(self,container_id,command):
        try:
            exec_id = self.cli.exec_create(container_id,command)
            print exec_id
            log = self.cli.exec_start(exec_id.get("Id"))
            print log
        except Exception as e:
            print "Falhou ao criar o container: ",e
Example #39
class DockerClient(object):
    '''
    classdocs
    '''

    __lock = threading.Lock()

    @classmethod
    def instance(cls):
        if not os.path.exists('/var/run/docker.sock'):
            Log(1, 'DockerClient init fail,as the sock file not exist.')
            return None

        with LockGuard(cls.__lock):
            if not hasattr(cls, "_instance"):
                cls._instance = cls()
        return cls._instance

    def __init__(self):
        self.registry_ct_id = GetSysConfig(
            'registry_container_id') or 'install_registry_1'
        self.client = Client(base_url='unix://var/run/docker.sock')

    def garbage_collect(self):
        try:
            exec_id = self.client.exec_create(self.registry_ct_id, [
                'bin/registry', 'garbage-collect',
                '/etc/docker/registry/config.yml'
            ])
            Log(3, 'exec_create return[%s]' % (str(exec_id)))

            res = b''
            for chunk in self.client.exec_start(exec_id, stream=True):
                res += chunk
            Log(3, 'garbage_collect return[%s]' % (res))
            self.client.restart(self.registry_ct_id)
        except Exception, e:
            PrintStack()
            return Result('', CALL_DOCKER_INTERFACE_FAIL_ERR,
                          'garbage_collect except[%s]' % (str(e)))
        else:
Example #40
def system_db_sync_full(pc_role):
    #   Manage the transition from Recovery to Nominal, by forcing a full sync of both DB schemas
    #   pc_role:    role of my PC (either PC2 or PC3)
    from docker import Client

    logger.debug("Entering routine %s" % 'system_db_sync_full')

    dump_dir = es_constants.es2globals['db_dump_dir']

    dump_filename = dump_dir + '/dump_data_all_' + datetime.datetime.now(
    ).strftime("%Y-%m-%d-%H:%M:%S") + '.sql'

    # Create a full dump
    dumpcommand = 'psql -h localhost -p 5432 -U estation -d estationdb -t -A -c "SELECT products.export_all_data()" -o ' + \
                  dump_filename
    if systemsettings['docker_install']:
        c = Client(base_url='unix://var/run/docker.sock')
        commandid = c.exec_create('postgres', dumpcommand)
        status = c.exec_start(commandid)
    else:
        status = os.system(dumpcommand)

    # Wait a second
    time.sleep(1)

    # Check the other computer is ready
    if pc_role == 'PC2':
        other_pc = 'MESA-PC3'
    else:
        other_pc = 'MESA-PC2'

    # Inject the data into the DB of the other PC
    sync_command = 'psql -h ' + other_pc + ' -p 5432 -U estation -d estationdb -f ' + dump_filename + \
                   ' 1>/dev/null 2>/eStation2/log/system_db_sync_full.log'

    status += os.system(sync_command)

    return status
Example #41
class docker_operate:  

	# def __init__(self):  
	# 	self.client = Client(base_url='unix:///var/run/docker.sock')
	# 	self.client = Client(base_url='tcp://192.168.122.227:2375',version="1.7.1")

	'''
	@Function: parse the control arguments and dispatch to the matching handler

	@Format (see the usage sketch after this example):
		control={
		"type":"create",     # operation type
		"operation":{...}     # dict with the parameters of the concrete operation
		}
	'''

	# parse the incoming control command
	def resolve(self,control):
		if control["type"]=="create":
			return self.create(control["operation"])

		if control["type"]=="execute":
			return self.execute(control["operation"])

		if control["type"]=="delete":
			return self.delete(control["operation"])

		if control["type"]=="display":
			return self.display(control["operation"])



	# create containers
	def create(self,operation):
		self.host="tcp://"+operation["host"]+":2375"
		self.version=operation["version"]
		self.client = Client(base_url=self.host)

		contains_num=len(self.client.containers(all=True))
		try:
			for num in range(operation["create_num"]):
				name=operation["name_pro"]+str(num+contains_num)
				container = self.client.create_container(
					image=operation["image"],
					name=name,
					stdin_open=True,
					host_config=self.client.create_host_config(network_mode='none',privileged=True,publish_all_ports=True)
					)
				self.client.start(container=container.get('Id'))
			result={
				"operation":"create",
				"return":0,
				"error":None
			}
		except:
			result={
				"operation":"create",
				"return":-1,
				"error":str(traceback.format_exc())
			}
		return result


	# execute a command inside containers
	def execute(self,operation):
		self.host="tcp://"+operation["host"]+":2375"
		self.version=operation["version"]
		self.client = Client(base_url=self.host)
		returns=[]
		try:
			if operation["all_exec"]==True:
				all_containers=self.client.containers(all=True)
				for container in all_containers:
					exec_container=self.client.exec_create(
						container=container["Id"],
						cmd=operation["cmd"],
						)
					response=self.client.exec_start(exec_id=exec_container.get('Id'),tty=True)
					returns.append(str(response))					

					if operation["delete"]==True:
						self.client.remove_container(container=exec_container.get('Id'),force=True)
				result={
					"operation":"execute",
					"return":returns,
					"error":None
				}
			else:
				exec_container=self.client.exec_create(
					container=operation["name"],
					cmd=operation["cmd"],
					)
				response=self.client.exec_start(exec_id=exec_container.get('Id'),tty=True)
				returns.append(str(response))
				if operation["delete"]==True:
					self.client.remove_container(container=exec_container.get('Id'),force=True)

				result={
				"operation":"execute",
				"return":returns,
				"error":None
				}

		except:
			result={
				"operation":"execute",
				"return":-1,
				"error":str(traceback.format_exc())
			}
		return result





	# delete containers
	def delete(self,operation):
		self.host="tcp://"+operation["host"]+":2375"
		self.version=operation["version"]
		self.client = Client(base_url=self.host)
		try:
			if operation["del_all"]==True:
				all_containers=self.client.containers(all=True)
				for container in all_containers:
					self.client.remove_container(container=container["Id"],force=True)
			else:
				self.client.remove_container(container=operation["name"],force=True)

			result={
				"operation":"delete",
				"return":0,
				"error":None
			}

		except:
			result={
				"operation":"delete",
				"return":-1,
				"error":str(traceback.format_exc())
			}
		return result



	# list images and containers
	def display(self,operation):
		self.host="tcp://"+operation["host"]+":2375"
		self.version=operation["version"]
		self.client = Client(base_url=self.host)
		try:
			if operation["image"]==True:
				images=self.client.images(all=True)
			else:
				images=[]

			if operation["all_container"]==True:
				containers=self.client.containers(all=True)
			else:
				containers=self.client.containers()

			result={
				"operation":"display",
				"return":{
					"images":images,
					"containers":containers
					},
				"error":None
			}

		except:
			result={
				"operation":"display",
				"return":-1,
				"error":str(traceback.format_exc())
			}
		return result

	# accept commands from the controlling side over UDP
	def socket_message(self):
		port=8081
		s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
		s.bind(('localhost',port))
		while 1:
			json,addr=s.recvfrom(1024)
			data=simplejson.loads(json)
			print "received command from: "+addr[0]
			print "command type: "+data["type"]
			json=simplejson.dumps(self.resolve(data))
			print "result returned successfully"
			s.sendto(json,addr)
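
A sketch of the control dict that resolve() above dispatches on, here for the "execute" branch; the host, API version and container name are placeholders:

control = {
    "type": "execute",
    "operation": {
        "host": "192.168.122.227",   # placeholder Docker host
        "version": "1.20",           # placeholder API version
        "all_exec": False,
        "name": "container0",        # placeholder container name
        "cmd": "uname -a",
        "delete": False,
    },
}
result = docker_operate().resolve(control)
print(result)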
Example #42
from docker import Client

c = Client(base_url='unix:///var/run/docker.sock')

container=c.create_container(image="ubuntu:test",name="test1",command="/bin/bash",stdin_open=True,host_config=c.create_host_config(privileged=True,publish_all_ports=True))
c.start(container=container.get('Id'))

container=c.create_container(image="ubuntu:test",name="test2",command="/bin/bash",stdin_open=True,host_config=c.create_host_config(privileged=True,publish_all_ports=True))
c.start(container=container.get('Id'))

exec_container=c.exec_create(container="test1",cmd="python  /home/server.py")
response=c.exec_start(exec_id=exec_container.get('Id'),tty=True)
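
exec_start above, with tty=True and neither stream nor detach set, blocks until the command exits and then returns its whole output. If server.py is meant to keep running, a detached exec is the non-blocking variant (a sketch reusing the same client c):

exec_container = c.exec_create(container="test1", cmd="python /home/server.py")
c.exec_start(exec_id=exec_container.get('Id'), detach=True)  # returns immediately, output is discarded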
Example #43
def docker_monitor(host_ip, container_id, network_card):
    try:
        lists = []
        network_alls = []
        memory_alls = []
        cpu_alls = []
        io_alls = []
        times = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        datetimes = int(time.mktime(time.strptime(times, '%Y-%m-%d %H:%M:%S')))
        #    datetimes = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
        #    timeStamp = int(time.mktime(datatimes))
        cli = Client(base_url='tcp://%s:5555' % (host_ip), version='1.20')
        stats_obj = cli.stats('%s' % (container_id))
        for i in stats_obj:
            lists.append(i)
            break
        for i in lists:
            i_json = json.loads(i)
            for k, v in i_json.items():
                #            print k
                if re.search('network', k):
                    network = {'network_stats': [v]}
                    network_alls.append(network)
                if re.search('memory_stats', k):
                    memory = {'memory_stats': [v]}
                    #               print memory
                    for memory_i in memory.get('memory_stats'):
                        memory_stats = {
                            'mem_usage':
                            str(round(memory_i.get('usage')) / 1000000),
                            'mem_limit':
                            str(memory_i.get('limit') / 1000000000 * 1000)
                        }

#               memory_alls.append(memory)
                if re.search('cpu_stats', k):
                    cpu = {'cpu_stats': [v]}
                    for cpu_i in cpu.get('cpu_stats'):
                        #                    print cpu
                        #                   for k,v in cpu_i.items():
                        #                       print v
                        #                   if re.search('throttling_data',cpu_i.get('cpu_usage').get('throttling_data')):
                        #                 print cpu_i
                        #                    print cpu_i.get('cpu_usage').get('total_usage')
                        #                    print cpu_i.get('cpu_usage').get('percpu_usage')
                        if int(cpu_i.get('cpu_usage').get(
                                'total_usage')) != 0 and cpu_i.get(
                                    'cpu_usage').get('total_usage') != None:
                            #stats = cpu_i.get('cpu_usage').get('total_usage') / cpu_i.get('cpu_usage').get('percpu_usage')[0]
                            cpu_stats = str(
                                float(
                                    cpu_i.get('cpu_usage').get('total_usage') /
                                    cpu_i.get('cpu_usage').get(
                                        'percpu_usage')[0]) / 100) + '%'
#                        print stats
#                        print cpu_i.get('cpu_usage').get('total_usage')
#                        print cpu_i.get('cpu_usage').get('percpu_usage')[0]
#                        print stats
                        else:
                            pass

#               cpu_alls.append(cpu)
                if re.search('blkio_stats', k):
                    io = {'io_stats': [v]}
                    io_alls.append(io)

        linux = 'python /home/zeusadmin/monitor/monitor_flow.py %s' % network_card
        cli = docker.Client(base_url='tcp://%s:5555' % (host_ip),
                            version='1.20',
                            timeout=10)
        ex = cli.exec_create(container=container_id, cmd=linux, user='******')
        ls = cli.exec_start(exec_id=ex["Id"], tty=True).strip('\r\n')
        if len(ls) == 0:
            flow = u"监控无数据"

        else:
            flow = eval(ls)
#        print ls

#    print memory_stats
#    print cpu_stats
#    print io_alls
#    print flow
        all_stats = {
            'code':
            200,
            'monitor_time':
            datetimes,
            'message': [{
                'memory_stats': memory_stats
            }, {
                'flow_stats': flow
            }, {
                'cpu_stats': {
                    'cpu_usage': cpu_stats
                }
            }]
        }
        return (json.dumps(all_stats, sort_keys=True, indent=4))
#    print stats
#   status = {'code':200,'message': [network_alls,memory_alls,cpu_alls,io_alls]}
#    return (json.dumps(status,sort_keys=True,indent=4))
    except docker.errors.NotFound, e:
        return e
Example #44
class Haproxyctl:
    def __init__(self):
        self.__haproxyctl_config_file = '/root/.haproxyctl.cfg'
        self.__haproxy_template_file = 'haproxy.jinja'
        self.__haproxy_config_file = 'haproxy.cfg'
        self.__haproxy_config_path = '/usr/local/etc/haproxy/'
        self.__haproxy_container_id = None
        self.__docker_client = Client(base_url='unix://var/run/docker.sock')

    def get_haproxy_container(self):
        containers = self.__docker_client.containers()
        for container in containers:
            for name in container['Names']:
                if 'haproxy' in name.lower():
                    self.__haproxy_container_id = container['Id']
                    return self.__haproxy_container_id
        return None

    def get_container_ip(self, container_name):
        try:
            container = self.__docker_client.inspect_container(container_name)
            return container['NetworkSettings']['IPAddress']
        except errors.NotFound:
            return None

    def read_config_file(self):
        with open(self.__haproxyctl_config_file, 'a+') as f_handle:
            f_handle.seek(0)
            try:
                json_data = json.load(f_handle)
                return json_data
            except ValueError:
                return []
        return []

    def write_config_file(self, existing_config):
        with open(self.__haproxyctl_config_file, 'w') as f_handle:
            json.dump(existing_config,
                      f_handle,
                      sort_keys=True,
                      indent=4,
                      ensure_ascii=False)

    def add_url(self, url, container_name, port, existing_config):
        if port is None:
            port = 80

        for item in existing_config:
            if type(item) is dict:
                if 'url' in item:
                    if item['url'] == url:
                        item['container_name'] = container_name
                        item['port'] = port
                        return existing_config

        existing_config.append({
            'url': url,
            'container_name': container_name,
            'port': port
        })
        return existing_config

    def remove_url(self, url, existing_config):
        for item in existing_config:
            if type(item) is dict:
                if 'url' in item:
                    if item['url'] == url:
                        existing_config.remove(item)

        return existing_config

    def generate_haproxy_config(self, new_config):
        env = Environment(loader=FileSystemLoader(
            pkg_resources.resource_filename('haproxyctl', 'templates')),
                          trim_blocks=True)
        template = env.get_template(self.__haproxy_template_file)
        f = open(self.__haproxy_config_file, 'w')
        items = []
        counter = 1
        for item in new_config:
            ip = self.get_container_ip(item['container_name'])
            if ip:
                items.append({
                    'id': counter,
                    'url': item['url'],
                    'port': item['port'],
                    'ip': ip
                })
                counter += 1
                print " => [ OK ] Url: '%s', Container: '%s', IP:%s, Port:%s" % (
                    item['url'], item['container_name'], ip, item['port'])
            else:
                print " => [SKIP] Url: '%s', Container: '%s'(not found), IP:N/A, Port:%s" % (
                    item['url'], item['container_name'], item['port'])

        f.write(template.render(items=items))
        f.close()

    def update_haproxy_config(self):
        si = StringIO()
        tar = tarfile.open(mode='w', fileobj=si)
        tar.add(self.__haproxy_config_file)
        tar.close()
        tar_content = si.getvalue()
        self.__docker_client.put_archive(self.__haproxy_container_id,
                                         self.__haproxy_config_path,
                                         tar_content)

    def test_haproxy_config(self):
        cmd = "haproxy -c -f %s%s 2>&1" % (self.__haproxy_config_path,
                                           self.__haproxy_config_file)
        res = self.__docker_client.exec_create(
            container=self.__haproxy_container_id, cmd=['bash', '-c', cmd])
        output = self.__docker_client.exec_start(res)
        output = output.strip()
        if output == "Configuration file is valid":
            print " => Haproxy config is OK"
            return True
        else:
            print " => Haproxy config contains error!"
        return False

    def restart_haproxy_container(self):
        print " => Restarting haproxy..."
        self.__docker_client.restart(container=self.__haproxy_container_id,
                                     timeout=0)
        print " => Restarted."
Пример #45
0
volumes = []
for volume in config['volumes']:
    volumes.append(volume.split(':')[1])
host_config = cli.create_host_config(binds=config['volumes'])

# Create a container and start it
container = cli.create_container(image=config['image'] + ':' + config['tag'],
                                 command='tail -f /dev/null',
                                 detach=True,
                                 stdin_open=True,
                                 tty=True,
                                 environment=config['environment'],
                                 volumes=volumes,
                                 name=config['name'],
                                 host_config=host_config)
cli.start(container=container.get('Id'))

# Execute the commands
for cmd in config['cmd']:
    print('[+] ' + cmd)
    execute = cli.exec_create(container['Id'], cmd=cmd, stdout=True, stderr=True)
    for char in cli.exec_start(execute['Id'], tty=True, stream=True):
        sys.stdout.write(char.decode(sys.stdout.encoding))
    status = cli.exec_inspect(execute['Id'])['ExitCode']
    if status != 0:
        break

# Stop the container and remove it
cli.stop(container=container.get('Id'))
cli.remove_container(container=container['Id'])

sys.exit(status)
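
Judging from the keys the script reads, the config dictionary it expects probably looks something like the sketch below (all values are placeholders, not taken from the original):

config = {
    'image': 'ubuntu',                      # image and tag are joined with ':'
    'tag': '16.04',
    'name': 'build-runner',
    'environment': {'LANG': 'C.UTF-8'},
    'volumes': ['/home/user/src:/src'],     # "host:container" strings; the container
                                            # half is collected into `volumes` above
    'cmd': ['make -C /src', 'make -C /src test'],
}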
Пример #46
0
class Docker_interface:
    def __init__(self,
                 net_name='tosker_net',
                 tmp_dir='/tmp',
                 socket='unix://var/run/docker.sock'):
        self._log = Logger.get(__name__)
        self._net_name = net_name
        self._cli = Client(base_url=os.environ.get('DOCKER_HOST') or socket)
        self._tmp_dir = tmp_dir

    # TODO: add a parameter to remove the containers if they already exist!
    def create(self, con, cmd=None, entrypoint=None, saved_image=False):
        def create_container():
            tmp_dir = path.join(self._tmp_dir, con.name)
            try:
                os.makedirs(tmp_dir)
            except OSError:
                pass
            saved_img_name = '{}/{}'.format(self._net_name, con.name)
            img_name = con.image
            if saved_image and self.inspect(saved_img_name):
                img_name = saved_img_name

            self._log.debug('container: {}'.format(con.get_str_obj()))

            con.id = self._cli.create_container(
                name=con.name,
                image=img_name,
                entrypoint=entrypoint if entrypoint else con.entrypoint,
                command=cmd if cmd else con.cmd,
                environment=con.env,
                detach=True,
                # stdin_open=True,
                ports=[key for key in con.ports.keys()] if con.ports else None,
                volumes=['/tmp/dt'] +
                ([k for k, v in con.volume.items()] if con.volume else []),
                networking_config=self._cli.create_networking_config({
                    self._net_name:
                    self._cli.create_endpoint_config(links=con.link
                                                     # ,aliases=['db']
                                                     )
                }),
                host_config=self._cli.create_host_config(
                    port_bindings=con.ports,
                    # links=con.link,
                    binds=[tmp_dir + ':/tmp/dt'] +
                    ([v + ':' + k
                      for k, v in con.volume.items()] if con.volume else []),
                )).get('Id')

        assert isinstance(con, Container)

        if con.to_build:
            self._log.debug('start building..')
            # utility.print_json(
            self._cli.build(path='/'.join(con.dockerfile.split('/')[0:-1]),
                            dockerfile='./' + con.dockerfile.split('/')[-1],
                            tag=con.image,
                            pull=True,
                            quiet=True)
            # )
            self._log.debug('stop building..')
        elif not saved_image:
            # TODO: avoid this when a custom image has to be used
            self._log.debug('start pulling.. {}'.format(con.image))
            utility.print_json(self._cli.pull(con.image, stream=True),
                               self._log.debug)
            self._log.debug('end pulling..')

        try:
            create_container()
        except errors.APIError as e:
            self._log.debug(e)
            # self.stop(con)
            self.delete(con)
            create_container()
            # raise e

    def stop(self, container):
        name = self._get_name(container)
        try:
            return self._cli.stop(name)
        except errors.NotFound as e:
            self._log.error(e)

    def start(self, container, wait=False):
        name = self._get_name(container)
        self._cli.start(name)
        if wait:
            self._log.debug('wait container..')
            self._cli.wait(name)
            utility.print_byte(self._cli.logs(name, stream=True),
                               self._log.debug)

    def delete(self, container):
        name = self._get_name(container)
        try:
            self._cli.remove_container(name, v=True)
        except (errors.NotFound, errors.APIError) as e:
            self._log.error(e)
            raise e

    def exec_cmd(self, container, cmd):
        name = self._get_name(container)
        if not self.is_running(name):
            return False
        try:
            exec_id = self._cli.exec_create(name, cmd)
            status = self._cli.exec_start(exec_id)

            # TODO: verify how reliable this check is!
            check = 'rpc error:' != status[:10].decode("utf-8")
            self._log.debug('check: {}'.format(check))
            return check
        except errors.APIError as e:
            self._log.error(e)
            return False
        except requests.exceptions.ConnectionError as e:
            # TODO: this error shows up after a 10-second timeout
            self._log.error(e)
            return False

    def create_volume(self, volume):
        assert isinstance(volume, Volume)
        self._log.debug('volume opt: {}'.format(volume.get_all_opt()))
        return self._cli.create_volume(volume.name, volume.driver,
                                       volume.get_all_opt())

    def delete_volume(self, volume):
        name = self._get_name(volume)
        return self._cli.remove_volume(name)

    def get_containers(self, all=False):
        return self._cli.containers(all=all)

    def get_volumes(self):
        volumes = self._cli.volumes()
        return volumes['Volumes'] or []

    def inspect(self, item):
        name = self._get_name(item)
        try:
            return self._cli.inspect_container(name)
        except errors.NotFound:
            pass
        try:
            return self._cli.inspect_image(name)
        except errors.NotFound:
            pass
        try:
            return self._cli.inspect_volume(name)
        except errors.NotFound:
            return None

    def remove_all_containers(self):
        for c in self.get_containers(all=True):
            self.stop(c['Id'])
            self.delete(c['Id'])

    def remove_all_volumes(self):
        for v in self.get_volumes():
            self.delete_volume(v['Name'])

    def create_network(self, name, subnet='172.25.0.0/16'):
        # docker network create -d bridge --subnet 172.25.0.0/16 isolated_nw
        # self.delete_network(name)
        try:
            self._cli.create_network(name=name,
                                     driver='bridge',
                                     ipam={'subnet': subnet},
                                     check_duplicate=True)
        except errors.APIError:
            self._log.debug('network already exists!')

    def delete_network(self, name):
        assert isinstance(name, str)
        try:
            self._cli.remove_network(name)
        except errors.APIError:
            self._log.debug('network not exists!')

    def delete_image(self, name):
        assert isinstance(name, str)
        try:
            self._cli.remove_image(name)
        except errors.NotFound:
            pass

    # TODO: split this method into two, the semantics are unclear!
    def update_container(self, node, cmd, saved_image=True):
        assert isinstance(node, Container)
        # self._log.debug('container_conf: {}'.format(node.host_container))
        stat = self.inspect(node.image)
        old_cmd = stat['Config']['Cmd'] or None
        old_entry = stat['Config']['Entrypoint'] or None

        if self.inspect(node):
            self.stop(node)
            self.delete(node)
        self.create(node, cmd=cmd, entrypoint='', saved_image=saved_image)

        self.start(node.id, wait=True)
        self.stop(node.id)

        name = '{}/{}'.format(self._net_name, node.name)

        self._cli.commit(node.id, name)

        self.stop(node)
        self.delete(node)
        self.create(node,
                    cmd=node.cmd or old_cmd,
                    entrypoint=node.entrypoint or old_entry,
                    saved_image=True)

        self._cli.commit(node.id, name)

    def is_running(self, container):
        name = self._get_name(container)
        stat = self.inspect(name)
        stat = stat is not None and stat['State']['Running'] is True
        self._log.debug('State: {}'.format(stat))
        return stat

    def _get_name(self, name):
        if isinstance(name, six.string_types):
            return name
        else:
            assert isinstance(name, (Container, Volume))
            return name.name
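
A rough usage sketch of Docker_interface; `Container` comes from the surrounding project and is only assumed here to expose the attributes the class reads (name, image, cmd, entrypoint, env, ports, volume, link, to_build):

di = Docker_interface(net_name='tosker_net', tmp_dir='/tmp')
di.create_network('tosker_net')

con = Container(name='my_app')          # hypothetical constructor
con.image = 'nginx:latest'
con.to_build = False

di.create(con)                          # pulls the image and creates the container
di.start(con)
if di.is_running(con):
    di.exec_cmd(con, ['sh', '-c', 'echo ready'])
di.stop(con)
di.delete(con)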
Пример #47
0
class DockerDriver(object):
    """
    Handles all the interactions with the docker container the
    Android emulator runs in.
    """
    class Commands:
        build = [
            'docker', 'build', '--no-cache=false', '--pull=true',
            '--quiet=false', '--rm=true', '-t',
            '{0}:latest'.format(DockerConfig.CONTAINER), '.'
        ]
        run = [
            'docker', 'run', '-e',
            '"EMULATOR={0}"'.format(DockerConfig.EMULATOR), '-e',
            '"ARCH={0}"'.format(DockerConfig.ARCH), '-d', '-P', '--name',
            '{0}'.format(DockerConfig.ALIAS), '--log-driver=json-file',
            DockerConfig.CONTAINER
        ]
        start = ['docker', 'start', '{0}'.format(DockerConfig.ALIAS)]
        inspect = ['docker', 'inspect', '{0}'.format(DockerConfig.ALIAS)]
        pm_list = 'adb shell "pm list packages"'
        install_drozer = "docker exec {0} python /home/drozer/install_agent.py"
        run_drozer = 'python /home/drozer/drozer.py {0}'
        copy_to_container = 'docker cp "{0}" {1}:{2}'
        copy_from_container = 'docker cp {0}:{1} "{2}"'

    def __init__(self, init_only=False, fresh_start=False, clean_only=False):
        self.container_id = None
        self.ip_address = None

        self.cli = Client(base_url='unix://var/run/docker.sock')

        if fresh_start or clean_only:
            self.clean()

        if clean_only:
            logging.info("Cleaned containers and quitting.")
            exit(0)

        self.init_docker()

        if init_only:
            logging.info("Initialized and quitting.")
            exit(0)

    def _copy_to_container(self, src_path, dest_path):
        """
        Copies a file (presumed to be an apk) from src_path
        to home directory on container.
        """
        path = '/home/drozer/{path}.apk'.format(path=dest_path)
        command = self.Commands.copy_to_container.format(
            src_path, self.container_id, path)

        try:
            check_output(command, shell=True)
        except CalledProcessError as e:
            logging.error(('Command "{command}" failed with '
                           'error code {code}'.format(command=command,
                                                      code=e.returncode)))
            raise

    def _copy_from_container(self, src_path, dest_path):
        """
        Copies a file from src_path on the container to
        dest_path on the host machine.
        """
        command = self.Commands.copy_from_container.format(
            self.container_id, src_path, dest_path)
        try:
            check_output(command, shell=True)
        except CalledProcessError as e:
            logging.error(('Command "{command}" failed with '
                           'error code {code}'.format(command=command,
                                                      code=e.returncode)))
            raise

        logging.info("Log stored at {path}".format(path=dest_path))

    def _adb_install_apk(self, apk_path):
        """
        Installs an apk on the device running in the container
        using adb.
        """
        logging.info("Attempting to install an apk.")
        exec_id = self.cli.exec_create(
            self.container_id, 'adb install {0}'.format(apk_path))['Id']
        output = self.cli.exec_start(exec_id).decode('utf-8')

        if "INSTALL_PARSE_FAILED_NO_CERTIFICATES" in output:
            raise Exception('Install parse failed, no certificates')
        elif "INSTALL_FAILED_ALREADY_EXISTS" in output:
            logging.info("APK already installed. Skipping.")
        elif "Success" not in output:
            logging.error("APK didn't install properly")
            return False
        return True

    def _adb_uninstall_apk(self, app_id):
        """
        Uninstalls an application from the device running in the container
        via its app_id.
        """
        logging.info(
            "Uninstalling {app_id} from the emulator.".format(app_id=app_id))
        exec_id = self.cli.exec_create(
            self.container_id, 'adb uninstall {0}'.format(app_id))['Id']
        output = self.cli.exec_start(exec_id).decode('utf-8')

        if 'Success' in output:
            logging.info("Successfully uninstalled.")

        return True

    def _verify_apk_install(self, app_id):
        """
        Checks that the app_id is installed on the device running in the
        container.
        """
        logging.info(
            "Verifying {app} is installed on the device.".format(app=app_id))
        exec_id = self.cli.exec_create(self.container_id,
                                       self.Commands.pm_list)['Id']
        output = self.cli.exec_start(exec_id).decode('utf-8')

        if ("Could not access the Package Manager" in output
                or "device offline" in output):
            logging.info("Device or package manager isn't up")

        if app_id.split('_')[0] in output:  # TODO: this is a temporary fix
            logging.info("{app} is installed.".format(app=app_id))
            return True

        logging.error("APK not found in packages list on emulator.")

    def _delete_file(self, path):
        """
        Deletes file off the container to preserve space if scanning many apps
        """
        command = "rm {path}".format(path=path)
        exec_id = self.cli.exec_create(self.container_id, command)['Id']
        logging.info("Deleting {path} on the container.".format(path=path))
        self.cli.exec_start(exec_id)

    def _install_apk(self, apk_path, app_id):
        """
        Installs apk found at apk_path on the emulator. Will then
        verify it installed properly by looking up its app_id in
        the package manager.
        """
        if not all([self.container_id, self.ip_address]):
            # TODO: maybe have this fail nicely
            raise Exception("Went to install apk and couldn't find container")

        path = "/home/drozer/{app_id}.apk".format(app_id=app_id)
        self._copy_to_container(apk_path, app_id)
        self._adb_install_apk(path)
        self._verify_apk_install(app_id)
        self._delete_file(path)

    def _install_drozer(self):
        """
        Performs all the initialization of drozer within the emulator.
        """
        logging.info("Attempting to install com.mwr.dz on the emulator")
        logging.info("This could take a while so be patient...")
        logging.info(("We need to wait for the device to boot AND"
                      " the package manager to come online."))
        command = self.Commands.install_drozer.format(self.container_id)
        try:
            output = check_output(command, shell=True).decode('utf-8')
        except CalledProcessError as e:
            logging.error(('Command "{command}" failed with '
                           'error code {code}'.format(command=command,
                                                      code=e.returncode)))
            raise

        if 'Installed ok' in output:
            return True

    def _run_drozer_scan(self, app):
        """
        Runs the drozer agent which connects to the app running
        on the emulator.
        """
        logging.info("Running the drozer agent")
        exec_id = self.cli.exec_create(
            self.container_id, self.Commands.run_drozer.format(app))['Id']
        self.cli.exec_start(exec_id)

    def _container_is_running(self):
        """
        Checks whether the emulator container is running.
        """
        for container in self.cli.containers():
            if DockerConfig.ALIAS in container['Image']:
                return True

    def _docker_image_exists(self):
        """
        Check whether the docker image exists already.
        If this returns false we'll need to build the image
        from the DockerFile.
        """
        for image in self.cli.images():
            for tag in image['RepoTags']:
                if DockerConfig.ALIAS in tag:
                    return True

    _image_queue = {}

    def _build_docker_image(self):
        """
        Builds the docker container so we can run the android emulator
        inside it.
        """
        logging.info("Pulling the container from docker hub")
        logging.info("Image is roughly 5 GB so be patient")

        logging.info("(Progress output is slow and requires a tty.)")
        # we pause briefly to narrow race condition windows of opportunity
        sleep(1)

        is_a_tty = os.isatty(sys.stdout.fileno())

        for output in self.cli.pull(DockerConfig.CONTAINER,
                                    stream=True,
                                    tag="latest"):
            if not is_a_tty:
                # run silent, run quick
                continue
            try:
                p = json.loads(output.decode('utf-8'))
                p_id = p['id']
                self._image_queue[p_id] = p
                t, c, j = 1, 1, 0
                for k in sorted(self._image_queue):
                    j += 1
                    v = self._image_queue[k]
                    vd = v['progressDetail']
                    t += vd['total']
                    c += vd['current']
                msg = "\rDownloading: {0}/{1} {2}% [{3} jobs]"
                msg = msg.format(c, t, int(c / t * 100), j)
                sys.stdout.write(msg)
                sys.stdout.flush()
            except Exception:
                pass
        print("\nDONE!\n")

    def _verify_apk_exists(self, full_apk_path):
        """
        Verifies that the apk path we have is actually a file.
        """
        return os.path.isfile(full_apk_path)

    def init_docker(self):
        """
        Perform all the initialization required before a drozer scan.
        1. build the image
        2. run the container
        3. install drozer and enable the service within the app
        """
        built = self._docker_image_exists()

        if not built:
            self._build_docker_image()

        running = self._container_is_running()

        if not running:
            logging.info('Trying to run container...')
            try:
                check_output(self.Commands.run)
            except CalledProcessError as e:
                logging.error((
                    'Command "{command}" failed with error code {code}'.format(
                        command=self.Commands.run, code=e.returncode)))
            running = self._container_is_running()

        if not running:
            logging.info('Trying to start container...')
            try:
                check_output(self.Commands.start)
            except CalledProcessError as e:
                logging.error((
                    'Command "{command}" failed with error code {code}'.format(
                        command=self.Commands.start, code=e.returncode)))
            running = self._container_is_running()

        if not running:
            raise Exception("Running container not found, critical error.")

        containers = self.cli.containers()

        for container in containers:
            if DockerConfig.ALIAS in container['Image']:
                self.container_id = container['Id']
                n = container['NetworkSettings']['Networks']
                self.ip_address = n['bridge']['IPAddress']
                break

        if not self.container_id or not self.ip_address:
            logging.error("No ip address or container id found.")
            exit(1)

        if self._verify_apk_install('com.mwr.dz'):
            return

        self._install_drozer()

    def clean(self):
        """
        Clean up all the containers made by this script.
        Should be run after the drozer scan completes.
        """
        for container in self.cli.containers():
            if DockerConfig.ALIAS in container['Image']:
                logging.info("Removing container {0}".format(container['Id']))
                self.cli.remove_container(container['Id'], force=True)

    def perform_drozer_scan(self, apk_path, app_id):
        """
        Entrypoint for scanning an android app. Performs the following steps:
        1. installs an apk on the device
        2. runs a drozer scan
        3. copies the report off the container
        4. uninstalls the apk to save space on the device
        """
        self._install_apk(apk_path, app_id)
        logging.info("Running the drozer scan.")
        self._run_drozer_scan(app_id)
        logging.info("Scan finished. Moving the report off the container")
        dest = apk_path + '.drozer'
        self._copy_from_container('/tmp/drozer_report.log', dest)
        self._adb_uninstall_apk(app_id)
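
The public entry points above suggest a run along the following lines (the apk path and package name are placeholders):

driver = DockerDriver()                                  # builds/starts the emulator container as needed
driver.perform_drozer_scan('/tmp/app.apk', 'com.example.app')
driver.clean()                                           # remove the emulator container afterwards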
Пример #48
0
class DockerAPI(SSHAPI):
    def __init__(self):
        try:
            SSHAPI.__init__(self)
            config = ConfigParser.ConfigParser()
            config.read('deploy.cfg')
            self.docker_client = Client('tcp://%s:2376' % config.get('docker','server'))
        except Exception as e:
            print 'Failed to connect to Docker: ', e

    def list_containers(self):
        for c in self.docker_client.containers():
            print c.get('Names')

    def create_container(self, nome, imagem='ubuntu'):
        res = self.docker_client.create_container(name=nome,
                                                  image=imagem,
                                                  command='/bin/bash',
                                                  tty=True,
                                                  stdin_open=True,
                                                  detach=True)

        if res:
            return res
        else:
            print 'Failed to create the container: ', res

    def start_container(self, id):
        try:
            res = self.docker_client.start(container=id)
            print 'Container started'
        except Exception as e:
            print 'Failed to start the container: ', e

    def inspect_container(self, id):
        try:
            res = self.docker_client.inspect_container(id)
            return res
        except Exception as e:
            print 'Failed to fetch container information: ', e

    def remove_container(self, id):
        try:
            self.docker_client.stop(id)
            self.docker_client.remove_container(id)
            print ('Container removed successfully')
        except Exception as e:
            print 'Failed to remove the container: ', e

    def get_container(self, nome):
        try:
            todos = self.docker_client.containers(all=True)
            nome = '/' + nome
            container = [ c for c in todos if nome in c.get('Names') ][0]
            return container
        except Exception as e:
            print 'Failed to find the container: ', e

    def get_container_address(self, nome):
        address = self.get_container(nome)
        address = address.get("NetworkSettings").get("Networks")\
            .get("bridge").get("IPAddress")
        return address


    def exec_command(self, id, cmd):
        try:
            create_id = self.docker_client.exec_create(id, cmd)
            print 'Executing command: ', cmd
            print self.docker_client.exec_start(create_id)
        except Exception as e:
            print 'Failed to execute the command: ', e

    def _exec(self, container, cmd):
        c = 'docker exec %s %s' % (container, cmd)
        self.exec_ssh_command(c)
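
A hedged usage sketch; it assumes deploy.cfg has a [docker] section with a `server` option, as read in __init__ above, and that the container name is a placeholder:

api = DockerAPI()
res = api.create_container('web01', imagem='ubuntu')
api.start_container(res['Id'])
print api.get_container_address('web01')
api.exec_command(res['Id'], 'uname -a')
api.remove_container(res['Id'])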
Пример #49
0
class Two1ComposerContainers(Two1Composer):
    """ Manage machine-payable microservices in containers.
    """

    def __init__(self):
        self._connected = ComposerState.DISCONNECTED
        self.provider = TwentyOneProvider()
        self.default_wallet = Two1Wallet(self.wallet_file, self.provider)

    # public api
    def connect(self, machine_env, host, machine_config_file):
        """ Connect service composer to machine layer.

        Args:
            machine_env (dict): Environment dictionary for the docker client of the machine layer
            host: Hostname of the machine layer docker daemon
            machine_config_file (str): Path to the config file for the machine layer
        """
        self.machine_env = machine_env
        self.machine_host = host
        with open(machine_config_file, 'r') as f:
            self.machine_config = json.load(f)
        self.docker_client = Client(**docker_env(assert_hostname=False,
                                                 environment=self.machine_env))
        self._connected = ComposerState.CONNECTED

    def initialize_server(self, username, password, server_port, wallet=None):
        """ Initialize micropayments server.

        Define boilerplate services, networks, and volumes composer file
        and nginx server config.

        Generates a wallet mnemonic if non-existent.

        Args:
            username (str): Username to log in with
            password (str): Password to log in with
            server_port (int): The server port that the router is running on
            wallet: The wallet to use for the payments server and subsequent services
        """
        self._create_base_server(server_port)  # create base router server config
        self._create_payments_route()  # create route to payments server

        new_wallet = None  # rv[1], not None if mnemonic is replaced in this function

        # generate service description (yaml)
        with self.ComposerYAMLContext(username, password, server_port) as composer_yaml:
            try:
                mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                if not mnemonic or mnemonic == str(None):  # if mnemonic is Falsy or uninitialized
                    raise ValueError()
            except (KeyError, ValueError):  # catches if mnemonic is Falsy or doesn't exist in dict tree
                new_machine_wallet = self.default_wallet.create(self.provider)[1]
                composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC'] = new_machine_wallet
                new_wallet = new_machine_wallet

        return 0, new_wallet

    def list_services(self):
        """ List available services to sell.
        """
        service_image_data = requests.get(os.path.join(
            Two1Composer.DOCKERHUB_API_URL, Two1Composer.DOCKERHUB_REPO, 'tags')).json().get('results')
        valid_service_names = set([image_data['name'].split('service-')[1] for image_data in
                                   service_image_data if re.match(r'^service-', image_data['name'])])
        return list(valid_service_names)

    def pull_latest_images(self, images):
        """ Pull latest images from 21 DockerHub.

        Args:
            images (list): List of images to pull from the 21 DockerHub.
        """
        for image_tag in images:
            self.docker_client.pull(Two1Composer.DOCKERHUB_REPO, image_tag, stream=False)
        return 0

    def start_services(self, services, failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                       failed_to_up_hook, up_hook):
        """ Start selected services.

        Args:
            services (list): List of services to start.
            failed_to_start_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             fails to start.
            started_hook (Callable): A callable hook that takes in a service name and is run when said service starts.
            failed_to_restart_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               fails to restart.
            restarted_hook (Callable): A callable hook that takes in a service name and is run when said service
                                       restarts.
            failed_to_up_hook (Callable): A callable hook that takes in a service name and is run when said service
                                          fails to go up.
            up_hook (Callable): A callable hook that takes in a service name and is run when said service goes up.

        Returns:
            dict: Dictionary with service as key and value as dictionary.
                  Inner dictionary has format {"started": bool, "message": str, "order": int}.

        """
        self._start_sell_service('base', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('router', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('payments', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

        # Attempt to start all market services
        for service_name in services:
            # create nginx routes for service_name
            self._create_service_route(service_name)
            # add service_name to docker compose file
            with self.ComposerYAMLContext() as docker_compose_yaml:
                username = docker_compose_yaml['services']['payments']['environment']['TWO1_USERNAME']
                password = docker_compose_yaml['services']['payments']['environment']['TWO1_PASSWORD']
                mnemonic = docker_compose_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                docker_compose_yaml['services'][service_name] = {
                    'image': '%s:%s' % (Two1Composer.DOCKERHUB_REPO, 'service-' + service_name),
                    'container_name': 'sell_%s' % service_name,
                    'depends_on': ['base'],
                    'restart': 'always',
                    'environment': {
                        "TWO1_USERNAME": str(username),
                        "TWO1_PASSWORD": str(password),
                        "TWO1_WALLET_MNEMONIC": str(mnemonic),
                        "SERVICE": str(service_name),
                        "PAYMENT_SERVER_IP": "http://%s:%s" % (self.machine_host, self.machine_config["server_port"])
                    },
                    'volumes': [
                        Two1Composer.DB_DIR + ":/usr/src/db/"
                    ],
                    'logging': {
                        'driver': 'json-file'
                    },
                    'cap_drop': [
                        'ALL'
                    ],
                    'cap_add': [
                        'DAC_OVERRIDE',
                        'NET_RAW',
                    ],
                }
                link_str = '%s:%s' % (service_name, service_name)
                if link_str not in docker_compose_yaml['services']['router']['links']:
                    docker_compose_yaml['services']['router']['links'].append(link_str)

            # attempt to build service_name
            self._start_sell_service(service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

    def _start_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook,
                            timeout=Two1Composer.SERVICE_START_TIMEOUT):
        try:
            subprocess.check_output(["docker-compose", "-f", Two1Composer.COMPOSE_FILE, "up", "-d", service_name],
                                    stderr=subprocess.DEVNULL, env=self.machine_env)
        except subprocess.CalledProcessError:
            failed_to_start_hook(service_name)
        else:
            started_hook(service_name)
            if service_name == 'router':
                time.sleep(5)
            elif service_name != 'router' and service_name != 'base':
                start = time.clock()

                exec_id = self.docker_client.exec_create('sell_router', "curl %s:5000" % service_name)['Id']
                self.docker_client.exec_start(exec_id)
                running = True

                while time.clock() - start < timeout and running is True:
                    running = self.docker_client.exec_inspect(exec_id)['Running']

                if running is True:
                    failed_to_up_hook(service_name)
                else:
                    up_hook(service_name)

    def _restart_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_restart_hook,
                              restarted_hook, failed_to_up_hook, up_hook):
        try:
            self.docker_client.stop("sell_%s" % service_name)
        except:
            is_restart = False
        else:
            is_restart = True

        self._start_sell_service(service_name, failed_to_restart_hook if is_restart else failed_to_start_hook,
                                 restarted_hook if is_restart else started_hook, failed_to_up_hook, up_hook)

    def stop_services(self, services,
                      service_found_stopped_and_removed_hook,
                      service_failed_to_stop_hook,
                      service_failed_to_be_removed_hook,
                      service_not_found_hook):
        """ Stop selected services and remove containers.

        Args:
            services (list): List of services to stop.
            service_found_stopped_and_removed_hook (Callable): A callable hook that takes in a service name and is run
                                                               when said service is found, stopped, and removed.
            service_failed_to_stop_hook (Callable): A callable hook that takes in a service name and is run when said
                                                    service fails to be stopped.
            service_failed_to_be_removed_hook (Callable): A callable hook that takes in a service name and is run when
                                                          said service fails to be removed.
            service_not_found_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               isn't found.

        """
        running_container_names = self.docker_client.containers(filters={"status": "running"})
        for container_name in running_container_names:
            running_service_name = list(self.names_from_containers([container_name]))[0]
            if running_service_name in services:
                try:
                    self.docker_client.stop(container_name)
                except:
                    service_failed_to_stop_hook(running_service_name)
                else:  # container stopped
                    try:
                        self.docker_client.remove_container(container_name)
                    except:
                        service_failed_to_be_removed_hook(running_service_name)
                    else:  # container
                        service_found_stopped_and_removed_hook(running_service_name)

    def silently_force_stop_all_services(self):
        running_container_names = self.docker_client.containers(filters={"status": "running"})
        for container_name in running_container_names:
            self.docker_client.remove_container(container_name, force=True)

    @staticmethod
    def names_from_containers(containers):
        """ Return names from containers.

        Args:
            containers (list): List of containers as returned by self.docker_client.containers
        """
        return frozenset([service['Names'][0][6:] for service in containers])

    def status_services(self, services,
                        service_nonexistent_hook,
                        service_running_hook,
                        service_exited_hook,
                        service_unknown_state_hook):
        """ Gets running status of specified services.

        Args:
            services (list): List of services to get status for.
            service_nonexistent_hook (Callable): A callable hook that takes in a service name and is run when said
                                                 service is non-existent.
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_exited_hook (Callable): A callable hook that takes in a service name and is run when said service
                                            has exited.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        Returns:
            dict: Dictionary with service as key and value as dictionary.
            Inner dictionary has format: {"status": str, "message": str}.
            "Status" choices are: Not found, Running, Exited, Unable to contact.
        """

        existent_services = self.names_from_containers(self.docker_client.containers(all=True))
        running_services = self.names_from_containers(self.docker_client.containers(filters={"status": "running"}))
        exited_services = self.names_from_containers(self.docker_client.containers(filters={"status": "exited"}))

        for service_name in services:
            if service_name in running_services:
                service_running_hook(service_name)
            elif service_name in exited_services:
                service_exited_hook(service_name)
            elif service_name in existent_services:
                service_unknown_state_hook(service_name)
            else:
                service_nonexistent_hook(service_name)

    def get_running_services(self):
        """ Get list of running services.

        Returns:
            (list) started services
        """
        return list(set(self.names_from_containers(self.docker_client.containers(
            filters={"status": "running"}))).difference(set(Two1Composer.BASE_SERVICES)))

    def status_router(self, service_running_hook, service_unknown_state_hook):
        """ Get status of Nginx router container.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        if len(self.docker_client.containers(all=True, filters={"name": "sell_router", "status": "running"})) == 1:
            service_running_hook("router")
        else:
            service_unknown_state_hook("router")

    def status_payments_server(self, service_running_hook, service_unknown_state_hook):
        """ Get status of payment channels server.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        if len(self.docker_client.containers(all=True, filters={"name": "sell_payments", "status": "running"})) == 1:
            service_running_hook("payments")
        else:
            service_unknown_state_hook("payments")

    @staticmethod
    def _create_base_server(server_port):
        """ Create nginx base server config.

        Args:
            server_port (int): port for 21 sell server.
        """
        try:
            # create nginx router dirs
            shutil.rmtree(Two1Composer.SITES_ENABLED_PATH, ignore_errors=True)
            shutil.rmtree(Two1Composer.SITES_AVAILABLE_PATH, ignore_errors=True)
            os.makedirs(Two1Composer.SITES_ENABLED_PATH, exist_ok=True)
            os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)

            # create base nginx server
            with open(os.path.join(Two1Composer.SITES_ENABLED_PATH,
                                   "two1baseserver"), 'w') as f:
                f.write("server {\n"
                        "    listen " + str(server_port) + ";\n"
                        "    include /etc/nginx/sites-available/*;\n"
                        "}\n"
                        )
        except Exception:
            raise exceptions.Two1ComposerServiceDefinitionException()

    @staticmethod
    def _create_service_route(service):
        """ Create route for container service.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, service), 'w') as f:
                f.write("location /" + service + " {\n"
                        "    rewrite ^/" + service + "(.*) /$1 break;\n"
                        "    proxy_pass http://" + service + ":" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception:
            raise exceptions.Two1ComposerRouteException()

    @staticmethod
    def _create_payments_route():
        """ Add route to payments server.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            # write nginx route for payments server
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, "payments"), 'w') as f:
                f.write("location /payment {\n"
                        "    proxy_pass http://payments:" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception:
            raise exceptions.Two1ComposerRouteException()

    def publish_service(self, service_name, rest_client, published_hook, already_published_hook, failed_to_publish_hook,
                        unknown_publish_error_hook):
        strm, stat = self.docker_client.get_archive('sell_%s' % service_name, '/usr/src/app/manifest.yaml')

        with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
            manifest = yaml.load(tf.extractfile(stat[u'name']).read().decode())

        try:
            resp = rest_client.publish({"manifest": manifest,
                                        "marketplace": "21mkt"})
        except ServerRequestError as e:
            if e.status_code == 403 and e.data.get("error") == "TO600":
                already_published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)
        except:
            unknown_publish_error_hook(service_name)
        else:
            if resp.status_code == 201:
                published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)

    def read_server_config(self):
        try:
            with open(Two1Composer.COMPOSE_FILE) as f:
                return yaml.load(f)

        except FileNotFoundError:
            return {}

    def get_services_mnemonic(self):
        if os.path.isfile(Two1Composer.COMPOSE_FILE):
            with self.ComposerYAMLContext() as composer_yaml:
                try:
                    maybe_mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                except KeyError:
                    rv = None
                else:
                    rv = maybe_mnemonic
        else:
            rv = None
        return rv
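
All of the hook parameters above are plain callables that receive only the service name, so a minimal, hypothetical wiring of start_services() could look like the sketch below; the 'ping' service name is a placeholder and connect() must already have been called with a machine environment:

def started(name):
    print("started:", name)

def failed(name):
    print("failed:", name)

composer = Two1ComposerContainers()
# composer.connect(machine_env, host, machine_config_file) must run first.
composer.start_services(['ping'],
                        failed_to_start_hook=failed, started_hook=started,
                        failed_to_restart_hook=failed, restarted_hook=started,
                        failed_to_up_hook=failed, up_hook=started)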
Пример #50
0
def _run_cmd_cli(container, cmd):
    c = Client(base_url='unix://var/run/docker.sock')
    exec_id = c.exec_create(container, cmd)
    time.sleep(5)
    out = c.exec_start(exec_id)
    return out
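
The five-second sleep above happens before exec_start() actually runs the command; with the non-streaming exec_start used here the call already returns the full output once the command finishes, so a variant without the sleep that also reports the exit code might look like this sketch:

from docker import Client


def _run_cmd_cli_checked(container, cmd):
    c = Client(base_url='unix://var/run/docker.sock')
    exec_id = c.exec_create(container, cmd)['Id']
    out = c.exec_start(exec_id)                     # blocks until the command completes
    code = c.exec_inspect(exec_id)['ExitCode']      # 0 on success
    return out, code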
Пример #51
0
class docker_operate:
    '''
    @Purpose: parse the parameters

    @Format:
        control = {
            "type": "create",        # operation type
            "operation": {...}       # dict describing the concrete operation
        }
    '''

    def __init__(self):
        self.ip_solve = ip_distribution.ip_distribution()
        self.mongo = mongodb.mongo_opreate()

    # parse the command parameters
    def resolve(self, task):
        control = task["operate"]
        if control["type"] == "create":
            return self.create(control["operation"])

        if control["type"] == "execute":
            return self.execute(control["operation"])

        if control["type"] == "delete":
            return self.delete(control["operation"])

        if control["type"] == "display":
            return self.display(control["operation"])

    # create containers
    def create(self, operation):
        try:
            self.host = "tcp://" + operation["host"] + ":2375"
            version = operation["version"]
            self.client = Client(base_url=self.host)
            contains_num = len(self.client.containers(all=True))
            ip_num_int = self.ip_solve.ip2num(operation["start_ip"]) + 1
            docker_list = []
            for num in range(operation["create_num"]):
                name = operation["name_pro"] + str(num + contains_num)
                container = self.client.create_container(
                    image=operation["image"],
                    name=name,
                    stdin_open=True,
                    host_config=self.client.create_host_config(
                        network_mode='none',
                        privileged=True,
                        publish_all_ports=True))
                self.client.start(container=container.get('Id'))

                # build the docker node record and store it in the database
                docker_info = {
                    "docker_id": container.get('Id'),
                    "ip": self.ip_solve.num2ip(ip_num_int),
                    "broadcast": operation["end_ip"],
                    "gateway": operation["start_ip"],
                    "link_ovs": operation["link_ovs"],
                }
                ip_num_int += 1
                docker_list.append(docker_info)
            docker_dic = {
                "state": "no_ip",
                "docker_list": docker_list,
            }
            self.mongo.save_tpl(docker_list, "docker_info")
            result = {"operation": "create", "return": 0, "error": None}
        except:
            result = {
                "operation": "create",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result["error"]

    # execute a command
    def execute(self, operation):
        self.host = "tcp://" + operation["host"] + ":2375"
        self.version = operation["version"]
        self.client = Client(base_url=self.host)
        try:
            returns = []
            if operation["all_exec"] == True:
                all_containers = self.client.containers(all=True)
                for container in all_containers:
                    exec_container = self.client.exec_create(
                        container=container["Id"],
                        cmd=operation["cmd"],
                    )
                    response = self.client.exec_start(
                        exec_id=exec_container.get('Id'), tty=True)
                    returns.append(str(response))

                    if operation["delete"] == True:
                        self.client.remove_container(
                            container=exec_container.get('Id'), force=True)
                result = {
                    "operation": "execute",
                    "return": returns,
                    "error": None
                }
            else:
                exec_container = self.client.exec_create(
                    container=operation["name"],
                    cmd=operation["cmd"],
                )
                response = self.client.exec_start(
                    exec_id=exec_container.get('Id'), tty=True)
                returns.append(str(response))
                if operation["delete"] == True:
                    self.client.remove_container(
                        container=exec_container.get('Id'), force=True)

                result = {
                    "operation": "execute",
                    "return": returns,
                    "error": None
                }

        except:
            result = {
                "operation": "execute",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result

    # delete containers
    def delete(self, operation):
        self.host = "tcp://" + operation["host"] + ":2375"
        self.version = operation["version"]
        self.client = Client(base_url=self.host)
        try:
            if operation["del_all"] == True:
                all_containers = self.client.containers(all=True)
                for container in all_containers:
                    self.client.remove_container(
                        container=container["Id"], force=True)
            else:
                self.client.remove_container(
                    container=operation["name"], force=True)

            result = {"operation": "delete", "return": 0, "error": None}

        except:
            result = {
                "operation": "delete",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result

    # list containers
    def display(self, operation):
        self.host = "tcp://" + operation["host"] + ":2375"
        self.version = operation["version"]
        self.client = Client(base_url=self.host)
        try:
            if operation["image"] == True:
                images = self.client.images(all=True)
            else:
                images = []

            if operation["all_container"] == True:
                containers = self.client.containers(all=True)
            else:
                containers = self.client.containers()

            result = {
                "operation": "display",
                "return": {
                    "images": images,
                    "containers": containers
                },
                "error": None
            }

        except:
            result = {
                "operation": "display",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result
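
The docstring above describes the control format only abstractly; based on the keys create() reads, a concrete "create" task might look like the sketch below (all values are placeholders, and ip_distribution/mongodb are project-local modules the class depends on):

task = {
    "operate": {
        "type": "create",
        "operation": {
            "host": "192.168.1.10",     # docker daemon reachable on tcp://<host>:2375
            "version": "1.22",
            "image": "ubuntu",
            "name_pro": "node",         # container name prefix
            "create_num": 3,
            "start_ip": "10.0.0.1",
            "end_ip": "10.0.0.255",
            "link_ovs": "ovs-br0",
        },
    },
}

error = docker_operate().resolve(task)  # None on success, a traceback string otherwise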
Пример #52
0
class docker_operate:
    '''
    @Purpose: parse the parameters

    @Format:
        control = {
            "type": "create",        # operation type
            "operation": {...}       # dict describing the concrete operation
        }
    '''
    def __init__(self):
        self.ip_solve = ip_distribution.ip_distribution(types=False)
        self.mongo = mongodb.mongo_opreate()

    # parse the command parameters
    def resolve(self, task):
        control = task["operate"]
        if control["type"] == "create":
            return self.create(task)

        if control["type"] == "execute":
            return self.execute(task)

        if control["type"] == "delete":
            return self.delete(task)

        if control["type"] == "display":
            return self.display(task)

    # create containers
    def create(self, task):
        operations = task["operate"]["operation"]
        try:
            for operation in operations:
                self.host = "tcp://" + operation["host"] + ":2375"
                version = operation["version"]
                self.client = Client(base_url=self.host)
                contains_num = len(self.client.containers(all=True))
                ip_num_int = self.ip_solve.ip2num(operation["start_ip"]) + 1
                docker_list = []
                for num in range(operation["create_num"]):
                    name = operation["name_pro"] + str(num + contains_num)
                    container = self.client.create_container(
                        image=operation["image"],
                        name=name,
                        command="/bin/bash",
                        stdin_open=True,
                        host_config=self.client.create_host_config(
                            network_mode='none',
                            privileged=True,
                            publish_all_ports=True))
                    self.client.start(container=container.get('Id'))

                    docker_ip = self.ip_solve.num2ip(ip_num_int)

                    ip = docker_ip + "/" + str(operation["mask"])

                    docker_id = container.get('Id')
                    switch_id = operation["ovs_id"]
                    # assign the IP address here
                    run = RunCommand()
                    run.runCommand('pipework ' + switch_id + ' ' + docker_id +
                                   ' ' + ip)
                    print "已分配ip:", ip

                    # build the docker node record and store it in the database
                    docker_info = {
                        "image": operation["image"],
                        "docker_id": container.get('Id'),
                        "name": name,
                        "ip": docker_ip,
                        "broadcast": operation["end_ip"],
                        "gateway": operation["start_ip"],
                        "link_ovs": operation["ovs_id"],
                        "mask": operation["mask"],
                        "host_ip": operation["host"],
                        "version": operation["version"],
                        "state": "up"
                    }
                    ip_num_int += 1
                    docker_list.append(docker_info)
                docker_dic = {
                    "state": "no_ip",
                    "docker_list": docker_list,
                }
                self.mongo.save_tpl_list(docker_list, "docker_info")
            self.mongo.save_tpl_list(task["ovs_list"], "ovs_list", "OVS_ID")
            ovs_link = {"ovs_relation": task["ovs_link"]}
            self.mongo.save_tpl_dic(ovs_link, "ovs_link")
            result = {"operation": "create", "return": 0, "error": None}
        except:
            print traceback.format_exc()
            result = {
                "operation": "create",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result["error"]

    # execute a command
    def execute(self, task):
        operations = task["operate"]["operation"]
        try:
            returns = []
            for operation in operations:
                self.host = "tcp://" + operation["host"] + ":2375"
                self.version = operation["version"]
                self.client = Client(base_url=self.host)
                for docker_id in operation["docker_list"]:
                    exec_container = self.client.exec_create(
                        container=docker_id,
                        cmd=operation["cmd"],
                    )
                    response = self.client.exec_start(
                        exec_id=exec_container.get('Id'), tty=True)
                    print str(response)
                    returns.append("from" + docker_id + " resault:\n" +
                                   str(response))

            result = {"operation": "execute", "return": returns, "error": None}

        except:
            print traceback.format_exc()
            result = {
                "operation": "execute",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result

    # Delete containers
    def delete(self, task):
        operation = task["operate"]["operation"]
        host_ip = "127.0.0.1"
        self.host = "tcp://" + operation["docker_list"][0]["host"] + ":2375"
        self.version = operation["docker_list"][0]["version"]
        self.client = Client(base_url=self.host)
        try:
            for docker in operation["docker_list"]:
                if docker["host"] != host_ip:
                    host_ip = docker["host"]
                    self.host = "tcp://" + docker["host"] + ":2375"
                    self.version = docker["version"]
                    self.client = Client(base_url=self.host)
                try:
                    self.client.remove_container(container=docker["name"],
                                                 force=True)
                    self.mongo.del_tpl("docker_info",
                                       {"docker_id": docker["name"]})
                except:
                    print "删除容器:", docker["name"], "错误!该容器可能不存在,或已经被删除!"
            result = {"operation": "delete", "return": 0, "error": None}

        except:
            print traceback.format_exc()
            result = {
                "operation": "delete",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result

    # Display images and containers
    def display(self, task):
        operation = task["operate"]["operation"]
        self.host = "tcp://" + operation["host"] + ":2375"
        self.version = operation["version"]
        self.client = Client(base_url=self.host)
        try:
            if operation["image"] == True:
                images = self.client.images(all=True)
            else:
                images = []

            if operation["all_container"] == True:
                containers = self.client.containers(all=True)
            else:
                containers = self.client.containers()

            result = {
                "operation": "display",
                "return": {
                    "images": images,
                    "containers": containers
                },
                "error": None
            }

        except:
            print traceback.format_exc()
            result = {
                "operation": "display",
                "return": -1,
                "error": str(traceback.format_exc())
            }
        return result
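
The execute() method above uses docker-py's two-step exec flow: exec_create registers a command against a running container and returns an exec Id, and exec_start runs it and returns the collected output. Below is a minimal standalone sketch of that flow, assuming a daemon listening on TCP port 2375; the host address, container name, and function name are placeholders, not part of the class above.

from docker import Client  # docker-py < 2.0, as in the examples above

def run_in_container(host, container, cmd):
    # Connect to the remote docker daemon over plain TCP.
    client = Client(base_url="tcp://" + host + ":2375")
    # Step 1: register the command against the running container.
    exec_id = client.exec_create(container=container, cmd=cmd)['Id']
    # Step 2: start the exec instance and return its combined output.
    return client.exec_start(exec_id=exec_id)

# Placeholder usage:
# print run_in_container("192.168.0.10", "my_container", "ip addr show")
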
Пример #53
0
class AXDockerClient(object):
    def __init__(self, url=DEFAULT_DOCKER_SOCK):
        self._conn = Client(base_url=url)
        self._retry = AXRetry(retry_exception=(Exception, ),
                              success_check=lambda x: x,
                              default=False,
                              success_default=True)

    @property
    def version(self):
        """Cached version information"""
        if hasattr(self, '_version'):
            return self._version
        self._version = self._conn.version()
        return self._version

    @property
    def version_tuple(self):
        """Version tuple of docker daemon (e.g. (1, 11, 2))"""
        return tuple([int(i) for i in self.version['Version'].split('.')])

    # Public APIs
    def start(self, registry, image, tag="latest", **kwargs):
        """
        Start a new container described by image.

        :param registry: The registry to use
        :type registry: DockerRegistry
        :param image: Full image name for this container.
        :param kwargs: Other args passed to container start.
        :return:
        """
        assert "tag" not in kwargs
        assert registry is not None, "Cannot start a container without providing a DockerRegistry"

        if not self._pull_with_caching(registry, image, tag):
            return None

        full_image = registry.servername + "/" + image + ":" + tag
        container = self._create(full_image, **kwargs)
        if container is None:
            return None

        started = self._start(container)
        if started:
            return container
        else:
            self._remove(container["Id"])
            return None

    def stop(self, container, **kwargs):
        """
        Stop the specified container. Wrapper for the docker API that handles exceptions.
        :param container: (string) Id or name of container.
        :param kwargs: Pass through kwargs for docker. Currently using only timeout.
        """
        if "timeout" not in kwargs:
            kwargs["timeout"] = 1
        self._stop(container, **kwargs)

    def remove(self, container, **kwargs):
        """
        Remove a container.
        :param container: (string) Id or name of container.
        :param kwargs: Pass through for docker.
        :return:
        """
        self._remove(container, **kwargs)

    def run(self, image, cmd, timeout=1200, **kwargs):
        """
        Run a command inside a container and check result.

        Container image is automatically pulled.
        Container will be stopped and removed after the command completes.

        :param image: Container image to run.
        :param cmd: Command inside container. This overwrites docker "command" in kwargs
        :param timeout: Timeout to wait for container.
        :param kwargs: Dict for parameters. It includes AX parameters and pass through ones.
                       All AX parameters start with "ax_" and will be removed before passing to docker create.
                       Currently supported AX parameters:
                         - ax_net_host: Set network mode to "host"
                         - ax_privileged: Run container in privileged mode
        :return: Tuple of: (True/False, return code)
        """
        assert "tag" not in kwargs
        logger.debug("Run %s inside %s on host %s, kwargs %s", cmd, image,
                     self._host, kwargs)

        # Always overwrite command in kwargs.
        kwargs["command"] = cmd

        started = False
        container = {}
        rc = -1
        try:
            container = self._create(image, **kwargs)
            assert container, "Failed to create from %s, %s" % (image, kwargs)

            started = self._start(container)
            assert started, "Failed to start %s, %s" % (image, container)

            rc = self._conn.wait(container, timeout=timeout)
            assert rc == 0, "Command %s failed rc=%s" % (cmd, rc)

        except Exception:
            logger.exception("Failed to run %s in %s on %s", cmd, image,
                             self._host)
            return False, rc

        finally:
            if started:
                self._stop(container["Id"], timeout=1)
            if container:
                self._remove(container["Id"])
        logger.debug("Completed run %s inside %s on %s, rc=%s", cmd, image,
                     self._host, rc)
        return True, rc

    def cache_image(self, registry, name, tag="latest"):
        """
        Cache the image to local registry

        :param registry: The registry to use
        :type registry: DockerRegistry
        :param name: name of repo
        :param tag: repo tag
        """
        fetcher = DockerImageFetcher()
        full_image = registry.servername + "/" + name + ":" + tag
        return fetcher.single_executor(full_image, self._pull_with_caching,
                                       registry, name, tag)

    def get_container_uuid(self, name):
        """
        Get UUID for a container.
        """
        try:
            info = self._conn.inspect_container(name)
        except Exception:
            info = {}
        return info.get("Id", None)

    def get_container_version(self, name):
        """
        Get image namespace and version for a running container

        Sample return:
        [
            "docker.example.com/lcj/axagent:latest",
            "docker.local/lcj/axagent:latest"
        ]
        """
        try:
            info = self._conn.inspect_container(name)
        except NotFound:
            return []
        image = info["Image"].split(":")[1]
        info = self._conn.inspect_image(image)
        return info["RepoTags"]

    def exec_cmd(self, container_id, cmd, **kwargs):
        """Executes a command inside a running container and returns its output on completion

        :param container_id: container id
        :param cmd: command to execute
        :return: output from the command
        """
        logger.debug("Executing %s in container %s (kwargs: %s)", cmd,
                     container_id, kwargs)
        try:
            exec_id = self._conn.exec_create(container_id, cmd, **kwargs)
            response = self._conn.exec_start(exec_id)
            return response
        # Docker API can actually return either error at different time.
        except NotFound:
            logger.debug("Container %s not exist on host %s", container_id,
                         self._host)
        except APIError as e:
            if "not running" in str(e):
                logger.debug("Container %s not running on host %s",
                             container_id, self._host)
            else:
                raise

    def exec_kill(self, pid, exec_id=None, container_id=None, signal=None):
        """
        Kill a pid in a container. Optionally checks if exec session is still valid before killing.

        :param pid: pid to kill in the container.
        :param exec_id: perform kill only if exec id is still running.
        :param container_id: container in which the pid lives; validated against the exec session when both are supplied.
        :param signal: kill signal to send to process
        """
        if not any([exec_id, container_id]):
            raise ValueError("exec_id or container_id must be supplied")
        pid = int(pid)
        assert pid != -1, "Killing all processes prohibited"
        if exec_id is not None:
            if isinstance(exec_id, dict):
                exec_id = exec_id['Id']
            try:
                exec_info = self._conn.exec_inspect(exec_id)
            except APIError as e:
                logger.warn(
                    "Failed to inspect exec session {} for killing. Skipping kill: {}"
                    .format(exec_id, str(e)))
                return
            if container_id:
                if container_id not in exec_info['ContainerID']:
                    raise ValueError(
                        "Supplied container id {} mismatched with exec container id: {}"
                        .format(container_id, exec_info['ContainerID']))
            else:
                container_id = exec_info['ContainerID']
            if not exec_info['Running']:
                logger.debug(
                    "Exec session {} no longer running. Skipping kill".format(
                        exec_id))
                return
        # perform kill
        kill_cmd_args = ['kill']
        if signal:
            kill_cmd_args.append('-{}'.format(signal))
        kill_cmd_args.append(str(pid))
        kill_cmd = ' '.join(kill_cmd_args)
        response = self.exec_cmd(container_id,
                                 'sh -c "{} 2>&1; echo $?"'.format(kill_cmd))
        lines = response.splitlines()
        rc = int(lines[-1])
        if rc != 0:
            reason = lines[0] if len(lines) > 1 else "reason unknown"
            logger.warn("Failed to kill pid {} in container {}: {}".format(
                pid, container_id, reason))
        else:
            logger.debug("Successfully killed pid {} in container {}".format(
                pid, container_id))

    def containers(self, **kwargs):
        return self._conn.containers(**kwargs)

    def stats(self, name, **kwargs):
        return self._conn.stats(name, **kwargs)

    def clean_graph(self, age=86400):
        """
        Clean graph storage to remove old containers and any unreferenced docker image layers
        """
        # Exit time is a free-form string; parse it with fairly coarse time granularity.
        pattern = ["month ago", "months ago", "year ago", "years ago"]
        if age >= SECONDS_PER_MINUTE:
            pattern += ["minutes ago", "minute ago"]
        if age >= SECONDS_PER_HOUR:
            pattern += ["hours ago", "hour ago"]
        if age >= SECONDS_PER_DAY:
            pattern += ["days ago", "day ago"]
        if age >= SECONDS_PER_WEEK:
            pattern += ["weeks ago", "week ago"]

        for c in self._conn.containers(filters={"status": "exited"}):
            if any([p in c["Status"] for p in pattern]):
                try:
                    self._remove(c)
                except Exception:
                    logger.exception("Failed to remove %s", c["Id"])

        for i in self._conn.images():
            if i["RepoTags"][0] == "<none>:<none>" and time.time(
            ) > i["Created"] + age:
                try:
                    self._conn.remove_image(i["Id"])
                except Exception:
                    # This is probably OK.
                    logger.debug("Failed to delete %s", i["Id"])

    def search(self, searchstr=None):
        if searchstr is None or searchstr == "":
            raise AXPlatformException(
                "Docker hub search string needs to a non-empty string")
        response = self._conn.search(searchstr)
        return [{
            "ctime": "",
            "repo": x['name'],
            "tag": "latest"
        } for x in response or []]

    def login(self, registry, username, password):
        """
        Returns a base64 encoded token of username and password
        only if login is successful else it raises exceptions
        """
        try:
            self._conn.login(username,
                             password=password,
                             registry=registry,
                             reauth=True)
        except APIError as e:
            code = e.response.status_code
            if code == 401:
                # on login failure it raises a docker.errors.APIError:
                # 401 Client Error: Unauthorized
                raise AXUnauthorizedException(e.explanation)
            elif code == 404:
                raise AXNotFoundException(e.explanation)
            elif code == 500:
                if "x509: certificate signed by unknown authority" in e.response.text:
                    raise AXIllegalArgumentException(
                        "Certificate signed by unknown authority for {}".
                        format(registry))
                else:
                    raise e
            else:
                raise e
        token = base64.b64encode("{}:{}".format(username, password))
        return token

    @staticmethod
    def generate_kubernetes_image_secret(registry, token):
        """
        Create the image pull secret by concatenating the secrets required
        for the passed token
        Args:
            registry: string
            token: base64 encoded

        Returns:
            base64 encoded string used for imagepull secrets
        """
        ret = {"auths": {registry: {"auth": token}}}
        return base64.b64encode(json.dumps(ret))

    # Internal implementations
    def _pull_with_caching(self, registry, name, tag, **kwargs):
        """
        Pull a new container with AX caching enabled.
        :param registry: DockerRegistry instance.
        :param name: Container short name.
        :param tag: Tag.
        :param kwargs: Other kwargs for pull.
                       Docker API requires tag to be in kwargs.
                       AX needs to process it and enforce tag to be separate.
        :return: True or False
        """
        assert "tag" not in kwargs, "%s" % kwargs

        if registry.user is not None and registry.passwd is not None:
            kwargs["auth_config"] = kwargs.get("auth_config", {
                "username": registry.user,
                "password": registry.passwd
            })

        return self._pull_with_retry(registry.servername, name, tag, **kwargs)

    def _pull_with_retry(self, registry, name, tag, **kwargs):
        return ax_retry(self._pull, self._retry, registry, name, tag, **kwargs)

    def _pull(self, registry, name, tag, **kwargs):
        """
        Do pull. Call docker API and check errors.
        :param registry: Registry host name.
        :param name: Container short name.
        :param tag: Tag.
        :param kwargs: Other pull args.
        :return: True or False.
        """
        # All must be set not empty.
        assert all([registry, name, tag]), "%s, %s, %s" % (registry, name, tag)

        repo = DockerImage(registry=registry, name=name).docker_repo()
        kwargs["tag"] = tag
        try:
            ret = self._conn.pull(repo, stream=True, **kwargs)
        except Exception:
            logger.exception("Failed to pull %s, %s", repo, tag)
            return False

        logger.info("Pull image %s:%s starting", repo, tag)
        # Search pull result to determine status. Must have digest and success message.
        has_digest = False
        has_image = False
        try:
            for l in ret:
                try:
                    progress = json.loads(l)
                    if progress["status"].startswith("Digest:"):
                        has_digest = True
                    if "Image is up to date" in progress[
                            "status"] or "Downloaded newer image" in progress[
                                "status"]:
                        has_image = True
                except (KeyError, ValueError):
                    logger.debug("Failed to parse pull progress line %s", l)
        except Exception:
            logger.exception("Failed to pull %s:%s", repo, tag)
            return False
        logger.info("Pull image %s:%s result %s %s", repo, tag, has_digest,
                    has_image)
        return has_digest and has_image

    def _push_with_retry(self, registry, name, tag):
        return ax_retry(self._push, self._retry, registry, name, tag)

    def _push(self, registry, name, tag):
        """
        Do push. Call docker API and check errors.
        :param registry: Registry host name.
        :param name: Container short name.
        :param tag: Tag.
        :return: True or False.
        """
        # All must be set not empty.
        assert all([registry, name, tag]), "%s, %s, %s" % (registry, name, tag)

        repo = DockerImage(registry=registry, name=name).docker_repo()
        try:
            ret = self._conn.push(repo, tag, stream=True)
        except Exception:
            logger.exception("Failed to push %s, %s", repo, tag)
            return False

        logger.info("Push image %s:%s starting", repo, tag)
        # Search push result to determine status. Must have digest.
        has_digest = False
        try:
            for l in ret:
                try:
                    progress = json.loads(l)
                    has_digest = progress["status"].startswith("%s: digest:" %
                                                               tag)
                except (KeyError, ValueError):
                    logger.debug("Failed to parse push progress line %s", l)
        except Exception:
            logger.exception("Failed to push %s:%s", repo, tag)
            return False
        logger.info("Push image %s:%s result %s", repo, tag, has_digest)
        return has_digest

    def _create(self, image, **kwargs):
        """
        Create a new container.

        :param image: (string) Container image with tag
        :param kwargs: AX and docker parameters.
        :return: container or None
        """
        # Docker API has two levels of dict. Top level specify mostly "create" configs.
        # One key at first level is "host_config". This defines second level "run" configs.
        # It's yet another dict. It was specified in docker run API and moved here.
        # We need to set both levels correctly.
        self._validate_config(kwargs)

        kwargs = self._parse_ax_create_config(kwargs)
        kwargs = self._parse_ax_host_config(kwargs)
        kwargs = self._remove_ax_config(kwargs)
        logger.debug("Final kwargs for create %s: %s", image, kwargs)
        try:
            return self._conn.create_container(image, **kwargs)
        except Exception:
            logger.exception("Failed to create container from %s %s", image,
                             kwargs)
            return None

    def _start(self, container):
        try:
            self._conn.start(container)
            return True
        except Exception:
            logger.exception("Failed to start container %s", container)
            return False

    def _stop(self, container, **kwargs):
        try:
            self._conn.stop(container, **kwargs)
        except NotFound:
            pass
        except Exception:
            logger.exception("Failed to stop %s", container)

    def _remove(self, container, **kwargs):
        try:
            self._conn.remove_container(container, v=True, **kwargs)
        except NotFound:
            pass
        except APIError as e:
            if "Conflict" in str(e):
                logger.error("Not removing running container %s", container)
            elif "device or resource busy" in str(e):
                # Work around https://github.com/google/cadvisor/issues/771
                logger.error("Container removal temporary failure. Retrying.")
                retry = AXRetry(retries=10,
                                delay=1,
                                retry_exception=(Exception, ),
                                success_exception=(NotFound, ))
                ax_retry(self._conn.remove_container,
                         retry,
                         container,
                         v=True,
                         force=True)
            else:
                logger.exception("Failed to remove container %s", container)
        except Exception:
            logger.exception("Failed to remove container %s", container)

    def _validate_config(self, config):
        if "volumes" in config:
            assert isinstance(
                config["volumes"],
                list), "Support only list of volumes %s" % config["volumes"]
        if "host_config" in config and "Binds" in config["host_config"]:
            assert isinstance(
                config["host_config"]["Binds"], list
            ), "Support only list of volumes %s" % config["host_config"]["Binds"]
        if "ports" in config:
            assert isinstance(
                config["ports"],
                list), "Support only list of ports %s" % config["ports"]
        if "host_config" in config and "port_bindings" in config["host_config"]:
            assert isinstance(
                config["host_config"]["port_bindings"], dict
            ), "Support only dict of port_bindings %s" % config["host_config"]["port_bindings"]
        if "environment" in config:
            assert isinstance(
                config["environment"], list
            ), "Support only list of environments %s" % config["environment"]

    def _parse_ax_create_config(self, config):
        if config.get("ax_daemon", False):
            config["detach"] = True

        if "ax_volumes" in config:
            axv = [v.split(":")[1] for v in config["ax_volumes"]]
            if "volumes" in config:
                assert isinstance(config["volumes"],
                                  list), "must be list {}".format(
                                      config["volumes"])
                config["volumes"] += axv
            else:
                config["volumes"] = axv

        if "ax_ports" in config:
            config["ports"] = config["ax_ports"].keys()

        return config

    def _parse_ax_host_config(self, config):
        ax_config = {}
        if config.get("ax_net_host", False):
            ax_config["network_mode"] = "host"

        if config.get("ax_privileged", False):
            ax_config["privileged"] = True

        if config.get("ax_host_namespace", False):
            ax_config["pid_mode"] = "host"

        if config.get("ax_daemon", False):
            ax_config["restart_policy"] = {
                "MaximumRetryCount": 0,
                "Name": "unless-stopped"
            }

        if "ax_volumes" in config:
            if "binds" in ax_config:
                assert isinstance(ax_config["binds"],
                                  list), "must be list {}".format(
                                      ax_config["binds"])
                ax_config["binds"] += config["ax_volumes"]
            else:
                ax_config["binds"] = config["ax_volumes"]

        if "ax_ports" in config:
            ax_config["port_bindings"] = config["ax_ports"]

        ax_host_config = self._conn.create_host_config(**ax_config)
        if "host_config" in config:
            config["host_config"].update(ax_host_config)
        else:
            config["host_config"] = ax_host_config
        return config

    def _remove_ax_config(self, config):
        for key in list(config.keys()):
            if key.startswith("ax_"):
                del config[key]
        return config
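
exec_kill() above recovers an exit status from inside a container by wrapping the command in sh -c "... 2>&1; echo $?" and reading the last output line. The sketch below shows the same pattern with plain docker-py; the function name and default socket path are assumptions for illustration, not part of the class above.

from docker import Client

def exec_with_rc(container, cmd, socket="unix://var/run/docker.sock"):
    """Run cmd inside container and return (exit_code, output)."""
    client = Client(base_url=socket)
    # The shell echoes its own exit status as the final output line.
    wrapped = 'sh -c "{} 2>&1; echo $?"'.format(cmd)
    exec_id = client.exec_create(container, wrapped)['Id']
    out = client.exec_start(exec_id)
    if isinstance(out, bytes):  # exec_start may return bytes on Python 3
        out = out.decode()
    lines = out.splitlines()
    return int(lines[-1]), "\n".join(lines[:-1])
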
Пример #54
0
class Two1ComposerContainers(Two1Composer):
    """ Manage machine-payable microservices in containers.
    """

    def __init__(self):
        self._connected = ComposerState.DISCONNECTED
        self.provider = TwentyOneProvider()
        self.default_wallet = Two1Wallet(self.wallet_file, self.provider)

    class ServiceManager:
        """ Query and modify user services persisting at cls.USER_SERVICES_FILE.
        """

        USER_SERVICES_FILE = os.path.join(Two1Composer.BASE_DIR, "user-services.json")

        class Image(namedtuple('Image', 'docker_hub_account repository tag')):

            def _asdict(self):
                # Fixes a bug for Python 3.4 users
                # https://bugs.python.org/issue24931
                'A new OrderedDict mapping field names to their values'
                return OrderedDict(zip(self._fields, self))

            @property
            def is_dockerhub_image(self):
                """ Returns: True iff Image instance has all fields.
                """
                return self.docker_hub_account and self.repository and self.tag

            @property
            def is_local_image(self):
                """ Returns: True iff Image instance doesn't have docker_hub_account but has all other fields.
                """
                return not self.docker_hub_account and self.repository and self.tag

            def __str__(self):
                """ Returns: Docker image name constructed from Image instance fields.
                """
                if self.is_dockerhub_image:
                    return '%s/%s:%s' % (self.docker_hub_account, self.repository, self.tag)
                elif self.is_local_image:
                    return '%s:%s' % (self.repository, self.tag)
                else:
                    raise ValueError()

            @classmethod
            def from_string(cls, image_name):
                """ Constructs an Image instance from a docker image name.

                Args:
                    image_name (str): A docker image name.

                Returns:
                    Image: An Image instance.
                """
                slashes = re.findall('/', image_name)
                colons = re.findall(':', image_name)

                if len(slashes) == 1:
                    if len(colons) == 1 and image_name.find('/') < image_name.find(':'):
                        docker_hub_account, rest = image_name.split('/')
                        repository, tag = rest.split(':')
                        return cls(docker_hub_account=docker_hub_account, repository=repository, tag=tag)
                    elif len(colons) == 0:
                        docker_hub_account, repository = image_name.split('/')
                        return cls(docker_hub_account=docker_hub_account, repository=repository, tag='latest')
                elif len(slashes) == 0:
                    if len(colons) == 1:
                        repository, tag = image_name.split(':')
                        return cls(docker_hub_account=None, repository=repository, tag=tag)
                    elif len(colons) == 0:
                        return cls(docker_hub_account=None, repository=image_name, tag='latest')
                raise ValueError()

        @classmethod
        def get_image(cls, service_name):
            """ Constructs an Image instance for a service.

            Args:
                service_name (str): The name of either a 21 service in the 21dotco/two1 repository or a user service
                                    added to ServiceManager.USER_SERVICES_FILE by ServiceManager.add_service.

            Returns:
                Image: An Image instance corresponding to the given service.
            """
            if service_name in cls.available_21_services():
                return cls.Image(
                    docker_hub_account='21dotco',
                    repository='two1',
                    tag=service_name if service_name in Two1Composer.BASE_SERVICES else 'service-%s' % service_name
                )
            elif service_name in cls.available_user_services():
                return cls.Image(**cls._get_user_service_dict()[service_name])
            else:
                raise ValueError()

        @classmethod
        def available_services(cls):
            """ Returns: All available service names.
            """
            return cls.available_21_services() | cls.available_user_services()

        @classmethod
        def available_21_services(cls):
            """ Returns: All available 21 services by querying Docker Hub.
            """
            service_image_data = requests.get(os.path.join(
                Two1Composer.DOCKERHUB_API_URL, Two1Composer.DOCKERHUB_REPO, 'tags')).json().get('results')
            return set([image_data['name'].split('service-')[1] for image_data in
                        service_image_data if re.match(r'^service-', image_data['name'])])

        @classmethod
        def available_user_services(cls):
            """ Returns: All available user services.
            """
            return set(cls._get_user_service_dict().keys())

        @classmethod
        def add_service(cls, service_name, image_name_string,
                        service_successfully_added_hook, service_already_exists_hook,
                        service_failed_to_add_hook):
            """ Adds a new service definition to ServiceManager.USER_SERVICES_FILE.

            Args:
                service_name (str): Name of the service definition to add.
                image_name_string (str): Docker image name for the service definition.
            """
            service_dict = cls._get_user_service_dict()
            if service_name in service_dict:
                service_already_exists_hook(service_name)
            else:
                service_dict[service_name] = cls.Image.from_string(image_name_string)._asdict()
                if cls._commit_user_service_dict(service_dict):
                    service_successfully_added_hook(service_name)
                else:
                    service_failed_to_add_hook(service_name)

        @classmethod
        def remove_service(cls, service_name,
                           service_successfully_removed_hook,
                           service_does_not_exists_hook,
                           service_failed_to_remove_hook):
            """ Removes a service definition from ServiceManager.USER_SERVICES_FILE.

            Args:
                service_name (str): Name of the service definition to remove.
            """
            service_dict = cls._get_user_service_dict()
            if service_name in service_dict:
                del service_dict[service_name]
                if cls._commit_user_service_dict(service_dict):
                    service_successfully_removed_hook(service_name)
                else:
                    service_failed_to_remove_hook(service_name)
            else:
                service_does_not_exists_hook(service_name)

        @classmethod
        def _get_user_service_dict(cls):
            """ Returns: ServiceManager.USER_SERVICES_FILE as a dict.
            """
            try:
                with open(cls.USER_SERVICES_FILE, 'r') as data_file:
                    service_dict = json.load(data_file)
            except:
                return {}
            else:
                return service_dict

        @classmethod
        def _commit_user_service_dict(cls, service_dict):
            """ Writes a dict of user services to ServiceManager.USER_SERVICES_FILE in json format.

            Args:
                service_dict (dict): A dictionary of user services of the form
                                 {service_name: _asdict representation of the corresponding Image instance}.

            Returns:
                bool: True iff no exceptions were raised when writing service_dict to ServiceManager.USER_SERVICES_FILE
                      as json.
            """
            try:
                with open(cls.USER_SERVICES_FILE, 'w') as outfile:
                    json.dump(service_dict, outfile)
            except:
                return False
            else:
                return True

    class ComposerYAMLContext(YamlDataContext):
        """ Context manager for composer YAML service file.
        """

        def __init__(self, username=None, password=None, server_port=None, mnemonic=None):
            self.username = username
            self.password = password
            self.server_port = server_port
            self.mnemonic = mnemonic
            super().__init__(Two1Composer.COMPOSE_FILE)

        def __enter__(self):
            sup = super().__enter__()
            for service in self.data['services']:
                service_definition = self.data['services'][service]
                if 'environment' in service_definition:

                    if 'TWO1_USERNAME' in service_definition['environment'] and self.username is not None:
                        service_definition['environment']['TWO1_USERNAME'] = self.username

                    if 'TWO1_PASSWORD' in service_definition['environment'] and self.password is not None:
                        service_definition['environment']['TWO1_PASSWORD'] = self.password

                    if 'TWO1_WALLET_MNEMONIC' in service_definition['environment'] and self.mnemonic is not None:
                        service_definition['environment']['TWO1_WALLET_MNEMONIC'] = self.mnemonic

                    if 'PAYMENT_SERVER_IP' in service_definition['environment'] and self.server_port is not None:
                        rest = service_definition['environment']['PAYMENT_SERVER_IP'].rsplit(':', maxsplit=1)[0]
                        service_definition['environment']['PAYMENT_SERVER_IP'] = '%s:%s' % (rest, self.server_port)
            return sup

        def _filler(self):
            """ Create the base service description file.
            """
            return {
                'version': '2',
                'services': {
                    'base': {
                        'image': '%s:base' % Two1Composer.DOCKERHUB_REPO,
                    },
                    'router': {
                        'image': '%s:router' % Two1Composer.DOCKERHUB_REPO,
                        'container_name': 'sell_router',
                        'restart': 'always',
                        'volumes': [
                            Two1Composer.SITES_ENABLED_PATH + ":/etc/nginx/sites-enabled",
                            Two1Composer.SITES_AVAILABLE_PATH + ":/etc/nginx/sites-available",
                        ],
                        'ports': ['%s:%s' % (self.server_port, self.server_port)],
                        'links': [
                            'payments:payments',
                        ],
                    },
                    'payments': {
                        'image': '%s:payments' % Two1Composer.DOCKERHUB_REPO,
                        'depends_on': ['base'],
                        'container_name': 'sell_payments',
                        'restart': 'always',
                        'environment': {
                            "TWO1_USERNAME": str(self.username),
                            "TWO1_PASSWORD": str(self.password),
                            "TWO1_WALLET_MNEMONIC": str(self.mnemonic)
                        },
                        'volumes': [
                            Two1Composer.DB_DIR + ":/usr/src/db/"
                        ],
                        'logging': {
                            'driver': 'json-file'
                        },
                        'cap_drop': [
                            'ALL'
                        ],
                        'cap_add': [
                            'DAC_OVERRIDE',
                            'NET_RAW',
                        ],
                    }
                }
            }

    # public api
    def connect(self, machine_env, host, machine_config_file):
        """ Connect service composer to machine layer.

        Args:
            machine_env (dict): Environment dictionary for the docker client of the machine layer.
            host (str): Hostname of the machine layer docker daemon.
            machine_config_file (str): Path to the config file for the machine layer.
        """
        self.machine_env = machine_env
        self.machine_host = host
        with open(machine_config_file, 'r') as f:
            self.machine_config = json.load(f)
        self.docker_client = Client(**docker_env(assert_hostname=False,
                                                 environment=self.machine_env))
        self._connected = ComposerState.CONNECTED

    def initialize_server(self, username, password, server_port, wallet=None):
        """ Initialize micropayments server.

        Define boilerplate services, networks, and volumes composer file
        and nginx server config.

        Generates a wallet mnemonic if non-existent.

        Args:
            username (str): Username to log in with.
            password (str): Password to log in with.
            server_port (int): The server port that the router is running on.
            wallet: The wallet to use for the payments server and subsequent services.
        """
        self._create_base_server(server_port)  # create base router server config
        self._create_payments_route()  # create route to payments server

        new_wallet = None  # rv[1], not None if mnemonic is replaced in this function

        # generate service description (yaml)
        with self.ComposerYAMLContext(username, password, server_port) as composer_yaml:
            try:
                mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                if not mnemonic or mnemonic == str(None):  # if mnemonic is Falsy or uninitialized
                    raise ValueError()
            except (KeyError, ValueError):  # catches if mnemonic is Falsy or doesn't exist in dict tree
                new_machine_wallet = self.default_wallet.create(self.provider)[1]
                composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC'] = new_machine_wallet
                new_wallet = new_machine_wallet

        return 0, new_wallet

    def pull_image(self, image,
                   image_sucessfully_pulled_hook, image_failed_to_pull_hook, image_is_local_hook,
                   image_is_malformed_hook):
        """ Pulls an Image instance iff it is a Docker Hub image.

        Args:
            image (Image): An Image instance.
        """
        if image.is_dockerhub_image:
            try:
                self.docker_client.pull('%s/%s' % (image.docker_hub_account, image.repository),
                                        tag=image.tag, stream=False)
            except:
                image_failed_to_pull_hook(image)
            else:
                image_sucessfully_pulled_hook(image)
        elif image.is_local_image:
            image_is_local_hook(image)
        else:
            image_is_malformed_hook(image)

    def start_services(self, service_names,
                       failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook, failed_to_up_hook,
                       up_hook):
        """ Start selected services.

        Args:
            service_names (list): List of service names to start.
            failed_to_start_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             fails to start.
            started_hook (Callable): A callable hook that takes in a service name and is run when said service starts.
            failed_to_restart_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               fails to restart.
            restarted_hook (Callable): A callable hook that takes in a service name and is run when said service
                                       restarts.
            failed_to_up_hook (Callable): A callable hook that takes in a service name and is run when said service
                                          fails to go up.
            up_hook (Callable): A callable hook that takes in a service name and is run when said service goes up.
        """
        self._start_sell_service('base', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('router', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('payments', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

        # Attempt to start all market services
        for service_name in service_names:
            try:
                image = self.ServiceManager.get_image(service_name)
                container_name = self.service_name_2_container_name(service_name)

                # create nginx routes for service_name
                self._create_service_route(service_name)
                # add service_name to docker compose file
                with self.ComposerYAMLContext() as docker_compose_yaml:
                    username = docker_compose_yaml['services']['payments']['environment']['TWO1_USERNAME']
                    password = docker_compose_yaml['services']['payments']['environment']['TWO1_PASSWORD']
                    mnemonic = docker_compose_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                    docker_compose_yaml['services'][service_name] = {
                        'image': str(image),
                        'container_name': container_name,
                        'depends_on': ['base'],
                        'restart': 'always',
                        'environment': {
                            "TWO1_USERNAME": str(username),
                            "TWO1_PASSWORD": str(password),
                            "TWO1_WALLET_MNEMONIC": str(mnemonic),
                            "SERVICE": str(service_name),
                            "PAYMENT_SERVER_IP":
                                "http://%s:%s" % (self.machine_host, self.machine_config["server_port"])
                        },
                        'volumes': [
                            Two1Composer.DB_DIR + ":/usr/src/db/"
                        ],
                        'logging': {
                            'driver': 'json-file'
                        },
                        'cap_drop': [
                            'ALL'
                        ],
                        'cap_add': [
                            'DAC_OVERRIDE',
                            'NET_RAW',
                        ],
                    }
                    link_str = '%s:%s' % (service_name, service_name)
                    if link_str not in docker_compose_yaml['services']['router']['links']:
                        docker_compose_yaml['services']['router']['links'].append(link_str)
            except:
                # something went wrong while configuring service_name
                failed_to_start_hook(service_name)
            else:
                # attempt to build service_name
                self._start_sell_service(service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

    def _start_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook,
                            timeout=Two1Composer.SERVICE_START_TIMEOUT):
        try:
            subprocess.check_output(["docker-compose", "-f", Two1Composer.COMPOSE_FILE, "up", "-d", service_name],
                                    stderr=subprocess.DEVNULL, env=self.machine_env)
        except subprocess.CalledProcessError:
            failed_to_start_hook(service_name)
        else:
            started_hook(service_name)
            if service_name == 'router':
                time.sleep(5)
            elif service_name != 'router' and service_name != 'base':
                start = time.clock()

                exec_id = self.docker_client.exec_create('sell_router', "curl %s:5000" % service_name)['Id']
                self.docker_client.exec_start(exec_id)
                running = True

                while time.clock() - start < timeout and running is True:
                    running = self.docker_client.exec_inspect(exec_id)['Running']

                if running is True:
                    failed_to_up_hook(service_name)
                else:
                    up_hook(service_name)

    def _restart_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_restart_hook,
                              restarted_hook, failed_to_up_hook, up_hook):
        try:
            self.docker_client.stop("sell_%s" % service_name)
        except:
            is_restart = False
        else:
            is_restart = True

        self._start_sell_service(service_name, failed_to_restart_hook if is_restart else failed_to_start_hook,
                                 restarted_hook if is_restart else started_hook, failed_to_up_hook, up_hook)

    def stop_services(self, service_names,
                      service_found_stopped_and_removed_hook,
                      service_failed_to_stop_hook,
                      service_failed_to_be_removed_hook,
                      service_not_found_hook):
        """ Stop selected services and remove containers.

        Args:
            service_names (set): Set of services to stop.
            service_found_stopped_and_removed_hook (Callable): A callable hook that takes in a service name and is run
                                                               when said service is found, stopped, and removed.
            service_failed_to_stop_hook (Callable): A callable hook that takes in a service name and is run when said
                                                    service fails to be stopped.
            service_failed_to_be_removed_hook (Callable): A callable hook that takes in a service name and is run when
                                                          said service fails to be removed.
            service_not_found_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               isn't found.

        """
        for service_name in service_names:
            if service_name in self.get_running_services():
                container_name = self.service_name_2_container_name(service_name)
                try:
                    self.docker_client.stop(container_name)
                except:
                    service_failed_to_stop_hook(service_name)
                else:
                    try:
                        self.docker_client.remove_container(container_name)
                    except:
                        service_failed_to_be_removed_hook(service_name)
                    else:
                        service_found_stopped_and_removed_hook(service_name)
            else:
                service_not_found_hook(service_name)

    def silently_force_stop_all_services(self):
        running_container_names = self.docker_client.containers(filters={"status": "running"})
        for container_name in running_container_names:
            self.docker_client.remove_container(container_name, force=True)

    @staticmethod
    def container_names_2_service_names(container_definitions):
        """ Return service names from container definitions.

        See service_name_2_container_name for the inverse operation but on one service name.

        Args:
            container_definitions (list): List of container descriptions as returned by self.docker_client.containers.

        Returns:
            set: Set of service names generated by removing the 'sell_' prefix from the containers' names.
        """
        return set([container_definition['Names'][0][6:] for container_definition in container_definitions])

    @staticmethod
    def service_name_2_container_name(service_name):
        """ Generates a container name from a service name by prepending 'sell_'
        """
        return 'sell_%s' % service_name

    def status_services(self, services):
        """ Gets running status of specified services.

        Args:
            services (list): List of services to get status for.
        """

        existent_services = self.get_services(all=True)
        running_services = self.get_services(filters={"status": "running"})
        exited_services = self.get_services(filters={"status": "exited"})

        return {
            "running": running_services & services,
            "exited": exited_services & services,
            "nonexistent": services - existent_services
        }

    def get_services(self, *args, **kwargs):
        """ Call docker_client.containers | convert resulting container names to service names | remove base services
        """
        return self.container_names_2_service_names(
            self.docker_client.containers(*args, **kwargs)
        ) - Two1Composer.BASE_SERVICES

    def get_running_services(self):
        """ Get list of currently running services that aren't 21 base services.

        Returns:
            set: Set of currently running services.
        """
        return self.get_services(filters={"status": "running"})

    def status_router(self, service_running_hook, service_unknown_state_hook):
        """ Get status of Nginx router container.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        if len(self.docker_client.containers(all=True, filters={"name": "sell_router", "status": "running"})) == 1:
            service_running_hook("router")
        else:
            service_unknown_state_hook("router")

    def status_payments_server(self, service_running_hook, service_unknown_state_hook):
        """ Get status of payment channels server.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        if len(self.docker_client.containers(all=True, filters={"name": "sell_payments", "status": "running"})) == 1:
            service_running_hook("payments")
        else:
            service_unknown_state_hook("payments")

    @staticmethod
    def _create_base_server(server_port):
        """ Create nginx base server config.

        Args:
            server_port (int): port for 21 sell server.
        """
        try:
            # create nginx router dirs
            shutil.rmtree(Two1Composer.SITES_ENABLED_PATH, ignore_errors=True)
            shutil.rmtree(Two1Composer.SITES_AVAILABLE_PATH, ignore_errors=True)
            os.makedirs(Two1Composer.SITES_ENABLED_PATH, exist_ok=True)
            os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)

            # create base nginx server
            with open(os.path.join(Two1Composer.SITES_ENABLED_PATH,
                                   "two1baseserver"), 'w') as f:
                f.write("server {\n"
                        "    listen " + str(server_port) + ";\n"
                        "    include /etc/nginx/sites-available/*;\n"
                        "}\n"
                        )
        except Exception:
            raise exceptions.Two1ComposerServiceDefinitionException()

    @staticmethod
    def _create_service_route(service):
        """ Create route for container service.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, service), 'w') as f:
                f.write("location /" + service + " {\n"
                        "    rewrite ^/" + service + "/?(.*) /$1 break;\n"
                        "    proxy_pass http://" + service + ":" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception:
            raise exceptions.Two1ComposerRouteException()

    @staticmethod
    def _create_payments_route():
        """ Add route to payments server.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            # write nginx route for payments server
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, "payments"), 'w') as f:
                f.write("location /payment {\n"
                        "    proxy_pass http://payments:" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception:
            raise exceptions.Two1ComposerRouteException()

    def publish_service(self, service_name, host_override, rest_client, published_hook,
                        already_published_hook, failed_to_publish_hook,
                        unknown_publish_error_hook):
        strm, stat = self.docker_client.get_archive('sell_%s' % service_name,
                                                    '/usr/src/app/manifest.yaml')

        with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
            manifest = yaml.load(tf.extractfile(stat[u'name']).read().decode())
        manifest['host'] = host_override

        try:
            resp = rest_client.publish({"manifest": manifest,
                                        "marketplace": "21mkt"})
        except ServerRequestError as e:
            if e.status_code == 403 and e.data.get("error") == "TO600":
                already_published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)
        except:
            unknown_publish_error_hook(service_name)
        else:
            if resp.status_code == 201:
                published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)

    def read_server_config(self):
        try:
            with open(Two1Composer.COMPOSE_FILE) as f:
                return yaml.load(f)

        except FileNotFoundError:
            return {}

    def get_services_mnemonic(self):
        if os.path.isfile(Two1Composer.COMPOSE_FILE):
            with self.ComposerYAMLContext() as composer_yaml:
                try:
                    maybe_mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                except KeyError:
                    rv = None
                else:
                    rv = maybe_mnemonic
        else:
            rv = None
        return rv
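The publish_service method above pulls manifest.yaml out of a running container with get_archive, which hands back a raw tar stream plus a stat dict for the requested path. A minimal stand-alone sketch of that same technique (the container name and file path below are placeholders, not part of the original example):

import tarfile
from io import BytesIO

import yaml
from docker import Client


def read_yaml_from_container(container_name, path_in_container):
    # get_archive returns (raw tar stream, stat dict) for the given path
    client = Client(base_url='unix://var/run/docker.sock')
    strm, stat = client.get_archive(container_name, path_in_container)
    with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
        # stat['name'] is the archive member name for the requested path
        return yaml.safe_load(tf.extractfile(stat['name']).read().decode())


# e.g. read_yaml_from_container('sell_myservice', '/usr/src/app/manifest.yaml')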
Example #55
0
class DockerCluster(object):
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.
    """

    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'

    def __init__(self, master_host, slave_hosts,
                 local_mount_dir, docker_mount_dir):
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        self.master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [slave + '-' + str(uuid.uuid4())
                       for slave in slave_hosts]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self.mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 240
        self.client = Client(**kwargs)

        self._DOCKER_START_TIMEOUT = 30
        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        return self.slaves + [self.master]

    def get_master(self):
        return self.master

    def all_internal_hosts(self):
        """The difference between this method and all_hosts() is that
        all_hosts() returns the unique, "outside facing" hostnames that
        docker uses. On the other hand all_internal_hosts() returns the
        more human readable host aliases for the containers used internally
        between containers. For example the unique master host will
        look something like 'master-07d1774e-72d7-45da-bf84-081cfaa5da9a',
        whereas the internal master host will be 'master'.

        Returns:
            List of all internal hosts with the random suffix stripped out.
        """
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        return os.path.join(self.local_mount_dir,
                            self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        matches = [unique_host for unique_host in self.all_hosts()
                   if unique_host.startswith(host)]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def create_image(self, path_to_dockerfile_dir, image_tag, base_image,
                     base_image_tag=None):
        self.fetch_image_if_not_present(base_image, base_image_tag)
        output = self._execute_and_wait(self.client.build,
                                        path=path_to_dockerfile_dir,
                                        tag=image_tag,
                                        rm=True)
        if not self._is_image_present_locally(image_tag, 'latest'):
            raise OSError('Unable to build image %s: %s' % (image_tag, output))

    def fetch_image_if_not_present(self, image, tag=None):
        if not tag and not self.client.images(image):
            self._execute_and_wait(self.client.pull, image)
        elif tag and not self._is_image_present_locally(image, tag):
            self._execute_and_wait(self.client.pull, image, tag)

    def _is_image_present_locally(self, image_name, tag):
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                if image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self, master_image, slave_image=None,
                         cmd=None, **kwargs):
        self.tear_down()
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image,
                                          cmd, **kwargs)
        self._ensure_docker_containers_started(master_image)

    def tear_down(self):
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()

    def _tear_down_container(self, container_name):
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        return host_name

    def _remove_host_mount_dirs(self):
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        for container_name in self.all_hosts():
            try:
                os.makedirs(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        output = ''
        for line in ret:
            output += line
        return output

    def _create_and_start_containers(self, master_image, slave_image=None,
                                     cmd=None, **kwargs):
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(
                    slave_image, container_name,
                    container_name.split('-')[0], cmd
                )
                self.client.start(container_name,
                                  binds={container_mount_dir:
                                         {'bind': self.mount_dir,
                                          'ro': False}},
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(
            master_image, self.master, hostname=self.internal_master,
            cmd=cmd
        )
        self.client.start(self.master,
                          binds={master_mount_dir:
                                 {'bind': self.mount_dir,
                                  'ro': False}},
                          links=zip(self.slaves, self.slaves), **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self, image, container_name, hostname=None,
                          cmd=None):
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               mem_limit='2g')

    def _add_hostnames_to_slaves(self):
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            self.exec_cmd_on_host(
                host,
                '/bin/bash -c \'echo "%s" >> /etc/hosts\''
                % additions_to_etc_hosts
            )

    def _ensure_docker_containers_started(self, image):
        centos_based_images = [BASE_TD_IMAGE_NAME]

        timeout = 0
        is_host_started = {}
        for host in self.all_hosts():
            is_host_started[host] = False
        while timeout < self._DOCKER_START_TIMEOUT:
            for host in self.all_hosts():
                atomic_is_started = True
                atomic_is_started &= \
                    self.client.inspect_container(host)['State']['Running']
                if image in centos_based_images or \
                        image.startswith(self.IMAGE_NAME_BASE):
                    atomic_is_started &= \
                        self._are_centos_container_services_up(host)
                is_host_started[host] = atomic_is_started
            if not DockerCluster._are_all_hosts_started(is_host_started):
                timeout += 1
                sleep(1)
            else:
                break
        if timeout == self._DOCKER_START_TIMEOUT:
            raise DockerClusterException(
                'Docker container timed out on start.' + str(is_host_started))

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        all_started = True
        for host in host_started_map.keys():
            all_started &= host_started_map[host]
        return all_started

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(
                host, 'grep app-admin /etc/passwd'
            )
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, raise_error=True, tty=False):
        ex = self.client.exec_create(self.__get_unique_host(host), cmd,
                                     tty=tty)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_master_image_name(cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            '%s_master' % (cluster_type))

    @staticmethod
    def _get_slave_image_name(cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            '%s_slave' % (cluster_type))

    @staticmethod
    def start_bare_cluster():
        dc = DockerCluster
        master_name = dc._get_master_image_name(dc.BARE_CLUSTER_TYPE)
        slave_name = dc._get_slave_image_name(dc.BARE_CLUSTER_TYPE)
        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        if not dc._check_for_images(master_name, slave_name):
            centos_cluster.create_image(
                BASE_TD_DOCKERFILE_DIR,
                master_name,
                BASE_IMAGE_NAME,
                BASE_IMAGE_TAG
            )

            centos_cluster.create_image(
                BASE_TD_DOCKERFILE_DIR,
                slave_name,
                BASE_IMAGE_NAME,
                BASE_IMAGE_TAG
            )

        centos_cluster.start_containers(master_name, slave_name)

        return centos_cluster

    @staticmethod
    def start_existing_images(cluster_type):
        dc = DockerCluster
        master_name = dc._get_master_image_name(cluster_type)
        slave_name = dc._get_slave_image_name(cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            return None

        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        centos_cluster.start_containers(master_name, slave_name)
        return centos_cluster

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name):
        client = Client(timeout=180)
        images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            if master_image_name in image['RepoTags'][0]:
                has_master_image = True
            if slave_image_name in image['RepoTags'][0]:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, cluster_type):
        self.client.commit(self.master,
                           self._get_master_image_name(cluster_type))
        self.client.commit(self.slaves[0],
                           self._get_slave_image_name(cluster_type))

    def run_script_on_host(self, script_contents, host):
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=True)

    def write_content_to_host(self, content, path, host):
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host, 'cp %s %s' % (os.path.join(self.mount_dir, filename),
                                dest_dir))

    def copy_to_host(self, source_path, dest_host):
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties',
                worker
            )

    def postinstall(self, installer):
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)
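exec_cmd_on_host above shows the usual docker-py exec pattern: exec_create, exec_start, then exec_inspect for the exit code. A condensed, self-contained sketch of just that pattern (the container name and command in the usage line are placeholders):

from docker import Client


def run_in_container(container, cmd, tty=False):
    client = Client(base_url='unix://var/run/docker.sock')
    ex = client.exec_create(container, cmd, tty=tty)
    output = client.exec_start(ex['Id'], tty=tty)      # blocks until the command exits
    exit_code = client.exec_inspect(ex['Id'])['ExitCode']
    if exit_code:
        raise OSError(exit_code, output)
    return output


# e.g. run_in_container('master', 'cat /etc/hosts')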
Example #56
0
class TestConnection(unittest.TestCase):

    def setUp(self):
        # logging.basicConfig(level=logging.DEBUG)
        self.docker = Client(base_url='unix://var/run/docker.sock')
        # self.volume_name = "taskc_fixture_pki"
        try:
            self.docker.remove_container("taskc_test", force=True)
        except APIError as e:
            logging.error(e)
        # volume = self.docker.create_volume(self.volume_name)
        # logging.debug(volume)
        pki_abs_path = os.path.abspath("taskc/fixture/pki")
        host_config = self.docker.create_host_config(
            binds=['{}:/var/lib/taskd/pki'.format(pki_abs_path)],
            publish_all_ports=True)
        self.container = self.docker.create_container(
            "jrabbit/taskd", volumes=["/var/lib/taskd/pki"],
            name="taskc_test", host_config=host_config)
        # print(self.container)
        self.docker.start(self.container["Id"])
        time.sleep(1)
        our_exec = self.docker.exec_create(self.container["Id"], "taskd add user Public test_user")
        self.tc = TaskdConnection()
        o = self.docker.exec_start(our_exec['Id'])
        logging.debug(o)
        #bytes
        our_uuid = o.split(b'\n')[0].split()[-1]
        if six.PY3:
            our_uuid = our_uuid.decode("utf8")
        self.tc.uuid = our_uuid
        logging.debug("Type of uuid: %s", type(self.tc.uuid))

        self.tc.server = "localhost"
        c = self.docker.inspect_container("taskc_test")
        
        self.tc.port = int(c['NetworkSettings']['Ports']['53589/tcp'][0]['HostPort'])
        # self.tc.uuid = os.getenv("TEST_UUID")
        self.tc.group = "Public"
        self.tc.username = "******"
        self.tc.client_cert = "taskc/fixture/pki/client.cert.pem"
        self.tc.client_key = "taskc/fixture/pki/client.key.pem"
        self.tc.cacert_file = "taskc/fixture/pki/ca.cert.pem"
        time.sleep(1)

    def test_connect(self):

        self.tc._connect()
        # print self.tc.conn.getpeername()
        self.assertEqual(self.tc.conn.getpeername(), ('127.0.0.1', self.tc.port))
        # make sure we're on TLS v2 per spec
        self.assertEqual(self.tc.conn.context.protocol, 2)
        self.tc.conn.close()
        # from IPython import embed
        # embed()

    def test_put(self):
        assert self.tc.uuid
        self.tc.put("")
        tasks = """{"description":"hang up posters","entry":"20141130T081652Z","status":"pending","uuid":"0037aa92-45e5-44a6-8f34-2f92989f173a"}
{"description":"make pb ramen","entry":"20141130T081700Z","status":"pending","uuid":"dd9b71db-f51c-4026-9e46-bb099df8dd3f"}
{"description":"fold clothes","entry":"20141130T081709Z","status":"pending","uuid":"d0f53865-2f01-42a8-9f9e-3652c63f216d"}"""
        resp = self.tc.put(tasks)
        self.assertEqual(resp.status_code, 200)
        # might not be correct depends on state of taskd

    if six.PY3:
        def test_cadata(self):
            "This doesn't work in python2.7??"
            self.tc.cacert_file = False
            with open("taskc/fixture/pki/ca.cert.pem") as ca:
                self.tc.cacert = ca.read()
            self.tc._connect()
            # print self.tc.conn.getpeername()
            self.assertEqual(self.tc.conn.getpeername(), ('127.0.0.1', self.tc.port))
            # make sure we're on TLS v2 per spec
            self.assertEqual(self.tc.conn.context.protocol, 2)
            self.tc.conn.close()

    def tearDown(self):
        print(self.docker.logs(self.container['Id'], stdout=True, stderr=True))
        self.docker.remove_container(self.container['Id'], force=True)
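setUp above resolves the host port that Docker assigned when publish_all_ports was set by reading NetworkSettings.Ports from inspect_container. A minimal sketch of that lookup on its own (the container name and internal port are assumptions taken from this test):

from docker import Client


def published_host_port(container_name, internal_port='53589/tcp'):
    client = Client(base_url='unix://var/run/docker.sock')
    info = client.inspect_container(container_name)
    # Ports maps 'port/proto' to a list of {'HostIp': ..., 'HostPort': ...} bindings
    binding = info['NetworkSettings']['Ports'][internal_port][0]
    return int(binding['HostPort'])


# e.g. published_host_port('taskc_test')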
Example #57
0
            id_container = raw_input("Digite o ID do container: ")
            heydoc.start(id_container)
            print "Container vai ser iniciado"
        elif resposta_menu == 5:
            show_containers()
            id_container = raw_input("Desligar container ID: ")
            heydoc.stop(id_container)
        elif resposta_menu == 6:
            print "Nao quero remover containers"
        elif resposta_menu == 7:
            show_containers()
            container = raw_input(
                "Which container should the command run in: ")
            comando = raw_input("Which command do you want to run?: ")
            create = heydoc.exec_create(container, comando)
            heydoc.exec_start(create.get("Id"))
        elif resposta_menu == 8:
            print " TESTE"
            for c in heydoc.containers(all=True):
                name = str(c.get("Names"))
                ids = c.get("Id")
                print "ID: %s | Name: %s" % (ids, name)
            numero_container = raw_input("Digite o ID do container: ")
            print json.dumps(heydoc.inspect_container(numero_container),
                             indent=4,
                             sort_keys=True)
        elif resposta_menu == 9:
            stay_on_menu = False
    except Exception as e:
        print "Error: %s" % e
Example #58
0
class Docker(SnapshotableContainerBackend, SuspendableContainerBackend):
    """
    Docker container backend powered by docker-py bindings.
    """
    """
    The prefix that is prepended to the name of created containers.
    """
    CONTAINER_NAME_PREFIX = 'coco-'
    """
    The prefix that is prepended to the name of created container snapshots.
    """
    CONTAINER_SNAPSHOT_NAME_PREFIX = 'snapshot-'

    def __init__(self,
                 base_url='unix://var/run/docker.sock',
                 version=None,
                 registry=None):
        """
        Initialize a new Docker container backend.

        :param base_url: The URL or unix path to the Docker API endpoint.
        :param version: The Docker API version number (see docker version).
        :param registry: If set, created images will be pushed to this registry.
        """
        try:
            self._client = Client(base_url=base_url,
                                  timeout=600,
                                  version=version)
            self._registry = registry
        except Exception as ex:
            raise ConnectionError(ex)

    def container_exists(self, container, **kwargs):
        """
        :inherit.
        """
        try:
            self._client.inspect_container(container)
            return True
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                return False
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def container_image_exists(self, image, **kwargs):
        """
        :inherit.
        """
        try:
            image = self._client.inspect_image(image)
            return True
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                return False
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def container_is_running(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        try:
            return self._client.inspect_container(container).get(
                'State', {}).get('Running', {}) is True
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def container_is_suspended(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        try:
            return self._client.inspect_container(container).get(
                'State', {}).get('Paused', {}) is True
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def container_snapshot_exists(self, snapshot, **kwargs):
        """
        :inherit.
        """
        return self.container_image_exists(snapshot, **kwargs)

    def create_container(self,
                         username,
                         uid,
                         name,
                         ports,
                         volumes,
                         cmd=None,
                         base_url=None,
                         image=None,
                         clone_of=None,
                         **kwargs):
        """
        :inherit.
        """
        name = "%su%i-%s" % (self.CONTAINER_NAME_PREFIX, uid, name)
        if self.container_exists(name):
            raise ContainerBackendError(
                "A container with that name already exists")
        if clone_of is not None and not self.container_exists(clone_of):
            raise ContainerNotFoundError(
                "Base container for the clone does not exist")

        # cloning
        if clone_of:
            # TODO: some way to ensure no regular image is created with that name
            image = self.create_container_image(clone_of,
                                                'for-clone-' + name + '-at-' +
                                                str(int(time.time())),
                                                push=False)
            image_pk = image.get(ContainerBackend.KEY_PK)
        else:
            image_pk = image
        # bind mounts
        mount_points = [
            vol.get(ContainerBackend.VOLUME_KEY_TARGET) for vol in volumes
        ]
        binds = map(
            lambda bind: "%s:%s" %
            (bind.get(ContainerBackend.VOLUME_KEY_SOURCE),
             bind.get(ContainerBackend.VOLUME_KEY_TARGET)), volumes)
        # port mappings
        port_mappings = {}
        for port in ports:
            port_mappings[port.get(
                ContainerBackend.PORT_MAPPING_KEY_INTERNAL)] = (
                    port.get(ContainerBackend.PORT_MAPPING_KEY_ADDRESS),
                    port.get(ContainerBackend.PORT_MAPPING_KEY_EXTERNAL))

        container = None
        try:
            if self._registry and not clone_of:
                parts = image_pk.split('/')
                if len(parts) > 2:  # includes registry
                    repository = parts[0] + '/' + parts[1] + '/' + parts[
                        2].split(':')[0]
                    tag = parts[2].split(':')[1]
                else:
                    repository = image_pk.split(':')[0]
                    tag = image_pk.split(':')[1]
                # FIXME: should be done automatically
                self._client.pull(repository=repository, tag=tag)

            container = self._client.create_container(
                image=image_pk,
                command=cmd,
                name=name,
                ports=[
                    port.get(ContainerBackend.PORT_MAPPING_KEY_INTERNAL)
                    for port in ports
                ],
                volumes=mount_points,
                host_config=docker_utils.create_host_config(
                    binds=binds, port_bindings=port_mappings),
                environment={
                    'OWNER': username,
                    'BASE_URL': base_url
                },
                detach=True)
            container = self.get_container(container.get('Id'))
            self.start_container(container.get(ContainerBackend.KEY_PK))
        except Exception as ex:
            raise ContainerBackendError(ex)

        if clone_of is None:
            ret = container
        else:
            ret = {
                ContainerBackend.CONTAINER_KEY_CLONE_CONTAINER: container,
                ContainerBackend.CONTAINER_KEY_CLONE_IMAGE: image
            }
        return ret

    def create_container_image(self, container, name, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        full_image_name = self.get_internal_container_image_name(
            container, name)
        if self.container_image_exists(full_image_name):
            raise ContainerBackendError(
                "An image with that name already exists for the given container"
            )

        if self._registry:
            parts = full_image_name.split('/')
            registry = parts[0]
            repository = parts[1] + '/' + parts[2].split(':')[0]
            tag = parts[2].split(':')[1]
            commit_name = registry + '/' + repository
        else:
            repository = full_image_name.split(':')[0]
            tag = full_image_name.split(':')[1]
            commit_name = repository

        try:
            self._client.commit(container=container,
                                repository=commit_name,
                                tag=tag)
            if self._registry and kwargs.get('push', True):
                self._client.push(
                    repository=full_image_name,
                    stream=False,
                    insecure_registry=True  # TODO: constructor?
                )
            return {ContainerBackend.KEY_PK: full_image_name}
        except Exception as ex:
            print(ex)
            raise ContainerBackendError(ex)

    def create_container_snapshot(self, container, name, **kwargs):
        """
        :inherit.
        """
        return self.create_container_image(
            container, self.CONTAINER_SNAPSHOT_NAME_PREFIX + name, push=False)

    def delete_container(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        try:
            if self.container_is_suspended(container):
                self.resume_container(container)
            if self.container_is_running(container):
                self.stop_container(container)
        except:
            pass

        try:
            return self._client.remove_container(container=container,
                                                 force=True)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def delete_container_image(self, image, **kwargs):
        """
        :inherit.
        """
        if not self.container_image_exists(image):
            raise ContainerImageNotFoundError

        try:
            self._client.remove_image(image=image, force=True)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerImageNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def delete_container_snapshot(self, snapshot, **kwargs):
        """
        :inherit.
        """
        try:
            self.delete_container_image(snapshot, **kwargs)
        except ContainerImageNotFoundError as ex:
            raise ContainerSnapshotNotFoundError
        except ContainerBackendError as ex:
            raise ex
        except Exception as ex:
            raise ContainerBackendError(ex)

    def exec_in_container(self, container, cmd, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        if not self.container_is_running(
                container) or self.container_is_suspended(container):
            raise IllegalContainerStateError

        try:
            exec_id = self._client.exec_create(container=container, cmd=cmd)
            return self._client.exec_start(exec_id=exec_id, stream=False)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_container(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        try:
            container = self._client.inspect_container(container)
            return self.make_container_contract_conform(container)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_container_image(self, image, **kwargs):
        """
        :inherit.
        """
        if not self.container_image_exists(image):
            raise ContainerImageNotFoundError

        try:
            self._client.inspect_image(image)
            return {ContainerBackend.KEY_PK: image}
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerImageNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_container_images(self, **kwargs):
        """
        :inherit.
        """
        try:
            images = []
            for image in self._client.images():
                if not self.is_container_snapshot(image):
                    images.append(
                        {ContainerBackend.KEY_PK: image.get('RepoTags')[0]})
            return images
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_container_logs(self, container, **kwargs):
        """
        :inherit.

        :param timestamps: If true, the log messages' timestamps are included.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        timestamps = kwargs.get('timestamps')
        try:
            logs = self._client.logs(container=container,
                                     stream=False,
                                     timestamps=(timestamps is True))
            return filter(lambda x: len(x) > 0,
                          logs.split('\n'))  # remove empty lines
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_container_snapshot(self, snapshot, **kwargs):
        """
        :inherit.
        """
        if not self.container_snapshot_exists(snapshot):
            raise ContainerSnapshotNotFoundError

        return next(sh for sh in self.get_container_snapshots()
                    if sh.get(ContainerBackend.KEY_PK).startswith(snapshot))

    def get_internal_container_image_name(self, container, name):
        """
        Return the name under which the image `name` for the given container is stored internally.

        :param container: The container the snapshot belongs to.
        :param name: The image's name.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        try:
            container = self._client.inspect_container(container)
            container_name = container.get('Name')
            container_name = re.sub(
                # i.e. coco-u2500-ipython
                r'^/' + self.CONTAINER_NAME_PREFIX + r'u(\d+)-(.+)$',
                # i.e. coco-u2500/ipython:shared-name
                self.CONTAINER_NAME_PREFIX + r'u\g<1>' + '/' + r'\g<2>' + ':' +
                name,
                container_name)
            if self._registry:
                container_name = self._registry + '/' + container_name
            return container_name
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_container_snapshots(self, **kwargs):
        """
        :inherit.
        """
        try:
            snapshots = []
            for image in self._client.images():
                if self.is_container_snapshot(image):
                    snapshots.append(
                        self.make_snapshot_contract_conform(image))
            return snapshots
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_containers(self, only_running=False, **kwargs):
        """
        :inherit.
        """
        try:
            containers = []
            for container in self._client.containers(all=(not only_running)):
                containers.append(
                    self.make_container_contract_conform(container))
            return containers
        except Exception as ex:
            raise ContainerBackendError(ex)

    def get_containers_snapshots(self, container, **kwargs):
        """
        TODO: implement.
        """
        raise NotImplementedError

    def get_status(self):
        """
        :inherit.
        """
        try:
            self._client.info()
            return ContainerBackend.BACKEND_STATUS_OK
        except Exception:
            return ContainerBackend.BACKEND_STATUS_ERROR

    def is_container_snapshot(self, image):
        """
        Return true if `image` is internally used as a container snapshot.

        :param image: The image to check.
        """
        parts = image.get('RepoTags', [' : '])[0].split(':')
        if len(parts) > 1:
            return parts[1].startswith(self.CONTAINER_SNAPSHOT_NAME_PREFIX)
        return False

    def make_container_contract_conform(self, container):
        """
        Ensure the container dict returned from Docker conforms to what the contract requires.

        :param container: The container to make conform.
        """
        if not self.container_is_running(container.get('Id')):
            status = ContainerBackend.CONTAINER_STATUS_STOPPED
        elif self.container_is_suspended(container.get('Id')):
            status = SuspendableContainerBackend.CONTAINER_STATUS_SUSPENDED
        else:
            status = ContainerBackend.CONTAINER_STATUS_RUNNING

        return {
            ContainerBackend.KEY_PK: container.get('Id'),
            ContainerBackend.CONTAINER_KEY_STATUS: status
        }

    def make_snapshot_contract_conform(self, snapshot):
        """
        Ensure the snapshot dict returned from Docker conforms to what the contract requires.

        :param snapshot: The snapshot to make conform.
        """
        return self.make_image_contract_conform(snapshot)

    def restart_container(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError

        try:
            return self._client.restart(container=container, timeout=0)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerImageNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def restore_container_snapshot(self, container, snapshot, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        if not self.container_snapshot_exists(snapshot):
            raise ContainerSnapshotNotFoundError

        raise NotImplementedError

    def resume_container(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        if not self.container_is_running(
                container) or not self.container_is_suspended(container):
            raise IllegalContainerStateError

        try:
            return self._client.unpause(container=container)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerImageNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def start_container(self, container, **kwargs):
        """
        :inherit.

        :param kwargs: All optional arguments the docker-py library accepts as well.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        # if self.container_is_running(container):
        #     raise IllegalContainerStateError

        try:
            return self._client.start(container=container, **kwargs)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def stop_container(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        # if not self.container_is_running(container):
        #     raise IllegalContainerStateError

        try:
            self.resume_container(container)
        except:
            pass

        try:
            return self._client.stop(container=container, timeout=0)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerImageNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)

    def suspend_container(self, container, **kwargs):
        """
        :inherit.
        """
        if not self.container_exists(container):
            raise ContainerNotFoundError
        if not self.container_is_running(
                container):  # or self.container_is_suspended(container):
            raise IllegalContainerStateError

        try:
            return self._client.pause(container=container)
        except DockerError as ex:
            if ex.response.status_code == requests.codes.not_found:
                raise ContainerImageNotFoundError
            raise ContainerBackendError(ex)
        except Exception as ex:
            raise ContainerBackendError(ex)
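suspend_container and resume_container above map onto Docker's pause and unpause calls, guarded by the container's Running/Paused state flags. A bare-bones sketch of the same idea outside the backend class (the container name is a placeholder):

from docker import Client

client = Client(base_url='unix://var/run/docker.sock')


def toggle_suspend(container_name):
    # 'State' carries both the 'Running' and 'Paused' booleans
    state = client.inspect_container(container_name)['State']
    if not state['Running']:
        raise RuntimeError('container is not running')
    if state['Paused']:
        client.unpause(container_name)   # resume
    else:
        client.pause(container_name)     # suspend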