Example #1
 def client(self):
     base_url = self._get_base_url()
     if base_url is not None:
         try:
             tls = False
             _tlsconfig = self._get_tlsconfig()
             if _tlsconfig is not None:
                 client_cert = (_tlsconfig.get('tlscert'),
                                _tlsconfig.get('tlskey'))
                 ca_cert = _tlsconfig.get('tlscacert')
                 verify = _tlsconfig.get('tlsverify') == '1'
                 tls = TLSConfig(client_cert=client_cert,
                                 ca_cert=ca_cert,
                                 verify=verify)
             cli = Client(base_url,
                          version=self.version,
                          timeout=3,
                          tls=tls)
             # Hits the /_ping endpoint of the remote API and returns the result.
             # An exception will be raised if the endpoint isn't responding.
             if cli.ping() == 'OK':
                 cli.close()
                 return Client(base_url,
                               version=self.version,
                               timeout=600,
                               tls=tls)
             return
         except errors.DockerException as e:
             print(e)
             return
     print('No available swarm api')
     return
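
The TLS wiring above maps directly onto docker-py's standalone API. Below is a minimal sketch of the same construction, assuming placeholder certificate paths and a placeholder daemon URL rather than values read from configuration:

from docker import Client
from docker.tls import TLSConfig

# Placeholder endpoint and certificate paths; substitute your own.
tls_config = TLSConfig(client_cert=('/certs/cert.pem', '/certs/key.pem'),
                       ca_cert='/certs/ca.pem',
                       verify=True)
cli = Client(base_url='tcp://127.0.0.1:2376', tls=tls_config, timeout=3)
if cli.ping() == 'OK':
    print('daemon reachable')
cli.close()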
Example #2
class DockerEventManager(Thread):

    def __init__(self, manager, url=None):
        super(DockerEventManager, self).__init__()

        self.manager = manager
        self.url = url

        self.daemon = True

        self.client = Client(self.url)

    def run(self):
        for payload in self.client.events():
            event = loads(payload)
            status = event.pop("status")
            docker_event = DOCKER_EVENTS.get(status)
            if docker_event is not None:
                self.manager.fire(docker_event(**event), "docker")
            else:
                print(
                    "WARNING: Unknown Docker Event <{0:s}({1:s})>".format(
                        status, repr(event)
                    ),
                    file=sys.stderr
                )

    def stop(self):
        self.client.close()
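
As a rough usage sketch, assuming a circuits-style manager object that exposes fire() and a DOCKER_EVENTS dict mapping status strings such as 'start' or 'die' to event classes (neither is defined in the snippet):

class StubManager(object):
    """Stand-in for the component manager the snippet expects."""
    def fire(self, event, channel):
        print('fired %r on channel %r' % (event, channel))

watcher = DockerEventManager(StubManager(), url='unix://var/run/docker.sock')
watcher.start()   # run() blocks on client.events(), so the loop lives on this daemon thread
# ... main program does its work; events are dispatched in the background ...
watcher.stop()    # closes the underlying docker Client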
Example #3
class DockerEventManager(Thread):
    def __init__(self, manager, url=None):
        super(DockerEventManager, self).__init__()

        self.manager = manager
        self.url = url

        self.daemon = True

        self.client = Client(self.url)

    def run(self):
        for payload in self.client.events():
            event = loads(payload)
            status = event.pop("status")
            docker_event = DOCKER_EVENTS.get(status)
            if docker_event is not None:
                self.manager.fire(docker_event(**event), "docker")
            else:
                print("WARNING: Unknown Docker Event <{0:s}({1:s})>".format(
                    status, repr(event)),
                      file=sys.stderr)

    def stop(self):
        self.client.close()
Example #4
class DockerLayer(object):

    def __init__(self, name, tag):
        self.__name__ = name
        self.__bases__ = tuple([])
        self.tag = tag
        self.client = Client(base_url='unix://var/run/docker.sock')
        try:
            self.client.ping()
        except ConnectionError as e:
            # http://docker-py.readthedocs.org/en/latest/boot2docker/
            kwargs = kwargs_from_env()
            kwargs['tls'].assert_hostname = False
            self.client = Client(**kwargs)

    def setUp(self):
        if self.client.ping() == u'OK':
            self.start()
        else:
            raise RuntimeError('Docker is not available.\nMake sure you have Docker installed before running tests.\nVisit https://docker.com for installation instructions.')

    def start(self):
        for line in self.client.build(
                path=os.path.abspath(os.path.join(DIR, '..')),
                tag=self.tag, rm=True, forcerm=True):
            sys.stdout.write(line)

    def tearDown(self):
        self.stop()

    def stop(self):
        self.client.close()
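
One plausible way to plug this layer into a zope.testrunner-style suite; the layer name and image tag below are made up for illustration:

import unittest

# Hypothetical test wiring; 'myproject/testimage' is a placeholder tag.
docker_layer = DockerLayer('docker_layer', 'myproject/testimage')

class DockerImageTest(unittest.TestCase):
    layer = docker_layer

    def test_image_built(self):
        # the layer's client is a docker-py Client, so it can be reused in tests
        self.assertTrue(docker_layer.client.images('myproject/testimage'))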
Example #5
class DockerLayer(object):
    def __init__(self, name, tag):
        self.__name__ = name
        self.__bases__ = tuple([])
        self.tag = tag
        self.client = Client(base_url='unix://var/run/docker.sock')
        try:
            self.client.ping()
        except ConnectionError as e:
            # http://docker-py.readthedocs.org/en/latest/boot2docker/
            kwargs = kwargs_from_env()
            kwargs['tls'].assert_hostname = False
            self.client = Client(**kwargs)

    def setUp(self):
        if self.client.ping() == u'OK':
            self.start()
        else:
            raise RuntimeError(
                'Docker is not available.\nMake sure you have Docker installed before running tests.\nVisit https://docker.com for installation instructions.'
            )

    def start(self):
        for line in self.client.build(path=os.path.abspath(
                os.path.join(DIR, '..')),
                                      tag=self.tag,
                                      rm=True,
                                      forcerm=True):
            sys.stdout.write(line)

    def tearDown(self):
        self.stop()

    def stop(self):
        self.client.close()
Example #6
class Main(object):
    __version = '16.6.22'
    __name = path.basename(argv[0])
    __docker_host = 'unix://var/run/docker.sock'
    __cli = None

    def __init__(self):
        action = self.parse_args()

        try:
            self.__cli = Client(base_url=self.__docker_host)
        except errors.DockerException as err:
            self.die(str(err))

        if action == 'list':
            self.display_list()

    def __del__(self):
        if self.__cli:
            self.__cli.close()

    @staticmethod
    def die(message=None, code=1):
        if message is not None:
            print(message, file=stderr)
        exit(code)

    def parse_args(self):
        parser = ArgumentParser()
        parser.add_argument('-v', '--version',
                            help='show version', action="store_true")
        parser.add_argument('action', help='action to perform', choices=['list'])
        parser.add_argument('-H', '--host', help='Socket or URL to bind to (default: unix:///var/run/docker.sock)')
        args = parser.parse_args()
        if args.version:
            self.display_version()
            exit()
        if args.host:
            self.__docker_host = args.host
        return args.action

    def display_version(self):
        print('%s version %s' % (self.__name, self.__version))

    def display_list(self):
        containers = None
        try:
            containers = self.__cli.containers(all=True)
        except exceptions.ConnectionError:
            self.die('Problem connecting to Docker Host')

        print('Containers found: %s' % len(containers))
        if containers:
            table = PrettyTable(['Name', 'IP Address', 'Image', 'Status'])
            for container in containers:
                table.add_row([container['Names'][0].replace('/', ''),
                               container['NetworkSettings']['Networks']['bridge']['IPAddress'],
                               container['Image'], container['Status']])
            print(table)
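
Since the constructor parses arguments, connects, and dispatches the action itself, a minimal entry point would be the following (the module name in the comment is hypothetical):

if __name__ == '__main__':
    # e.g. `python containers.py list` or `python containers.py -H tcp://127.0.0.1:2375 list`
    Main()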
Example #7
 def client(self):
     base_url = self._get_base_url()
     if base_url is not None:
         try:
             cli = Client(base_url, version=self.version, timeout=3)
             # Hits the /_ping endpoint of the remote API and returns the result.
             # An exception will be raised if the endpoint isn't responding.
             if cli.ping() == "OK":
                 cli.close()
                 return Client(base_url, version=self.version, timeout=3600)
             return
         except errors.DockerException as e:
             print(e)
             return
     print("No available swarm api")
     return
Example #8
class MonitorThread(Thread):
    def __init__(self, app, sock, dockerEvents):
        super(MonitorThread, self).__init__()

        self.app = app
        self.sock = sock
        self.dockerEvents = dockerEvents
        self.cli = Client(base_url=self.sock)

    def run(self):
        # Listen for Docker events
        for event in self.cli.events():
            event = json.loads(event.decode('utf-8'))
            if event.get("status") in self.dockerEvents:
                self.app.updateProxy()

    def stop(self):
        self.cli.close()
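
A minimal sketch of how this thread might be wired up, assuming the surrounding app only needs an updateProxy() method and that the event list names statuses the daemon actually emits:

class StubApp(object):
    """Stand-in for the application object the snippet expects."""
    def updateProxy(self):
        print('proxy configuration refreshed')

monitor = MonitorThread(StubApp(), 'unix://var/run/docker.sock',
                        ['start', 'die', 'stop'])
monitor.start()   # run() blocks on cli.events() inside the thread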
Example #9
class DockerCluster(BaseCluster):
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.
    """

    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'

    def __init__(self, master_host, slave_hosts,
                 local_mount_dir, docker_mount_dir):
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        self.master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [slave + '-' + str(uuid.uuid4())
                       for slave in slave_hosts]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self.mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 300
        self.client = Client(**kwargs)

        self._DOCKER_START_TIMEOUT = 30
        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        return self.slaves + [self.master]

    def get_master(self):
        return self.master

    def all_internal_hosts(self):
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        return os.path.join(self.local_mount_dir,
                            self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        matches = [unique_host for unique_host in self.all_hosts()
                   if unique_host.startswith(host)]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def create_image(self, path_to_dockerfile_dir, image_tag, base_image,
                     base_image_tag=None):
        self.fetch_image_if_not_present(base_image, base_image_tag)
        output = self._execute_and_wait(self.client.build,
                                        path=path_to_dockerfile_dir,
                                        tag=image_tag,
                                        rm=True)
        if not self._is_image_present_locally(image_tag, 'latest'):
            raise OSError('Unable to build image %s: %s' % (image_tag, output))

    def fetch_image_if_not_present(self, image, tag=None):
        if not tag and not self.client.images(image):
            self._execute_and_wait(self.client.pull, image)
        elif tag and not self._is_image_present_locally(image, tag):
            self._execute_and_wait(self.client.pull, image, tag)

    def _is_image_present_locally(self, image_name, tag):
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                if image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self, master_image, slave_image=None,
                         cmd=None, **kwargs):
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image,
                                          cmd, **kwargs)
        self._ensure_docker_containers_started(master_image)
        sleep(3)

    def tear_down(self):
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()
        if self.client:
            self.client.close()
            self.client = None

    def _tear_down_container(self, container_name):
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        return host_name

    def _remove_host_mount_dirs(self):
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        for container_name in self.all_hosts():
            try:
                os.makedirs(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        output = ''
        for line in ret:
            output += line
        return output

    def _create_and_start_containers(self, master_image, slave_image=None,
                                     cmd=None, **kwargs):
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(
                    slave_image, container_name,
                    container_name.split('-')[0], cmd
                )
                self.client.start(container_name,
                                  binds={container_mount_dir:
                                         {'bind': self.mount_dir,
                                          'ro': False}},
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(
            master_image, self.master, hostname=self.internal_master,
            cmd=cmd
        )
        self.client.start(self.master,
                          binds={master_mount_dir:
                                 {'bind': self.mount_dir,
                                  'ro': False}},
                          links=zip(self.slaves, self.slaves), **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self, image, container_name, hostname=None,
                          cmd=None):
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               host_config={'mem_limit': '2g'})

    def _add_hostnames_to_slaves(self):
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            self.exec_cmd_on_host(
                host,
                'bin/bash -c \'echo "%s" >> /etc/hosts\''
                % additions_to_etc_hosts
            )

    def _ensure_docker_containers_started(self, image):
        centos_based_images = [BASE_TD_IMAGE_NAME]

        timeout = 0
        is_host_started = {}
        for host in self.all_hosts():
            is_host_started[host] = False
        while timeout < self._DOCKER_START_TIMEOUT:
            for host in self.all_hosts():
                atomic_is_started = True
                atomic_is_started &= \
                    self.client.inspect_container(host)['State']['Running']
                if image in centos_based_images or \
                        image.startswith(self.IMAGE_NAME_BASE):
                    atomic_is_started &= \
                        self._are_centos_container_services_up(host)
                is_host_started[host] = atomic_is_started
            if not DockerCluster._are_all_hosts_started(is_host_started):
                timeout += 1
                sleep(1)
            else:
                break
        if timeout == self._DOCKER_START_TIMEOUT:
            raise DockerClusterException(
                'Docker container timed out on start.' + str(is_host_started))

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        all_started = True
        for host in host_started_map.keys():
            all_started &= host_started_map[host]
        return all_started

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up, as well as that the SSH keys are
        in the right place.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(
                host, 'grep app-admin /etc/passwd'
            )
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        # check for .ssh being in the right place
        ssh_output = self.exec_cmd_on_host(host, 'ls /home/app-admin/.ssh')
        if 'id_rsa' not in ssh_output:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, user=None, raise_error=True,
                         tty=False):
        ex = self.client.exec_create(self.__get_unique_host(host), cmd,
                                     tty=tty, user=user)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_tag_basename(bare_image_provider, cluster_type, ms):
        return '_'.join(
            [bare_image_provider.get_tag_decoration(), cluster_type, ms])

    @staticmethod
    def _get_master_image_name(bare_image_provider, cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'master'))

    @staticmethod
    def _get_slave_image_name(bare_image_provider, cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'slave'))

    @staticmethod
    def start_bare_cluster(bare_image_provider):
        dc = DockerCluster
        master_name = dc._get_master_image_name(bare_image_provider,
                                                dc.BARE_CLUSTER_TYPE)
        slave_name = dc._get_slave_image_name(bare_image_provider,
                                              dc.BARE_CLUSTER_TYPE)
        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        if not dc._check_for_images(master_name, slave_name):
            bare_image_provider.create_bare_images(centos_cluster, master_name,
                                                   slave_name)

        centos_cluster.start_containers(master_name, slave_name)

        return centos_cluster

    @staticmethod
    def start_existing_images(bare_image_provider, cluster_type):
        dc = DockerCluster
        master_name = dc._get_master_image_name(bare_image_provider,
                                                cluster_type)
        slave_name = dc._get_slave_image_name(bare_image_provider,
                                              cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            return None

        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        centos_cluster.start_containers(master_name, slave_name)
        return centos_cluster

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name, tag='latest'):
        master_repotag = '%s:%s' % (master_image_name, tag)
        slave_repotag = '%s:%s' % (slave_image_name, tag)
        with Client(timeout=180) as client:
            images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            if master_repotag in image['RepoTags']:
                has_master_image = True
            if slave_repotag in image['RepoTags']:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, bare_image_provider, cluster_type):
        self.client.commit(self.master,
                           self._get_master_image_name(bare_image_provider,
                                                       cluster_type))
        self.client.commit(self.slaves[0],
                           self._get_slave_image_name(bare_image_provider,
                                                      cluster_type))

    def run_script_on_host(self, script_contents, host):
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=True)

    def write_content_to_host(self, content, path, host):
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host, 'cp %s %s' % (os.path.join(self.mount_dir, filename),
                                dest_dir))

    def copy_to_host(self, source_path, dest_host, **kwargs):
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties',
                worker
            )

    def postinstall(self, installer):
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)
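
A rough driver for the class above; the image provider here is a stub standing in for the project's real provider objects, and the DEFAULT_* mount-point constants come from the surrounding module:

class StubImageProvider(object):
    """Hypothetical provider; the real one builds the bare master/slave images."""
    def get_tag_decoration(self):
        return 'stub'

    def create_bare_images(self, cluster, master_name, slave_name):
        raise NotImplementedError('build %s and %s here' % (master_name, slave_name))

cluster = DockerCluster.start_bare_cluster(StubImageProvider())
try:
    print(cluster.exec_cmd_on_host(cluster.get_master(), 'hostname'))
finally:
    cluster.tear_down()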
Example #10
class DockerCluster(BaseCluster):
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.
    """

    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'

    def __init__(self, master_host, slave_hosts,
                 local_mount_dir, docker_mount_dir):
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        self._master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [slave + '-' + str(uuid.uuid4())
                       for slave in slave_hosts]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self._mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 300
        self.client = Client(**kwargs)
        self._user = '******'

        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        return self.slaves + [self.master]

    def all_internal_hosts(self):
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        return os.path.join(self.local_mount_dir,
                            self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        matches = [unique_host for unique_host in self.all_hosts()
                   if unique_host.startswith(host)]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def _is_image_present_locally(self, image_name, tag):
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                if image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self, master_image, slave_image=None,
                         cmd=None, **kwargs):
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image,
                                          cmd, **kwargs)
        self._ensure_docker_containers_started(master_image)

    def tear_down(self):
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()
        if self.client:
            self.client.close()
            self.client = None

    def _tear_down_container(self, container_name):
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        return host_name

    def _remove_host_mount_dirs(self):
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        for container_name in self.all_hosts():
            try:
                os.makedirs(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        output = ''
        for line in ret:
            output += line
        return output

    def _create_and_start_containers(self, master_image, slave_image=None,
                                     cmd=None, **kwargs):
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(
                    slave_image, container_name,
                    container_name.split('-')[0], cmd
                )
                self.client.start(container_name,
                                  binds={container_mount_dir:
                                         {'bind': self.mount_dir,
                                          'ro': False}},
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(
            master_image, self.master, hostname=self.internal_master,
            cmd=cmd
        )
        self.client.start(self.master,
                          binds={master_mount_dir:
                                 {'bind': self.mount_dir,
                                  'ro': False}},
                          links=zip(self.slaves, self.slaves), **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self, image, container_name, hostname=None,
                          cmd=None):
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               host_config={'mem_limit': '2g'})

    def _add_hostnames_to_slaves(self):
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            self.exec_cmd_on_host(
                host,
                'bin/bash -c \'echo "%s" >> /etc/hosts\''
                % additions_to_etc_hosts
            )

    @retry(stop_max_delay=_DOCKER_START_TIMEOUT, wait_fixed=_DOCKER_START_WAIT)
    def _ensure_docker_containers_started(self, image):
        # Strip off the tag, if there is one. We don't want to have to update
        # the NO_WAIT_SSH_IMAGES list every time we update the docker images.
        image_no_tag = image.split(':')[0]
        host_started = {}
        for host in self.all_hosts():
            host_started[host] = False
        for host in host_started.keys():
            if host_started[host]:
                continue
            is_started = True
            is_started &= \
                self.client.inspect_container(host)['State']['Running']
            if is_started and image_no_tag not in NO_WAIT_SSH_IMAGES:
                is_started &= self._are_centos_container_services_up(host)
            host_started[host] = is_started
        not_started = [host for (host, started) in host_started.items() if not started]
        if len(not_started):
            raise NotStartedException(not_started)

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        all_started = True
        for host in host_started_map.keys():
            all_started &= host_started_map[host]
        return all_started

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up, as well as that the SSH keys are
        in the right place.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(
                host, 'grep app-admin /etc/passwd'
            )
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        # check for .ssh being in the right place
        try:
            ssh_output = self.exec_cmd_on_host(host, 'ls /home/app-admin/.ssh')
            if 'id_rsa' not in ssh_output:
                return False
        except OSError:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, user=None, raise_error=True,
                         tty=False, invoke_sudo=False):
        ex = self.client.exec_create(self.__get_unique_host(host), ['sh', '-c', cmd],
                                     tty=tty, user=user)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_tag_basename(bare_image_provider, cluster_type, ms):
        return '_'.join(
            [bare_image_provider.get_tag_decoration(), cluster_type, ms])

    @staticmethod
    def _get_master_image_name(bare_image_provider, cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'master'))

    @staticmethod
    def _get_slave_image_name(bare_image_provider, cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'slave'))

    @staticmethod
    def _get_image_names(bare_image_provider, cluster_type):
        dc = DockerCluster
        return (dc._get_master_image_name(bare_image_provider, cluster_type),
                dc._get_slave_image_name(bare_image_provider, cluster_type))

    @staticmethod
    def start_cluster(bare_image_provider, cluster_type, master_host='master',
                      slave_hosts=None, **kwargs):
        if slave_hosts is None:
            slave_hosts = ['slave1', 'slave2', 'slave3']
        created_bare = False
        dc = DockerCluster

        centos_cluster = DockerCluster(master_host, slave_hosts,
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        master_name, slave_name = dc._get_image_names(
            bare_image_provider, cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            master_name, slave_name = dc._get_image_names(
                bare_image_provider, dc.BARE_CLUSTER_TYPE)
            if not dc._check_for_images(master_name, slave_name):
                bare_image_provider.create_bare_images(
                    centos_cluster, master_name, slave_name)
            created_bare = True

        centos_cluster.start_containers(master_name, slave_name, **kwargs)

        return centos_cluster, created_bare

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name, tag='latest'):
        master_repotag = '%s:%s' % (master_image_name, tag)
        slave_repotag = '%s:%s' % (slave_image_name, tag)
        with Client(timeout=180) as client:
            images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            if image['RepoTags'] is not None and master_repotag in image['RepoTags']:
                has_master_image = True
            if image['RepoTags'] is not None and slave_repotag in image['RepoTags']:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, bare_image_provider, cluster_type):
        self.client.commit(self.master,
                           self._get_master_image_name(bare_image_provider,
                                                       cluster_type))
        if self.slaves:
            self.client.commit(self.slaves[0],
                               self._get_slave_image_name(bare_image_provider,
                                                          cluster_type))

    def run_script_on_host(self, script_contents, host, tty=True):
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=tty)

    def write_content_to_host(self, content, path, host):
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host, 'cp %s %s' % (os.path.join(self.mount_dir, filename),
                                dest_dir))

    def copy_to_host(self, source_path, dest_host, **kwargs):
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties',
                worker
            )

    def postinstall(self, installer):
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)

    @property
    def rpm_cache_dir(self):
        return self._mount_dir

    @property
    def mount_dir(self):
        return self._mount_dir

    @property
    def user(self):
        return self._user

    @property
    def master(self):
        return self._master
Example #11
        if not isinstance(args, Iterable):
            args = (args, )
        try:
            self.cursor.execute(query_string, args)
        except Exception as e:
            return e

if __name__ == '__main__':
    # docker stop $(docker ps -a -q)
    from docker import Client as DockerClient

    cli = DockerClient(base_url='unix:///var/run/docker.sock')
    try:
        container = cli.create_container(image='test_db_aioframe')
        container_id = container.get('Id')
        cli.start(container=container_id)
        info = cli.inspect_container(container=container_id)
        container_ip = info['NetworkSettings']['IPAddress']
        time.sleep(1)  # wait for PostgreSQL inside the container to come up
        print('docker start')

        import psycopg2
        d = Model(psycopg2, {'database': 'test', 'user': '******', 'host': container_ip, 'password': '******'})
        _c = d.query('select %s as vasa, %s as petya', (1, 1), name='A1')

    except Exception as e:
        for x in traceback.format_tb(e.__traceback__):
            print(x)
    finally:
        cli.close()
        print('docker stop')
Example #12
class ContainerHandler(ResourceHandler):
    @classmethod
    def is_available(cls, io):
        return True

    def __init__(self, agent, io=None):
        super().__init__(agent, io)
        self._client = None

    def pre(self, ctx, resource: Container):
        self._client = Client(base_url="unix://var/run/docker.sock")

    def post(self, ctx, resource: Container):
        self._client.close()

    def check_resource(self, ctx, resource: Container):
        current = resource.clone()
        containers = self._client.containers(all=True)

        docker_resource = None
        for container in containers:
            names = container["Names"]
            search_name = resource.name
            if search_name[0] != "/":
                search_name = "/" + search_name

            if search_name in names:
                docker_resource = container

        if docker_resource is None:
            current.state = "purged"
            return current
        else:
            data = self._client.inspect_container(docker_resource["Id"])
            current.state = data["State"]["Status"]
            ctx.set("container_id", docker_resource["Id"])
            return current

    def do_changes(self, ctx, resource: Container, changes) -> bool:
        """
            Enforce the changes
        """
        if "state" in changes:
            state = changes["state"]
            if state["current"] == "purged" and state["desired"] == "running":
                # ensure the image is pulled
                images = self._client.images(name=resource.image)
                if len(images) == 0:
                    msg = self._client.pull(resource.image)
                    if "not found" in msg:
                        raise Exception("Failed to pull image %s: %s" %
                                        (resource.image, msg))

                cont = self._client.create_container(
                    image=resource.image,
                    command=resource.command,
                    detach=resource.detach,
                    host_config={"memory_limit": resource.memory_limit})
                self._client.start(cont["Id"])
                self._client.rename(cont["Id"], resource.name)

                ctx.set_created()

            elif state["desired"] == "purged":
                container_id = ctx.get("container_id")
                if state["current"] == "running":
                    self._client.stop(container_id)

                self._client.remove_container(container_id)

                ctx.set_purged()

    def facts(self, resource: Container):
        """
            Get facts about this resource
        """
        return {}