Exemplo n.º 1
0
 def runCommand(self, container, cmd, stream=True):
     """ Run *cmd* inside *container* via the Docker exec API.

     Args:
         container: Id or name of a running container.
         cmd: Command to execute.
         stream (bool): If True, print output chunks as they arrive and
                        return the command's exit code; otherwise return
                        the command's raw output.

     Returns:
         Exit code (int) when streaming; raw exec output otherwise.
     """
     # A fresh client per call, talking to the local docker daemon socket.
     cli = Client(base_url='unix://var/run/docker.sock')
     ex = cli.exec_create(container=container, cmd=cmd)
     if stream:
         for result in cli.exec_start(exec_id=ex["Id"], stream=True):
             print(result)
         # ExitCode is only final once the output stream has been drained.
         return cli.exec_inspect(exec_id=ex["Id"])['ExitCode']
     else:
         # Non-streaming path returns the accumulated output, not the exit code.
         return cli.exec_start(exec_id=ex["Id"])
Exemplo n.º 2
0
class DockerExec(object):
    """ Thin wrapper around a docker Client for running commands in containers. """

    def __init__(self):
        # Client for the local docker daemon over its unix socket.
        self.cl = Client(base_url="unix://var/run/docker.sock")

    def execute(self, container, command, detach=False, interactive=False, tty=False):
        """ Execute *command* in *container*, printing exec metadata and exit code.

        Args:
            container: Id or name of a running container.
            command: Command to run inside the container.
            detach (bool): Detach from the exec'd process.
            interactive (bool): Unused; kept for interface compatibility.
            tty (bool): Allocate a pseudo-TTY.

        Raises:
            docker.errors.APIError: Propagated unchanged from the docker API.
        """
        try:
            exec_id = self.cl.exec_create(container, command, True, True, tty)
            # print() calls: the original used Python 2 print statements,
            # which are SyntaxErrors under Python 3.
            print(exec_id)
            ret = self.cl.exec_start(exec_id["Id"], detach, tty, False)
            print(ret)
            ins = self.cl.exec_inspect(exec_id["Id"])
            print(ins["ExitCode"])
        except docker.errors.APIError:
            # Re-raise unchanged; the clause exists only as an explicit hook point.
            raise
Exemplo n.º 3
0
class DockerExec(object):
    """ Thin wrapper around a docker Client for running commands in containers. """

    def __init__(self):
        # Client for the local docker daemon over its unix socket.
        self.cl = Client(base_url='unix://var/run/docker.sock')

    def execute(self,
                container,
                command,
                detach=False,
                interactive=False,
                tty=False):
        """ Execute *command* in *container*, printing exec metadata and exit code.

        Args:
            container: Id or name of a running container.
            command: Command to run inside the container.
            detach (bool): Detach from the exec'd process.
            interactive (bool): Unused; kept for interface compatibility.
            tty (bool): Allocate a pseudo-TTY.

        Raises:
            docker.errors.APIError: Propagated unchanged from the docker API.
        """
        try:
            exec_id = self.cl.exec_create(container, command, True, True, tty)
            # print() calls: the original used Python 2 print statements,
            # which are SyntaxErrors under Python 3.
            print(exec_id)
            ret = self.cl.exec_start(exec_id['Id'], detach, tty, False)
            print(ret)
            ins = self.cl.exec_inspect(exec_id['Id'])
            print(ins['ExitCode'])
        except docker.errors.APIError:
            # Re-raise unchanged; the clause exists only as an explicit hook point.
            raise
Exemplo n.º 4
0
    volumes.append(volume.split(':')[1])
# NOTE(review): the line above is the tail of a for-loop that begins before
# this excerpt; `volumes`, `volume`, `cli`, and `config` are defined there.
host_config = cli.create_host_config(binds=config['volumes'])

# Create a container and start it
container = cli.create_container(image=config['image'] + ':' + config['tag'],
                                 command='tail -f /dev/null',
                                 detach=True,
                                 stdin_open=True,
                                 tty=True,
                                 environment=config['environment'],
                                 volumes=volumes,
                                 name=config['name'],
                                 host_config=host_config)
cli.start(container=container.get('Id'))

# Execute the commands in order, streaming output; stop at the first failure
for cmd in config['cmd']:
    print('[+] ' + cmd)
    execute = cli.exec_create(container['Id'], cmd=cmd, stdout=True, stderr=True)
    for char in cli.exec_start(execute['Id'], tty=True, stream=True):
        sys.stdout.write(char.decode(sys.stdout.encoding))
    status = cli.exec_inspect(execute['Id'])['ExitCode']
    if status != 0:
        break

# Stop the container and remove it
cli.stop(container=container.get('Id'))
cli.remove_container(container=container['Id'])

# NOTE(review): if config['cmd'] is empty, `status` is unbound here — NameError.
sys.exit(status)
Exemplo n.º 5
0
class Two1ComposerContainers(Two1Composer):
    """ Manage machine-payable microservices in containers.
    """

    def __init__(self):
        # Starts disconnected; connect() must be called before docker operations.
        self._connected = ComposerState.DISCONNECTED
        self.provider = TwentyOneProvider()
        # NOTE(review): self.wallet_file is presumably inherited from
        # Two1Composer — confirm it is set before __init__ runs.
        self.default_wallet = Two1Wallet(self.wallet_file, self.provider)

    class ServiceManager:
        """ Query and modify user services persisting at cls.USER_SERVICES_FILE.
        """

        # JSON file mapping user-service names to their Image fields.
        USER_SERVICES_FILE = os.path.join(Two1Composer.BASE_DIR, "user-services.json")

        class Image(namedtuple('Image', 'docker_hub_account repository tag')):
            # Value type describing a docker image; docker_hub_account is None
            # for images that exist only locally.

            def _asdict(self):
                # Fixes a bug for Python 3.4 users
                # https://bugs.python.org/issue24931
                'A new OrderedDict mapping field names to their values'
                return OrderedDict(zip(self._fields, self))

            @property
            def is_dockerhub_image(self):
                """ Returns: True iff Image instance has all fields.
                """
                return self.docker_hub_account and self.repository and self.tag

            @property
            def is_local_image(self):
                """ Returns: True iff Image instance doesn't have docker_hub_account but has all other fields.
                """
                return not self.docker_hub_account and self.repository and self.tag

            def __str__(self):
                """ Returns: Docker image name constructed from Image instance fields.

                Raises:
                    ValueError: If the instance is neither a Docker Hub nor a local image.
                """
                if self.is_dockerhub_image:
                    return '%s/%s:%s' % (self.docker_hub_account, self.repository, self.tag)
                elif self.is_local_image:
                    return '%s:%s' % (self.repository, self.tag)
                else:
                    raise ValueError()

            @classmethod
            def from_string(cls, image_name):
                """ Constructs an Image instance from a docker image name.

                Supported forms: account/repo:tag, account/repo, repo:tag, repo
                (missing tags default to 'latest').

                Args:
                    image_name (str): A docker image name.

                Returns:
                    Image: An Image instance.

                Raises:
                    ValueError: If image_name matches none of the supported forms.
                """
                slashes = re.findall('/', image_name)
                colons = re.findall(':', image_name)

                if len(slashes) == 1:
                    # The '/' must precede the ':' for account/repo:tag to be valid.
                    if len(colons) == 1 and image_name.find('/') < image_name.find(':'):
                        docker_hub_account, rest = image_name.split('/')
                        repository, tag = rest.split(':')
                        return cls(docker_hub_account=docker_hub_account, repository=repository, tag=tag)
                    elif len(colons) == 0:
                        docker_hub_account, repository = image_name.split('/')
                        return cls(docker_hub_account=docker_hub_account, repository=repository, tag='latest')
                elif len(slashes) == 0:
                    if len(colons) == 1:
                        repository, tag = image_name.split(':')
                        return cls(docker_hub_account=None, repository=repository, tag=tag)
                    elif len(colons) == 0:
                        return cls(docker_hub_account=None, repository=image_name, tag='latest')
                raise ValueError()

        @classmethod
        def get_image(cls, service_name):
            """ Constructs an Image instance for a service.

            Args:
                service_name (str): The name of either a 21 service in the 21dotco/two1 repository or a user service
                                    added to ServiceManager.USER_SERVICES_FILE by ServiceManager.add_service.

            Returns:
                Image: An Image instance corresponding to the given service.

            Raises:
                ValueError: If service_name is neither a 21 service nor a user service.
            """
            if service_name in cls.available_21_services():
                return cls.Image(
                    docker_hub_account='21dotco',
                    repository='two1',
                    tag=service_name if service_name in Two1Composer.BASE_SERVICES else 'service-%s' % service_name
                )
            elif service_name in cls.available_user_services():
                return cls.Image(**cls._get_user_service_dict()[service_name])
            else:
                raise ValueError()

        @classmethod
        def available_services(cls):
            """ Returns: All available service names.
            """
            return cls.available_21_services() | cls.available_user_services()

        @classmethod
        def available_21_services(cls):
            """ Returns: All available 21 services by querying Docker Hub.
            """
            # Network call: lists tags of the 21dotco repo and keeps the
            # 'service-*' ones, stripped of their prefix.
            service_image_data = requests.get(os.path.join(
                Two1Composer.DOCKERHUB_API_URL, Two1Composer.DOCKERHUB_REPO, 'tags')).json().get('results')
            return set([image_data['name'].split('service-')[1] for image_data in
                        service_image_data if re.match(r'^service-', image_data['name'])])

        @classmethod
        def available_user_services(cls):
            """ Returns: All available user services.
            """
            return set(cls._get_user_service_dict().keys())

        @classmethod
        def add_service(cls, service_name, image_name_string,
                        service_successfully_added_hook, service_already_exists_hook,
                        service_failed_to_add_hook):
            """ Adds a new service definition to ServiceManager.USER_SERVICES_FILE.

            Args:
                service_name (str): Name of the service definition to add.
                image_name_string (str): Docker image name for the service definition.
                service_successfully_added_hook (Callable): Run with service_name on success.
                service_already_exists_hook (Callable): Run with service_name if it already exists.
                service_failed_to_add_hook (Callable): Run with service_name if persisting fails.
            """
            service_dict = cls._get_user_service_dict()
            if service_name in service_dict:
                service_already_exists_hook(service_name)
            else:
                service_dict[service_name] = cls.Image.from_string(image_name_string)._asdict()
                if cls._commit_user_service_dict(service_dict):
                    service_successfully_added_hook(service_name)
                else:
                    service_failed_to_add_hook(service_name)

        @classmethod
        def remove_service(cls, service_name,
                           service_successfully_removed_hook,
                           service_does_not_exists_hook,
                           service_failed_to_remove_hook):
            """ Removes a service definition from ServiceManager.USER_SERVICES_FILE.

            Args:
                service_name (str): Name of the service definition to remove.
                service_successfully_removed_hook (Callable): Run with service_name on success.
                service_does_not_exists_hook (Callable): Run with service_name if it isn't defined.
                service_failed_to_remove_hook (Callable): Run with service_name if persisting fails.
            """
            service_dict = cls._get_user_service_dict()
            if service_name in service_dict:
                del service_dict[service_name]
                if cls._commit_user_service_dict(service_dict):
                    service_successfully_removed_hook(service_name)
                else:
                    service_failed_to_remove_hook(service_name)
            else:
                service_does_not_exists_hook(service_name)

        @classmethod
        def _get_user_service_dict(cls):
            """ Returns: ServiceManager.USER_SERVICES_FILE as a dict.
            """
            try:
                with open(cls.USER_SERVICES_FILE, 'r') as data_file:
                    service_dict = json.load(data_file)
            # NOTE(review): bare except deliberately treats any read/parse failure
            # as "no user services"; consider narrowing to (OSError, ValueError).
            except:
                return {}
            else:
                return service_dict

        @classmethod
        def _commit_user_service_dict(cls, service_dict):
            """ Writes a dict of user services to ServiceManager.USER_SERVICES_FILE in json format.

            Args:
                service_dict (dict): A dictionary of user services of the form
                                     {service_name : _as_dict representation of corresponding Image instance..}.

            Returns:
                bool: True iff no exceptions were raised when writing service_dict to ServiceManager.USER_SERVICES_FILE
                      as json.
            """
            try:
                with open(cls.USER_SERVICES_FILE, 'w') as outfile:
                    json.dump(service_dict, outfile)
            # NOTE(review): bare except converts any write failure into False;
            # consider narrowing to (OSError, TypeError).
            except:
                return False
            else:
                return True

    class ComposerYAMLContext(YamlDataContext):
        """ Context manager for composer YAML service file.
        """

        def __init__(self, username=None, password=None, server_port=None, mnemonic=None):
            """ Capture the credentials to push into the compose file on entry.

            Args:
                username (str): 21 username, or None to leave unchanged.
                password (str): 21 password, or None to leave unchanged.
                server_port (int): Router port, or None to leave unchanged.
                mnemonic (str): Wallet mnemonic, or None to leave unchanged.
            """
            self.username = username
            self.password = password
            self.server_port = server_port
            self.mnemonic = mnemonic
            super().__init__(Two1Composer.COMPOSE_FILE)

        def __enter__(self):
            """ Enter the YAML context and refresh credentials in every service.

            Each non-None value supplied to __init__ is written into every
            service definition that already declares the corresponding
            environment variable.
            """
            sup = super().__enter__()
            for service in self.data['services']:
                service_definition = self.data['services'][service]
                if 'environment' in service_definition:

                    if 'TWO1_USERNAME' in service_definition['environment'] and self.username is not None:
                        service_definition['environment']['TWO1_USERNAME'] = self.username

                    if 'TWO1_PASSWORD' in service_definition['environment'] and self.password is not None:
                        service_definition['environment']['TWO1_PASSWORD'] = self.password

                    if 'TWO1_WALLET_MNEMONIC' in service_definition['environment'] and self.mnemonic is not None:
                        service_definition['environment']['TWO1_WALLET_MNEMONIC'] = self.mnemonic

                    if 'PAYMENT_SERVER_IP' in service_definition['environment'] and self.server_port is not None:
                        # Keep the scheme/host part (everything before the final
                        # ':') and swap in the new port.  PAYMENT_SERVER_IP is
                        # built as "http://host:port" elsewhere in this module.
                        # BUG FIX: previously indexed [-1], which kept only the
                        # old port and discarded the host entirely.
                        rest = service_definition['environment']['PAYMENT_SERVER_IP'].rsplit(':', maxsplit=1)[0]
                        service_definition['environment']['PAYMENT_SERVER_IP'] = '%s:%s' % (rest, self.server_port)
            return sup

        def _filler(self):
            """ Create the base service description file.
            """
            return {
                'version': '2',
                'services': {
                    'base': {
                        'image': '%s:base' % Two1Composer.DOCKERHUB_REPO,
                    },
                    'router': {
                        'image': '%s:router' % Two1Composer.DOCKERHUB_REPO,
                        'container_name': 'sell_router',
                        'restart': 'always',
                        'volumes': [
                            Two1Composer.SITES_ENABLED_PATH + ":/etc/nginx/sites-enabled",
                            Two1Composer.SITES_AVAILABLE_PATH + ":/etc/nginx/sites-available",
                        ],
                        'ports': ['%s:%s' % (self.server_port, self.server_port)],
                        'links': [
                            'payments:payments',
                        ],
                    },
                    'payments': {
                        'image': '%s:payments' % Two1Composer.DOCKERHUB_REPO,
                        'depends_on': ['base'],
                        'container_name': 'sell_payments',
                        'restart': 'always',
                        'environment': {
                            "TWO1_USERNAME": str(self.username),
                            "TWO1_PASSWORD": str(self.password),
                            "TWO1_WALLET_MNEMONIC": str(self.mnemonic)
                        },
                        'volumes': [
                            Two1Composer.DB_DIR + ":/usr/src/db/"
                        ],
                        'logging': {
                            'driver': 'json-file'
                        },
                        'cap_drop': [
                            'ALL'
                        ],
                        'cap_add': [
                            'DAC_OVERRIDE',
                            'NET_RAW',
                        ],
                    }
                }
            }

    # public api
    def connect(self, machine_env, host, machine_config_file):
        """ Connect service composer to machine layer.

        Args:
            machine_env (dict): Environment dictionary for the docker client of the machine layer.
            host (str): Hostname of the machine layer docker daemon.
            machine_config_file (str): Path to the config file for the machine layer.
        """
        self.machine_env = machine_env
        self.machine_host = host
        with open(machine_config_file, 'r') as f:
            self.machine_config = json.load(f)
        # NOTE(review): assert_hostname=False disables TLS hostname verification
        # on the docker connection — presumably required by the machine layer;
        # confirm this is intentional.
        self.docker_client = Client(**docker_env(assert_hostname=False,
                                                 environment=self.machine_env))
        self._connected = ComposerState.CONNECTED

    def initialize_server(self, username, password, server_port, wallet=None):
        """ Initialize micropayments server.

        Define boilerplate services, networks, and volumes composer file
        and nginx server config.

        Generates a wallet mnemonic if non-existent.

        Args:
            username (str): Username to log in with.
            password (str): Password to log in with.
            server_port (int): The server port that the router is running on.
            wallet: The wallet to use for the payments server and subsequent services.

        Returns:
            tuple: (0, new_wallet) where new_wallet is the freshly generated
                   mnemonic, or None if an existing mnemonic was kept.
        """
        self._create_base_server(server_port)  # create base router server config
        self._create_payments_route()  # create route to payments server

        new_wallet = None  # rv[1], not None if mnemonic is replaced in this function

        # generate service description (yaml)
        with self.ComposerYAMLContext(username, password, server_port) as composer_yaml:
            try:
                mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                if not mnemonic or mnemonic == str(None):  # if mnemonic is Falsy or uninitialized
                    raise ValueError()
            except (KeyError, ValueError):  # catches if mnemonic is Falsy or doesn't exist in dict tree
                # No usable mnemonic: create a new wallet and persist its mnemonic.
                new_machine_wallet = self.default_wallet.create(self.provider)[1]
                composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC'] = new_machine_wallet
                new_wallet = new_machine_wallet

        return 0, new_wallet

    def pull_image(self, image,
                   image_sucessfully_pulled_hook, image_failed_to_pull_hook, image_is_local_hook,
                   image_is_malformed_hook):
        """ Pulls an Image instance iff it is a Docker Hub image.

        Args:
            image (Image): An Image instance.
            image_sucessfully_pulled_hook (Callable): Run with the image on a successful
                pull. (Parameter name misspells "successfully"; renaming would break callers.)
            image_failed_to_pull_hook (Callable): Run with the image when the pull fails.
            image_is_local_hook (Callable): Run with the image when it is local-only.
            image_is_malformed_hook (Callable): Run with the image when it is neither
                a Docker Hub nor a local image.
        """
        if image.is_dockerhub_image:
            try:
                self.docker_client.pull('%s/%s' % (image.docker_hub_account, image.repository),
                                        tag=image.tag, stream=False)
            # NOTE(review): bare except routes every failure (including e.g.
            # KeyboardInterrupt) to the hook; consider narrowing.
            except:
                image_failed_to_pull_hook(image)
            else:
                image_sucessfully_pulled_hook(image)
        elif image.is_local_image:
            image_is_local_hook(image)
        else:
            image_is_malformed_hook(image)

    def start_services(self, service_names,
                       failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook, failed_to_up_hook,
                       up_hook):
        """ Start selected services.

        Args:
            service_names (list): List of service names to start.
            failed_to_start_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             fails to start.
            started_hook (Callable): A callable hook that takes in a service name and is run when said service starts.
            failed_to_restart_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               fails to restart.
            restarted_hook (Callable): A callable hook that takes in a service name and is run when said service
                                       restarts.
            failed_to_up_hook (Callable): A callable hook that takes in a service name and is run when said service
                                          fails to go up.
            up_hook (Callable): A callable hook that takes in a service name and is run when said service goes up.
        """
        # Bring up the 21 base services first; the router is restarted below so
        # that it picks up routes for the market services.
        self._start_sell_service('base', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('router', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('payments', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

        # Attempt to start all market services
        for service_name in service_names:
            try:
                image = self.ServiceManager.get_image(service_name)
                container_name = self.service_name_2_container_name(service_name)

                # create nginx routes for service_name
                self._create_service_route(service_name)
                # add service_name to docker compose file
                with self.ComposerYAMLContext() as docker_compose_yaml:
                    # Reuse the payments service's credentials for the new service.
                    username = docker_compose_yaml['services']['payments']['environment']['TWO1_USERNAME']
                    password = docker_compose_yaml['services']['payments']['environment']['TWO1_PASSWORD']
                    mnemonic = docker_compose_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                    docker_compose_yaml['services'][service_name] = {
                        'image': str(image),
                        'container_name': container_name,
                        'depends_on': ['base'],
                        'restart': 'always',
                        'environment': {
                            "TWO1_USERNAME": str(username),
                            "TWO1_PASSWORD": str(password),
                            "TWO1_WALLET_MNEMONIC": str(mnemonic),
                            "SERVICE": str(service_name),
                            "PAYMENT_SERVER_IP":
                                "http://%s:%s" % (self.machine_host, self.machine_config["server_port"])
                        },
                        'volumes': [
                            Two1Composer.DB_DIR + ":/usr/src/db/"
                        ],
                        'logging': {
                            'driver': 'json-file'
                        },
                        'cap_drop': [
                            'ALL'
                        ],
                        'cap_add': [
                            'DAC_OVERRIDE',
                            'NET_RAW',
                        ],
                    }
                    # Link the new service into the router (idempotently).
                    link_str = '%s:%s' % (service_name, service_name)
                    if link_str not in docker_compose_yaml['services']['router']['links']:
                        docker_compose_yaml['services']['router']['links'].append(link_str)
            # NOTE(review): bare except maps any configuration failure onto the
            # failed_to_start hook; consider narrowing.
            except:
                # something went wrong while configuring service_name
                failed_to_start_hook(service_name)
            else:
                # attempt to build service_name
                self._start_sell_service(service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        # Restart the router again so the newly added links take effect.
        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

    def _start_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook,
                            timeout=Two1Composer.SERVICE_START_TIMEOUT):
        """ Bring up one service with docker-compose and wait for it to respond.

        Args:
            service_name (str): Service to `docker-compose up`.
            failed_to_start_hook (Callable): Run with service_name if compose fails.
            started_hook (Callable): Run with service_name once compose succeeds.
            failed_to_up_hook (Callable): Run with service_name if the service
                                          doesn't answer within `timeout` seconds.
            up_hook (Callable): Run with service_name once the service answers.
            timeout (float): Seconds to wait for the service to come up.
        """
        try:
            subprocess.check_output(["docker-compose", "-f", Two1Composer.COMPOSE_FILE, "up", "-d", service_name],
                                    stderr=subprocess.DEVNULL, env=self.machine_env)
        except subprocess.CalledProcessError:
            failed_to_start_hook(service_name)
        else:
            started_hook(service_name)
            if service_name == 'router':
                time.sleep(5)
            elif service_name != 'base':
                # Probe the service through the router container until the curl
                # exec finishes or the timeout elapses.  time.monotonic() is a
                # wall clock; the previous time.clock() measured CPU time on
                # Unix (so the timeout was wrong) and was removed in Python 3.8.
                start = time.monotonic()

                exec_id = self.docker_client.exec_create('sell_router', "curl %s:5000" % service_name)['Id']
                self.docker_client.exec_start(exec_id)
                running = True

                while time.monotonic() - start < timeout and running is True:
                    time.sleep(0.1)  # avoid busy-waiting on the docker API
                    running = self.docker_client.exec_inspect(exec_id)['Running']

                if running is True:
                    failed_to_up_hook(service_name)
                else:
                    up_hook(service_name)

    def _restart_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_restart_hook,
                              restarted_hook, failed_to_up_hook, up_hook):
        """ Stop a sell service if it is running, then start it again.

        When the container was actually stopped, the restart hooks are used;
        when it was not running to begin with, the plain start hooks are used.
        """
        was_running = True
        try:
            self.docker_client.stop("sell_%s" % service_name)
        except:
            was_running = False

        if was_running:
            self._start_sell_service(service_name, failed_to_restart_hook, restarted_hook,
                                     failed_to_up_hook, up_hook)
        else:
            self._start_sell_service(service_name, failed_to_start_hook, started_hook,
                                     failed_to_up_hook, up_hook)

    def stop_services(self, service_names,
                      service_found_stopped_and_removed_hook,
                      service_failed_to_stop_hook,
                      service_failed_to_be_removed_hook,
                      service_not_found_hook):
        """ Stop selected services and remove containers.

        Args:
            service_names (set): Set of services to stop.
            service_found_stopped_and_removed_hook (Callable): A callable hook that takes in a service name and is run
                                                               when said service is found, stopped, and removed.
            service_failed_to_stop_hook (Callable): A callable hook that takes in a service name and is run when said
                                                    service fails to be stopped.
            service_failed_to_be_removed_hook (Callable): A callable hook that takes in a service name and is run when
                                                          said service fails to be removed.
            service_not_found_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               isn't found.

        """
        for service_name in service_names:
            if service_name in self.get_running_services():
                container_name = self.service_name_2_container_name(service_name)
                try:
                    self.docker_client.stop(container_name)
                # NOTE(review): bare excepts here convert any docker failure into
                # the corresponding hook call; consider narrowing to APIError.
                except:
                    service_failed_to_stop_hook(service_name)
                else:
                    try:
                        self.docker_client.remove_container(container_name)
                    except:
                        service_failed_to_be_removed_hook(service_name)
                    else:
                        service_found_stopped_and_removed_hook(service_name)
            else:
                service_not_found_hook(service_name)

    def silently_force_stop_all_services(self):
        """ Force-remove every currently running container, reporting nothing. """
        for running_container in self.docker_client.containers(filters={"status": "running"}):
            self.docker_client.remove_container(running_container, force=True)

    @staticmethod
    def container_names_2_service_names(container_definitions):
        """ Return service names from container definitions.

        See service_name_2_container_name for the inverse operation but on one service name.

        Args:
            container_definitions (list): List of container descriptions as returned by self.docker_client.containers.

        Returns:
            set: Set of service names generated by removing the leading '/sell_'
                 (6 characters) from each container's first name.
        """
        return {definition['Names'][0][6:] for definition in container_definitions}

    @staticmethod
    def service_name_2_container_name(service_name):
        """ Generates a container name from a service name by prepending 'sell_'
        """
        return 'sell_' + service_name

    def status_services(self, services):
        """ Gets running status of specified services.

        Args:
            services (iterable): Service names to get status for.  Any iterable
                is accepted; it is coerced to a set (previously a non-set
                argument raised TypeError on the `&`/`-` operators below).

        Returns:
            dict: Sets of service names keyed by "running", "exited", and
                  "nonexistent".
        """
        services = set(services)  # accept lists/tuples/etc., not only sets

        existent_services = self.get_services(all=True)
        running_services = self.get_services(filters={"status": "running"})
        exited_services = self.get_services(filters={"status": "exited"})

        return {
            "running": running_services & services,
            "exited": exited_services & services,
            "nonexistent": services - existent_services
        }

    def get_services(self, *args, **kwargs):
        """ Call docker_client.containers | convert resulting container names to service names | remove base services

        Args/kwargs are passed straight through to docker_client.containers
        (e.g. all=True, filters={"status": "running"}).

        Returns:
            set: Service names, excluding Two1Composer.BASE_SERVICES.
        """
        return self.container_names_2_service_names(
            self.docker_client.containers(*args, **kwargs)
        ) - Two1Composer.BASE_SERVICES

    def get_running_services(self):
        """ Get list of currently running services that aren't 21 base services.

        Returns:
            set: Set of currently running services.
        """
        running_only = {"status": "running"}
        return self.get_services(filters=running_only)

    def status_router(self, service_running_hook, service_unknown_state_hook):
        """ Get status of Nginx router container.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        # Exactly one running container named sell_router means the router is up.
        if len(self.docker_client.containers(all=True, filters={"name": "sell_router", "status": "running"})) == 1:
            service_running_hook("router")
        else:
            service_unknown_state_hook("router")

    def status_payments_server(self, service_running_hook, service_unknown_state_hook):
        """ Get status of payment channels server.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        # Exactly one running container named sell_payments means the server is up.
        if len(self.docker_client.containers(all=True, filters={"name": "sell_payments", "status": "running"})) == 1:
            service_running_hook("payments")
        else:
            service_unknown_state_hook("payments")

    @staticmethod
    def _create_base_server(server_port):
        """ Create nginx base server config.

        Args:
            server_port (int): port for 21 sell server.

        Raises:
            exceptions.Two1ComposerServiceDefinitionException: If any filesystem
                operation fails while (re)creating the nginx config.
        """
        try:
            # create nginx router dirs — wiped and recreated on every call
            shutil.rmtree(Two1Composer.SITES_ENABLED_PATH, ignore_errors=True)
            shutil.rmtree(Two1Composer.SITES_AVAILABLE_PATH, ignore_errors=True)
            os.makedirs(Two1Composer.SITES_ENABLED_PATH, exist_ok=True)
            os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)

            # create base nginx server
            with open(os.path.join(Two1Composer.SITES_ENABLED_PATH,
                                   "two1baseserver"), 'w') as f:
                f.write("server {\n"
                        "    listen " + str(server_port) + ";\n"
                        "    include /etc/nginx/sites-available/*;\n"
                        "}\n"
                        )
        except Exception:
            raise exceptions.Two1ComposerServiceDefinitionException()

    @staticmethod
    def _create_service_route(service):
        """ Create route for container service.

        Writes an nginx location block that proxies /<service> to the service
        container on port 5000.

        Args:
            service (str): Service name; used as both path prefix and upstream host.

        Raises:
            exceptions.Two1ComposerRouteException: If the route file cannot be written.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, service), 'w') as f:
                f.write("location /" + service + " {\n"
                        "    rewrite ^/" + service + "/?(.*) /$1 break;\n"
                        "    proxy_pass http://" + service + ":" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception:
            raise exceptions.Two1ComposerRouteException()

    @staticmethod
    def _create_payments_route():
        """ Add route to payments server.

        Writes an nginx location block that proxies /payment to the payments
        container on port 5000.

        Raises:
            exceptions.Two1ComposerRouteException: If the route file cannot be written.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            # write nginx route for payments server
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, "payments"), 'w') as f:
                f.write("location /payment {\n"
                        "    proxy_pass http://payments:" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception:
            raise exceptions.Two1ComposerRouteException()

    def publish_service(self, service_name, host_override, rest_client, published_hook,
                        already_published_hook, failed_to_publish_hook,
                        unknown_publish_error_hook):
        """ Publish a running service's manifest to the 21 marketplace.

        Extracts /usr/src/app/manifest.yaml from the service's container,
        overrides its 'host' field, and publishes it via rest_client; the
        outcome is reported through the given hooks.

        Args:
            service_name (str): Name of the (running) service to publish.
            host_override (str): Value written into the manifest's 'host' field.
            rest_client: Client exposing publish(); failures surface as ServerRequestError.
            published_hook (Callable): Run with service_name on HTTP 201.
            already_published_hook (Callable): Run with service_name on 403 + error code TO600.
            failed_to_publish_hook (Callable): Run with service_name on other publish failures.
            unknown_publish_error_hook (Callable): Run with service_name on any other exception.
        """
        strm, stat = self.docker_client.get_archive('sell_%s' % service_name,
                                                    '/usr/src/app/manifest.yaml')

        with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary tags and is deprecated in PyYAML >= 5.1 — consider
            # yaml.safe_load for this container-supplied file.
            manifest = yaml.load(tf.extractfile(stat[u'name']).read().decode())
        manifest['host'] = host_override

        try:
            resp = rest_client.publish({"manifest": manifest,
                                        "marketplace": "21mkt"})
        except ServerRequestError as e:
            if e.status_code == 403 and e.data.get("error") == "TO600":
                already_published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)
        # NOTE(review): bare except maps any other failure to the unknown-error hook.
        except:
            unknown_publish_error_hook(service_name)
        else:
            if resp.status_code == 201:
                published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)

    def read_server_config(self):
        """ Returns: The parsed compose file as a dict, or {} if it doesn't exist. """
        try:
            with open(Two1Composer.COMPOSE_FILE) as f:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated in PyYAML >= 5.1 — consider yaml.safe_load.
                return yaml.load(f)

        except FileNotFoundError:
            return {}

    def get_services_mnemonic(self):
        """ Return the wallet mnemonic stored in the compose file, or None.

        Returns:
            str or None: TWO1_WALLET_MNEMONIC from the payments service's
            environment, or None when the compose file or the key is absent.
        """
        if not os.path.isfile(Two1Composer.COMPOSE_FILE):
            return None
        with self.ComposerYAMLContext() as composer_yaml:
            try:
                return composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
            except KeyError:
                return None
Exemplo n.º 6
0
class AXDockerClient(object):
    """Wrapper around docker-py's Client adding AX conventions.

    Provides registry-aware pulls/pushes with retry, translation of ``ax_*``
    create/host-config options, and defensive stop/remove/cleanup helpers.
    """

    def __init__(self, url=DEFAULT_DOCKER_SOCK):
        self._conn = Client(base_url=url)
        # Host identifier used in log messages. Previously self._host was
        # never assigned anywhere in this class, so run()/exec_cmd() raised
        # AttributeError as soon as they logged.
        self._host = url
        # Retry policy for pull/push: retry on any exception until the
        # wrapped call returns a truthy value.
        self._retry = AXRetry(retry_exception=(Exception, ),
                              success_check=lambda x: x,
                              default=False,
                              success_default=True)

    @property
    def version(self):
        """Cached version information from the docker daemon."""
        if hasattr(self, '_version'):
            return self._version
        self._version = self._conn.version()
        return self._version

    @property
    def version_tuple(self):
        """Version tuple of docker daemon (e.g. (1, 11, 2))."""
        # NOTE(review): assumes a purely numeric dotted version; a suffix such
        # as "-rc1" would make int() raise — confirm against deployed daemons.
        return tuple([int(i) for i in self.version['Version'].split('.')])

    # Public APIs
    def start(self, registry, image, tag="latest", **kwargs):
        """
        Start a new container described by image.

        :param registry: The registry to use
        :type registry: DockerRegistry
        :param image: Full image name for this container.
        :param kwargs: Other args passed to container start.
        :return: container dict, or None on any failure.
        """
        assert "tag" not in kwargs
        # Use a real assertion message: the previous `assert x and "msg"`
        # made the string part of the condition, so no message was shown.
        assert registry is not None, "Cannot start a container without providing a DockerRegistry"

        if not self._pull_with_caching(registry, image, tag):
            return None

        full_image = registry.servername + "/" + image + ":" + tag
        container = self._create(full_image, **kwargs)
        if container is None:
            return None

        started = self._start(container)
        if started:
            return container
        else:
            # Don't leak a created-but-unstartable container.
            self._remove(container["Id"])
            return None

    def stop(self, container, **kwargs):
        """
        Stop specified container. Wrapper for docker API and handle exception.
        :param container: (string) Id or name of container.
        :param kwargs: Pass through kwargs for docker. Currently using only timeout.
        """
        if "timeout" not in kwargs:
            kwargs["timeout"] = 1
        self._stop(container, **kwargs)

    def remove(self, container, **kwargs):
        """
        Remove a container.
        :param container: (string) Id or name of container.
        :param kwargs: Pass through for docker.
        :return:
        """
        self._remove(container, **kwargs)

    def run(self, image, cmd, timeout=1200, **kwargs):
        """
        Run a command inside a container and check result.

        Container image is automatically pulled.
        Container will be stopped and removed after command.

        :param image: Container image to run.
        :param cmd: Command inside container. This overwrites docker "command" in kwargs
        :param timeout: Timeout to wait for container.
        :param kwargs: Dict for parameters. It includes AX parameters and pass through ones.
                       All AX parameters start with "ax_" and will be removed before passing to docker create.
                       Currently supported AX parameters:
                         - ax_net_host: Set network mode to "host"
                         - ax_privileged: Run container in privileged mode
        :return: Tuple of: (True/False, return code)
        """
        assert "tag" not in kwargs
        logger.debug("Run %s inside %s on host %s, kwargs %s", cmd, image,
                     self._host, kwargs)

        # Always overwrite command in kwargs.
        kwargs["command"] = cmd

        started = False
        container = {}
        rc = -1
        try:
            container = self._create(image, **kwargs)
            assert container, "Failed to create from %s, %s" % (image, kwargs)

            started = self._start(container)
            assert started, "Failed to start %s, %s" % (image, container)

            rc = self._conn.wait(container, timeout=timeout)
            assert rc == 0, "Command %s failed rc=%s" % (cmd, rc)

        except Exception:
            logger.exception("Failed to run %s in %s on %s", cmd, image,
                             self._host)
            return False, rc

        finally:
            # Best-effort cleanup regardless of success or failure.
            if started:
                self._stop(container["Id"], timeout=1)
            if container:
                self._remove(container["Id"])
        logger.debug("Completed run %s inside %s on %s, rc=%s", cmd, image,
                     self._host, rc)
        return True, rc

    def cache_image(self, registry, name, tag="latest"):
        """
        Cache the image to local registry

        :param registry: The registry to use
        :type registry: DockerRegistry
        :param name: name of repo
        :param tag: repo tag
        """
        fetcher = DockerImageFetcher()
        full_image = registry.servername + "/" + name + ":" + tag
        return fetcher.single_executor(full_image, self._pull_with_caching,
                                       registry, name, tag)

    def get_container_uuid(self, name):
        """
        Get UUID for a container, or None if it cannot be inspected.
        """
        try:
            info = self._conn.inspect_container(name)
        except Exception:
            info = {}
        return info.get("Id", None)

    def get_container_version(self, name):
        """
        Get image namespace and version for a running container

        Sample return:
        [
            "docker.example.com/lcj/axagent:latest",
            "docker.local/lcj/axagent:latest"
        ]
        """
        try:
            info = self._conn.inspect_container(name)
        except NotFound:
            return []
        image = info["Image"].split(":")[1]
        info = self._conn.inspect_image(image)
        return info["RepoTags"]

    def exec_cmd(self, container_id, cmd, **kwargs):
        """Executes a command inside a running container and returns its output on completion

        :param container_id: container id
        :param cmd: command to execute
        :return: output from the command, or None if the container is gone/not running
        """
        logger.debug("Executing %s in container %s (kwargs: %s)", cmd,
                     container_id, kwargs)
        try:
            exec_id = self._conn.exec_create(container_id, cmd, **kwargs)
            # exec_start accepts the dict returned by exec_create.
            response = self._conn.exec_start(exec_id)
            return response
        # Docker API can actually return either error at different time.
        except NotFound:
            logger.debug("Container %s not exist on host %s", container_id,
                         self._host)
        except APIError as e:
            if "not running" in str(e):
                logger.debug("Container %s not running on host %s",
                             container_id, self._host)
            else:
                raise

    def exec_kill(self, pid, exec_id=None, container_id=None, signal=None):
        """
        Kill a pid in a container. Optionally checks if exec session is still valid before killing.

        :param pid: pid to kill in the container.
        :param exec_id: perform kill only if exec id is still running.
        :param container_id: perform kill only if exec id is still running.
        :param signal: kill signal to send to process
        """
        if not any([exec_id, container_id]):
            raise ValueError("exec_id or container_id must be supplied")
        pid = int(pid)
        assert pid != -1, "Killing all processes prohibited"
        if exec_id is not None:
            if isinstance(exec_id, dict):
                exec_id = exec_id['Id']
            try:
                exec_info = self._conn.exec_inspect(exec_id)
            except APIError as e:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    "Failed to inspect exec session {} for killing. Skipping kill: {}"
                    .format(exec_id, str(e)))
                return
            if container_id:
                if container_id not in exec_info['ContainerID']:
                    raise ValueError(
                        "Supplied container id {} mismatched with exec container id: {}"
                        .format(container_id, exec_info['ContainerID']))
            else:
                container_id = exec_info['ContainerID']
            if not exec_info['Running']:
                logger.debug(
                    "Exec session {} no longer running. Skipping kill".format(
                        exec_id))
                return
        # perform kill; echo $? lets us recover the kill command's exit code
        # from the combined output.
        kill_cmd_args = ['kill']
        if signal:
            kill_cmd_args.append('-{}'.format(signal))
        kill_cmd_args.append(str(pid))
        kill_cmd = ' '.join(kill_cmd_args)
        response = self.exec_cmd(container_id,
                                 'sh -c "{} 2>&1; echo $?"'.format(kill_cmd))
        lines = response.splitlines()
        rc = int(lines[-1])
        if rc != 0:
            reason = lines[0] if len(lines) > 1 else "reason unknown"
            logger.warning("Failed to kill pid {} in container {}: {}".format(
                pid, container_id, reason))
        else:
            logger.debug("Successfully killed pid {} in container {}".format(
                pid, container_id))

    def containers(self, **kwargs):
        """Pass-through to docker's container listing."""
        return self._conn.containers(**kwargs)

    def stats(self, name, **kwargs):
        """Pass-through to docker's container stats."""
        return self._conn.stats(name, **kwargs)

    def clean_graph(self, age=86400):
        """
        Clean graph storage to remove old containers and any unreferenced docker image layers
        """
        # Exit time is in free form string. Parse it. And real coarse time granularity.
        pattern = ["month ago", "months ago", "year ago", "years ago"]
        if age >= SECONDS_PER_MINUTE:
            pattern += ["minutes ago", "minute ago"]
        if age >= SECONDS_PER_HOUR:
            pattern += ["hours ago", "hour ago"]
        if age >= SECONDS_PER_DAY:
            pattern += ["days ago", "day ago"]
        if age >= SECONDS_PER_WEEK:
            pattern += ["weeks ago", "week ago"]

        for c in self._conn.containers(filters={"status": "exited"}):
            if any([p in c["Status"] for p in pattern]):
                try:
                    self._remove(c)
                except Exception:
                    logger.exception("Failed to remove %s", c["Id"])

        for i in self._conn.images():
            # RepoTags may be None for dangling images on newer daemons;
            # treat that the same as the untagged "<none>:<none>" marker.
            tags = i.get("RepoTags") or ["<none>:<none>"]
            if tags[0] == "<none>:<none>" and time.time() > i["Created"] + age:
                try:
                    self._conn.remove_image(i["Id"])
                except Exception:
                    # This is probably OK.
                    logger.debug("Failed to delete %s", i["Id"])

    def search(self, searchstr=None):
        """Search docker hub; returns a list of {ctime, repo, tag} dicts."""
        if searchstr is None or searchstr == "":
            raise AXPlatformException(
                "Docker hub search string needs to a non-empty string")
        response = self._conn.search(searchstr)
        return [{
            "ctime": "",
            "repo": x['name'],
            "tag": "latest"
        } for x in response or []]

    def login(self, registry, username, password):
        """
        Returns a base64 encoded token of username and password
        only if login is successful else it raises exceptions
        """
        try:
            self._conn.login(username,
                             password=password,
                             registry=registry,
                             reauth=True)
        except APIError as e:
            code = e.response.status_code
            if code == 401:
                # on login failure it raises a docker.errors.APIError:
                # 401 Client Error: Unauthorized
                raise AXUnauthorizedException(e.explanation)
            elif code == 404:
                raise AXNotFoundException(e.explanation)
            elif code == 500:
                if "x509: certificate signed by unknown authority" in e.response.text:
                    raise AXIllegalArgumentException(
                        "Certificate signed by unknown authority for {}".
                        format(registry))
                else:
                    raise e
            else:
                raise e
        # b64encode requires bytes on Python 3; decode back to str so callers
        # keep receiving a text token.
        token = base64.b64encode(
            "{}:{}".format(username, password).encode()).decode()
        return token

    @staticmethod
    def generate_kubernetes_image_secret(registry, token):
        """
        Create the image pull secret by concatenating the secrets required
        for the passed token
        Args:
            registry: string
            token: base64 encoded

        Returns:
            base64 encoded string used for imagepull secrets
        """
        ret = {"auths": {registry: {"auth": token}}}
        # encode/decode for Python 3 compatibility (b64encode needs bytes).
        return base64.b64encode(json.dumps(ret).encode()).decode()

    # Internal implementations
    def _pull_with_caching(self, registry, name, tag, **kwargs):
        """
        Pull a new container with AX caching enabled.
        :param registry: DockerRegistry instance.
        :param name: Container short name.
        :param tag: Tag.
        :param kwargs: Other kwargs for pull.
                       Docker API requires tag to be in kwargs.
                       AX needs to process it and enforce tag to be separate.
        :return: True or False
        """
        assert "tag" not in kwargs, "%s" % kwargs

        if registry.user is not None and registry.passwd is not None:
            kwargs["auth_config"] = kwargs.get("auth_config", {
                "username": registry.user,
                "password": registry.passwd
            })

        return self._pull_with_retry(registry.servername, name, tag, **kwargs)

    def _pull_with_retry(self, registry, name, tag, **kwargs):
        """Pull with the class retry policy applied."""
        return ax_retry(self._pull, self._retry, registry, name, tag, **kwargs)

    def _pull(self, registry, name, tag, **kwargs):
        """
        Do pull. Call docker API and check errors.
        :param registry: Registry host name.
        :param name: Container short name.
        :param tag: Tag.
        :param kwargs: Other pull args.
        :return: True or False.
        """
        # All must be set not empty.
        assert all([registry, name, tag]), "%s, %s, %s" % (registry, name, tag)

        repo = DockerImage(registry=registry, name=name).docker_repo()
        kwargs["tag"] = tag
        try:
            ret = self._conn.pull(repo, stream=True, **kwargs)
        except Exception:
            logger.exception("Failed to pull %s, %s", repo, tag)
            return False

        logger.info("Pull image %s:%s starting", repo, tag)
        # Search pull result to determine status. Must have digest and success message.
        has_digest = False
        has_image = False
        try:
            for l in ret:
                try:
                    progress = json.loads(l)
                    if progress["status"].startswith("Digest:"):
                        has_digest = True
                    if "Image is up to date" in progress[
                            "status"] or "Downloaded newer image" in progress[
                                "status"]:
                        has_image = True
                except (KeyError, ValueError):
                    logger.debug("Failed to parse pull progress line %s", l)
        except Exception:
            logger.exception("Failed to pull %s:%s", repo, tag)
            return False
        logger.info("Pull image %s:%s result %s %s", repo, tag, has_digest,
                    has_image)
        return has_digest and has_image

    def _push_with_retry(self, registry, name, tag):
        """Push with the class retry policy applied."""
        return ax_retry(self._push, self._retry, registry, name, tag)

    def _push(self, registry, name, tag):
        """
        Do push. Call docker API and check errors.
        :param registry: Registry host name.
        :param name: Container short name.
        :param tag: Tag.
        :return: True or False.
        """
        # All must be set not empty.
        assert all([registry, name, tag]), "%s, %s, %s" % (registry, name, tag)

        repo = DockerImage(registry=registry, name=name).docker_repo()
        try:
            ret = self._conn.push(repo, tag, stream=True)
        except Exception:
            logger.exception("Failed to push %s, %s", repo, tag)
            return False

        logger.info("Push image %s:%s starting", repo, tag)
        # Search push result to determine status. Must have digest.
        has_digest = False
        try:
            for l in ret:
                try:
                    progress = json.loads(l)
                    has_digest = progress["status"].startswith("%s: digest:" %
                                                               tag)
                except (KeyError, ValueError):
                    logger.debug("Failed to parse push progress line %s", l)
        except Exception:
            logger.exception("Failed to push %s:%s", repo, tag)
            return False
        logger.info("Push image %s:%s result %s", repo, tag, has_digest)
        return has_digest

    def _create(self, image, **kwargs):
        """
        Create a new container.

        :param image: (string) Container image with tag
        :param kwargs: AX and docker parameters.
        :return: container or None
        """
        # Docker API has two levels of dict. Top level specify mostly "create" configs.
        # One key at first level is "host_config". This defines second level "run" configs.
        # It's yet another dict. It was specified in docker run API and moved here.
        # We need to set both levels correctly.
        self._validate_config(kwargs)

        kwargs = self._parse_ax_create_config(kwargs)
        kwargs = self._parse_ax_host_config(kwargs)
        kwargs = self._remove_ax_config(kwargs)
        logger.debug("Final kwargs for create %s: %s", image, kwargs)
        try:
            return self._conn.create_container(image, **kwargs)
        except Exception:
            logger.exception("Failed to create container from %s %s", image,
                             kwargs)
            return None

    def _start(self, container):
        """Start a created container; returns True on success."""
        try:
            self._conn.start(container)
            return True
        except Exception:
            logger.exception("Failed to start container %s", container)
            return False

    def _stop(self, container, **kwargs):
        """Stop a container, tolerating its absence."""
        try:
            self._conn.stop(container, **kwargs)
        except NotFound:
            pass
        except Exception:
            logger.exception("Failed to stop %s", container)

    def _remove(self, container, **kwargs):
        """Remove a container (and its anonymous volumes), tolerating races."""
        try:
            self._conn.remove_container(container, v=True, **kwargs)
        except NotFound:
            pass
        except APIError as e:
            if "Conflict" in str(e):
                logger.error("Not removing running container %s", container)
            elif "device or resource busy" in str(e):
                # Work around https://github.com/google/cadvisor/issues/771
                logger.error("Container removal temporary failure. Retrying.")
                retry = AXRetry(retries=10,
                                delay=1,
                                retry_exception=(Exception, ),
                                success_exception=(NotFound, ))
                ax_retry(self._conn.remove_container,
                         retry,
                         container,
                         v=True,
                         force=True)
            else:
                logger.exception("Failed to remove container %s", container)
        except Exception:
            logger.exception("Failed to remove container %s", container)

    def _validate_config(self, config):
        """Sanity-check the types of user-supplied create/host configs."""
        if "volumes" in config:
            assert isinstance(
                config["volumes"],
                list), "Support only list of volumes %s" % config["volumes"]
        if "host_config" in config and "Binds" in config["host_config"]:
            # Was config["host"]["Binds"], a key that never exists — the
            # assert raised KeyError instead of validating host_config.
            assert isinstance(
                config["host_config"]["Binds"], list
            ), "Support only list of volumes %s" % config["host_config"]["Binds"]
        if "ports" in config:
            assert isinstance(
                config["ports"],
                list), "Support only list of ports %s" % config["ports"]
        if "host_config" in config and "PortBindings" in config["host_config"]:
            # create_host_config emits the CamelCase "PortBindings" key; the
            # old lowercase "port_bindings" check never matched, and its
            # assert read the nonexistent config["host"].
            assert isinstance(
                config["host_config"]["PortBindings"],
                dict), "Support only dict of port_bindings %s" % config[
                    "host_config"]["PortBindings"]
        if "environment" in config:
            assert isinstance(
                config["environment"], list
            ), "Support only list of environments %s" % config["environment"]

    def _parse_ax_create_config(self, config):
        """Translate ax_* options into docker create-level config keys."""
        if config.get("ax_daemon", False):
            config["detach"] = True

        if "ax_volumes" in config:
            # ax_volumes entries are "host:container[:mode]"; the create-level
            # "volumes" list wants only the container paths.
            axv = [v.split(":")[1] for v in config["ax_volumes"]]
            if "volumes" in config:
                assert isinstance(config["volumes"],
                                  list), "must be list {}".format(
                                      config["volumes"])
                config["volumes"] += axv
            else:
                config["volumes"] = axv

        if "ax_ports" in config:
            # list() because dict.keys() is a view on Python 3 and docker-py
            # expects a real list of exposed ports.
            config["ports"] = list(config["ax_ports"].keys())

        return config

    def _parse_ax_host_config(self, config):
        """Translate ax_* options into a docker host_config dict."""
        ax_config = {}
        if config.get("ax_net_host", False):
            ax_config["network_mode"] = "host"

        if config.get("ax_privileged", False):
            ax_config["privileged"] = True

        if config.get("ax_host_namespace", False):
            ax_config["pid_mode"] = "host"

        if config.get("ax_daemon", False):
            ax_config["restart_policy"] = {
                "MaximumRetryCount": 0,
                "Name": "unless-stopped"
            }

        if "ax_volumes" in config:
            if "binds" in ax_config:
                assert isinstance(ax_config["binds"],
                                  list), "must be list {}".format(
                                      ax_config["binds"])
                ax_config["binds"] += config["ax_volumes"]
            else:
                ax_config["binds"] = config["ax_volumes"]

        if "ax_ports" in config:
            ax_config["port_bindings"] = config["ax_ports"]

        ax_host_config = self._conn.create_host_config(**ax_config)
        if "host_config" in config:
            config["host_config"].update(ax_host_config)
        else:
            config["host_config"] = ax_host_config
        return config

    def _remove_ax_config(self, config):
        """Strip all ax_* keys before handing config to docker-py."""
        # Iterate over a snapshot: deleting while iterating a dict view
        # raises RuntimeError on Python 3.
        for key in list(config):
            if key.startswith("ax_"):
                del config[key]
        return config
Exemplo n.º 7
0
class Two1ComposerContainers(Two1Composer):
    """ Manage machine-payable microservices in containers.
    """

    def __init__(self):
        """ Set up the wallet provider and default wallet; start disconnected. """
        self.provider = TwentyOneProvider()
        self.default_wallet = Two1Wallet(self.wallet_file, self.provider)
        self._connected = ComposerState.DISCONNECTED

    # public api
    def connect(self, machine_env, host, machine_config_file):
        """ Connect service composer to machine layer.

        Args:
            machine_env (dict): Environment dictionary for the docker client of the machine layer
            host: Hostname of the machine layer docker daemon
            machine_config_file (str): Path to the config file for the machine layer
        """
        self.machine_env = machine_env
        self.machine_host = host
        with open(machine_config_file, 'r') as config_file:
            self.machine_config = json.load(config_file)
        client_kwargs = docker_env(assert_hostname=False,
                                   environment=self.machine_env)
        self.docker_client = Client(**client_kwargs)
        self._connected = ComposerState.CONNECTED

    def initialize_server(self, username, password, server_port, wallet=None):
        """ Initialize micropayments server.

        Define boilerplate services, networks, and volumes composer file
        and nginx server config.

        Generates a wallet mnemonic if non-existent.

        Args:
            username (str): Username to log in with
            password (str): Password to log in with
            server_port (int): The server port that the router is running on
            wallet: The wallet to use for the payments server and subsequent services

        Returns:
            tuple: (0, new_wallet) where new_wallet is the freshly generated
                   mnemonic, or None if an existing one was kept.
        """
        self._create_base_server(server_port)  # create base router server config
        self._create_payments_route()  # create route to payments server

        new_wallet = None  # set only if the mnemonic is (re)generated here

        # generate service description (yaml)
        with self.ComposerYAMLContext(username, password, server_port) as composer_yaml:
            try:
                mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
            except KeyError:  # mnemonic entry missing from the dict tree
                mnemonic = None
            if not mnemonic or mnemonic == str(None):  # Falsy or uninitialized
                new_machine_wallet = self.default_wallet.create(self.provider)[1]
                composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC'] = new_machine_wallet
                new_wallet = new_machine_wallet

        return 0, new_wallet

    def list_services(self):
        """ List available services to sell.

        Queries the 21 DockerHub repo's tag list and returns the names of all
        'service-*' image tags with the prefix stripped.

        Returns:
            list: Unique service names (order unspecified).
        """
        tags_url = os.path.join(Two1Composer.DOCKERHUB_API_URL,
                                Two1Composer.DOCKERHUB_REPO, 'tags')
        # Guard against a missing 'results' key, which previously made the
        # comprehension raise TypeError on None.
        service_image_data = requests.get(tags_url).json().get('results') or []
        valid_service_names = {image_data['name'].split('service-')[1]
                               for image_data in service_image_data
                               if image_data['name'].startswith('service-')}
        return list(valid_service_names)

    def pull_latest_images(self, images):
        """ Pull latest images from 21 DockerHub.

        Args:
            images (list): List of image tags to pull from the 21 DockerHub.

        Returns:
            int: 0 on completion.
        """
        for tag in images:
            self.docker_client.pull(Two1Composer.DOCKERHUB_REPO, tag, stream=False)
        return 0

    def start_services(self, services, failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                       failed_to_up_hook, up_hook):
        """ Start selected services.

        Brings up the infrastructure services (base, router, payments) in
        dependency order, then registers and starts each requested market
        service, restarting the router so it picks up the new routes.

        Args:
            services (list): List of services to start.
            failed_to_start_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             fails to start.
            started_hook (Callable): A callable hook that takes in a service name and is run when said service starts.
            failed_to_restart_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               fails to restart.
            restarted_hook (Callable): A callable hook that takes in a service name and is run when said service
                                       restarts.
            failed_to_up_hook (Callable): A callable hook that takes in a service name and is run when said service
                                          fails to go up.
            up_hook (Callable): A callable hook that takes in a service name and is run when said service goes up.

        Returns:
            dict: Dictionary with service as key and value as dictionary.
                  Inner dictionary has format {"started": bool, "message": str, "order": int}.

        Raises:

        """
        # Infrastructure must come up in this order: shared base image first,
        # then the nginx router, then the payments server.
        self._start_sell_service('base', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('router', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
        self._start_sell_service('payments', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        # Restart the router so it picks up the payments route just created.
        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

        # Attempt to start all market services
        for service_name in services:
            # create nginx routes for service_name
            self._create_service_route(service_name)
            # add service_name to docker compose file
            with self.ComposerYAMLContext() as docker_compose_yaml:
                # Propagate the payments service's credentials/mnemonic into
                # every market service's environment.
                username = docker_compose_yaml['services']['payments']['environment']['TWO1_USERNAME']
                password = docker_compose_yaml['services']['payments']['environment']['TWO1_PASSWORD']
                mnemonic = docker_compose_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
                docker_compose_yaml['services'][service_name] = {
                    'image': '%s:%s' % (Two1Composer.DOCKERHUB_REPO, 'service-' + service_name),
                    'container_name': 'sell_%s' % service_name,
                    'depends_on': ['base'],
                    'restart': 'always',
                    'environment': {
                        "TWO1_USERNAME": str(username),
                        "TWO1_PASSWORD": str(password),
                        "TWO1_WALLET_MNEMONIC": str(mnemonic),
                        "SERVICE": str(service_name),
                        "PAYMENT_SERVER_IP": "http://%s:%s" % (self.machine_host, self.machine_config["server_port"])
                    },
                    'volumes': [
                        Two1Composer.DB_DIR + ":/usr/src/db/"
                    ],
                    'logging': {
                        'driver': 'json-file'
                    },
                    # Drop all capabilities, then add back only the minimum
                    # the service containers need.
                    'cap_drop': [
                        'ALL'
                    ],
                    'cap_add': [
                        'DAC_OVERRIDE',
                        'NET_RAW',
                    ],
                ]
                link_str = '%s:%s' % (service_name, service_name)
                if link_str not in docker_compose_yaml['services']['router']['links']:
                    docker_compose_yaml['services']['router']['links'].append(link_str)

            # attempt to build service_name
            self._start_sell_service(service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)

        # Final router restart so it links to the newly added services.
        self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
                                   failed_to_up_hook, up_hook)

    def _start_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook,
                            timeout=Two1Composer.SERVICE_START_TIMEOUT):
        """ Start one service via docker-compose and verify it comes up.

        Args:
            service_name (str): Service to start.
            failed_to_start_hook (Callable): Run if `docker-compose up` fails.
            started_hook (Callable): Run once the container is started.
            failed_to_up_hook (Callable): Run if the service never answers
                                          within `timeout` seconds.
            up_hook (Callable): Run once the service answers the probe.
            timeout (int): Seconds to wait for the service to come up.
        """
        try:
            subprocess.check_output(["docker-compose", "-f", Two1Composer.COMPOSE_FILE, "up", "-d", service_name],
                                    stderr=subprocess.DEVNULL, env=self.machine_env)
        except subprocess.CalledProcessError:
            failed_to_start_hook(service_name)
        else:
            started_hook(service_name)
            if service_name == 'router':
                time.sleep(5)
            elif service_name != 'base':
                # Probe the service through the router: the curl exec session
                # stays "Running" until the service answers (or curl gives up).
                # time.monotonic() replaces time.clock(), which was removed in
                # Python 3.8 and measured CPU time (not wall time) on Linux,
                # making the timeout unreliable.
                start = time.monotonic()

                exec_id = self.docker_client.exec_create('sell_router', "curl %s:5000" % service_name)['Id']
                self.docker_client.exec_start(exec_id)
                running = True

                while time.monotonic() - start < timeout and running:
                    time.sleep(0.1)  # avoid busy-waiting on the docker API
                    running = self.docker_client.exec_inspect(exec_id)['Running']

                if running:
                    failed_to_up_hook(service_name)
                else:
                    up_hook(service_name)

    def _restart_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_restart_hook,
                              restarted_hook, failed_to_up_hook, up_hook):
        """ Stop a service if it is running, then start it again.

        Chooses the restart-flavored hooks when the container existed and was
        stopped, and the plain start hooks otherwise.

        Args:
            service_name (str): Service to restart.
            (hooks): See `_start_sell_service`.
        """
        try:
            self.docker_client.stop("sell_%s" % service_name)
        except Exception:
            # Was a bare `except:`; the container most likely doesn't exist
            # yet, so treat this as a fresh start rather than a restart.
            is_restart = False
        else:
            is_restart = True

        self._start_sell_service(service_name, failed_to_restart_hook if is_restart else failed_to_start_hook,
                                 restarted_hook if is_restart else started_hook, failed_to_up_hook, up_hook)

    def stop_services(self, services,
                      service_found_stopped_and_removed_hook,
                      service_failed_to_stop_hook,
                      service_failed_to_be_removed_hook,
                      service_not_found_hook):
        """ Stop selected services and remove their containers.

        Args:
            services (list): List of services to stop.
            service_found_stopped_and_removed_hook (Callable): called with a service name when said service is
                                                               found, stopped, and removed.
            service_failed_to_stop_hook (Callable): called with a service name when said service fails to be
                                                    stopped.
            service_failed_to_be_removed_hook (Callable): called with a service name when said service fails to
                                                          be removed.
            service_not_found_hook (Callable): called with a service name when said service isn't found.
                NOTE(review): this hook is accepted but never invoked below —
                confirm whether requested-but-not-running services should be
                reported through it.
        """
        running_container_names = self.docker_client.containers(filters={"status": "running"})
        for container_name in running_container_names:
            running_service_name = list(self.names_from_containers([container_name]))[0]
            if running_service_name not in services:
                continue
            try:
                self.docker_client.stop(container_name)
            except Exception:
                # narrowed from a bare `except:`; report and move on
                service_failed_to_stop_hook(running_service_name)
                continue
            # container stopped; now try to remove it
            try:
                self.docker_client.remove_container(container_name)
            except Exception:
                service_failed_to_be_removed_hook(running_service_name)
            else:
                service_found_stopped_and_removed_hook(running_service_name)

    def silently_force_stop_all_services(self):
        """Force-remove every running container without invoking any hooks."""
        for running_container in self.docker_client.containers(filters={"status": "running"}):
            self.docker_client.remove_container(running_container, force=True)

    @staticmethod
    def names_from_containers(containers):
        """ Return names from containers.

        Args:
            containers (list): List of containers as returned by self.docker_client.containers
        """
        return frozenset([service['Names'][0][6:] for service in containers])

    def status_services(self, services,
                        service_nonexistent_hook,
                        service_running_hook,
                        service_exited_hook,
                        service_unknown_state_hook):
        """ Report the running status of each requested service via hooks.

        For each service exactly one hook fires, checked in this order:
        running, exited, exists-but-unknown-state, nonexistent.

        Args:
            services (list): List of services to get status for.
            service_nonexistent_hook (Callable): called with a service name when said service does not exist.
            service_running_hook (Callable): called with a service name when said service is running.
            service_exited_hook (Callable): called with a service name when said service has exited.
            service_unknown_state_hook (Callable): called with a service name when said service exists but is in
                                                   an unknown state.
        """
        existent_services = self.names_from_containers(self.docker_client.containers(all=True))
        running_services = self.names_from_containers(self.docker_client.containers(filters={"status": "running"}))
        exited_services = self.names_from_containers(self.docker_client.containers(filters={"status": "exited"}))

        # most-specific first: running beats exited beats merely-existent
        dispatch = ((running_services, service_running_hook),
                    (exited_services, service_exited_hook),
                    (existent_services, service_unknown_state_hook))

        for service_name in services:
            for name_set, hook in dispatch:
                if service_name in name_set:
                    hook(service_name)
                    break
            else:
                service_nonexistent_hook(service_name)

    def get_running_services(self):
        """ Get the list of running services.

        Returns:
            (list) names of running containers, excluding the base services
        """
        running = set(self.names_from_containers(
            self.docker_client.containers(filters={"status": "running"})))
        return list(running - set(Two1Composer.BASE_SERVICES))

    def status_router(self, service_running_hook, service_unknown_state_hook):
        """ Report the status of the Nginx router container via hooks.

        Args:
            service_running_hook (Callable): called with "router" when exactly one running router container
                                             exists.
            service_unknown_state_hook (Callable): called with "router" otherwise.
        """
        matching = self.docker_client.containers(all=True, filters={"name": "sell_router", "status": "running"})
        hook = service_running_hook if len(matching) == 1 else service_unknown_state_hook
        hook("router")

    def status_payments_server(self, service_running_hook, service_unknown_state_hook):
        """ Report the status of the payment channels server via hooks.

        Args:
            service_running_hook (Callable): called with "payments" when exactly one running payments container
                                             exists.
            service_unknown_state_hook (Callable): called with "payments" otherwise.
        """
        matching = self.docker_client.containers(all=True, filters={"name": "sell_payments", "status": "running"})
        hook = service_running_hook if len(matching) == 1 else service_unknown_state_hook
        hook("payments")

    @staticmethod
    def _create_base_server(server_port):
        """ Create the nginx base server config.

        Rebuilds the sites-enabled/sites-available directories from a clean
        slate and writes a catch-all server block listening on server_port.

        Args:
            server_port (int): port for 21 sell server.

        Raises:
            exceptions.Two1ComposerServiceDefinitionException: if any
                filesystem operation fails (original error kept as __cause__).
        """
        try:
            # recreate nginx router dirs from scratch
            shutil.rmtree(Two1Composer.SITES_ENABLED_PATH, ignore_errors=True)
            shutil.rmtree(Two1Composer.SITES_AVAILABLE_PATH, ignore_errors=True)
            os.makedirs(Two1Composer.SITES_ENABLED_PATH, exist_ok=True)
            os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)

            # create base nginx server
            with open(os.path.join(Two1Composer.SITES_ENABLED_PATH,
                                   "two1baseserver"), 'w') as f:
                f.write("server {\n"
                        "    listen " + str(server_port) + ";\n"
                        "    include /etc/nginx/sites-available/*;\n"
                        "}\n"
                        )
        except Exception as err:
            # chain the underlying error so the real cause isn't lost
            raise exceptions.Two1ComposerServiceDefinitionException() from err

    @staticmethod
    def _create_service_route(service):
        """ Write the nginx location block routing /<service> to the service
        container on port 5000.

        Args:
            service (str): service name (also its hostname on the docker
                network).

        Raises:
            exceptions.Two1ComposerRouteException: if the route file cannot be
                written (original error kept as __cause__).
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, service), 'w') as f:
                f.write("location /" + service + " {\n"
                        "    rewrite ^/" + service + "(.*) /$1 break;\n"
                        "    proxy_pass http://" + service + ":" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception as err:
            # chain the underlying error so the real cause isn't lost
            raise exceptions.Two1ComposerRouteException() from err

    @staticmethod
    def _create_payments_route():
        """ Write the nginx location block routing /payment to the payments
        server on port 5000.

        Raises:
            exceptions.Two1ComposerRouteException: if the route file cannot be
                written (original error kept as __cause__).
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            # write nginx route for payments server
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, "payments"), 'w') as f:
                f.write("location /payment {\n"
                        "    proxy_pass http://payments:" + str(5000) + ";\n"
                        "    proxy_set_header Host $host;\n"
                        "    proxy_set_header X-Real-IP $remote_addr;\n"
                        "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                        "}\n")
        except Exception as err:
            # chain the underlying error so the real cause isn't lost
            raise exceptions.Two1ComposerRouteException() from err

    def publish_service(self, service_name, rest_client, published_hook, already_published_hook, failed_to_publish_hook,
                        unknown_publish_error_hook):
        """ Publish a service's manifest to the 21 marketplace.

        Extracts manifest.yaml from the running container and POSTs it via
        rest_client. Exactly one of the hooks is invoked with service_name.
        """
        strm, stat = self.docker_client.get_archive('sell_%s' % service_name, '/usr/src/app/manifest.yaml')

        with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects; the manifest comes from our own
            # container, but consider yaml.safe_load if the image is not
            # fully trusted.
            manifest = yaml.load(tf.extractfile(stat[u'name']).read().decode())

        try:
            resp = rest_client.publish({"manifest": manifest,
                                        "marketplace": "21mkt"})
        except ServerRequestError as e:
            # TO600 means the service is already published
            if e.status_code == 403 and e.data.get("error") == "TO600":
                already_published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)
        except Exception:
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            unknown_publish_error_hook(service_name)
        else:
            if resp.status_code == 201:
                published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)

    def read_server_config(self):
        """Return the parsed docker-compose config, or {} if the file is absent."""
        try:
            config_file = open(Two1Composer.COMPOSE_FILE)
        except FileNotFoundError:
            return {}
        with config_file:
            return yaml.load(config_file)

    def get_services_mnemonic(self):
        """Return the wallet mnemonic from the compose file, or None if the
        file or the key is missing."""
        if not os.path.isfile(Two1Composer.COMPOSE_FILE):
            return None
        with self.ComposerYAMLContext() as composer_yaml:
            try:
                return composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
            except KeyError:
                return None
Exemplo n.º 8
0
class DockerCluster(object):
    # repo/name prefix for images built by this test harness
    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'
    # NOTE(review): this string literal is not the first statement in the
    # class body, so it is a no-op expression rather than the class __doc__.
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.

    """
    def __init__(self, master_host, slave_hosts, local_mount_dir,
                 docker_mount_dir):
        """Set up host naming, mount roots, and a docker client."""
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        # unique "outside facing" names: the random UUID suffix lets several
        # clusters coexist on one docker daemon
        self.master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [
            slave + '-' + str(uuid.uuid4()) for slave in slave_hosts
        ]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self.mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            # test daemons often use self-signed certs whose hostname would
            # fail verification
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 240
        self.client = Client(**kwargs)

        self._DOCKER_START_TIMEOUT = 30
        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        """Return every unique container name in the cluster, master last."""
        return self.slaves + [self.master]

    def get_master(self):
        """Return the unique (UUID-suffixed) master container name."""
        return self.master

    def all_internal_hosts(self):
        """The difference between this method and all_hosts() is that
        all_hosts() returns the unique, "outside facing" hostnames that
        docker uses. On the other hand all_internal_hosts() returns the
        more human readable host aliases for the containers used internally
        between containers. For example the unique master host will
        look something like 'master-07d1774e-72d7-45da-bf84-081cfaa5da9a',
        whereas the internal master host will be 'master'.

        Returns:
            List of all internal hosts with the random suffix stripped out.
        """
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        """Return the host-side mount directory for the given container."""
        return os.path.join(self.local_mount_dir, self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        """Return the dist dir, per-cluster when `unique` is truthy."""
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        """Map an internal host alias to its unique container name."""
        matches = [
            unique_host for unique_host in self.all_hosts()
            if unique_host.startswith(host)
        ]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        """Exit the process with install instructions if docker is missing."""
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def create_image(self,
                     path_to_dockerfile_dir,
                     image_tag,
                     base_image,
                     base_image_tag=None):
        """Build image_tag from a Dockerfile dir, fetching the base first."""
        self.fetch_image_if_not_present(base_image, base_image_tag)
        output = self._execute_and_wait(self.client.build,
                                        path=path_to_dockerfile_dir,
                                        tag=image_tag,
                                        rm=True)
        if not self._is_image_present_locally(image_tag, 'latest'):
            raise OSError('Unable to build image %s: %s' % (image_tag, output))

    def fetch_image_if_not_present(self, image, tag=None):
        """Pull image (optionally at tag) unless it is already local."""
        if not tag and not self.client.images(image):
            self._execute_and_wait(self.client.pull, image)
        elif tag and not self._is_image_present_locally(image, tag):
            self._execute_and_wait(self.client.pull, image, tag)

    def _is_image_present_locally(self, image_name, tag):
        """Return True iff image_name:tag appears in the local image list."""
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                if image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self,
                         master_image,
                         slave_image=None,
                         cmd=None,
                         **kwargs):
        """Tear down any previous cluster, then create and start containers."""
        self.tear_down()
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image, cmd,
                                          **kwargs)
        self._ensure_docker_containers_started(master_image)

    def tear_down(self):
        """Remove all cluster containers and their host mount directories."""
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()

    def _tear_down_container(self, container_name):
        """Stop and force-remove one container, tolerating its absence."""
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        """Stop a container and block until it has exited."""
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        """Start an already-created container."""
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        # identity mapping here; subclasses may override
        return host_name

    def _remove_host_mount_dirs(self):
        """Delete each container's local mount dir, ignoring missing ones."""
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        """Create each container's local mount dir, tolerating existing ones."""
        for container_name in self.all_hosts():
            try:
                os.makedirs(self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        """Drain the stream returned by func so the operation completes."""
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        # NOTE(review): on Python 3 docker-py streams may yield bytes, which
        # would make `output += line` raise — confirm the expected runtime.
        output = ''
        for line in ret:
            output += line
        return output

    def _create_and_start_containers(self,
                                     master_image,
                                     slave_image=None,
                                     cmd=None,
                                     **kwargs):
        """Create and start slave containers first, then the linked master."""
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(slave_image, container_name,
                                       container_name.split('-')[0], cmd)
                self.client.start(container_name,
                                  binds={
                                      container_mount_dir: {
                                          'bind': self.mount_dir,
                                          'ro': False
                                      }
                                  },
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(master_image,
                               self.master,
                               hostname=self.internal_master,
                               cmd=cmd)
        # NOTE(review): on Python 3 zip() returns a lazy iterator, not a
        # list — confirm docker-py accepts an iterable of link pairs here.
        self.client.start(
            self.master,
            binds={master_mount_dir: {
                'bind': self.mount_dir,
                'ro': False
            }},
            links=zip(self.slaves, self.slaves),
            **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self,
                          image,
                          container_name,
                          hostname=None,
                          cmd=None):
        """Create (but do not start) one container with the shared mount."""
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               mem_limit='2g')

    def _add_hostnames_to_slaves(self):
        """Append each internal host's IP/alias pair to every slave's /etc/hosts."""
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            # NOTE(review): 'bin/bash' is a relative path — presumably this
            # should be '/bin/bash'; it only resolves if the exec working
            # directory is '/'. Verify.
            self.exec_cmd_on_host(
                host, 'bin/bash -c \'echo "%s" >> /etc/hosts\'' %
                additions_to_etc_hosts)

    def _ensure_docker_containers_started(self, image):
        """Poll once per second until every container (and, for CentOS-based
        images, its in-container services) is up, or the timeout elapses."""
        centos_based_images = [BASE_TD_IMAGE_NAME]

        timeout = 0
        is_host_started = {}
        for host in self.all_hosts():
            is_host_started[host] = False
        while timeout < self._DOCKER_START_TIMEOUT:
            for host in self.all_hosts():
                atomic_is_started = True
                atomic_is_started &= \
                    self.client.inspect_container(host)['State']['Running']
                if image in centos_based_images or \
                        image.startswith(self.IMAGE_NAME_BASE):
                    atomic_is_started &= \
                        self._are_centos_container_services_up(host)
                is_host_started[host] = atomic_is_started
            if not DockerCluster._are_all_hosts_started(is_host_started):
                timeout += 1
                sleep(1)
            else:
                break
        # NOTE(review): identity comparison of ints only works because
        # CPython caches small integers; this should be `==` (or `>=`).
        if timeout is self._DOCKER_START_TIMEOUT:
            raise DockerClusterException(
                'Docker container timed out on start.' + str(is_host_started))

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        """Return True iff every value in the host->started map is truthy."""
        all_started = True
        for host in host_started_map.keys():
            all_started &= host_started_map[host]
        return all_started

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(host,
                                                'grep app-admin /etc/passwd')
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, raise_error=True, tty=False):
        """Run cmd in the container via docker exec and return its output.

        Raises:
            OSError: when the command exits non-zero and raise_error is set.
        """
        ex = self.client.exec_create(self.__get_unique_host(host),
                                     cmd,
                                     tty=tty)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_master_image_name(cluster_type):
        """Return the image name for a cluster type's master container."""
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            '%s_master' % (cluster_type))

    @staticmethod
    def _get_slave_image_name(cluster_type):
        """Return the image name for a cluster type's slave containers."""
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            '%s_slave' % (cluster_type))

    @staticmethod
    def start_bare_cluster():
        """Build (if needed) and start the 3-slave 'bare' cluster."""
        dc = DockerCluster
        master_name = dc._get_master_image_name(dc.BARE_CLUSTER_TYPE)
        slave_name = dc._get_slave_image_name(dc.BARE_CLUSTER_TYPE)
        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        if not dc._check_for_images(master_name, slave_name):
            centos_cluster.create_image(BASE_TD_DOCKERFILE_DIR, master_name,
                                        BASE_IMAGE_NAME, BASE_IMAGE_TAG)

            centos_cluster.create_image(BASE_TD_DOCKERFILE_DIR, slave_name,
                                        BASE_IMAGE_NAME, BASE_IMAGE_TAG)

        centos_cluster.start_containers(master_name, slave_name)

        return centos_cluster

    @staticmethod
    def start_existing_images(cluster_type):
        """Start a cluster from already-built images; None if images absent."""
        dc = DockerCluster
        master_name = dc._get_master_image_name(cluster_type)
        slave_name = dc._get_slave_image_name(cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            return None

        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        centos_cluster.start_containers(master_name, slave_name)
        return centos_cluster

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name):
        """Return True iff both the master and slave images exist locally."""
        client = Client(timeout=180)
        images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            if master_image_name in image['RepoTags'][0]:
                has_master_image = True
            if slave_image_name in image['RepoTags'][0]:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, cluster_type):
        """Snapshot the master and first slave as reusable images."""
        self.client.commit(self.master,
                           self._get_master_image_name(cluster_type))
        self.client.commit(self.slaves[0],
                           self._get_slave_image_name(cluster_type))

    def run_script_on_host(self, script_contents, host):
        """Copy a bash script into the container and execute it with a tty."""
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=True)

    def write_content_to_host(self, content, path, host):
        """Write content to path inside host via the shared mount dir."""
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        # the file is now visible inside the container under mount_dir;
        # copy it to its final destination
        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host,
            'cp %s %s' % (os.path.join(self.mount_dir, filename), dest_dir))

    def copy_to_host(self, source_path, dest_host):
        """Copy a local file into dest_host via its shared mount dir."""
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        """Map both unique and internal host names to container IPs."""
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        """Give each worker a fresh node.id after a Presto install."""
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties', worker)

    def postinstall(self, installer):
        """Run the installer-specific post-install hook, if one is registered."""
        # imported here to avoid a circular import at module load time
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)
Exemplo n.º 9
0
class DockerControl:
    """Lifecycle control (create/start/stop/remove) for one container
    described by `containerObject`, including exposing its network namespace
    under /var/run/netns so `ip netns` can address it."""
    def __init__(self, containerObject):
        # domain object describing the desired container (name, type, domain,
        # dns, ...) — schema defined elsewhere in the project
        self.containerObject = containerObject
        self.dockerCli = Client(base_url='unix://var/run/docker.sock')

    def remove(self):
        """Stop and remove the container matching containerObject.name."""
        # NOTE(review): labelString/labelDict/label are built but never used.
        labelString = 'name=' + self.containerObject.name
        labelDict = [labelString]
        label = dict({'label': labelDict})
        nameString = '/' + self.containerObject.name
        containerList = self.dockerCli.containers()
        # NOTE(review): if no running container matches, containerId is
        # unbound and the calls below raise NameError.
        for container in containerList:
            if container['Names'][0] == nameString:
                containerId = container['Id']
        self.dockerCli.stop(container=containerId)
        self.dockerCli.remove_container(container=containerId)

    def runCmd(self, cmd):
        """Exec cmd inside the running container and return its output."""
        nameString = '/' + self.containerObject.name
        containerList = self.dockerCli.containers()
        for container in containerList:
            if container['Names'][0] == nameString:
                containerId = container['Id']
        execKey = self.dockerCli.exec_create(containerId, cmd)
        execResult = self.dockerCli.exec_start(execKey['Id'])
        # exec metadata (exit code etc.) is fetched but not returned
        dockerInfo = self.dockerCli.exec_inspect(execKey['Id'])
        return execResult

    def create(self):
        """Create and start the container with role-specific volumes, then
        symlink its network namespace into /var/run/netns.

        Returns the container's inspect data."""
        image = self.containerObject.type
        name = self.containerObject.name
        domain = self.containerObject.domain
        hostname = self.containerObject.name
        # the per-container volume directory is rebuilt from scratch each run
        directory = CONTAINER_VOL_DIR + '/' + name
        if os.path.isdir(directory):
            rmtree(directory)
        os.makedirs(directory)
        if image == 'dns':
            # dnsmasq server: gets its own copy of the dnsmasq config
            copy(CONTAINER_CONF_DIR + '/dnsmasq.conf',
                 directory + '/dnsmasq.conf')
            copytree(CONTAINER_CONF_DIR + '/dnsmasq.d',
                     directory + '/dnsmasq.d')
            dnsmasqConfVolume = directory + '/dnsmasq.conf:/etc/dnsmasq.conf'
            dnsmasqDVolume = directory + '/dnsmasq.d:/etc/dnsmasq.d'
            dVolumes = [dnsmasqConfVolume, dnsmasqDVolume]
        elif image == 'puppet':
            # puppet master: mounts shared config/hieradata/modules read-write
            puppetConfVolume = CONTAINER_VOL_DIR + '/puppet-master.conf:/etc/puppet/puppet.conf'
            hieradataVolume = CONTAINER_VOL_DIR + '/hieradata:/etc/puppet/hieradata'
            siteVolume = CONTAINER_VOL_DIR + '/site.pp:/etc/puppet/manifests/site.pp'
            modulesVolume = CONTAINER_VOL_DIR + '/modules:/etc/puppet/modules'
            dVolumes = [
                puppetConfVolume, hieradataVolume, siteVolume, modulesVolume
            ]
        else:
            # default: puppet agent container with its own conf copies
            copy(CONTAINER_CONF_DIR + '/puppet.conf',
                 directory + '/puppet.conf')
            copy(CONTAINER_CONF_DIR + '/auth.conf', directory + '/auth.conf')
            puppetConfVolume = directory + '/puppet.conf:/etc/puppet/puppet.conf'
            authConfVolume = directory + '/auth.conf:/etc/puppet/auth.conf'
            dVolumes = [puppetConfVolume, authConfVolume]
        dnsList = []
        if isinstance(self.containerObject.dns, dict):
            # dict form maps server name -> {'ipaddress': 'a.b.c.d/prefix'};
            # strip the CIDR prefix length
            for dnsServer in self.containerObject.dns.keys():
                dnsServerString = self.containerObject.dns[dnsServer][
                    'ipaddress'].split('/')[0]
                dnsList.append(dnsServerString)
        else:
            dnsList.append(self.containerObject.dns)
        dnsSearchList = [domain]
        command = '/sbin/init'
        # privileged + NET_ADMIN so networking can be configured from inside;
        # network_mode "none" because wiring is done manually via netns
        host_config = create_host_config(privileged=True,
                                         cap_add=['NET_ADMIN'],
                                         dns=dnsList,
                                         dns_search=dnsSearchList,
                                         binds=dVolumes,
                                         network_mode="none")
        container = self.dockerCli.create_container(
            image=image,
            name=name,
            command=command,
            #domainname=domain, hostname=name, volumes = dVolumes,
            domainname=domain,
            hostname=name,
            volumes=dVolumes,
            detach=True,
            host_config=host_config)
        self.dockerCli.start(container=container.get('Id'))
        containerInfo = self.dockerCli.inspect_container(
            container=container.get('Id'))
        # expose the container's network namespace to `ip netns`
        containerPid = containerInfo['State']['Pid']
        pidPath = '/proc/' + str(containerPid) + '/ns/net'
        netNsPath = '/var/run/netns/' + name
        os.symlink(pidPath, netNsPath)
        return containerInfo

    def start(self):
        """Start an existing (stopped) container and relink its netns."""
        nameString = '/' + self.containerObject.name
        containerList = self.dockerCli.containers(all=True)
        for container in containerList:
            if container['Names'][0] == nameString:
                containerId = container['Id']
        self.dockerCli.start(container=containerId)
        containerInfo = self.dockerCli.inspect_container(container=containerId)
        containerPid = containerInfo['State']['Pid']
        pidPath = '/proc/' + str(containerPid) + '/ns/net'
        netNsPath = '/var/run/netns/' + self.containerObject.name
        # NOTE(review): os.symlink raises FileExistsError if the netns link
        # from a previous run was not cleaned up.
        os.symlink(pidPath, netNsPath)
        return containerInfo

    def stop(self):
        """Stop the running container and return its inspect data."""
        nameString = '/' + self.containerObject.name
        containerList = self.dockerCli.containers()
        for container in containerList:
            if container['Names'][0] == nameString:
                containerId = container['Id']
        self.dockerCli.stop(container=containerId)
        containerInfo = self.dockerCli.inspect_container(container=containerId)
        return containerInfo
Exemplo n.º 10
0
from docker import Client
from pprint import pprint
import time

cli = Client(base_url='unix://var/run/docker.sock')

exec_id = cli.exec_create(
        'midonet-agent.1', 
        'timeout 10 sh -c "while true; do echo new line; sleep 1; done"',
        stdout=True,
        stderr=True)
generator = cli.exec_start(exec_id, detach=False, stream=True)
print generator
print "Before blocking"
try:
    while True:
        print generator.next().rstrip()
except:
    pass
finally:
    pprint(cli.exec_inspect(exec_id))
Exemplo n.º 11
0
class DockerCluster(BaseCluster):
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.
    """
    # NOTE: the docstring must be the first statement in the class body to
    # actually become DockerCluster.__doc__; it previously followed the
    # class attributes and was a no-op string expression.

    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'

    def __init__(self, master_host, slave_hosts,
                 local_mount_dir, docker_mount_dir):
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        # uuid-suffixed container names keep concurrent clusters on the
        # same docker daemon from colliding
        self.master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [slave + '-' + str(uuid.uuid4())
                       for slave in slave_hosts]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self.mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 300
        self.client = Client(**kwargs)

        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        """Return the unique names of all containers, master last."""
        return self.slaves + [self.master]

    def get_master(self):
        """Return the unique (uuid-suffixed) master container name."""
        return self.master

    def all_internal_hosts(self):
        """Return the internal (un-suffixed) names of all containers."""
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        """Return the local (host-side) mount directory for *host*."""
        return os.path.join(self.local_mount_dir,
                            self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        """Return the dist dir; a per-master subdirectory when *unique*."""
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        """Map an internal or unique host name to its unique name.

        Raises:
          DockerClusterException: if *host* is not part of this cluster.
        """
        matches = [unique_host for unique_host in self.all_hosts()
                   if unique_host.startswith(host)]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        """Exit the process if the docker CLI is not installed."""
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def fetch_image_if_not_present(self, image, tag=None):
        """Pull *image* (optionally at *tag*) unless it is already local."""
        if not tag and not self.client.images(image):
            self._execute_and_wait(self.client.pull, image)
        elif tag and not self._is_image_present_locally(image, tag):
            self._execute_and_wait(self.client.pull, image, tag)

    def _is_image_present_locally(self, image_name, tag):
        """Return True iff ``image_name:tag`` exists on the local daemon."""
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                # dangling images report RepoTags as None; guard against a
                # TypeError from `in None`
                if image['RepoTags'] and image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self, master_image, slave_image=None,
                         cmd=None, **kwargs):
        """Create mount dirs, create/start all containers and block until
        they are fully started."""
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image,
                                          cmd, **kwargs)
        self._ensure_docker_containers_started(master_image)

    def tear_down(self):
        """Remove all containers and their mount dirs; close the client."""
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()
        if self.client:
            self.client.close()
            self.client = None

    def _tear_down_container(self, container_name):
        """Stop and force-remove one container, tolerating absence."""
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        """Stop a container and wait until it has exited."""
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        """Start an existing (stopped) container."""
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        """Return the name used to address a downed host (identity here)."""
        return host_name

    def _remove_host_mount_dirs(self):
        """Delete each container's local mount dir, ignoring missing ones."""
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        """Create each container's local mount dir, ignoring existing ones."""
        for container_name in self.all_hosts():
            try:
                os.makedirs(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        """Call *func* and drain its streamed response so the operation
        completes; return the concatenated output."""
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        output = ''
        for line in ret:
            output += line
        return output

    def _create_and_start_containers(self, master_image, slave_image=None,
                                     cmd=None, **kwargs):
        """Create and start the slave containers (if any), then the master,
        linking the master to every slave."""
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(
                    slave_image, container_name,
                    container_name.split('-')[0], cmd
                )
                self.client.start(container_name,
                                  binds={container_mount_dir:
                                         {'bind': self.mount_dir,
                                          'ro': False}},
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(
            master_image, self.master, hostname=self.internal_master,
            cmd=cmd
        )
        self.client.start(self.master,
                          binds={master_mount_dir:
                                 {'bind': self.mount_dir,
                                  'ro': False}},
                          links=zip(self.slaves, self.slaves), **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self, image, container_name, hostname=None,
                          cmd=None):
        """Create a single detached container from *image*."""
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               host_config={'mem_limit': '2g'})

    def _add_hostnames_to_slaves(self):
        """Append every cluster host's IP/name pair to each slave's
        /etc/hosts so hosts can address each other by internal name."""
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            self.exec_cmd_on_host(
                host,
                # absolute path: the relative 'bin/bash' only resolved when
                # the exec working directory happened to be /
                '/bin/bash -c \'echo "%s" >> /etc/hosts\''
                % additions_to_etc_hosts
            )

    @retry(stop_max_delay=_DOCKER_START_TIMEOUT, wait_fixed=_DOCKER_START_WAIT)
    def _ensure_docker_containers_started(self, image):
        """Wait (with retry) until every container is running and, for
        ssh-based images, its in-container services are up.

        Raises:
          NotStartedException: if some containers are still not started.
        """
        # Strip off the tag, if there is one. We don't want to have to update
        # the NO_WAIT_SSH_IMAGES list every time we update the docker images.
        image_no_tag = image.split(':')[0]
        host_started = {}
        for host in self.all_hosts():
            host_started[host] = False
        for host in host_started.keys():
            if host_started[host]:
                continue
            is_started = True
            is_started &= \
                self.client.inspect_container(host)['State']['Running']
            if is_started and image_no_tag not in NO_WAIT_SSH_IMAGES:
                is_started &= self._are_centos_container_services_up(host)
            host_started[host] = is_started
        not_started = [host for (host, started) in host_started.items() if not started]
        if len(not_started):
            raise NotStartedException(not_started)

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        """Return True iff every value in *host_started_map* is truthy."""
        all_started = True
        for host in host_started_map.keys():
            all_started &= host_started_map[host]
        return all_started

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up, as well as that the SSH keys are
        in the right place.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(
                host, 'grep app-admin /etc/passwd'
            )
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        # check for .ssh being in the right place
        try:
            ssh_output = self.exec_cmd_on_host(host, 'ls /home/app-admin/.ssh')
            if 'id_rsa' not in ssh_output:
                return False
        except OSError:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, user=None, raise_error=True,
                         tty=False):
        """Run *cmd* in *host*'s container and return its output.

        Raises:
          OSError: with the command's exit code when it is non-zero and
            *raise_error* is True.
        """
        ex = self.client.exec_create(self.__get_unique_host(host), cmd,
                                     tty=tty, user=user)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_tag_basename(bare_image_provider, cluster_type, ms):
        """Build the image tag '<decoration>_<cluster_type>_<master|slave>'."""
        return '_'.join(
            [bare_image_provider.get_tag_decoration(), cluster_type, ms])

    @staticmethod
    def _get_master_image_name(bare_image_provider, cluster_type):
        """Return the full master image name for a cluster type."""
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'master'))

    @staticmethod
    def _get_slave_image_name(bare_image_provider, cluster_type):
        """Return the full slave image name for a cluster type."""
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'slave'))

    @staticmethod
    def _get_image_names(bare_image_provider, cluster_type):
        """Return (master_image_name, slave_image_name) for a cluster type."""
        dc = DockerCluster
        return (dc._get_master_image_name(bare_image_provider, cluster_type),
                dc._get_slave_image_name(bare_image_provider, cluster_type))

    @staticmethod
    def start_cluster(bare_image_provider, cluster_type, master_host='master',
                      slave_hosts=None, **kwargs):
        """Create and start a cluster, building bare images first if the
        requested images are missing.

        Returns:
          (cluster, created_bare): the started cluster and whether bare
          images had to be (re)used.
        """
        if slave_hosts is None:
            slave_hosts = ['slave1', 'slave2', 'slave3']
        created_bare = False
        dc = DockerCluster

        centos_cluster = DockerCluster(master_host, slave_hosts,
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        master_name, slave_name = dc._get_image_names(
            bare_image_provider, cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            master_name, slave_name = dc._get_image_names(
                bare_image_provider, dc.BARE_CLUSTER_TYPE)
            if not dc._check_for_images(master_name, slave_name):
                bare_image_provider.create_bare_images(
                    centos_cluster, master_name, slave_name)
            created_bare = True

        centos_cluster.start_containers(master_name, slave_name, **kwargs)

        return centos_cluster, created_bare

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name, tag='latest'):
        """Return True iff both the master and slave images exist locally."""
        master_repotag = '%s:%s' % (master_image_name, tag)
        slave_repotag = '%s:%s' % (slave_image_name, tag)
        with Client(timeout=180) as client:
            images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            # dangling images report RepoTags as None; `in None` would raise
            # a TypeError
            if image['RepoTags'] is not None and master_repotag in image['RepoTags']:
                has_master_image = True
            if image['RepoTags'] is not None and slave_repotag in image['RepoTags']:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, bare_image_provider, cluster_type):
        """Commit the running master (and one slave) to the corresponding
        image names for *cluster_type*."""
        self.client.commit(self.master,
                           self._get_master_image_name(bare_image_provider,
                                                       cluster_type))
        if self.slaves:
            self.client.commit(self.slaves[0],
                               self._get_slave_image_name(bare_image_provider,
                                                          cluster_type))

    def run_script_on_host(self, script_contents, host):
        """Write *script_contents* to a temp script on *host*, make it
        executable and run it; return its output."""
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=True)

    def write_content_to_host(self, content, path, host):
        """Write *content* to *path* on *host* via the shared mount dir."""
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host, 'cp %s %s' % (os.path.join(self.mount_dir, filename),
                                dest_dir))

    def copy_to_host(self, source_path, dest_host, **kwargs):
        """Copy a local file into *dest_host*'s shared mount dir."""
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        """Return a dict mapping both unique and internal host names to
        container IP addresses."""
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        """Give each worker a fresh node.id in its presto node.properties."""
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties',
                worker
            )

    def postinstall(self, installer):
        """Run the post-install hook registered for *installer*, if any."""
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)
Exemplo n.º 12
0
class DockerCluster(BaseCluster):
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.
    """
    # NOTE: the docstring must be the first statement in the class body to
    # actually become DockerCluster.__doc__; it previously followed the
    # class attributes and was a no-op string expression.

    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'

    def __init__(self, master_host, slave_hosts,
                 local_mount_dir, docker_mount_dir):
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        # uuid-suffixed container names keep concurrent clusters on the
        # same docker daemon from colliding
        self._master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [slave + '-' + str(uuid.uuid4())
                       for slave in slave_hosts]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self._mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 300
        self.client = Client(**kwargs)
        # NOTE(review): this value appears redacted in the source — confirm
        # the intended default exec user before relying on it
        self._user = '******'

        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        """Return the unique names of all containers, master last."""
        return self.slaves + [self.master]

    def all_internal_hosts(self):
        """Return the internal (un-suffixed) names of all containers."""
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        """Return the local (host-side) mount directory for *host*."""
        return os.path.join(self.local_mount_dir,
                            self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        """Return the dist dir; a per-master subdirectory when *unique*."""
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        """Map an internal or unique host name to its unique name.

        Raises:
          DockerClusterException: if *host* is not part of this cluster.
        """
        matches = [unique_host for unique_host in self.all_hosts()
                   if unique_host.startswith(host)]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        """Exit the process if the docker CLI is not installed."""
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def _is_image_present_locally(self, image_name, tag):
        """Return True iff ``image_name:tag`` exists on the local daemon."""
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                # dangling images report RepoTags as None; guard against a
                # TypeError (same guard as _check_for_images below)
                if image['RepoTags'] and image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self, master_image, slave_image=None,
                         cmd=None, **kwargs):
        """Create mount dirs, create/start all containers and block until
        they are fully started."""
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image,
                                          cmd, **kwargs)
        self._ensure_docker_containers_started(master_image)

    def tear_down(self):
        """Remove all containers and their mount dirs; close the client."""
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()
        if self.client:
            self.client.close()
            self.client = None

    def _tear_down_container(self, container_name):
        """Stop and force-remove one container, tolerating absence."""
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        """Stop a container and wait until it has exited."""
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        """Start an existing (stopped) container."""
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        """Return the name used to address a downed host (identity here)."""
        return host_name

    def _remove_host_mount_dirs(self):
        """Delete each container's local mount dir, ignoring missing ones."""
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        """Create each container's local mount dir, ignoring existing ones."""
        for container_name in self.all_hosts():
            try:
                os.makedirs(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        """Call *func* and drain its streamed response so the operation
        completes; return the concatenated output."""
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        output = ''
        for line in ret:
            output += line
        return output

    def _create_and_start_containers(self, master_image, slave_image=None,
                                     cmd=None, **kwargs):
        """Create and start the slave containers (if any), then the master,
        linking the master to every slave."""
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(
                    slave_image, container_name,
                    container_name.split('-')[0], cmd
                )
                self.client.start(container_name,
                                  binds={container_mount_dir:
                                         {'bind': self.mount_dir,
                                          'ro': False}},
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(
            master_image, self.master, hostname=self.internal_master,
            cmd=cmd
        )
        self.client.start(self.master,
                          binds={master_mount_dir:
                                 {'bind': self.mount_dir,
                                  'ro': False}},
                          links=zip(self.slaves, self.slaves), **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self, image, container_name, hostname=None,
                          cmd=None):
        """Create a single detached container from *image*."""
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               host_config={'mem_limit': '2g'})

    def _add_hostnames_to_slaves(self):
        """Append every cluster host's IP/name pair to each slave's
        /etc/hosts so hosts can address each other by internal name."""
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            self.exec_cmd_on_host(
                host,
                # absolute path: the relative 'bin/bash' only resolved when
                # the exec working directory happened to be /
                '/bin/bash -c \'echo "%s" >> /etc/hosts\''
                % additions_to_etc_hosts
            )

    @retry(stop_max_delay=_DOCKER_START_TIMEOUT, wait_fixed=_DOCKER_START_WAIT)
    def _ensure_docker_containers_started(self, image):
        """Wait (with retry) until every container is running and, for
        ssh-based images, its in-container services are up.

        Raises:
          NotStartedException: if some containers are still not started.
        """
        # Strip off the tag, if there is one. We don't want to have to update
        # the NO_WAIT_SSH_IMAGES list every time we update the docker images.
        image_no_tag = image.split(':')[0]
        host_started = {}
        for host in self.all_hosts():
            host_started[host] = False
        for host in host_started.keys():
            if host_started[host]:
                continue
            is_started = True
            is_started &= \
                self.client.inspect_container(host)['State']['Running']
            if is_started and image_no_tag not in NO_WAIT_SSH_IMAGES:
                is_started &= self._are_centos_container_services_up(host)
            host_started[host] = is_started
        not_started = [host for (host, started) in host_started.items() if not started]
        if len(not_started):
            raise NotStartedException(not_started)

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        """Return True iff every value in *host_started_map* is truthy."""
        all_started = True
        for host in host_started_map.keys():
            all_started &= host_started_map[host]
        return all_started

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up, as well as that the SSH keys are
        in the right place.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(
                host, 'grep app-admin /etc/passwd'
            )
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        # check for .ssh being in the right place
        try:
            ssh_output = self.exec_cmd_on_host(host, 'ls /home/app-admin/.ssh')
            if 'id_rsa' not in ssh_output:
                return False
        except OSError:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, user=None, raise_error=True,
                         tty=False, invoke_sudo=False):
        """Run *cmd* through ``sh -c`` in *host*'s container and return its
        output.

        NOTE(review): invoke_sudo is currently unused — confirm whether
        sudo-wrapping was intended here.

        Raises:
          OSError: with the command's exit code when it is non-zero and
            *raise_error* is True.
        """
        ex = self.client.exec_create(self.__get_unique_host(host), ['sh', '-c', cmd],
                                     tty=tty, user=user)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_tag_basename(bare_image_provider, cluster_type, ms):
        """Build the image tag '<decoration>_<cluster_type>_<master|slave>'."""
        return '_'.join(
            [bare_image_provider.get_tag_decoration(), cluster_type, ms])

    @staticmethod
    def _get_master_image_name(bare_image_provider, cluster_type):
        """Return the full master image name for a cluster type."""
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'master'))

    @staticmethod
    def _get_slave_image_name(bare_image_provider, cluster_type):
        """Return the full slave image name for a cluster type."""
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            DockerCluster._get_tag_basename(
                                bare_image_provider, cluster_type, 'slave'))

    @staticmethod
    def _get_image_names(bare_image_provider, cluster_type):
        """Return (master_image_name, slave_image_name) for a cluster type."""
        dc = DockerCluster
        return (dc._get_master_image_name(bare_image_provider, cluster_type),
                dc._get_slave_image_name(bare_image_provider, cluster_type))

    @staticmethod
    def start_cluster(bare_image_provider, cluster_type, master_host='master',
                      slave_hosts=None, **kwargs):
        """Create and start a cluster, building bare images first if the
        requested images are missing.

        Returns:
          (cluster, created_bare): the started cluster and whether bare
          images had to be (re)used.
        """
        if slave_hosts is None:
            slave_hosts = ['slave1', 'slave2', 'slave3']
        created_bare = False
        dc = DockerCluster

        centos_cluster = DockerCluster(master_host, slave_hosts,
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        master_name, slave_name = dc._get_image_names(
            bare_image_provider, cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            master_name, slave_name = dc._get_image_names(
                bare_image_provider, dc.BARE_CLUSTER_TYPE)
            if not dc._check_for_images(master_name, slave_name):
                bare_image_provider.create_bare_images(
                    centos_cluster, master_name, slave_name)
            created_bare = True

        centos_cluster.start_containers(master_name, slave_name, **kwargs)

        return centos_cluster, created_bare

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name, tag='latest'):
        """Return True iff both the master and slave images exist locally."""
        master_repotag = '%s:%s' % (master_image_name, tag)
        slave_repotag = '%s:%s' % (slave_image_name, tag)
        with Client(timeout=180) as client:
            images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            # dangling images report RepoTags as None
            if image['RepoTags'] is not None and master_repotag in image['RepoTags']:
                has_master_image = True
            if image['RepoTags'] is not None and slave_repotag in image['RepoTags']:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, bare_image_provider, cluster_type):
        """Commit the running master (and one slave) to the corresponding
        image names for *cluster_type*."""
        self.client.commit(self.master,
                           self._get_master_image_name(bare_image_provider,
                                                       cluster_type))
        if self.slaves:
            self.client.commit(self.slaves[0],
                               self._get_slave_image_name(bare_image_provider,
                                                          cluster_type))

    def run_script_on_host(self, script_contents, host, tty=True):
        """Write *script_contents* to a temp script on *host*, make it
        executable and run it; return its output."""
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=tty)

    def write_content_to_host(self, content, path, host):
        """Write *content* to *path* on *host* via the shared mount dir."""
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host, 'cp %s %s' % (os.path.join(self.mount_dir, filename),
                                dest_dir))

    def copy_to_host(self, source_path, dest_host, **kwargs):
        """Copy a local file into *dest_host*'s shared mount dir."""
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        """Return a dict mapping both unique and internal host names to
        container IP addresses."""
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        """Give each worker a fresh node.id in its presto node.properties."""
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties',
                worker
            )

    def postinstall(self, installer):
        """Run the post-install hook registered for *installer*, if any."""
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)

    @property
    def rpm_cache_dir(self):
        """In-container directory used as the RPM cache (the mount dir)."""
        return self._mount_dir

    @property
    def mount_dir(self):
        """In-container mount directory shared with the local host."""
        return self._mount_dir

    @property
    def user(self):
        """Default user for operations on cluster hosts."""
        return self._user

    @property
    def master(self):
        """Unique (uuid-suffixed) master container name."""
        return self._master
Exemplo n.º 13
0
                                 tty=True,
                                 environment=config['environment'],
                                 volumes=volumes,
                                 name=config['name'],
                                 host_config=host_config)
# Forward warning messages to stderr if any
if container.get('Warnings') is not None:
    warn(container.get('Warnings'), RuntimeWarning)
# Start the container
cli.start(container=container.get('Id'))

# Execute the commands, tracking the exit code of the last one run.
# Initialise status to 0 so an empty config['cmd'] list exits successfully
# instead of raising NameError at sys.exit(status).
status = 0
for cmd in config['cmd']:
    print('[+] ' + cmd)
    execute = cli.exec_create(container['Id'],
                              cmd=cmd,
                              stdout=True,
                              stderr=True,
                              stdin=False)
    # stream output chunks to stdout as they are produced
    for char in cli.exec_start(execute['Id'], stream=True):
        sys.stdout.write(char.decode(sys.stdout.encoding))
    status = cli.exec_inspect(execute['Id'])['ExitCode']
    if status != 0:
        # abort on the first failing command
        break

# Stop the container and remove it
cli.stop(container=container.get('Id'))
cli.remove_container(container=container['Id'])

sys.exit(status)
Exemplo n.º 14
0
class Two1ComposerContainers(Two1Composer):
    """ Manage machine-payable microservices in containers.
    """
    def __init__(self):
        # Starts disconnected; connect() attaches the docker client and
        # flips the state to CONNECTED.
        self._connected = ComposerState.DISCONNECTED
        self.provider = TwentyOneProvider()
        self.default_wallet = Two1Wallet(self.wallet_file, self.provider)

    class ServiceManager:
        """ Query and modify user services persisting at cls.USER_SERVICES_FILE.
        """

        # JSON file holding the user-added service definitions.
        USER_SERVICES_FILE = os.path.join(Two1Composer.BASE_DIR,
                                          "user-services.json")

        class Image(namedtuple('Image', 'docker_hub_account repository tag')):
            def _asdict(self):
                # Overridden to fix a bug for Python 3.4 users
                # https://bugs.python.org/issue24931
                """Return an OrderedDict mapping field names to their values."""
                return OrderedDict(zip(self._fields, self))

            @property
            def is_dockerhub_image(self):
                """ Returns: True iff Image instance has all fields.
                """
                return self.docker_hub_account and self.repository and self.tag

            @property
            def is_local_image(self):
                """ Returns: True iff Image instance doesn't have docker_hub_account but has all other fields.
                """
                return not self.docker_hub_account and self.repository and self.tag

            def __str__(self):
                """ Returns: Docker image name constructed from Image instance fields.
                """
                if self.is_dockerhub_image:
                    return '%s/%s:%s' % (self.docker_hub_account,
                                         self.repository, self.tag)
                elif self.is_local_image:
                    return '%s:%s' % (self.repository, self.tag)
                else:
                    raise ValueError()

            @classmethod
            def from_string(cls, image_name):
                """ Constructs an Image instance from a docker image name.

                Args:
                    image_name (str): A docker image name.

                Returns:
                    Image: An Image instance.
                """
                slashes = re.findall('/', image_name)
                colons = re.findall(':', image_name)

                if len(slashes) == 1:
                    if len(colons) == 1 and image_name.find(
                            '/') < image_name.find(':'):
                        docker_hub_account, rest = image_name.split('/')
                        repository, tag = rest.split(':')
                        return cls(docker_hub_account=docker_hub_account,
                                   repository=repository,
                                   tag=tag)
                    elif len(colons) == 0:
                        docker_hub_account, repository = image_name.split('/')
                        return cls(docker_hub_account=docker_hub_account,
                                   repository=repository,
                                   tag='latest')
                elif len(slashes) == 0:
                    if len(colons) == 1:
                        repository, tag = image_name.split(':')
                        return cls(docker_hub_account=None,
                                   repository=repository,
                                   tag=tag)
                    elif len(colons) == 0:
                        return cls(docker_hub_account=None,
                                   repository=image_name,
                                   tag='latest')
                raise ValueError()

        @classmethod
        def get_image(cls, service_name):
            """ Constructs an Image instance for a service.

            Args:
                service_name (str): The name of either a 21 service in the 21dotco/two1 repository or a user service
                                    added to ServiceManager.USER_SERVICES_FILE by ServiceManager.add_service.

            Returns:
                Image: An Image instance corresponding to the given service.

            Raises:
                ValueError: If service_name is neither a known 21 service
                            nor a registered user service.
            """
            if service_name in cls.available_21_services():
                # Base services use their own name as the tag; market
                # services are published under a 'service-<name>' tag.
                return cls.Image(
                    docker_hub_account='21dotco',
                    repository='two1',
                    tag=service_name if service_name
                    in Two1Composer.BASE_SERVICES else 'service-%s' %
                    service_name)
            elif service_name in cls.available_user_services():
                return cls.Image(**cls._get_user_service_dict()[service_name])
            else:
                raise ValueError()

        @classmethod
        def available_services(cls):
            """ Returns: All available service names (21 and user services)."""
            return set.union(cls.available_21_services(),
                             cls.available_user_services())

        @classmethod
        def available_21_services(cls):
            """ Returns: All available 21 services by querying Docker Hub.
            """
            # Tags of the form 'service-<name>' on the repo denote market
            # services; strip the prefix to obtain the service names.
            service_image_data = requests.get(
                os.path.join(Two1Composer.DOCKERHUB_API_URL,
                             Two1Composer.DOCKERHUB_REPO,
                             'tags')).json().get('results')
            # Set comprehension: the original built a throwaway list just
            # to pass it to set().
            return {
                image_data['name'].split('service-')[1]
                for image_data in service_image_data
                if re.match(r'^service-', image_data['name'])
            }

        @classmethod
        def available_user_services(cls):
            """ Returns: Names of all user-registered services."""
            # Iterating a dict yields its keys, so set() on the dict itself
            # is equivalent to set(d.keys()).
            return set(cls._get_user_service_dict())

        @classmethod
        def add_service(cls, service_name, image_name_string,
                        service_successfully_added_hook,
                        service_already_exists_hook,
                        service_failed_to_add_hook):
            """ Adds a new service definition to ServiceManager.USER_SERVICES_FILE.

            Args:
                service_name (str): Name of the service definition to add.
                image_name_string (str): Docker image name for the service definition.
                service_successfully_added_hook (Callable): Called with service_name on success.
                service_already_exists_hook (Callable): Called with service_name if it is already registered.
                service_failed_to_add_hook (Callable): Called with service_name if persisting the file fails.
            """
            service_dict = cls._get_user_service_dict()
            if service_name in service_dict:
                service_already_exists_hook(service_name)
            else:
                # Store the parsed Image as a plain dict so it is
                # JSON-serializable.
                service_dict[service_name] = cls.Image.from_string(
                    image_name_string)._asdict()
                if cls._commit_user_service_dict(service_dict):
                    service_successfully_added_hook(service_name)
                else:
                    service_failed_to_add_hook(service_name)

        @classmethod
        def remove_service(cls, service_name,
                           service_successfully_removed_hook,
                           service_does_not_exists_hook,
                           service_failed_to_remove_hook):
            """ Removes a service definition from ServiceManager.USER_SERVICES_FILE.

            Args:
                service_name (str): Name of the service definition to remove.
                service_successfully_removed_hook (Callable): Called with service_name on success.
                service_does_not_exists_hook (Callable): Called with service_name if it is not registered.
                service_failed_to_remove_hook (Callable): Called with service_name if persisting the file fails.
            """
            service_dict = cls._get_user_service_dict()
            if service_name in service_dict:
                del service_dict[service_name]
                if cls._commit_user_service_dict(service_dict):
                    service_successfully_removed_hook(service_name)
                else:
                    service_failed_to_remove_hook(service_name)
            else:
                service_does_not_exists_hook(service_name)

        @classmethod
        def _get_user_service_dict(cls):
            """ Returns: ServiceManager.USER_SERVICES_FILE as a dict.

            An empty dict is returned when the file is missing, unreadable,
            or does not contain valid JSON.
            """
            try:
                with open(cls.USER_SERVICES_FILE, 'r') as data_file:
                    service_dict = json.load(data_file)
            except (OSError, ValueError):
                # Narrowed from a bare 'except:' which also swallowed
                # SystemExit/KeyboardInterrupt.  json.JSONDecodeError is a
                # ValueError subclass; a missing/unreadable file is OSError.
                return {}
            else:
                return service_dict

        @classmethod
        def _commit_user_service_dict(cls, service_dict):
            """ Writes a dict of user services to ServiceManager.USER_SERVICES_FILE in json format.

            Args:
                service_dict (dict): A dictionary of user services of the form
                                     {service_name : _as_dict representation of corresponding Image instance..}.

            Returns:
                bool: True iff no exceptions were raised when writing service_dict to ServiceManager.USER_SERVICES_FILE
                      as json.
            """
            try:
                with open(cls.USER_SERVICES_FILE, 'w') as outfile:
                    json.dump(service_dict, outfile)
            except (OSError, TypeError, ValueError):
                # Narrowed from a bare 'except:': OSError covers file
                # problems, TypeError/ValueError unserializable content.
                return False
            else:
                return True

    class ComposerYAMLContext(YamlDataContext):
        """ Context manager for composer YAML service file.
        """
        def __init__(self,
                     username=None,
                     password=None,
                     server_port=None,
                     mnemonic=None):
            # Values left as None keep whatever the YAML file already holds
            # (see __enter__).
            self.username = username
            self.password = password
            self.server_port = server_port
            self.mnemonic = mnemonic
            super().__init__(Two1Composer.COMPOSE_FILE)

        def __enter__(self):
            """ Enter the YAML context and overlay any values supplied to
            __init__ onto each service's environment.

            Only keys already present in a service's environment are
            updated, and only when the corresponding __init__ value is not
            None.
            """
            sup = super().__enter__()
            for service in self.data['services']:
                service_definition = self.data['services'][service]
                if 'environment' in service_definition:

                    if 'TWO1_USERNAME' in service_definition[
                            'environment'] and self.username is not None:
                        service_definition['environment'][
                            'TWO1_USERNAME'] = self.username

                    if 'TWO1_PASSWORD' in service_definition[
                            'environment'] and self.password is not None:
                        service_definition['environment'][
                            'TWO1_PASSWORD'] = self.password

                    if 'TWO1_WALLET_MNEMONIC' in service_definition[
                            'environment'] and self.mnemonic is not None:
                        service_definition['environment'][
                            'TWO1_WALLET_MNEMONIC'] = self.mnemonic

                    if 'PAYMENT_SERVER_IP' in service_definition[
                            'environment'] and self.server_port is not None:
                        # BUG FIX: the original took rsplit(...)[-1], which
                        # is the OLD PORT of 'http://host:port', producing
                        # '<old_port>:<new_port>'.  [0] keeps the
                        # scheme/host part and swaps in the new port.
                        host = service_definition['environment'][
                            'PAYMENT_SERVER_IP'].rsplit(':', maxsplit=1)[0]
                        service_definition['environment'][
                            'PAYMENT_SERVER_IP'] = '%s:%s' % (host,
                                                              self.server_port)
            return sup

        def _filler(self):
            """ Create the base service description file.

            Returns:
                dict: Default docker-compose v2 structure with the base,
                      router and payments services.
            """
            return {
                'version': '2',
                'services': {
                    'base': {
                        'image': '%s:base' % Two1Composer.DOCKERHUB_REPO,
                    },
                    'router': {
                        'image':
                        '%s:router' % Two1Composer.DOCKERHUB_REPO,
                        'container_name':
                        'sell_router',
                        'restart':
                        'always',
                        # Nginx vhost include dirs are bind-mounted into the
                        # router container.
                        'volumes': [
                            Two1Composer.SITES_ENABLED_PATH +
                            ":/etc/nginx/sites-enabled",
                            Two1Composer.SITES_AVAILABLE_PATH +
                            ":/etc/nginx/sites-available",
                        ],
                        'ports':
                        ['%s:%s' % (self.server_port, self.server_port)],
                        'links': [
                            'payments:payments',
                        ],
                    },
                    'payments': {
                        'image': '%s:payments' % Two1Composer.DOCKERHUB_REPO,
                        'depends_on': ['base'],
                        'container_name': 'sell_payments',
                        'restart': 'always',
                        'environment': {
                            "TWO1_USERNAME": str(self.username),
                            "TWO1_PASSWORD": str(self.password),
                            "TWO1_WALLET_MNEMONIC": str(self.mnemonic)
                        },
                        'volumes': [Two1Composer.DB_DIR + ":/usr/src/db/"],
                        'logging': {
                            'driver': 'json-file'
                        },
                        # Drop all capabilities except the minimum needed.
                        'cap_drop': ['ALL'],
                        'cap_add': [
                            'DAC_OVERRIDE',
                            'NET_RAW',
                        ],
                    }
                }
            }

    # public api
    def connect(self, machine_env, host, machine_config_file):
        """ Connect service composer to machine layer.

        Args:
            machine_env (dict): Environment dictionary for the docker client of the machine layer.
            host (str): Hostname of the machine layer docker daemon.
            machine_config_file (str): Path to the config file for the machine layer.
        """
        self.machine_env = machine_env
        self.machine_host = host
        with open(machine_config_file, 'r') as f:
            self.machine_config = json.load(f)
        # assert_hostname=False: presumably the machine's TLS certificate
        # may not match its hostname -- verify against docker_env's docs.
        self.docker_client = Client(
            **docker_env(assert_hostname=False, environment=self.machine_env))
        self._connected = ComposerState.CONNECTED

    def initialize_server(self, username, password, server_port, wallet=None):
        """ Initialize micropayments server.

        Define boilerplate services, networks, and volumes composer file
        and nginx server config.

        Generates a wallet mnemonic if non-existent.

        Args:
            username (str): Username to log in with.
            password (str): Password to log in with.
            server_port (int): The server port that the router is running on.
            wallet: The wallet to use for the payments server and subsequent services.

        Returns:
            tuple: (0, new_wallet) where new_wallet is the newly generated
                   mnemonic, or None if the existing mnemonic was kept.
        """
        self._create_base_server(
            server_port)  # create base router server config
        self._create_payments_route()  # create route to payments server

        new_wallet = None  # rv[1], not None if mnemonic is replaced in this function

        # generate service description (yaml)
        with self.ComposerYAMLContext(username, password,
                                      server_port) as composer_yaml:
            try:
                mnemonic = composer_yaml['services']['payments'][
                    'environment']['TWO1_WALLET_MNEMONIC']
                if not mnemonic or mnemonic == str(
                        None):  # if mnemonic is Falsy or uninitialized
                    raise ValueError()
            except (
                    KeyError, ValueError
            ):  # catches if mnemonic is Falsy or doesn't exist in dict tree
                # No usable mnemonic: create a fresh wallet and persist its
                # mnemonic into the compose file.
                new_machine_wallet = self.default_wallet.create(
                    self.provider)[1]
                composer_yaml['services']['payments']['environment'][
                    'TWO1_WALLET_MNEMONIC'] = new_machine_wallet
                new_wallet = new_machine_wallet

        return 0, new_wallet

    def pull_image(self, image, image_sucessfully_pulled_hook,
                   image_failed_to_pull_hook, image_is_local_hook,
                   image_is_malformed_hook):
        """ Pulls an Image instance iff it is a Docker Hub image.

        Args:
            image (Image): An Image instance.
            image_sucessfully_pulled_hook (Callable): Called with image after a successful pull.
            image_failed_to_pull_hook (Callable): Called with image when the pull fails.
            image_is_local_hook (Callable): Called with image when it is a local image (no pull attempted).
            image_is_malformed_hook (Callable): Called with image when it is neither local nor Docker Hub.
        """
        if image.is_dockerhub_image:
            try:
                self.docker_client.pull(
                    '%s/%s' % (image.docker_hub_account, image.repository),
                    tag=image.tag,
                    stream=False)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                image_failed_to_pull_hook(image)
            else:
                image_sucessfully_pulled_hook(image)
        elif image.is_local_image:
            image_is_local_hook(image)
        else:
            image_is_malformed_hook(image)

    def start_services(self, service_names, failed_to_start_hook, started_hook,
                       failed_to_restart_hook, restarted_hook,
                       failed_to_up_hook, up_hook):
        """ Start selected services.

        Args:
            service_names (list): List of service names to start.
            failed_to_start_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             fails to start.
            started_hook (Callable): A callable hook that takes in a service name and is run when said service starts.
            failed_to_restart_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               fails to restart.
            restarted_hook (Callable): A callable hook that takes in a service name and is run when said service
                                       restarts.
            failed_to_up_hook (Callable): A callable hook that takes in a service name and is run when said service
                                          fails to go up.
            up_hook (Callable): A callable hook that takes in a service name and is run when said service goes up.
        """
        # Bring up the base infrastructure first, then restart the router
        # so it picks up the freshly written nginx routes.
        self._start_sell_service('base', failed_to_start_hook, started_hook,
                                 failed_to_up_hook, up_hook)
        self._start_sell_service('router', failed_to_start_hook, started_hook,
                                 failed_to_up_hook, up_hook)
        self._start_sell_service('payments', failed_to_start_hook,
                                 started_hook, failed_to_up_hook, up_hook)

        self._restart_sell_service('router', failed_to_start_hook,
                                   started_hook, failed_to_restart_hook,
                                   restarted_hook, failed_to_up_hook, up_hook)

        # Attempt to start all market services
        for service_name in service_names:
            try:
                image = self.ServiceManager.get_image(service_name)
                container_name = self.service_name_2_container_name(
                    service_name)

                # create nginx routes for service_name
                self._create_service_route(service_name)
                # add service_name to docker compose file
                with self.ComposerYAMLContext() as docker_compose_yaml:
                    username = docker_compose_yaml['services']['payments'][
                        'environment']['TWO1_USERNAME']
                    password = docker_compose_yaml['services']['payments'][
                        'environment']['TWO1_PASSWORD']
                    mnemonic = docker_compose_yaml['services']['payments'][
                        'environment']['TWO1_WALLET_MNEMONIC']
                    docker_compose_yaml['services'][service_name] = {
                        'image': str(image),
                        'container_name': container_name,
                        'depends_on': ['base'],
                        'restart': 'always',
                        'environment': {
                            "TWO1_USERNAME":
                            str(username),
                            "TWO1_PASSWORD":
                            str(password),
                            "TWO1_WALLET_MNEMONIC":
                            str(mnemonic),
                            "SERVICE":
                            str(service_name),
                            "PAYMENT_SERVER_IP":
                            "http://%s:%s" %
                            (self.machine_host,
                             self.machine_config["server_port"])
                        },
                        'volumes': [Two1Composer.DB_DIR + ":/usr/src/db/"],
                        'logging': {
                            'driver': 'json-file'
                        },
                        'cap_drop': ['ALL'],
                        'cap_add': [
                            'DAC_OVERRIDE',
                            'NET_RAW',
                        ],
                    }
                    link_str = '%s:%s' % (service_name, service_name)
                    if link_str not in docker_compose_yaml['services'][
                            'router']['links']:
                        docker_compose_yaml['services']['router'][
                            'links'].append(link_str)
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt abort the loop instead of being
                # reported as a start failure.
                # something went wrong while configuring service_name
                failed_to_start_hook(service_name)
            else:
                # attempt to build service_name
                self._start_sell_service(service_name, failed_to_start_hook,
                                         started_hook, failed_to_up_hook,
                                         up_hook)

        # Restart the router again so it links to the new services.
        self._restart_sell_service('router', failed_to_start_hook,
                                   started_hook, failed_to_restart_hook,
                                   restarted_hook, failed_to_up_hook, up_hook)

    def _start_sell_service(self,
                            service_name,
                            failed_to_start_hook,
                            started_hook,
                            failed_to_up_hook,
                            up_hook,
                            timeout=Two1Composer.SERVICE_START_TIMEOUT):
        """ Start a single service via docker-compose and wait for it to
        come up.

        Args:
            service_name (str): Service to bring up.
            failed_to_start_hook / started_hook / failed_to_up_hook /
            up_hook (Callable): Outcome callbacks, each called with
                service_name.
            timeout (float): Seconds to wait for the health probe.
        """
        try:
            subprocess.check_output([
                "docker-compose", "-f", Two1Composer.COMPOSE_FILE, "up", "-d",
                service_name
            ],
                                    stderr=subprocess.DEVNULL,
                                    env=self.machine_env)
        except subprocess.CalledProcessError:
            failed_to_start_hook(service_name)
        else:
            started_hook(service_name)
            if service_name == 'router':
                time.sleep(5)
            elif service_name != 'router' and service_name != 'base':
                # Probe the service from inside the router container and
                # poll until the curl exec finishes or the timeout elapses.
                # BUG FIX: time.clock() was removed in Python 3.8 (and never
                # measured wall time on Linux); use time.monotonic().
                start = time.monotonic()

                exec_id = self.docker_client.exec_create(
                    'sell_router', "curl %s:5000" % service_name)['Id']
                self.docker_client.exec_start(exec_id)
                running = True

                while time.monotonic() - start < timeout and running is True:
                    running = self.docker_client.exec_inspect(
                        exec_id)['Running']

                if running is True:
                    failed_to_up_hook(service_name)
                else:
                    up_hook(service_name)

    def _restart_sell_service(self, service_name, failed_to_start_hook,
                              started_hook, failed_to_restart_hook,
                              restarted_hook, failed_to_up_hook, up_hook):
        """ Restart a service: stop its container if running, then start it.

        Whether the stop succeeded selects which hook pair is reported
        (restart hooks vs. plain start hooks).
        """
        try:
            self.docker_client.stop("sell_%s" % service_name)
        except Exception:
            # Narrowed from a bare 'except:'.  A failed stop is treated as
            # "was not running", i.e. a fresh start rather than a restart.
            is_restart = False
        else:
            is_restart = True

        self._start_sell_service(
            service_name,
            failed_to_restart_hook if is_restart else failed_to_start_hook,
            restarted_hook if is_restart else started_hook, failed_to_up_hook,
            up_hook)

    def stop_services(self, service_names,
                      service_found_stopped_and_removed_hook,
                      service_failed_to_stop_hook,
                      service_failed_to_be_removed_hook,
                      service_not_found_hook):
        """ Stop selected services and remove containers.

        Args:
            service_names (set): Set of services to stop.
            service_found_stopped_and_removed_hook (Callable): A callable hook that takes in a service name and is run
                                                               when said service is found, stopped, and removed.
            service_failed_to_stop_hook (Callable): A callable hook that takes in a service name and is run when said
                                                    service fails to be stopped.
            service_failed_to_be_removed_hook (Callable): A callable hook that takes in a service name and is run when
                                                          said service fails to be removed.
            service_not_found_hook (Callable): A callable hook that takes in a service name and is run when said service
                                               isn't found.

        """
        for service_name in service_names:
            if service_name in self.get_running_services():
                container_name = self.service_name_2_container_name(
                    service_name)
                try:
                    self.docker_client.stop(container_name)
                except Exception:
                    # Narrowed from a bare 'except:' (here and below) so
                    # SystemExit/KeyboardInterrupt propagate.
                    service_failed_to_stop_hook(service_name)
                else:
                    try:
                        self.docker_client.remove_container(container_name)
                    except Exception:
                        service_failed_to_be_removed_hook(service_name)
                    else:
                        service_found_stopped_and_removed_hook(service_name)
            else:
                service_not_found_hook(service_name)

    def silently_force_stop_all_services(self):
        """ Force-remove every running container without reporting errors. """
        # containers() yields container description dicts, which
        # remove_container accepts directly.
        for container in self.docker_client.containers(
                filters={"status": "running"}):
            self.docker_client.remove_container(container, force=True)

    @staticmethod
    def container_names_2_service_names(container_definitions):
        """ Return service names from container definitions.

        See service_name_2_container_name for the inverse operation but on one service name.

        Args:
            container_definitions (list): List of container descriptions as returned by self.docker_client.containers.

        Returns:
            set: Set of service names generated by stripping the first six
                 characters (presumably the leading '/sell_' -- confirm
                 against the docker API) from each container's first name.
        """
        return {
            definition['Names'][0][6:]
            for definition in container_definitions
        }

    @staticmethod
    def service_name_2_container_name(service_name):
        """ Generates a container name from a service name by prepending 'sell_'
        """
        return 'sell_{}'.format(service_name)

    def status_services(self, services):
        """ Gets running status of specified services.

        Args:
            services (list): List (or set) of service names to get status for.

        Returns:
            dict: Maps 'running', 'exited' and 'nonexistent' to the subsets
                  of *services* in each state.
        """
        # BUG FIX: coerce to a set.  The docstring advertises a list, but
        # the set algebra below ('&', '-') raises TypeError unless both
        # operands are sets.
        services = set(services)

        existent_services = self.get_services(all=True)
        running_services = self.get_services(filters={"status": "running"})
        exited_services = self.get_services(filters={"status": "exited"})

        return {
            "running": running_services & services,
            "exited": exited_services & services,
            "nonexistent": services - existent_services
        }

    def get_services(self, *args, **kwargs):
        """ Call docker_client.containers | convert resulting container names to service names | remove base services

        All positional and keyword arguments are forwarded verbatim to
        docker_client.containers (e.g. all=..., filters=...).
        """
        return self.container_names_2_service_names(
            self.docker_client.containers(*args, **
                                          kwargs)) - Two1Composer.BASE_SERVICES

    def get_running_services(self):
        """ Get the currently running services that aren't 21 base services.

        Returns:
            set: Set of currently running service names.
        """
        running = self.get_services(filters={"status": "running"})
        return running

    def status_router(self, service_running_hook, service_unknown_state_hook):
        """ Get status of Nginx router container.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        matches = self.docker_client.containers(all=True,
                                                filters={
                                                    "name": "sell_router",
                                                    "status": "running"
                                                })
        # Exactly one running match means the router is healthy.
        if len(matches) == 1:
            service_running_hook("router")
        else:
            service_unknown_state_hook("router")

    def status_payments_server(self, service_running_hook,
                               service_unknown_state_hook):
        """ Get status of payment channels server.

        Args:
            service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
                                             is running.
            service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
                                                   service is in an unknown state.
        """
        matches = self.docker_client.containers(all=True,
                                                filters={
                                                    "name": "sell_payments",
                                                    "status": "running"
                                                })
        # Exactly one running match means the payments server is healthy.
        if len(matches) == 1:
            service_running_hook("payments")
        else:
            service_unknown_state_hook("payments")

    @staticmethod
    def _create_base_server(server_port):
        """ Create nginx base server config.

        Args:
            server_port (int): port for 21 sell server.

        Raises:
            Two1ComposerServiceDefinitionException: If the config dirs or
                file cannot be (re)created.
        """
        try:
            # create nginx router dirs (wiping any stale routes first)
            shutil.rmtree(Two1Composer.SITES_ENABLED_PATH, ignore_errors=True)
            shutil.rmtree(Two1Composer.SITES_AVAILABLE_PATH,
                          ignore_errors=True)
            os.makedirs(Two1Composer.SITES_ENABLED_PATH, exist_ok=True)
            os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)

            # create base nginx server
            with open(
                    os.path.join(Two1Composer.SITES_ENABLED_PATH,
                                 "two1baseserver"), 'w') as f:
                f.write("server {\n"
                        "    listen " + str(server_port) + ";\n"
                        "    include /etc/nginx/sites-available/*;\n"
                        "}\n")
        except Exception as e:
            # Chain the cause so the original error is not lost.
            raise exceptions.Two1ComposerServiceDefinitionException() from e

    @staticmethod
    def _create_service_route(service):
        """ Create route for container service.

        Writes an nginx location block that strips the '/<service>' prefix
        and proxies to the service container on port 5000.

        Raises:
            Two1ComposerRouteException: If the route file cannot be written.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, service),
                      'w') as f:
                f.write(
                    "location /" + service + " {\n"
                    "    rewrite ^/" + service + "/?(.*) /$1 break;\n"
                    "    proxy_pass http://" + service + ":" + str(5000) +
                    ";\n"
                    "    proxy_set_header Host $host;\n"
                    "    proxy_set_header X-Real-IP $remote_addr;\n"
                    "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                    "}\n")
        except Exception as e:
            # Chain the cause so the original error is not lost.
            raise exceptions.Two1ComposerRouteException() from e

    @staticmethod
    def _create_payments_route():
        """ Add route to payments server.

        Raises:
            Two1ComposerRouteException: If the route file cannot be written.
        """
        os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
        try:
            # write nginx route for payments server
            with open(
                    os.path.join(Two1Composer.SITES_AVAILABLE_PATH,
                                 "payments"), 'w') as f:
                f.write(
                    "location /payment {\n"
                    "    proxy_pass http://payments:" + str(5000) + ";\n"
                    "    proxy_set_header Host $host;\n"
                    "    proxy_set_header X-Real-IP $remote_addr;\n"
                    "    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
                    "}\n")
        except Exception as e:
            # Chain the cause so the original error is not lost.
            raise exceptions.Two1ComposerRouteException() from e

    def publish_service(self, service_name, host_override, rest_client,
                        published_hook, already_published_hook,
                        failed_to_publish_hook, unknown_publish_error_hook):
        """ Publish a running service's manifest to the 21 marketplace.

        Args:
            service_name (str): Name of the (running) service to publish.
            host_override (str): Host to advertise in the manifest.
            rest_client: Client whose publish() posts the manifest.
            published_hook / already_published_hook / failed_to_publish_hook /
            unknown_publish_error_hook (Callable): Outcome callbacks, each
                called with service_name.
        """
        # Extract /usr/src/app/manifest.yaml out of the running container.
        strm, stat = self.docker_client.get_archive(
            'sell_%s' % service_name, '/usr/src/app/manifest.yaml')

        with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
            # safe_load: the manifest only needs plain YAML, and a
            # loader-less yaml.load() can construct arbitrary objects (and
            # is an error in PyYAML >= 6).
            manifest = yaml.safe_load(
                tf.extractfile(stat[u'name']).read().decode())
        manifest['host'] = host_override

        try:
            resp = rest_client.publish({
                "manifest": manifest,
                "marketplace": "21mkt"
            })
        except ServerRequestError as e:
            # TO600: the marketplace reports the service is already there.
            if e.status_code == 403 and e.data.get("error") == "TO600":
                already_published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)
        except Exception:
            # Narrowed from a bare 'except:'.
            unknown_publish_error_hook(service_name)
        else:
            if resp.status_code == 201:
                published_hook(service_name)
            else:
                failed_to_publish_hook(service_name)

    def read_server_config(self):
        """ Read the docker-compose service file.

        Returns:
            dict: Parsed compose file contents, or {} if the file is absent.
        """
        try:
            with open(Two1Composer.COMPOSE_FILE) as f:
                # safe_load: the compose file is plain YAML, and a
                # loader-less yaml.load() is unsafe (and an error in
                # PyYAML >= 6).  Also close the handle via 'with'.
                return yaml.safe_load(f)
        except FileNotFoundError:
            return {}

    def get_services_mnemonic(self):
        """ Return the wallet mnemonic stored in the compose file, or None
        when the file or the mnemonic entry is missing.
        """
        if not os.path.isfile(Two1Composer.COMPOSE_FILE):
            return None
        with self.ComposerYAMLContext() as composer_yaml:
            try:
                return composer_yaml['services']['payments'][
                    'environment']['TWO1_WALLET_MNEMONIC']
            except KeyError:
                return None
Exemplo n.º 15
0
class dockerizer(default_logger):
    """Turn a native Jedox Suite installation into a Docker image.

    Runs the Jedox installer once so it lays down its file tree, patches
    and augments that tree, imports it as an intermediate base image,
    runs configured post-install commands inside a container of that
    image, and finally commits the result as ``repository:tag``.
    """

    def __init__(self, args):
        default_logger.__init__(self, "dockerizer")
        self.args = args
        self.config = self.get_config()
        # Propagate the configured EULA acceptance to the installer args.
        self.args["eula"] = self.config["installer"]["eula"]
        self.directory = (os.path.dirname(os.path.realpath(__file__)))

        self.base_image_name = args["base_image"]
        self.docker = Client(base_url='unix://var/run/docker.sock')

        # Run the installer once, then shut it down; the sleeps give the
        # suite's services time to come up / terminate cleanly.
        self.installer = jedox_installer(args)
        self.installer.start()
        sleep(15)
        self.installer.stop()
        sleep(15)

        self.patch()
        self.add()
        self.build_base_image(self.base_image_name)
        self.base_container = self.docker.create_container(
            self.base_image_name)
        self.docker.start(self.base_container)
        self.docker_exec(self.base_container, self.config["docker"]["exec"])
        self.commit(self.args["docker_repository"], self.args["docker_tag"])
        # remove intermediate container
        self.logger.info("removing base container")
        self.docker.remove_container(container=self.base_container, force=True)

    def get_config(self):
        """Return the config section for args['jedox_version'].

        Exits the process with status 1 when the version (or a required
        argument) is missing from the config file.
        """
        try:
            config_file = self.args["config"]
            version = self.args["jedox_version"]

            # Bug fix: json.load(open(...)) leaked the file handle.
            with open(config_file) as fh:
                j = json.load(fh)
            return j[version]

        except KeyError as e:
            self.logger.exception(e)
            self.logger.error(
                "Could not find the right config for version=%s in file=%s \n Aborting..."
                % (version, config_file))
            sys.exit(1)

    def patch(self):
        """Apply each configured patch file to the installed tree."""
        self.logger.info("patching files from installer")
        self.change_working_directory("patch")

        for p in self.config["patch"]:
            target = os.path.join(self.args["jedox_home"], p["target"])
            description = p.get("description", p["target"])

            self.logger.info("patching : %s" % description)
            subprocess.check_call("patch %s < %s" % (target, p["source"]),
                                  shell=True)

    def add(self):
        """Copy each configured extra file into the installed tree."""
        self.logger.info("adding additional content to installation")
        self.change_working_directory("add")

        for a in self.config["add"]:
            target = os.path.join(self.args["jedox_home"], a["target"])
            self.logger.info("copy %s to %s" % (a["source"], target))
            shutil.copy(a["source"], target)

    def change_working_directory(self, area):
        """chdir into <script dir>/<area>/<jedox_version>."""
        working_directory = os.path.join(self.directory, area,
                                         self.args["jedox_version"])
        self.logger.info("working dir is now %s" % working_directory)
        os.chdir(working_directory)

    def build_base_image(self, image_name="jedox/base"):
        """Tar up jedox_home and `docker import` it as image_name."""
        os.chdir(self.args["jedox_home"])
        self.logger.info(
            "Import Jedox Suite into intermediate docker image '%s'" %
            image_name)
        subprocess.check_call(
            """tar --to-stdout --numeric-owner --exclude=/proc --exclude=/sys --exclude='*.tar.gz' --exclude='*.log' -c ./ | docker import --change "CMD while true; do ping 8.8.8.8; done" --change "ENV TERM=xterm" - %s"""
            % image_name,
            shell=True)
        self.logger.info("successfully create basecontainer %s" % image_name)

    def docker_exec(self, myContainer, exec_list):
        """Run each configured command inside the container, logging output."""
        # Post-install commands can be slow; raise the client timeout.
        self.docker.timeout = 300
        for e in exec_list:
            if "description" in e:  # print description in logs if available
                self.logger.info(e["description"])
            exec_c = self.docker.exec_create(myContainer,
                                             e["cmd"],
                                             stdout=True,
                                             stderr=True)
            output = self.docker.exec_start(exec_c)
            self.logger.debug(self.docker.exec_inspect(exec_c))
            self.logger.info(output)

        self.logger.debug("all exec done")

    def commit(self, repository, tag):
        """Commit the base container as repository:tag.

        ``tag`` may contain a ``$jedox_version`` placeholder, expanded
        here. Bug fix: the original ignored the ``tag`` parameter and
        re-read self.args["docker_tag"]; behavior is unchanged for the
        in-class caller, which passes exactly that value.
        """
        tag = Template(tag).safe_substitute(
            jedox_version=self.args["jedox_version"])
        self.logger.info("commiting finale image %s to %s : %s" %
                         (self.base_container, repository, tag))

        config = {
            "CMD": "/entrypoint",
            "EXPOSE": "[80,7777]",
        }
        self.docker.commit(self.base_container, repository, tag, conf=config)
Exemplo n.º 16
0
class dockerizer(default_logger):
    """Turn a native Jedox Suite installation into a Docker image.

    Runs the Jedox installer once so it lays down its file tree, patches
    and augments that tree, imports it as an intermediate base image,
    runs configured post-install commands inside a container of that
    image, and finally commits the result as ``repository:tag``.
    """

    def __init__(self, args):
        default_logger.__init__(self, "dockerizer")
        self.args = args
        self.config = self.get_config()
        # Propagate the configured EULA acceptance to the installer args.
        self.args["eula"] = self.config["installer"]["eula"]
        self.directory = os.path.dirname(os.path.realpath(__file__))

        self.base_image_name = args["base_image"]
        self.docker = Client(base_url='unix://var/run/docker.sock')

        # Run the installer once, then shut it down; the sleeps give the
        # suite's services time to come up / terminate cleanly.
        self.installer = jedox_installer(args)
        self.installer.start()
        sleep(15)
        self.installer.stop()
        sleep(15)

        self.patch()
        self.add()
        self.build_base_image(self.base_image_name)
        self.base_container = self.docker.create_container(self.base_image_name)
        self.docker.start(self.base_container)
        self.docker_exec(self.base_container, self.config["docker"]["exec"])
        self.commit(self.args["docker_repository"], self.args["docker_tag"])
        # remove intermediate container
        self.logger.info("removing base container")
        self.docker.remove_container(container=self.base_container, force=True)

    def get_config(self):
        """Return the config section for args['jedox_version'].

        Exits the process with status 1 when the version (or a required
        argument) is missing from the config file.
        """
        try:
            config_file = self.args["config"]
            version = self.args["jedox_version"]

            # Bug fix: json.load(open(...)) leaked the file handle.
            with open(config_file) as fh:
                j = json.load(fh)
            return j[version]

        except KeyError as e:
            self.logger.exception(e)
            self.logger.error("Could not find the right config for version=%s in file=%s \n Aborting..." % (version, config_file))
            sys.exit(1)

    def patch(self):
        """Apply each configured patch file to the installed tree."""
        self.logger.info("patching files from installer")
        self.change_working_directory("patch")

        for p in self.config["patch"]:
            target = os.path.join(self.args["jedox_home"], p["target"])
            description = p.get("description", p["target"])

            self.logger.info("patching : %s" % description)
            subprocess.check_call("patch %s < %s" % (target, p["source"]), shell=True)

    def add(self):
        """Copy each configured extra file into the installed tree."""
        self.logger.info("adding additional content to installation")
        self.change_working_directory("add")

        for a in self.config["add"]:
            target = os.path.join(self.args["jedox_home"], a["target"])
            self.logger.info("copy %s to %s" % (a["source"], target))
            shutil.copy(a["source"], target)

    def change_working_directory(self, area):
        """chdir into <script dir>/<area>/<jedox_version>."""
        working_directory = os.path.join(self.directory, area, self.args["jedox_version"])
        self.logger.info("working dir is now %s" % working_directory)
        os.chdir(working_directory)

    def build_base_image(self, image_name="jedox/base"):
        """Tar up jedox_home and `docker import` it as image_name."""
        os.chdir(self.args["jedox_home"])
        self.logger.info("Import Jedox Suite into intermediate docker image '%s'" % image_name)
        subprocess.check_call("""tar --to-stdout --numeric-owner --exclude=/proc --exclude=/sys --exclude='*.tar.gz' --exclude='*.log' -c ./ | docker import --change "CMD while true; do ping 8.8.8.8; done" --change "ENV TERM=xterm" - %s""" % image_name, shell=True)
        self.logger.info("successfully create basecontainer %s" % image_name)

    def docker_exec(self, myContainer, exec_list):
        """Run each configured command inside the container, logging output."""
        # Post-install commands can be slow; raise the client timeout.
        self.docker.timeout = 300
        for e in exec_list:
            if "description" in e:  # print description in logs if available
                self.logger.info(e["description"])
            exec_c = self.docker.exec_create(myContainer, e["cmd"], stdout=True, stderr=True)
            output = self.docker.exec_start(exec_c)
            self.logger.debug(self.docker.exec_inspect(exec_c))
            self.logger.info(output)

        self.logger.debug("all exec done")

    def commit(self, repository, tag):
        """Commit the base container as repository:tag.

        ``tag`` may contain a ``$jedox_version`` placeholder, expanded
        here. Bug fix: the original ignored the ``tag`` parameter and
        re-read self.args["docker_tag"]; behavior is unchanged for the
        in-class caller, which passes exactly that value.
        """
        tag = Template(tag).safe_substitute(jedox_version=self.args["jedox_version"])
        self.logger.info("commiting finale image %s to %s : %s" % (self.base_container, repository, tag))

        config = {"CMD": "/entrypoint",
                  "EXPOSE": "[80,7777]",
                  }
        self.docker.commit(self.base_container, repository, tag, conf=config)
Exemplo n.º 17
0
class DockerCluster(object):
    """Start/stop/control/query arbitrary clusters of docker containers.

    This class is aimed at product test writers to create docker containers
    for testing purposes.

    (Note: this docstring was previously placed after the class attributes,
    where it was a no-op string statement rather than the class docstring.)
    """

    IMAGE_NAME_BASE = os.path.join('teradatalabs', 'pa_test')
    BARE_CLUSTER_TYPE = 'bare'

    def __init__(self, master_host, slave_hosts,
                 local_mount_dir, docker_mount_dir):
        # see PyDoc for all_internal_hosts() for an explanation on the
        # difference between an internal and regular host
        self.internal_master = master_host
        self.internal_slaves = slave_hosts
        # unique "outside facing" names: a random suffix keeps parallel
        # clusters from colliding on container names
        self.master = master_host + '-' + str(uuid.uuid4())
        self.slaves = [slave + '-' + str(uuid.uuid4())
                       for slave in slave_hosts]
        # the root path for all local mount points; to get a particular
        # container mount point call get_local_mount_dir()
        self.local_mount_dir = local_mount_dir
        self.mount_dir = docker_mount_dir

        kwargs = kwargs_from_env()
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        kwargs['timeout'] = 240
        self.client = Client(**kwargs)

        # seconds to wait for containers (and their services) to come up
        self._DOCKER_START_TIMEOUT = 30
        DockerCluster.__check_if_docker_exists()

    def all_hosts(self):
        """Return all unique (suffixed) container names, master last."""
        return self.slaves + [self.master]

    def get_master(self):
        return self.master

    def all_internal_hosts(self):
        """The difference between this method and all_hosts() is that
        all_hosts() returns the unique, "outside facing" hostnames that
        docker uses. On the other hand all_internal_hosts() returns the
        more human readable host aliases for the containers used internally
        between containers. For example the unique master host will
        look something like 'master-07d1774e-72d7-45da-bf84-081cfaa5da9a',
        whereas the internal master host will be 'master'.

        Returns:
            List of all internal hosts with the random suffix stripped out.
        """
        return [host.split('-')[0] for host in self.all_hosts()]

    def get_local_mount_dir(self, host):
        """Return the local mount directory for a given (internal) host."""
        return os.path.join(self.local_mount_dir,
                            self.__get_unique_host(host))

    def get_dist_dir(self, unique):
        """Return the dist dir, per-master when ``unique`` is truthy."""
        if unique:
            return os.path.join(DIST_DIR, self.master)
        else:
            return DIST_DIR

    def __get_unique_host(self, host):
        """Map an internal host name to its unique suffixed name."""
        matches = [unique_host for unique_host in self.all_hosts()
                   if unique_host.startswith(host)]
        if matches:
            return matches[0]
        elif host in self.all_hosts():
            return host
        else:
            raise DockerClusterException(
                'Specified host: {0} does not exist.'.format(host))

    @staticmethod
    def __check_if_docker_exists():
        """Exit the process if the docker CLI is not installed."""
        try:
            subprocess.call(['docker', '--version'])
        except OSError:
            sys.exit('Docker is not installed. Try installing it with '
                     'presto-admin/bin/install-docker.sh.')

    def create_image(self, path_to_dockerfile_dir, image_tag, base_image,
                     base_image_tag=None):
        """Build image_tag from a Dockerfile dir, pulling base_image first."""
        self.fetch_image_if_not_present(base_image, base_image_tag)
        output = self._execute_and_wait(self.client.build,
                                        path=path_to_dockerfile_dir,
                                        tag=image_tag,
                                        rm=True)
        if not self._is_image_present_locally(image_tag, 'latest'):
            raise OSError('Unable to build image %s: %s' % (image_tag, output))

    def fetch_image_if_not_present(self, image, tag=None):
        """Pull image[:tag] only when it is not already available locally."""
        if not tag and not self.client.images(image):
            self._execute_and_wait(self.client.pull, image)
        elif tag and not self._is_image_present_locally(image, tag):
            self._execute_and_wait(self.client.pull, image, tag)

    def _is_image_present_locally(self, image_name, tag):
        """Return True when image_name:tag exists in the local image list."""
        image_name_and_tag = image_name + ':' + tag
        images = self.client.images(image_name)
        if images:
            for image in images:
                if image_name_and_tag in image['RepoTags']:
                    return True
        return False

    def start_containers(self, master_image, slave_image=None,
                         cmd=None, **kwargs):
        """Tear down any previous cluster, then create and start containers."""
        self.tear_down()
        self._create_host_mount_dirs()

        self._create_and_start_containers(master_image, slave_image,
                                          cmd, **kwargs)
        self._ensure_docker_containers_started(master_image)

    def tear_down(self):
        """Remove all containers and their host mount directories."""
        for container_name in self.all_hosts():
            self._tear_down_container(container_name)
        self._remove_host_mount_dirs()

    def _tear_down_container(self, container_name):
        """Remove one container and the per-master dist dir, tolerating absence."""
        try:
            shutil.rmtree(self.get_dist_dir(unique=True))
        except OSError as e:
            # no such file or directory
            if e.errno != errno.ENOENT:
                raise

        try:
            self.stop_host(container_name)
            self.client.remove_container(container_name, v=True, force=True)
        except APIError as e:
            # container does not exist
            if e.response.status_code != 404:
                raise

    def stop_host(self, container_name):
        """Stop a container and wait until it has exited."""
        self.client.stop(container_name)
        self.client.wait(container_name)

    def start_host(self, container_name):
        self.client.start(container_name)

    def get_down_hostname(self, host_name):
        return host_name

    def _remove_host_mount_dirs(self):
        """Delete every container's local mount dir, tolerating absence."""
        for container_name in self.all_hosts():
            try:
                shutil.rmtree(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # no such file or directory
                if e.errno != errno.ENOENT:
                    raise

    def _create_host_mount_dirs(self):
        """Create every container's local mount dir, tolerating existence."""
        for container_name in self.all_hosts():
            try:
                os.makedirs(
                    self.get_local_mount_dir(container_name))
            except OSError as e:
                # file exists
                if e.errno != errno.EEXIST:
                    raise

    @staticmethod
    def _execute_and_wait(func, *args, **kwargs):
        """Call func and drain its streamed output to ensure it finishes.

        Returns the concatenated output. (Was a quadratic ``+=`` loop.)
        """
        ret = func(*args, **kwargs)
        # go through all lines in returned stream to ensure func finishes
        return ''.join(ret)

    def _create_and_start_containers(self, master_image, slave_image=None,
                                     cmd=None, **kwargs):
        """Create and start slave containers (if any), then the master."""
        if slave_image:
            for container_name in self.slaves:
                container_mount_dir = \
                    self.get_local_mount_dir(container_name)
                self._create_container(
                    slave_image, container_name,
                    container_name.split('-')[0], cmd
                )
                self.client.start(container_name,
                                  binds={container_mount_dir:
                                         {'bind': self.mount_dir,
                                          'ro': False}},
                                  **kwargs)

        master_mount_dir = self.get_local_mount_dir(self.master)
        self._create_container(
            master_image, self.master, hostname=self.internal_master,
            cmd=cmd
        )
        # list(...) keeps this working on Python 3, where zip is lazy
        self.client.start(self.master,
                          binds={master_mount_dir:
                                 {'bind': self.mount_dir,
                                  'ro': False}},
                          links=list(zip(self.slaves, self.slaves)), **kwargs)
        self._add_hostnames_to_slaves()

    def _create_container(self, image, container_name, hostname=None,
                          cmd=None):
        """Create a single detached container with the cluster defaults."""
        self._execute_and_wait(self.client.create_container,
                               image,
                               detach=True,
                               name=container_name,
                               hostname=hostname,
                               volumes=self.local_mount_dir,
                               command=cmd,
                               mem_limit='2g')

    def _add_hostnames_to_slaves(self):
        """Append every internal host's IP/name pair to each slave's /etc/hosts."""
        ips = self.get_ip_address_dict()
        additions_to_etc_hosts = ''
        for host in self.all_internal_hosts():
            additions_to_etc_hosts += '%s\t%s\n' % (ips[host], host)

        for host in self.slaves:
            # bug fix: was 'bin/bash' (relative path), which only resolved
            # because the container working dir happened to be '/'
            self.exec_cmd_on_host(
                host,
                '/bin/bash -c \'echo "%s" >> /etc/hosts\''
                % additions_to_etc_hosts
            )

    def _ensure_docker_containers_started(self, image):
        """Poll until all containers (and, for CentOS-based images, their
        essential services) are running, or raise after the timeout."""
        centos_based_images = [BASE_TD_IMAGE_NAME]

        timeout = 0
        is_host_started = {}
        for host in self.all_hosts():
            is_host_started[host] = False
        while timeout < self._DOCKER_START_TIMEOUT:
            for host in self.all_hosts():
                atomic_is_started = True
                atomic_is_started &= \
                    self.client.inspect_container(host)['State']['Running']
                if image in centos_based_images or \
                        image.startswith(self.IMAGE_NAME_BASE):
                    atomic_is_started &= \
                        self._are_centos_container_services_up(host)
                is_host_started[host] = atomic_is_started
            if not DockerCluster._are_all_hosts_started(is_host_started):
                timeout += 1
                sleep(1)
            else:
                break
        # bug fix: was ``timeout is self._DOCKER_START_TIMEOUT`` — identity
        # comparison of ints that only works via CPython small-int caching
        if timeout >= self._DOCKER_START_TIMEOUT:
            raise DockerClusterException(
                'Docker container timed out on start.' + str(is_host_started))

    @staticmethod
    def _are_all_hosts_started(host_started_map):
        """True when every host in the map is marked started."""
        return all(host_started_map.values())

    def _are_centos_container_services_up(self, host):
        """Some essential services in our CentOS containers take some time
        to start after the container itself is up. This function checks
        whether those services are up and returns a boolean accordingly.
        Specifically, we check that the app-admin user has been created
        and that the ssh daemon is up.

        Args:
          host: the host to check.

        Returns:
          True if the specified services have started, False otherwise.

        """
        ps_output = self.exec_cmd_on_host(host, 'ps')
        # also ensure that the app-admin user exists
        try:
            user_output = self.exec_cmd_on_host(
                host, 'grep app-admin /etc/passwd'
            )
            user_output += self.exec_cmd_on_host(host, 'stat /home/app-admin')
        except OSError:
            user_output = ''
        if 'sshd_bootstrap' in ps_output or 'sshd\n' not in ps_output\
                or not user_output:
            return False
        return True

    def exec_cmd_on_host(self, host, cmd, raise_error=True, tty=False):
        """Run cmd inside a container; return its output.

        Raises OSError(exit_code, output) when the command fails and
        ``raise_error`` is set.
        """
        ex = self.client.exec_create(self.__get_unique_host(host), cmd,
                                     tty=tty)
        output = self.client.exec_start(ex['Id'], tty=tty)
        exit_code = self.client.exec_inspect(ex['Id'])['ExitCode']
        if raise_error and exit_code:
            raise OSError(exit_code, output)
        return output

    @staticmethod
    def _get_master_image_name(cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            '%s_master' % (cluster_type))

    @staticmethod
    def _get_slave_image_name(cluster_type):
        return os.path.join(DockerCluster.IMAGE_NAME_BASE,
                            '%s_slave' % (cluster_type))

    @staticmethod
    def start_bare_cluster():
        """Build (if needed) and start the bare 1-master/3-slave cluster."""
        dc = DockerCluster
        master_name = dc._get_master_image_name(dc.BARE_CLUSTER_TYPE)
        slave_name = dc._get_slave_image_name(dc.BARE_CLUSTER_TYPE)
        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        if not dc._check_for_images(master_name, slave_name):
            centos_cluster.create_image(
                BASE_TD_DOCKERFILE_DIR,
                master_name,
                BASE_IMAGE_NAME,
                BASE_IMAGE_TAG
            )

            centos_cluster.create_image(
                BASE_TD_DOCKERFILE_DIR,
                slave_name,
                BASE_IMAGE_NAME,
                BASE_IMAGE_TAG
            )

        centos_cluster.start_containers(master_name, slave_name)

        return centos_cluster

    @staticmethod
    def start_existing_images(cluster_type):
        """Start a cluster from pre-built images, or return None if absent."""
        dc = DockerCluster
        master_name = dc._get_master_image_name(cluster_type)
        slave_name = dc._get_slave_image_name(cluster_type)

        if not dc._check_for_images(master_name, slave_name):
            return None

        centos_cluster = DockerCluster('master',
                                       ['slave1', 'slave2', 'slave3'],
                                       DEFAULT_LOCAL_MOUNT_POINT,
                                       DEFAULT_DOCKER_MOUNT_POINT)

        centos_cluster.start_containers(master_name, slave_name)
        return centos_cluster

    @staticmethod
    def _check_for_images(master_image_name, slave_image_name):
        """Return True when both cluster images exist locally.

        NOTE(review): only the first RepoTag of each image is inspected,
        and a substring match is used — presumably sufficient for the
        naming scheme above; confirm before reuse.
        """
        client = Client(timeout=180)
        images = client.images()
        has_master_image = False
        has_slave_image = False
        for image in images:
            if master_image_name in image['RepoTags'][0]:
                has_master_image = True
            if slave_image_name in image['RepoTags'][0]:
                has_slave_image = True
        return has_master_image and has_slave_image

    def commit_images(self, cluster_type):
        """Snapshot the running master and first slave as cluster images."""
        self.client.commit(self.master,
                           self._get_master_image_name(cluster_type))
        self.client.commit(self.slaves[0],
                           self._get_slave_image_name(cluster_type))

    def run_script_on_host(self, script_contents, host):
        """Write script_contents to /tmp/tmp.sh on host and execute it."""
        temp_script = '/tmp/tmp.sh'
        self.write_content_to_host('#!/bin/bash\n%s' % script_contents,
                                   temp_script, host)
        self.exec_cmd_on_host(host, 'chmod +x %s' % temp_script)
        return self.exec_cmd_on_host(host, temp_script, tty=True)

    def write_content_to_host(self, content, path, host):
        """Write content to ``path`` inside host via the shared mount dir."""
        filename = os.path.basename(path)
        dest_dir = os.path.dirname(path)
        host_local_mount_point = self.get_local_mount_dir(host)
        local_path = os.path.join(host_local_mount_point, filename)

        with open(local_path, 'w') as config_file:
            config_file.write(content)

        self.exec_cmd_on_host(host, 'mkdir -p ' + dest_dir)
        self.exec_cmd_on_host(
            host, 'cp %s %s' % (os.path.join(self.mount_dir, filename),
                                dest_dir))

    def copy_to_host(self, source_path, dest_host):
        """Copy a local file into dest_host via its shared mount dir."""
        shutil.copy(source_path, self.get_local_mount_dir(dest_host))

    def get_ip_address_dict(self):
        """Map both unique and internal host names to container IPs."""
        ip_addresses = {}
        for host, internal_host in zip(self.all_hosts(),
                                       self.all_internal_hosts()):
            inspect = self.client.inspect_container(host)
            ip_addresses[host] = inspect['NetworkSettings']['IPAddress']
            ip_addresses[internal_host] = \
                inspect['NetworkSettings']['IPAddress']
        return ip_addresses

    def _post_presto_install(self):
        """Regenerate a unique node.id on every worker after installing Presto."""
        for worker in self.slaves:
            self.run_script_on_host(
                'sed -i /node.id/d /etc/presto/node.properties; '
                'uuid=$(uuidgen); '
                'echo node.id=$uuid >> /etc/presto/node.properties',
                worker
            )

    def postinstall(self, installer):
        """Run the post-install hook registered for the given installer, if any."""
        from tests.product.standalone.presto_installer \
            import StandalonePrestoInstaller

        _post_install_hooks = {
            StandalonePrestoInstaller: DockerCluster._post_presto_install
        }

        hook = _post_install_hooks.get(installer, None)
        if hook:
            hook(self)
Exemplo n.º 18
0
class DockerControl:
    """Create/start/stop/remove a single docker container and run commands
    in it, driven by the attributes of ``containerObject``."""

    def __init__(self, containerObject):
        self.containerObject = containerObject
        self.dockerCli = Client(base_url="unix://var/run/docker.sock")

    def _find_container_id(self, include_stopped=False):
        """Return the Id of the container named containerObject.name.

        Bug fix: the original left ``containerId`` unbound (NameError)
        when no container matched; raise an explicit LookupError instead.
        """
        nameString = "/" + self.containerObject.name
        for container in self.dockerCli.containers(all=include_stopped):
            if container["Names"][0] == nameString:
                return container["Id"]
        raise LookupError("container %s not found" % self.containerObject.name)

    def remove(self):
        """Stop and remove the running container."""
        # (dead code removed: an unused label dict was built here)
        containerId = self._find_container_id()
        self.dockerCli.stop(container=containerId)
        self.dockerCli.remove_container(container=containerId)

    def runCmd(self, cmd):
        """Exec ``cmd`` inside the running container and return its output."""
        containerId = self._find_container_id()
        execKey = self.dockerCli.exec_create(containerId, cmd)
        execResult = self.dockerCli.exec_start(execKey["Id"])
        return execResult

    def create(self):
        """Create and start the container with type-specific volumes, then
        expose its network namespace under /var/run/netns/<name>."""
        image = self.containerObject.type
        name = self.containerObject.name
        domain = self.containerObject.domain
        hostname = self.containerObject.name
        # Fresh per-container volume directory.
        directory = CONTAINER_VOL_DIR + "/" + name
        if os.path.isdir(directory):
            rmtree(directory)
        os.makedirs(directory)
        # Each image type mounts a different set of config volumes.
        if image == "dns":
            copy(CONTAINER_CONF_DIR + "/dnsmasq.conf", directory + "/dnsmasq.conf")
            copytree(CONTAINER_CONF_DIR + "/dnsmasq.d", directory + "/dnsmasq.d")
            dnsmasqConfVolume = directory + "/dnsmasq.conf:/etc/dnsmasq.conf"
            dnsmasqDVolume = directory + "/dnsmasq.d:/etc/dnsmasq.d"
            dVolumes = [dnsmasqConfVolume, dnsmasqDVolume]
        elif image == "puppet":
            puppetConfVolume = CONTAINER_VOL_DIR + "/puppet-master.conf:/etc/puppet/puppet.conf"
            hieradataVolume = CONTAINER_VOL_DIR + "/hieradata:/etc/puppet/hieradata"
            siteVolume = CONTAINER_VOL_DIR + "/site.pp:/etc/puppet/manifests/site.pp"
            modulesVolume = CONTAINER_VOL_DIR + "/modules:/etc/puppet/modules"
            dVolumes = [puppetConfVolume, hieradataVolume, siteVolume, modulesVolume]
        else:
            copy(CONTAINER_CONF_DIR + "/puppet.conf", directory + "/puppet.conf")
            copy(CONTAINER_CONF_DIR + "/auth.conf", directory + "/auth.conf")
            puppetConfVolume = directory + "/puppet.conf:/etc/puppet/puppet.conf"
            authConfVolume = directory + "/auth.conf:/etc/puppet/auth.conf"
            dVolumes = [puppetConfVolume, authConfVolume]
        # containerObject.dns is either a dict of server configs or a
        # single address string.
        dnsList = []
        if isinstance(self.containerObject.dns, dict):
            for dnsServer in self.containerObject.dns.keys():
                dnsServerString = self.containerObject.dns[dnsServer]["ipaddress"].split("/")[0]
                dnsList.append(dnsServerString)
        else:
            dnsList.append(self.containerObject.dns)
        dnsSearchList = [domain]
        command = "/sbin/init"
        host_config = create_host_config(
            privileged=True,
            cap_add=["NET_ADMIN"],
            dns=dnsList,
            dns_search=dnsSearchList,
            binds=dVolumes,
            network_mode="none",
        )
        container = self.dockerCli.create_container(
            image=image,
            name=name,
            command=command,
            domainname=domain,
            hostname=name,
            volumes=dVolumes,
            detach=True,
            host_config=host_config,
        )
        self.dockerCli.start(container=container.get("Id"))
        containerInfo = self.dockerCli.inspect_container(container=container.get("Id"))
        containerPid = containerInfo["State"]["Pid"]
        # Symlink the container's net namespace so `ip netns` can manage it.
        pidPath = "/proc/" + str(containerPid) + "/ns/net"
        netNsPath = "/var/run/netns/" + name
        os.symlink(pidPath, netNsPath)
        return containerInfo

    def start(self):
        """Start a stopped container and re-link its network namespace."""
        containerId = self._find_container_id(include_stopped=True)
        self.dockerCli.start(container=containerId)
        containerInfo = self.dockerCli.inspect_container(container=containerId)
        containerPid = containerInfo["State"]["Pid"]
        pidPath = "/proc/" + str(containerPid) + "/ns/net"
        netNsPath = "/var/run/netns/" + self.containerObject.name
        os.symlink(pidPath, netNsPath)
        return containerInfo

    def stop(self):
        """Stop the running container and return its inspect data."""
        containerId = self._find_container_id()
        self.dockerCli.stop(container=containerId)
        containerInfo = self.dockerCli.inspect_container(container=containerId)
        return containerInfo