Example #1
    def __init__(self, ibm_cos_config, **kwargs):
        logger.debug("Creating IBM COS client")
        self.ibm_cos_config = ibm_cos_config
        self.is_lithops_worker = is_lithops_worker()
        user_agent = self.ibm_cos_config['user_agent']

        api_key = None
        if 'api_key' in self.ibm_cos_config:
            api_key = self.ibm_cos_config.get('api_key')
            api_key_type = 'COS'
        elif 'iam_api_key' in self.ibm_cos_config:
            api_key = self.ibm_cos_config.get('iam_api_key')
            api_key_type = 'IAM'

        service_endpoint = self.ibm_cos_config.get('endpoint').replace('http:', 'https:')
        if self.is_lithops_worker and 'private_endpoint' in self.ibm_cos_config:
            service_endpoint = self.ibm_cos_config.get('private_endpoint')
            if api_key:
                service_endpoint = service_endpoint.replace('http:', 'https:')

        logger.debug("Set IBM COS Endpoint to {}".format(service_endpoint))

        if {'secret_key', 'access_key'} <= set(self.ibm_cos_config):
            logger.debug("Using access_key and secret_key")
            access_key = self.ibm_cos_config.get('access_key')
            secret_key = self.ibm_cos_config.get('secret_key')
            client_config = ibm_botocore.client.Config(max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            self.cos_client = ibm_boto3.client('s3',
                                               aws_access_key_id=access_key,
                                               aws_secret_access_key=secret_key,
                                               config=client_config,
                                               endpoint_url=service_endpoint)

        elif api_key is not None:
            client_config = ibm_botocore.client.Config(signature_version='oauth',
                                                       max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            token = self.ibm_cos_config.get('token', None)
            token_expiry_time = self.ibm_cos_config.get('token_expiry_time', None)

            iam_token_manager = IBMTokenManager(api_key, api_key_type, token, token_expiry_time)
            token, token_expiry_time = iam_token_manager.get_token()

            self.ibm_cos_config['token'] = token
            self.ibm_cos_config['token_expiry_time'] = token_expiry_time

            self.cos_client = ibm_boto3.client('s3', token_manager=iam_token_manager._token_manager,
                                               config=client_config,
                                               endpoint_url=service_endpoint)

        logger.info("IBM COS client created successfully")
Example #2
    def __init__(self, ibm_cf_config, storage_config):
        logger.debug("Creating IBM Cloud Functions client")
        self.name = 'ibm_cf'
        self.config = ibm_cf_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get(
            'namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get(
            'api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.debug("Set IBM CF Namespace to {}".format(self.namespace))
        logger.debug("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = self.api_key.split(':')[1][:4] if self.api_key else self.iam_api_key[:4]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)

        elif self.iam_api_key:
            api_key_type = 'IAM'
            token = self.config.get('token', None)
            token_expiry_time = self.config.get('token_expiry_time', None)
            self.ibm_token_manager = IBMTokenManager(self.iam_api_key,
                                                     api_key_type, token,
                                                     token_expiry_time)
            token, token_expiry_time = self.ibm_token_manager.get_token()

            self.config['token'] = token
            self.config['token_expiry_time'] = token_expiry_time

            auth = 'Bearer ' + token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        msg = COMPUTE_CLI_MSG.format('IBM CF')
        logger.info("{} - Region: {} - Namespace: {}".format(
            msg, self.region, self.namespace))
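A configuration shape that matches the lookups above (all values are placeholders). With a regional api_key the client authenticates with Basic auth against namespace; with an iam_api_key it goes through IBMTokenManager and targets namespace_id with a Bearer token:

ibm_cf_config = {
    'user_agent': 'lithops/2.x',
    'region': 'us-south',
    'regions': {
        'us-south': {
            'endpoint': 'https://us-south.functions.cloud.ibm.com',
            'namespace': 'my-namespace',
            'namespace_id': '<NAMESPACE_ID>',  # used with IAM auth
            # 'api_key': '<CF_API_KEY>',       # alternative: CF key (Basic) auth
        }
    },
    'iam_api_key': '<IAM_API_KEY>',
}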
Example #3
    def _get_iam_token(self):
        """ Requests and IBM IAM token """
        token = self.code_engine_config.get('token', None)
        token_expiry_time = self.code_engine_config.get(
            'token_expiry_time', None)
        self.ibm_token_manager = IBMTokenManager(self.iam_api_key, 'IAM',
                                                 token, token_expiry_time)
        token, token_expiry_time = self.ibm_token_manager.get_token()
        self.code_engine_config['token'] = token
        self.code_engine_config['token_expiry_time'] = token_expiry_time

        return token
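The token and its expiry time are round-tripped through the config dict so that a still-valid token can be reused instead of requesting a new one from IAM on every call. A sketch of the assumed IBMTokenManager contract (its internals are not shown in these examples):

# Assumed behavior: get_token() returns the cached token while it is still
# valid, and transparently requests a fresh one from IAM otherwise.
config = {'token': None, 'token_expiry_time': None}  # placeholder cache
manager = IBMTokenManager('<IAM_API_KEY>', 'IAM',
                          config.get('token'), config.get('token_expiry_time'))
token, expiry = manager.get_token()
config['token'], config['token_expiry_time'] = token, expiry  # cache for reuse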
Example #4
    def _get_iam_token(self):
        """ Requests an IBM IAM token """
        configuration = client.Configuration.get_default_copy()
        if self.namespace and self.region:
            configuration.host = self.cluster

        if not self.ibm_token_manager:
            token = self.code_engine_config.get('token', None)
            token_expiry_time = self.code_engine_config.get('token_expiry_time', None)
            self.ibm_token_manager = IBMTokenManager(self.iam_api_key,
                                                     'IAM', token,
                                                     token_expiry_time)

        token, token_expiry_time = self.ibm_token_manager.get_token()
        self.code_engine_config['token'] = token
        self.code_engine_config['token_expiry_time'] = token_expiry_time

        configuration.api_key = {"authorization": "Bearer " + token}
        client.Configuration.set_default(configuration)
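A design note on the snippet above: the refreshed token is installed as the kubernetes client's default Configuration rather than patched into existing API objects, so callers recreate their API objects to pick it up, as Example #7 does around each invocation:

self._get_iam_token()                        # installs the refreshed default Configuration
self.custom_api = client.CustomObjectsApi()  # new API objects read the new default
self.core_api = client.CoreV1Api()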
Example #5
    def __init__(self, ibm_vpc_config):
        logger.debug("Creating IBM VPC client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'ibm_vpc'
        self.config = ibm_vpc_config

        self.endpoint = self.config['endpoint']
        self.region = self.endpoint.split('//')[1].split('.')[0]
        self.instance_id = self.config['instance_id']
        self.ip_address = self.config.get('ip_address', None)

        self.instance_data = None

        self.ssh_credentials = {
            'username': self.config.get('ssh_user', 'root'),
            'password': self.config.get('ssh_password', None),
            'key_filename': self.config.get('ssh_key_filename', None)
        }

        self.session = requests.session()

        iam_api_key = self.config.get('iam_api_key')
        token = self.config.get('token', None)
        token_expiry_time = self.config.get('token_expiry_time', None)
        api_key_type = 'IAM'
        self.iam_token_manager = IBMTokenManager(iam_api_key, api_key_type,
                                                 token, token_expiry_time)

        headers = {'content-type': 'application/json'}
        default_user_agent = self.session.headers['User-Agent']
        headers['User-Agent'] = default_user_agent + ' {}'.format(
            self.config['user_agent'])
        self.session.headers.update(headers)

        adapter = requests.adapters.HTTPAdapter()
        self.session.mount('https://', adapter)

        log_msg = (
            'Lithops v{} init for IBM Virtual Private Cloud - Host: {} - Region: {}'
            .format(__version__, self.ip_address, self.region))
        if not self.log_active:
            print(log_msg)
        logger.info("IBM VPC client created successfully")
Example #6
    def __init__(self, ibm_vpc_config):
        logger.debug("Creating IBM VPC client")
        self.name = 'ibm_vpc'
        self.config = ibm_vpc_config

        self.endpoint = self.config['endpoint']
        self.region = self.endpoint.split('//')[1].split('.')[0]
        self.instance_id = self.config['instance_id']
        self.ip_address = self.config.get('ip_address', None)

        self.instance_data = None

        self.ssh_credentials = {
            'username': self.config.get('ssh_user', 'root'),
            'password': self.config.get('ssh_password', None),
            'key_filename': self.config.get('ssh_key_filename', None)
        }

        self.session = requests.session()

        iam_api_key = self.config.get('iam_api_key')
        token = self.config.get('token', None)
        token_expiry_time = self.config.get('token_expiry_time', None)
        api_key_type = 'IAM'
        self.iam_token_manager = IBMTokenManager(iam_api_key, api_key_type,
                                                 token, token_expiry_time)

        headers = {'content-type': 'application/json'}
        default_user_agent = self.session.headers['User-Agent']
        headers['User-Agent'] = default_user_agent + ' {}'.format(
            self.config['user_agent'])
        self.session.headers.update(headers)

        adapter = requests.adapters.HTTPAdapter()
        self.session.mount('https://', adapter)

        msg = COMPUTE_CLI_MSG.format('IBM VPC')
        logger.info("{} - Region: {} - Host: {}".format(
            msg, self.region, self.ip_address))
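The lookups above imply a configuration of roughly this shape (all values are placeholders; 'version' and 'generation' are consumed by the REST calls shown in Example #8):

ibm_vpc_config = {
    'endpoint': 'https://us-south.iaas.cloud.ibm.com',  # region is parsed from this URL
    'instance_id': '<INSTANCE_ID>',
    'ip_address': '<PUBLIC_IP>',    # optional; otherwise resolved via floating IPs
    'iam_api_key': '<IAM_API_KEY>',
    'user_agent': 'lithops/2.x',
    'version': '2021-06-01',        # hypothetical API version date
    'generation': '2',
}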
Example #7
class CodeEngineBackend:
    """
    A wrapper around the Code Engine backend.
    """

    def __init__(self, code_engine_config, internal_storage):
        logger.debug("Creating IBM Code Engine client")
        self.name = 'code_engine'
        self.type = 'batch'
        self.code_engine_config = code_engine_config
        self.internal_storage = internal_storage

        self.kubecfg_path = code_engine_config.get('kubecfg_path')
        self.user_agent = code_engine_config['user_agent']

        self.iam_api_key = code_engine_config.get('iam_api_key', None)
        self.namespace = code_engine_config.get('namespace', None)
        self.region = code_engine_config.get('region', None)

        self.ibm_token_manager = None
        self.is_lithops_worker = is_lithops_worker()

        if self.namespace and self.region:
            self.cluster = ce_config.CLUSTER_URL.format(self.region)

        if self.iam_api_key and not self.is_lithops_worker:
            self._get_iam_token()

        else:
            try:
                config.load_kube_config(config_file=self.kubecfg_path)
                logger.debug("Loading kubecfg file")
                contexts = config.list_kube_config_contexts(config_file=self.kubecfg_path)
                current_context = contexts[1].get('context')
                self.namespace = current_context.get('namespace')
                self.cluster = current_context.get('cluster')

                if self.iam_api_key:
                    self._get_iam_token()

            except Exception:
                logger.debug('Loading incluster kubecfg')
                config.load_incluster_config()

        self.code_engine_config['namespace'] = self.namespace
        self.code_engine_config['cluster'] = self.cluster
        logger.debug("Set namespace to {}".format(self.namespace))
        logger.debug("Set cluster to {}".format(self.cluster))

        self.custom_api = client.CustomObjectsApi()
        self.core_api = client.CoreV1Api()

        try:
            self.region = self.cluster.split('//')[1].split('.')[1]
        except Exception:
            self.region = self.cluster.replace('http://', '').replace('https://', '')

        self.jobs = []  # list to store executed jobs (job_keys)

        msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
        logger.info("{} - Region: {}".format(msg, self.region))

    def _get_iam_token(self):
        """ Requests an IBM IAM token """
        configuration = client.Configuration.get_default_copy()
        if self.namespace and self.region:
            configuration.host = self.cluster

        if not self.ibm_token_manager:
            token = self.code_engine_config.get('token', None)
            token_expiry_time = self.code_engine_config.get('token_expiry_time', None)
            self.ibm_token_manager = IBMTokenManager(self.iam_api_key,
                                                     'IAM', token,
                                                     token_expiry_time)

        token, token_expiry_time = self.ibm_token_manager.get_token()
        self.code_engine_config['token'] = token
        self.code_engine_config['token_expiry_time'] = token_expiry_time

        configuration.api_key = {"authorization": "Bearer " + token}
        client.Configuration.set_default(configuration)

    def _format_jobdef_name(self, runtime_name, runtime_memory):
        runtime_name = runtime_name.replace('/', '--')
        runtime_name = runtime_name.replace(':', '--')
        runtime_name = runtime_name.replace('.', '')
        runtime_name = runtime_name.replace('_', '-')
        return '{}--{}mb'.format(runtime_name, runtime_memory)

    def _get_default_runtime_image_name(self):
        docker_user = self.code_engine_config.get('docker_user')
        python_version = version_str(sys.version_info).replace('.', '')
        revision = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
        return '{}/{}-v{}:{}'.format(docker_user, ce_config.RUNTIME_NAME, python_version, revision)

    def _delete_function_handler_zip(self):
        os.remove(ce_config.FH_ZIP_LOCATION)

    def build_runtime(self, docker_image_name, dockerfile, extra_args=[]):
        """
        Builds a new runtime from a Dockerfile and pushes it to Docker Hub
        """
        logger.debug('Building new docker image from Dockerfile')
        logger.debug('Docker image name: {}'.format(docker_image_name))

        entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')
        create_handler_zip(ce_config.FH_ZIP_LOCATION, entry_point, 'lithopsentry.py')

        if dockerfile:
            cmd = '{} build -t {} -f {} . '.format(ce_config.DOCKER_PATH,
                                                   docker_image_name,
                                                   dockerfile)
        else:
            cmd = '{} build -t {} . '.format(ce_config.DOCKER_PATH, docker_image_name)

        cmd = cmd+' '.join(extra_args)

        if logger.getEffectiveLevel() != logging.DEBUG:
            cmd = cmd + " >{} 2>&1".format(os.devnull)

        logger.info('Building runtime')
        res = os.system(cmd)
        if res != 0:
            raise Exception('There was an error building the runtime')

        self._delete_function_handler_zip()

        cmd = '{} push {}'.format(ce_config.DOCKER_PATH, docker_image_name)
        if logger.getEffectiveLevel() != logging.DEBUG:
            cmd = cmd + " >{} 2>&1".format(os.devnull)
        res = os.system(cmd)
        if res != 0:
            raise Exception('There was an error pushing the runtime to the container registry')
        logger.debug('Building done!')

    def _build_default_runtime(self, default_runtime_img_name):
        """
        Builds the default runtime
        """
        if os.system('{} --version >{} 2>&1'.format(ce_config.DOCKER_PATH, os.devnull)) == 0:
            # Build the default runtime using local docker
            python_version = version_str(sys.version_info)
            dockerfile = "Dockefile.default-codeengine-runtime"
            with open(dockerfile, 'w') as f:
                f.write("FROM python:{}-slim-buster\n".format(python_version))
                f.write(ce_config.DOCKERFILE_DEFAULT)
            self.build_runtime(default_runtime_img_name, dockerfile)
            os.remove(dockerfile)
        else:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')

    def create_runtime(self, docker_image_name, memory, timeout):
        """
        Creates a new runtime from an already built Docker image
        """
        default_runtime_img_name = self._get_default_runtime_image_name()
        if docker_image_name in ['default', default_runtime_img_name]:
            # We only build the default image. The rest of the images must
            # already exist in the docker registry.
            docker_image_name = default_runtime_img_name
            self._build_default_runtime(default_runtime_img_name)

        logger.debug('Creating new Lithops runtime based on '
                     'Docker image: {}'.format(docker_image_name))
        self._create_job_definition(docker_image_name, memory, timeout)

        runtime_meta = self._generate_runtime_meta(docker_image_name, memory)

        return runtime_meta

    def delete_runtime(self, docker_image_name, memory):
        """
        Deletes a runtime
        We need to delete the job definition
        """
        def_id = self._format_jobdef_name(docker_image_name, memory)
        self._job_def_cleanup(def_id)

    def _job_run_cleanup(self, jobrun_name):
        logger.debug("Deleting jobrun {}".format(jobrun_name))
        try:
            self.custom_api.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                name=jobrun_name,
                namespace=self.namespace,
                plural="jobruns",
                body=client.V1DeleteOptions(),
            )
        except ApiException as e:
            logger.debug("Deleting a jobrun failed with {} {}"
                         .format(e.status, e.reason))

    def _job_def_cleanup(self, jobdef_id):
        logger.info("Deleting runtime: {}".format(jobdef_id))
        try:
            self.custom_api.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                name=jobdef_id,
                namespace=self.namespace,
                plural="jobdefinitions",
                body=client.V1DeleteOptions(),
            )
        except ApiException as e:
            logger.debug("Deleting a jobdef failed with {} {}"
                         .format(e.status, e.reason))

    def clean(self):
        """
        Deletes all runtimes from all packages
        """
        self.clear()
        runtimes = self.list_runtimes()
        for docker_image_name, memory in runtimes:
            self.delete_runtime(docker_image_name, memory)

        logger.debug('Deleting all lithops configmaps')
        configmaps = self.core_api.list_namespaced_config_map(namespace=self.namespace)
        for configmap in configmaps.items:
            config_name = configmap.metadata.name
            if config_name.startswith('lithops'):
                logger.debug('Deleting configmap {}'.format(config_name))
                self.core_api.delete_namespaced_config_map(
                    name=config_name,
                    namespace=self.namespace,
                    grace_period_seconds=0)

    def list_runtimes(self, docker_image_name='all'):
        """
        List all the runtimes
        return: list of tuples (docker_image_name, memory)
        """

        runtimes = []
        try:
            jobdefs = self.custom_api.list_namespaced_custom_object(
                            group=ce_config.DEFAULT_GROUP,
                            version=ce_config.DEFAULT_VERSION,
                            namespace=self.namespace,
                            plural="jobdefinitions")
        except ApiException as e:
            logger.debug("List all jobdefinitions failed with {} {}".format(e.status, e.reason))
            return runtimes

        for jobdef in jobdefs['items']:
            try:
                if jobdef['metadata']['labels']['type'] == 'lithops-runtime':
                    container = jobdef['spec']['template']['containers'][0]
                    image_name = container['image']
                    memory = container['resources']['requests']['memory'].replace('M', '')
                    memory = int(int(memory)/1000*1024)
                    if docker_image_name in image_name or docker_image_name == 'all':
                        runtimes.append((image_name, memory))
            except Exception:
                # It is not a lithops runtime
                pass

        return runtimes

    def clear(self, job_keys=None):
        """
        Clean all completed jobruns in the current executor
        """
        if self.iam_api_key and not self.is_lithops_worker:
            # try to refresh the token
            self._get_iam_token()
            self.custom_api = client.CustomObjectsApi()
            self.core_api = client.CoreV1Api()

        jobs_to_delete = job_keys or self.jobs
        for job_key in jobs_to_delete:
            jobrun_name = 'lithops-{}'.format(job_key.lower())
            try:
                self._job_run_cleanup(jobrun_name)
                self._delete_config_map(jobrun_name)
            except Exception as e:
                logger.debug("Deleting a jobrun failed with: {}".format(e))
            try:
                self.jobs.remove(job_key)
            except ValueError:
                pass

    def invoke(self, docker_image_name, runtime_memory, job_payload):
        """
        Invoke -- return information about this invocation
        For array jobs only the remote invoker is allowed
        """
        if self.iam_api_key and not self.is_lithops_worker:
            # try to refresh the token
            self._get_iam_token()
            self.custom_api = client.CustomObjectsApi()
            self.core_api = client.CoreV1Api()

        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']

        job_key = job_payload['job_key']
        self.jobs.append(job_key)

        total_calls = job_payload['total_calls']
        chunksize = job_payload['chunksize']
        total_workers = total_calls // chunksize + (total_calls % chunksize > 0)

        jobdef_name = self._format_jobdef_name(docker_image_name, runtime_memory)

        if not self._job_def_exists(jobdef_name):
            jobdef_name = self._create_job_definition(docker_image_name, runtime_memory, jobdef_name)

        jobrun_res = yaml.safe_load(ce_config.JOBRUN_DEFAULT)

        activation_id = 'lithops-{}'.format(job_key.lower())

        jobrun_res['metadata']['name'] = activation_id
        jobrun_res['metadata']['namespace'] = self.namespace
        jobrun_res['spec']['jobDefinitionRef'] = str(jobdef_name)
        jobrun_res['spec']['jobDefinitionSpec']['arraySpec'] = '0-' + str(total_workers - 1)

        container = jobrun_res['spec']['jobDefinitionSpec']['template']['containers'][0]
        container['name'] = str(jobdef_name)
        container['env'][0]['value'] = 'run'

        config_map = self._create_config_map(activation_id, job_payload)
        container['env'][1]['valueFrom']['configMapKeyRef']['name'] = config_map

        container['resources']['requests']['memory'] = '{}G'.format(runtime_memory/1024)
        container['resources']['requests']['cpu'] = str(self.code_engine_config['runtime_cpu'])

        # logger.debug("request - {}".format(jobrun_res)

        logger.debug('ExecutorID {} | JobID {} - Going to run {} activations '
                     '{} workers'.format(executor_id, job_id, total_calls, total_workers))

        try:
            self.custom_api.create_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                body=jobrun_res,
            )
        except Exception as e:
            raise e

        # logger.debug("response - {}".format(res))

        return activation_id

    def _create_container_registry_secret(self):
        """
        Create the container registry secret in the cluster
        (only if credentials are present in config)
        """
        if not all(key in self.code_engine_config for key in ["docker_user", "docker_password"]):
            return

        logger.debug('Creating container registry secret')
        docker_server = self.code_engine_config.get('docker_server', 'https://index.docker.io/v1/')
        docker_user = self.code_engine_config.get('docker_user')
        docker_password = self.code_engine_config.get('docker_password')

        cred_payload = {
            "auths": {
                docker_server: {
                    "Username": docker_user,
                    "Password": docker_password
                }
            }
        }

        data = {
            ".dockerconfigjson": base64.b64encode(
                json.dumps(cred_payload).encode()
            ).decode()
        }

        secret = client.V1Secret(
            api_version="v1",
            data=data,
            kind="Secret",
            metadata=dict(name="lithops-regcred", namespace=self.namespace),
            type="kubernetes.io/dockerconfigjson",
        )

        try:
            self.core_api.delete_namespaced_secret("lithops-regcred", self.namespace)
        except ApiException as e:
            pass

        try:
            self.core_api.create_namespaced_secret(self.namespace, secret)
        except ApiException as e:
            if e.status != 409:
                raise e

    def _create_job_definition(self, image_name, runtime_memory, timeout):
        """
        Creates a Job definition
        """
        self._create_container_registry_secret()

        jobdef_name = self._format_jobdef_name(image_name, runtime_memory)
        jobdef_res = yaml.safe_load(ce_config.JOBDEF_DEFAULT)
        jobdef_res['metadata']['name'] = jobdef_name
        container = jobdef_res['spec']['template']['containers'][0]
        container['image'] = image_name
        container['name'] = jobdef_name
        container['env'][0]['value'] = 'run'
        container['resources']['requests']['memory'] = '{}G'.format(runtime_memory/1024)
        container['resources']['requests']['cpu'] = str(self.code_engine_config['runtime_cpu'])

        try:
            self.custom_api.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobdefinitions",
                name=jobdef_name,
            )
        except Exception:
            pass

        try:
            self.custom_api.create_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobdefinitions",
                body=jobdef_res,
            )
            # logger.debug("response - {}".format(res))
        except Exception as e:
            raise e

        logger.debug('Job Definition {} created'.format(jobdef_name))

        return jobdef_name

    def get_runtime_key(self, docker_image_name, runtime_memory):
        """
        Method that creates and returns the runtime key.
        Runtime keys are used to uniquely identify runtimes within the storage,
        in order to know which runtimes are installed and which are not.
        """
        jobdef_name = self._format_jobdef_name(docker_image_name, 256)
        runtime_key = os.path.join(self.name, self.region, self.namespace, jobdef_name)

        return runtime_key

    def _job_def_exists(self, jobdef_name):
        logger.debug("Check if job_definition {} exists".format(jobdef_name))
        try:
            self.custom_api.get_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobdefinitions",
                name=jobdef_name
            )
        except ApiException as e:
            # swallow error
            if (e.status == 404):
                logger.debug("Job definition {} not found (404)".format(jobdef_name))
                return False
        logger.debug("Job definition {} found".format(jobdef_name))
        return True

    def _generate_runtime_meta(self, docker_image_name, memory):

        logger.info("Extracting Python modules from: {}".format(docker_image_name))
        jobrun_res = yaml.safe_load(ce_config.JOBRUN_DEFAULT)

        jobdef_name = self._format_jobdef_name(docker_image_name, memory)
        jobrun_name = 'lithops-runtime-preinstalls'

        job_payload = copy.deepcopy(self.internal_storage.storage.storage_config)
        job_payload['log_level'] = logger.getEffectiveLevel()
        job_payload['runtime_name'] = jobdef_name

        jobrun_res['metadata']['name'] = jobrun_name
        jobrun_res['metadata']['namespace'] = self.namespace
        jobrun_res['spec']['jobDefinitionRef'] = str(jobdef_name)
        container = jobrun_res['spec']['jobDefinitionSpec']['template']['containers'][0]
        container['name'] = str(jobdef_name)
        container['env'][0]['value'] = 'preinstalls'

        config_map_name = 'lithops-{}-preinstalls'.format(jobdef_name)
        config_map_name = self._create_config_map(config_map_name, job_payload)
        container['env'][1]['valueFrom']['configMapKeyRef']['name'] = config_map_name

        try:
            self.custom_api.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                name=jobrun_name
            )
        except Exception:
            pass

        try:
            self.custom_api.create_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                body=jobrun_res,
            )
        except Exception:
            pass

        logger.debug("Waiting for runtime metadata")

        done = False
        failed = False

        while not done and not failed:
            try:
                w = watch.Watch()
                for event in w.stream(self.custom_api.list_namespaced_custom_object,
                                      namespace=self.namespace, group=ce_config.DEFAULT_GROUP,
                                      version=ce_config.DEFAULT_VERSION, plural="jobruns",
                                      field_selector="metadata.name={0}".format(jobrun_name),
                                      timeout_seconds=10):
                    failed = int(event['object'].get('status')['failed'])
                    done = int(event['object'].get('status')['succeeded'])
                    logger.debug('...')
                    if done or failed:
                        w.stop()
            except Exception:
                pass

        if done:
            logger.debug("Runtime metadata generated successfully")

        try:
            self.custom_api.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                name=jobrun_name
            )
        except Exception:
            pass

        self._delete_config_map(config_map_name)

        if failed:
            raise Exception("Unable to extract Python preinstalled modules from the runtime")

        data_key = '/'.join([JOBS_PREFIX, jobdef_name+'.meta'])
        json_str = self.internal_storage.get_data(key=data_key)
        runtime_meta = json.loads(json_str.decode("ascii"))
        self.internal_storage.del_data(key=data_key)

        return runtime_meta

    def _create_config_map(self, config_map_name, payload):
        """
        Creates a configmap
        """
        cmap = client.V1ConfigMap()
        cmap.metadata = client.V1ObjectMeta(name=config_map_name)
        cmap.data = {}
        cmap.data["lithops.payload"] = dict_to_b64str(payload)

        logger.debug("Creating ConfigMap {}".format(config_map_name))
        try:
            self.core_api.create_namespaced_config_map(
                namespace=self.namespace,
                body=cmap,
                field_manager='lithops'
            )
        except ApiException as e:
            if (e.status != 409):
                logger.debug("Creating a configmap failed with {} {}"
                             .format(e.status, e.reason))
                raise Exception('Failed to create ConfigMap')
            else:
                logger.debug("ConfigMap {} already exists".format(config_map_name))

        return config_map_name

    def _delete_config_map(self, config_map_name):
        """
        Deletes a configmap
        """
        grace_period_seconds = 0
        try:
            logger.debug("Deleting ConfigMap {}".format(config_map_name))
            self.core_api.delete_namespaced_config_map(
                name=config_map_name,
                namespace=self.namespace,
                grace_period_seconds=grace_period_seconds
            )
        except ApiException as e:
            logger.debug("Deleting a configmap failed with {} {}"
                         .format(e.status, e.reason))
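As a worked example of the naming scheme used throughout this backend, _format_jobdef_name flattens an image reference into a DNS-safe job definition name:

# Same transformation as _format_jobdef_name('docker.io/myuser/lithops-runtime:2.5', 2048)
name = 'docker.io/myuser/lithops-runtime:2.5'
name = name.replace('/', '--').replace(':', '--').replace('.', '').replace('_', '-')
print('{}--{}mb'.format(name, 2048))
# -> dockerio--myuser--lithops-runtime--25--2048mb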
Example #8
class IBMVPCInstanceClient:
    def __init__(self, ibm_vpc_config):
        logger.debug("Creating IBM VPC client")
        self.name = 'ibm_vpc'
        self.config = ibm_vpc_config

        self.endpoint = self.config['endpoint']
        self.region = self.endpoint.split('//')[1].split('.')[0]
        self.instance_id = self.config['instance_id']
        self.ip_address = self.config.get('ip_address', None)

        self.instance_data = None

        self.ssh_credentials = {
            'username': self.config.get('ssh_user', 'root'),
            'password': self.config.get('ssh_password', None),
            'key_filename': self.config.get('ssh_key_filename', None)
        }

        self.session = requests.session()

        iam_api_key = self.config.get('iam_api_key')
        token = self.config.get('token', None)
        token_expiry_time = self.config.get('token_expiry_time', None)
        api_key_type = 'IAM'
        self.iam_token_manager = IBMTokenManager(iam_api_key, api_key_type,
                                                 token, token_expiry_time)

        headers = {'content-type': 'application/json'}
        default_user_agent = self.session.headers['User-Agent']
        headers['User-Agent'] = default_user_agent + ' {}'.format(
            self.config['user_agent'])
        self.session.headers.update(headers)

        adapter = requests.adapters.HTTPAdapter()
        self.session.mount('https://', adapter)

        msg = COMPUTE_CLI_MSG.format('IBM VPC')
        logger.info("{} - Region: {} - Host: {}".format(
            msg, self.region, self.ip_address))

    def _authorize_session(self):
        self.config['token'], self.config[
            'token_expiry_time'] = self.iam_token_manager.get_token()
        self.session.headers[
            'Authorization'] = 'Bearer ' + self.config['token']

    def get_ssh_credentials(self):
        return self.ssh_credentials

    def get_instance(self):
        url = '/'.join([
            self.endpoint, 'v1', 'instances', self.instance_id +
            f'?version={self.config["version"]}&generation={self.config["generation"]}'
        ])
        self._authorize_session()
        res = self.session.get(url)
        return res.json()

    def get_ip_address(self):
        if self.ip_address:
            return self.ip_address
        else:
            if not self.instance_data:
                self.instance_data = self.get_instance()
            network_interface_id = self.instance_data[
                'primary_network_interface']['id']

            url = '/'.join([
                self.endpoint, 'v1', 'floating_ips' +
                f'?version={self.config["version"]}&generation={self.config["generation"]}'
            ])
            self._authorize_session()
            res = self.session.get(url)
            floating_ips_info = res.json()

            ip_address = None
            for floating_ip in floating_ips_info['floating_ips']:
                if floating_ip['target']['id'] == network_interface_id:
                    ip_address = floating_ip['address']

            if ip_address is None:
                raise Exception('Could not find the public IP address')

        return ip_address

    def create_instance_action(self, action):
        if action in ['start', 'reboot']:
            expected_status = 'running'
        elif action == 'stop':
            expected_status = 'stopped'
        else:
            msg = 'Cannot create the instance action "{}"'.format(action)
            raise Exception(msg)

        url = '/'.join([
            self.config['endpoint'], 'v1', 'instances',
            self.config['instance_id'],
            f'actions?version={self.config["version"]}&generation={self.config["generation"]}'
        ])
        self._authorize_session()
        res = self.session.post(url, json={'type': action})
        resp_text = res.json()

        if res.status_code != 201:
            msg = 'An error occurred creating instance action {}: {}'.format(
                action, resp_text['errors'])
            raise Exception(msg)

        self.instance_data = self.get_instance()
        while self.instance_data['status'] != expected_status:
            time.sleep(1)
            self.instance_data = self.get_instance()

    def start(self):
        logger.info("Starting VM instance")
        self.create_instance_action('start')
        logger.debug("VM instance started successfully")

    def stop(self):
        logger.info("Stopping VM instance")
        self.create_instance_action('stop')
        logger.debug("VM instance stopped successfully")

    def get_runtime_key(self, runtime_name):
        runtime_key = os.path.join(self.name, self.ip_address,
                                   self.instance_id, runtime_name.strip("/"))

        return runtime_key
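A short usage sketch for this client; the config shape follows Example #6, and all values are placeholders:

vpc_client = IBMVPCInstanceClient(ibm_vpc_config)
vpc_client.start()                  # polls get_instance() until status is 'running'
print(vpc_client.get_ip_address())  # configured IP, or resolved via floating IPs
vpc_client.stop()                   # polls until status is 'stopped'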
Example #9
class IBMCloudFunctionsBackend:
    """
    A wrapper around the IBM Cloud Functions backend.
    """

    def __init__(self, ibm_cf_config, internal_storage):
        logger.debug("Creating IBM Cloud Functions client")
        self.name = 'ibm_cf'
        self.type = 'faas'
        self.config = ibm_cf_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get('namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get('api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.debug("Set IBM CF Namespace to {}".format(self.namespace))
        logger.debug("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = self.api_key.split(':')[1][:4] if self.api_key else self.iam_api_key[:4]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)

        elif self.iam_api_key:
            api_key_type = 'IAM'
            token = self.config.get('token', None)
            token_expiry_time = self.config.get('token_expiry_time', None)
            self.ibm_token_manager = IBMTokenManager(self.iam_api_key,
                                                     api_key_type, token,
                                                     token_expiry_time)
            token, token_expiry_time = self.ibm_token_manager.get_token()

            self.config['token'] = token
            self.config['token_expiry_time'] = token_expiry_time

            auth = 'Bearer ' + token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        msg = COMPUTE_CLI_MSG.format('IBM CF')
        logger.info("{} - Region: {} - Namespace: {}".format(msg, self.region,
                                                             self.namespace))

    def _format_action_name(self, runtime_name, runtime_memory):
        runtime_name = runtime_name.replace('/', '_').replace(':', '_')
        return '{}_{}MB'.format(runtime_name, runtime_memory)

    def _unformat_action_name(self, action_name):
        runtime_name, memory = action_name.rsplit('_', 1)
        image_name = runtime_name.replace('_', '/', 1)
        image_name = image_name.replace('_', ':', -1)
        return image_name, int(memory.replace('MB', ''))
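    # Worked round trip, assuming an image name with one '/' and one ':':
    #   _format_action_name('lithopscloud/ibmcf:python3.8', 256)
    #       -> 'lithopscloud_ibmcf_python3.8_256MB'
    #   _unformat_action_name('lithopscloud_ibmcf_python3.8_256MB')
    #       -> ('lithopscloud/ibmcf:python3.8', 256)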

    def _get_default_runtime_image_name(self):
        python_version = version_str(sys.version_info)
        return ibmcf_config.RUNTIME_DEFAULT[python_version]

    def _delete_function_handler_zip(self):
        os.remove(ibmcf_config.FH_ZIP_LOCATION)

    def build_runtime(self, docker_image_name, dockerfile):
        """
        Builds a new runtime from a Dockerfile and pushes it to Docker Hub
        """
        logger.info('Building a new docker image from Dockerfile')
        logger.info('Docker image name: {}'.format(docker_image_name))

        if dockerfile:
            cmd = 'docker build -t {} -f {} .'.format(docker_image_name, dockerfile)
        else:
            cmd = 'docker build -t {} .'.format(docker_image_name)

        res = os.system(cmd)
        if res != 0:
            raise Exception('There was an error building the runtime')

        cmd = 'docker push {}'.format(docker_image_name)
        res = os.system(cmd)
        if res != 0:
            raise Exception('There was an error pushing the runtime to the container registry')

    def create_runtime(self, docker_image_name, memory, timeout):
        """
        Creates a new runtime in the IBM CF namespace from an already built Docker image
        """
        if docker_image_name == 'default':
            docker_image_name = self._get_default_runtime_image_name()

        logger.info('Creating new Lithops runtime based on Docker image {}'.format(docker_image_name))

        self.cf_client.create_package(self.package)
        action_name = self._format_action_name(docker_image_name, memory)

        entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')
        create_handler_zip(ibmcf_config.FH_ZIP_LOCATION, entry_point, '__main__.py')

        with open(ibmcf_config.FH_ZIP_LOCATION, "rb") as action_zip:
            action_bin = action_zip.read()
        self.cf_client.create_action(self.package, action_name, docker_image_name, code=action_bin,
                                     memory=memory, is_binary=True, timeout=timeout * 1000)

        self._delete_function_handler_zip()

        runtime_meta = self._generate_runtime_meta(docker_image_name, memory)

        return runtime_meta

    def delete_runtime(self, docker_image_name, memory):
        """
        Deletes a runtime
        """
        if docker_image_name == 'default':
            docker_image_name = self._get_default_runtime_image_name()
        action_name = self._format_action_name(docker_image_name, memory)
        self.cf_client.delete_action(self.package, action_name)

    def clean(self):
        """
        Deletes all runtimes from all packages
        """
        packages = self.cf_client.list_packages()
        for pkg in packages:
            if (pkg['name'].startswith('lithops') and pkg['name'].endswith(self.user_key)) or \
                    (pkg['name'].startswith('lithops') and pkg['name'].count('_') == 1):
                actions = self.cf_client.list_actions(pkg['name'])
                while actions:
                    for action in actions:
                        self.cf_client.delete_action(pkg['name'], action['name'])
                    actions = self.cf_client.list_actions(pkg['name'])
                self.cf_client.delete_package(pkg['name'])

    def list_runtimes(self, docker_image_name='all'):
        """
        List all the runtimes deployed in the IBM CF service
        return: list of tuples (docker_image_name, memory)
        """
        if docker_image_name == 'default':
            docker_image_name = self._get_default_runtime_image_name()
        runtimes = []
        actions = self.cf_client.list_actions(self.package)

        for action in actions:
            action_image_name, memory = self._unformat_action_name(action['name'])
            if docker_image_name == action_image_name or docker_image_name == 'all':
                runtimes.append((action_image_name, memory))
        return runtimes

    def invoke(self, docker_image_name, runtime_memory, payload):
        """
        Invoke -- return information about this invocation
        """
        action_name = self._format_action_name(docker_image_name, runtime_memory)

        activation_id = self.cf_client.invoke(package=self.package,
                                              action_name=action_name,
                                              payload=payload,
                                              is_ow_action=self.is_lithops_worker)

        if activation_id == 401:
            # unauthorized. Probably token expired if using IAM auth
            if self.iam_api_key and not self.is_lithops_worker:
                self._refresh_cf_client()
                return self.invoke(docker_image_name, runtime_memory, payload)
            else:
                raise Exception('Unauthorized. Review your API key')

        return activation_id

    def _refresh_cf_client(self):
        """ Recreates the OpenWhisk client with a new token.
        This is only called by the invoke method when it receives
        a 401 error.
        """
        token_mutex.acquire()
        token, token_expiry_time = self.ibm_token_manager.get_token()
        if token != self.config['token']:
            self.config['token'] = token
            self.config['token_expiry_time'] = token_expiry_time
            auth = 'Bearer ' + token
            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)
        token_mutex.release()

    def get_runtime_key(self, docker_image_name, runtime_memory):
        """
        Method that creates and returns the runtime key.
        Runtime keys are used to uniquely identify runtimes within the storage,
        in order to know which runtimes are installed and which are not.
        """
        action_name = self._format_action_name(docker_image_name, runtime_memory)
        runtime_key = os.path.join(self.name, self.region, self.namespace, action_name)

        return runtime_key

    def _generate_runtime_meta(self, docker_image_name, memory):
        """
        Extract installed Python modules from the docker image
        """
        logger.debug("Extracting Python modules list from: {}".format(docker_image_name))
        action_name = self._format_action_name(docker_image_name, memory)
        payload = {'log_level': logger.getEffectiveLevel(), 'get_preinstalls': True}
        try:
            retry_invoke = True
            while retry_invoke:
                retry_invoke = False
                runtime_meta = self.cf_client.invoke_with_result(self.package, action_name, payload)
                if 'activationId' in runtime_meta:
                    retry_invoke = True
        except Exception as e:
            raise ("Unable to extract runtime preinstalls: {}".format(e))

        if not runtime_meta or 'preinstalls' not in runtime_meta:
            raise Exception(runtime_meta)

        return runtime_meta

    def calc_cost(self, runtimes, memory, *argv, **arg):
        """ returns total cost associated with executing the calling function-executor's job.
        :params *argv and **arg: made to support compatibility with similarly named functions in
        alternative computational backends.
        """
        return ibmcf_config.UNIT_PRICE * sum(runtimes[i] * memory[i] / 1024 for i in range(len(runtimes)))
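A worked example of the formula in calc_cost, where runtimes holds per-invocation execution times in seconds and memory the corresponding sizes in MB (UNIT_PRICE comes from the backend's config module and is assumed here to be the price per GB-second):

# Two invocations: 10 s at 256 MB and 20 s at 512 MB
# cost = UNIT_PRICE * (10 * 256/1024 + 20 * 512/1024)
#      = UNIT_PRICE * (2.5 + 10.0)
#      = 12.5 * UNIT_PRICE  # i.e. 12.5 GB-seconds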
Example #10
    def __init__(self, ibm_cf_config, storage_config):
        logger.debug("Creating IBM Cloud Functions client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'ibm_cf'
        self.config = ibm_cf_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get(
            'namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get(
            'api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.debug("Set IBM CF Namespace to {}".format(self.namespace))
        logger.debug("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = self.api_key[:5] if self.api_key else self.iam_api_key[:5]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)

        elif self.iam_api_key:
            iam_api_key = self.config.get('iam_api_key')
            api_key_type = 'IAM'
            token = self.config.get('token', None)
            token_expiry_time = self.config.get('token_expiry_time', None)

            self.ibm_iam_api_key_manager = IBMTokenManager(
                iam_api_key, api_key_type, token, token_expiry_time)
            token, token_expiry_time = self.ibm_iam_api_key_manager.get_token()

            self.config['token'] = token
            self.config['token_expiry_time'] = token_expiry_time

            auth = 'Bearer ' + token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        log_msg = (
            'Lithops v{} init for IBM Cloud Functions - Namespace: {} - '
            'Region: {}'.format(__version__, self.namespace, self.region))
        if not self.log_active:
            print(log_msg)
        logger.info("IBM CF client created successfully")
Example #11
class IBMCloudFunctionsBackend:
    """
    A wrapper around the IBM Cloud Functions backend.
    """
    def __init__(self, ibm_cf_config, storage_config):
        logger.debug("Creating IBM Cloud Functions client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'ibm_cf'
        self.config = ibm_cf_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get(
            'namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get(
            'api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.debug("Set IBM CF Namespace to {}".format(self.namespace))
        logger.debug("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = self.api_key[:5] if self.api_key else self.iam_api_key[:5]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)

        elif self.iam_api_key:
            iam_api_key = self.config.get('iam_api_key')
            api_key_type = 'IAM'
            token = self.config.get('token', None)
            token_expiry_time = self.config.get('token_expiry_time', None)

            self.ibm_iam_api_key_manager = IBMTokenManager(
                iam_api_key, api_key_type, token, token_expiry_time)
            token, token_expiry_time = self.ibm_iam_api_key_manager.get_token()

            self.config['token'] = token
            self.config['token_expiry_time'] = token_expiry_time

            auth = 'Bearer ' + token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        log_msg = (
            'Lithops v{} init for IBM Cloud Functions - Namespace: {} - '
            'Region: {}'.format(__version__, self.namespace, self.region))
        if not self.log_active:
            print(log_msg)
        logger.info("IBM CF client created successfully")

    def _format_action_name(self, runtime_name, runtime_memory):
        runtime_name = runtime_name.replace('/', '_').replace(':', '_')
        return '{}_{}MB'.format(runtime_name, runtime_memory)

    def _unformat_action_name(self, action_name):
        runtime_name, memory = action_name.rsplit('_', 1)
        image_name = runtime_name.replace('_', '/', 1)
        image_name = image_name.replace('_', ':', -1)
        return image_name, int(memory.replace('MB', ''))

    def _get_default_runtime_image_name(self):
        python_version = version_str(sys.version_info)
        return ibmcf_config.RUNTIME_DEFAULT[python_version]

    def _delete_function_handler_zip(self):
        os.remove(ibmcf_config.FH_ZIP_LOCATION)

    def build_runtime(self, docker_image_name, dockerfile):
        """
        Builds a new runtime from a Docker file and pushes it to the Docker hub
        """
        logger.info('Building a new docker image from Dockerfile')
        logger.info('Docker image name: {}'.format(docker_image_name))

        if dockerfile:
            cmd = 'docker build -t {} -f {} .'.format(docker_image_name,
                                                      dockerfile)
        else:
            cmd = 'docker build -t {} .'.format(docker_image_name)

        res = os.system(cmd)
        if res != 0:
            raise Exception('There was an error building the runtime')

        cmd = 'docker push {}'.format(docker_image_name)
        res = os.system(cmd)
        if res != 0:
            raise Exception(
                'There was an error pushing the runtime to the container registry'
            )

    def create_runtime(self, docker_image_name, memory, timeout):
        """
        Creates a new runtime into IBM CF namespace from an already built Docker image
        """
        if docker_image_name == 'default':
            docker_image_name = self._get_default_runtime_image_name()

        runtime_meta = self._generate_runtime_meta(docker_image_name)

        logger.info(
            'Creating new Lithops runtime based on Docker image {}'.format(
                docker_image_name))

        self.cf_client.create_package(self.package)
        action_name = self._format_action_name(docker_image_name, memory)

        entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')
        create_handler_zip(ibmcf_config.FH_ZIP_LOCATION, entry_point,
                           '__main__.py')

        with open(ibmcf_config.FH_ZIP_LOCATION, "rb") as action_zip:
            action_bin = action_zip.read()
        self.cf_client.create_action(self.package,
                                     action_name,
                                     docker_image_name,
                                     code=action_bin,
                                     memory=memory,
                                     is_binary=True,
                                     timeout=timeout * 1000)
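        # The timeout is configured in seconds but passed to OpenWhisk in
        # milliseconds in the call above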
        self._delete_function_handler_zip()
        return runtime_meta

    def delete_runtime(self, docker_image_name, memory):
        """
        Deletes a runtime
        """
        if docker_image_name == 'default':
            docker_image_name = self._get_default_runtime_image_name()
        action_name = self._format_action_name(docker_image_name, memory)
        self.cf_client.delete_action(self.package, action_name)

    def clean(self):
        """
        Deletes all runtimes from all packages
        """
        packages = self.cf_client.list_packages()
        for pkg in packages:
            if (pkg['name'].startswith('lithops') and pkg['name'].endswith(self.user_key)) or \
               (pkg['name'].startswith('lithops') and pkg['name'].count('_') == 1):
                actions = self.cf_client.list_actions(pkg['name'])
                while actions:
                    for action in actions:
                        self.cf_client.delete_action(pkg['name'],
                                                     action['name'])
                    actions = self.cf_client.list_actions(pkg['name'])
                self.cf_client.delete_package(pkg['name'])

    def list_runtimes(self, docker_image_name='all'):
        """
        List all the runtimes deployed in the IBM CF service
        return: list of tuples (docker_image_name, memory)
        """
        if docker_image_name == 'default':
            docker_image_name = self._get_default_runtime_image_name()
        runtimes = []
        actions = self.cf_client.list_actions(self.package)

        for action in actions:
            action_image_name, memory = self._unformat_action_name(
                action['name'])
            if docker_image_name == action_image_name or docker_image_name == 'all':
                runtimes.append((action_image_name, memory))
        return runtimes

    def invoke(self, docker_image_name, runtime_memory, payload):
        """
        Invoke -- return information about this invocation
        """
        action_name = self._format_action_name(docker_image_name,
                                               runtime_memory)

        activation_id = self.cf_client.invoke(
            package=self.package,
            action_name=action_name,
            payload=payload,
            is_ow_action=self.is_lithops_worker)

        return activation_id

    def get_runtime_key(self, docker_image_name, runtime_memory):
        """
        Method that creates and returns the runtime key.
        Runtime keys are used to uniquely identify runtimes within the storage,
        in order to know which runtimes are installed and which not.
        """
        action_name = self._format_action_name(docker_image_name,
                                               runtime_memory)
        runtime_key = os.path.join(self.name, self.region, self.namespace,
                                   action_name)

        return runtime_key

    def _generate_runtime_meta(self, docker_image_name):
        """
        Extract installed Python modules from docker image
        """
        action_code = """
            import sys
            import pkgutil

            def main(args):
                print("Extracting preinstalled Python modules...")
                runtime_meta = dict()
                mods = list(pkgutil.iter_modules())
                runtime_meta["preinstalls"] = [entry for entry in sorted([[mod, is_pkg] for _, mod, is_pkg in mods])]
                python_version = sys.version_info
                runtime_meta["python_ver"] = str(python_version[0])+"."+str(python_version[1])
                print("Done!")
                return runtime_meta
            """

        runtime_memory = 128
        # old_stdout = sys.stdout
        # sys.stdout = open(os.devnull, 'w')
        action_name = self._format_action_name(docker_image_name,
                                               runtime_memory)
        self.cf_client.create_package(self.package)
        self.cf_client.create_action(self.package,
                                     action_name,
                                     docker_image_name,
                                     is_binary=False,
                                     code=textwrap.dedent(action_code),
                                     memory=runtime_memory,
                                     timeout=30000)
        # sys.stdout = old_stdout
        logger.debug("Extracting Python modules list from: {}".format(
            docker_image_name))

        try:
            retry_invoke = True
            while retry_invoke:
                retry_invoke = False
                runtime_meta = self.cf_client.invoke_with_result(
                    self.package, action_name)
                if 'activationId' in runtime_meta:
                    # Only an activation id came back: the action ran
                    # asynchronously, so invoke again until the result arrives
                    retry_invoke = True
        except Exception:
            raise Exception("Unable to invoke 'modules' action")
        try:
            self.delete_runtime(docker_image_name, runtime_memory)
        except Exception:
            raise Exception("Unable to delete 'modules' action")

        if not runtime_meta or 'preinstalls' not in runtime_meta:
            raise Exception(runtime_meta)

        return runtime_meta
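As a minimal, self-contained sketch of what the 'preinstalls' action above computes (runnable locally, no IBM CF account needed), the same pkgutil-based extraction looks like the following; the function name extract_runtime_meta is hypothetical, but the result shape mirrors the runtime_meta dict the backend expects:

import sys
import pkgutil

def extract_runtime_meta():
    # Mirrors the 'preinstalls' action: list importable top-level modules
    runtime_meta = dict()
    mods = list(pkgutil.iter_modules())
    runtime_meta["preinstalls"] = sorted([mod, is_pkg] for _, mod, is_pkg in mods)
    runtime_meta["python_ver"] = "{}.{}".format(*sys.version_info[:2])
    return runtime_meta

if __name__ == '__main__':
    meta = extract_runtime_meta()
    print(meta["python_ver"], len(meta["preinstalls"]), "preinstalled modules")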
Example No. 12
0
class CodeEngineBackend:
    """
    A wrap-up around Code Engine backend.
    """
    def __init__(self, code_engine_config, internal_storage):
        logger.debug("Creating IBM Code Engine client")
        self.name = 'code_engine'
        self.type = 'batch'
        self.code_engine_config = code_engine_config
        self.internal_storage = internal_storage

        self.kubecfg_path = code_engine_config.get('kubecfg_path')
        self.user_agent = code_engine_config['user_agent']

        self.iam_api_key = code_engine_config.get('iam_api_key', None)
        self.namespace = code_engine_config.get('namespace', None)
        self.region = code_engine_config.get('region', None)

        if self.namespace and self.region and self.iam_api_key:
            self.cluster = ce_config.CLUSTER_URL.format(self.region)
            configuration = client.Configuration()
            configuration.host = self.cluster
            token = self._get_iam_token()
            configuration.api_key = {"authorization": "Bearer " + token}
            client.Configuration.set_default(configuration)

        else:
            try:
                config.load_kube_config(config_file=self.kubecfg_path)
                contexts = config.list_kube_config_contexts(
                    config_file=self.kubecfg_path)
                current_context = contexts[1].get('context')
                self.namespace = current_context.get('namespace')
                self.cluster = current_context.get('cluster')
                self.code_engine_config['namespace'] = self.namespace
                self.code_engine_config['cluster'] = self.cluster

                if self.iam_api_key:
                    configuration = client.Configuration.get_default_copy()
                    token = self._get_iam_token()
                    configuration.api_key = {
                        "authorization": "Bearer " + token
                    }
                    client.Configuration.set_default(configuration)

            except Exception:
                logger.debug('Loading incluster config')
                config.load_incluster_config()
                self.namespace = self.code_engine_config.get('namespace')
                self.cluster = self.code_engine_config.get('cluster')

        logger.debug("Set namespace to {}".format(self.namespace))
        logger.debug("Set cluster to {}".format(self.cluster))

        self.capi = client.CustomObjectsApi()
        self.coreV1Api = client.CoreV1Api()

        try:
            self.region = self.cluster.split('//')[1].split('.')[1]
        except Exception:
            self.region = self.cluster.replace('http://',
                                               '').replace('https://', '')

        self.jobs = []  # list to store executed jobs (job_keys)

        msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
        logger.info("{} - Region: {}".format(msg, self.region))

    def _get_iam_token(self):
        """ Requests and IBM IAM token """
        token = self.code_engine_config.get('token', None)
        token_expiry_time = self.code_engine_config.get(
            'token_expiry_time', None)
        self.ibm_token_manager = IBMTokenManager(self.iam_api_key, 'IAM',
                                                 token, token_expiry_time)
        token, token_expiry_time = self.ibm_token_manager.get_token()
        self.code_engine_config['token'] = token
        self.code_engine_config['token_expiry_time'] = token_expiry_time

        return token

    def _format_jobdef_name(self, runtime_name, runtime_memory):
        if runtime_name.count('/') == 2:
            # it contains the docker registry
            runtime_name = runtime_name.split('/', 1)[1]

        runtime_name = runtime_name.replace('.', '')
        runtime_name = runtime_name.replace('/', '--')
        runtime_name = runtime_name.replace(':', '--')
        return '{}--{}mb'.format(runtime_name, runtime_memory)
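        # Worked example (hypothetical image): 'docker.io/user/image:v1'
        # drops the registry -> 'user/image:v1', then the separators become
        # '--' -> 'user--image--v1--256mb' for 256 MB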

    def _get_default_runtime_image_name(self):
        docker_user = self.code_engine_config.get('docker_user')
        python_version = version_str(sys.version_info).replace('.', '')
        revision = 'latest' if 'dev' in __version__ else __version__.replace(
            '.', '')
        return '{}/{}-v{}:{}'.format(docker_user, ce_config.RUNTIME_NAME,
                                     python_version, revision)
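        # e.g. (hypothetical values) docker_user='user', Python 3.8 and
        # lithops 2.2.15 would yield 'user/{RUNTIME_NAME}-v38:2215' (with
        # RUNTIME_NAME from ce_config); a 'dev' version pins the 'latest' tag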

    def _delete_function_handler_zip(self):
        os.remove(ce_config.FH_ZIP_LOCATION)

    def build_runtime(self, docker_image_name, dockerfile):
        """
        Builds a new runtime from a Dockerfile and pushes it to the container registry
        """
        logger.debug('Building new docker image from Dockerfile')
        logger.debug('Docker image name: {}'.format(docker_image_name))

        expression = '^([a-z0-9]+)/([-a-z0-9]+)(:[a-z0-9]+)?'
        result = re.match(expression, docker_image_name)

        if not result or result.group() != docker_image_name:
            raise Exception(
                "Invalid docker image name: All letters must be "
                "lowercase and '.' or '_' characters are not allowed")

        entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')
        create_handler_zip(ce_config.FH_ZIP_LOCATION, entry_point,
                           'lithopsentry.py')

        if dockerfile:
            cmd = '{} build -t {} -f {} .'.format(ce_config.DOCKER_PATH,
                                                  docker_image_name,
                                                  dockerfile)
        else:
            cmd = '{} build -t {} .'.format(ce_config.DOCKER_PATH,
                                            docker_image_name)

        if logger.getEffectiveLevel() != logging.DEBUG:
            cmd = cmd + " >{} 2>&1".format(os.devnull)

        logger.info('Building default runtime')
        res = os.system(cmd)
        if res != 0:
            raise Exception('There was an error building the runtime')

        self._delete_function_handler_zip()

        cmd = '{} push {}'.format(ce_config.DOCKER_PATH, docker_image_name)
        if logger.getEffectiveLevel() != logging.DEBUG:
            cmd = cmd + " >{} 2>&1".format(os.devnull)
        res = os.system(cmd)
        if res != 0:
            raise Exception(
                'There was an error pushing the runtime to the container registry'
            )
        logger.debug('Done!')

    def _build_default_runtime(self, default_runtime_img_name):
        """
        Builds the default runtime
        """
        if os.system('{} --version >{} 2>&1'.format(ce_config.DOCKER_PATH,
                                                    os.devnull)) == 0:
            # Build the default runtime using local docker
            python_version = version_str(sys.version_info)
            dockerfile = "Dockerfile.default-codeengine-runtime"
            with open(dockerfile, 'w') as f:
                f.write("FROM python:{}-slim-buster\n".format(python_version))
                f.write(ce_config.DOCKERFILE_DEFAULT)
            self.build_runtime(default_runtime_img_name, dockerfile)
            os.remove(dockerfile)
        else:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')

    def create_runtime(self, docker_image_name, memory, timeout):
        """
        Creates a new runtime from an already built Docker image
        """
        default_runtime_img_name = self._get_default_runtime_image_name()
        if docker_image_name in ['default', default_runtime_img_name]:
            # We only build the default image; the rest of the images must
            # already exist in the container registry.
            docker_image_name = default_runtime_img_name
            self._build_default_runtime(default_runtime_img_name)

        logger.debug('Creating new Lithops runtime based on '
                     'Docker image: {}'.format(docker_image_name))
        self._create_job_definition(docker_image_name, memory, timeout)

        runtime_meta = self._generate_runtime_meta(docker_image_name, memory)

        return runtime_meta

    def delete_runtime(self, docker_image_name, memory):
        """
        Deletes a runtime
        We need to delete job definition
        """
        def_id = self._format_jobdef_name(docker_image_name, memory)
        self._job_def_cleanup(def_id)

    def _job_run_cleanup(self, jobrun_name):
        logger.debug("Deleting jobrun {}".format(jobrun_name))
        try:
            self.capi.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                name=jobrun_name,
                namespace=self.namespace,
                plural="jobruns",
                body=client.V1DeleteOptions(),
            )
        except ApiException as e:
            logger.debug("Deleting a jobrun failed with {} {}".format(
                e.status, e.reason))

    def _job_def_cleanup(self, jobdef_id):
        logger.info("Deleting runtime: {}".format(jobdef_id))
        try:
            self.capi.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                name=jobdef_id,
                namespace=self.namespace,
                plural="jobdefinitions",
                body=client.V1DeleteOptions(),
            )
        except ApiException as e:
            logger.debug("Deleting a jobdef failed with {} {}".format(
                e.status, e.reason))

    def clean(self):
        """
        Deletes all Lithops runtimes (job definitions) and configmaps
        """
        self.clear()
        jobdefs = self.list_runtimes()
        for docker_image_name, memory in jobdefs:
            self.delete_runtime(docker_image_name, memory)

        logger.debug('Deleting all lithops configmaps')
        configmaps = self.coreV1Api.list_namespaced_config_map(
            namespace=self.namespace)
        for configmap in configmaps.items:
            config_name = configmap.metadata.name
            if config_name.startswith('lithops'):
                logger.debug('Deleting configmap {}'.format(config_name))
                self.coreV1Api.delete_namespaced_config_map(
                    name=config_name,
                    namespace=self.namespace,
                    grace_period_seconds=0)

    def list_runtimes(self, docker_image_name='all'):
        """
        List all the runtimes
        return: list of tuples (docker_image_name, memory)
        """

        runtimes = []
        try:
            jobdefs = self.capi.list_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobdefinitions")
        except ApiException as e:
            logger.debug("List all jobdefinitions failed with {} {}".format(
                e.status, e.reason))
            return runtimes

        for jobdef in jobdefs['items']:
            try:
                if jobdef['metadata']['labels']['type'] == 'lithops-runtime':
                    container = jobdef['spec']['template']['containers'][0]
                    image_name = container['image']
                    memory = container['resources']['requests'][
                        'memory'].replace('Mi', '')
                    if docker_image_name in image_name or docker_image_name == 'all':
                        runtimes.append((image_name, memory))
            except Exception:
                # It is not a lithops runtime
                pass

        return runtimes

    def clear(self, job_keys=None):
        """
        Clean all completed jobruns in the current executor
        """
        if job_keys:
            for job_key in job_keys:
                if job_key in self.jobs:
                    jobrun_name = 'lithops-{}'.format(job_key.lower())
                    try:
                        self._job_run_cleanup(jobrun_name)
                        self._delete_config_map(jobrun_name)
                    except Exception as e:
                        logger.debug(
                            "Deleting a jobrun failed with: {}".format(e))
                    self.jobs.remove(job_key)
        else:
            for job_key in self.jobs:
                jobrun_name = 'lithops-{}'.format(job_key.lower())
                try:
                    self._job_run_cleanup(jobrun_name)
                    self._delete_config_map(jobrun_name)
                except Exception as e:
                    logger.debug("Deleting a jobrun failed with: {}".format(e))
            self.jobs = []

    def invoke(self, docker_image_name, runtime_memory, job_payload):
        """
        Invoke -- return information about this invocation
        For array jobs, only the remote invoker is allowed
        """
        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']

        job_key = job_payload['job_key']
        self.jobs.append(job_key)

        total_calls = job_payload['total_calls']
        chunksize = job_payload['chunksize']
        array_size = total_calls // chunksize + (total_calls % chunksize > 0)
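        # Ceil division, e.g. total_calls=10, chunksize=3 -> 10//3 + 1 = 4
        # array jobs (three full chunks of 3 calls plus one chunk of 1)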

        jobdef_name = self._format_jobdef_name(docker_image_name,
                                               runtime_memory)
        logger.debug("Job definition id {}".format(jobdef_name))
        if not self._job_def_exists(jobdef_name):
            jobdef_name = self._create_job_definition(docker_image_name,
                                                      runtime_memory,
                                                      jobdef_name)

        jobrun_res = yaml.safe_load(ce_config.JOBRUN_DEFAULT)

        activation_id = 'lithops-{}'.format(job_key.lower())

        jobrun_res['metadata']['name'] = activation_id
        jobrun_res['metadata']['namespace'] = self.namespace
        jobrun_res['spec']['jobDefinitionRef'] = str(jobdef_name)
        jobrun_res['spec']['jobDefinitionSpec']['arraySpec'] = '0-' + str(
            array_size - 1)

        container = jobrun_res['spec']['jobDefinitionSpec']['template'][
            'containers'][0]
        container['name'] = str(jobdef_name)
        container['env'][0]['value'] = 'run'

        config_map = self._create_config_map(job_payload, activation_id)
        container['env'][1]['valueFrom']['configMapKeyRef'][
            'name'] = config_map

        container['resources']['requests']['memory'] = '{}G'.format(
            runtime_memory / 1024)
        container['resources']['requests']['cpu'] = str(
            self.code_engine_config['runtime_cpu'])

        # logger.debug("request - {}".format(jobrun_res)

        logger.debug('ExecutorID {} | JobID {} - Going '
                     'to run {} activations in {} workers'.format(
                         executor_id, job_id, total_calls, array_size))

        res = self.capi.create_namespaced_custom_object(
            group=ce_config.DEFAULT_GROUP,
            version=ce_config.DEFAULT_VERSION,
            namespace=self.namespace,
            plural="jobruns",
            body=jobrun_res,
        )

        # logger.debug("response - {}".format(res))

        return activation_id

    def _create_job_definition(self, image_name, runtime_memory, timeout):
        """
        Creates a Job definition. The third argument is accepted for
        interface compatibility but is currently unused.
        """
        jobdef_name = self._format_jobdef_name(image_name, runtime_memory)

        jobdef_res = yaml.safe_load(ce_config.JOBDEF_DEFAULT)
        jobdef_res['metadata']['name'] = jobdef_name
        container = jobdef_res['spec']['template']['containers'][0]
        container['image'] = '/'.join(
            [self.code_engine_config['container_registry'], image_name])
        container['name'] = jobdef_name
        container['env'][0]['value'] = 'run'
        container['resources']['requests']['memory'] = '{}G'.format(
            runtime_memory / 1024)
        container['resources']['requests']['cpu'] = str(
            self.code_engine_config['runtime_cpu'])

        try:
            res = self.capi.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobdefinitions",
                name=jobdef_name,
            )
        except Exception:
            pass

        res = self.capi.create_namespaced_custom_object(
            group=ce_config.DEFAULT_GROUP,
            version=ce_config.DEFAULT_VERSION,
            namespace=self.namespace,
            plural="jobdefinitions",
            body=jobdef_res,
        )
        # logger.debug("response - {}".format(res))

        logger.debug('Job Definition {} created'.format(jobdef_name))

        return jobdef_name

    def get_runtime_key(self, docker_image_name, runtime_memory):
        """
        Method that creates and returns the runtime key.
        Runtime keys are used to uniquely identify runtimes within the storage,
        in order to know which runtimes are installed and which not.
        """
        # Note: the name is keyed on a fixed 256 MB, so the runtime key does
        # not depend on the given runtime_memory
        jobdef_name = self._format_jobdef_name(docker_image_name, 256)
        runtime_key = os.path.join(self.name, self.region, self.namespace,
                                   jobdef_name)

        return runtime_key

    def _job_def_exists(self, jobdef_name):
        logger.debug("Check if job_definition {} exists".format(jobdef_name))
        try:
            self.capi.get_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobdefinitions",
                name=jobdef_name)
        except ApiException as e:
            # A 404 from the API means the job definition does not exist
            if e.status == 404:
                logger.info(
                    "Job definition {} not found (404)".format(jobdef_name))
                return False
        logger.debug("Job definition {} found".format(jobdef_name))
        return True

    def _generate_runtime_meta(self, docker_image_name, memory):

        logger.info(
            "Extracting Python modules from: {}".format(docker_image_name))
        jobrun_res = yaml.safe_load(ce_config.JOBRUN_DEFAULT)

        jobdef_name = self._format_jobdef_name(docker_image_name, memory)

        payload = copy.deepcopy(self.internal_storage.storage.storage_config)
        payload['log_level'] = logger.getEffectiveLevel()
        payload['runtime_name'] = jobdef_name

        jobrun_res['metadata']['name'] = 'lithops-runtime-preinstalls'
        jobrun_res['metadata']['namespace'] = self.namespace
        jobrun_res['spec']['jobDefinitionRef'] = str(jobdef_name)
        container = jobrun_res['spec']['jobDefinitionSpec']['template'][
            'containers'][0]
        container['name'] = str(jobdef_name)
        container['env'][0]['value'] = 'preinstalls'

        config_map = self._create_config_map(payload, jobdef_name)
        container['env'][1]['valueFrom']['configMapKeyRef'][
            'name'] = config_map

        try:
            self.capi.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                name='lithops-runtime-preinstalls')
        except Exception:
            pass

        try:
            self.capi.create_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                body=jobrun_res,
            )
        except Exception:
            pass

        # The runtime writes its metadata to COS, so poll the object store
        # until the metadata object appears
        status_key = '/'.join([JOBS_PREFIX, jobdef_name + '.meta'])

        retry = 1
        found = False
        while retry < 10 and not found:
            try:
                logger.debug("Retry attempt {} to read {}".format(
                    retry, status_key))
                json_str = self.internal_storage.get_data(key=status_key)
                logger.debug("Found in attempt {} to read {}".format(
                    retry, status_key))
                runtime_meta = json.loads(json_str.decode("ascii"))
                found = True
            except StorageNoSuchKeyError:
                logger.debug(
                    "{} not found in attempt {}. Sleep before retry".format(
                        status_key, retry))
                retry = retry + 1
                time.sleep(10)

        if not found:
            raise Exception(
                "Unable to extract Python preinstalled modules from the runtime"
            )

        try:
            self.capi.delete_namespaced_custom_object(
                group=ce_config.DEFAULT_GROUP,
                version=ce_config.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="jobruns",
                name='lithops-runtime-preinstalls')
        except Exception:
            pass

        self._delete_config_map(jobdef_name)
        return runtime_meta

    def _create_config_map(self, payload, jobrun_name):
        """
        Creates a configmap
        """
        config_name = '{}-configmap'.format(jobrun_name)
        cmap = client.V1ConfigMap()
        cmap.metadata = client.V1ObjectMeta(name=config_name)
        cmap.data = {}
        cmap.data["lithops.payload"] = dict_to_b64str(payload)

        field_manager = 'lithops'

        try:
            logger.debug("Generate ConfigMap {} for namespace {}".format(
                config_name, self.namespace))
            self.coreV1Api.create_namespaced_config_map(
                namespace=self.namespace,
                body=cmap,
                field_manager=field_manager)
            logger.debug("ConfigMap {} for namespace {} created".format(
                config_name, self.namespace))
        except ApiException as e:
            if e.status != 409:
                logger.debug("Creating a configmap failed with {} {}".format(
                    e.status, e.reason))
                raise Exception('Failed to create ConfigMap')
            else:
                logger.debug(
                    "ConfigMap {} for namespace {} already exists".format(
                        config_name, self.namespace))

        return config_name

    def _delete_config_map(self, jobrun_name):
        """
        Deletes a configmap
        """
        config_name = '{}-configmap'.format(jobrun_name)
        grace_period_seconds = 0
        try:
            logger.debug("Deleting ConfigMap {} for namespace {}".format(
                config_name, self.namespace))
            self.coreV1Api.delete_namespaced_config_map(
                name=config_name,
                namespace=self.namespace,
                grace_period_seconds=grace_period_seconds)
        except ApiException as e:
            logger.debug("Deleting a configmap failed with {} {}".format(
                e.status, e.reason))
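The job payload reaches the container through the ConfigMap created above. Below is a minimal sketch of the encode/decode pair, assuming dict_to_b64str JSON-serializes the dict and base64-encodes it (as the Lithops utils do), with a b64str_to_dict inverse on the worker side:

import json
import base64

def dict_to_b64str(the_dict):
    # JSON-serialize, then base64-encode into a plain ASCII string that is
    # safe to store as a ConfigMap value
    return base64.urlsafe_b64encode(json.dumps(the_dict).encode()).decode()

def b64str_to_dict(str_data):
    # Inverse: base64-decode, then JSON-parse
    return json.loads(base64.urlsafe_b64decode(str_data.encode()))

payload = {'executor_id': '0000', 'job_id': 'A000', 'total_calls': 10}
assert b64str_to_dict(dict_to_b64str(payload)) == payload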