Example #1
0
    def _create_config_map(self, config_map_name, payload):
        """
        Create a Kubernetes ConfigMap holding the base64-encoded job payload.

        Args:
            config_map_name (str): Name to give the ConfigMap.
            payload (dict): Job payload, stored base64-encoded under the
                well-known key ``lithops.payload``.

        Returns:
            str: The ConfigMap name (unchanged), also when it already existed.

        Raises:
            Exception: If creation fails for any reason other than the
                ConfigMap already existing (HTTP 409 Conflict).
        """
        cmap = client.V1ConfigMap()
        cmap.metadata = client.V1ObjectMeta(name=config_map_name)
        # The worker decodes this key to recover the job payload
        cmap.data = {"lithops.payload": dict_to_b64str(payload)}

        logger.debug("Creating ConfigMap {}".format(config_map_name))
        try:
            self.core_api.create_namespaced_config_map(
                namespace=self.namespace,
                body=cmap,
                field_manager='lithops'
            )
        except ApiException as e:
            if e.status == 409:
                # 409 Conflict: the ConfigMap already exists — not an error
                logger.debug("ConfigMap {} already exists".format(config_map_name))
            else:
                logger.debug("Creating a configmap failed with {} {}"
                             .format(e.status, e.reason))
                # Chain the original ApiException so the cause is not lost
                raise Exception('Failed to create ConfigMap') from e

        return config_map_name
Example #2
0
    def invoke(self,
               docker_image_name,
               memory=None,
               payload=None,
               return_result=False):
        """
        Invoke a function by pushing the payload to its input queue.

        Args:
            docker_image_name (str): Image name used to derive the action name.
            memory: Unused here; kept for interface compatibility.
            payload (dict, optional): Payload sent (base64-encoded) on the
                input queue. Defaults to an empty dict.
            return_result (bool): If True, block polling the output queue and
                return the decoded result instead of the activation id.

        Returns:
            The decoded result dict when *return_result* is True, otherwise
            the queue message id of the invocation.
        """
        # Avoid the shared-mutable-default-argument pitfall (was `payload={}`)
        payload = {} if payload is None else payload

        action_name = self._format_action_name(docker_image_name)
        in_q_name = self._format_queue_name(action_name, az_config.IN_QUEUE)
        in_queue = self.queue_service.get_queue_client(in_q_name)
        msg = in_queue.send_message(dict_to_b64str(payload))
        activation_id = msg.id

        if return_result:
            out_q_name = self._format_queue_name(action_name,
                                                 az_config.OUT_QUEUE)
            out_queue = self.queue_service.get_queue_client(out_q_name)
            msg = []
            # Poll once per second until a result message appears
            while not msg:
                time.sleep(1)
                msg = out_queue.receive_message()
            out_queue.clear_messages()
            return b64str_to_dict(msg.content)

        return activation_id
Example #3
0
    def _create_config_map(self, payload, jobrun_name):
        """
        Create a ConfigMap for a jobrun, holding the base64-encoded payload.

        Args:
            payload (dict): Job payload, stored base64-encoded under the
                well-known key ``lithops.payload``.
            jobrun_name (str): Jobrun name; the ConfigMap is named
                ``<jobrun_name>-configmap``.

        Returns:
            str: The generated ConfigMap name, also when it already existed.

        Raises:
            Exception: If creation fails for any reason other than the
                ConfigMap already existing (HTTP 409 Conflict).
        """
        config_name = '{}-configmap'.format(jobrun_name)
        cmap = client.V1ConfigMap()
        cmap.metadata = client.V1ObjectMeta(name=config_name)
        # The worker decodes this key to recover the job payload
        cmap.data = {"lithops.payload": dict_to_b64str(payload)}

        field_manager = 'lithops'

        try:
            logger.debug("Generate ConfigMap {} for namespace {}".format(config_name, self.namespace))
            self.coreV1Api.create_namespaced_config_map(namespace=self.namespace,
                                                        body=cmap,
                                                        field_manager=field_manager)
            logger.debug("ConfigMap {} for namespace {} created".format(config_name, self.namespace))
        except ApiException as e:
            if e.status == 409:
                # 409 Conflict: the ConfigMap already exists — not an error
                logger.debug("ConfigMap {} for namespace {} already exists".format(config_name, self.namespace))
            else:
                logger.warning("Exception when calling CoreV1Api->create_namespaced_config_map: %s\n" % e)
                # Chain the original ApiException so the cause is not lost
                raise Exception('Failed to create ConfigMap') from e

        return config_name
Example #4
0
    def invoke(self, docker_image_name, runtime_memory, job_payload):
        """
        Invoke a job by creating a Kubernetes batch Job for it.

        For array jobs only the remote invoker is allowed: the payload is
        passed (base64-encoded) via environment variables and the workers
        contact the master pod.

        Args:
            docker_image_name (str): Container image to run.
            runtime_memory (int): Memory (MiB) requested/limited per worker.
            job_payload (dict): Job description; must provide 'max_workers',
                'executor_id', 'job_id', 'job_key', 'total_calls' and
                'chunksize'.

        Returns:
            str: The activation id ('lithops-<job_key>'), which is also the
            Kubernetes Job name.
        """
        master_ip = self._start_master(docker_image_name)

        workers = job_payload['max_workers']
        executor_id = job_payload['executor_id']
        job_id = job_payload['job_id']

        job_key = job_payload['job_key']
        self.jobs.append(job_key)

        total_calls = job_payload['total_calls']
        chunksize = job_payload['chunksize']
        # ceil(total_calls / chunksize), capped at the configured max workers
        total_workers = min(
            workers, total_calls // chunksize + (total_calls % chunksize > 0))

        job_res = yaml.safe_load(k8s_config.JOB_DEFAULT)

        # Kubernetes resource names must be lowercase
        activation_id = 'lithops-{}'.format(job_key.lower())

        job_res['metadata']['name'] = activation_id
        job_res['metadata']['namespace'] = self.namespace

        job_res['spec']['activeDeadlineSeconds'] = self.k8s_config[
            'runtime_timeout']
        job_res['spec']['parallelism'] = total_workers

        container = job_res['spec']['template']['spec']['containers'][0]
        container['image'] = docker_image_name
        # ':latest' keeps the default Always pull policy; pinned tags
        # can safely reuse a locally cached image
        if not docker_image_name.endswith(':latest'):
            container['imagePullPolicy'] = 'IfNotPresent'

        # env[0]: entry-point mode, env[1]: encoded payload, env[2]: master IP
        container['env'][0]['value'] = 'run'
        container['env'][1]['value'] = dict_to_b64str(job_payload)
        container['env'][2]['value'] = master_ip

        container['resources']['requests']['memory'] = '{}Mi'.format(
            runtime_memory)
        container['resources']['requests']['cpu'] = str(
            self.k8s_config['runtime_cpu'])
        container['resources']['limits']['memory'] = '{}Mi'.format(
            runtime_memory)
        container['resources']['limits']['cpu'] = str(
            self.k8s_config['runtime_cpu'])

        logger.debug('ExecutorID {} | JobID {} - Going '
                     'to run {} activations in {} workers'.format(
                         executor_id, job_id, total_calls, total_workers))

        # No try/except: the original `except Exception as e: raise e`
        # wrapper only degraded the traceback — let errors propagate as-is
        self.batch_api.create_namespaced_job(namespace=self.namespace,
                                             body=job_res)

        return activation_id
Example #5
0
    def invoke(self,
               docker_image_name,
               memory=None,
               payload=None,
               return_result=False):
        """
        Invoke a function, either through a storage queue ('event') or an
        HTTP call to the Azure Function App ('http').

        Args:
            docker_image_name (str): Image name used to derive the action name.
            memory (int, optional): Memory size, part of the action name.
            payload (dict, optional): Invocation payload. Defaults to an
                empty dict.
            return_result (bool): If True, block until the function returns
                and give back its decoded result instead of an activation id.

        Returns:
            The function result (dict) when *return_result* is True; the
            activation id otherwise; or None when the HTTP endpoint answers
            429 (throttled) or the invocation type is unrecognized.
        """
        # Avoid the shared-mutable-default-argument pitfall (was `payload={}`)
        payload = {} if payload is None else payload
        # Initialized up front so an unrecognized invocation_type returns
        # None instead of raising NameError at the final return
        activation_id = None

        action_name = self._format_action_name(docker_image_name, memory)
        if self.invocation_type == 'event':

            in_q_name = self._format_queue_name(action_name,
                                                az_config.IN_QUEUE)
            in_queue = self.queue_service.get_queue_client(in_q_name)
            msg = in_queue.send_message(dict_to_b64str(payload))
            activation_id = msg.id

            if return_result:
                out_q_name = self._format_queue_name(action_name,
                                                     az_config.OUT_QUEUE)
                out_queue = self.queue_service.get_queue_client(out_q_name)
                msg = []
                # Poll once per second until a result message appears
                while not msg:
                    time.sleep(1)
                    msg = out_queue.receive_message()
                out_queue.clear_messages()
                return b64str_to_dict(msg.content)

        elif self.invocation_type == 'http':
            endpoint = "https://{}.azurewebsites.net".format(action_name)
            parsed_url = urlparse(endpoint)
            # NOTE(review): certificate verification is disabled here —
            # presumably deliberate for *.azurewebsites.net; confirm
            ctx = ssl._create_unverified_context()
            conn = http.client.HTTPSConnection(parsed_url.netloc, context=ctx)

            route = "/api/lithops_handler"
            if return_result:
                # Synchronous call: GET returns the function result body
                conn.request("GET",
                             route,
                             body=json.dumps(payload, default=str))
                resp = conn.getresponse()
                data = json.loads(resp.read().decode("utf-8"))
                conn.close()
                return data
            else:
                # logger.debug('Invoking calls {}'.format(', '.join(payload['call_ids'])))
                conn.request("POST",
                             route,
                             body=json.dumps(payload, default=str))
                resp = conn.getresponse()
                if resp.status == 429:
                    # Throttled by the platform: back off briefly and signal
                    # the caller to retry by returning None
                    time.sleep(0.2)
                    conn.close()
                    return None
                activation_id = resp.read().decode("utf-8")
                conn.close()

        return activation_id
Example #6
0
    def _generate_runtime_meta(self, docker_image_name):
        """
        Extract the Python preinstalled-modules metadata from a runtime image.

        Launches a short-lived Kubernetes Job that runs the image in
        'preinstalls' mode, watches it until it succeeds or fails, then reads
        the metadata the job wrote to the internal storage.

        Args:
            docker_image_name (str): Container image to inspect.

        Returns:
            dict: Runtime metadata loaded from the internal storage.

        Raises:
            Exception: If the Job ends in the failed state.
        """
        runtime_name = self._format_job_name(docker_image_name, 128)
        modules_job_name = '{}-modules'.format(runtime_name)

        logger.info(
            "Extracting Python modules from: {}".format(docker_image_name))

        payload = copy.deepcopy(self.internal_storage.storage.storage_config)
        payload['runtime_name'] = runtime_name
        payload['log_level'] = logger.getEffectiveLevel()

        job_res = yaml.safe_load(k8s_config.JOB_DEFAULT)
        job_res['metadata']['name'] = modules_job_name
        job_res['metadata']['namespace'] = self.namespace

        container = job_res['spec']['template']['spec']['containers'][0]
        container['image'] = docker_image_name
        # env[0]: entry-point mode, env[1]: encoded payload
        container['env'][0]['value'] = 'preinstalls'
        container['env'][1]['value'] = dict_to_b64str(payload)

        # Best-effort cleanup of a leftover job from a previous run
        try:
            self.batch_api.delete_namespaced_job(
                namespace=self.namespace,
                name=modules_job_name,
                propagation_policy='Background')
        except Exception:
            pass

        # No try/except: the original `raise e` (with an unreachable `pass`
        # after it) only degraded the traceback — let errors propagate as-is
        self.batch_api.create_namespaced_job(namespace=self.namespace,
                                             body=job_res)

        logger.debug("Waiting for runtime metadata")

        done = False
        failed = False

        # BUG FIX: the original condition `not done or failed` stayed True
        # forever once the job failed, spinning on the watch. Exit as soon
        # as the job is either done OR failed.
        while not (done or failed):
            try:
                w = watch.Watch()
                for event in w.stream(
                        self.batch_api.list_namespaced_job,
                        namespace=self.namespace,
                        field_selector="metadata.name={0}".format(
                            modules_job_name),
                        timeout_seconds=10):
                    failed = event['object'].status.failed
                    done = event['object'].status.succeeded
                    logger.debug('...')
                    if done or failed:
                        w.stop()
            except Exception:
                # Transient watch errors: re-establish the watch and retry
                pass

        if done:
            logger.debug("Runtime metadata generated successfully")

        # Best-effort cleanup of the finished job
        try:
            self.batch_api.delete_namespaced_job(
                namespace=self.namespace,
                name=modules_job_name,
                propagation_policy='Background')
        except Exception:
            pass

        if failed:
            raise Exception(
                "Unable to extract Python preinstalled modules from the runtime"
            )

        status_key = '/'.join([JOBS_PREFIX, runtime_name + '.meta'])
        json_str = self.internal_storage.get_data(key=status_key)
        runtime_meta = json.loads(json_str.decode("ascii"))

        return runtime_meta
Example #7
0
    def _generate_runtime_meta(self, docker_image_name, memory):
        """
        Extract the Python preinstalled-modules metadata from a runtime image.

        Launches a short-lived Job that runs the image in 'preinstalls' mode,
        then polls the internal storage (up to 19 attempts, 5s apart) until
        the metadata key the job writes becomes available.

        Args:
            docker_image_name (str): Container image to inspect.
            memory: Unused here; kept for interface compatibility.

        Returns:
            dict: Runtime metadata loaded from the internal storage.

        Raises:
            Exception: If the metadata never appears within the retry budget.
        """
        runtime_name = self._format_job_name(docker_image_name, 256)
        modules_job_name = '{}-modules'.format(runtime_name)

        logger.info(
            "Extracting Python modules from: {}".format(docker_image_name))

        payload = copy.deepcopy(self.internal_storage.storage.storage_config)
        payload['runtime_name'] = runtime_name
        payload['log_level'] = logger.getEffectiveLevel()

        job_res = yaml.safe_load(k8s_config.JOB_DEFAULT)
        job_res['metadata']['name'] = modules_job_name
        job_res['metadata']['namespace'] = self.namespace

        container = job_res['spec']['template']['spec']['containers'][0]
        container['image'] = docker_image_name
        # env[0]: entry-point mode, env[1]: encoded payload
        container['env'][0]['value'] = 'preinstalls'
        container['env'][1]['value'] = dict_to_b64str(payload)

        # Best-effort cleanup of a leftover job from a previous run
        try:
            self.batch_api.delete_namespaced_job(
                namespace=self.namespace,
                name=modules_job_name,
                propagation_policy='Background')
        except Exception:
            pass

        # No try/except: the original `raise e` (with an unreachable `pass`
        # after it) only degraded the traceback — let errors propagate as-is
        self.batch_api.create_namespaced_job(namespace=self.namespace,
                                             body=job_res)

        # The job writes its result to storage; poll until the key appears
        status_key = '/'.join([JOBS_PREFIX, runtime_name + '.meta'])

        retry = 1
        found = False
        while retry < 20 and not found:
            try:
                logger.debug("Retry attempt {} to read {}".format(
                    retry, status_key))
                json_str = self.internal_storage.get_data(key=status_key)
                logger.debug("Found in attempt {} to read {}".format(
                    retry, status_key))
                runtime_meta = json.loads(json_str.decode("ascii"))
                found = True
            except StorageNoSuchKeyError:
                logger.debug(
                    "{} not found in attempt {}. Sleep before retry".format(
                        status_key, retry))
                retry += 1
                time.sleep(5)

        if not found:
            raise Exception(
                "Unable to extract Python preinstalled modules from the runtime"
            )

        # Best-effort cleanup of the finished job
        try:
            self.batch_api.delete_namespaced_job(
                namespace=self.namespace,
                name=modules_job_name,
                propagation_policy='Background')
        except Exception:
            pass

        return runtime_meta