Example #1
0
 def _generate_new_token(self):
     self._token_manager._token = None
     self._token_manager.get_token()
     token_data = {}
     token_data['token'] = self._token_manager._token
     token_data['token_expiry_time'] = self._token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
     dump_yaml_config(self._token_filename, token_data)
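
The function above only writes the token cache; a minimal sketch of the matching read path, assuming the same YAML layout and timestamp format (the project's own load_yaml_config helper is approximated here with PyYAML):

import yaml
from datetime import datetime, timezone

def load_cached_token(token_filename):
    # Approximation of the load_yaml_config helper used in the snippets above
    with open(token_filename) as f:
        token_data = yaml.safe_load(f) or {}
    expiry_time = datetime.strptime(token_data['token_expiry_time'],
                                    '%Y-%m-%d %H:%M:%S.%f%z')
    # Minutes remaining until the cached token expires
    minutes_left = int((expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
    return token_data['token'], expiry_time, minutes_left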
Example #2
0
    def init(self):
        """
        Initialize the VPC
        """
        vpc_data_filename = os.path.join(CACHE_DIR, self.name, 'data')
        vpc_data = load_yaml_config(vpc_data_filename)

        if self.mode == 'consume':
            logger.debug('Initializing IBM VPC backend (Consume mode)')
            if 'instance_name' not in vpc_data:
                instance_data = self.ibm_vpc_client.get_instance(
                    self.config['instance_id'])
                name = instance_data.get_result()['name']
                vpc_data = {'instance_name': name}
                dump_yaml_config(vpc_data_filename, vpc_data)
            self.master = IBMVPCInstance(vpc_data['instance_name'],
                                         self.config,
                                         self.ibm_vpc_client,
                                         public=True)
            self.master.instance_id = self.config['instance_id']
            self.master.public_ip = self.config['ip_address']
            self.master.delete_on_dismantle = False
            return

        logger.debug('Initializing IBM VPC backend (Create mode)')
        # Create the VPC if it does not exist
        self._create_vpc(vpc_data)
        # Set the prefix used for the VPC resources
        self.vpc_key = self.config['vpc_id'].split('-')[2]
        # Create a new gateway if it does not exist
        self._create_gateway(vpc_data)
        # Create a new subnet if it does not exist
        self._create_subnet(vpc_data)
        # Create a new floating IP if it does not exist
        self._create_floating_ip(vpc_data)

        vpc_data = {
            'vpc_id': self.config['vpc_id'],
            'subnet_id': self.config['subnet_id'],
            'security_group_id': self.config['security_group_id'],
            'floating_ip': self.config['floating_ip'],
            'floating_ip_id': self.config['floating_ip_id'],
            'gateway_id': self.config['gateway_id']
        }

        dump_yaml_config(vpc_data_filename, vpc_data)

        # Create the master VM instance
        name = 'lithops-master-{}'.format(self.vpc_key)
        self.master = IBMVPCInstance(name,
                                     self.config,
                                     self.ibm_vpc_client,
                                     public=True)
        self.master.public_ip = self.config['floating_ip']
        self.master.profile_name = self.config['master_profile_name']
        self.master.delete_on_dismantle = False
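
For reference, a small sketch (not part of the original code) that reads back the VPC cache file written by init(); the expected keys mirror the vpc_data dict dumped above:

import os
import yaml

def read_vpc_cache(cache_dir, backend_name):
    # init() above stores the cache under <CACHE_DIR>/<name>/data
    vpc_data_filename = os.path.join(cache_dir, backend_name, 'data')
    if not os.path.exists(vpc_data_filename):
        return {}
    with open(vpc_data_filename) as f:
        vpc_data = yaml.safe_load(f) or {}
    # After a 'create' run the file holds: vpc_id, subnet_id, security_group_id,
    # floating_ip, floating_ip_id and gateway_id
    return vpc_data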
Example #3
0
 def _generate_new_token(self):
     self._token_manager._token = None
     self._token_manager.get_token()
     token_data = {}
     token_data['token'] = self._token_manager._token
     token_data['token_expiry_time'] = self._token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
     dump_yaml_config(self._token_filename, token_data)
     logger.debug("Token expiry time: {} - Minutes left: {}".
                  format(self._token_manager._expiry_time,
                         self._get_token_minutes_diff()))
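
_get_token_minutes_diff is called in the log line above but not shown; a plausible sketch, reusing the minutes-left computation that appears in Examples #4 and #8 (an assumption, not the project's actual helper):

from datetime import datetime, timezone

def _get_token_minutes_diff(self):
    # Minutes remaining until the cached IAM token expires
    expiry_time = self._token_manager._expiry_time
    return int((expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)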
Example #4
0
    def __init__(self, ibm_cf_config):
        logger.debug("Creating IBM Cloud Functions client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'ibm_cf'
        self.ibm_cf_config = ibm_cf_config
        self.is_lithops_function = is_lithops_function()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get(
            'namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get(
            'api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.info("Set IBM CF Namespace to {}".format(self.namespace))
        logger.info("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = self.api_key[:5] if self.api_key else self.iam_api_key[:5]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)
        elif self.iam_api_key:
            token_manager = DefaultTokenManager(api_key_id=self.iam_api_key)
            token_filename = os.path.join(CACHE_DIR, 'ibm_cf', 'iam_token')
            # Start at 0 so the expiry check below never reads an undefined value
            token_minutes_diff = 0

            if 'token' in self.ibm_cf_config:
                logger.debug(
                    "Using IBM IAM API Key - Reusing Token from config")
                token_manager._token = self.ibm_cf_config['token']
                token_manager._expiry_time = datetime.strptime(
                    self.ibm_cf_config['token_expiry_time'],
                    '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int(
                    (token_manager._expiry_time -
                     datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(
                    token_manager._expiry_time, token_minutes_diff))

            elif os.path.exists(token_filename):
                logger.debug(
                    "Using IBM IAM API Key - Reusing Token from local cache")
                token_data = load_yaml_config(token_filename)
                token_manager._token = token_data['token']
                token_manager._expiry_time = datetime.strptime(
                    token_data['token_expiry_time'], '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int(
                    (token_manager._expiry_time -
                     datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(
                    token_manager._expiry_time, token_minutes_diff))

            if (token_manager._is_expired()
                    or token_minutes_diff < 11) and not is_lithops_function():
                logger.debug("Using IBM IAM API Key - Token expired. Requesting new token")
                token_manager._token = None
                token_manager.get_token()
                token_data = {}
                token_data['token'] = token_manager._token
                token_data['token_expiry_time'] = token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
                dump_yaml_config(token_filename, token_data)

            ibm_cf_config['token'] = token_manager._token
            ibm_cf_config['token_expiry_time'] = token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')

            auth_token = token_manager._token
            auth = 'Bearer ' + auth_token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        log_msg = (
            'Lithops v{} init for IBM Cloud Functions - Namespace: {} - '
            'Region: {}'.format(__version__, self.namespace, self.region))
        if not self.log_active:
            print(log_msg)
        logger.info("IBM CF client created successfully")
Example #5
0
    def _create_service(self, docker_image_name, runtime_memory, timeout):
        """
        Creates a Knative service based on the docker_image_name and the memory provided
        """
        logger.debug("Creating Lithops runtime service in Knative")
        svc_res = yaml.safe_load(kconfig.service_res)

        service_name = self._format_service_name(docker_image_name,
                                                 runtime_memory)
        svc_res['metadata']['name'] = service_name
        svc_res['metadata']['namespace'] = self.namespace

        logger.debug("Service name: {}".format(service_name))
        logger.debug("Namespace: {}".format(self.namespace))

        svc_res['spec']['template']['spec']['timeoutSeconds'] = timeout
        full_docker_image_name = '/'.join(
            [self.knative_config['docker_repo'], docker_image_name])
        svc_res['spec']['template']['spec']['containers'][0][
            'image'] = full_docker_image_name
        svc_res['spec']['template']['spec']['containers'][0]['resources'][
            'limits']['memory'] = '{}Mi'.format(runtime_memory)
        svc_res['spec']['template']['spec']['containers'][0]['resources'][
            'limits']['cpu'] = '{}m'.format(self.knative_config['cpu'])
        svc_res['spec']['template']['spec']['containers'][0]['resources'][
            'requests']['memory'] = '{}Mi'.format(runtime_memory)
        svc_res['spec']['template']['spec']['containers'][0]['resources'][
            'requests']['cpu'] = '{}m'.format(self.knative_config['cpu'])

        try:
            # Delete the service resource if it already exists
            self.api.delete_namespaced_custom_object(
                group="serving.knative.dev",
                version="v1",
                name=service_name,
                namespace=self.namespace,
                plural="services",
                body=client.V1DeleteOptions())
            time.sleep(2)
        except Exception:
            pass

        # create the service resource
        self.api.create_namespaced_custom_object(group="serving.knative.dev",
                                                 version="v1",
                                                 namespace=self.namespace,
                                                 plural="services",
                                                 body=svc_res)

        w = watch.Watch()
        for event in w.stream(
                self.api.list_namespaced_custom_object,
                namespace=self.namespace,
                group="serving.knative.dev",
                version="v1",
                plural="services",
                field_selector="metadata.name={0}".format(service_name),
                timeout_seconds=300):
            if event['object'].get('status'):
                service_url = event['object']['status'].get('url')
                conditions = event['object']['status']['conditions']
                if conditions[0]['status'] == 'True' and \
                   conditions[1]['status'] == 'True' and \
                   conditions[2]['status'] == 'True':
                    w.stop()
                    time.sleep(2)

        log_msg = 'Runtime Service created - URL: {}'.format(service_url)
        logger.debug(log_msg)

        self.service_host_suffix = service_url[7:].replace(service_name, '')
        # Store service host suffix in local cache
        service_host_data = {}
        service_host_data['service_host_suffix'] = self.service_host_suffix
        dump_yaml_config(self.serice_host_filename, service_host_data)
        self.knative_config['service_host_suffix'] = self.service_host_suffix

        return service_url
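
A short sketch (an assumption about intent, not code from the source) of why the suffix is cached: it is the service URL host with the service name removed, so a later call can rebuild the host for any runtime without querying the cluster:

def build_service_host(service_name, service_host_suffix):
    # service_host_suffix was computed above as service_url[7:] with the
    # service name stripped, so host == service_name + suffix
    return service_name + service_host_suffix

# e.g. build_service_host('lithops-runtime-000', '.default.example.com')
#      -> 'lithops-runtime-000.default.example.com'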
Example #6
0
    def init(self):
        """
        Initialize the VPC
        """
        vpc_data_filename = os.path.join(self.cache_dir, 'data')
        self.vpc_data = load_yaml_config(vpc_data_filename)

        cached_mode = self.vpc_data.get('mode')
        logger.debug(f'Initializing IBM VPC backend ({self.mode} mode)')

        if self.mode == 'consume':
            cached_instance_id = self.vpc_data.get('instance_id')
            if self.mode != cached_mode or self.config['instance_id'] != cached_instance_id:
                ins_id = self.config['instance_id']
                instance_data = self.ibm_vpc_client.get_instance(ins_id)
                name = instance_data.get_result()['name']
                self.vpc_data = {
                    'mode': 'consume',
                    'instance_id': self.config['instance_id'],
                    'instance_name': name,
                    'floating_ip': self.config['ip_address']
                }
                dump_yaml_config(vpc_data_filename, self.vpc_data)

            self.master = IBMVPCInstance(self.vpc_data['instance_name'],
                                         self.config,
                                         self.ibm_vpc_client,
                                         public=True)
            self.master.instance_id = self.config['instance_id']
            self.master.public_ip = self.config['ip_address']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

        elif self.mode in ['create', 'reuse']:
            if self.mode != cached_mode:
                # invalidate cached data
                self.vpc_data = {}

            # Create the VPC if it does not exist
            self._create_vpc(self.vpc_data)
            # Set the prefix used for the VPC resources
            self.vpc_key = self.config['vpc_id'].split('-')[2]
            # Create a new gateway if it does not exist
            self._create_gateway(self.vpc_data)
            # Create a new subnet if it does not exist
            self._create_subnet(self.vpc_data)
            # Create a new floating IP if it does not exist
            self._create_floating_ip(self.vpc_data)

            # Create the master VM instance
            name = 'lithops-master-{}'.format(self.vpc_key)
            self.master = IBMVPCInstance(name,
                                         self.config,
                                         self.ibm_vpc_client,
                                         public=True)
            self.master.public_ip = self.config['floating_ip']
            self.master.profile_name = self.config['master_profile_name']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

            instance_data = self.master.get_instance_data()
            if instance_data:
                self.master.private_ip = instance_data[
                    'primary_network_interface']['primary_ipv4_address']
                self.master.instance_id = instance_data['id']

            self.vpc_data = {
                'mode': 'consume',
                'instance_name': self.master.name,
                'instance_id': '0af1',
                'vpc_id': self.config['vpc_id'],
                'subnet_id': self.config['subnet_id'],
                'security_group_id': self.config['security_group_id'],
                'floating_ip': self.config['floating_ip'],
                'floating_ip_id': self.config['floating_ip_id'],
                'gateway_id': self.config['gateway_id']
            }

            dump_yaml_config(vpc_data_filename, self.vpc_data)
Example #7
0
    def _create_service(self, docker_image_name, runtime_memory, timeout):
        """
        Creates a Knative service based on the docker_image_name and the memory provided
        """
        logger.debug("Creating Lithops runtime service in Knative")
        svc_res = yaml.safe_load(kconfig.service_res)

        service_name = self._format_service_name(docker_image_name, runtime_memory)
        svc_res['metadata']['name'] = service_name
        svc_res['metadata']['namespace'] = self.namespace

        logger.debug("Service name: {}".format(service_name))
        logger.debug("Namespace: {}".format(self.namespace))

        svc_res['spec']['template']['spec']['timeoutSeconds'] = timeout
        svc_res['spec']['template']['spec']['containerConcurrency'] = self.knative_config['runtime_concurrency']

        container = svc_res['spec']['template']['spec']['containers'][0]
        container['image'] = docker_image_name
        container['env'][0] = {'name': 'CONCURRENCY', 'value': str(self.knative_config['runtime_concurrency'])}
        container['env'][1] = {'name': 'TIMEOUT', 'value': str(timeout)}
        container['resources']['limits']['memory'] = '{}Mi'.format(runtime_memory)
        container['resources']['limits']['cpu'] = str(self.knative_config['runtime_cpu'])
        container['resources']['requests']['memory'] = '{}Mi'.format(runtime_memory)
        container['resources']['requests']['cpu'] = str(self.knative_config['runtime_cpu'])

        svc_res['spec']['template']['metadata']['annotations']['autoscaling.knative.dev/minScale'] = str(self.knative_config['runtime_min_instances'])
        svc_res['spec']['template']['metadata']['annotations']['autoscaling.knative.dev/maxScale'] = str(self.knative_config['runtime_max_instances'])
        svc_res['spec']['template']['metadata']['annotations']['autoscaling.knative.dev/target'] = str(self.knative_config['runtime_concurrency'])

        try:
            # Delete the service resource if it already exists
            self.custom_api.delete_namespaced_custom_object(
                    group=kconfig.DEFAULT_GROUP,
                    version=kconfig.DEFAULT_VERSION,
                    name=service_name,
                    namespace=self.namespace,
                    plural="services",
                    body=client.V1DeleteOptions()
                )
            time.sleep(2)
        except Exception:
            pass

        # create the service resource
        self.custom_api.create_namespaced_custom_object(
                group=kconfig.DEFAULT_GROUP,
                version=kconfig.DEFAULT_VERSION,
                namespace=self.namespace,
                plural="services",
                body=svc_res
            )

        w = watch.Watch()
        for event in w.stream(self.custom_api.list_namespaced_custom_object,
                              namespace=self.namespace, group=kconfig.DEFAULT_GROUP,
                              version=kconfig.DEFAULT_VERSION, plural="services",
                              field_selector="metadata.name={0}".format(service_name),
                              timeout_seconds=300):
            if event['object'].get('status'):
                service_url = event['object']['status'].get('url')
                conditions = event['object']['status']['conditions']
                if conditions[0]['status'] == 'True' and \
                   conditions[1]['status'] == 'True' and \
                   conditions[2]['status'] == 'True':
                    w.stop()
                    time.sleep(2)

        log_msg = 'Runtime Service created - URL: {}'.format(service_url)
        logger.debug(log_msg)

        self.service_host_suffix = service_url[7:].replace(service_name, '')
        # Store service host suffix in local cache
        service_host_data = {}
        service_host_data['service_host_suffix'] = self.service_host_suffix
        dump_yaml_config(self.serice_host_filename, service_host_data)
        self.knative_config['service_host_suffix'] = self.service_host_suffix

        return service_url
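
The readiness test in the watch loop checks the first three conditions by index; a small defensive variant (a sketch, not the project's code) that tolerates any number of conditions and avoids an IndexError on partial status updates:

def _service_ready(status):
    # True only when Knative reports at least one condition and all of them are 'True'
    conditions = status.get('conditions', [])
    return bool(conditions) and all(c.get('status') == 'True' for c in conditions)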
Example #8
0
    def __init__(self, ibm_cos_config, **kwargs):
        logger.debug("Creating IBM COS client")
        self.ibm_cos_config = ibm_cos_config
        self.is_lithops_function = is_lithops_function()
        user_agent = ibm_cos_config['user_agent']

        service_endpoint = ibm_cos_config.get('endpoint').replace('http:', 'https:')
        if self.is_lithops_function and 'private_endpoint' in ibm_cos_config:
            service_endpoint = ibm_cos_config.get('private_endpoint')
            if 'api_key' in ibm_cos_config:
                service_endpoint = service_endpoint.replace('http:', 'https:')

        logger.debug("Set IBM COS Endpoint to {}".format(service_endpoint))

        api_key = None
        if 'api_key' in ibm_cos_config:
            api_key = ibm_cos_config.get('api_key')
            api_key_type = 'COS'
        elif 'iam_api_key' in ibm_cos_config:
            api_key = ibm_cos_config.get('iam_api_key')
            api_key_type = 'IAM'

        if {'secret_key', 'access_key'} <= set(ibm_cos_config):
            logger.debug("Using access_key and secret_key")
            access_key = ibm_cos_config.get('access_key')
            secret_key = ibm_cos_config.get('secret_key')
            client_config = ibm_botocore.client.Config(max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            self.cos_client = ibm_boto3.client('s3',
                                               aws_access_key_id=access_key,
                                               aws_secret_access_key=secret_key,
                                               config=client_config,
                                               endpoint_url=service_endpoint)

        elif api_key is not None:
            client_config = ibm_botocore.client.Config(signature_version='oauth',
                                                       max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            token_manager = DefaultTokenManager(api_key_id=api_key)
            token_filename = os.path.join(CACHE_DIR, 'ibm_cos', api_key_type.lower()+'_token')
            token_minutes_diff = 0

            if 'token' in self.ibm_cos_config:
                logger.debug("Using IBM {} API Key - Reusing Token from config".format(api_key_type))
                token_manager._token = self.ibm_cos_config['token']
                token_manager._expiry_time = datetime.strptime(self.ibm_cos_config['token_expiry_time'],
                                                               '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int((token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(token_manager._expiry_time, token_minutes_diff))

            elif os.path.exists(token_filename):
                token_data = load_yaml_config(token_filename)
                logger.debug("Using IBM {} API Key - Reusing Token from local cache".format(api_key_type))
                token_manager._token = token_data['token']
                token_manager._expiry_time = datetime.strptime(token_data['token_expiry_time'],
                                                               '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int((token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(token_manager._expiry_time, token_minutes_diff))

            if (token_manager._is_expired() or token_minutes_diff < 11) and not is_lithops_function():
                logger.debug("Using IBM {} API Key - Token expired. Requesting new token".format(api_key_type))
                token_manager._token = None
                token_manager.get_token()
                token_data = {}
                token_data['token'] = token_manager._token
                token_data['token_expiry_time'] = token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
                dump_yaml_config(token_filename, token_data)

            if token_manager._token:
                self.ibm_cos_config['token'] = token_manager._token
            if token_manager._expiry_time:
                self.ibm_cos_config['token_expiry_time'] = token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')

            self.cos_client = ibm_boto3.client('s3', token_manager=token_manager,
                                               config=client_config,
                                               endpoint_url=service_endpoint)
        logger.debug("IBM COS client created successfully")
Example #9
0
    def init(self):
        """
        Initialize the backend by defining the Master VM
        """
        ec2_data_filename = os.path.join(self.cache_dir, 'data')
        self.ec2_data = load_yaml_config(ec2_data_filename)

        cached_mode = self.ec2_data.get('mode')
        cached_instance_id = self.ec2_data.get('instance_id')

        logger.debug(f'Initializing AWS EC2 backend ({self.mode} mode)')

        if self.mode == 'consume':
            ins_id = self.config['instance_id']

            if self.mode != cached_mode or ins_id != cached_instance_id:
                instances = self.ec2_client.describe_instances(InstanceIds=[ins_id])
                instance_data = instances['Reservations'][0]['Instances'][0]
                name = 'lithops-consume'
                for tag in instance_data['Tags']:
                    if tag['Key'] == 'Name':
                        name = tag['Value']
                private_ip = instance_data['PrivateIpAddress']
                self.ec2_data = {'mode': self.mode,
                                 'instance_id': ins_id,
                                 'instance_name': name,
                                 'private_ip': private_ip}
                dump_yaml_config(ec2_data_filename, self.ec2_data)

            self.master = EC2Instance(self.ec2_data['instance_name'], self.config,
                                      self.ec2_client, public=True)
            self.master.instance_id = ins_id
            self.master.private_ip = self.ec2_data['private_ip']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

        elif self.mode in ['create', 'reuse']:
            if self.mode != cahced_mode:
                # invalidate cached data
                self.ec2_data = {}

            self.vpc_key = self.config['vpc_id'][-4:]
            master_name = 'lithops-master-{}'.format(self.vpc_key)
            self.master = EC2Instance(master_name, self.config, self.ec2_client, public=True)
            self.master.instance_type = self.config['master_instance_type']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

            instance_data = self.master.get_instance_data()
            if instance_data and 'InstanceId' in instance_data:
                self.master.instance_id = instance_data['InstanceId']
            if instance_data and 'PrivateIpAddress' in instance_data:
                self.master.private_ip = instance_data['PrivateIpAddress']
            if instance_data and instance_data['State']['Name'] == 'running' and \
               'PublicIpAddress' in instance_data:
                self.master.public_ip = instance_data['PublicIpAddress']

            self.ec2_data['instance_id'] = '0af1'

            if self.config['request_spot_instances']:
                wit = self.config["worker_instance_type"]
                logger.debug(f'Requesting current spot price for worker VMs of type {wit}')
                response = self.ec2_client.describe_spot_price_history(
                    EndTime=datetime.today(), InstanceTypes=[wit],
                    ProductDescriptions=['Linux/UNIX (Amazon VPC)'],
                    StartTime=datetime.today()
                )
                for az in response['SpotPriceHistory']:
                    spot_price = az['SpotPrice']
                self.config["spot_price"] = spot_price
                logger.debug(f'Current spot instance price for {wit} is ${spot_price}')
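
The spot-price loop above keeps whichever SpotPriceHistory entry happens to come last; a rough sketch of the response shape it consumes (values are illustrative) and an alternative that picks the highest price across availability zones:

example_response = {
    'SpotPriceHistory': [
        {'AvailabilityZone': 'my-az-1', 'InstanceType': '<worker_instance_type>',
         'ProductDescription': 'Linux/UNIX (Amazon VPC)', 'SpotPrice': '0.0125'},
        {'AvailabilityZone': 'my-az-2', 'InstanceType': '<worker_instance_type>',
         'ProductDescription': 'Linux/UNIX (Amazon VPC)', 'SpotPrice': '0.0150'},
    ]
}

# Variant: bid at the highest current price so the request is viable in every zone
spot_price = max(float(az['SpotPrice']) for az in example_response['SpotPriceHistory'])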