Example #1
def deploy(name, storage, backend, memory, timeout, config, debug):
    """ deploy a serverless runtime """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime create" command is only valid for serverless backends')

    logger.info('Creating new lithops runtime: {}'.format(name))
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)
    mem = memory if memory else compute_config['runtime_memory']
    to = timeout if timeout else compute_config['runtime_timeout']
    runtime_key = compute_handler.get_runtime_key(name, mem)
    runtime_meta = compute_handler.deploy_runtime(name, mem, timeout=to)
    internal_storage.put_runtime_meta(runtime_key, runtime_meta)
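
Every example on this page funnels an optional config path through load_yaml_config before merging it with default_config. For orientation, a minimal sketch of what such a helper typically does, assuming PyYAML; the actual Lithops implementation may differ in details:

import os
import yaml  # PyYAML, assumed

def load_yaml_config(config_file):
    """Parse a YAML file and return its contents as a dict."""
    with open(os.path.expanduser(config_file), 'r') as f:
        return yaml.safe_load(f)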
Example #2
def delete(name, config, backend, storage, debug):
    """ delete a serverless runtime """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime delete" command is only valid for serverless backends')

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #3
def delete(name, config, backend, storage):
    """ delete a serverless runtime """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    if name:
        verify_runtime_name(name)
    else:
        name = config[mode]['runtime']

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #4
def create(name, storage, backend, memory, timeout, config):
    """ Create a serverless runtime """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    if name:
        verify_runtime_name(name)
    else:
        name = config[mode]['runtime']

    logger.info('Creating new lithops runtime: {}'.format(name))
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)
    mem = memory if memory else compute_config['runtime_memory']
    to = timeout if timeout else compute_config['runtime_timeout']
    runtime_key = compute_handler.get_runtime_key(name, mem)
    runtime_meta = compute_handler.create_runtime(name, mem, timeout=to)

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception:
        raise ("Unable to upload 'preinstalled-modules' file into {}".format(internal_storage.backend))
Example #5
def test_function(config, backend, storage, debug):
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)

    try:
        import getpass
        username = getpass.getuser()
    except Exception:
        username = '******'

    def hello(name):
        return 'Hello {}!'.format(name)

    fexec = lithops.FunctionExecutor(config=config, backend=backend, storage=storage)
    fexec.call_async(hello, username)
    result = fexec.get_result()
    print()
    if result == 'Hello {}!'.format(username):
        print(result, 'Lithops is working as expected :)')
    else:
        print(result, 'Something went wrong :(')
    print()
Example #6
def update(name, config, backend, storage, debug):
    """ Update a serverless runtime """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime update" command is only valid for serverless backends')

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    timeout = compute_config['runtime_timeout']
    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)

    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.deploy_runtime(runtime[0], runtime[1], timeout)
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
Example #7
def update(name, config, backend, storage):
    """ Update a serverless runtime """
    if config:
        config = load_yaml_config(config)

    verify_runtime_name(name)
    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    timeout = compute_config['runtime_timeout']
    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)

    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.create_runtime(runtime[0], runtime[1], timeout)

        try:
            internal_storage.put_runtime_meta(runtime_key, runtime_meta)
        except Exception:
            raise("Unable to upload 'preinstalled-modules' file into {}".format(internal_storage.backend))
Example #8
def list_bucket(prefix, bucket, backend, debug, config):
    if config:
        config = load_yaml_config(config)
    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)
    storage = Storage(config=config, backend=backend)
    logger.info('Listing objects in bucket {}'.format(bucket))
    objects = storage.list_objects(bucket, prefix=prefix)

    if objects:
        width = max([len(obj['Key']) for obj in objects])

        print('\n{:{width}} \t {} \t\t {:>9}'.format('Key', 'Last modified', 'Size', width=width))
        print('-' * width, '\t', '-' * 20, '\t', '-' * 9)
        for obj in objects:
            key = obj['Key']
            date = obj['LastModified'].strftime("%b %d %Y %H:%M:%S")
            size = sizeof_fmt(obj['Size'])
            print('{:{width}} \t {} \t {:>9}'.format(key, date, size, width=width))
        print()
        print('Total objects: {}'.format(len(objects)))
    else:
        width = 10
        print('\n{:{width}} \t {} \t\t {:>9}'.format('Key', 'Last modified', 'Size', width=width))
        print('-' * width, '\t', '-' * 20, '\t', '-' * 9)
        print('\nThe bucket is empty')
Example #9
File: cli.py Project: aitorarjona/lithops
def download_file(bucket, key, out, backend, debug, config):
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)
    storage = Storage(config=config, backend=backend)

    def download_file():
        logger.info(
            f'Downloading file {storage.backend}://{bucket}/{key} to {out or key}'
        )
        if storage.download_file(bucket, key, out):
            file_size = os.path.getsize(out or key)
            logger.info(
                f'Download File {key} - Size: {sizeof_fmt(file_size)} - Ok')
        else:
            logger.error(f'Download File {key} - Error')

    with ThreadPoolExecutor() as ex:
        future = ex.submit(download_file)
        cy = cycle(r"-\|/")
        while not future.done():
            print("Downloading file " + next(cy), end="\r")
            time.sleep(0.1)
        future.result()
Example #10
    def __init__(self,
                 api_key,
                 api_key_type='IAM',
                 token=None,
                 token_expiry_time=None):
        self.api_key = api_key
        self.api_key_type = api_key_type

        self._token_manager = DefaultTokenManager(api_key_id=self.api_key)
        self._token_filename = os.path.join(
            CACHE_DIR, 'ibm_{}'.format(api_key_type.lower()), 'token')

        if token:
            logger.debug(
                "Using IBM {} API Key - Reusing Token from config".format(
                    self.api_key_type))
            self._token_manager._token = token
            self._token_manager._expiry_time = datetime.strptime(
                token_expiry_time, '%Y-%m-%d %H:%M:%S.%f%z')
            logger.debug("Token expiry time: {} - Minutes left: {}".format(
                self._token_manager._expiry_time,
                self._get_token_minutes_diff()))

        elif os.path.exists(self._token_filename):
            logger.debug(
                "Using IBM {} API Key - Reusing Token from local cache".format(
                    self.api_key_type))
            token_data = load_yaml_config(self._token_filename)
            self._token_manager._token = token_data['token']
            self._token_manager._expiry_time = datetime.strptime(
                token_data['token_expiry_time'], '%Y-%m-%d %H:%M:%S.%f%z')
            logger.debug("Token expiry time: {} - Minutes left: {}".format(
                self._token_manager._expiry_time,
                self._get_token_minutes_diff()))
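
The cached-token branch above only requires load_yaml_config to return a two-key mapping. Illustratively (both values are invented):

# Shape of the dict returned by load_yaml_config(self._token_filename)
# in the example above; the values below are invented for illustration:
token_data = {
    'token': '<opaque-iam-access-token>',
    'token_expiry_time': '2021-06-01 12:00:00.000000+0000',  # parsed with '%Y-%m-%d %H:%M:%S.%f%z'
}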
Example #11
def list_runtimes(config, backend, debug):
    """ list all deployed serverless runtime. """
    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, runtime_name='None')
    config = default_config(config, config_ow, load_storage_config=False)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime list" command is only valid for serverless backends')

    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, None)
    runtimes = compute_handler.list_runtimes()

    if runtimes:
        width = max([len(runtime[0]) for runtime in runtimes])

        print('\n{:{width}} \t {}'.format('Runtime Name', 'Memory Size (MB)', width=width))
        print('-' * width, '\t', '-' * 20)
        for runtime in runtimes:
            name = runtime[0]
            mem = runtime[1]
            print('{:{width}} \t {}'.format(name, mem, width=width))
        print()
        print('Total runtimes: {}'.format(len(runtimes)))
    else:
        width = 10
        print('\n{:{width}} \t {}'.format('Runtime Name', 'Memory Size (MB)', width=width))
        print('-' * width, '\t', '-' * 20)
        print('\nNo runtimes deployed')
Example #12
    def init(self):
        """
        Initialize the VPC
        """
        vpc_data_filename = os.path.join(CACHE_DIR, self.name, 'data')
        vpc_data = load_yaml_config(vpc_data_filename)

        if self.mode == 'consume':
            logger.debug('Initializing IBM VPC backend (Consume mode)')
            if 'instance_name' not in vpc_data:
                instance_data = self.ibm_vpc_client.get_instance(
                    self.config['instance_id'])
                name = instance_data.get_result()['name']
                vpc_data = {'instance_name': name}
                dump_yaml_config(vpc_data_filename, vpc_data)
            self.master = IBMVPCInstance(vpc_data['instance_name'],
                                         self.config,
                                         self.ibm_vpc_client,
                                         public=True)
            self.master.instance_id = self.config['instance_id']
            self.master.public_ip = self.config['ip_address']
            self.master.delete_on_dismantle = False
            return

        logger.debug('Initializing IBM VPC backend (Create mode)')
        # Create the VPC if not exists
        self._create_vpc(vpc_data)
        # Set the prefix used for the VPC resources
        self.vpc_key = self.config['vpc_id'].split('-')[2]
        # Create a new gateway if not exists
        self._create_gateway(vpc_data)
        # Create a new subnet if not exists
        self._create_subnet(vpc_data)
        # Create a new floating IP if not exists
        self._create_floating_ip(vpc_data)

        vpc_data = {
            'vpc_id': self.config['vpc_id'],
            'subnet_id': self.config['subnet_id'],
            'security_group_id': self.config['security_group_id'],
            'floating_ip': self.config['floating_ip'],
            'floating_ip_id': self.config['floating_ip_id'],
            'gateway_id': self.config['gateway_id']
        }

        dump_yaml_config(vpc_data_filename, vpc_data)

        # create the master VM instance
        name = 'lithops-master-{}'.format(self.vpc_key)
        self.master = IBMVPCInstance(name,
                                     self.config,
                                     self.ibm_vpc_client,
                                     public=True)
        self.master.public_ip = self.config['floating_ip']
        self.master.profile_name = self.config['master_profile_name']
        self.master.delete_on_dismantle = False
Example #13
def verify(test, config, mode, backend, storage, debug):
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)

    if test == 'help':
        print_help()
    else:
        run_tests(test, config, mode, backend, storage)
Example #14
    def __init__(self, config=None):
        if isinstance(config, str):
            config = load_yaml_config(config)
            self._config = extract_storage_config(config)
        elif isinstance(config, dict):
            if 'lithops' in config:
                self._config = extract_storage_config(config)
            else:
                self._config = config
        else:
            self._config = extract_storage_config(default_storage_config())
        super().__init__(storage_config=self._config)
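
A quick usage sketch of the constructor above; the argument values are illustrative, not taken from the Lithops documentation:

# Hypothetical usage of the wrapper above (values are illustrative):
s1 = Storage()                           # no argument: default_storage_config() applies
s2 = Storage('~/.lithops/config')        # str: the YAML file is parsed via load_yaml_config
s3 = Storage({'backend': 'localhost'})   # dict without a 'lithops' key: used as the storage config directly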
Example #15
def delete_object(bucket, key, prefix, backend, debug, config):
    if config:
        config = load_yaml_config(config)
    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)
    storage = Storage(config=config, backend=backend)

    if key:
        logger.info('Deleting object "{}" from bucket "{}"'.format(key, bucket))
        storage.delete_object(bucket, key)
        logger.info('Object deleted successfully')
    elif prefix:
        objs = storage.list_keys(bucket, prefix)
        logger.info('Deleting {} objects with prefix "{}" from bucket "{}"'.format(len(objs), prefix, bucket))
        storage.delete_objects(bucket, objs)
        logger.info('Objects deleted successfully')
Example #16
def verify(test, config, backend, groups, storage, debug, fail_fast, keep_datasets):
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)

    if groups and test == 'all':  # if the user specified group(s), avoid running all tests
        test = ''

    if test == 'help':
        print_test_functions()
    elif groups == 'help':
        print_test_groups()

    else:
        run_tests(test, config, groups, backend, storage, fail_fast, keep_datasets)
Example #17
def build(ctx, name, file, config, backend):
    """ build a serverless runtime. """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, runtime_name=name)
    config = default_config(config, config_ow, load_storage_config=False)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops build" command is only valid for serverless backends')

    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, None)
    compute_handler.build_runtime(name, file, ctx.args)
Example #18
def build(name, file, config, backend):
    """ build a serverless runtime. """
    if config:
        config = load_yaml_config(config)

    verify_runtime_name(name)
    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)
    compute_handler.build_runtime(name, file)
Example #19
    def __init__(self, component_name, iam_api_key, token=None, token_expiry_time=None):
        self.component_name = component_name
        self.iam_api_key = iam_api_key

        self._token_manager = DefaultTokenManager(api_key_id=self.iam_api_key)
        self._token_filename = os.path.join(CACHE_DIR, self.component_name, 'iam_token')

        if token:
            logger.debug("Using IBM IAM API Key - Reusing Token from config")
            self._token_manager._token = token
            self._token_manager._expiry_time = datetime.strptime(token_expiry_time, '%Y-%m-%d %H:%M:%S.%f%z')
            logger.debug("Token expiry time: {} - Minutes left: {}".format(self._token_manager._expiry_time, self._get_token_minutes_diff()))
        elif os.path.exists(self._token_filename):
            logger.debug("Using IBM IAM API Key - Reusing Token from local cache")
            token_data = load_yaml_config(self._token_filename)
            self._token_manager._token = token_data['token']
            self._token_manager._expiry_time = datetime.strptime(token_data['token_expiry_time'], '%Y-%m-%d %H:%M:%S.%f%z')
            logger.debug("Token expiry time: {} - Minutes left: {}".format(self._token_manager._expiry_time, self._get_token_minutes_diff()))
Example #20
def test_function(config, mode, backend, storage, debug):
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)

    def hello(name):
        return 'Hello {}!'.format(name)

    fexec = lithops.FunctionExecutor(config=config, mode=mode,
                                     backend=backend, storage=storage)
    fexec.call_async(hello, 'World')
    result = fexec.get_result()
    print()
    if result == 'Hello World!':
        print(result, 'Lithops is working as expected :)')
    else:
        print(result, 'Something went wrong :(')
    print()
Example #21
File: cli.py Project: aitorarjona/lithops
def attach(config, backend, start, debug):
    """Create or attach to a SSH session on Lithops master VM"""
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)

    config_ow = set_config_ow(backend)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != STANDALONE:
        raise Exception(
            'lithops attach method is only available for standalone backends')

    compute_config = extract_standalone_config(config)
    compute_handler = StandaloneHandler(compute_config)
    compute_handler.init()

    if start:
        compute_handler.backend.master.start()

    master_ip = compute_handler.backend.master.get_public_ip()
    user = compute_handler.backend.master.ssh_credentials['username']
    key_file = compute_handler.backend.master.ssh_credentials[
        'key_filename'] or '~/.ssh/id_rsa'
    key_file = os.path.abspath(os.path.expanduser(key_file))

    if not os.path.exists(key_file):
        raise Exception(f'Private key file {key_file} does not exist')

    print(f'Got master VM public IP address: {master_ip}')
    print(f'Loading ssh private key from: {key_file}')
    print('Creating SSH Connection to lithops master VM')
    cmd = (
        'ssh -o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no" '
        f'-i {key_file} {user}@{master_ip}')

    compute_handler.backend.master.wait_ready()

    sp.run(shlex.split(cmd))
Example #22
def clean(config, mode, backend, storage, debug):
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)
    logger.info('Cleaning all Lithops information')

    mode = mode or get_mode(backend, config)
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode
    if mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(LITHOPS_TEMP_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example #23
    def init(self):
        """
        Initialize the VPC
        """
        vpc_data_filename = os.path.join(self.cache_dir, 'data')
        self.vpc_data = load_yaml_config(vpc_data_filename)

        cached_mode = self.vpc_data.get('mode')
        logger.debug(f'Initializing IBM VPC backend ({self.mode} mode)')

        if self.mode == 'consume':
            cached_instance_id = self.vpc_data.get('instance_id')
            if self.mode != cached_mode or self.config[
                    'instance_id'] != cached_instance_id:
                ins_id = self.config['instance_id']
                instance_data = self.ibm_vpc_client.get_instance(ins_id)
                name = instance_data.get_result()['name']
                self.vpc_data = {
                    'mode': 'consume',
                    'instance_id': self.config['instance_id'],
                    'instance_name': name,
                    'floating_ip': self.config['ip_address']
                }
                dump_yaml_config(vpc_data_filename, self.vpc_data)

            self.master = IBMVPCInstance(self.vpc_data['instance_name'],
                                         self.config,
                                         self.ibm_vpc_client,
                                         public=True)
            self.master.instance_id = self.config['instance_id']
            self.master.public_ip = self.config['ip_address']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

        elif self.mode in ['create', 'reuse']:
            if self.mode != cached_mode:
                # invalidate cached data
                self.vpc_data = {}

            # Create the VPC if not exists
            self._create_vpc(self.vpc_data)
            # Set the prefix used for the VPC resources
            self.vpc_key = self.config['vpc_id'].split('-')[2]
            # Create a new gateway if not exists
            self._create_gateway(self.vpc_data)
            # Create a new subnet if not exists
            self._create_subnet(self.vpc_data)
            # Create a new floating IP if not exists
            self._create_floating_ip(self.vpc_data)

            # create the master VM instance
            name = 'lithops-master-{}'.format(self.vpc_key)
            self.master = IBMVPCInstance(name,
                                         self.config,
                                         self.ibm_vpc_client,
                                         public=True)
            self.master.public_ip = self.config['floating_ip']
            self.master.profile_name = self.config['master_profile_name']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

            instance_data = self.master.get_instance_data()
            if instance_data:
                self.master.private_ip = instance_data[
                    'primary_network_interface']['primary_ipv4_address']
                self.master.instance_id = instance_data['id']

            self.vpc_data = {
                'mode': 'consume',
                'instance_name': self.master.name,
                'instance_id': '0af1',
                'vpc_id': self.config['vpc_id'],
                'subnet_id': self.config['subnet_id'],
                'security_group_id': self.config['security_group_id'],
                'floating_ip': self.config['floating_ip'],
                'floating_ip_id': self.config['floating_ip_id'],
                'gateway_id': self.config['gateway_id']
            }

            dump_yaml_config(vpc_data_filename, self.vpc_data)
Example #24
    def __init__(self, knative_config, storage_config):
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'knative'
        self.knative_config = knative_config
        self.istio_endpoint = self.knative_config.get('istio_endpoint')

        # The k8s config can be in-cluster, in ~/.kube/config, or in a generated kube-config.yaml
        # file referenced by the env variable KUBECONFIG=<path-to-kube-config>
        try:
            config.load_kube_config()
            current_context = config.list_kube_config_contexts()[1].get(
                'context')
            self.namespace = current_context.get('namespace', 'default')
            self.cluster = current_context.get('cluster')
            self.knative_config['namespace'] = self.namespace
            self.knative_config['cluster'] = self.cluster
            self.is_incluster = False
        except Exception:
            config.load_incluster_config()
            self.namespace = self.knative_config.get('namespace', 'default')
            self.cluster = self.knative_config.get('cluster', 'default')
            self.is_incluster = True

        self.api = client.CustomObjectsApi()
        self.v1 = client.CoreV1Api()

        if self.istio_endpoint is None:
            try:
                ingress = self.v1.read_namespaced_service(
                    'istio-ingressgateway', 'istio-system')
                http_port = list(
                    filter(lambda port: port.port == 80,
                           ingress.spec.ports))[0].node_port
                # https_port = list(filter(lambda port: port.port == 443, ingress.spec.ports))[0].node_port

                if ingress.status.load_balancer.ingress is not None:
                    # get loadbalancer ip
                    ip = ingress.status.load_balancer.ingress[0].ip
                else:
                    # for minikube or a baremetal cluster that has no external load balancer
                    node = self.v1.list_node()
                    ip = node.items[0].status.addresses[0].address

                self.istio_endpoint = 'http://{}:{}'.format(ip, http_port)
                self.knative_config['istio_endpoint'] = self.istio_endpoint
            except Exception:
                logger.info("istio-ingressgateway endpoint not found")

        if 'service_host_suffix' not in self.knative_config:
            self.service_host_filename = os.path.join(CACHE_DIR, 'knative',
                                                      self.cluster,
                                                      'service_host')
            self.service_host_suffix = None
            if os.path.exists(self.service_host_filename):
                service_host_data = load_yaml_config(self.service_host_filename)
                self.service_host_suffix = service_host_data[
                    'service_host_suffix']
                self.knative_config[
                    'service_host_suffix'] = self.service_host_suffix
        else:
            self.service_host_suffix = self.knative_config[
                'service_host_suffix']

        logger.debug('Loaded service host suffix: {}'.format(
            self.service_host_suffix))

        if self.istio_endpoint:
            log_msg = 'Lithops v{} init for Knative - Istio Endpoint: {}'.format(
                __version__, self.istio_endpoint)
        else:
            log_msg = 'Lithops v{} init for Knative'.format(__version__)
        if not self.log_active:
            print(log_msg)
        logger.info(log_msg)
Example #25
File: tests.py Project: zhanggbj/lithops
                        metavar='',
                        default=None,
                        help='compute backend')
    parser.add_argument('-s',
                        '--storage',
                        metavar='',
                        default=None,
                        help='storage backend')
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        default=False,
                        help='activate debug logging')
    args = parser.parse_args()

    if args.config:
        if os.path.exists(args.config):
            args.config = load_yaml_config(args.config)
        else:
            raise FileNotFoundError(
                "Provided config file '{}' does not exist".format(args.config))

    log_level = logging.INFO if not args.debug else logging.DEBUG
    setup_lithops_logger(log_level)

    if args.test == 'help':
        print_help()
    else:
        run_tests(args.test, args.config, args.mode, args.backend,
                  args.storage)
Example #26
    def __init__(self, ibm_cos_config, **kwargs):
        logger.debug("Creating IBM COS client")
        self.ibm_cos_config = ibm_cos_config
        self.is_lithops_function = is_lithops_function()
        user_agent = ibm_cos_config['user_agent']

        service_endpoint = ibm_cos_config.get('endpoint').replace('http:', 'https:')
        if self.is_lithops_function and 'private_endpoint' in ibm_cos_config:
            service_endpoint = ibm_cos_config.get('private_endpoint')
            if 'api_key' in ibm_cos_config:
                service_endpoint = service_endpoint.replace('http:', 'https:')

        logger.debug("Set IBM COS Endpoint to {}".format(service_endpoint))

        api_key = None
        if 'api_key' in ibm_cos_config:
            api_key = ibm_cos_config.get('api_key')
            api_key_type = 'COS'
        elif 'iam_api_key' in ibm_cos_config:
            api_key = ibm_cos_config.get('iam_api_key')
            api_key_type = 'IAM'

        if {'secret_key', 'access_key'} <= set(ibm_cos_config):
            logger.debug("Using access_key and secret_key")
            access_key = ibm_cos_config.get('access_key')
            secret_key = ibm_cos_config.get('secret_key')
            client_config = ibm_botocore.client.Config(max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            self.cos_client = ibm_boto3.client('s3',
                                               aws_access_key_id=access_key,
                                               aws_secret_access_key=secret_key,
                                               config=client_config,
                                               endpoint_url=service_endpoint)

        elif api_key is not None:
            client_config = ibm_botocore.client.Config(signature_version='oauth',
                                                       max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            token_manager = DefaultTokenManager(api_key_id=api_key)
            token_filename = os.path.join(CACHE_DIR, 'ibm_cos', api_key_type.lower()+'_token')
            token_minutes_diff = 0

            if 'token' in self.ibm_cos_config:
                logger.debug("Using IBM {} API Key - Reusing Token from config".format(api_key_type))
                token_manager._token = self.ibm_cos_config['token']
                token_manager._expiry_time = datetime.strptime(self.ibm_cos_config['token_expiry_time'],
                                                               '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int((token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(token_manager._expiry_time, token_minutes_diff))

            elif os.path.exists(token_filename):
                token_data = load_yaml_config(token_filename)
                logger.debug("Using IBM {} API Key - Reusing Token from local cache".format(api_key_type))
                token_manager._token = token_data['token']
                token_manager._expiry_time = datetime.strptime(token_data['token_expiry_time'],
                                                               '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int((token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(token_manager._expiry_time, token_minutes_diff))

            if (token_manager._is_expired() or token_minutes_diff < 11) and not is_lithops_function():
                logger.debug("Using IBM {} API Key - Token expired. Requesting new token".format(api_key_type))
                token_manager._token = None
                token_manager.get_token()
                token_data = {}
                token_data['token'] = token_manager._token
                token_data['token_expiry_time'] = token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
                dump_yaml_config(token_filename, token_data)

            if token_manager._token:
                self.ibm_cos_config['token'] = token_manager._token
            if token_manager._expiry_time:
                self.ibm_cos_config['token_expiry_time'] = token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')

            self.cos_client = ibm_boto3.client('s3', token_manager=token_manager,
                                               config=client_config,
                                               endpoint_url=service_endpoint)
        logger.debug("IBM COS client created successfully")
Example #27
    def __init__(self, knative_config, storage_config):
        self.name = 'knative'
        self.knative_config = knative_config
        self.istio_endpoint = self.knative_config.get('istio_endpoint')
        self.kubecfg_path = self.knative_config.get('kubecfg_path')

        # The k8s config can be in-cluster, in ~/.kube/config, or in a generated kube-config.yaml
        # file referenced by the env variable KUBECONFIG=<path-to-kube-config>
        try:
            config.load_kube_config(config_file=self.kubecfg_path)
            contexts = config.list_kube_config_contexts(
                config_file=self.kubecfg_path)
            current_context = contexts[1].get('context')
            self.namespace = current_context.get('namespace', 'default')
            self.cluster = current_context.get('cluster')
            self.knative_config['namespace'] = self.namespace
            self.knative_config['cluster'] = self.cluster
            self.is_incluster = False
        except Exception:
            config.load_incluster_config()
            self.namespace = self.knative_config.get('namespace', 'default')
            self.cluster = self.knative_config.get('cluster', 'default')
            self.is_incluster = True

        logger.debug("Set namespace to {}".format(self.namespace))
        logger.debug("Set cluster to {}".format(self.cluster))

        self.api = client.CustomObjectsApi()
        self.v1 = client.CoreV1Api()

        if self.istio_endpoint is None:
            try:
                ingress = self.v1.read_namespaced_service(
                    'istio-ingressgateway', 'istio-system')
                http_port = list(
                    filter(lambda port: port.port == 80,
                           ingress.spec.ports))[0].node_port
                # https_port = list(filter(lambda port: port.port == 443, ingress.spec.ports))[0].node_port

                if ingress.status.load_balancer.ingress is not None:
                    # get loadbalancer ip
                    ip = ingress.status.load_balancer.ingress[0].ip
                else:
                    # for minikube or a baremetal cluster that has no external load balancer
                    node = self.v1.list_node()
                    ip = node.items[0].status.addresses[0].address

                if ip and http_port:
                    self.istio_endpoint = 'http://{}:{}'.format(ip, http_port)
                    self.knative_config['istio_endpoint'] = self.istio_endpoint
            except Exception:
                pass

        if 'service_host_suffix' not in self.knative_config:
            self.service_host_filename = os.path.join(CACHE_DIR, 'knative',
                                                      self.cluster,
                                                      'service_host')
            self.service_host_suffix = None
            if os.path.exists(self.service_host_filename):
                service_host_data = load_yaml_config(self.service_host_filename)
                self.service_host_suffix = service_host_data[
                    'service_host_suffix']
                self.knative_config[
                    'service_host_suffix'] = self.service_host_suffix
        else:
            self.service_host_suffix = self.knative_config[
                'service_host_suffix']

        logger.debug('Loaded service host suffix: {}'.format(
            self.service_host_suffix))

        msg = COMPUTE_CLI_MSG.format('Knative')
        if self.istio_endpoint:
            msg += ' - Istio Endpoint: {}'.format(self.istio_endpoint)
        elif self.cluster:
            msg += ' - Cluster: {}'.format(self.cluster)
        logger.info("{}".format(msg))
Example #28
    def __init__(self, ibm_cf_config):
        logger.debug("Creating IBM Cloud Functions client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'ibm_cf'
        self.ibm_cf_config = ibm_cf_config
        self.is_lithops_function = is_lithops_function()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get(
            'namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get(
            'api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.info("Set IBM CF Namespace to {}".format(self.namespace))
        logger.info("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = self.api_key[:5] if self.api_key else self.iam_api_key[:5]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)
        elif self.iam_api_key:
            token_manager = DefaultTokenManager(api_key_id=self.iam_api_key)
            token_filename = os.path.join(CACHE_DIR, 'ibm_cf', 'iam_token')
            token_minutes_diff = 0

            if 'token' in self.ibm_cf_config:
                logger.debug(
                    "Using IBM IAM API Key - Reusing Token from config")
                token_manager._token = self.ibm_cf_config['token']
                token_manager._expiry_time = datetime.strptime(
                    self.ibm_cf_config['token_expiry_time'],
                    '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int(
                    (token_manager._expiry_time -
                     datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(
                    token_manager._expiry_time, token_minutes_diff))

            elif os.path.exists(token_filename):
                logger.debug(
                    "Using IBM IAM API Key - Reusing Token from local cache")
                token_data = load_yaml_config(token_filename)
                token_manager._token = token_data['token']
                token_manager._expiry_time = datetime.strptime(
                    token_data['token_expiry_time'], '%Y-%m-%d %H:%M:%S.%f%z')
                token_minutes_diff = int(
                    (token_manager._expiry_time -
                     datetime.now(timezone.utc)).total_seconds() / 60.0)
                logger.debug("Token expiry time: {} - Minutes left: {}".format(
                    token_manager._expiry_time, token_minutes_diff))

            if (token_manager._is_expired()
                    or token_minutes_diff < 11) and not is_lithops_function():
                logger.debug(
                    "Using IBM IAM API Key - Token expired. Requesting new token"
                )
                token_manager._token = None
                token_manager.get_token()
                token_data = {}
                token_data['token'] = token_manager._token
                token_data[
                    'token_expiry_time'] = token_manager._expiry_time.strftime(
                        '%Y-%m-%d %H:%M:%S.%f%z')
                dump_yaml_config(token_filename, token_data)

            ibm_cf_config['token'] = token_manager._token
            ibm_cf_config[
                'token_expiry_time'] = token_manager._expiry_time.strftime(
                    '%Y-%m-%d %H:%M:%S.%f%z')

            auth_token = token_manager._token
            auth = 'Bearer ' + auth_token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        log_msg = (
            'Lithops v{} init for IBM Cloud Functions - Namespace: {} - '
            'Region: {}'.format(__version__, self.namespace, self.region))
        if not self.log_active:
            print(log_msg)
        logger.info("IBM CF client created successfully")
Example #29
    def init(self):
        """
        Initialize the backend by defining the Master VM
        """
        ec2_data_filename = os.path.join(self.cache_dir, 'data')
        self.ec2_data = load_yaml_config(ec2_data_filename)

        cached_mode = self.ec2_data.get('mode')
        cached_instance_id = self.ec2_data.get('instance_id')

        logger.debug(f'Initializing AWS EC2 backend ({self.mode} mode)')

        if self.mode == 'consume':
            ins_id = self.config['instance_id']

            if self.mode != cached_mode or ins_id != cached_instance_id:
                instances = self.ec2_client.describe_instances(InstanceIds=[ins_id])
                instance_data = instances['Reservations'][0]['Instances'][0]
                name = 'lithops-consume'
                for tag in instance_data['Tags']:
                    if tag['Key'] == 'Name':
                        name = tag['Value']
                private_ip = instance_data['PrivateIpAddress']
                self.ec2_data = {'mode': self.mode,
                                 'instance_id': ins_id,
                                 'instance_name': name,
                                 'private_ip': private_ip}
                dump_yaml_config(ec2_data_filename, self.ec2_data)

            self.master = EC2Instance(self.ec2_data['instance_name'], self.config,
                                      self.ec2_client, public=True)
            self.master.instance_id = ins_id
            self.master.private_ip = self.ec2_data['private_ip']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

        elif self.mode in ['create', 'reuse']:
            if self.mode != cached_mode:
                # invalidate cached data
                self.ec2_data = {}

            self.vpc_key = self.config['vpc_id'][-4:]
            master_name = 'lithops-master-{}'.format(self.vpc_key)
            self.master = EC2Instance(master_name, self.config, self.ec2_client, public=True)
            self.master.instance_type = self.config['master_instance_type']
            self.master.delete_on_dismantle = False
            self.master.ssh_credentials.pop('password')

            instance_data = self.master.get_instance_data()
            if instance_data and 'InstanceId' in instance_data:
                self.master.instance_id = instance_data['InstanceId']
            if instance_data and 'PrivateIpAddress' in instance_data:
                self.master.private_ip = instance_data['PrivateIpAddress']
            if instance_data and instance_data['State']['Name'] == 'running' and \
               'PublicIpAddress' in instance_data:
                self.master.public_ip = instance_data['PublicIpAddress']

            self.ec2_data['instance_id'] = '0af1'

            if self.config['request_spot_instances']:
                wit = self.config["worker_instance_type"]
                logger.debug(f'Requesting current spot price for worker VMs of type {wit}')
                response = self.ec2_client.describe_spot_price_history(
                    EndTime=datetime.today(), InstanceTypes=[wit],
                    ProductDescriptions=['Linux/UNIX (Amazon VPC)'],
                    StartTime=datetime.today()
                )
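                # Note: only the spot price reported for the last AZ in the response is kept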
                for az in response['SpotPriceHistory']:
                    spot_price = az['SpotPrice']
                self.config["spot_price"] = spot_price
                logger.debug(f'Current spot instance price for {wit} is ${spot_price}')