# Imports assumed by the snippets below (lithops, smart_open, and numpy are
# third-party; split_s3_path, split_cos_path, get_s3_client, and the sort
# helpers come from the original modules):
import io
import logging
import math
import subprocess
from shutil import copyfileobj

import numpy as np
from lithops import FunctionExecutor, Storage
from lithops.storage.utils import CloudObject
from lithops.utils import setup_lithops_logger
from smart_open import open  # smart_open's open streams s3:// URLs

logger = logging.getLogger(__name__)


def parse_input_path_for_lithops(sm_config, input_path):
    if input_path.startswith(('s3://', 's3a://')):
        backend = 'aws_s3'
        bucket, prefix = split_s3_path(input_path)
    else:
        backend = 'ibm_cos'
        bucket, prefix = split_cos_path(input_path)

    storage = Storage(sm_config['lithops'], backend)
    if (backend == 'aws_s3'
            and sm_config['lithops']['aws_s3']['endpoint'].startswith('http://')):
        # WORKAROUND for local Minio access
        # Lithops forces the url to HTTPS, so overwrite the S3 client with a fixed client
        # https://github.com/lithops-cloud/lithops/issues/708
        storage.storage_handler.s3_client = get_s3_client()

    keys_in_path = storage.list_keys(bucket, prefix)
    imzml_keys = [key for key in keys_in_path if key.lower().endswith('.imzml')]
    ibd_keys = [key for key in keys_in_path if key.lower().endswith('.ibd')]

    debug_info = f'Path {input_path} had keys: {keys_in_path}'
    assert len(imzml_keys) == 1, f"Couldn't determine imzML file. {debug_info}"
    assert len(ibd_keys) == 1, f"Couldn't determine ibd file. {debug_info}"

    imzml_cobject = CloudObject(storage.backend, bucket, imzml_keys[0])
    ibd_cobject = CloudObject(storage.backend, bucket, ibd_keys[0])
    return storage, imzml_cobject, ibd_cobject
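
# A minimal usage sketch for parse_input_path_for_lithops; the config values,
# bucket, and path below are hypothetical placeholders:
#
#   sm_config = {'lithops': {'aws_s3': {'endpoint': 'https://s3.amazonaws.com'},
#                            'ibm_cos': {}}}
#   storage, imzml_cobj, ibd_cobj = parse_input_path_for_lithops(
#       sm_config, 's3://example-bucket/datasets/sample1/')
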
def validate_summaries(key_prefix, bucket_name, storage):
    storage_client = Storage()
    key_list = storage_client.list_keys(bucket_name, key_prefix + '/')
    # Keys end in a numeric partition id, so sort numerically, not lexically
    sorted_key_list = sorted(key_list, key=lambda x: int(x.split('/')[-1]))

    summaries_buf = io.BytesIO()

    # Concatenate all summaries into one buffer; 'open' here is
    # smart_open.open, which streams the s3:// URL through the boto3 client
    for key_name in sorted_key_list:
        with open(f's3://{storage.bucket}/{key_name}',
                  'rb',
                  transport_params=dict(client=storage.get_client())) as source_file:
            copyfileobj(source_file, summaries_buf)

    # valsort (the sort-benchmark validator) reads the concatenated
    # summaries from stdin and reports any ordering violations
    cmd = ['./valsort', '-s', '/dev/stdin']
    with subprocess.Popen(cmd,
                          stdout=subprocess.PIPE,
                          stdin=subprocess.PIPE,
                          stderr=subprocess.PIPE) as p:
        # communicate() writes the buffer, closes stdin (valsort reads until
        # EOF), and drains both pipes without risk of deadlock
        valoutput, valerr = p.communicate(summaries_buf.getvalue())
        if p.returncode != 0:
            raise Exception(f'Non-zero return code for valsort: {p.returncode}\n'
                            + valerr.decode('utf-8'))
        # valsort prints nothing when the summaries are consistent
        return valoutput.decode('utf-8')
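
# A single-call equivalent of the valsort invocation above, as a sketch;
# subprocess.run with input= performs the same write/close/drain cycle:
#
#   result = subprocess.run(['./valsort', '-s', '/dev/stdin'],
#                           input=summaries_buf.getvalue(),
#                           capture_output=True)
#   valoutput_str = result.stdout.decode('utf-8')
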
def validate_command(prefix, image):
    storage_client = Storage()

    with FunctionExecutor(runtime=image) as fexec:
        bucket = fexec.config['lithops']['storage_bucket']
        key_list = storage_client.list_keys(bucket, prefix + '/')

        validate_records_futures = fexec.map(validate_records,
                                             key_list,
                                             extra_args=[bucket, prefix],
                                             include_modules=['util'])
        results = fexec.get_result(fs=validate_records_futures)
        for index, r in enumerate(results):
            if not r['success']:
                print(f'Failed to validate partition: {key_list[index]}')
                print(r['stderr'])
                return

        # summary_postfix is a module-level constant defined elsewhere in
        # the original module
        validate_summaries_futures = fexec.map(validate_summaries,
                                               [prefix + summary_postfix],
                                               extra_args=[bucket],
                                               include_modules=['util'])
        results = fexec.get_result(fs=validate_summaries_futures)
        if results[0] == '':
            # Empty valsort output means the records are globally sorted
            print('Success!')
        else:
            print(results)
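
# How the two phases above fit together (inferred from this snippet): the
# validate_records mappers check each partition individually and, presumably,
# write the per-partition summaries that validate_summaries later feeds to
# valsort to verify ordering across partition boundaries. A hypothetical
# invocation (prefix and runtime image are placeholders):
#
#   validate_command('sorted-records', 'example/lithops-sort-runtime:latest')
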
def empty_bucket(bucket, backend, debug):
    log_level = logging.DEBUG if debug else logging.INFO
    setup_lithops_logger(log_level)
    storage = Storage(backend=backend)
    logger.info('Deleting all objects in bucket "{}"'.format(bucket))
    keys = storage.list_keys(bucket)
    logger.info('Total objects found: {}'.format(len(keys)))
    storage.delete_objects(bucket, keys)
    logger.info('All objects deleted successfully')
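
# Hypothetical invocation (bucket name is a placeholder):
#
#   empty_bucket('example-bucket', 'aws_s3', debug=False)
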
def delete_object(bucket, key, prefix, backend, debug):
    log_level = logging.DEBUG if debug else logging.INFO
    setup_lithops_logger(log_level)
    storage = Storage(backend=backend)

    if key:
        logger.info('Deleting object "{}" from bucket "{}"'.format(key, bucket))
        storage.delete_object(bucket, key)
        logger.info('Object deleted successfully')
    elif prefix:
        objs = storage.list_keys(bucket, prefix)
        logger.info('Deleting {} objects with prefix "{}" from bucket "{}"'.format(len(objs), prefix, bucket))
        storage.delete_objects(bucket, objs)
        logger.info('Objects deleted successfully')
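
# Hypothetical invocations (bucket, key, and prefix are placeholders); the
# function expects exactly one of key or prefix and silently does nothing
# if both are empty:
#
#   delete_object('example-bucket', key='data/part-00000', prefix=None,
#                 backend='aws_s3', debug=False)
#   delete_object('example-bucket', key=None, prefix='data/',
#                 backend='aws_s3', debug=False)
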
def sort_command(input_prefix, output_prefix, max_parallelism, image):
    storage_client = Storage()
    bucket = None
    input_info_list = None

    with FunctionExecutor(runtime=image, workers=max_parallelism) as fexec:
        bucket = fexec.config['lithops']['storage_bucket']
        input_info_list = storage_client.list_objects(bucket,
                                                      input_prefix + '/')
        input_size = sum(info['Size'] for info in input_info_list)
        (num_shuffles, last_values_per_category) = make_plan(input_size)

        current_values_per_category = 1
        current_keys_list = [{
            'keys_list': [key_name],
            'prefix': input_prefix + '-intermediate0',
            'category_stack': []
        } for key_name in storage_client.list_keys(bucket, input_prefix + '/')]
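        # Each work item carries 'keys_list' (objects to read), 'prefix'
        # (where this round's output is written), and 'category_stack'
        # (the radix digits fixed so far; empty before the first shuffle)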
        for current_shuffle in range(num_shuffles):
            # Change values per category of last shuffle
            if current_shuffle == num_shuffles - 1:
                current_values_per_category = last_values_per_category

            radix_sort_futures = fexec.map(
                radix_sort_by_byte,
                current_keys_list,
                extra_args={'values_per_category': current_values_per_category},
                include_modules=['util'])
            radix_sort_results = fexec.get_result(fs=radix_sort_futures)

            categories_keys_lists = {}
            for res in radix_sort_results:
                intermediate_keys_list = res['keys_list']
                input_category_stack = res['category_stack']
                for key_name in intermediate_keys_list:
                    # The category id is the third-from-last path component
                    category_id = int(key_name.rsplit(sep='/', maxsplit=3)[-3])
                    new_category_stack = input_category_stack + [category_id]
                    new_category_stack_str = '/'.join(
                        str(x) for x in new_category_stack)
                    categories_keys_lists.setdefault(
                        new_category_stack_str, []).append(key_name)

            # Partition category lists
            # Attach prefix metadata so that sorter knows what to name files
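            # each_category_size estimates a category's data volume after
            # this shuffle; buffer_size_to_categorize is a module-level
            # constant from the original code that caps how much data a
            # single sorter loads at once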
            each_category_size = input_size / (
                (256 / current_values_per_category) * (current_shuffle + 1))
            num_partitions_per_category = math.ceil(each_category_size /
                                                    buffer_size_to_categorize)

            current_keys_list = []
            for category_stack_str, cat_keys_list in categories_keys_lists.items():
                for sub_list in np.array_split(cat_keys_list,
                                               num_partitions_per_category):
                    partition_entry = {
                        'keys_list': sub_list,
                        'prefix': f'{input_prefix}-intermediate{current_shuffle + 1}',
                        'category_stack':
                            [int(x) for x in category_stack_str.split('/')],
                    }
                    current_keys_list.append(partition_entry)

        # If the final shuffle used one value per category, the last byte is
        # already fully separated and sorters can skip comparing it
        consider_last_byte_sorted = (last_values_per_category == 1)
        for entry in current_keys_list:
            entry['prefix'] = output_prefix
        sorted_keys_list = sorted(current_keys_list,
                                  key=lambda x: x['category_stack'])
        sort_category_futures = fexec.map(
            sort_category,
            sorted_keys_list,
            extra_args={'consider_last_byte_sorted': consider_last_byte_sorted},
            include_modules=['util'])
        results = fexec.get_result(fs=sort_category_futures)

    # Check if size of output matches size of input

    output_info_list = storage_client.list_objects(bucket, output_prefix)
    output_size = sum(info['Size'] for info in output_info_list)
    assert input_size == output_size, f'input size: {input_size}, output_size: {output_size}'

    print('Done!')
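
# Hypothetical end-to-end run combining the commands above (all names are
# placeholders):
#
#   sort_command('input-records', 'sorted-records', max_parallelism=64,
#                image='example/lithops-sort-runtime:latest')
#   validate_command('sorted-records', 'example/lithops-sort-runtime:latest')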