Example 1
def test_config(test_job):
    global logger

    host = '127.0.0.1'
    port = '8081'

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')
    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    # Connection config
    config_file = os.path.join(os.path.dirname(__file__), '..', '..', 'src',
                               'config', 'config.yaml')

    with open(config_file, 'r') as file_config:
        config = yaml.safe_load(file_config)
        host = config['replicator'].get('host')
        port = str(config['replicator'].get('port'))

    # URL for non-secure http endpoint
    url = 'http://' + host + ':' + port

    return {'url': url, 'test_job': test_job}
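This reads as a pytest fixture (the decorator is not shown in the excerpt). A minimal, hypothetical test consuming the returned dict might look like the sketch below; the '/jobs' endpoint and the pytest-asyncio marker are assumptions modelled on Example 17, not part of this snippet.

import aiohttp
import pytest

@pytest.mark.asyncio  # requires the pytest-asyncio plugin (an assumption)
async def test_post_job(test_config):
    # POST the test job to the replicator URL built by the fixture.
    async with aiohttp.ClientSession() as session:
        async with session.post(test_config['url'] + '/jobs',
                                json=test_config['test_job']) as response:
            assert response.status in (200, 201)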
Example 2
async def main():
    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate object name
    object_name = str(config.object_name_prefix)
    bucket_name = config.source_bucket_name
    request_id = "dummy-request-id"

    obj = S3AsyncCreateMultipartUpload(session, request_id, bucket_name,
                                       object_name)

    await obj.create()
    logger.info("S3AsyncCreateMultipartUpload test passed!")

    await session.close()
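The async examples in this collection define a main() coroutine but omit the entry point. A standard driver (an assumption, since the scripts' footers are not shown) would be:

import asyncio

if __name__ == '__main__':
    # Python 3.7+; older scripts may use loop.run_until_complete(main()).
    asyncio.run(main())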
Example 3
def logger():
    """Setup logger for tests."""
    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        sys.exit(-1)
    return logger
Example 4
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__),
                                   'config', 'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate bucket names
    bucket_name = config.source_bucket_name

    # Generate object names
    object_name = str(config.object_name_prefix)
    object_size = config.object_size
    range_read_offset = config.range_read_offset
    range_read_length = config.range_read_length
    request_id = "dummy-request-id"

    object_reader = S3AsyncGetObject(session, request_id,
                                     bucket_name, object_name,
                                     object_size, range_read_offset,
                                     range_read_length)

    reader_generator = object_reader.fetch(object_size)
    async for _ in reader_generator:
        pass

    content_length = object_reader.get_content_length()

    if range_read_length >= 0:
        # Validate that the content length matches the requested object range
        if object_reader.get_total_object_range() == content_length:
            logger.info("Content-Length matched!")
            logger.info("S3AsyncGetObjectRangeRead test passed!")
        else:
            logger.error("Error: size mismatch")
            logger.info("S3AsyncGetObjectRangeRead test failed!")
    else:
        # Validate that the content length matches the object size in config
        if object_size == content_length:
            logger.info("Content-Length matched!")
            logger.info("S3AsyncGetObject test passed!")
        else:
            logger.error("Error: size mismatch")
            logger.info("S3AsyncGetObject test failed!")

    await session.close()
Example 5
def init_logger():
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)
    return logger
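setup_logger itself is not shown in any of these excerpts. A plausible sketch, assuming it feeds the YAML file to logging.config.dictConfig and signals failure by returning None (which matches how every caller checks it):

import logging
import logging.config

import yaml

def setup_logger(name, log_config_file):
    """Hypothetical reconstruction; the real implementation may differ."""
    try:
        with open(log_config_file) as f:
            logging.config.dictConfig(yaml.safe_load(f))
        return logging.getLogger(name)
    except (OSError, ValueError, yaml.YAMLError):
        return None  # callers treat None as "failed to configure logging"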
Example 6
    def __init__(self, config_file, log_config_file):
        """Initialise logger and configuration."""
        self._config = Config(config_file)
        if self._config.load() is None:
            print("Failed to load configuration.\n")
            sys.exit(-1)

        # Setup logging.
        self._logger = setup_logger('s3replicationmanager', log_config_file)
        if self._logger is None:
            print("Failed to configure logging.\n")
            sys.exit(-1)

        self._config.print_with(self._logger)

        self._jobs = Jobs(self._logger, "all-jobs")
        self._subscribers = Subscribers()
Example 7
async def main():
    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate object name
    object_name = str(config.object_name_prefix)
    bucket_name = config.source_bucket_name
    request_id = "dummy-request-id"

    # For Cortx, version_id defaults to None.
    # For AWS, the caller of this class should also supply a version ID.
    version_id = None

    # For Cortx, part_number defaults to None.
    # For AWS, the caller of this class should also supply a part number.
    part_number = None

    head_obj = S3AsyncHeadObject(session, request_id, bucket_name, object_name,
                                 version_id)

    # Pass part_number to get a specific part's information
    await head_obj.get(part_number)

    # Validate that the content length matches the object size
    if config.object_size == head_obj.get_content_length():
        logger.info("Content-Length matched!")
        logger.info("S3AsyncHeadObject test passed!")
    else:
        logger.error("Error: Content-Length mismatch")
        logger.info("S3AsyncHeadObject test failed!")

    await session.close()
Example 8
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    # Read configs for later comparison
    replication_prefix = str(config.object_name_prefix)
    test_replication_object = replication_prefix
    replication_dest_bucket = config.target_bucket_name

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate object names
    request_id = "dummy-request-id"
    obj = S3AsyncGetBucketReplication(session, request_id,
                                      config.source_bucket_name)

    # Start transfer
    await obj.get()

    replication_rule = obj.get_replication_rule(test_replication_object)
    logger.debug(replication_rule)

    assert replication_rule._prefix == replication_prefix, \
        "replication_prefix mismatched"
    assert replication_rule._dest_bucket == replication_dest_bucket, \
        "replication_dest_bucket mismatched"

    await session.close()

    logger.info("AsyncS3GetBucketReplication test passed!")
Example 9
async def main():
    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate object name
    object_name = str(config.object_name_prefix)
    bucket_name = config.source_bucket_name
    total_parts = config.total_parts
    request_id = "dummy-request-id"

    # Provide actual upload ID in place of __UPLOAD_ID__
    upload_id = "__UPLOAD_ID__"

    obj_data_generator = MultipartObjectDataGenerator(logger,
                                                      config.object_size,
                                                      total_parts)

    obj = S3AsyncUploadPart(session, request_id, bucket_name, object_name,
                            upload_id)

    total_chunks = int(config.object_size / total_parts)
    print("Total chunks {}".format(total_chunks))

    for part_no in range(1, int(total_parts + 1)):
        await obj.upload(obj_data_generator, part_no, total_chunks)
        print("\n")

    logger.info("S3AsyncUploadPart test passed!")
    await session.close()
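The __UPLOAD_ID__ placeholder above is left for the reader to fill in. In the combined multipart flow shown later (Example 16), the same value is obtained from the create-multipart response instead:

# From Example 16: read the upload ID from the create call's response.
await obj_create.create()
upload_id = obj_create.get_response_header("UploadId")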
Example 10
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)
    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate bucket names
    bucket_name = config.source_bucket_name

    # Generate object names
    object_name = str(config.object_name_prefix)
    request_id = "dummy-request-id"

    # Get tag-name and tag-value from config
    tagset = {}

    tag_name = config.object_tag_name
    tag_value = config.object_tag_value
    tagset[tag_name] = tag_value

    tag_object = S3AsyncPutObjectTagging(session, request_id, bucket_name,
                                         object_name, tagset)

    await tag_object.send()

    logger.info("S3AsyncPutObjectTagging test passed!")
    await session.close()
Example 11
    def __init__(self, config_file, log_config_file):
        """Initialise logger and configuration."""
        self._config = Config(config_file)
        if self._config.load() is None:
            print("Failed to load configuration.\n")
            sys.exit(-1)

        # Setup logging.
        self._logger = setup_logger('s3replicator', log_config_file)
        if self._logger is None:
            print("Failed to configure logging.\n")
            sys.exit(-1)

        self._inprogress_jobs = Jobs(self._logger, "all-jobs")
        if self._config.job_cache_enabled:
            self._completed_jobs = Jobs(self._logger, "completed-jobs",
                                        self._config.job_cache_timeout_secs)
        else:
            self._completed_jobs = Jobs(self._logger, "completed-jobs")

        self._replication_managers = ReplicationManagers()

        self._config.print_with(self._logger)
Example 12
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)
    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate bucket names
    bucket_name = config.source_bucket_name
    # Generate object names
    object_name = str(config.object_name_prefix)
    request_id = "dummy-request-id"

    tag_object = S3AsyncGetObjectTagging(session, request_id, bucket_name,
                                         object_name)

    await tag_object.fetch()

    # Validate that the tag value matches the object tag value in the config
    if config.object_tag_value == tag_object.get_tags_value(
            config.object_tag_name):
        logger.info("Tag value matched!")
        logger.info("S3AsyncGetObjectTagging test passed!")
    else:
        logger.error("Error: Tag value mismatch")
        logger.info("S3AsyncGetObjectTagging test failed!")

    await session.close()
Example 13
async def main():
    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate object name
    object_name = str(config.object_name_prefix)
    bucket_name = config.source_bucket_name
    request_id = "dummy-request-id"

    # Provide actual upload ID in place of __UPLOAD_ID__
    upload_id = "__UPLOAD_ID__"

    # ETag dictionary mapping part number to that part's returned ETag.
    # Example: etag_dict = {1: '"7ade650b8547b04d5fa6ba96e10c8b5f"'}
    etag_dict = {1: "__ETAG_PART1__", 2: "__ETAG_PART2__"}

    obj = S3AsyncCompleteMultipartUpload(session, request_id, bucket_name,
                                         object_name, upload_id, etag_dict)

    await obj.complete_upload()

    logger.info("Final ETag : {}".format(obj.get_final_etag()))
    logger.info("S3AsyncCompleteMultipartUpload test passed!")
    await session.close()
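Likewise, the __ETAG_PART*__ placeholders are stand-ins; the end-to-end flow in Example 16 collects the real values from the upload-part helper:

# From Example 16: per-part ETags gathered after uploading all parts.
etag_dict = obj_upload.get_etag_dict()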
Example 14
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__),
                                   'config', 'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate object names
    source_object_name = str(config.object_name_prefix)
    target_object_name = str(config.object_name_prefix)
    request_id = "dummy-request-id"
    object_reader = S3AsyncGetObject(session, request_id,
                                     config.source_bucket_name,
                                     source_object_name, config.object_size,
                                     config.range_read_offset, config.range_read_length)
    object_writer = S3AsyncPutObject(session, request_id,
                                     config.target_bucket_name,
                                     target_object_name, config.object_size)

    # Start transfer
    await object_writer.send(object_reader, config.transfer_chunk_size)

    logger.info("S3AsyncTransferObject test passed!")

    await session.close()
Example 15
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    # Generate bucket names
    bucket_name = config.source_bucket_name

    # Generate object names
    object_name = str(config.object_name_prefix)
    object_size = config.object_size
    request_id = "dummy-request-id"

    object_reader = FixedObjectDataGenerator(logger, object_name, object_size)

    object_writer = S3AsyncPutObject(session, request_id, bucket_name,
                                     object_name, object_size)

    # Write to the object
    await object_writer.send(object_reader, object_size)

    logger.info("S3AsyncPutObject test passed!")
    await session.close()
Example 16
async def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key)

    source_bucket_name = config.source_bucket_name
    # Generate object names
    source_object_name = str(config.object_name_prefix)
    request_id = "dummy-request-id"

    total_parts = config.total_parts

    # Start Multipart Upload
    # Create multipart
    print("\nCreate multipart upload")
    print("_______________________\n")
    obj_create = S3AsyncCreateMultipartUpload(session, request_id,
                                              source_bucket_name,
                                              source_object_name)

    await obj_create.create()
    upload_id = obj_create.get_response_header("UploadId")

    # Object data generator
    obj_data_generator = MultipartObjectDataGenerator(logger,
                                                      config.object_size,
                                                      total_parts)

    # Upload part
    print("\nUpload part")
    print("___________\n")
    obj_upload = S3AsyncUploadPart(session, request_id, source_bucket_name,
                                   source_object_name, upload_id)

    total_chunks = int(config.object_size / total_parts)

    for part_no in range(1, int(total_parts + 1)):
        await obj_upload.upload(obj_data_generator, part_no, total_chunks)
        print("\n")

    etag_dict = obj_upload.get_etag_dict()

    # Complete multipart upload
    print("Complete multipart upload")
    print("__________________________\n")
    obj_complete = S3AsyncCompleteMultipartUpload(session, request_id,
                                                  source_bucket_name,
                                                  source_object_name,
                                                  upload_id, etag_dict)

    await obj_complete.complete_upload()

    final_etag = obj_complete.get_final_etag()
    logger.info("Final ETag : {}".format(final_etag))

    logger.info("S3AsyncMultipartUpload test passed!")
    await session.close()
Example 17
async def main():
    """Main function for calling various REST requests."""
    host = '127.0.0.1'
    port = '8081'

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    # Create parser object
    parser = argparse.ArgumentParser(description='''Replicator server help''')

    # Add arguments
    parser.add_argument(
        '--configfile',
        type=str,
        metavar='path',
        help='Path to replication manager configuration file (format: yaml)')

    parser.add_argument('--jobfile',
                        type=str,
                        metavar='path',
                        help='Test job record')

    # Parsing arguments
    args = parser.parse_args()

    # Read input config file and get host, port
    if args.configfile is None:
        args.configfile = os.path.join(os.path.dirname(__file__), "..", "..",
                                       "src", 'config', 'config.yaml')
    with open(args.configfile, 'r') as file_config:
        config = yaml.safe_load(file_config)
        host = config['replicator'].get('host')
        port = str(config['replicator'].get('port'))

    # URL for non-secure http endpoint
    url = 'http://' + host + ':' + port

    # Load the test job record.
    test_job = {}
    if args.jobfile is None:
        args.jobfile = os.path.join(os.path.dirname(__file__), 'data',
                                    'test_job.json')
    with open(args.jobfile, 'r') as file_config:
        test_job = json.load(file_config)

    # Start client session
    async with aiohttp.ClientSession() as session:

        # Post the replication job
        async with session.post(url + '/jobs', json=test_job) as response:
            logger.info('POST jobs Status: {}'.format(response.status))
            body = await response.json()
            logger.info('Body: {}'.format(body))
Example 18
def main():

    config = Config()

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    # CONFIG OPTIONS
    bucket_name = config.source_bucket_name
    dest_bucket_name = config.target_bucket_name
    object_name_prefix = str(config.object_name_prefix)
    iam_role = config.iam_role

    # Create temp replication policy file.
    os.system(
        'cp ./tests/system/config/replication_policy_sample.json temp_policy.json'
    )

    matches = ['_ACCOUNT_ID_', '_REPLICATION_ENABLED_BUCKET_']

    # Read policy and make replacements based on config options.
    with fileinput.FileInput('temp_policy.json', inplace=True) as file:

        # Read each line, substitute any matched placeholders, write it back.
        for line in file:
            if all(x in line for x in matches):
                line = re.sub('(_ACCOUNT_ID_)', str(iam_role), line)
                line = re.sub('(_REPLICATION_ENABLED_BUCKET_)', bucket_name,
                              line)
            elif '_DESTINATION_BUCKET_' in line:
                line = re.sub('(_DESTINATION_BUCKET_)', dest_bucket_name, line)
            elif '_PREFIX_' in line:
                line = re.sub('(_PREFIX_)', object_name_prefix, line)
            # 'line' already ends with a newline; end='' avoids doubling it.
            print(line, end='')

    # Uncomment to inspect the updated replication policy:
    # os.system('cat temp_policy.json')

    command = 'aws s3api put-bucket-replication --bucket ' + \
        bucket_name + ' --replication-configuration file://temp_policy.json'

    exit_status = os.system(command)

    if exit_status == 0:
        logger.info("put-bucket-replication passed!")
    else:
        os.system('rm -rf temp_policy.json')
        logger.error("put-bucket-replication failed!")
        os._exit(exit_status)

    # Delete temp file.
    os.system('rm -rf temp_policy.json')
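This example shells out with os.system and string concatenation. An equivalent subprocess-based call (a sketch, not part of the original script) avoids quoting pitfalls and raises on failure:

import subprocess

# check=True raises CalledProcessError on a non-zero exit status.
subprocess.run(
    ['aws', 's3api', 'put-bucket-replication',
     '--bucket', bucket_name,
     '--replication-configuration', 'file://temp_policy.json'],
    check=True)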
Example 19
async def main():

    config = Config()

    bucket_name = config.source_bucket_name
    total_count = config.total_objects  # Number of objects to upload.
    object_size = config.object_size  # Bytes.

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    s3_site = S3Site(config.endpoint, config.s3_service_name, config.s3_region)

    session = S3Session(logger, s3_site, config.access_key, config.secret_key,
                        config.max_s3_connections)

    reader_list = []
    writer_list = []
    put_task_list = []
    start_time = time.perf_counter()
    # Prepare for upload.
    for i in range(total_count):
        # Generate object name
        object_name = "test_object_" + str(i) + "_sz" + str(object_size)

        request_id = "request-id" + str(i)
        object_reader = FixedObjectDataGenerator(logger, object_name,
                                                 object_size)
        object_writer = S3AsyncPutObject(session, request_id, bucket_name,
                                         object_name, object_size)
        reader_list.append(object_reader)
        writer_list.append(object_writer)

        task = asyncio.ensure_future(writer_list[i].send(
            reader_list[i], reader_list[i].object_size))
        put_task_list.append(task)

    # Trigger uploads and wait for them all to complete.
    await asyncio.gather(*put_task_list)
    end_time = time.perf_counter()
    total_time_ms = int(round((end_time - start_time) * 1000))

    for index in range(total_count):
        # Validate object uploaded successfully.
        assert writer_list[index].get_state() == S3RequestState.COMPLETED

        source_etag = reader_list[index].get_etag()
        target_etag = writer_list[index].get_etag()
        assert target_etag == source_etag, \
            "PUT ETag = {} and Data ETag = {}".format(target_etag, source_etag)

    logger.info("Total time to upload {} objects = {} ms.".format(
        total_count, total_time_ms))
    logger.info("Avg time per upload = {} ms.".format(total_time_ms /
                                                      total_count))

    await session.close()
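The fan-out above creates one task per object up front and awaits them all with asyncio.gather; concurrency is effectively bounded by max_s3_connections on the session. If an explicit cap were wanted, a semaphore wrapper is one option (a sketch under that assumption, not part of the original):

import asyncio

async def bounded_send(semaphore, writer, reader):
    # Allow only a fixed number of uploads to run at once.
    async with semaphore:
        return await writer.send(reader, reader.object_size)

semaphore = asyncio.Semaphore(8)  # illustrative limit
put_task_list = [asyncio.ensure_future(bounded_send(semaphore, w, r))
                 for w, r in zip(writer_list, reader_list)]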
Example 20
def main():

    config = Config()

    bucket_name = config.source_bucket_name
    total_count = config.total_objects  # Number of objects to upload.
    # total_count must be a multiple of max_pool_connections.
    max_pool_connections = config.max_s3_connections
    # max_threads must be <= total_count, and total_count a multiple of it.
    max_threads = config.max_threads_for_boto3
    object_size = config.object_size  # Bytes.

    assert total_count % max_pool_connections == 0, \
        "total_count must be a multiple of max_pool_connections"

    assert max_threads <= total_count and total_count % max_threads == 0, \
        "max_threads must be <= total_count, and total_count a multiple of it"

    # Setup logging and get logger
    log_config_file = os.path.join(os.path.dirname(__file__), 'config',
                                   'logger_config.yaml')

    print("Using log config {}".format(log_config_file))
    logger = setup_logger('client_tests', log_config_file)
    if logger is None:
        print("Failed to configure logging.\n")
        sys.exit(-1)

    # Init Global.
    GlobalTestDataBlock.create(object_size)

    session = boto3.session.Session()

    client = session.client("s3",
                            use_ssl=False,
                            endpoint_url=config.endpoint,
                            aws_access_key_id=config.access_key,
                            aws_secret_access_key=config.secret_key,
                            config=botocore.client.Config(
                                max_pool_connections=max_pool_connections))

    # Create resources for each thread.
    work_items = []
    start_time = time.perf_counter()
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) \
            as executor:
        futures = []
        for i in range(total_count):
            # Generate object name
            object_name = "test_object_" + str(i) + "_sz" + str(object_size)
            work_item = WorkItem(bucket_name, object_name, object_size, client)
            work_items.append(work_item)
            futures.append(
                executor.submit(upload_object,
                                logger=logger,
                                work_item=work_items[i]))
        # Wait for all threads to complete.
        for future in concurrent.futures.as_completed(futures):
            future.result()

    end_time = time.perf_counter()
    total_time_ms_threads_requests = int(round((end_time - start_time) * 1000))

    logger.info(
        "Total time to upload {} objects including thread creation = {} ms.".
        format(total_count, total_time_ms_threads_requests))
    logger.info("Avg time per upload = {} ms.".format(
        total_time_ms_threads_requests / total_count))
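The upload_object worker submitted to the executor is not shown in this excerpt. A hypothetical sketch consistent with the call site, assuming WorkItem simply carries the fields it was constructed with (the original presumably draws its payload from GlobalTestDataBlock):

import os

def upload_object(logger, work_item):
    """Hypothetical worker; os.urandom stands in for GlobalTestDataBlock."""
    body = os.urandom(work_item.object_size)
    work_item.client.put_object(Bucket=work_item.bucket_name,
                                Key=work_item.object_name,
                                Body=body)
    logger.info("Uploaded {}".format(work_item.object_name))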