Example #1
import io
import os
import uuid
from datetime import datetime, timedelta

import certifi
import urllib3
from faker import Factory
from minio import Minio
from minio import PostPolicy
from minio.error import ResponseError


def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('s3.amazonaws.com',
                   os.getenv('ACCESS_KEY'),
                   os.getenv('SECRET_KEY'))

    _http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    print(client.make_bucket(bucket_name))
    print(client.make_bucket(bucket_name+'.unique',
                             location='us-west-1'))

    # Check that the server returns valid error codes.
    try:
        client.make_bucket(bucket_name+'.unique',
                           location='us-west-1')
    except ResponseError as err:
        if str(err.code) in ['BucketAlreadyOwnedByYou', 'BucketAlreadyExists']:
            pass
        else:
            raise

    # Check if bucket was created properly.
    print(client.bucket_exists(bucket_name))
    print(client.bucket_exists(bucket_name+'.unique'))

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name, bucket.creation_date)

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data, file_stat.st_size)

    # Fput a file
    print(client.fput_object(bucket_name, object_name+'-f', 'testfile'))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name))

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    print(client.fget_object(bucket_name, object_name, 'newfile-f'))

    # List all object paths in bucket.
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    presigned_get_object_url = client.presigned_get_object(bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.get(bucket_name, object_name)

    presigned_put_object_url = client.presigned_put_object(bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.put(bucket_name, object_name)

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    print(client.presigned_post_policy(policy))

    # Remove an object.
    print(client.remove_object(bucket_name, object_name))
    print(client.remove_object(bucket_name, object_name+'-f'))

    # Remove a bucket. This operation will only work if your bucket is empty.
    print(client.remove_bucket(bucket_name))
    print(client.remove_bucket(bucket_name+'.unique'))

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')
Example #2
    def put(self, request, user_id, format=None):
        try:
            # Admins (character == 1) may edit anyone; everyone else
            # may only modify their own avatar.
            if request.user.character != 1 and user_id != request.user.user_id:
                return Response({"msg": "Forbidden. You are not the man."},
                                status=403)
        except AttributeError:
            # Anonymous
            return Response({"msg": "Forbidden. You are not logged in."},
                            status=403)
        post_policy = PostPolicy()
        # set bucket name location for uploads.
        post_policy.set_bucket_name(DEFAULT_BUCKET)
        # set key prefix for all incoming uploads.
        file_token = generate_avatar_token(user_id)
        post_policy.set_key_startswith(file_token)
        # set content length for incoming uploads.
        post_policy.set_content_length_range(0, MAX_AVATAR_SIZE)
        # set expiry
        expires_date = datetime.utcnow() + DEFAULT_FILE_URL_TIMEOUT
        post_policy.set_expires(expires_date)

        url, signed_form_data = local_minio_client.presigned_post_policy(
            post_policy)
        response = {"url": url, "request_form": signed_form_data}
        return Response(response)
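
The view only returns the signed URL and form fields; the upload itself happens client-side. A minimal sketch of that second step, assuming the requests library and that the caller chooses a final object key beginning with the signed avatar-token prefix:

import requests

def upload_avatar(url, request_form, key, path):
    # `url` and `request_form` are the two values returned by the view
    # above. Because the policy used set_key_startswith(), the uploader
    # must supply a 'key' form field that starts with the signed token.
    fields = dict(request_form, key=key)
    with open(path, 'rb') as file_data:
        # S3-compatible servers expect the object bytes in a form field
        # named 'file', sent after the policy fields.
        return requests.post(url, data=fields, files={'file': file_data})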
Example #3
def test_presigned_post_policy(client, log_output):
    bucket_name = generate_bucket_name()
    no_of_days = 10
    prefix = 'objectPrefix/'
    try:
        client.make_bucket(bucket_name)
        # Post policy.
        policy = PostPolicy()
        policy.set_bucket_name(bucket_name)
        policy.set_key_startswith(prefix)
        expires_date = datetime.utcnow() + timedelta(days=no_of_days)
        policy.set_expires(expires_date)
        # post_policy arg is a class. To avoid displaying meaningless value
        # for the class, policy settings are made part of the args for
        # clarity and debugging purposes.
        log_output.args['post_policy'] = {
            'bucket_name': bucket_name,
            'prefix': prefix,
            'expires_in_days': no_of_days
        }
        client.presigned_post_policy(policy)
    except Exception:
        # Re-raise as-is so the original traceback is preserved.
        raise
    finally:
        try:
            client.remove_bucket(bucket_name)
        except Exception:
            # Re-raise instead of re-wrapping, preserving the traceback.
            raise
    # Test passes
    print(log_output.json_report())
Example #4
    def presigned_post_form_data(self, bucket, object_path):
        post_policy = PostPolicy()
        post_policy.set_bucket_name(bucket)
        post_policy.set_key(object_path)
        post_policy.set_expires(timezone.now() + timedelta(days=1))

        return self.presigned_post_policy(post_policy)
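
Unlike the prefix-based policies elsewhere in these examples, set_key() pins the exact object name, so the signed form data already carries the final 'key' field. Hypothetical usage, assuming `storage` is an instance of the surrounding class:

url, form_data = storage.presigned_post_form_data('my-bucketname',
                                                  'uploads/report.pdf')
# form_data can be embedded as-is in an HTML form that POSTs to url.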
Example #5
def presigned_post_policy_test(client, bucket_name):
    _log_test()
    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    client.presigned_post_policy(policy)
Example #6
    def generate_container_upload_url(
        self,
        container: Container,
        blob_name: str,
        expires: int = 3600,
        acl: str = None,
        meta_data: MetaData = None,
        content_disposition: str = None,
        content_length: ContentLength = None,
        content_type: str = None,
        cache_control: str = None,
        extra: ExtraOptions = None,
    ) -> FormPost:
        if content_disposition:
            logger.warning(messages.OPTION_NOT_SUPPORTED, "content_disposition")

        if cache_control:
            logger.warning(messages.OPTION_NOT_SUPPORTED, "cache_control")

        meta_data = {} if meta_data is None else meta_data

        post_policy = PostPolicy()
        post_policy.set_bucket_name(container.name)
        post_policy.set_key_startswith(blob_name)

        if content_length:
            min_range, max_range = content_length
            post_policy.set_content_length_range(min_range, max_range)

        if content_type:
            post_policy.set_content_type(content_type)

        for meta_name, meta_value in meta_data.items():
            meta_name = self._OBJECT_META_PREFIX + meta_name
            post_policy.policies.append(("eq", "$%s" % meta_name, meta_value))
            post_policy.form_data[meta_name] = meta_value

        expires_date = datetime.utcnow() + timedelta(seconds=expires)
        post_policy.set_expires(expires_date)

        url, fields = self.client.presigned_post_policy(post_policy)
        return {"url": url, "fields": fields}
Example #7
# Note: my-bucketname, my-objectname, YOUR-ACCESSKEYID, and YOUR-SECRETACCESSKEY
# are dummy values, please replace them with original values.

from datetime import datetime, timedelta

from minio import Minio
from minio import PostPolicy
from minio.error import ResponseError

post_policy = PostPolicy()
# set bucket name location for uploads.
post_policy.set_bucket_name('my-bucketname')
# set key prefix for all incoming uploads.
post_policy.set_key_startswith('my-objectname')
# set content length for incoming uploads.
post_policy.set_content_length_range(10, 1024)

# set expiry 10 days into future.
expires_date = datetime.utcnow() + timedelta(days=10)
post_policy.set_expires(expires_date)

client = Minio('s3.amazonaws.com',
               access_key='YOUR-ACCESSKEYID',
               secret_key='YOUR-SECRETACCESSKEY')
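
The snippet builds the policy and the client but stops short of signing. A minimal completion, mirroring how presigned_post_policy() is called elsewhere in these examples:

try:
    url, signed_form_data = client.presigned_post_policy(post_policy)
    print(url)
    print(signed_form_data)
except ResponseError as err:
    print(err)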
Example #8
import io
import os
import uuid
from datetime import datetime, timedelta

import certifi
import urllib3
from faker import Factory
from minio import Minio, PostPolicy, CopyConditions
# Assumed module layout of the older minio-py releases this snippet targets.
from minio.error import (ResponseError, PreconditionFailed,
                         BucketAlreadyOwnedByYou, BucketAlreadyExists)
from minio.policy import Policy


def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('play.minio.io:9000',
                   'Q3AM3UQ867SPQQA43P2F',
                   'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG')

    _http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    client.make_bucket(bucket_name)

    is_s3 = client._endpoint_url.startswith("s3.amazonaws")
    if is_s3:
        client.make_bucket(bucket_name+'.unique',
                           location='us-west-1')

    # Check that the server returns valid error codes.
    if is_s3:
        try:
            client.make_bucket(bucket_name+'.unique',
                               location='us-west-1')
        except (BucketAlreadyOwnedByYou, BucketAlreadyExists):
            # Either error means the bucket is already there; that's fine.
            pass

    # Check if bucket was created properly.
    client.bucket_exists(bucket_name)
    if is_s3:
        client.bucket_exists(bucket_name+'.unique')

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        _, _ = bucket.name, bucket.creation_date

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    with open('largefile', 'wb') as file_data:
        for _ in range(104857):
            file_data.write(fake.text().encode('utf-8'))

    # Fput a file
    client.fput_object(bucket_name, object_name+'-f', 'testfile')
    if is_s3:
        client.fput_object(bucket_name, object_name+'-f', 'testfile',
                           metadata={'x-amz-storage-class': 'STANDARD_IA'})

    # Fput a large file.
    client.fput_object(bucket_name, object_name+'-large', 'largefile')
    if is_s3:
        client.fput_object(bucket_name, object_name+'-large', 'largefile',
                           metadata={'x-amz-storage-class': 'STANDARD_IA'})

    # Copy a file
    client.copy_object(bucket_name, object_name+'-copy',
                       '/'+bucket_name+'/'+object_name+'-f')

    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        client.copy_object(bucket_name, object_name+'-copy',
                           '/'+bucket_name+'/'+object_name+'-f',
                           copy_conditions)
    except PreconditionFailed as err:
        if err.message != 'At least one of the preconditions you specified did not hold.':
            raise

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name)

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name+'-f')

    # Fetch stats on your large object.
    client.stat_object(bucket_name, object_name+'-large')

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name+'-copy')

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    client.fget_object(bucket_name, object_name, 'newfile-f')

    client.fput_object(bucket_name, object_name+'-f', 'testfile',
                       metadata={'x-amz-meta-testing': 'value'})

    stat = client.fget_object(bucket_name, object_name+'-f', 'newfile-f-custom')
    if 'X-Amz-Meta-Testing' not in stat.metadata:
        raise ValueError('Metadata key \'x-amz-meta-testing\' not found')
    value = stat.metadata['X-Amz-Meta-Testing']
    if value != 'value':
        raise ValueError('Metadata key has unexpected'
                         ' value {0}'.format(value))

    # List all object paths in bucket.
    print("Listing using ListObjects")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        _, _, _, _, _, _ = obj.bucket_name, obj.object_name, \
                           obj.last_modified, \
                           obj.etag, obj.size, \
                           obj.content_type

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        _, _, _, _, _, _ = obj.bucket_name, obj.object_name, \
                           obj.last_modified, \
                           obj.etag, obj.size, \
                           obj.content_type

    presigned_get_object_url = client.presigned_get_object(bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        raise ResponseError(response,
                            'GET',
                            bucket_name,
                            object_name).get_exception()

    presigned_put_object_url = client.presigned_put_object(bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        raise ResponseError(response,
                            'PUT',
                            bucket_name,
                            object_name).get_exception()

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    client.presigned_post_policy(policy)

    # Remove all objects.
    client.remove_object(bucket_name, object_name)
    client.remove_object(bucket_name, object_name+'-f')
    client.remove_object(bucket_name, object_name+'-large')
    client.remove_object(bucket_name, object_name+'-copy')

    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-only policy successfully.
    client.set_bucket_policy(bucket_name, '1/', Policy.READ_ONLY)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '1/', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name+"-{}".format(i)
        # print("object-name: {}".format(curr_object_name))
        client.fput_object(bucket_name, curr_object_name, "testfile")
        object_names.append(curr_object_name)

    # delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Remove objects err is {}".format(del_err))
    if had_errs:
        print("Removing objects FAILED - it had unexpected errors.")
        raise ValueError('remove_objects() returned unexpected errors')
    else:
        print("Removing objects worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print("Deleting buckets and finishing tests.")
    client.remove_bucket(bucket_name)
    if client._endpoint_url.startswith("s3.amazonaws"):
        client.remove_bucket(bucket_name+'.unique')

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')
    os.remove('largefile')
    os.remove('newfile-f-custom')
Example #9
def _create_presigned_post_policy(bucket_name, content_id, mimetype, size):
    from datetime import datetime, timedelta
    from minio import PostPolicy

    post_policy = PostPolicy()
    post_policy.set_bucket_name(bucket_name)
    post_policy.set_key_startswith(content_id)
    post_policy.set_content_length_range(size, size)
    post_policy.set_content_type(mimetype)
    expires_date = datetime.utcnow() + timedelta(days=1)
    post_policy.set_expires(expires_date)
    post_policy.append_policy('eq', '$acl', 'bucket-owner-full-control')

    return post_policy
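
The helper only constructs the policy; producing the signed URL and form fields still takes a Minio client. A sketch with hypothetical argument values, assuming `client` is a configured Minio instance:

policy = _create_presigned_post_policy('my-bucketname', 'content-123/',
                                       'application/pdf', 2048)
url, form_data = client.presigned_post_policy(policy)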
Example #10
    def post_object(self, bucket_name, object_name=None, content_type=None):
        post_policy = PostPolicy()
        post_policy.set_bucket_name(bucket_name)

        if object_name:
            post_policy.set_key(object_name)

        if content_type:
            post_policy.set_content_type(content_type)

        post_policy.set_content_length_range(0, 1024000)
        expires_date = datetime.utcnow() + timedelta(days=10)
        post_policy.set_expires(expires_date)
        return self.minioClient.presigned_post_policy(post_policy)
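
Note that set_content_length_range(0, 1024000) caps uploads at roughly 1 MB. A hypothetical call, where `storage` is an instance of the surrounding class:

url, form_data = storage.post_object('my-bucketname',
                                     object_name='uploads/photo.png',
                                     content_type='image/png')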
Example #11
import io
import os
import uuid
from datetime import datetime, timedelta

import certifi
import urllib3
from faker import Factory
from minio import Minio, PostPolicy, CopyConditions
# Assumed module layout of the older minio-py releases this snippet targets.
from minio.error import ResponseError
from minio.policy import Policy


def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('s3.amazonaws.com', os.getenv('ACCESS_KEY'),
                   os.getenv('SECRET_KEY'))

    _http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                ca_certs=certifi.where())

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    print(client.make_bucket(bucket_name))
    print(client.make_bucket(bucket_name + '.unique', location='us-west-1'))

    # Check that the server returns valid error codes.
    try:
        client.make_bucket(bucket_name + '.unique', location='us-west-1')
    except ResponseError as err:
        if str(err.code) in ['BucketAlreadyOwnedByYou', 'BucketAlreadyExists']:
            pass
        else:
            raise

    # Check if bucket was created properly.
    print(client.bucket_exists(bucket_name))
    print(client.bucket_exists(bucket_name + '.unique'))

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name, bucket.creation_date)

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    # Fput a file
    print(client.fput_object(bucket_name, object_name + '-f', 'testfile'))

    # Copy a file
    print(
        client.copy_object(bucket_name, object_name + '-copy',
                           '/' + bucket_name + '/' + object_name + '-f'))

    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        print(
            client.copy_object(bucket_name, object_name + '-copy',
                               '/' + bucket_name + '/' + object_name + '-f',
                               copy_conditions))
    except ResponseError as err:
        if err.code != 'PreconditionFailed':
            raise
        if err.message != 'At least one of the pre-conditions you specified did not hold':
            raise

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-f'))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-copy'))

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    print(client.fget_object(bucket_name, object_name, 'newfile-f'))

    # List all object paths in bucket.
    print("Listing using ListObjects API")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2 API")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    presigned_get_object_url = client.presigned_get_object(
        bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.get(bucket_name, object_name)

    presigned_put_object_url = client.presigned_put_object(
        bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.put(bucket_name, object_name)

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    print(client.presigned_post_policy(policy))

    # Remove an object.
    print(client.remove_object(bucket_name, object_name))
    print(client.remove_object(bucket_name, object_name + '-f'))
    print(client.remove_object(bucket_name, object_name + '-copy'))

    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name + "-{}".format(i)
        print("object-name: {}".format(curr_object_name))
        print(client.fput_object(bucket_name, curr_object_name, "testfile"))
        object_names.append(curr_object_name)

    # delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Err is {}".format(del_err))
    if had_errs:
        print("remove_objects() FAILED - it had unexpected errors.")
    else:
        print("remove_objects() worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print(client.remove_bucket(bucket_name))
    print(client.remove_bucket(bucket_name + '.unique'))

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')