Example #1
def copy_object_with_conditions_test(client, bucket_name, dest_object_name,
                                     src_object_name):
    _log_test()
    found = client.bucket_exists(bucket_name)
    assert found
    testfile = data_dir + "/" + "SmallFile"

    object_name = str(uuid.uuid4())
    # Put a file
    file_stat = os.stat(testfile)
    client.fput_object(bucket_name, src_object_name, testfile)
    stat = client.stat_object(bucket_name, src_object_name)
    assert stat.size == file_stat.st_size
    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        client.copy_object(bucket_name, dest_object_name,
                           '/' + bucket_name + '/' + src_object_name,
                           copy_conditions)
        copy_stat = client.stat_object(bucket_name, dest_object_name)
        assert copy_stat.size == stat.size
    except PreconditionFailed as err:
        if err.message != 'At least one of the preconditions you specified did not hold.':
            logger.error(err)
    except Exception as err:
        logger.error(err)
        raise
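    # By contrast, a condition built from the source object's real ETag lets the
    # copy proceed (a sketch continuing the test above, not part of the original;
    # `stat.etag` comes from the stat_object() call earlier).
    matching_conditions = CopyConditions()
    matching_conditions.set_match_etag(stat.etag)
    client.copy_object(bucket_name, dest_object_name,
                       '/' + bucket_name + '/' + src_object_name,
                       matching_conditions)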
Example #2
    def copy_object(self, to_bucket_name, to_object_name,
                    from_bucket_object_name):
        try:
            ret = self.client.copy_object(to_bucket_name, to_object_name,
                                          from_bucket_object_name,
                                          CopyConditions())
            print(ret)
            return True
        except ResponseError:
            logging.exception("Could not copy")

        return False
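    # Usage sketch (illustrative names, not part of the original snippet):
    # given an instance `mc` of the enclosing class,
    #   mc.copy_object('dst-bucket', 'dst-object', '/src-bucket/src-object')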
Example #3
def test_copy_object(client, log_output):
    # Get a unique bucket_name and object_name
    log_output.args['bucket_name'] = bucket_name = generate_bucket_name()
    object_name = str(uuid.uuid4())
    log_output.args['object_source'] = object_source = object_name + '-source'
    log_output.args['object_name'] = object_copy = object_name + '-copy'
    try:
        client.make_bucket(bucket_name)
        # Upload a streaming object of 1KiB
        KB_1 = 1024  # 1KiB.
        KB_1_reader = LimitedRandomReader(KB_1)
        client.put_object(bucket_name, object_source, KB_1_reader, KB_1)
        # Perform a server side copy of an object
        client.copy_object(bucket_name, object_copy,
                           '/' + bucket_name + '/' + object_source)

        client.stat_object(bucket_name, object_copy)
        try:
            # Perform a server-side copy with a pre-condition that is expected to fail
            etag = 'test-etag'
            copy_conditions = CopyConditions()
            copy_conditions.set_match_etag(etag)
            log_output.args['conditions'] = {'set_match_etag': etag}
            client.copy_object(bucket_name, object_copy,
                               '/' + bucket_name + '/' + object_source,
                               copy_conditions)
        except PreconditionFailed as err:
            if err.message != 'At least one of the preconditions you specified did not hold.':
                raise Exception(err)
    except Exception as err:
        raise Exception(err)
    finally:
        try:
            client.remove_object(bucket_name, object_source)
            client.remove_object(bucket_name, object_copy)
            client.remove_bucket(bucket_name)
        except Exception as err:
            raise Exception(err)
    # Test passes
    print(log_output.json_report())
Example #4
def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('play.minio.io:9000',
                   'Q3AM3UQ867SPQQA43P2F',
                   'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG')

    _http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    client.make_bucket(bucket_name)

    is_s3 = client._endpoint_url.startswith("s3.amazonaws")
    if is_s3:
        client.make_bucket(bucket_name+'.unique',
                           location='us-west-1')

    # Check that the server returns valid error codes.
    if is_s3:
        try:
            client.make_bucket(bucket_name+'.unique',
                               location='us-west-1')
        except BucketAlreadyOwnedByYou as err:
            pass
        except BucketAlreadyExists as err:
            pass
        except ResponseError as err:
            raise

    # Check if bucket was created properly.
    client.bucket_exists(bucket_name)
    if is_s3:
        client.bucket_exists(bucket_name+'.unique')

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        _, _ = bucket.name, bucket.creation_date

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    with open('largefile', 'wb') as file_data:
        for i in range(0, 104857):
            file_data.write(fake.text().encode('utf-8'))

    # Fput a file
    client.fput_object(bucket_name, object_name+'-f', 'testfile')
    if is_s3:
        client.fput_object(bucket_name, object_name+'-f', 'testfile',
                           metadata={'x-amz-storage-class': 'STANDARD_IA'})

    # Fput a large file.
    client.fput_object(bucket_name, object_name+'-large', 'largefile')
    if is_s3:
        client.fput_object(bucket_name, object_name+'-large', 'largefile',
                           metadata={'x-amz-storage-class': 'STANDARD_IA'})

    # Copy a file
    client.copy_object(bucket_name, object_name+'-copy',
                       '/'+bucket_name+'/'+object_name+'-f')

    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        client.copy_object(bucket_name, object_name+'-copy',
                           '/'+bucket_name+'/'+object_name+'-f',
                           copy_conditions)
    except PreconditionFailed as err:
        if err.message != 'At least one of the preconditions you specified did not hold.':
            raise

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name)

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name+'-f')

    # Fetch stats on your large object.
    client.stat_object(bucket_name, object_name+'-large')

    # Fetch stats on your object.
    client.stat_object(bucket_name, object_name+'-copy')

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    client.fget_object(bucket_name, object_name, 'newfile-f')

    client.fput_object(bucket_name, object_name+'-f', 'testfile',
                       metadata={'x-amz-meta-testing': 'value'})

    stat = client.fget_object(bucket_name, object_name+'-f', 'newfile-f-custom')
    if 'X-Amz-Meta-Testing' not in stat.metadata:
        raise ValueError('Metadata key \'x-amz-meta-testing\' not found')
    value = stat.metadata['X-Amz-Meta-Testing']
    if value != 'value':
        raise ValueError('Metadata key has unexpected'
                         ' value {0}'.format(value))

    # List all object paths in bucket.
    print("Listing using ListObjects")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        _, _, _, _, _, _ = obj.bucket_name, obj.object_name, \
                           obj.last_modified, \
                           obj.etag, obj.size, \
                           obj.content_type

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        _, _, _, _, _, _ = obj.bucket_name, obj.object_name, \
                           obj.last_modified, \
                           obj.etag, obj.size, \
                           obj.content_type

    presigned_get_object_url = client.presigned_get_object(bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        raise ResponseError(response,
                            'GET',
                            bucket_name,
                            object_name).get_exception()

    presigned_put_object_url = client.presigned_put_object(bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        raise ResponseError(response,
                            'PUT',
                            bucket_name,
                            object_name).get_exception()

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow()+timedelta(days=10)
    policy.set_expires(expires_date)
    client.presigned_post_policy(policy)

    # Remove all objects.
    client.remove_object(bucket_name, object_name)
    client.remove_object(bucket_name, object_name+'-f')
    client.remove_object(bucket_name, object_name+'-large')
    client.remove_object(bucket_name, object_name+'-copy')

    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-only policy successfully.
    client.set_bucket_policy(bucket_name, '1/', Policy.READ_ONLY)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '1/', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name+"-{}".format(i)
        # print("object-name: {}".format(curr_object_name))
        client.fput_object(bucket_name, curr_object_name, "testfile")
        object_names.append(curr_object_name)

    # delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Remove objects err is {}".format(del_err))
    if had_errs:
        print("Removing objects FAILED - it had unexpected errors.")
        raise ValueError("remove_objects() returned unexpected errors")
    else:
        print("Removing objects worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print("Deleting buckets and finishing tests.")
    client.remove_bucket(bucket_name)
    if client._endpoint_url.startswith("s3.amazonaws"):
        client.remove_bucket(bucket_name+'.unique')

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')
    os.remove('largefile')
    os.remove('newfile-f-custom')
Example #5
from datetime import datetime, timezone

from minio import CopyConditions, Minio

client = Minio('s3.amazonaws.com',
               access_key='YOUR-ACCESSKEY',
               secret_key='YOUR-SECRETKEY')

# Copy an object from one bucket to another.
result = client.copy_object(
    "my-bucket",
    "my-object",
    "/my-sourcebucket/my-sourceobject",
)
print(result.object_name, result.version_id)

# Copy an object with conditions.
copy_conditions = CopyConditions()
# Set modified condition, copy object modified since 1st April 2014.
mod_since = datetime(2014, 4, 1, tzinfo=timezone.utc)
copy_conditions.set_modified_since(mod_since)

# Set unmodified condition, copy object unmodified since 1st April 2014.
# copy_conditions.set_unmodified_since(mod_since)

# Set matching ETag condition, copy object which matches the following ETag.
# copy_conditions.set_match_etag("31624deb84149d2f8ef9c385918b653a")

# Set matching ETag except condition, copy object which does not match the
# following ETag.
# copy_conditions.set_match_etag_except("31624deb84149d2f8ef9c385918b653a")
result = client.copy_object(
    "my-bucket",
    "my-object",
    "/my-sourcebucket/my-sourceobject",
    copy_conditions,  # apply the conditions configured above
)
print(result.object_name, result.version_id)
Example #6
# Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
# my-objectname are dummy values, please replace them with original values.

import time
from datetime import datetime

from minio import Minio, CopyConditions
from minio.error import ResponseError

client = Minio('s3.amazonaws.com',
               access_key='YOUR-ACCESSKEY',
               secret_key='YOUR-SECRETKEY')

# client.trace_on(sys.stderr)
copy_conditions = CopyConditions()
# Set modified condition, copy object modified since 1st April 2014.
t = (2014, 4, 1, 0, 0, 0, 0, 0, 0)
mod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_modified_since(mod_since)

# Set unmodified condition, copy object unmodified since 2014 April.
# copy_conditions.set_unmodified_since(mod_since)

# Set matching ETag condition, copy object which matches the following ETag.
# copy_conditions.set_match_etag("31624deb84149d2f8ef9c385918b653a")

# Set matching ETag except condition, copy object which does not match the
# following ETag.
# copy_conditions.set_match_etag_except("31624deb84149d2f8ef9c385918b653a")
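
# Completion sketch (not part of the original snippet): perform the server-side
# copy with the conditions configured above. The source path below is an
# assumed dummy value, matching the note at the top.
try:
    copy_result = client.copy_object("my-bucketname", "my-objectname",
                                     "/my-sourcebucketname/my-sourceobjectname",
                                     copy_conditions)
    print(copy_result)
except ResponseError as err:
    print(err)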
Example #7
import time
from datetime import datetime
from minio import CopyConditions
from minio import Minio
from minio.error import ResponseError

# Some conditions for copy_object.  Note: the maximum object size this API supports is 5 TB.

# The first run raised: minio.error.PreconditionFailed: PreconditionFailed: message: At least one of the preconditions you specified did not hold.
# The error goes away if the last arguments are left off the copy_object() call: copy_conditions, metadata=metadata
copy_conditions = CopyConditions()
# Set modified condition, copy object modified since 1st April 2014.
t = (2014, 4, 1, 0, 0, 0, 0, 0, 0)
mod_since = datetime.utcfromtimestamp(time.mktime(t))
copy_conditions.set_modified_since(mod_since)

# Set unmodified condition, copy object unmodified since 2014 April.
copy_conditions.set_unmodified_since(mod_since)

# Set matching ETag condition, copy object which matches the following ETag.
copy_conditions.set_match_etag("31624deb84149d2f8ef9c385918b653a")

# Set matching ETag except condition, copy object which does not match the following ETag.
copy_conditions.set_match_etag_except("31624deb84149d2f8ef9c385918b653a")

# Set metadata
metadata = {"test-key": "test-data"}

minioClient = Minio('ggtaiwanmini.117503445.top:9000',
                    access_key='admin',
                    secret_key='admin123',
                    secure=False)  # assumption: endpoint served over plain HTTP
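
# Sketch of the copy call the notes above describe (minio-py 5.x argument
# order; bucket and object names are assumed dummy values):
try:
    copy_result = minioClient.copy_object("my-bucketname", "my-objectname",
                                          "/my-sourcebucketname/my-sourceobjectname",
                                          copy_conditions, metadata=metadata)
    print(copy_result)
except ResponseError as err:
    print(err)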
Example #8
def main():
    """
    Functional testing of minio python library.
    """
    fake = Factory.create()
    client = Minio('s3.amazonaws.com', os.getenv('ACCESS_KEY'),
                   os.getenv('SECRET_KEY'))

    _http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                ca_certs=certifi.where())

    # Get unique bucket_name, object_name.
    bucket_name = str(uuid.uuid4())
    object_name = str(uuid.uuid4())

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    print(client.make_bucket(bucket_name))
    print(client.make_bucket(bucket_name + '.unique', location='us-west-1'))

    # Check that the server returns valid error codes.
    try:
        client.make_bucket(bucket_name + '.unique', location='us-west-1')
    except ResponseError as err:
        if str(err.code) in ['BucketAlreadyOwnedByYou', 'BucketAlreadyExists']:
            pass
        else:
            raise

    # Check if bucket was created properly.
    print(client.bucket_exists(bucket_name))
    print(client.bucket_exists(bucket_name + '.unique'))

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name, bucket.creation_date)

    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    # Fput a file
    print(client.fput_object(bucket_name, object_name + '-f', 'testfile'))

    # Copy a file
    print(
        client.copy_object(bucket_name, object_name + '-copy',
                           '/' + bucket_name + '/' + object_name + '-f'))

    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        print(
            client.copy_object(bucket_name, object_name + '-copy',
                               '/' + bucket_name + '/' + object_name + '-f',
                               copy_conditions))
    except ResponseError as err:
        if err.code != 'PreconditionFailed':
            raise
        if err.message != 'At least one of the pre-conditions you specified did not hold':
            raise

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-f'))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-copy'))

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    print(client.fget_object(bucket_name, object_name, 'newfile-f'))

    # List all object paths in bucket.
    print("Listing using ListObjects API")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2 API")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    presigned_get_object_url = client.presigned_get_object(
        bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.get(bucket_name, object_name)

    presigned_put_object_url = client.presigned_put_object(
        bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.put(bucket_name, object_name)

    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')

    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    print(client.presigned_post_policy(policy))

    # Remove an object.
    print(client.remove_object(bucket_name, object_name))
    print(client.remove_object(bucket_name, object_name + '-f'))
    print(client.remove_object(bucket_name, object_name + '-copy'))

    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name + "-{}".format(i)
        print("object-name: {}".format(curr_object_name))
        print(client.fput_object(bucket_name, curr_object_name, "testfile"))
        object_names.append(curr_object_name)

    # delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Err is {}".format(del_err))
    if had_errs:
        print("remove_objects() FAILED - it had unexpected errors.")
    else:
        print("remove_objects() worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print(client.remove_bucket(bucket_name))
    print(client.remove_bucket(bucket_name + '.unique'))

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')