def main():
    """
    Functional testing of minio python library.

    Exercises bucket creation, object upload/download, listing,
    presigned GET/PUT URLs and post policies against s3.amazonaws.com,
    then removes all buckets, objects and temporary local files.
    """
    fake = Factory.create()
    client = Minio('s3.amazonaws.com',
                   os.getenv('ACCESS_KEY'),
                   os.getenv('SECRET_KEY'))

    _http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where()
    )

    # Unique object name per run (bucket name is fixed below so the
    # original uuid bucket name was dead code and has been removed).
    object_name = uuid.uuid4().__str__()

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    print(client.make_bucket(bucket_name))
    print(client.make_bucket(bucket_name + '.unique', location='us-west-1'))

    # Check that the server returns a valid error code on re-creation.
    try:
        client.make_bucket(bucket_name + '.unique', location='us-west-1')
    except ResponseError as err:
        # Re-creating an existing bucket is expected; anything else is a bug.
        if str(err.code) not in ['BucketAlreadyOwnedByYou',
                                 'BucketAlreadyExists']:
            raise

    # Check if bucket was created properly.
    print(client.bucket_exists(bucket_name))
    print(client.bucket_exists(bucket_name + '.unique'))

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name, bucket.creation_date)

    # The 'with' block closes the file; the explicit close() was redundant.
    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    # Fput a file
    print(client.fput_object(bucket_name, object_name + '-f', 'testfile'))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name))

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    print(client.fget_object(bucket_name, object_name, 'newfile-f'))

    # List all object paths in bucket.
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    # Presigned GET: the URL must be directly fetchable.
    presigned_get_object_url = client.presigned_get_object(
        bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.get(bucket_name, object_name)

    # Presigned PUT: uploading through the URL must round-trip the bytes.
    presigned_put_object_url = client.presigned_put_object(
        bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.put(bucket_name, object_name)
    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')
    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    print(client.presigned_post_policy(policy))

    # Remove an object.
    print(client.remove_object(bucket_name, object_name))
    print(client.remove_object(bucket_name, object_name + '-f'))

    # Remove a bucket. This operation will only work if your bucket is empty.
    print(client.remove_bucket(bucket_name))
    print(client.remove_bucket(bucket_name + '.unique'))

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')
def put(self, request, user_id, format=None):
    """Return a presigned POST policy for uploading this user's avatar.

    Admins (``character == 1``) may act on any user; everyone else may
    only touch their own record. Anonymous requests are rejected.
    """
    try:
        is_admin = request.user.character == 1
        if not is_admin and user_id != request.user.user_id:
            # Non-admins must match the target user id.
            return Response({"msg": "Forbidden. You are not the man."},
                            status=403)
    except AttributeError:
        # Anonymous users carry no character/user_id attributes.
        return Response({"msg": "Forbidden. You are not logged in."},
                        status=403)

    policy = PostPolicy()
    # Bucket that receives all avatar uploads.
    policy.set_bucket_name(DEFAULT_BUCKET)
    # Constrain incoming uploads to this user's avatar key prefix.
    file_token = generate_avatar_token(user_id)
    policy.set_key_startswith(file_token)
    # Cap the accepted upload size.
    policy.set_content_length_range(0, MAX_AVATAR_SIZE)
    # Signed form data is only valid for a limited window.
    expires_date = datetime.utcnow() + DEFAULT_FILE_URL_TIMEOUT
    policy.set_expires(expires_date)

    url, signed_form_data = local_minio_client.presigned_post_policy(policy)
    return Response({"url": url, "request_form": signed_form_data})
def post_object(self, bucket_name, object_name=None, content_type=None,
                max_size=1024000, expires_days=10):
    """Build and sign a POST policy for browser-based uploads.

    Args:
        bucket_name: Target bucket for the upload.
        object_name: Exact object key to enforce; any key when None.
        content_type: Content type to enforce; any type when None.
        max_size: Maximum accepted upload size in bytes. Defaults to the
            previously hard-coded 1024000.
        expires_days: Days until the signed policy expires. Defaults to
            the previously hard-coded 10.

    Returns:
        The result of ``presigned_post_policy`` (signed form data).
    """
    post_policy = PostPolicy()
    post_policy.set_bucket_name(bucket_name)
    if object_name:
        post_policy.set_key(object_name)
    if content_type:
        post_policy.set_content_type(content_type)
    # Server-side size guard: uploads outside [0, max_size] are rejected.
    post_policy.set_content_length_range(0, max_size)
    expires_date = datetime.utcnow() + timedelta(days=expires_days)
    post_policy.set_expires(expires_date)
    return self.minioClient.presigned_post_policy(post_policy)
# Example: build a presigned POST policy and print an equivalent curl
# command for uploading a file with it.
# Fixes: the visible `try:` had no matching `except` clause, and
# datetime/timedelta were used without being imported.
from datetime import datetime, timedelta

from minio import Minio
from minio import PostPolicy
from minio.error import ResponseError

post_policy = PostPolicy()
# set bucket name location for uploads.
post_policy.set_bucket_name('my-bucketname')
# set key prefix for all incoming uploads.
post_policy.set_key_startswith('my-objectname')
# set content length for incoming uploads.
post_policy.set_content_length_range(10, 1024)
# set expiry 10 days into future.
expires_date = datetime.utcnow() + timedelta(days=10)
post_policy.set_expires(expires_date)

client = Minio('s3.amazonaws.com',
               access_key='YOUR-ACCESSKEYID',
               secret_key='YOUR-SECRETACCESSKEY')

try:
    curl_str = 'curl -X POST my-bucketname.s3.amazonaws.com/'
    curl_cmd = [curl_str]
    signed_form_data = client.presigned_post_policy(post_policy)
    for field in signed_form_data:
        curl_cmd.append('-F {0}={1}'.format(field, signed_form_data[field]))
    # print curl command to upload files.
    curl_cmd.append('-F file=@<FILE>')
    print(' '.join(curl_cmd))
except ResponseError as err:
    print(err)
def main():
    """
    Functional testing of minio python library.

    Exercises bucket creation, object upload/copy/download, listing
    (v1 and v2 APIs), presigned GET/PUT URLs, post policies, bucket
    policies and multi-object delete against s3.amazonaws.com, then
    removes all buckets, objects and temporary local files.
    """
    fake = Factory.create()
    client = Minio('s3.amazonaws.com',
                   os.getenv('ACCESS_KEY'),
                   os.getenv('SECRET_KEY'))

    _http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                ca_certs=certifi.where())

    # Unique object name per run (bucket name is fixed below so the
    # original uuid bucket name was dead code and has been removed).
    object_name = uuid.uuid4().__str__()

    # Enable trace
    # client.trace_on(sys.stderr)

    # Make a new bucket.
    bucket_name = 'minio-pytest'

    print(client.make_bucket(bucket_name))
    print(client.make_bucket(bucket_name + '.unique', location='us-west-1'))

    # Check that the server returns a valid error code on re-creation.
    try:
        client.make_bucket(bucket_name + '.unique', location='us-west-1')
    except ResponseError as err:
        # Re-creating an existing bucket is expected; anything else is a bug.
        if str(err.code) not in ['BucketAlreadyOwnedByYou',
                                 'BucketAlreadyExists']:
            raise

    # Check if bucket was created properly.
    print(client.bucket_exists(bucket_name))
    print(client.bucket_exists(bucket_name + '.unique'))

    # List all buckets.
    buckets = client.list_buckets()
    for bucket in buckets:
        print(bucket.name, bucket.creation_date)

    # The 'with' block closes the file; the explicit close() was redundant.
    with open('testfile', 'wb') as file_data:
        file_data.write(fake.text().encode('utf-8'))

    # Put a file
    file_stat = os.stat('testfile')
    with open('testfile', 'rb') as file_data:
        client.put_object(bucket_name, object_name, file_data,
                          file_stat.st_size)

    # Fput a file
    print(client.fput_object(bucket_name, object_name + '-f', 'testfile'))

    # Copy a file
    print(client.copy_object(bucket_name, object_name + '-copy',
                             '/' + bucket_name + '/' + object_name + '-f'))

    # A copy guarded by a non-matching ETag must fail with
    # PreconditionFailed and the expected message.
    try:
        copy_conditions = CopyConditions()
        copy_conditions.set_match_etag('test-etag')
        print(client.copy_object(bucket_name, object_name + '-copy',
                                 '/' + bucket_name + '/' + object_name + '-f',
                                 copy_conditions))
    except ResponseError as err:
        if err.code != 'PreconditionFailed':
            raise
        if err.message != ('At least one of the pre-conditions you '
                           'specified did not hold'):
            raise

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-f'))

    # Fetch stats on your object.
    print(client.stat_object(bucket_name, object_name + '-copy'))

    # Get a full object
    object_data = client.get_object(bucket_name, object_name)
    with open('newfile', 'wb') as file_data:
        for data in object_data:
            file_data.write(data)

    # Get a full object locally.
    print(client.fget_object(bucket_name, object_name, 'newfile-f'))

    # List all object paths in bucket.
    print("Listing using ListObjects API")
    objects = client.list_objects(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    # List all object paths in bucket using V2 API.
    print("Listing using ListObjectsV2 API")
    objects = client.list_objects_v2(bucket_name, recursive=True)
    for obj in objects:
        print(obj.bucket_name, obj.object_name, obj.last_modified,
              obj.etag, obj.size, obj.content_type)

    # Presigned GET: the URL must be directly fetchable.
    presigned_get_object_url = client.presigned_get_object(
        bucket_name, object_name)
    response = _http.urlopen('GET', presigned_get_object_url)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.get(bucket_name, object_name)

    # Presigned PUT: uploading through the URL must round-trip the bytes.
    presigned_put_object_url = client.presigned_put_object(
        bucket_name, object_name)
    value = fake.text().encode('utf-8')
    data = io.BytesIO(value).getvalue()
    response = _http.urlopen('PUT', presigned_put_object_url, body=data)
    if response.status != 200:
        response_error = ResponseError(response)
        raise response_error.put(bucket_name, object_name)
    object_data = client.get_object(bucket_name, object_name)
    if object_data.read() != value:
        raise ValueError('Bytes not equal')

    # Post policy.
    policy = PostPolicy()
    policy.set_bucket_name(bucket_name)
    policy.set_key_startswith('objectPrefix/')
    expires_date = datetime.utcnow() + timedelta(days=10)
    policy.set_expires(expires_date)
    print(client.presigned_post_policy(policy))

    # Remove an object.
    print(client.remove_object(bucket_name, object_name))
    print(client.remove_object(bucket_name, object_name + '-f'))
    print(client.remove_object(bucket_name, object_name + '-copy'))

    # A fresh bucket must report no policy.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Set read-write policy successfully.
    client.set_bucket_policy(bucket_name, '', Policy.READ_WRITE)

    # Reset policy to NONE.
    client.set_bucket_policy(bucket_name, '', Policy.NONE)

    # Validate if the policy is reverted back to NONE.
    policy_name = client.get_bucket_policy(bucket_name)
    if policy_name != Policy.NONE:
        raise ValueError('Policy name is invalid ' + policy_name)

    # Upload some new objects to prepare for multi-object delete test.
    print("Prepare for remove_objects() test.")
    object_names = []
    for i in range(10):
        curr_object_name = object_name + "-{}".format(i)
        print("object-name: {}".format(curr_object_name))
        print(client.fput_object(bucket_name, curr_object_name, "testfile"))
        object_names.append(curr_object_name)

    # Delete the objects in a single library call.
    print("Performing remove_objects() test.")
    del_errs = client.remove_objects(bucket_name, object_names)
    had_errs = False
    for del_err in del_errs:
        had_errs = True
        print("Err is {}".format(del_err))
    if had_errs:
        print("remove_objects() FAILED - it had unexpected errors.")
    else:
        print("remove_objects() worked as expected.")

    # Remove a bucket. This operation will only work if your bucket is empty.
    print(client.remove_bucket(bucket_name))
    print(client.remove_bucket(bucket_name + '.unique'))

    # Remove temporary files.
    os.remove('testfile')
    os.remove('newfile')
    os.remove('newfile-f')