# Example #1 (score: 0)
 def _make_retrying_upload_file_call(self):
     """Upload self.file_path as a single object, injecting the default retry strategy.

     Mutates self.kwargs by setting "retry_strategy" before forwarding the
     kwargs to UploadManager.upload_file. Returns the upload response.
     """
     # Force the SDK default retry strategy onto this call's kwargs.
     self.kwargs["retry_strategy"] = DEFAULT_RETRY_STRATEGY
     # Multipart uploads are disabled for this call path.
     manager = UploadManager(
         self.object_storage_client, allow_multipart_uploads=False)
     return manager.upload_file(
         self.namespace_name,
         self.bucket_name,
         self.object_name,
         self.file_path,
         **self.kwargs)
def upload_objects_to_os_bucket(namespace):
    """Create a bucket (name read interactively) and upload every *tar.gz
    file from a user-supplied directory using parallel multipart uploads.

    namespace : Object Storage namespace to create the bucket in.

    Relies on module-level ``object_storage`` client, ``compartment_dets``,
    ``MEBIBYTE`` and ``progresscallback`` defined elsewhere in this file.

    Raises FileNotFoundError when the entered directory does not exist.
    """
    bucket_name = input("\n\tEnter the bucket name to upload objects: ")
    request = CreateBucketDetails(name=bucket_name,
                                  compartment_id=compartment_dets)
    try:
        bucket = object_storage.create_bucket(namespace, request)
        print("\n\t" + bucket.data.etag)
    except Exception as e:
        # Python 3 exceptions have no ``.message`` attribute; str(e) is safe
        # (the original ``e.message`` raised AttributeError on failure).
        print("\n\t" + str(e))

    directory = input(
        "\n\tEnter the path to move files to OCI object storage: ")

    # Explicit check instead of ``assert`` — asserts are stripped under
    # ``python -O`` and must not guard user input.
    if not os.path.exists(directory):
        raise FileNotFoundError(
            "I did not find the directory at, " + str(directory))

    print("\n\tFiles in directory " + str(directory) + " will be uploaded")

    # NOTE(review): matches any name ending in "tar.gz" (not just ".tar.gz");
    # kept as-is to preserve the original selection behavior.
    files_to_process = [
        file for file in os.listdir(directory) if file.endswith('tar.gz')
    ]

    for upload_file in files_to_process:
        print('\n\tUploading file {}'.format(upload_file))
        print('\n\t' + upload_file)
        partsize = 1000 * MEBIBYTE  # 1000 MiB per part
        object_name = upload_file
        filename = os.path.join(directory, upload_file)
        upload_manager = UploadManager(object_storage,
                                       allow_parallel_uploads=True,
                                       allow_multipart_uploads=True)
        response = upload_manager.upload_file(
            namespace,
            bucket_name,
            object_name,
            filename,
            part_size=partsize,
            progress_callback=progresscallback)
        # UploadManager responses carry no body on success.
        if str(response.data) == 'None':
            print("\n\tUpload Complete")
# Example #3 (score: 0)
 def upload_object(self, file_properties, user=None):
     """Upload a media file to Object Storage as "<user>_<file_name>".

     file_properties : (file, file_name) pair
     user            : stored on self.user and used as the object-name prefix

     Returns the UploadManager response; the local file is deleted when the
     upload returns HTTP 200.
     """
     self.user = user
     media_file, media_name = file_properties
     self.set_config()
     target_object = "{}_{}".format(self.user, media_name)
     chunk_bytes = 2 * MEBIBYTE  # part size (in bytes)
     manager = UploadManager(self.object_storage,
                             allow_parallel_uploads=True,
                             parallel_process_count=3)
     local_path = self.get_media_file_path(media_file)
     result = manager.upload_file(
         self.namespace,
         self.bucket_name,
         target_object,
         local_path,
         part_size=chunk_bytes,
         progress_callback=self.progress_callback)
     # The local copy is redundant once the upload has succeeded.
     if result.status == 200:
         os.remove(local_path)
     return result
# Example #4 (score: 0)
def upload_data_Object_storage():
    """Upload every *csv file from the bundled passenger-movement data
    directory to Object Storage using parallel multipart uploads.

    Relies on module-level ``config``, ``namespace``, ``bucket_name``,
    ``MEBIBYTE`` and ``progresscallback`` defined elsewhere in this file.
    """
    # os.path.join is portable — the original hard-coded a Windows '\\'
    # separator, which produced a broken path on any other OS.
    directory = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'civil-aviation-authority-of-the-philippines-passenger-movement-data')
    object_storage = oci.object_storage.ObjectStorageClient(config)
    files_to_process = [
        file for file in os.listdir(directory) if file.endswith('csv')
    ]

    try:
        for upload_file in files_to_process:
            print('Uploading file {}'.format(upload_file))
            print(upload_file)
            partsize = 1000 * MEBIBYTE  # 1000 MiB per part
            print(partsize)
            object_name = upload_file
            filename = os.path.join(directory, upload_file)
            upload_manager = UploadManager(object_storage,
                                           allow_parallel_uploads=True,
                                           allow_multipart_uploads=True)
            response = upload_manager.upload_file(
                namespace, bucket_name, object_name, filename,
                part_size=partsize, progress_callback=progresscallback)
            print(response.data)
    except Exception as e:
        # Python 3 exceptions have no ``.message`` attribute; str(e) is safe
        # (the original ``e.message`` raised AttributeError on failure).
        print(str(e))
# Example #5 (score: 0)
file_size_in_mebibytes = 10
sample_content = b'a'

# Create a sample file of exactly file_size_in_mebibytes MiB, writing one
# mebibyte-sized chunk per iteration.
with open(filename, 'wb') as f:
    for _ in range(file_size_in_mebibytes):
        f.write(sample_content * MEBIBYTE)

print("Uploading new object {!r}".format(object_name))

# UploadManager switches to multipart automatically when the part size is
# smaller than the file size.
part_size = 2 * MEBIBYTE  # part size (in bytes)
upload_manager = UploadManager(
    object_storage,
    allow_parallel_uploads=True,
    parallel_process_count=3)
response = upload_manager.upload_file(
    namespace,
    bucket_name,
    object_name,
    filename,
    part_size=part_size,
    progress_callback=progress_callback)

# To force single-part uploads instead, construct the UploadManager with
# allow_multipart_uploads=False and call upload_file the same way.

# Local sample file is no longer needed — clean it up.
os.remove(filename)

print("Deleting object {}".format(object_name))
object_storage.delete_object(namespace, bucket_name, object_name)

print("Deleting bucket {}".format(bucket_name))
# Example #6 (score: 0)
 def _make_retrying_upload_file_call(self):
     """Upload self.file_path with multipart disabled, forwarding self.kwargs."""
     manager = UploadManager(
         self.object_storage_client, allow_multipart_uploads=False)
     return manager.upload_file(
         self.namespace_name,
         self.bucket_name,
         self.object_name,
         self.file_path,
         **self.kwargs)
# Example #7 (score: 0)
class OCIObjectStorageClient():
    '''
    Object Storage Client

    Thin wrapper around the OCI ObjectStorageClient and UploadManager with
    list/put/get/delete helpers. Raises ObjectStorageException on any
    unexpected HTTP status.
    '''
    def __init__(self, config_file, config_section, logger=None):
        '''
        Create ObjectStorageClient for OCI

        config_file     :   OCI Configuration File
        config_section  :   OCI Config File Section
        logger          :   Logger, if not given one will be created
        '''
        config = from_file(config_file, config_section)
        self.object_storage_client = ObjectStorageClient(config, retry_strategy=DEFAULT_RETRY_STRATEGY)
        self.upload_manager = UploadManager(self.object_storage_client)
        if logger is None:
            self.logger = setup_logger("oci_client", 10)
        else:
            self.logger = logger

    def object_list(self, namespace_name, bucket_name):
        '''
        Return list of object storage objects (as dicts with
        name/md5/size/timeCreated fields)

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket Name
        '''
        self.logger.info("Retrieving object list from namespace %s and bucket %s",
                         namespace_name, bucket_name)
        # list_call_get_all_results follows pagination for us.
        all_objects_response = list_call_get_all_results(self.object_storage_client.list_objects,
                                                         namespace_name, bucket_name, fields='name,md5,size,timeCreated')
        return [to_dict(obj) for obj in all_objects_response.data.objects]


    def object_put(self, namespace_name, bucket_name, object_name, file_name, md5_sum=None, resume_upload=False):
        '''
        Upload object to object storage, optionally resuming a pending
        multipart upload for the same object name. Returns True on success.

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket name
        object_name     :   Name of uploaded object
        file_name       :   Name of local file to upload
        md5_sum         :   Md5 sum of local file
        resume_upload   :   Try to resume a pending multipart upload first
        '''
        self.logger.info(f'Starting upload of file "{file_name}" to namespace "{namespace_name}" '
                         f'bucket "{bucket_name}" and object name "{object_name}"')
        upload_resumed = False
        if resume_upload:
            self.logger.debug(f'Checking if object name "{object_name}" is in list of pending uploads')
            multipart_uploads = list_call_get_all_results(self.object_storage_client.list_multipart_uploads,
                                                          namespace_name, bucket_name)
            for multipart_upload in multipart_uploads.data:
                # Assume namespace and bucket are the same
                if multipart_upload.object == object_name:
                    self.logger.debug(f'Resuming file upload {multipart_upload.upload_id} for object "{object_name}"')
                    response = self.upload_manager.resume_upload_file(namespace_name, bucket_name, object_name, file_name, multipart_upload.upload_id)
                    upload_resumed = True
                    break
        if not upload_resumed:
            response = self.upload_manager.upload_file(namespace_name, bucket_name, object_name, file_name, content_md5=md5_sum)
        if response.status != 200:
            # Typo fix: original message read "Reponse".
            raise ObjectStorageException(f'Error uploading object, Response code {str(response.status)}')
        self.logger.info(f'File "{file_name}" uploaded to object storage with object name "{object_name}"')
        return True

    def object_get(self, namespace_name, bucket_name, object_name, file_name, set_restore=False):
        '''
        Download object from object storage. Returns True on success, False
        when the object could not be fetched (e.g. still archived).

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket name
        object_name     :   Name of object to download
        file_name       :   Name of local file where object will be downloaded
        set_restore     :   If object is archived, run "set_restore"
        '''
        self.logger.info(f'Downloading object "{object_name}" from namespace "{namespace_name}" and bucket "{bucket_name}" to file "{file_name}"')
        # NOTE(review): the local file is opened (and truncated) before the
        # download is attempted, so a failed download leaves an empty file.
        # Kept as-is to preserve existing behavior.
        with open(file_name, 'wb') as writer:
            try:
                get_response = self.object_storage_client.get_object(namespace_name,
                                                                     bucket_name,
                                                                     object_name)
            except ServiceError as error:
                self.logger.exception(f'Service Error when attempting to download object: {str(error)}')
                if set_restore and "'code': 'NotRestored'" in str(error):
                    # Typo fix: original message read "namepsace".
                    self.logger.debug(f'Object "{object_name}" in bucket "{bucket_name}" and namespace '
                                      f'"{namespace_name}" is archived, will mark for restore')
                    restore_details = RestoreObjectsDetails(object_name=object_name)
                    restore_response = self.object_storage_client.restore_objects(namespace_name, bucket_name, restore_details)
                    if restore_response.status != 202:
                        raise ObjectStorageException('Error restoring object, ' # pylint:disable=raise-missing-from
                                                     f'Response code {str(restore_response.status)}')
                    self.logger.info(f'Set restore on object "{object_name}" in bucket "{bucket_name}" and namespace "{namespace_name}"')
                return False

            if get_response.status != 200:
                raise ObjectStorageException(f'Error downloading object, Response code {str(get_response.status)}')
            self.logger.debug(f'Writing object "{object_name}" to file "{file_name}"')
            # Stream the raw response body straight to disk.
            shutil.copyfileobj(get_response.data.raw, writer)
        return True

    def object_delete(self, namespace_name, bucket_name, object_name):
        '''
        Delete object in object storage. Returns True on success.

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket name
        object_name     :   Name of object to delete
        '''
        self.logger.info(f'Deleting object "{object_name}" from namespace "{namespace_name}" and bucket "{bucket_name}"')
        response = self.object_storage_client.delete_object(namespace_name,
                                                            bucket_name,
                                                            object_name)
        # 204 No Content is the expected success status for a delete.
        if response.status != 204:
            # Typo fix: original message read "Reponse".
            raise ObjectStorageException(f'Error deleting object, Response code {str(response.status)}')
        return True