Example #1
    def _make_retrying_upload_file_call(self):
        self.kwargs["retry_strategy"] = DEFAULT_RETRY_STRATEGY
        upload_manager = UploadManager(self.object_storage_client,
                                       allow_multipart_uploads=False)
        return upload_manager.upload_file(self.namespace_name,
                                          self.bucket_name, self.object_name,
                                          self.file_path, **self.kwargs)
Example #2
    def run_job(self):
        self.status = SUCCESS
        self.log_callback.info('Uploading image.')

        timestamp = None
        build_time = self.status_msg.get('build_time', 'unknown')

        if self.use_build_time and (build_time != 'unknown'):
            timestamp = timestamp_from_epoch(build_time)
        elif self.use_build_time and (build_time == 'unknown'):
            raise MashUploadException(
                'use_build_time set for job but build time is unknown.'
            )

        self.cloud_image_name = format_string_with_date(
            self.base_cloud_image_name,
            timestamp=timestamp
        )

        self.request_credentials([self.account])
        credentials = self.credentials[self.account]

        config = {
            'user': self.oci_user_id,
            'key_content': credentials['signing_key'],
            'fingerprint': credentials['fingerprint'],
            'tenancy': self.tenancy,
            'region': self.region
        }
        object_storage = ObjectStorageClient(config)
        namespace = object_storage.get_namespace().data
        upload_manager = UploadManager(
            object_storage,
            allow_parallel_uploads=True,
            parallel_process_count=self.upload_process_count
        )

        object_name = ''.join([self.cloud_image_name, '.qcow2'])
        self._image_size = stat(self.status_msg['image_file']).st_size

        with open(self.status_msg['image_file'], 'rb') as image_stream:
            upload_manager.upload_stream(
                namespace,
                self.bucket,
                object_name,
                image_stream,
                progress_callback=self._progress_callback
            )

        self.status_msg['cloud_image_name'] = self.cloud_image_name
        self.status_msg['object_name'] = object_name
        self.status_msg['namespace'] = namespace

        self.log_callback.info(
            'Uploaded image: {0}, to the bucket named: {1}'.format(
                object_name,
                self.bucket
            )
        )
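The job above passes `self._progress_callback` to `upload_stream` without showing it; the oci SDK invokes this callback with the number of bytes uploaded in each chunk. A minimal sketch (the cumulative-percentage bookkeeping is a hypothetical addition, built on the `_image_size` captured above):

    def _progress_callback(self, bytes_uploaded):
        # The SDK reports the byte count of each uploaded chunk, not a running total.
        self._bytes_uploaded = getattr(self, '_bytes_uploaded', 0) + bytes_uploaded
        percent = (self._bytes_uploaded / self._image_size) * 100
        self.log_callback.info('Image {:.0f}% uploaded.'.format(percent))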
Example #3
    def __init__(self, config_file, config_section, logger=None):
        '''
        Create ObjectStorageClient for OCI

        config_file     :   OCI Configuration File
        config_section  :   OCI Config File Section
        logger          :   Logger, if not given one will be created
        '''
        config = from_file(config_file, config_section)
        self.object_storage_client = ObjectStorageClient(config, retry_strategy=DEFAULT_RETRY_STRATEGY)
        self.upload_manager = UploadManager(self.object_storage_client)
        if logger is None:
            self.logger = setup_logger("oci_client", 10)
        else:
            self.logger = logger
Example #4
    def upload_object(self, callbacks_container, namespace_name, bucket_name,
                      object_name, file_path, file_size, **kwargs):
        if not self._config.use_multipart_uploads:
            upload_task = SimpleSingleUploadTask(self._client, namespace_name,
                                                 bucket_name, object_name,
                                                 file_path,
                                                 callbacks_container)
            return self._object_storage_request_pool.submit(upload_task)

        part_size = self._config.multipart_part_size
        if 'part_size' in kwargs:
            part_size = kwargs['part_size']
            kwargs.pop('part_size')

        if not UploadManager._use_multipart(file_size, part_size=part_size):
            if 'multipart_part_completion_callback' in kwargs:
                kwargs.pop('multipart_part_completion_callback')

            upload_task = SimpleSingleUploadTask(self._client, namespace_name,
                                                 bucket_name, object_name,
                                                 file_path,
                                                 callbacks_container, **kwargs)
            return self._object_storage_request_pool.submit(upload_task)
        else:
            multipart_upload_processor_task = MultipartUploadProcessorTask(
                self._client, namespace_name, bucket_name, object_name,
                file_path, callbacks_container,
                self._object_storage_multipart_request_pool, part_size,
                **kwargs)
            return self._multipart_upload_processor_pool.submit(
                multipart_upload_processor_task)
Example #5
def upload_objects_to_os_bucket(namespace):
    bucket_name = input("\n\tEnter the bucket name to upload objects: ")
    request = CreateBucketDetails(name=bucket_name,
                                  compartment_id=compartment_dets)
    try:
        bucket = object_storage.create_bucket(namespace, request)
        print("\n\t" + bucket.data.etag)
    except Exception as e:
        print("\n\t" + str(e))

    directory = input(
        "\n\tEnter the path to move files to OCI object storage: ")

    assert os.path.exists(
        directory), "Could not find the directory at " + str(directory)

    print("\n\tFiles in directory " + str(directory) + " will be uploaded")

    files_to_process = [
        file for file in os.listdir(directory) if file.endswith('tar.gz')
    ]

    for upload_file in files_to_process:
        print('\n\tUploading file {}'.format(upload_file))
        print('\n\t' + upload_file)
        partsize = 1000 * MEBIBYTE
        object_name = upload_file
        filename = os.path.join(directory, upload_file)
        upload_manager = UploadManager(object_storage,
                                       allow_parallel_uploads=True,
                                       allow_multipart_uploads=True)
        response = upload_manager.upload_file(
            namespace,
            bucket_name,
            object_name,
            filename,
            part_size=partsize,
            progress_callback=progresscallback)
        if response.data is None:
            print("\n\tUpload Complete")
    return
Example #6
    def upload_object(self, file_properties, user=None):
        self.user = user
        file, file_name = file_properties
        self.set_config()
        object_name = "{}_{}".format(self.user, file_name)
        part_size = 2 * MEBIBYTE  # part size (in bytes)
        upload_manager = UploadManager(self.object_storage,
                                       allow_parallel_uploads=True,
                                       parallel_process_count=3)
        file_path = self.get_media_file_path(file)
        response = upload_manager.upload_file(
            self.namespace,
            self.bucket_name,
            object_name,
            file_path,
            part_size=part_size,
            progress_callback=self.progress_callback)
        if response.status == 200:
            os.remove(file_path)
        return response
Example #7
def upload_data_Object_storage():

    directory = os.path.dirname(os.path.abspath(__file__))
    directory = os.path.join(
        directory,
        'civil-aviation-authority-of-the-philippines-passenger-movement-data')
    object_storage = oci.object_storage.ObjectStorageClient(config)
    files_to_process = [file for file in os.listdir(directory) if file.endswith('csv')]

    try:
        for upload_file in files_to_process:
            print('Uploading file {}'.format(upload_file))
            partsize = 1000 * MEBIBYTE
            object_name = upload_file
            filename = os.path.join(directory, upload_file)
            upload_manager = UploadManager(object_storage,
                                           allow_parallel_uploads=True,
                                           allow_multipart_uploads=True)
            response = upload_manager.upload_file(namespace, bucket_name,
                                                  object_name, filename,
                                                  part_size=partsize,
                                                  progress_callback=progresscallback)
            print(response.data)
    except Exception as e:
        print(str(e))
Example #8
bucket = object_storage.create_bucket(namespace, request)

# create example file to upload
filename = 'multipart_object_content.txt'
file_size_in_mebibytes = 10
sample_content = b'a'
with open(filename, 'wb') as f:
    while f.tell() < MEBIBYTE * file_size_in_mebibytes:
        f.write(sample_content * MEBIBYTE)

print("Uploading new object {!r}".format(object_name))

# The upload manager will automatically use multipart uploads if the part size is less than the file size
part_size = 2 * MEBIBYTE  # part size (in bytes)
upload_manager = UploadManager(object_storage,
                               allow_parallel_uploads=True,
                               parallel_process_count=3)
response = upload_manager.upload_file(namespace,
                                      bucket_name,
                                      object_name,
                                      filename,
                                      part_size=part_size,
                                      progress_callback=progress_callback)

# To force single part uploads, set "allow_multipart_uploads=False" when creating the UploadManager.
# upload_manager = UploadManager(object_storage, allow_multipart_uploads=False)
# response = upload_manager.upload_file(
#    namespace, bucket_name, object_name, filename, part_size=part_size, progress_callback=progress_callback)

# remove file to clean up
os.remove(filename)
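Example #8 references a `progress_callback` defined elsewhere in the same SDK example; it simply reports the additional bytes uploaded per chunk, along these lines:

def progress_callback(bytes_uploaded):
    print("{} additional bytes uploaded".format(bytes_uploaded))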
Example #9
def put_object(object_storage_client, module):
    namespace = module.params["namespace_name"]
    bucket = module.params["bucket_name"]
    obj = module.params["object_name"]
    src = module.params["src"]
    content_type = module.params["content_type"]
    content_length = module.params["content_length"]
    content_md5 = module.params["content_md5"]
    content_language = module.params["content_language"]
    content_encoding = module.params["content_encoding"]

    opc_meta = dict()

    if module.params["opc_meta"] is not None:
        opc_meta = module.params["opc_meta"]

    result = dict(changed=False)

    # Check if the object exists with same checksum.
    remote_object = head_object(object_storage_client, module)

    src_md5 = base64.b64encode(base64.b16decode(module.md5(src), True)).decode("ascii")

    # ENHANCEMENT_OVER_SDK: This is an EoU (ease-of-use) enhancement to make it easier for an
    # Ansible user to provide content for their object
    if remote_object is not None and (
        src_md5 == remote_object.headers.get("Content-MD5", None)
        or src_md5 == remote_object.headers.get("opc-multipart-md5", None)
    ):
        changed = False
    elif module.params.get("multipart_upload"):
        # Note: If the file size is less than 128 MB, UploadManager will automatically choose the
        # single-part upload strategy.
        upload_manager = UploadManager(
            object_storage_client,
            allow_parallel_uploads=module.params.get("parallel_uploads"),
        )
        response = oci_utils.call_with_backoff(
            upload_manager.upload_file,
            namespace_name=namespace,
            bucket_name=bucket,
            object_name=obj,
            file_path=src,
        )
        changed = True
        result["object"] = dict(response.headers)
    else:
        with open(to_bytes(src), "rb") as src_file:
            put_object_body = src_file.read()

        try:
            response = oci_utils.call_with_backoff(
                object_storage_client.put_object,
                namespace_name=namespace,
                bucket_name=bucket,
                object_name=obj,
                put_object_body=put_object_body,
                content_encoding=content_encoding,
                content_language=content_language,
                content_length=content_length,
                content_md5=content_md5,
                content_type=content_type,
                opc_meta=opc_meta,
            )
            changed = True
            result["object"] = dict(response.headers)

        except ServiceError as ex:
            module.fail_json(msg=ex.message)

    result["changed"] = changed
    return result
Example #10
    def upload_object(self, src, force):
        if not os.path.isfile(to_bytes(src)):
            self.module.fail_json(msg="The source path %s must be a file." %
                                  src)

        if not os.access(to_bytes(src), os.R_OK):
            self.module.fail_json(
                msg=
                "Failed to access %s. Make sure the file exists and that you have "
                "read access." % src)

        object_exists = False
        try:
            response = self.get_resource()
            object_exists = True
        except ServiceError as ex:
            if ex.status != 404:
                self.module.fail_json(msg=ex.message)

        if object_exists and force is False:
            return self.prepare_result(
                changed=False,
                resource_type=self.resource_type,
                resource=to_dict(response.data),
                msg=
                "Object %s already present in bucket. Use force option to overwrite."
                % self.module.params.get("object_name"),
            )

        src_md5 = base64.b64encode(base64.b16decode(self.module.md5(src),
                                                    True)).decode("ascii")
        if object_exists and (
                src_md5 == response.headers.get("Content-MD5", None)
                or src_md5 == response.headers.get("opc-multipart-md5", None)):
            return self.prepare_result(
                changed=False,
                resource_type=self.resource_type,
                resource=to_dict(response.data),
            )

        try:
            # sdk complains if the value is None
            self.module.params["opc_meta"] = (
                self.module.params.get("opc_meta") or dict())
            upload_manager = UploadManager(self.client)
            oci_common_utils.call_with_backoff(
                upload_manager.upload_file,
                namespace_name=self.module.params.get("namespace_name"),
                bucket_name=self.module.params.get("bucket_name"),
                object_name=self.module.params.get("object_name"),
                content_encoding=self.module.params.get("content_encoding"),
                content_language=self.module.params.get("content_language"),
                content_length=self.module.params.get("content_length"),
                content_md5=self.module.params.get("content_md5"),
                content_type=self.module.params.get("content_type"),
                content_disposition=self.module.params.get(
                    "content_disposition"),
                cache_control=self.module.params.get("cache_control"),
                opc_sse_customer_algorithm=self.module.params.get(
                    "opc_sse_customer_algorithm"),
                opc_sse_customer_key=self.module.params.get(
                    "opc_sse_customer_key"),
                opc_sse_customer_key_sha256=self.module.params.get(
                    "opc_sse_customer_key_sha256"),
                opc_meta=self.module.params.get("opc_meta"),
                file_path=src,
            )

            uploaded_object_response = self.get_resource()
            return self.prepare_result(
                changed=True,
                resource_type=self.resource_type,
                resource=to_dict(uploaded_object_response.data),
            )

        except ServiceError as ex:
            self.module.fail_json(msg=ex.message)
Example #11
    def _make_retrying_upload_file_call(self):
        upload_manager = UploadManager(self.object_storage_client,
                                       allow_multipart_uploads=False)
        return upload_manager.upload_file(self.namespace_name,
                                          self.bucket_name, self.object_name,
                                          self.file_path, **self.kwargs)
Example #12
class OCIObjectStorageClient:
    '''
    Object Storage Client
    '''
    def __init__(self, config_file, config_section, logger=None):
        '''
        Create ObjectStorageClient for OCI

        config_file     :   OCI Configuration File
        config_section  :   OCI Config File Section
        logger          :   Logger, if not given one will be created
        '''
        config = from_file(config_file, config_section)
        self.object_storage_client = ObjectStorageClient(config, retry_strategy=DEFAULT_RETRY_STRATEGY)
        self.upload_manager = UploadManager(self.object_storage_client)
        if logger is None:
            self.logger = setup_logger("oci_client", 10)
        else:
            self.logger = logger

    def object_list(self, namespace_name, bucket_name):
        '''
        Return list of object storage objects

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket Name
        '''
        self.logger.info("Retrieving object list from namespace %s and bucket %s",
                         namespace_name, bucket_name)
        all_objects_response = list_call_get_all_results(self.object_storage_client.list_objects,
                                                         namespace_name, bucket_name, fields='name,md5,size,timeCreated')
        return [to_dict(obj) for obj in all_objects_response.data.objects]


    def object_put(self, namespace_name, bucket_name, object_name, file_name, md5_sum=None, resume_upload=False):
        '''
        Upload object to object storage

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket name
        object_name     :   Name of uploaded object
        file_name       :   Name of local file to upload
        md5_sum         :   Md5 sum of local file
        resume_upload   :   If True, try to resume a pending multipart upload for this object
        '''
        self.logger.info(f'Starting upload of file "{file_name}" to namespace "{namespace_name}" '
                         f'bucket "{bucket_name}" and object name "{object_name}"')
        upload_resumed = False
        if resume_upload:
            self.logger.debug(f'Checking if object name "{object_name}" is in list of pending uploads')
            multipart_uploads = list_call_get_all_results(self.object_storage_client.list_multipart_uploads,
                                                          namespace_name, bucket_name)
            for multipart_upload in multipart_uploads.data:
                # Assume namespace and bucket are the same
                if multipart_upload.object == object_name:
                    self.logger.debug(f'Resuming file upload {multipart_upload.upload_id} for object "{object_name}"')
                    response = self.upload_manager.resume_upload_file(namespace_name, bucket_name, object_name, file_name, multipart_upload.upload_id)
                    upload_resumed = True
                    break
        if not upload_resumed:
            response = self.upload_manager.upload_file(namespace_name, bucket_name, object_name, file_name, content_md5=md5_sum)
        if response.status != 200:
            raise ObjectStorageException(f'Error uploading object, Response code {str(response.status)}')
        self.logger.info(f'File "{file_name}" uploaded to object storage with object name "{object_name}"')
        return True

    def object_get(self, namespace_name, bucket_name, object_name, file_name, set_restore=False):
        '''
        Download object from object storage

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket name
        object_name     :   Name of object to download
        file_name       :   Name of local file where object will be downloaded
        set_restore     :   If object is archived, run "set_restore"
        '''
        self.logger.info(f'Downloading object "{object_name}" from namespace "{namespace_name}" and bucket "{bucket_name}" to file "{file_name}"')
        with open(file_name, 'wb') as writer:
            try:
                get_response = self.object_storage_client.get_object(namespace_name,
                                                                     bucket_name,
                                                                     object_name)
            except ServiceError as error:
                self.logger.exception(f'Service Error when attempting to download object: {str(error)}')
                if set_restore and "'code': 'NotRestored'" in str(error):
                    self.logger.debug(f'Object "{object_name}" in bucket "{bucket_name}" and namespace '
                                      f'"{namespace_name}" is archived, will mark for restore')
                    restore_details = RestoreObjectsDetails(object_name=object_name)
                    restore_response = self.object_storage_client.restore_objects(namespace_name, bucket_name, restore_details)
                    if restore_response.status != 202:
                        raise ObjectStorageException('Error restoring object, ' # pylint:disable=raise-missing-from
                                                     f'Response code {str(restore_response.status)}')
                    self.logger.info(f'Set restore on object "{object_name}" in bucket "{bucket_name}" and namespace "{namespace_name}"')
                return False

            if get_response.status != 200:
                raise ObjectStorageException(f'Error downloading object, Response code {str(get_response.status)}')
            self.logger.debug(f'Writing object "{object_name}" to file "{file_name}"')
            shutil.copyfileobj(get_response.data.raw, writer)
        return True

    def object_delete(self, namespace_name, bucket_name, object_name):
        '''
        Delete object in object storage

        namespace_name  :   Object Storage Namespace
        bucket_name     :   Bucket name
        object_name     :   Name of object to delete
        '''
        self.logger.info(f'Deleting object "{object_name}" from namespace "{namespace_name}" and bucket "{bucket_name}"')
        response = self.object_storage_client.delete_object(namespace_name,
                                                            bucket_name,
                                                            object_name)
        if response.status != 204:
            raise ObjectStorageException(f'Error deleting object, Response code {str(response.status)}')
        return True
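A minimal usage sketch for this class; the config path, profile, namespace, and bucket names below are placeholder assumptions, not part of the original:

client = OCIObjectStorageClient('~/.oci/config', 'DEFAULT')
for obj in client.object_list('my-namespace', 'my-bucket'):
    print(obj['name'], obj['size'])
client.object_put('my-namespace', 'my-bucket', 'backup.tar.gz',
                  '/tmp/backup.tar.gz', resume_upload=True)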