def upload_object(self, bucket_name, object_name, read_path, subfolders=None, print_details=True):
    # Upload a local file to GCS as a chunked, resumable upload.
    # NOTE(review): Python 2 `except E, err` syntax — this block predates Py3.
    # NOTE(review): `print_details` is never read in the visible code —
    # presumably used by a truncated tail; confirm against the full source.
    process_start_time = datetime.now(UTC)
    # Build the media handle; fall back to the class default MIME type when
    # detection from the filename fails.
    media = MediaFileUpload(read_path, chunksize=self._CHUNKSIZE, resumable=True)
    if not media.mimetype():
        media = MediaFileUpload(read_path, self._DEFAULT_MIMETYPE, resumable=True)
    request = self._objects.insert(
        bucket=bucket_name,
        name=self._parse_object_name(object_name, subfolders),
        media_body=media
    )
    progressless_iters = 0
    response = None
    # Drive the resumable upload one chunk at a time until the final
    # response arrives.
    while response is None:
        error = None
        try:
            progress, response = request.next_chunk()
        except HttpError, err:
            error = err
            # Non-5xx errors are not retryable — surface them immediately.
            if err.resp.status < 500:
                raise
        except self._RETRYABLE_ERRORS, err:
            error = err
    # NOTE(review): the block appears truncated here — the captured `error`
    # is never acted on, so a persistent retryable failure would loop
    # forever. Sibling implementations in this file call a
    # _handle_progressless_iter() helper at this point; verify against the
    # complete source.
def upload_object(self, bucket_name, object_name, read_path, subfolders=None, print_details=True):
    # Upload a local file to GCS as a chunked, resumable upload.
    # NOTE(review): Python 2 `except E, err` syntax — this block predates Py3.
    # NOTE(review): `print_details` and `process_start_time` are not used in
    # the visible code — presumably consumed by a truncated tail; confirm.
    process_start_time = datetime.now(UTC)
    # Build the media handle; fall back to the class default MIME type when
    # detection from the filename fails.
    media = MediaFileUpload(read_path, chunksize=self._CHUNKSIZE, resumable=True)
    if not media.mimetype():
        media = MediaFileUpload(read_path, self._DEFAULT_MIMETYPE, resumable=True)
    request = self._objects.insert(bucket=bucket_name, name=self._parse_object_name(
        object_name, subfolders), media_body=media)
    progressless_iters = 0
    response = None
    # Pull the upload forward one chunk per iteration until a final
    # response is returned.
    while response is None:
        error = None
        try:
            progress, response = request.next_chunk()
        except HttpError, err:
            error = err
            # Only 5xx responses are considered retryable.
            if err.resp.status < 500:
                raise
        except self._RETRYABLE_ERRORS, err:
            error = err
    # NOTE(review): block appears truncated — `error` is captured but never
    # handled, so a persistent retryable failure would spin indefinitely.
    # Compare with the complete upload_object variants elsewhere in this
    # file, which invoke _handle_progressless_iter() here.
def upload_object(self, bucket_name, object_name, read_path, predefined_acl=None, projection=None, **object_resource):
    """Upload a local file to GCS in resumable chunks.

    Optional parameters and valid object resources are listed here
    [https://cloud.google.com/storage/docs/json_api/v1/objects/insert]

    :param bucket_name: Bucket identifier.
    :type bucket_name: string
    :param object_name: String form of the object resource, or a list
        giving the path to the object on GCS.
    :type object_name: list or string
    :param read_path: Local path of the object to upload.
    :type read_path: string
    :param predefined_acl: Apply a predefined set of access controls to
        this object.
    :param projection: Set of properties to return.
    :param object_resource: Optional object-resource properties
        [https://cloud.google.com/storage/docs/json_api/v1/objects/insert#request-body]
    :returns: GcsResponse object.
    :raises: HttpError if non-retryable errors are encountered.
    """
    outcome = GcsResponse('uploaded')

    # Build the media handle; fall back to a generic binary MIME type when
    # detection from the filename fails.
    upload_media = MediaFileUpload(read_path, chunksize=self._chunksize, resumable=True)
    if not upload_media.mimetype():
        upload_media = MediaFileUpload(read_path, 'application/octet-stream', resumable=True)

    insert_request = self._service.objects().insert(
        bucket=bucket_name,
        name=self._parse_object_name(object_name),
        media_body=upload_media,
        predefinedAcl=predefined_acl,
        projection=projection,
        body=object_resource)

    stalled = 0
    server_response = None
    # One chunk per iteration; a non-None response means the upload is done.
    while server_response is None:
        failure = None
        try:
            _, server_response = insert_request.next_chunk()
        except HttpError as exc:
            failure = exc
            # Only 5xx responses are retryable — anything else propagates.
            if exc.resp.status < 500:
                raise
        except self._RETRYABLE_ERRORS as exc:
            failure = exc

        if failure is None:
            stalled = 0
        else:
            stalled += 1
            self._handle_progressless_iter(failure, stalled)

    outcome.load_resp(server_response, is_download=False)
    return outcome
def upload_file_to_bucket(self, bucket_name, file_path):
    """Upload a local file to a GCS bucket as a chunked, resumable upload.

    The blob name is the basename of ``file_path``. Transient (5xx /
    retryable) errors trigger exponential backoff with jitter; up to
    NUM_RETRIES consecutive progressless iterations are tolerated.

    :param bucket_name: Name of an existing destination bucket.
    :param file_path: Local path of the file to upload.
    :returns: Tuple ``(True, blob_name)`` on success.
    :raises NotFoundError: If the bucket does not exist.
    :raises HttpError: For non-retryable (< 500) API errors, or when the
        retry budget is exhausted.
    """
    def handle_progressless_iter(error, progressless_iters):
        # Give up after too many consecutive iterations without progress.
        if progressless_iters > NUM_RETRIES:
            self.logger.info(
                'Failed to make progress for too many consecutive iterations.'
            )
            raise error
        # Exponential backoff with full jitter.
        sleeptime = random.random() * (2 ** progressless_iters)
        self.logger.info(
            'Caught exception (%s). Sleeping for %d seconds before retry #%d.',
            str(error), sleeptime, progressless_iters)
        time.sleep(sleeptime)

    self.logger.info('Building upload request...')
    media = MediaFileUpload(file_path, chunksize=CHUNKSIZE, resumable=True)
    if not media.mimetype():
        media = MediaFileUpload(file_path, DEFAULT_MIMETYPE, resumable=True)
    blob_name = os.path.basename(file_path)
    if not self.bucket_exists(bucket_name):
        self.logger.error("Bucket '%s' doesn't exist", bucket_name)
        raise NotFoundError("bucket {}".format(bucket_name))
    request = self._storage.objects().insert(bucket=bucket_name,
                                             name=blob_name,
                                             media_body=media)
    self.logger.info('Uploading file: %s, to bucket: %s, blob: %s',
                     file_path, bucket_name, blob_name)
    progressless_iters = 0
    response = None
    while response is None:
        error = None
        try:
            progress, response = request.next_chunk()
            if progress:
                self.logger.info('Upload progress: %d%%',
                                 100 * progress.progress())
        # BUG FIX: the original bound the exception via `except ... as error`.
        # Python 3 unbinds that name when the handler exits (PEP 3110), so
        # the `if error:` below raised NameError and the retry/backoff path
        # never ran. Capture the exception into a separate local instead.
        except errors.HttpError as err:
            error = err
            # Non-5xx errors are not retryable — surface immediately.
            if err.resp.status < 500:
                raise
        except RETRYABLE_ERRORS as err:
            error = err
        if error:
            progressless_iters += 1
            handle_progressless_iter(error, progressless_iters)
        else:
            progressless_iters = 0
    self.logger.info('Upload complete!')
    self.logger.info('Uploaded Object:')
    self.logger.info(json_dumps(response, indent=2))
    return (True, blob_name)
def upload_object(self, bucket_name, object_name, read_path):
    """Upload a local file to GCS using a chunked, resumable transfer.

    :param bucket_name: Bucket identifier.
    :type bucket_name: string
    :param object_name: String form of the object resource, or a list
        giving the path to the object on GCS.
    :type object_name: list or string
    :param read_path: Local path of the object to upload.
    :type read_path: string
    :returns: GcsResponse object.
    :raises: HttpError if non-retryable errors are encountered.
    """
    result = GcsResponse('uploaded')

    # Build the media handle; fall back to a generic binary MIME type when
    # detection from the filename fails.
    media = MediaFileUpload(read_path, chunksize=self._chunksize, resumable=True)
    if not media.mimetype():
        media = MediaFileUpload(read_path, 'application/octet-stream', resumable=True)

    request = self._service.objects().insert(
        bucket=bucket_name,
        name=self._parse_object_name(object_name),
        media_body=media)

    stalled_iterations = 0
    response = None
    # Each pass uploads one chunk; the loop ends once the API returns the
    # final object resource.
    while response is None:
        caught = None
        try:
            _, response = request.next_chunk()
        except HttpError as http_err:
            caught = http_err
            # Anything below 500 is not retryable — re-raise immediately.
            if http_err.resp.status < 500:
                raise
        except self._RETRYABLE_ERRORS as retryable_err:
            caught = retryable_err

        if caught is None:
            stalled_iterations = 0
        else:
            stalled_iterations += 1
            self._handle_progressless_iter(caught, stalled_iterations)

    result.load_resp(response, is_download=False)
    return result
def put_multipart(self, local_path, destination_gcs_path, chunk_size=67108864, fallback_to_simple_put=True):
    """Upload a local file to a GCS path in resumable chunks.

    Uses MediaFileUpload chunking, intended for large objects (for files
    > 5GB, see
    https://developers.google.com/api-client-library/python/guide/media_upload).

    :param local_path: Path to source local file
    :param destination_gcs_path: URL for target GCS location
    :param chunk_size: Chunk size in bytes. Default: 67108864 (64MB).
        Files larger than 256 KB (256 * 1024 B) must use chunk sizes that
        are multiples of 256 KB; smaller files have no restriction, and the
        final chunk is always exempt.
    :param fallback_to_simple_put: When True, files that fit in a single
        chunk are uploaded via put() instead.
    """
    source_size = os.stat(local_path).st_size
    fits_single_chunk = source_size <= chunk_size or source_size < GcsFileSystem.MIN_CHUNK_SIZE
    if fallback_to_simple_put and fits_single_chunk:
        GcsFileSystem.logger.debug("File too small will upload as a single chunk")
        return self.put(local_path, destination_gcs_path)

    chunk_size = GcsFileSystem.correct_chunk_size(chunk_size)
    bucket, key = self.path_to_bucket_and_key(destination_gcs_path)

    media = MediaFileUpload(local_path, chunksize=chunk_size, resumable=True)
    if media.mimetype() is None:
        # Extension-based detection failed; force a generic binary type.
        media = MediaFileUpload(local_path, chunksize=chunk_size,
                                resumable=True, mimetype='application/octet-stream')

    insert_request = self.gcs_service.objects().insert(media_body=media, name=key, bucket=bucket)

    def is_retryable_server_error(exception):
        # Only transient 5xx responses are worth retrying.
        return isinstance(exception, HttpError) and exception.resp.status in [500, 502, 503, 504]

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000,
           retry_on_exception=is_retryable_server_error)
    def push_next_chunk(req):
        self.logger.debug("Uploading chunk to {}/{}".format(bucket, key))
        return req.next_chunk()

    response = None
    # Keep sending chunks until the API returns the final object resource.
    while response is None:
        upload_status, response = push_next_chunk(insert_request)
        self.logger.debug("Chunk uploaded to {}/{}".format(bucket, key))
        if upload_status:
            self.logger.debug(
                "Overall uploaded to {}/{}: {}%".format(bucket, key, int(upload_status.progress() * 100)))

    return bucket, key
def test_media_file_upload_to_from_json(self):
    """A MediaFileUpload round-trips through to_json()/new_from_json()."""
    def verify(media):
        # The restored object must report the same metadata and bytes.
        self.assertEqual('image/png', media.mimetype())
        self.assertEqual(190, media.size())
        self.assertEqual(True, media.resumable())
        self.assertEqual(500, media.chunksize())
        self.assertEqual('PNG', media.getbytes(1, 3))

    original = MediaFileUpload(
        datafile('small.png'), chunksize=500, resumable=True)
    verify(original)

    restored = MediaUpload.new_from_json(original.to_json())
    verify(restored)
def _upload_asset(self, asset_name, video_file):
    """Upload video asset.

    Before creating a new video creative on DCM, you need to upload the
    assets (actual video files). This method uploads the video file to DCM
    and returns the newly generated asset identifier, which can later be
    used to associate the video asset with a new video creative on DCM.
    See https://support.google.com/dcm/answer/3312854?hl=en

    Args:
      asset_name: Name that will be used for the new video asset on DCM
      video_file: Video filename

    Returns:
      dfareporting#creativeAssetMetadata object with metadata about the
      newly created CreativeAsset (see DCM API documentation for more info)
    """
    logger.info("Uploading asset '%s'", asset_name)

    # Metadata describing the asset being created.
    asset_metadata = {
        'assetIdentifier': {
            'name': asset_name,
            'type': "VIDEO"
        }
    }

    # Fall back to a generic binary MIME type if detection fails.
    media_body = MediaFileUpload(video_file)
    if not media_body.mimetype():
        media_body = MediaFileUpload(video_file, 'application/octet-stream')

    response = self._service.creativeAssets().insert(
        advertiserId=self._advertiser_id,
        profileId=self._profile_id,
        media_body=media_body,
        body=asset_metadata).execute()

    logger.info("Asset uploaded. Name: '%s'",
                response['assetIdentifier']['name'])
    return response['assetIdentifier']
def method(self, **kwargs):
    # Don't bother with doc string, it will be over-written by createMethod.
    #
    # NOTE(review): this is a closure — it relies on names captured from the
    # enclosing createMethod() scope (parameters, methodName, methodDesc,
    # pathUrl, mediaPathUrl, accept, maxSize, httpMethod, methodId); it is
    # not meaningful in isolation. Python 2 only (iterkeys/basestring/
    # urlparse.urljoin).

    # Reject keyword arguments the discovery document does not declare.
    for name in kwargs.iterkeys():
        if name not in parameters.argmap:
            raise TypeError('Got an unexpected keyword argument "%s"' % name)

    # Remove args that have a value of None.
    keys = kwargs.keys()
    for name in keys:
        if kwargs[name] is None:
            del kwargs[name]

    for name in parameters.required_params:
        if name not in kwargs:
            raise TypeError('Missing required parameter "%s"' % name)

    # Validate regex-constrained parameters; values may be scalar or a list.
    for name, regex in parameters.pattern_params.iteritems():
        if name in kwargs:
            if isinstance(kwargs[name], basestring):
                pvalues = [kwargs[name]]
            else:
                pvalues = kwargs[name]
            for pvalue in pvalues:
                if re.match(regex, pvalue) is None:
                    raise TypeError(
                        'Parameter "%s" value "%s" does not match the pattern "%s"' %
                        (name, pvalue, regex))

    for name, enums in parameters.enum_params.iteritems():
        if name in kwargs:
            # We need to handle the case of a repeated enum
            # name differently, since we want to handle both
            # arg='value' and arg=['value1', 'value2']
            if (name in parameters.repeated_params and
                    not isinstance(kwargs[name], basestring)):
                values = kwargs[name]
            else:
                values = [kwargs[name]]
            for value in values:
                if value not in enums:
                    raise TypeError(
                        'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                        (name, value, str(enums)))

    # Split the validated kwargs into query-string and URI-path parameters,
    # casting each value to the type declared for it.
    actual_query_params = {}
    actual_path_params = {}
    for key, value in kwargs.iteritems():
        to_type = parameters.param_types.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in parameters.repeated_params and type(value) == type([]):
            cast_value = [_cast(x, to_type) for x in value]
        else:
            cast_value = _cast(value, to_type)
        if key in parameters.query_params:
            actual_query_params[parameters.argmap[key]] = cast_value
        if key in parameters.path_params:
            actual_path_params[parameters.argmap[key]] = cast_value
    body_value = kwargs.get('body', None)
    media_filename = kwargs.get('media_body', None)

    if self._developerKey:
        actual_query_params['key'] = self._developerKey

    # Pick the serialization model: raw media for *_media methods, raw
    # bytes when the method declares no response schema.
    model = self._model
    if methodName.endswith('_media'):
        model = MediaModel()
    elif 'response' not in methodDesc:
        model = RawModel()

    headers = {}
    headers, params, query, body = model.request(headers,
        actual_path_params, actual_query_params, body_value)

    expanded_url = uritemplate.expand(pathUrl, params)
    url = urlparse.urljoin(self._baseUrl, expanded_url + query)

    resumable = None
    multipart_boundary = ''

    if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, basestring):
            (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
            if media_mime_type is None:
                raise UnknownFileType(media_filename)
            if not mimeparse.best_match([media_mime_type], ','.join(accept)):
                raise UnacceptableMimeTypeError(media_mime_type)
            media_upload = MediaFileUpload(media_filename,
                                           mimetype=media_mime_type)
        elif isinstance(media_filename, MediaUpload):
            media_upload = media_filename
        else:
            raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
            raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
            url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
            # This is all we need to do for resumable, if the body exists it
            # gets sent in the first request, otherwise an empty body is sent.
            resumable = media_upload
        else:
            # A non-resumable upload
            if body is None:
                # This is a simple media upload
                headers['content-type'] = media_upload.mimetype()
                body = media_upload.getbytes(0, media_upload.size())
                url = _add_query_parameter(url, 'uploadType', 'media')
            else:
                # This is a multipart/related upload.
                msgRoot = MIMEMultipart('related')
                # msgRoot should not write out its own headers
                setattr(msgRoot, '_write_headers', lambda self: None)

                # attach the body as one part
                msg = MIMENonMultipart(*headers['content-type'].split('/'))
                msg.set_payload(body)
                msgRoot.attach(msg)

                # attach the media as the second part
                msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
                msg['Content-Transfer-Encoding'] = 'binary'
                payload = media_upload.getbytes(0, media_upload.size())
                msg.set_payload(payload)
                msgRoot.attach(msg)

                body = msgRoot.as_string()

                multipart_boundary = msgRoot.get_boundary()
                headers['content-type'] = ('multipart/related; '
                                           'boundary="%s"') % multipart_boundary
                url = _add_query_parameter(url, 'uploadType', 'multipart')

    logger.info('URL being requested: %s %s' % (httpMethod, url))

    return self._requestBuilder(self._http,
                                model.response,
                                url,
                                method=httpMethod,
                                body=body,
                                headers=headers,
                                methodId=methodId,
                                resumable=resumable)
def method(self, **kwargs):
    # Don't bother with doc string, it will be over-written by createMethod.
    #
    # NOTE(review): this is a closure — it relies on names captured from the
    # enclosing createMethod() scope (parameters, methodName, methodDesc,
    # schema, pathUrl, mediaPathUrl, accept, maxSize, httpMethod, methodId);
    # it is not meaningful in isolation.

    # Reject keyword arguments the discovery document does not declare.
    for name in six.iterkeys(kwargs):
        if name not in parameters.argmap:
            raise TypeError('Got an unexpected keyword argument "%s"' % name)

    # Remove args that have a value of None.
    keys = list(kwargs.keys())
    for name in keys:
        if kwargs[name] is None:
            del kwargs[name]

    for name in parameters.required_params:
        if name not in kwargs:
            # temporary workaround for non-paging methods incorrectly requiring
            # page token parameter (cf. drive.changes.watch vs. drive.changes.list)
            if name not in _PAGE_TOKEN_NAMES or _findPageTokenName(
                    _methodProperties(methodDesc, schema, 'response')):
                raise TypeError('Missing required parameter "%s"' % name)

    # Validate regex-constrained parameters; values may be scalar or a list.
    for name, regex in six.iteritems(parameters.pattern_params):
        if name in kwargs:
            if isinstance(kwargs[name], six.string_types):
                pvalues = [kwargs[name]]
            else:
                pvalues = kwargs[name]
            for pvalue in pvalues:
                if re.match(regex, pvalue) is None:
                    raise TypeError(
                        'Parameter "%s" value "%s" does not match the pattern "%s"' %
                        (name, pvalue, regex))

    for name, enums in six.iteritems(parameters.enum_params):
        if name in kwargs:
            # We need to handle the case of a repeated enum
            # name differently, since we want to handle both
            # arg='value' and arg=['value1', 'value2']
            if (name in parameters.repeated_params and
                    not isinstance(kwargs[name], six.string_types)):
                values = kwargs[name]
            else:
                values = [kwargs[name]]
            for value in values:
                if value not in enums:
                    raise TypeError(
                        'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                        (name, value, str(enums)))

    # Split the validated kwargs into query-string and URI-path parameters,
    # casting each value to the type declared for it.
    actual_query_params = {}
    actual_path_params = {}
    for key, value in six.iteritems(kwargs):
        to_type = parameters.param_types.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in parameters.repeated_params and type(value) == type([]):
            cast_value = [_cast(x, to_type) for x in value]
        else:
            cast_value = _cast(value, to_type)
        if key in parameters.query_params:
            actual_query_params[parameters.argmap[key]] = cast_value
        if key in parameters.path_params:
            actual_path_params[parameters.argmap[key]] = cast_value
    body_value = kwargs.get('body', None)
    media_filename = kwargs.get('media_body', None)
    media_mime_type = kwargs.get('media_mime_type', None)

    if self._developerKey:
        actual_query_params['key'] = self._developerKey

    # Pick the serialization model: raw media for *_media methods, raw
    # bytes when the method declares no response schema.
    model = self._model
    if methodName.endswith('_media'):
        model = MediaModel()
    elif 'response' not in methodDesc:
        model = RawModel()

    headers = {}
    headers, params, query, body = model.request(headers,
        actual_path_params, actual_query_params, body_value)

    expanded_url = uritemplate.expand(pathUrl, params)
    url = _urljoin(self._baseUrl, expanded_url + query)

    resumable = None
    multipart_boundary = ''

    if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, six.string_types):
            if media_mime_type is None:
                logger.warning(
                    'media_mime_type argument not specified: trying to auto-detect for %s',
                    media_filename)
                media_mime_type, _ = mimetypes.guess_type(media_filename)
            if media_mime_type is None:
                raise UnknownFileType(media_filename)
            if not mimeparse.best_match([media_mime_type], ','.join(accept)):
                raise UnacceptableMimeTypeError(media_mime_type)
            media_upload = MediaFileUpload(media_filename,
                                           mimetype=media_mime_type)
        elif isinstance(media_filename, MediaUpload):
            media_upload = media_filename
        else:
            raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if media_upload.size(
        ) is not None and media_upload.size() > maxSize > 0:
            raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = _urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
            url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
            # This is all we need to do for resumable, if the body exists it
            # gets sent in the first request, otherwise an empty body is sent.
            resumable = media_upload
        else:
            # A non-resumable upload
            if body is None:
                # This is a simple media upload
                headers['content-type'] = media_upload.mimetype()
                body = media_upload.getbytes(0, media_upload.size())
                url = _add_query_parameter(url, 'uploadType', 'media')
            else:
                # This is a multipart/related upload.
                msgRoot = MIMEMultipart('related')
                # msgRoot should not write out its own headers
                setattr(msgRoot, '_write_headers', lambda self: None)

                # attach the body as one part
                msg = MIMENonMultipart(*headers['content-type'].split('/'))
                msg.set_payload(body)
                msgRoot.attach(msg)

                # attach the media as the second part
                msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
                msg['Content-Transfer-Encoding'] = 'binary'
                payload = media_upload.getbytes(0, media_upload.size())
                msg.set_payload(payload)
                msgRoot.attach(msg)

                # encode the body: note that we can't use `as_string`, because
                # it plays games with `From ` lines.
                fp = BytesIO()
                g = _BytesGenerator(fp, mangle_from_=False)
                g.flatten(msgRoot, unixfrom=False)
                body = fp.getvalue()

                multipart_boundary = msgRoot.get_boundary()
                headers['content-type'] = (
                    'multipart/related; '
                    'boundary="%s"') % multipart_boundary
                url = _add_query_parameter(url, 'uploadType', 'multipart')

    logger.info('URL being requested: %s %s' % (httpMethod, url))

    return self._requestBuilder(self._http,
                                model.response,
                                url,
                                method=httpMethod,
                                body=body,
                                headers=headers,
                                methodId=methodId,
                                resumable=resumable)
def test_media_file_upload_mimetype_detection(self):
    """MIME type is inferred from the extension, else octet-stream."""
    png_upload = MediaFileUpload(datafile('small.png'))
    self.assertEqual('image/png', png_upload.mimetype())

    # No extension — detection falls back to the generic binary type.
    unknown_upload = MediaFileUpload(datafile('empty'))
    self.assertEqual('application/octet-stream', unknown_upload.mimetype())
def method(self, **kwargs):
    # Don't bother with doc string, it will be over-written by createMethod.
    #
    # NOTE(review): this is a closure — it relies on names captured from the
    # enclosing createMethod() scope (parameters, methodName, methodDesc,
    # pathUrl, mediaPathUrl, accept, maxSize, httpMethod, methodId); it is
    # not meaningful in isolation.

    # Reject keyword arguments the discovery document does not declare.
    for name in six.iterkeys(kwargs):
        if name not in parameters.argmap:
            raise TypeError('Got an unexpected keyword argument "{0!s}"'.format(name))

    # Remove args that have a value of None.
    keys = list(kwargs.keys())
    for name in keys:
        if kwargs[name] is None:
            del kwargs[name]

    for name in parameters.required_params:
        if name not in kwargs:
            raise TypeError('Missing required parameter "{0!s}"'.format(name))

    # Validate regex-constrained parameters; values may be scalar or a list.
    for name, regex in six.iteritems(parameters.pattern_params):
        if name in kwargs:
            if isinstance(kwargs[name], six.string_types):
                pvalues = [kwargs[name]]
            else:
                pvalues = kwargs[name]
            for pvalue in pvalues:
                if re.match(regex, pvalue) is None:
                    raise TypeError(
                        'Parameter "{0!s}" value "{1!s}" does not match the pattern "{2!s}"'.format(
                            name, pvalue, regex
                        )
                    )

    for name, enums in six.iteritems(parameters.enum_params):
        if name in kwargs:
            # We need to handle the case of a repeated enum
            # name differently, since we want to handle both
            # arg='value' and arg=['value1', 'value2']
            if name in parameters.repeated_params and not isinstance(kwargs[name], six.string_types):
                values = kwargs[name]
            else:
                values = [kwargs[name]]
            for value in values:
                if value not in enums:
                    raise TypeError(
                        'Parameter "{0!s}" value "{1!s}" is not an allowed value in "{2!s}"'.format(
                            name, value, str(enums)
                        )
                    )

    # Split the validated kwargs into query-string and URI-path parameters,
    # casting each value to the type declared for it.
    actual_query_params = {}
    actual_path_params = {}
    for key, value in six.iteritems(kwargs):
        to_type = parameters.param_types.get(key, "string")
        # For repeated parameters we cast each member of the list.
        if key in parameters.repeated_params and type(value) == type([]):
            cast_value = [_cast(x, to_type) for x in value]
        else:
            cast_value = _cast(value, to_type)
        if key in parameters.query_params:
            actual_query_params[parameters.argmap[key]] = cast_value
        if key in parameters.path_params:
            actual_path_params[parameters.argmap[key]] = cast_value
    body_value = kwargs.get("body", None)
    media_filename = kwargs.get("media_body", None)

    if self._developerKey:
        actual_query_params["key"] = self._developerKey

    # Pick the serialization model: raw media for *_media methods, raw
    # bytes when the method declares no response schema.
    model = self._model
    if methodName.endswith("_media"):
        model = MediaModel()
    elif "response" not in methodDesc:
        model = RawModel()

    headers = {}
    headers, params, query, body = model.request(headers, actual_path_params, actual_query_params, body_value)

    expanded_url = uritemplate.expand(pathUrl, params)
    url = _urljoin(self._baseUrl, expanded_url + query)

    resumable = None
    multipart_boundary = ""

    if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, six.string_types):
            (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
            if media_mime_type is None:
                raise UnknownFileType(media_filename)
            if not mimeparse.best_match([media_mime_type], ",".join(accept)):
                raise UnacceptableMimeTypeError(media_mime_type)
            media_upload = MediaFileUpload(media_filename, mimetype=media_mime_type)
        elif isinstance(media_filename, MediaUpload):
            media_upload = media_filename
        else:
            raise TypeError("media_filename must be str or MediaUpload.")

        # Check the maxSize
        if media_upload.size() is not None and media_upload.size() > maxSize > 0:
            raise MediaUploadSizeError("Media larger than: {0!s}".format(maxSize))

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = _urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
            url = _add_query_parameter(url, "uploadType", "resumable")

        if media_upload.resumable():
            # This is all we need to do for resumable, if the body exists it
            # gets sent in the first request, otherwise an empty body is sent.
            resumable = media_upload
        else:
            # A non-resumable upload
            if body is None:
                # This is a simple media upload
                headers["content-type"] = media_upload.mimetype()
                body = media_upload.getbytes(0, media_upload.size())
                url = _add_query_parameter(url, "uploadType", "media")
            else:
                # This is a multipart/related upload.
                msgRoot = MIMEMultipart("related")
                # msgRoot should not write out its own headers
                setattr(msgRoot, "_write_headers", lambda self: None)

                # attach the body as one part
                msg = MIMENonMultipart(*headers["content-type"].split("/"))
                msg.set_payload(body)
                msgRoot.attach(msg)

                # attach the media as the second part
                msg = MIMENonMultipart(*media_upload.mimetype().split("/"))
                msg["Content-Transfer-Encoding"] = "binary"
                payload = media_upload.getbytes(0, media_upload.size())
                msg.set_payload(payload)
                msgRoot.attach(msg)

                # encode the body: note that we can't use `as_string`, because
                # it plays games with `From ` lines.
                fp = BytesIO()
                g = _BytesGenerator(fp, mangle_from_=False)
                g.flatten(msgRoot, unixfrom=False)
                body = fp.getvalue()

                multipart_boundary = msgRoot.get_boundary()
                headers["content-type"] = ("multipart/related; " 'boundary="%s"') % multipart_boundary
                url = _add_query_parameter(url, "uploadType", "multipart")

    logger.info("URL being requested: {0!s} {1!s}".format(httpMethod, url))

    return self._requestBuilder(
        self._http,
        model.response,
        url,
        method=httpMethod,
        body=body,
        headers=headers,
        methodId=methodId,
        resumable=resumable,
    )
def test_media_file_upload_mimetype_detection(self):
    """A .png file is detected as image/png from its extension."""
    media = MediaFileUpload(datafile('redbus.png'))
    detected = media.mimetype()
    self.assertEqual('image/png', detected)
def upload_object(self, bucket_name, object_name, read_path, predefined_acl=None, projection=None, **object_resource):
    """Chunked, resumable upload of a local file to GCS.

    Optional parameters and valid object resources are listed here
    [https://cloud.google.com/storage/docs/json_api/v1/objects/insert]

    :param bucket_name: Bucket identifier.
    :type bucket_name: string
    :param object_name: String form of the object resource, or a list
        giving the path to the object on GCS.
    :type object_name: list or string
    :param read_path: Local path of the object to upload.
    :type read_path: string
    :param predefined_acl: Apply a predefined set of access controls to
        this object.
    :param projection: Set of properties to return.
    :param object_resource: Optional object-resource properties
        [https://cloud.google.com/storage/docs/json_api/v1/objects/insert#request-body]
    :returns: GcsResponse object.
    :raises: HttpError if non-retryable errors are encountered.
    """
    response_wrapper = GcsResponse('uploaded')

    media = MediaFileUpload(read_path, chunksize=self._chunksize, resumable=True)
    if not media.mimetype():
        # Could not infer a MIME type from the filename — use the generic one.
        media = MediaFileUpload(read_path, 'application/octet-stream', resumable=True)

    request = self._service.objects().insert(
        bucket=bucket_name,
        name=self._parse_object_name(object_name),
        media_body=media,
        predefinedAcl=predefined_acl,
        projection=projection,
        body=object_resource)

    consecutive_stalls = 0
    raw_response = None
    while raw_response is None:
        encountered = None
        try:
            _progress, raw_response = request.next_chunk()
        except HttpError as http_error:
            encountered = http_error
            # Client-side (4xx) errors will not succeed on retry.
            if http_error.resp.status < 500:
                raise
        except self._RETRYABLE_ERRORS as transient:
            encountered = transient

        if encountered:
            consecutive_stalls += 1
            self._handle_progressless_iter(encountered, consecutive_stalls)
        else:
            consecutive_stalls = 0

    response_wrapper.load_resp(raw_response, is_download=False)
    return response_wrapper