def _urljoin(endpoint_url, url_path, host_prefix): p = urlsplit(endpoint_url) # <part> - <index> # scheme - p[0] # netloc - p[1] # path - p[2] # query - p[3] # fragment - p[4] if not url_path or url_path == '/': # If there's no path component, ensure the URL ends with # a '/' for backwards compatibility. if not p[2]: new_path = '/' else: new_path = p[2] elif p[2].endswith('/') and url_path.startswith('/'): new_path = p[2][:-1] + url_path else: new_path = p[2] + url_path new_netloc = p[1] if host_prefix is not None: new_netloc = host_prefix + new_netloc reconstructed = urlunsplit((p[0], new_netloc, new_path, p[3], p[4])) return reconstructed
def fix_s3_host(event_name, endpoint, request, auth, **kwargs):
    """Switch eligible S3 requests to virtual-host style addressing.

    Runs just before signing.  When the path carries a bucket name (true
    for everything except ListAllBuckets) that conforms to DNS naming
    conventions, the request is rewritten from ``path-style`` to
    ``virtual hosting`` style addressing, which avoids 301 redirects for
    all bucket names that can be CNAME'd.
    """
    parts = urlsplit(request.url)
    auth.auth_path = parts.path
    path_parts = parts.path.split('/')
    if isinstance(auth, botocore.auth.SigV4Auth):
        # SigV4 requests are never rewritten to virtual host style here.
        return
    if len(path_parts) <= 1:
        return
    bucket_name = path_parts[1]
    logger.debug('Checking for DNS compatible bucket for: %s', request.url)
    if not (check_dns_name(bucket_name) and
            _allowed_region(endpoint.region_name)):
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     bucket_name)
        return
    # A bucket-level operation needs its auth_path terminated by a '/'
    # character.
    if len(path_parts) == 2 and auth.auth_path[-1] != '/':
        auth.auth_path += '/'
    path_parts.remove(bucket_name)
    host = bucket_name + '.' + endpoint.service.global_endpoint
    new_uri = urlunsplit((parts.scheme, host, '/'.join(path_parts),
                          parts.query, ''))
    request.url = new_uri
    logger.debug('URI updated to: %s', new_uri)
def fix_s3_host(event_name, endpoint, request, auth, **kwargs):
    """Switch eligible S3 requests to virtual-host style addressing.

    Runs just before signing.  When the path carries a bucket name (true
    for everything except ListAllBuckets) that conforms to DNS naming
    conventions, the request is rewritten from ``path-style`` to
    ``virtual hosting`` style addressing, which avoids 301 redirects for
    all bucket names that can be CNAME'd.
    """
    url_parts = urlsplit(request.url)
    auth.auth_path = url_parts.path
    segments = url_parts.path.split('/')
    if len(segments) > 1:
        bucket = segments[1]
        logger.debug('Checking for DNS compatible bucket for: %s',
                     request.url)
        if check_dns_name(bucket) and _allowed_region(endpoint.region_name):
            # Bucket-level operations require the auth_path to end with
            # a '/' character.
            if len(segments) == 2 and auth.auth_path[-1] != '/':
                auth.auth_path += '/'
            segments.remove(bucket)
            new_host = bucket + '.' + endpoint.service.global_endpoint
            new_uri = urlunsplit((url_parts.scheme, new_host,
                                  '/'.join(segments), url_parts.query, ''))
            request.url = new_uri
            logger.debug('URI updated to: %s', new_uri)
        else:
            logger.debug(
                'Not changing URI, bucket is not DNS compatible: %s', bucket)
def _inject_signature(self, request, signature):
    """Rewrite request.url so auth info travels in the query string.

    The access key, the computed signature, and every signing-relevant
    header are folded into the url's query parameters.
    """
    query_dict = {
        'AWSAccessKeyId': self.credentials.access_key,
        'Signature': signature,
    }
    for header_key in request.headers:
        lk = header_key.lower()
        if header_key == 'Date':
            # Query string requests use Expires instead of the Date
            # header.
            query_dict['Expires'] = request.headers['Date']
        elif lk.startswith('x-amz-') or lk in ('content-md5',
                                               'content-type'):
            # Only x-amz-*, Content-MD5, and Content-Type headers are
            # relevant to the query string.
            query_dict[lk] = request.headers[lk]
    # Combine the identified headers into one encoded query string.
    new_query_string = percent_encode_sequence(query_dict)
    p = urlsplit(request.url)
    if p[3]:
        # A pre-existing query string goes back in front of the newly
        # injected one.
        new_query_string = '%s&%s' % (p[3], new_query_string)
    request.url = urlunsplit((p[0], p[1], p[2], new_query_string, p[4]))
def _modify_request_before_signing(self, request):
    """Move operation and auth params into the query string for presigning.

    Strips the automatically-set form-encoding content type (it makes no
    sense for presigned urls), then rebuilds the url so operation params
    precede the SigV4 auth params.
    """
    # We automatically set this header; if it still carries the auto-set
    # value it should not appear on a presigned url.
    auto_content_type = 'application/x-www-form-urlencoded; charset=utf-8'
    if request.headers.get('content-type') == auto_content_type:
        del request.headers['content-type']
    # X-Amz-Signature is deliberately excluded.  From the docs: "The
    # Canonical Query String must include all the query parameters from
    # the preceding table except for X-Amz-Signature.
    signed_headers = self.signed_headers(self.headers_to_sign(request))
    auth_params = {
        'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
        'X-Amz-Credential': self.scope(request),
        'X-Amz-Date': request.context['timestamp'],
        'X-Amz-Expires': self._expires,
        'X-Amz-SignedHeaders': signed_headers,
    }
    if self.credentials.token is not None:
        auth_params['X-Amz-Security-Token'] = self.credentials.token
    url_parts = urlsplit(request.url)
    # parse_qs yields single-element lists here (no repeated keys are
    # expected), so collapse each value back to a scalar.
    query_dict = {
        k: v[0]
        for k, v in parse_qs(url_parts.query,
                             keep_blank_values=True).items()
    }
    if request.data:
        # Body params must also move into the query string; convert the
        # body to a dict first.
        query_dict.update(self._get_body_as_dict(request))
        request.data = ''
    # The spec is particular: the result *has* to be
    #   https://<endpoint>?<operation params>&<auth params>
    # so the two groups of params stay separate rather than being merged
    # into one mapping and encoded together.
    operation_params = ''
    if query_dict:
        operation_params = percent_encode_sequence(query_dict) + '&'
    new_query_string = (operation_params +
                        percent_encode_sequence(auth_params))
    # urlsplit results are immutable tuples, so rebuild the url with the
    # new query string in slot 3 (scheme, netloc, path, query, fragment).
    p = url_parts
    request.url = urlunsplit((p[0], p[1], p[2], new_query_string, p[4]))
def _inject_signature(self, request, signature):
    """Encode the signature and relevant headers into the request url.

    Query-string auth carries the access key, the signature, and every
    signing-relevant header as url parameters instead of headers.
    """
    params = {
        'AWSAccessKeyId': self.credentials.access_key,
        'Signature': signature,
    }
    for name in request.headers:
        lowered = name.lower()
        if name == 'Date':
            # For query string requests, Expires replaces the Date
            # header.
            params['Expires'] = request.headers['Date']
        elif (lowered.startswith('x-amz-') or
              lowered in ('content-md5', 'content-type')):
            # Only x-amz-*, Content-MD5 and Content-Type headers belong
            # in the query string.
            params[lowered] = request.headers[lowered]
    # Serialize the collected params into an encoded query string.
    encoded = percent_encode_sequence(params)
    scheme, netloc, path, query, fragment = urlsplit(request.url)
    if query:
        # Keep any pre-existing query string ahead of the injected one.
        encoded = '%s&%s' % (query, encoded)
    request.url = urlunsplit((scheme, netloc, path, encoded, fragment))
def _prepend_to_host(self, url, prefix): url_components = urlsplit(url) parts = url_components.netloc.split('.') parts = [prefix] + parts new_netloc = '.'.join(parts) new_components = (url_components.scheme, new_netloc, url_components.path, url_components.query, '') new_url = urlunsplit(new_components) return new_url
def switch_to_virtual_host_style(request, signature_version,
                                 default_endpoint_url=None, **kwargs):
    """Force virtual host style s3 addressing.

    The switch happens no matter the signature version (which is only
    taken into consideration for the default case).  If the bucket is
    not DNS compatible an InvalidDNSName is thrown.

    :param request: A AWSRequest object that is about to be sent.
    :param signature_version: The signature version to sign with
    :param default_endpoint_url: The endpoint to use when switching to a
        virtual style. If None is supplied, the virtual host will be
        constructed from the url of the request.
    """
    if request.auth_path is not None:
        # auth_path already applied -- likely a retried request, so the
        # customization must not run a second time.
        return
    if _is_get_bucket_location_request(request):
        # GetBucketLocation must keep path-style addressing so we can
        # avoid any sigv4 issues.
        logger.debug("Request is GetBucketLocation operation, not checking "
                     "for DNS compatibility.")
        return
    parts = urlsplit(request.url)
    request.auth_path = parts.path
    path_parts = parts.path.split("/")
    # Work out which endpoint the bucket name gets prepended to.
    if default_endpoint_url is None:
        default_endpoint_url = parts.netloc
    if len(path_parts) <= 1:
        return
    bucket_name = path_parts[1]
    if not bucket_name:
        # An empty bucket name means there is nothing to check for dns
        # compatibility.
        return
    logger.debug("Checking for DNS compatible bucket for: %s", request.url)
    if not check_dns_name(bucket_name):
        raise InvalidDNSNameError(bucket_name=bucket_name)
    # Bucket-level operations must terminate the auth_path with a '/'
    # character.
    if len(path_parts) == 2 and request.auth_path[-1] != "/":
        request.auth_path += "/"
    path_parts.remove(bucket_name)
    # The path must be at least '/', e.g. for CreateBucket when DNS
    # style is in use; an empty path would be incorrect.
    path = "/".join(path_parts) or "/"
    host = bucket_name + "." + default_endpoint_url
    new_uri = urlunsplit((parts.scheme, host, path, parts.query, ""))
    request.url = new_uri
    logger.debug("URI updated to: %s", new_uri)
def quote_source_header(params, **kwargs):
    """Percent-encode the path portion of the x-amz-copy-source header.

    Only the path is quoted; any extra user-specified part such as
    '?versionId=myversionid' is left untouched.
    """
    headers = params['headers']
    if headers and 'x-amz-copy-source' in headers:
        source = headers['x-amz-copy-source']
        scheme, netloc, path, query, fragment = urlsplit(source)
        quoted_path = quote(path.encode('utf-8'), '/~')
        headers['x-amz-copy-source'] = urlunsplit(
            (scheme, netloc, quoted_path, query, fragment))
def _modify_request_before_signing(self, request):
    """Inject presign auth params into the url before signing.

    This is the chance to add any additional query params needed before
    the signature is calculated.
    """
    request.headers = {}
    request.method = 'GET'
    # X-Amz-Signature is deliberately excluded.  From the docs: "The
    # Canonical Query String must include all the query parameters from
    # the preceding table except for X-Amz-Signature.
    auth_params = {
        'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
        'X-Amz-Credential': self.scope(request),
        'X-Amz-Date': request.context['timestamp'],
        'X-Amz-Expires': self._expires,
        'X-Amz-SignedHeaders': 'host',
    }
    if self.credentials.token is not None:
        auth_params['X-Amz-Security-Token'] = self.credentials.token
    url_parts = urlsplit(request.url)
    # No repeated query keys are expected, so each parse_qs value is a
    # single-element list we can collapse back to a scalar.
    query_dict = {k: v[0] for k, v in parse_qs(url_parts.query).items()}
    if request.data:
        # Query services form-encode operation params into the body, so
        # request.data is a dict() of those params; fold them into the
        # query string as well.
        query_dict.update(request.data)
        request.data = ''
    # The spec requires:
    #   https://<endpoint>?<operation params>&<auth params>
    # -- the two groups of params may not be mixed together into one
    # mapping and encoded in a single pass.
    operation_params = ''
    if query_dict:
        operation_params = percent_encode_sequence(query_dict) + '&'
    new_query = operation_params + percent_encode_sequence(auth_params)
    # The split result is an immutable tuple; rebuild the url swapping
    # in the new query string (index 3).
    p = url_parts
    request.url = urlunsplit((p[0], p[1], p[2], new_query, p[4]))
def _modify_request_before_signing(self, request):
    """Prepare the request for presigning by moving params to the url.

    This is the chance to add additional query params needed before the
    signature is calculated.
    """
    request.headers = {}
    request.method = 'GET'
    # X-Amz-Signature is deliberately excluded.  From the docs: "The
    # Canonical Query String must include all the query parameters from
    # the preceding table except for X-Amz-Signature.
    auth_params = {
        'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
        'X-Amz-Credential': self.scope(request),
        'X-Amz-Date': self.timestamp,
        'X-Amz-Expires': self._expires,
        'X-Amz-SignedHeaders': 'host',
    }
    if self.credentials.token is not None:
        auth_params['X-Amz-Security-Token'] = self.credentials.token
    split_url = urlsplit(request.url)
    # No repeated query keys are expected, so each parse_qs value is a
    # single-element list that collapses to a scalar.
    query_dict = dict(
        (k, v[0]) for k, v in parse_qs(split_url.query).items())
    # Per the spec, the final url *has* to be
    #   https://<endpoint>?<operation params>&<auth params>
    # so operation and auth params are encoded separately instead of
    # being merged into one mapping.
    operation_params = ''
    if request.data:
        # Query services form-encode operation params into the body, so
        # request.data is a dict() of those params; move them into the
        # query string too.
        query_dict.update(request.data)
        request.data = ''
    if query_dict:
        operation_params = percent_encode_sequence(query_dict) + '&'
    new_query_string = (operation_params +
                        percent_encode_sequence(auth_params))
    # The urlsplit tuple is immutable; rebuild the url with the new
    # query string in the query slot.
    request.url = urlunsplit((split_url.scheme, split_url.netloc,
                              split_url.path, new_query_string,
                              split_url.fragment))
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
    """Point *request* at *new_endpoint*'s host, keeping path and query.

    The scheme follows the new endpoint unless *use_new_scheme* is
    False, in which case the original scheme is retained.
    """
    new_parts = urlsplit(new_endpoint)
    old_parts = urlsplit(request.url)
    scheme = new_parts.scheme if use_new_scheme else old_parts.scheme
    final_endpoint = urlunsplit(
        (scheme, new_parts.netloc, old_parts.path, old_parts.query, ''))
    logger.debug('Updating URI from %s to %s' % (request.url, final_endpoint))
    request.url = final_endpoint
def switch_host_with_param(request, param_name):
    """Switch the request host to an endpoint named in the request body.

    When the JSON body contains a truthy *param_name* entry, the request
    url is rebuilt with that endpoint's scheme and host while the
    original path and query are preserved.
    """
    body = json.loads(request.data.decode('utf-8'))
    new_endpoint = body.get(param_name)
    if new_endpoint:
        new_parts = urlsplit(new_endpoint)
        old_parts = urlsplit(request.url)
        request.url = urlunsplit((new_parts.scheme, new_parts.netloc,
                                  old_parts.path, old_parts.query, ''))
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
    """Build a final endpoint from a new host and the original path/query.

    :param original_endpoint: The url whose path and query are kept.
    :param new_endpoint: The url supplying the replacement netloc.
    :param use_new_scheme: Take the scheme from *new_endpoint* when
        True; otherwise keep the original scheme.
    :return: The combined endpoint url.
    """
    new_parts = urlsplit(new_endpoint)
    old_parts = urlsplit(original_endpoint)
    scheme = new_parts.scheme if use_new_scheme else old_parts.scheme
    final_endpoint = urlunsplit((scheme, new_parts.netloc, old_parts.path,
                                 old_parts.query, ''))
    logger.debug('Updating URI from %s to %s' % (original_endpoint,
                                                 final_endpoint))
    return final_endpoint
def _prepend_to_host(self, url, prefix): url_components = urlsplit(url) parts = url_components.netloc.split('.') parts = [prefix] + parts new_netloc = '.'.join(parts) new_components = ( url_components.scheme, new_netloc, url_components.path, url_components.query, '' ) new_url = urlunsplit(new_components) return new_url
def _apply_signing_changes(self, aws_request, signed_crt_request):
    """Copy signing changes from the signed CRT request to the AWSRequest.

    After delegating to the base class, the query string produced by the
    CRT signer replaces the AWSRequest url's query component.
    """
    # Let the base class apply its header/body changes first.
    super()._apply_signing_changes(aws_request, signed_crt_request)
    signed_query = urlsplit(signed_crt_request.path).query
    parts = urlsplit(aws_request.url)
    # urlsplit() returns an immutable tuple, so a new url must be built
    # with the signed query swapped into the query slot.
    aws_request.url = urlunsplit(
        (parts.scheme, parts.netloc, parts.path, signed_query,
         parts.fragment))
def fix_s3_host(request, signature_version, region_name, **kwargs):
    """Switch eligible S3 requests to virtual-host style addressing.

    Runs just before signing.  When the path carries a bucket name (true
    for everything except ListAllBuckets) that conforms to DNS naming
    conventions, the request is rewritten from ``path-style`` to
    ``virtual hosting`` style addressing, which avoids 301 redirects for
    all bucket names that can be CNAME'd.
    """
    if request.auth_path is not None:
        # Already customized (possibly a retried request); do not apply
        # the rewrite a second time.
        return
    if _is_get_bucket_location_request(request):
        # GetBucketLocation must not use virtual host addressing so we
        # can avoid any sigv4 issues.
        logger.debug("Request is GetBucketLocation operation, not checking "
                     "for DNS compatibility.")
        return
    parts = urlsplit(request.url)
    request.auth_path = parts.path
    path_parts = parts.path.split('/')
    if signature_version in ('s3v4', 'v4'):
        # SigV4-signed requests keep path-style addressing.
        return
    if len(path_parts) <= 1:
        return
    bucket_name = path_parts[1]
    logger.debug('Checking for DNS compatible bucket for: %s', request.url)
    if not (check_dns_name(bucket_name) and _allowed_region(region_name)):
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     bucket_name)
        return
    # A bucket-level operation needs its auth_path terminated by a '/'
    # character.
    if len(path_parts) == 2 and request.auth_path[-1] != '/':
        request.auth_path += '/'
    path_parts.remove(bucket_name)
    host = bucket_name + '.' + 's3.amazonaws.com'
    new_uri = urlunsplit((parts.scheme, host, '/'.join(path_parts),
                          parts.query, ''))
    request.url = new_uri
    logger.debug('URI updated to: %s', new_uri)
def fix_s3_host(event_name, endpoint, request, auth, **kwargs):
    """Switch eligible S3 requests to virtual-host style addressing.

    Runs just before signing.  When the path carries a bucket name (true
    for everything except ListAllBuckets) that conforms to DNS naming
    conventions, the request is rewritten from ``path-style`` to
    ``virtual hosting`` style addressing, which avoids 301 redirects for
    all bucket names that can be CNAME'd.
    """
    if request.auth_path is not None:
        # auth_path already applied (possibly a retried request); the
        # customization must not run twice.
        return
    if _is_get_bucket_location_request(request):
        # GetBucketLocation must keep path-style addressing to avoid
        # any sigv4 issues.
        logger.debug("Request is GetBucketLocation operation, not checking "
                     "for DNS compatibility.")
        return
    parts = urlsplit(request.url)
    request.auth_path = parts.path
    path_parts = parts.path.split('/')
    if isinstance(auth, botocore.auth.SigV4Auth):
        # SigV4 requests keep path-style addressing.
        return
    if len(path_parts) <= 1:
        return
    bucket_name = path_parts[1]
    logger.debug('Checking for DNS compatible bucket for: %s', request.url)
    if not (check_dns_name(bucket_name) and
            _allowed_region(endpoint.region_name)):
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     bucket_name)
        return
    # A bucket-level operation needs its auth_path terminated by a '/'
    # character.
    if len(path_parts) == 2 and request.auth_path[-1] != '/':
        request.auth_path += '/'
    path_parts.remove(bucket_name)
    host = bucket_name + '.' + 's3.amazonaws.com'
    new_uri = urlunsplit((parts.scheme, host, '/'.join(path_parts),
                          parts.query, ''))
    request.url = new_uri
    logger.debug('URI updated to: %s', new_uri)
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
    """Combine *new_endpoint*'s host with *original_endpoint*'s path/query.

    :param original_endpoint: The url whose path and query are kept.
    :param new_endpoint: The url supplying the replacement netloc.
    :param use_new_scheme: When True the scheme also comes from
        *new_endpoint*; otherwise the original scheme is preserved.
    :return: The combined endpoint url.
    """
    replacement = urlsplit(new_endpoint)
    original = urlsplit(original_endpoint)
    scheme = replacement.scheme if use_new_scheme else original.scheme
    final_endpoint = urlunsplit(
        (scheme, replacement.netloc, original.path, original.query, "")
    )
    logger.debug(
        "Updating URI from %s to %s" % (original_endpoint, final_endpoint)
    )
    return final_endpoint
def switch_host_with_param(request, param_name):
    """Redirect the request to an endpoint named in its JSON body.

    When the decoded body contains a truthy *param_name* entry, the
    request url is rebuilt with that endpoint's scheme and host while
    the original path and query are preserved.
    """
    payload = json.loads(request.data.decode('utf-8'))
    if payload.get(param_name):
        target = urlsplit(payload[param_name])
        current = urlsplit(request.url)
        request.url = urlunsplit((target.scheme, target.netloc,
                                  current.path, current.query, ''))
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
    """Swap the request's host for *new_endpoint*'s host in place.

    Path and query come from the original url; the scheme follows the
    new endpoint unless *use_new_scheme* is False.
    """
    target = urlsplit(new_endpoint)
    current = urlsplit(request.url)
    scheme = current.scheme
    if use_new_scheme:
        scheme = target.scheme
    final_endpoint = urlunsplit(
        (scheme, target.netloc, current.path, current.query, ''))
    logger.debug('Updating URI from %s to %s' % (request.url, final_endpoint))
    request.url = final_endpoint
def _modify_request_before_signing(self, request):
    """Fold operation, extra, and body params into the presigned url.

    After the base-class modifications, the auto-set form-encoding
    content type is stripped (it makes no sense for presigned urls) and
    all params end up serialized into the query string.
    """
    super()._modify_request_before_signing(request)
    # We automatically set this header; if it still carries the auto-set
    # value it should not appear on a presigned url.
    if request.headers.get('content-type') == (
            'application/x-www-form-urlencoded; charset=utf-8'):
        del request.headers['content-type']
    url_parts = urlsplit(request.url)
    # parse_qs yields single-element lists here (no repeated keys are
    # expected), so collapse each value back to a scalar.
    query_dict = {
        k: v[0]
        for k, v in parse_qs(url_parts.query,
                             keep_blank_values=True).items()
    }
    if request.params:
        query_dict.update(request.params)
        request.params = {}
    # The spec is particular: the result *has* to be
    #   https://<endpoint>?<operation params>&<auth params>
    # so operation and auth params may not be interleaved.
    if request.data:
        # Body params must also move into the query string; convert the
        # body to a dict first.
        query_dict.update(_get_body_as_dict(request))
        request.data = ''
    new_query_string = percent_encode_sequence(query_dict)
    # urlsplit results are immutable tuples, so rebuild the url with the
    # new query string in slot 3 (scheme, netloc, path, query, fragment).
    p = url_parts
    request.url = urlunsplit((p[0], p[1], p[2], new_query_string, p[4]))
def switch_to_virtual_host_style(request, signature_version,
                                 default_endpoint_url=None, **kwargs):
    """Force virtual host style s3 addressing.

    The switch happens no matter the signature version (which is only
    taken into consideration for the default case).  If the bucket is
    not DNS compatible an InvalidDNSName is thrown.

    :param request: A AWSRequest object that is about to be sent.
    :param signature_version: The signature version to sign with
    :param default_endpoint_url: The endpoint to use when switching to a
        virtual style. If None is supplied, the virtual host will be
        constructed from the url of the request.
    """
    if request.auth_path is not None:
        # Customization already applied -- this may be a retried
        # request, so it must not run again.
        return
    elif _is_get_bucket_location_request(request):
        # GetBucketLocation must avoid virtual host style addressing so
        # we can dodge any sigv4 issues.
        logger.debug("Request is GetBucketLocation operation, not checking "
                     "for DNS compatibility.")
        return
    split_url = urlsplit(request.url)
    request.auth_path = split_url.path
    segments = split_url.path.split('/')
    # Work out which endpoint the bucket name is prepended to.
    endpoint_host = default_endpoint_url
    if endpoint_host is None:
        endpoint_host = split_url.netloc
    if len(segments) > 1:
        bucket = segments[1]
        if not bucket:
            # An empty bucket name -- nothing to check for dns
            # compatibility.
            return
        logger.debug('Checking for DNS compatible bucket for: %s',
                     request.url)
        if check_dns_name(bucket):
            # Bucket-level operations must terminate the auth_path with
            # a '/' character.
            if len(segments) == 2 and request.auth_path[-1] != '/':
                request.auth_path += '/'
            segments.remove(bucket)
            # The path must be at least '/', e.g. for CreateBucket when
            # DNS style is in use; an empty path would be incorrect.
            new_path = '/'.join(segments) or '/'
            new_uri = urlunsplit((split_url.scheme,
                                  bucket + '.' + endpoint_host,
                                  new_path, split_url.query, ''))
            request.url = new_uri
            logger.debug('URI updated to: %s', new_uri)
        else:
            raise InvalidDNSNameError(bucket_name=bucket)