def create_domain():
    region = ElasticsearchServiceBackend.get()
    data = json.loads(to_str(request.data))
    domain_name = data["DomainName"]
    with _domain_mutex:
        if domain_name in region.es_domains:
            # domain already created
            return error_response(error_type="ResourceAlreadyExistsException")
        # "create" domain data
        region.es_domains[domain_name] = data
        # lazy-init the cluster, and set the data["Created"] flag
        _create_cluster(domain_name, data)
        # create result document
        result = get_domain_status(domain_name)

    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_CREATE_DOMAIN,
        payload={"n": event_publisher.get_hash(domain_name)},
    )
    persistence.record("es", request=request)
    return jsonify(result)
def create_domain():
    data = json.loads(to_str(request.data))
    domain_name = data['DomainName']
    if domain_name in ES_DOMAINS:
        return error_response(error_type='ResourceAlreadyExistsException')
    ES_DOMAINS[domain_name] = data
    data['Created'] = False

    def do_start(*args):
        # start actual Elasticsearch instance
        version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION
        start_elasticsearch_instance(version=version)
        data['Created'] = True

    # start ES instance in the background
    FuncThread(do_start).start()
    # sleep a short while, then return
    time.sleep(5)
    result = get_domain_status(domain_name)
    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_CREATE_DOMAIN,
        payload={'n': event_publisher.get_hash(domain_name)})
    persistence.record('es', request=request)
    return jsonify(result)
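# Illustrative sketch (not one of the handler versions above): the variant above returns before
# Elasticsearch is actually up and signals readiness later via the data['Created'] flag. The same
# start-in-background pattern can be reproduced with the standard library alone; threading.Thread
# stands in for LocalStack's FuncThread helper, and start_backend/DOMAIN are hypothetical names.
import threading
import time

DOMAIN = {'DomainName': 'my-domain', 'Created': False}

def start_backend():
    time.sleep(0.1)  # placeholder for the slow startup work
    DOMAIN['Created'] = True  # flip the readiness flag once the backend is up

threading.Thread(target=start_backend, daemon=True).start()
print(DOMAIN['Created'])  # typically still False right after the call returns
time.sleep(0.2)
print(DOMAIN['Created'])  # True once the background start has completed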
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
    if return_forward_info:
        modified_data = None

        # If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/atlassian/localstack/issues/98
        # TODO we should evaluate whether to replace moto s3 with scality/S3:
        # https://github.com/scality/S3/issues/237
        if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
            modified_data = strip_chunk_signatures(data)

        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        parsed = urlparse.urlparse(path)
        query = parsed.query
        path = parsed.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query)
        if method == 'PUT' and (query == 'notification' or 'notification' in query_map):
            tree = ET.fromstring(data)
            queue_config = tree.find('{%s}QueueConfiguration' % XMLNS_S3)
            if len(queue_config):
                S3_NOTIFICATIONS[bucket] = {
                    'Id': get_xml_text(queue_config, 'Id'),
                    'Event': get_xml_text(queue_config, 'Event', ns=XMLNS_S3),
                    'Queue': get_xml_text(queue_config, 'Queue', ns=XMLNS_S3),
                    'Topic': get_xml_text(queue_config, 'Topic', ns=XMLNS_S3),
                    'CloudFunction': get_xml_text(queue_config, 'CloudFunction', ns=XMLNS_S3)
                }
        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)
        if modified_data:
            return Request(data=modified_data, headers=headers)
        return True

    # get subscribers and send bucket notifications
    if method in ('PUT', 'DELETE') and '/' in path[1:]:
        parts = path[1:].split('/', 1)
        bucket_name = parts[0]
        object_path = '/%s' % parts[1]
        send_notifications(method, bucket_name, object_path)

    # append CORS headers to response
    if response:
        parsed = urlparse.urlparse(path)
        bucket_name = parsed.path.split('/')[0]
        append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
def delete_domain(domain_name):
    if domain_name not in ES_DOMAINS:
        return error_response(error_type='ResourceNotFoundException')
    result = get_domain_status(domain_name, deleted=True)
    ES_DOMAINS.pop(domain_name)
    if not ES_DOMAINS:
        cleanup_elasticsearch_instance()
    # record event
    event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN,
        payload={'n': event_publisher.get_hash(domain_name)})
    persistence.record('es', request=request)
    return jsonify(result)
def create_domain():
    data = json.loads(to_str(request.data))
    domain_name = data['DomainName']
    if domain_name in ES_DOMAINS:
        return error_response(error_type='ResourceAlreadyExistsException')
    ES_DOMAINS[domain_name] = data
    # start actual Elasticsearch instance
    start_elasticsearch_instance()
    result = get_domain_status(domain_name)
    # record event
    event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN,
        payload={'n': event_publisher.get_hash(domain_name)})
    persistence.record('es', request=request)
    return jsonify(result)
def delete_domain(domain_name):
    with _domain_mutex:
        if domain_name not in ES_DOMAINS:
            return error_response(error_type="ResourceNotFoundException")
        result = get_domain_status(domain_name, deleted=True)
        del ES_DOMAINS[domain_name]
        _cleanup_cluster(domain_name)

    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_DELETE_DOMAIN,
        payload={"n": event_publisher.get_hash(domain_name)},
    )
    persistence.record("es", request=request)
    return jsonify(result)
def delete_domain(domain_name):
    region = ElasticsearchServiceBackend.get()
    with _domain_mutex:
        if domain_name not in region.es_domains:
            return error_response(error_type="ResourceNotFoundException")
        result = get_domain_status(domain_name, deleted=True)
        del region.es_domains[domain_name]
        _remove_cluster(domain_name)

    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_DELETE_DOMAIN,
        payload={"n": event_publisher.get_hash(domain_name)},
    )
    persistence.record("es", request=request)
    return jsonify(result)
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
    if return_forward_info:
        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        parsed = urlparse.urlparse(path)
        query = parsed.query
        path = parsed.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query)
        if method == 'PUT' and (query == 'notification' or 'notification' in query_map):
            tree = ET.fromstring(data)
            queue_config = tree.find('{%s}QueueConfiguration' % XMLNS_S3)
            if len(queue_config):
                S3_NOTIFICATIONS[bucket] = {
                    'Id': get_xml_text(queue_config, 'Id'),
                    'Event': get_xml_text(queue_config, 'Event', ns=XMLNS_S3),
                    'Queue': get_xml_text(queue_config, 'Queue', ns=XMLNS_S3),
                    'Topic': get_xml_text(queue_config, 'Topic', ns=XMLNS_S3),
                    'CloudFunction': get_xml_text(queue_config, 'CloudFunction', ns=XMLNS_S3)
                }
        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)
        return True

    # get subscribers and send bucket notifications
    if method in ('PUT', 'DELETE') and '/' in path[1:]:
        parts = path[1:].split('/', 1)
        bucket_name = parts[0]
        object_path = '/%s' % parts[1]
        send_notifications(method, bucket_name, object_path)

    # append CORS headers to response
    if response:
        parsed = urlparse.urlparse(path)
        bucket_name = parsed.path.split('/')[0]
        append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
def create_domain():
    from localstack.services.es import es_starter
    data = json.loads(to_str(request.data))
    domain_name = data['DomainName']
    if domain_name in ES_DOMAINS:
        return error_response(error_type='ResourceAlreadyExistsException')
    ES_DOMAINS[domain_name] = data
    data['Created'] = False

    def do_start(*args):
        # start actual Elasticsearch instance
        version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION
        start_elasticsearch_instance(version=version)
        data['Created'] = True

    try:
        if es_starter.check_elasticsearch():
            data['Created'] = True
        else:
            LOG.error('Elasticsearch status is not healthy, please check the application status and logs')
    except requests.exceptions.ConnectionError:
        # Catch first run
        FuncThread(do_start).start()
        LOG.info('Elasticsearch is starting for the first time, please wait..')
        data['Created'] = True

    result = get_domain_status(domain_name)
    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_CREATE_DOMAIN,
        payload={'n': event_publisher.get_hash(domain_name)})
    persistence.record('es', request=request)
    return jsonify(result)
def forward_request(self, method, path, data, headers):
    LOGGER.debug('forward_request - method: "%s"' % method)
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    object_path = parsed.path
    LOGGER.debug('forward_request - query: "%s"' % query)
    LOGGER.debug('forward_request - path: "%s"' % path)
    LOGGER.debug('forward_request - host: "%s"' % headers['host'])
    LOGGER.debug('forward_request - object_path: "%s"' % object_path)

    # Check bucket name in host
    bucket = None
    hostname_parts = headers['host'].split('.')
    if len(hostname_parts) > 1:
        bucket = hostname_parts[0]

    # No bucket name in host, check in path.
    if (not bucket or len(bucket) == 0):
        bucket = get_bucket_name(path, headers)
    LOGGER.debug('forward_request - Bucket name: "%s"' % bucket)

    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        LOGGER.debug('forward_request - query: "%s"' % query)
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                for dest in NOTIFICATION_DESTINATION_TYPES:
                    if dest in notif:
                        dest_dict = {
                            '%sConfiguration' % dest: {
                                'Id': uuid.uuid4(),
                                dest: notif[dest],
                                'Event': notif['Event'],
                                'Filter': notif['Filter']
                            }
                        }
                        result += xmltodict.unparse(dest_dict, full_document=False)
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            LOGGER.debug('forward_request - method: "%s"' % method)
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            S3_NOTIFICATIONS.pop(bucket, None)
            for dest in NOTIFICATION_DESTINATION_TYPES:
                LOGGER.debug('forward_request - NOTIFICATION_DESTINATION_TYPES - dest: "%s"' % dest)
                config = notif_config.get('%sConfiguration' % (dest))
                if config:
                    events = config.get('Event')
                    if isinstance(events, six.string_types):
                        events = [events]
                    event_filter = config.get('Filter', {})
                    # make sure FilterRule is an array
                    s3_filter = _get_s3_filter(event_filter)
                    if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                        s3_filter['FilterRule'] = [s3_filter['FilterRule']]
                    # create final details dict
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': events,
                        dest: config.get(dest),
                        'Filter': event_filter
                    }
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    LOGGER.debug('forward_request - S3_NOTIFICATIONS - bucket: "%s"' % bucket)
                    S3_NOTIFICATIONS[bucket] = clone(notification_details)

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    LOGGER.debug('forward_request - query_map: "%s"' % query_map)
    if method == 'PUT' and 'x-amz-meta-filename' in query_map and bucket is not None and object_path is not None:
        unique_id = get_unique_id(bucket, object_path)
        set_user_defined_metadata(unique_id, query_map)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def forward_request(self, method, path, data, headers):
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                events_string = '\n'.join(['<Event>%s</Event>' % e for e in notif['Event']])
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    if dest in notif:
                        result += ("""<{dest}Configuration>
                                <Id>{uid}</Id>
                                <{dest}>{endpoint}</{dest}>
                                {events}
                            </{dest}Configuration>""").format(
                            dest=dest, uid=uuid.uuid4(),
                            endpoint=notif[dest], events=events_string)
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            for dest in ['Queue', 'Topic', 'CloudFunction']:
                config = notif_config.get('%sConfiguration' % (dest))
                if config:
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': config.get('Event'),
                        dest: config.get(dest),
                        'Filter': config.get('Filter')
                    }
                    S3_NOTIFICATIONS[bucket] = json.loads(json.dumps(notification_details))

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def forward_request(self, method, path, data, headers):
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    if dest in notif:
                        result += ('''<{dest}Configuration>
                                <Id>{uid}</Id>
                                <{dest}>{endpoint}</{dest}>
                                <Event>{event}</Event>
                            </{dest}Configuration>''').format(
                            dest=dest, uid=uuid.uuid4(),
                            endpoint=S3_NOTIFICATIONS[bucket][dest],
                            event=S3_NOTIFICATIONS[bucket]['Event'])
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            tree = ET.fromstring(data)
            for dest in ['Queue', 'Topic', 'CloudFunction']:
                config = tree.find('{%s}%sConfiguration' % (XMLNS_S3, dest))
                if config is not None and len(config):
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = {
                        'Id': get_xml_text(config, 'Id'),
                        'Event': get_xml_text(config, 'Event', ns=XMLNS_S3),
                        # TODO extract 'Events' attribute (in addition to 'Event')
                        dest: get_xml_text(config, dest, ns=XMLNS_S3),
                    }

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def return_response(self, method, path, data, headers, response):
    path = to_str(path)
    method = to_str(method)
    bucket_name = get_bucket_name(path, headers)

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers, response=response)

    # No path-name based bucket name? Try host-based
    hostname_parts = headers['host'].split('.')
    if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
        bucket_name = hostname_parts[0]

    # POST requests to S3 may include a success_action_redirect or
    # success_action_status field, which should be used to redirect a
    # client to a new location.
    key = None
    if method == 'POST':
        key, redirect_url = multipart_content.find_multipart_key_value(data, headers)
        if key and redirect_url:
            response.status_code = 303
            response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
            LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

        key, status_code = multipart_content.find_multipart_key_value(
            data, headers, 'success_action_status')
        if response.status_code == 200 and status_code == '201' and key:
            response.status_code = 201
            response._content = self.get_201_response(key, bucket_name)
            response.headers['Content-Length'] = str(len(response._content))
            response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            return response

    parsed = urlparse.urlparse(path)
    bucket_name_in_host = headers['host'].startswith(bucket_name)
    should_send_notifications = all([
        method in ('PUT', 'POST', 'DELETE'),
        '/' in path[1:] or bucket_name_in_host or key,
        # check if this is an actual put object request, because it could also be
        # a put bucket request with a path like this: /bucket_name/
        bucket_name_in_host or key or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
        self.is_query_allowable(method, parsed.query)
    ])

    # get subscribers and send bucket notifications
    if should_send_notifications:
        # if we already have a good key, use it, otherwise examine the path
        if key:
            object_path = '/' + key
        elif bucket_name_in_host:
            object_path = parsed.path
        else:
            parts = parsed.path[1:].split('/', 1)
            object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
        version_id = response.headers.get('x-amz-version-id', None)
        send_notifications(method, bucket_name, object_path, version_id)

    # publish event for creation/deletion of buckets:
    if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
        event_type = (event_publisher.EVENT_S3_CREATE_BUCKET
            if method == 'PUT' else event_publisher.EVENT_S3_DELETE_BUCKET)
        event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

    # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
    if method == 'PUT' and parsed.query == 'policy':
        response._content = ''
        response.status_code = 204
        return response

    # emulate ErrorDocument functionality if a website is configured
    if method == 'GET' and response.status_code == 404 and parsed.query != 'website':
        s3_client = aws_stack.connect_to_service('s3')
        try:
            # Verify the bucket exists in the first place--if not, we want normal processing of the 404
            s3_client.head_bucket(Bucket=bucket_name)
            website_config = s3_client.get_bucket_website(Bucket=bucket_name)
            error_doc_key = website_config.get('ErrorDocument', {}).get('Key')
            if error_doc_key:
                error_object = s3_client.get_object(Bucket=bucket_name, Key=error_doc_key)
                response.status_code = 200
                response._content = error_object['Body'].read()
                response.headers['content-length'] = len(response._content)
        except ClientError:
            # Pass on the 404 as usual
            pass

    if response:
        reset_content_length = False

        # append CORS headers and other annotations/patches to response
        append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
        append_last_modified_headers(response=response)
        append_list_objects_marker(method, path, data, response)
        fix_location_constraint(response)
        fix_range_content_type(bucket_name, path, headers, response)
        fix_delete_objects_response(bucket_name, method, parsed, data, headers, response)
        fix_metadata_key_underscores(response=response)
        fix_creation_date(method, path, response=response)
        fix_etag_for_multipart(data, headers, response)

        # Remove body from PUT response on presigned URL
        # https://github.com/localstack/localstack/issues/1317
        if method == 'PUT' and ('X-Amz-Security-Token=' in path or
                'X-Amz-Credential=' in path or 'AWSAccessKeyId=' in path):
            response._content = ''
            reset_content_length = True

        response_content_str = None
        try:
            response_content_str = to_str(response._content)
        except Exception:
            pass

        # Honor response header overrides
        # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
        if method == 'GET':
            query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
            for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
                if param_name in query_map:
                    response.headers[header_name] = query_map[param_name][0]

        if response_content_str and response_content_str.startswith('<'):
            is_bytes = isinstance(response._content, six.binary_type)
            response._content = response_content_str

            append_last_modified_headers(response=response, content=response_content_str)

            # We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            # Note: make sure to return XML docs verbatim: https://github.com/localstack/localstack/issues/1037
            if method != 'GET' or not is_object_specific_request(path, headers):
                response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)

            # update Location information in response payload
            response._content = self._update_location(response._content, bucket_name)

            # convert back to bytes
            if is_bytes:
                response._content = to_bytes(response._content)

            # fix content-type: https://github.com/localstack/localstack/issues/618
            # https://github.com/localstack/localstack/issues/549
            # https://github.com/localstack/localstack/issues/854
            if 'text/html' in response.headers.get('Content-Type', '') \
                    and not response_content_str.lower().startswith('<!doctype html'):
                response.headers['Content-Type'] = 'application/xml; charset=utf-8'
                reset_content_length = True

        # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
        if method == 'DELETE':
            reset_content_length = True

        if reset_content_length:
            response.headers['content-length'] = len(response._content)
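# Quick illustration of the un-pretty-print step used in return_response above: the regex collapses
# whitespace between XML tags but keeps the newline right after the <?xml ...?> declaration, because
# the ([^\?]) group excludes the '?' that closes the declaration. The sample document is made up.
import re

pretty = '<?xml version="1.0"?>\n<ListBucketResult>\n  <Name>test-bucket</Name>\n</ListBucketResult>'
compact = re.sub(r'([^\?])>\n\s*<', r'\1><', pretty, flags=re.MULTILINE)
print(compact)
# <?xml version="1.0"?>
# <ListBucketResult><Name>test-bucket</Name></ListBucketResult>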
def update_s3(method, path, data, headers, response=None, return_forward_info=False):
    if return_forward_info:
        modified_data = None

        # If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/atlassian/localstack/issues/98
        # TODO we should evaluate whether to replace moto s3 with scality/S3:
        # https://github.com/scality/S3/issues/237
        if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
            modified_data = strip_chunk_signatures(data)

        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        parsed = urlparse.urlparse(path)
        query = parsed.query
        path = parsed.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query)
        if query == 'notification' or 'notification' in query_map:
            response = Response()
            response.status_code = 200
            if method == 'GET':
                # TODO check if bucket exists
                result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
                if bucket in S3_NOTIFICATIONS:
                    notif = S3_NOTIFICATIONS[bucket]
                    for dest in ['Queue', 'Topic', 'CloudFunction']:
                        if dest in notif:
                            result += ('''<{dest}Configuration>
                                    <Id>{uid}</Id>
                                    <{dest}>{endpoint}</{dest}>
                                    <Event>{event}</Event>
                                </{dest}Configuration>''').format(
                                dest=dest, uid=uuid.uuid4(),
                                endpoint=S3_NOTIFICATIONS[bucket][dest],
                                event=S3_NOTIFICATIONS[bucket]['Event'])
                result += '</NotificationConfiguration>'
                response._content = result

            if method == 'PUT':
                tree = ET.fromstring(data)
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    config = tree.find('{%s}%sConfiguration' % (XMLNS_S3, dest))
                    if config is not None and len(config):
                        # TODO: what if we have multiple destinations - would we overwrite the config?
                        S3_NOTIFICATIONS[bucket] = {
                            'Id': get_xml_text(config, 'Id'),
                            'Event': get_xml_text(config, 'Event', ns=XMLNS_S3),
                            # TODO extract 'Events' attribute (in addition to 'Event')
                            dest: get_xml_text(config, dest, ns=XMLNS_S3),
                        }

            # return response for ?notification request
            return response

        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)

        if modified_data:
            return Request(data=modified_data, headers=headers, method=method)
        return True

    # get subscribers and send bucket notifications
    if method in ('PUT', 'DELETE') and '/' in path[1:]:
        parts = path[1:].split('/', 1)
        bucket_name = parts[0]
        object_path = '/%s' % parts[1]
        send_notifications(method, bucket_name, object_path)

    # append CORS headers to response
    if response:
        parsed = urlparse.urlparse(path)
        bucket_name = parsed.path.split('/')[0]
        append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)

        # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
        # https://github.com/jserver/mock-s3/pull/9/files
        # https://github.com/localstack/localstack/issues/183
        response_content_str = None
        try:
            response_content_str = to_str(response._content)
        except Exception as e:
            pass
        if response_content_str and response_content_str.startswith('<'):
            is_bytes = isinstance(response._content, six.binary_type)
            response._content = re.sub(r'>\n\s*<', '><', response_content_str, flags=re.MULTILINE)
            if is_bytes:
                response._content = to_bytes(response._content)
            response.headers['content-length'] = len(response._content)
def forward_request(self, method, path, data, headers):
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                for dest in NOTIFICATION_DESTINATION_TYPES:
                    if dest in notif:
                        dest_dict = {
                            '%sConfiguration' % dest: {
                                'Id': uuid.uuid4(),
                                dest: notif[dest],
                                'Event': notif['Event'],
                                'Filter': notif['Filter']
                            }
                        }
                        result += xmltodict.unparse(dest_dict, full_document=False)
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            S3_NOTIFICATIONS.pop(bucket, None)
            for dest in NOTIFICATION_DESTINATION_TYPES:
                config = notif_config.get('%sConfiguration' % (dest))
                if config:
                    events = config.get('Event')
                    if isinstance(events, six.string_types):
                        events = [events]
                    event_filter = config.get('Filter', {})
                    # make sure FilterRule is an array
                    s3_filter = _get_s3_filter(event_filter)
                    if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                        s3_filter['FilterRule'] = [s3_filter['FilterRule']]
                    # create final details dict
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': events,
                        dest: config.get(dest),
                        'Filter': event_filter
                    }
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = clone(notification_details)

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
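# Illustrative, self-contained example of the PUT ?notification parsing done above: xmltodict turns
# a single QueueConfiguration into nested dicts, and a lone <Event> element comes back as a plain
# string, which is why the handler normalizes it into a list (Filter/FilterRule get the same
# treatment). The XML body below is a made-up example request, not taken from the handlers above.
import xmltodict
import six

body = """<NotificationConfiguration>
  <QueueConfiguration>
    <Id>my-id</Id>
    <Queue>arn:aws:sqs:us-east-1:000000000000:my-queue</Queue>
    <Event>s3:ObjectCreated:*</Event>
  </QueueConfiguration>
</NotificationConfiguration>"""

notif_config = xmltodict.parse(body)['NotificationConfiguration']
config = notif_config['QueueConfiguration']
events = config.get('Event')
if isinstance(events, six.string_types):  # single event -> wrap into a list
    events = [events]
print(events)           # ['s3:ObjectCreated:*']
print(config['Queue'])  # 'arn:aws:sqs:us-east-1:000000000000:my-queue'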
def forward_request(self, method, path, data, headers):
    # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
    # Note that all S3 clients using LocalStack need to enable path style addressing.
    if 's3.amazonaws.com' not in headers.get('host', ''):
        headers['host'] = 'localhost'

    # check content md5 hash integrity
    if 'Content-MD5' in headers:
        response = check_content_md5(data, headers)
        if response is not None:
            return response

    modified_data = None

    # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
    to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
    if data and data.startswith(to_bytes('<')) and to_find in data:
        modified_data = data.replace(to_find, to_bytes(''))

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(modified_data or data)
        headers['content-length'] = headers.get('x-amz-decoded-content-length')

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # parse query params
    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query, keep_blank_values=True)
    if query == 'notification' or 'notification' in query_map:
        # handle and return response for ?notification request
        response = handle_notification_request(bucket, method, data)
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if modified_data is not None:
        return Request(data=modified_data, headers=headers, method=method)
    return True
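# Small demo of why keep_blank_values=True matters for subresource detection in the handler above:
# a bare "?notification" query has no value, and parse_qs drops it unless blank values are kept.
# The query == 'notification' comparison also covers the bare case, but the flag makes the
# query_map check work for queries such as "notification&foo=bar" as well.
try:
    from urllib import parse as urlparse  # Python 3
except ImportError:
    import urlparse  # Python 2

print(urlparse.parse_qs('notification'))                          # {}
print(urlparse.parse_qs('notification', keep_blank_values=True))  # {'notification': ['']}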
def forward_request(self, method, path, data, headers):
    # parse path and query params
    parsed_path = urlparse.urlparse(path)

    # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
    # Note that all S3 clients using LocalStack need to enable path style addressing.
    if 's3.amazonaws.com' not in headers.get('host', ''):
        headers['host'] = 'localhost'

    # check content md5 hash integrity if not a copy request
    if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path):
        response = check_content_md5(data, headers)
        if response is not None:
            return response

    modified_data = None

    # check bucket name
    bucket_name = get_bucket_name(path, headers)
    if method == 'PUT' and not re.match(BUCKET_NAME_REGEX, bucket_name):
        if len(parsed_path.path) <= 1:
            return error_response('Unable to extract valid bucket name. Please ensure that your AWS SDK is ' +
                'configured to use path style addressing, or send a valid <Bucket>.s3.amazonaws.com "Host" header',
                'InvalidBucketName', status_code=400)
        return error_response('The specified bucket is not valid.', 'InvalidBucketName', status_code=400)

    # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
    to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
    if data and data.startswith(to_bytes('<')) and to_find in data:
        modified_data = data.replace(to_find, to_bytes(''))

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get(CONTENT_SHA256_HEADER) == STREAMING_HMAC_PAYLOAD:
        modified_data = strip_chunk_signatures(modified_data or data)
        headers['content-length'] = headers.get('x-amz-decoded-content-length')

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # parse query params
    query = parsed_path.query
    path = parsed_path.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query, keep_blank_values=True)

    # remap metadata query params (not supported in moto) to request headers
    append_metadata_headers(method, query_map, headers)

    # apply fixes
    headers_changed = fix_metadata_key_underscores(request_headers=headers)

    if query == 'notification' or 'notification' in query_map:
        # handle and return response for ?notification request
        response = handle_notification_request(bucket, method, data)
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if query == 'replication' or 'replication' in query_map:
        if method == 'GET':
            return get_replication(bucket)
        if method == 'PUT':
            return set_replication(bucket, data)

    if query == 'encryption' or 'encryption' in query_map:
        if method == 'GET':
            return get_encryption(bucket)
        if method == 'PUT':
            return set_encryption(bucket, data)

    if query == 'object-lock' or 'object-lock' in query_map:
        if method == 'GET':
            return get_object_lock(bucket)
        if method == 'PUT':
            return set_object_lock(bucket, data)

    if modified_data is not None or headers_changed:
        return Request(data=modified_data or data, headers=headers, method=method)
    return True
def forward_request(self, method, path, data, headers):
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                for dest in ['Queue', 'Topic', 'CloudFunction']:
                    if dest in notif:
                        dest_dict = {
                            '%sConfiguration' % dest: {
                                'Id': uuid.uuid4(),
                                dest: notif[dest],
                                'Event': notif['Event'],
                                'Filter': notif['Filter']
                            }
                        }
                        result += xmltodict.unparse(dest_dict, full_document=False)
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            S3_NOTIFICATIONS.pop(bucket, None)
            for dest in ['Queue', 'Topic', 'CloudFunction']:
                config = notif_config.get('%sConfiguration' % (dest))
                if config:
                    events = config.get('Event')
                    if isinstance(events, six.string_types):
                        events = [events]
                    event_filter = config.get('Filter', {})
                    # make sure FilterRule is an array
                    s3_filter = _get_s3_filter(event_filter)
                    if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                        s3_filter['FilterRule'] = [s3_filter['FilterRule']]
                    # create final details dict
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': events,
                        dest: config.get(dest),
                        'Filter': event_filter
                    }
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = clone(notification_details)

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True