Code Example #1
File: kinesis_listener.py Project: bbc/localstack
    def return_response(self, method, path, data, headers, response):
        action = headers.get('X-Amz-Target')
        data = json.loads(to_str(data))

        records = []
        if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
            event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM
                else event_publisher.EVENT_KINESIS_DELETE_STREAM)
            event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(data.get('StreamName'))})
        elif action == ACTION_PUT_RECORD:
            response_body = json.loads(to_str(response.content))
            event_record = {
                'data': data['Data'],
                'partitionKey': data['PartitionKey'],
                'sequenceNumber': response_body.get('SequenceNumber')
            }
            event_records = [event_record]
            stream_name = data['StreamName']
            lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_PUT_RECORDS:
            event_records = []
            response_body = json.loads(to_str(response.content))
            response_records = response_body['Records']
            records = data['Records']
            for index in range(0, len(records)):
                record = records[index]
                event_record = {
                    'data': record['Data'],
                    'partitionKey': record['PartitionKey'],
                    'sequenceNumber': response_records[index].get('SequenceNumber')
                }
                event_records.append(event_record)
            stream_name = data['StreamName']
            lambda_api.process_kinesis_records(event_records, stream_name)
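
Note: the PutRecords branch above pairs each request record with the response record at the same index. A minimal standalone sketch of that pairing (illustrative data only, not part of the listener):

import json

request_records = [
    {'Data': 'aGVsbG8=', 'PartitionKey': 'pk-1'},
    {'Data': 'd29ybGQ=', 'PartitionKey': 'pk-2'},
]
response_records = [
    {'SequenceNumber': 'seq-1'},
    {'SequenceNumber': 'seq-2'},
]
# zip() expresses the same index-wise pairing as the range() loop above
event_records = [
    {
        'data': req['Data'],
        'partitionKey': req['PartitionKey'],
        'sequenceNumber': resp.get('SequenceNumber'),
    }
    for req, resp in zip(request_records, response_records)
]
print(json.dumps(event_records, indent=2))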
Code Example #2
File: lambda_api.py Project: bbc/localstack
def delete_function(function):
    """ Delete an existing function
        ---
        operationId: 'deleteFunction'
        parameters:
            - name: 'request'
              in: body
    """
    arn = func_arn(function)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    try:
        arn_to_lambda.pop(arn)
    except KeyError:
        return error_response('Function does not exist: %s' % function, 404, error_type='ResourceNotFoundException')

    event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
        payload={'n': event_publisher.get_hash(function)})
    # remove all event source mappings that point to this function
    event_source_mappings[:] = [
        mapping for mapping in event_source_mappings
        if mapping['FunctionArn'] != arn
    ]
    result = {}
    return jsonify(result)
Code Example #3
File: lambda_api.py Project: bbc/localstack
def create_function():
    """ Create new function
        ---
        operationId: 'createFunction'
        parameters:
            - name: 'request'
              in: body
    """
    arn = 'n/a'
    try:
        data = json.loads(to_str(request.data))
        lambda_name = data['FunctionName']
        event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
            payload={'n': event_publisher.get_hash(lambda_name)})
        arn = func_arn(lambda_name)
        if arn in arn_to_lambda:
            return error_response('Function already exists: %s' %
                lambda_name, 409, error_type='ResourceConflictException')
        arn_to_lambda[arn] = func_details = LambdaFunction(arn)
        func_details.versions = {'$LATEST': {'CodeSize': 50}}
        func_details.handler = data['Handler']
        func_details.runtime = data['Runtime']
        func_details.envvars = data.get('Environment', {}).get('Variables', {})
        func_details.timeout = data.get('Timeout')
        result = set_function_code(data['Code'], lambda_name)
        if isinstance(result, Response):
            del arn_to_lambda[arn]
            return result
        result.update({
            'DeadLetterConfig': data.get('DeadLetterConfig'),
            'Description': data.get('Description'),
            'Environment': {'Error': {}, 'Variables': func_details.envvars},
            'FunctionArn': arn,
            'FunctionName': lambda_name,
            'Handler': func_details.handler,
            'MemorySize': data.get('MemorySize'),
            'Role': data.get('Role'),
            'Runtime': func_details.runtime,
            'Timeout': data.get('Timeout'),
            'TracingConfig': {},
            'VpcConfig': {'SecurityGroupIds': [None], 'SubnetIds': [None], 'VpcId': None}
        })
        return jsonify(result or {})
    except Exception as e:
        # use pop() with a default: if we failed before registering the
        # function, 'arn' is not in the dict and del would raise KeyError
        arn_to_lambda.pop(arn, None)
        return error_response('Unknown error: %s %s' % (e, traceback.format_exc()))
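
Note: judging from the fields the handler reads, a minimal request body would look roughly like the sketch below. Values are placeholders, and the endpoint URL/port are assumptions for illustration, not taken from the example.

import json
import requests  # any HTTP client works; requests is assumed here

payload = {
    'FunctionName': 'my-func',                     # required
    'Handler': 'handler.handler',                  # required
    'Runtime': 'python3.6',                        # required
    'Code': {'ZipFile': '<base64-encoded zip>'},   # required
    'Environment': {'Variables': {'FOO': 'bar'}},  # optional
    'Timeout': 3,                                  # optional
}
# hypothetical local endpoint; adjust host/port to your setup
requests.post('http://localhost:4574/2015-03-31/functions', data=json.dumps(payload))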
Code Example #4
    def return_response(self, method, path, data, headers, response):
        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        action = headers.get('X-Amz-Target')
        if not action:
            return

        record = {
            "eventID": "1",
            "eventVersion": "1.0",
            "dynamodb": {
                "StreamViewType": "NEW_AND_OLD_IMAGES",
                "SequenceNumber": "1",
                "SizeBytes": -1
            },
            "awsRegion": DEFAULT_REGION,
            "eventSource": "aws:dynamodb"
        }
        records = [record]

        if action == '%s.UpdateItem' % ACTION_PREFIX:
            req = {'TableName': data['TableName'], 'Key': data['Key']}
            new_item = aws_stack.dynamodb_get_item_raw(req)
            if 'Item' not in new_item:
                if 'message' in new_item:
                    ddb_client = aws_stack.connect_to_service('dynamodb')
                    table_names = ddb_client.list_tables()['TableNames']
                    msg = ('Unable to get item from DynamoDB (existing tables: %s): %s' %
                        (table_names, new_item['message']))
                    LOGGER.warning(msg)
                return
            record['eventName'] = 'MODIFY'
            record['dynamodb']['Keys'] = data['Key']
            record['dynamodb']['NewImage'] = new_item['Item']
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            records = []
            for table_name, requests in data['RequestItems'].items():
                for request in requests:
                    put_request = request.get('PutRequest')
                    if put_request:
                        keys = dynamodb_extract_keys(item=put_request['Item'], table_name=table_name)
                        if isinstance(keys, Response):
                            return keys
                        new_record = clone(record)
                        new_record['eventName'] = 'INSERT'
                        new_record['dynamodb']['Keys'] = keys
                        new_record['dynamodb']['NewImage'] = put_request['Item']
                        new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)
                        records.append(new_record)
        elif action == '%s.PutItem' % ACTION_PREFIX:
            record['eventName'] = 'INSERT'
            keys = dynamodb_extract_keys(item=data['Item'], table_name=data['TableName'])
            if isinstance(keys, Response):
                return keys
            record['dynamodb']['Keys'] = keys
            record['dynamodb']['NewImage'] = data['Item']
        elif action == '%s.GetItem' % ACTION_PREFIX:
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get('ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                    content['ConsumedCapacity'] = {
                        'CapacityUnits': 0.5,  # TODO hardcoded
                        'TableName': data['TableName']
                    }
                    response._content = json.dumps(content)
                    response.headers['content-length'] = len(response.content)
                    response.headers['x-amz-crc32'] = calculate_crc32(response)
        elif action == '%s.DeleteItem' % ACTION_PREFIX:
            record['eventName'] = 'REMOVE'
            record['dynamodb']['Keys'] = data['Key']
        elif action == '%s.CreateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.UpdateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            return
        else:
            # nothing to do
            return

        if 'TableName' in data:
            record['eventSourceARN'] = aws_stack.dynamodb_table_arn(data['TableName'])
        forward_to_lambda(records)
        forward_to_ddb_stream(records)
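
Note: as an illustration, a PutItem on a table 'users' with item {'id': {'S': '42'}} would cause the code above to forward a record shaped like the sketch below (region and ARN are placeholder values standing in for DEFAULT_REGION and aws_stack.dynamodb_table_arn):

example_record = {
    'eventID': '1',
    'eventVersion': '1.0',
    'eventName': 'INSERT',
    'awsRegion': 'us-east-1',  # stands in for DEFAULT_REGION
    'eventSource': 'aws:dynamodb',
    'eventSourceARN': 'arn:aws:dynamodb:us-east-1:000000000000:table/users',
    'dynamodb': {
        'StreamViewType': 'NEW_AND_OLD_IMAGES',
        'SequenceNumber': '1',
        'SizeBytes': -1,
        'Keys': {'id': {'S': '42'}},
        'NewImage': {'id': {'S': '42'}},
    },
}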
Code Example #5
def create_stream(
    stream_name: str,
    delivery_stream_type: str = "DirectPut",
    delivery_stream_type_configuration: Dict = None,
    s3_destination: Dict = None,
    elasticsearch_destination: Dict = None,
    http_destination: Dict = None,
    tags: Dict[str, str] = None,
):
    """Create a firehose stream with destination configurations. In case 'KinesisStreamAsSource' is set,
    creates a listener to process records from the underlying kinesis stream."""
    region = FirehoseBackend.get()
    tags = tags or {}
    stream = {
        "DeliveryStreamType": delivery_stream_type,
        "KinesisStreamSourceConfiguration": delivery_stream_type_configuration,
        "HasMoreDestinations": False,
        "VersionId": "1",
        "CreateTimestamp": time.time(),
        "DeliveryStreamARN": firehose_stream_arn(stream_name),
        "DeliveryStreamStatus": "ACTIVE",
        "DeliveryStreamName": stream_name,
        "Destinations": [],
        "Tags": tags,
    }
    region.delivery_streams[stream_name] = stream
    if elasticsearch_destination:
        update_destination(
            stream_name=stream_name,
            destination_id=short_uid(),
            elasticsearch_update=elasticsearch_destination,
        )
    if s3_destination:
        update_destination(
            stream_name=stream_name,
            destination_id=short_uid(),
            s3_update=s3_destination,
        )
    if http_destination:
        update_destination(
            stream_name=stream_name,
            destination_id=short_uid(),
            http_update=http_destination,
        )

    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_FIREHOSE_CREATE_STREAM,
        payload={"n": event_publisher.get_hash(stream_name)},
    )

    if delivery_stream_type == "KinesisStreamAsSource":
        kinesis_stream_name = delivery_stream_type_configuration.get(
            "KinesisStreamARN").split("/")[1]
        kinesis_connector.listen_to_kinesis(
            stream_name=kinesis_stream_name,
            fh_d_stream=stream_name,
            listener_func=process_records,
            wait_until_started=True,
            ddb_lease_table_suffix="-firehose",
        )
    return stream
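
Note: a hedged usage sketch for the function above. The destination dict follows the general shape of a Firehose S3 destination configuration; the ARNs are placeholders.

s3_destination = {
    'RoleARN': 'arn:aws:iam::000000000000:role/firehose-role',  # placeholder
    'BucketARN': 'arn:aws:s3:::my-bucket',                      # placeholder
}
stream = create_stream('my-delivery-stream', s3_destination=s3_destination)
assert stream['DeliveryStreamStatus'] == 'ACTIVE'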
Code Example #6
    def forward_request(self, method, path, data, headers):
        if method == 'OPTIONS':
            return 200

        data = data or ''
        data_orig = data
        data = aws_stack.fix_account_id_in_arns(
            data,
            existing='%3A{}%3Astack/'.format(TEST_AWS_ACCOUNT_ID),
            replace='%3A{}%3Astack/'.format(MOTO_CLOUDFORMATION_ACCOUNT_ID),
            colon_delimiter='')
        data = aws_stack.fix_account_id_in_arns(
            data,
            existing='%3A{}%3AchangeSet/'.format(TEST_AWS_ACCOUNT_ID),
            replace='%3A{}%3AchangeSet/'.format(
                MOTO_CLOUDFORMATION_ACCOUNT_ID),
            colon_delimiter='')
        data = aws_stack.fix_account_id_in_arns(data,
                                                existing=TEST_AWS_ACCOUNT_ID,
                                                replace=MOTO_ACCOUNT_ID,
                                                colon_delimiter='%3A')

        req_data = None
        if method == 'POST' and path == '/':

            req_data = urlparse.parse_qs(to_str(data))
            req_data = dict([(k, v[0]) for k, v in req_data.items()])
            action = req_data.get('Action')
            stack_name = req_data.get('StackName')

            if action == 'CreateStack':
                event_publisher.fire_event(
                    event_publisher.EVENT_CLOUDFORMATION_CREATE_STACK,
                    payload={'n': event_publisher.get_hash(stack_name)})

            if action == 'DeleteStack':
                client = aws_stack.connect_to_service('cloudformation')
                stack_resources = client.list_stack_resources(
                    StackName=stack_name)['StackResourceSummaries']
                template_deployer.delete_stack(stack_name, stack_resources)

            if action == 'DescribeStackEvents':
                # fix an issue where moto cannot handle ARNs as stack names (or missing names)
                run_fix = not stack_name
                if stack_name:
                    if stack_name.startswith('arn:aws:cloudformation'):
                        run_fix = True
                        pattern = r'arn:aws:cloudformation:[^:]+:[^:]+:stack/([^/]+)(/.+)?'
                        stack_name = re.sub(pattern, r'\1', stack_name)
                if run_fix:
                    stack_names = [stack_name] if stack_name else self._list_stack_names()
                    client = aws_stack.connect_to_service('cloudformation')
                    events = []
                    for stack_name in stack_names:
                        tmp = client.describe_stack_events(
                            StackName=stack_name)['StackEvents'][:1]
                        events.extend(tmp)
                    events = [{'member': e} for e in events]
                    response_content = '<StackEvents>%s</StackEvents>' % obj_to_xml(events)
                    return make_response('DescribeStackEvents', response_content)

        if req_data:
            if action == 'ValidateTemplate':
                return validate_template(req_data)

            if action in ['CreateStack', 'UpdateStack']:
                do_replace_url = is_real_s3_url(req_data.get('TemplateURL'))
                if do_replace_url:
                    req_data['TemplateURL'] = convert_s3_to_local_url(
                        req_data['TemplateURL'])
                url = req_data.get('TemplateURL', '')
                is_custom_local_endpoint = is_local_service_url(url) and '://localhost:' not in url
                modified_template_body = transform_template(req_data)
                if not modified_template_body and is_custom_local_endpoint:
                    modified_template_body = get_template_body(req_data)
                if modified_template_body:
                    req_data.pop('TemplateURL', None)
                    req_data['TemplateBody'] = modified_template_body
                if modified_template_body or do_replace_url:
                    data = urlparse.urlencode(req_data, doseq=True)
                    return Request(data=data, headers=headers, method=method)

            if data != data_orig or action in ['DescribeChangeSet', 'ExecuteChangeSet']:
                return Request(data=urlparse.urlencode(req_data, doseq=True),
                               headers=headers,
                               method=method)

        return True
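
Note: to illustrate the first fix_account_id_in_arns call above (the account IDs below are assumed values; the helper substitutes the account ID inside the URL-encoded ARN):

# assuming TEST_AWS_ACCOUNT_ID = '000000000000' and
# MOTO_CLOUDFORMATION_ACCOUNT_ID = '123456789012'
before = 'StackId=arn%3Aaws%3Acloudformation%3Aus-east-1%3A000000000000%3Astack/my-stack/abc123'
# the call rewrites '%3A000000000000%3Astack/' to '%3A123456789012%3Astack/':
after = 'StackId=arn%3Aaws%3Acloudformation%3Aus-east-1%3A123456789012%3Astack/my-stack/abc123'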
Code Example #7
    def return_response(self, method, path, data, headers, response):
        action = headers.get('X-Amz-Target')
        data = self.decode_content(data or '{}')

        response._content = re.sub(
            r'arn:aws:kinesis:[^:]+:',
            'arn:aws:kinesis:%s:' % aws_stack.get_region(),
            to_str(response.content or ''))

        records = []
        if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
            event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM
                          if action == ACTION_CREATE_STREAM else
                          event_publisher.EVENT_KINESIS_DELETE_STREAM)
            payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
            if action == ACTION_CREATE_STREAM:
                payload['s'] = data.get('ShardCount')
            event_publisher.fire_event(event_type, payload=payload)
        elif action == ACTION_PUT_RECORD:
            response_body = self.decode_content(response.content)
            event_record = {
                'approximateArrivalTimestamp': epoch_timestamp(),
                'data': data['Data'],
                'encryptionType': 'NONE',
                'partitionKey': data['PartitionKey'],
                'sequenceNumber': response_body.get('SequenceNumber')
            }
            event_records = [event_record]
            stream_name = data['StreamName']
            lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_PUT_RECORDS:
            event_records = []
            response_body = self.decode_content(response.content)
            if 'Records' in response_body:
                response_records = response_body['Records']
                records = data['Records']
                for index in range(0, len(records)):
                    record = records[index]
                    event_record = {
                        'approximateArrivalTimestamp': epoch_timestamp(),
                        'data': record['Data'],
                        'encryptionType': 'NONE',
                        'partitionKey': record['PartitionKey'],
                        'sequenceNumber': response_records[index].get('SequenceNumber')
                    }
                    event_records.append(event_record)
                stream_name = data['StreamName']
                lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_UPDATE_SHARD_COUNT:
            # Currently kinesalite, which backs the Kinesis implementation for localstack, does
            # not support UpdateShardCount:
            # https://github.com/mhart/kinesalite/issues/61
            #
            # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
            # applies Kinesis resources. A Terraform run fails when this is not present.
            #
            # The code that follows just returns a successful response, bypassing the 400
            # response that kinesalite returns.
            #
            response = Response()
            response.status_code = 200
            content = {
                'CurrentShardCount': 1,
                'StreamName': data['StreamName'],
                'TargetShardCount': data['TargetShardCount']
            }
            response.encoding = 'UTF-8'
            response._content = json.dumps(content)
            return response
Code Example #8
File: s3_listener.py Project: peteryhwong/localstack
    def return_response(self, method, path, data, headers, response):

        bucket_name = get_bucket_name(path, headers)

        # No path-name based bucket name?  Try host-based
        hostname_parts = headers['host'].split('.')
        if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
            bucket_name = hostname_parts[0]

        # POST requests to S3 may include a success_action_redirect field,
        # which should be used to redirect a client to a new location.
        key = None
        if method == 'POST':
            key, redirect_url = multipart_content.find_multipart_redirect_url(
                data, headers)

            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(
                    redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(
                    response.status_code, response.headers['Location']))

        parsed = urlparse.urlparse(path)

        bucket_name_in_host = headers['host'].startswith(bucket_name)

        should_send_notifications = all([
            method in ('PUT', 'POST', 'DELETE'),
            '/' in path[1:] or bucket_name_in_host,
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            bucket_name_in_host or
            (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
            # ignore bucket notification configuration requests
            parsed.query != 'notification' and parsed.query != 'lifecycle',
        ])

        # get subscribers and send bucket notifications
        if should_send_notifications:
            # if we already have a good key, use it, otherwise examine the path
            if key:
                object_path = '/' + key
            elif bucket_name_in_host:
                object_path = parsed.path
            else:
                parts = parsed.path[1:].split('/', 1)
                object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]

            send_notifications(method, bucket_name, object_path)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or
                                            len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
                          else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(
                event_type,
                payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        if response:
            # append CORS headers to response
            append_cors_headers(bucket_name,
                                request_method=method,
                                request_headers=headers,
                                response=response)

            if response._content:
                # default content type; possibly overwritten with "text/xml" for API calls further below
                response.headers['Content-Type'] = 'binary/octet-stream'

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)
                response._content = re.sub(r'([^\?])>\n\s*<',
                                           r'\1><',
                                           response_content_str,
                                           flags=re.MULTILINE)
                if is_bytes:
                    response._content = to_bytes(response._content)
                # fix content-type: https://github.com/localstack/localstack/issues/549
                if 'text/html' in response.headers.get('Content-Type', ''):
                    response.headers['Content-Type'] = 'text/xml; charset=utf-8'

            # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
            if isinstance(response._content, (six.string_types, six.binary_type)):
                response.headers['content-length'] = len(response._content)
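
Note: the un-pretty-printing regex used above can be exercised standalone. The [^\?] group is what preserves the newline right after the <?xml ...?> declaration, as the comment in the example requires:

import re

pretty = '<?xml version="1.0"?>\n<ListBucketResult>\n    <Name>demo</Name>\n</ListBucketResult>'
compact = re.sub(r'([^\?])>\n\s*<', r'\1><', pretty, flags=re.MULTILINE)
# compact == '<?xml version="1.0"?>\n<ListBucketResult><Name>demo</Name></ListBucketResult>'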
Code Example #9
    def return_response(self, method, path, data, headers, response):
        if path.startswith('/shell') or method == 'GET':
            return

        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        if response._content:
            # fix the table and latest stream ARNs (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'("TableArn"|"LatestStreamArn"|"StreamArn")\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                r'\1: "arn:aws:dynamodb:%s:\2"' % aws_stack.get_region(),
                to_str(response._content)
            )
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target', '')
        action = action.replace(ACTION_PREFIX, '')
        if not action:
            return

        # upgrade event version to 1.1
        record = {
            'eventID': '1',
            'eventVersion': '1.1',
            'dynamodb': {
                'ApproximateCreationDateTime': time.time(),
                'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': aws_stack.get_region(),
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

        streams_enabled_cache = {}
        table_name = data.get('TableName')
        event_sources_or_streams_enabled = has_event_sources_or_streams_enabled(table_name, streams_enabled_cache)

        if action == 'UpdateItem':
            if response.status_code == 200 and event_sources_or_streams_enabled:
                existing_item = self._thread_local('existing_item')
                record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'

                updated_item = find_existing_item(data)
                if not updated_item:
                    return
                record['dynamodb']['Keys'] = data['Key']
                if existing_item:
                    record['dynamodb']['OldImage'] = existing_item
                record['dynamodb']['NewImage'] = updated_item
                record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))

        elif action == 'BatchWriteItem':
            records = self.prepare_batch_write_item_records(record, data)
            for record in records:
                event_sources_or_streams_enabled = (event_sources_or_streams_enabled or
                    has_event_sources_or_streams_enabled(record['eventSourceARN'], streams_enabled_cache))

        elif action == 'TransactWriteItems':
            records = self.prepare_transact_write_item_records(record, data)
            for record in records:
                event_sources_or_streams_enabled = (event_sources_or_streams_enabled or
                    has_event_sources_or_streams_enabled(record['eventSourceARN'], streams_enabled_cache))

        elif action == 'PutItem':
            if response.status_code == 200:
                keys = dynamodb_extract_keys(item=data['Item'], table_name=table_name)
                if isinstance(keys, Response):
                    return keys
                # fix response
                if response._content == '{}':
                    response._content = update_put_item_response_content(data, response._content)
                    fix_headers_for_updated_response(response)
                if event_sources_or_streams_enabled:
                    existing_item = self._thread_local('existing_item')
                    record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                    # prepare record keys
                    record['dynamodb']['Keys'] = keys
                    record['dynamodb']['NewImage'] = data['Item']
                    record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
                    if existing_item:
                        record['dynamodb']['OldImage'] = existing_item

        elif action in ('GetItem', 'Query'):
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get('ReturnConsumedCapacity') in ['TOTAL', 'INDEXES']:
                    content['ConsumedCapacity'] = {
                        'TableName': table_name,
                        'CapacityUnits': 5,             # TODO hardcoded
                        'ReadCapacityUnits': 2,
                        'WriteCapacityUnits': 3
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)

        elif action == 'DeleteItem':
            if response.status_code == 200 and event_sources_or_streams_enabled:
                old_item = self._thread_local('existing_item')
                record['eventName'] = 'REMOVE'
                record['dynamodb']['Keys'] = data['Key']
                record['dynamodb']['OldImage'] = old_item

        elif action == 'CreateTable':
            if 'StreamSpecification' in data:
                if response.status_code == 200:
                    content = json.loads(to_str(response._content))
                    create_dynamodb_stream(data, content['TableDescription'].get('LatestStreamLabel'))

            event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(table_name)})

            if data.get('Tags') and response.status_code == 200:
                table_arn = json.loads(response._content)['TableDescription']['TableArn']
                TABLE_TAGS[table_arn] = {tag['Key']: tag['Value'] for tag in data['Tags']}

            return

        elif action == 'DeleteTable':
            if response.status_code == 200:
                table_arn = json.loads(response._content).get('TableDescription', {}).get('TableArn')
                event_publisher.fire_event(
                    event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                    payload={'n': event_publisher.get_hash(table_name)}
                )
                self.delete_all_event_source_mappings(table_arn)
                dynamodbstreams_api.delete_streams(table_arn)
                TABLE_TAGS.pop(table_arn, None)
            return

        elif action == 'UpdateTable':
            if 'StreamSpecification' in data:
                if response.status_code == 200:
                    content = json.loads(to_str(response._content))
                    create_dynamodb_stream(data, content['TableDescription'].get('LatestStreamLabel'))
            return

        elif action == 'TagResource':
            table_arn = data['ResourceArn']
            if table_arn not in TABLE_TAGS:
                TABLE_TAGS[table_arn] = {}
            TABLE_TAGS[table_arn].update({tag['Key']: tag['Value'] for tag in data.get('Tags', [])})
            return

        elif action == 'UntagResource':
            table_arn = data['ResourceArn']
            for tag_key in data.get('TagKeys', []):
                TABLE_TAGS.get(table_arn, {}).pop(tag_key, None)
            return

        else:
            # nothing to do
            return

        if event_sources_or_streams_enabled and records and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)

            forward_to_lambda(records)
            forward_to_ddb_stream(records)
Code Example #10
    def return_response(self, method, path, data, headers, response, request_handler=None):
        path = to_str(path)
        method = to_str(method)

        # persist this API call to disk
        super(ProxyListenerS3, self).return_response(method, path, data, headers, response, request_handler)

        # No path-name based bucket name? Try host-based
        bucket_name = get_bucket_name(path, headers)
        hostname_parts = headers['host'].split('.')
        if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
            bucket_name = hostname_parts[0]

        # POST requests to S3 may include a success_action_redirect or
        # success_action_status field, which should be used to redirect a
        # client to a new location.
        key = None
        if method == 'POST':
            key, redirect_url = multipart_content.find_multipart_key_value(data, headers)

            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

            key, status_code = multipart_content.find_multipart_key_value(
                data, headers, 'success_action_status'
            )

            if response.status_code == 200 and status_code == '201' and key:
                response.status_code = 201
                response._content = self.get_201_response(key, bucket_name)
                response.headers['Content-Length'] = str(len(response._content))
                response.headers['Content-Type'] = 'application/xml; charset=utf-8'
                return response

        parsed = urlparse.urlparse(path)
        bucket_name_in_host = headers['host'].startswith(bucket_name)

        should_send_notifications = all([
            method in ('PUT', 'POST', 'DELETE'),
            '/' in path[1:] or bucket_name_in_host or key,
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            bucket_name_in_host or key or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
            self.is_query_allowable(method, parsed.query)
        ])

        # get subscribers and send bucket notifications
        if should_send_notifications:
            # if we already have a good key, use it, otherwise examine the path
            if key:
                object_path = '/' + key
            elif bucket_name_in_host:
                object_path = parsed.path
            else:
                parts = parsed.path[1:].split('/', 1)
                object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
            version_id = response.headers.get('x-amz-version-id', None)

            send_notifications(method, bucket_name, object_path, version_id)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
                else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        # emulate ErrorDocument functionality if a website is configured
        if method == 'GET' and response.status_code == 404 and parsed.query != 'website':
            s3_client = aws_stack.connect_to_service('s3')

            try:
                # Verify the bucket exists in the first place--if not, we want normal processing of the 404
                s3_client.head_bucket(Bucket=bucket_name)
                website_config = s3_client.get_bucket_website(Bucket=bucket_name)
                error_doc_key = website_config.get('ErrorDocument', {}).get('Key')

                if error_doc_key:
                    error_object = s3_client.get_object(Bucket=bucket_name, Key=error_doc_key)
                    response.status_code = 200
                    response._content = error_object['Body'].read()
                    response.headers['content-length'] = len(response._content)
            except ClientError:
                # Pass on the 404 as usual
                pass

        if response:
            reset_content_length = False
            # append CORS headers and other annotations/patches to response
            append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
            append_last_modified_headers(response=response)
            append_list_objects_marker(method, path, data, response)
            fix_location_constraint(response)
            fix_range_content_type(bucket_name, path, headers, response)
            fix_delete_objects_response(bucket_name, method, parsed, data, headers, response)
            fix_metadata_key_underscores(response=response)
            fix_creation_date(method, path, response=response)
            fix_etag_for_multipart(data, headers, response)
            append_aws_request_troubleshooting_headers(response)

            if method == 'PUT':
                set_object_expiry(path, headers)

            # Remove body from PUT response on presigned URL
            # https://github.com/localstack/localstack/issues/1317
            if method == 'PUT' and ('X-Amz-Security-Token=' in path or
                    'X-Amz-Credential=' in path or 'AWSAccessKeyId=' in path):
                response._content = ''
                reset_content_length = True

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # Honor response header overrides
            # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
            if method == 'GET':
                add_accept_range_header(response)
                add_reponse_metadata_headers(response)
                if is_object_expired(path):
                    return no_such_key_error(path, headers.get('x-amz-request-id'), 400)

                query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
                for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
                    if param_name in query_map:
                        response.headers[header_name] = query_map[param_name][0]

            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)
                response._content = response_content_str

                append_last_modified_headers(response=response, content=response_content_str)

                # We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
                # https://github.com/jserver/mock-s3/pull/9/files
                # https://github.com/localstack/localstack/issues/183
                # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
                # Note: make sure to return XML docs verbatim: https://github.com/localstack/localstack/issues/1037
                if method != 'GET' or not is_object_specific_request(path, headers):
                    response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)

                # update Location information in response payload
                response._content = self._update_location(response._content, bucket_name)

                # convert back to bytes
                if is_bytes:
                    response._content = to_bytes(response._content)

                # fix content-type: https://github.com/localstack/localstack/issues/618
                #                   https://github.com/localstack/localstack/issues/549
                #                   https://github.com/localstack/localstack/issues/854
                if 'text/html' in response.headers.get('Content-Type', '') \
                        and not response_content_str.lower().startswith('<!doctype html'):
                    response.headers['Content-Type'] = 'application/xml; charset=utf-8'

                reset_content_length = True

            # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
            if method == 'DELETE':
                reset_content_length = True

            if reset_content_length:
                response.headers['content-length'] = len(response._content)
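
Note: the ALLOWED_HEADER_OVERRIDES mapping is defined elsewhere in the listener module. A plausible shape, based on the response-header query parameters AWS documents for GET object requests, would be the following (an assumption for illustration, not the module's actual definition):

ALLOWED_HEADER_OVERRIDES = {
    'response-content-type': 'Content-Type',
    'response-content-disposition': 'Content-Disposition',
    'response-cache-control': 'Cache-Control',
}
# e.g. GET /bucket/key?response-content-type=application/json would set
# the response's Content-Type header to 'application/json'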
Code Example #11
    def return_response(self, method, path, data, headers, response):

        parsed = urlparse.urlparse(path)
        # TODO: consider the case of hostname-based (as opposed to path-based) bucket addressing
        bucket_name = parsed.path.split('/')[1]

        # POST requests to S3 may include a success_action_redirect field,
        # which should be used to redirect a client to a new location.
        if method == 'POST':
            key, redirect_url = find_multipart_redirect_url(data, headers)
            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(
                    redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(
                    response.status_code, response.headers['Location']))

        # get subscribers and send bucket notifications
        if method in ('PUT', 'DELETE') and '/' in path[1:]:
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            if len(path[1:].split('/')[1]) > 0:
                parts = parsed.path[1:].split('/', 1)
                # ignore bucket notification configuration requests
                if parsed.query != 'notification' and parsed.query != 'lifecycle':
                    object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
                    send_notifications(method, bucket_name, object_path)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or
                                            len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
                          else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(
                event_type,
                payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        # append CORS headers to response
        if response:
            append_cors_headers(bucket_name,
                                request_method=method,
                                request_headers=headers,
                                response=response)

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)
                response._content = re.sub(r'([^\?])>\n\s*<',
                                           r'\1><',
                                           response_content_str,
                                           flags=re.MULTILINE)
                if is_bytes:
                    response._content = to_bytes(response._content)
                response.headers['content-length'] = len(response._content)
Code Example #12
    def return_response(self, method, path, data, headers, response):
        if path.startswith("/shell") or method == "GET":
            return

        data = json.loads(to_str(data))

        # update table definitions
        if data and "TableName" in data and "KeySchema" in data:
            table_definitions = DynamoDBRegion.get().table_definitions
            table_definitions[data["TableName"]] = data
        if response._content:
            # fix the table and latest stream ARNs (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'("TableArn"|"LatestStreamArn"|"StreamArn")\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                r'\1: "arn:aws:dynamodb:%s:\2"' % aws_stack.get_region(),
                to_str(response._content),
            )
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get("X-Amz-Target", "")
        action = action.replace(ACTION_PREFIX, "")
        if not action:
            return
        # upgrade event version to 1.1
        record = {
            "eventID": "1",
            "eventVersion": "1.1",
            "dynamodb": {
                "ApproximateCreationDateTime": time.time(),
                # 'StreamViewType': 'NEW_AND_OLD_IMAGES',
                "SizeBytes": -1,
            },
            "awsRegion": aws_stack.get_region(),
            "eventSource": "aws:dynamodb",
        }
        records = [record]

        streams_enabled_cache = {}
        table_name = data.get("TableName")
        event_sources_or_streams_enabled = has_event_sources_or_streams_enabled(
            table_name, streams_enabled_cache)

        if action == "UpdateItem":
            if response.status_code == 200 and event_sources_or_streams_enabled:
                existing_item = self._thread_local("existing_item")
                record["eventName"] = "INSERT" if not existing_item else "MODIFY"
                record["eventID"] = short_uid()
                updated_item = find_existing_item(data)
                if not updated_item:
                    return
                record["dynamodb"]["Keys"] = data["Key"]
                if existing_item:
                    record["dynamodb"]["OldImage"] = existing_item
                record["dynamodb"]["NewImage"] = updated_item
                record["dynamodb"]["SizeBytes"] = len(json.dumps(updated_item))
                stream_spec = dynamodb_get_table_stream_specification(
                    table_name=table_name)
                if stream_spec:
                    record["dynamodb"]["StreamViewType"] = stream_spec[
                        "StreamViewType"]

        elif action == "BatchWriteItem":
            records, unprocessed_items = self.prepare_batch_write_item_records(
                record, data)
            for record in records:
                event_sources_or_streams_enabled = (
                    event_sources_or_streams_enabled
                    or has_event_sources_or_streams_enabled(
                        record["eventSourceARN"], streams_enabled_cache))
            if response.status_code == 200 and any(unprocessed_items):
                content = json.loads(to_str(response.content))
                table_name = list(data["RequestItems"].keys())[0]
                if table_name not in content["UnprocessedItems"]:
                    content["UnprocessedItems"][table_name] = []
                for key in ["PutRequest", "DeleteRequest"]:
                    if any(unprocessed_items[key]):
                        content["UnprocessedItems"][table_name].append(
                            {key: unprocessed_items[key]})
                unprocessed = content["UnprocessedItems"]
                for key in list(unprocessed.keys()):
                    if not unprocessed.get(key):
                        del unprocessed[key]

                response._content = json.dumps(content)
                fix_headers_for_updated_response(response)

        elif action == "TransactWriteItems":
            records = self.prepare_transact_write_item_records(record, data)
            for record in records:
                event_sources_or_streams_enabled = (
                    event_sources_or_streams_enabled
                    or has_event_sources_or_streams_enabled(
                        record["eventSourceARN"], streams_enabled_cache))

        elif action == "PutItem":
            if response.status_code == 200:
                keys = dynamodb_extract_keys(item=data["Item"],
                                             table_name=table_name)
                if isinstance(keys, Response):
                    return keys
                # fix response
                if response._content == "{}":
                    response._content = update_put_item_response_content(
                        data, response._content)
                    fix_headers_for_updated_response(response)
                if event_sources_or_streams_enabled:
                    existing_item = self._thread_local("existing_item")
                    # Get stream specifications details for the table
                    stream_spec = dynamodb_get_table_stream_specification(
                        table_name=table_name)
                    record["eventName"] = "INSERT" if not existing_item else "MODIFY"
                    # prepare record keys
                    record["dynamodb"]["Keys"] = keys
                    record["dynamodb"]["NewImage"] = data["Item"]
                    record["dynamodb"]["SizeBytes"] = len(
                        json.dumps(data["Item"]))
                    record["eventID"] = short_uid()
                    if stream_spec:
                        record["dynamodb"]["StreamViewType"] = stream_spec[
                            "StreamViewType"]
                    if existing_item:
                        record["dynamodb"]["OldImage"] = existing_item

        elif action in ("GetItem", "Query"):
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if "ConsumedCapacity" not in content and data.get(
                        "ReturnConsumedCapacity") in [
                            "TOTAL",
                            "INDEXES",
                        ]:
                    content["ConsumedCapacity"] = {
                        "TableName": table_name,
                        "CapacityUnits": 5,  # TODO hardcoded
                        "ReadCapacityUnits": 2,
                        "WriteCapacityUnits": 3,
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)

        elif action == "DeleteItem":
            if response.status_code == 200 and event_sources_or_streams_enabled:
                old_item = self._thread_local("existing_item")
                record["eventID"] = short_uid()
                record["eventName"] = "REMOVE"
                record["dynamodb"]["Keys"] = data["Key"]
                record["dynamodb"]["OldImage"] = old_item
                record["dynamodb"]["SizeBytes"] = len(json.dumps(old_item))
                # Get stream specifications details for the table
                stream_spec = dynamodb_get_table_stream_specification(
                    table_name=table_name)
                if stream_spec:
                    record["dynamodb"]["StreamViewType"] = stream_spec[
                        "StreamViewType"]

        elif action == "CreateTable":
            if "StreamSpecification" in data:
                if response.status_code == 200:
                    content = json.loads(to_str(response._content))
                    create_dynamodb_stream(
                        data,
                        content["TableDescription"].get("LatestStreamLabel"))

            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={"n": event_publisher.get_hash(table_name)},
            )

            if data.get("Tags") and response.status_code == 200:
                table_arn = json.loads(response._content)["TableDescription"]["TableArn"]
                DynamoDBRegion.TABLE_TAGS[table_arn] = {
                    tag["Key"]: tag["Value"]
                    for tag in data["Tags"]
                }

            return

        elif action == "DeleteTable":
            if response.status_code == 200:
                table_arn = json.loads(response._content).get("TableDescription", {}).get("TableArn")
                event_publisher.fire_event(
                    event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                    payload={"n": event_publisher.get_hash(table_name)},
                )
                self.delete_all_event_source_mappings(table_arn)
                dynamodbstreams_api.delete_streams(table_arn)
                DynamoDBRegion.TABLE_TAGS.pop(table_arn, None)
            return

        elif action == "UpdateTable":
            content_str = to_str(response._content or "")
            if response.status_code == 200 and "StreamSpecification" in data:
                content = json.loads(content_str)
                create_dynamodb_stream(
                    data, content["TableDescription"].get("LatestStreamLabel"))
            if (response.status_code >= 400 and data.get("ReplicaUpdates")
                    and "Nothing to update" in content_str):
                table_name = data.get("TableName")
                # update local table props (replicas)
                table_properties = DynamoDBRegion.get().table_properties
                table_properties[table_name] = table_props = table_properties.get(table_name) or {}
                table_props["Replicas"] = replicas = table_props.get("Replicas") or []
                for repl_update in data["ReplicaUpdates"]:
                    for key, details in repl_update.items():
                        region = details.get("RegionName")
                        if key == "Create":
                            details["ReplicaStatus"] = details.get(
                                "ReplicaStatus") or "ACTIVE"
                            replicas.append(details)
                        if key == "Update":
                            replica = [
                                r for r in replicas
                                if r.get("RegionName") == region
                            ]
                            if replica:
                                replica[0].update(details)
                        if key == "Delete":
                            table_props["Replicas"] = [
                                r for r in replicas
                                if r.get("RegionName") != region
                            ]
                # update response content
                schema = get_table_schema(table_name)
                result = {"TableDescription": schema["Table"]}
                update_response_content(response, json_safe(result), 200)
            return

        elif action == "DescribeTable":
            table_name = data.get("TableName")
            table_props = DynamoDBRegion.get().table_properties.get(table_name)
            if table_props:
                content = json.loads(to_str(response.content))
                content.get("Table", {}).update(table_props)
                update_response_content(response, content)

        elif action == "TagResource":
            table_arn = data["ResourceArn"]
            table_tags = DynamoDBRegion.TABLE_TAGS
            if table_arn not in table_tags:
                table_tags[table_arn] = {}
            table_tags[table_arn].update(
                {tag["Key"]: tag["Value"] for tag in data.get("Tags", [])})
            return

        elif action == "UntagResource":
            table_arn = data["ResourceArn"]
            for tag_key in data.get("TagKeys", []):
                DynamoDBRegion.TABLE_TAGS.get(table_arn, {}).pop(tag_key, None)
            return

        else:
            # nothing to do
            return
        if event_sources_or_streams_enabled and records and "eventName" in records[0]:
            if "TableName" in data:
                records[0]["eventSourceARN"] = aws_stack.dynamodb_table_arn(table_name)
            # forward to kinesis stream
            forward_to_kinesis_stream(records)
            # forward to lambda and ddb_streams
            forward_to_lambda(records)
            records = self.prepare_records_to_forward_to_ddb_stream(records)
            forward_to_ddb_stream(records)
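As context for the ReplicaUpdates loop above: each list entry is a single-key dict whose key names the operation (Create, Update, or Delete) and whose value carries the region details. A minimal standalone sketch of such a payload (region names are illustrative):

replica_updates = [
    {"Create": {"RegionName": "eu-west-1"}},
    {"Update": {"RegionName": "us-east-2", "ReplicaStatus": "ACTIVE"}},
    {"Delete": {"RegionName": "ap-south-1"}},
]
for repl_update in replica_updates:
    for key, details in repl_update.items():
        # prints: Create eu-west-1 / Update us-east-2 / Delete ap-south-1
        print(key, details.get("RegionName"))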
Code Example #13
    def return_response(self, method, path, data, headers, response):
        action = headers.get('X-Amz-Target')
        data = json.loads(to_str(data))

        print('ProxyListenerKinesis return_response data: [', data,
              '] action [', action, ']')

        records = []
        if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
            event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM
                          if action == ACTION_CREATE_STREAM else
                          event_publisher.EVENT_KINESIS_DELETE_STREAM)
            payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
            if action == ACTION_CREATE_STREAM:
                payload['s'] = data.get('ShardCount')
            print(
                'ProxyListenerKinesis return_response ACTION_CREATE_STREAM, ACTION_DELETE_STREAM '
                + 'event_publisher.fire_event event_type: [', event_type,
                '] payload[', payload, ']')
            event_publisher.fire_event(event_type, payload=payload)
        elif action == ACTION_PUT_RECORD:
            response_body = json.loads(to_str(response.content))
            event_record = {
                'data': data['Data'],
                'partitionKey': data['PartitionKey'],
                'sequenceNumber': response_body.get('SequenceNumber')
            }
            event_records = [event_record]
            stream_name = data['StreamName']
            print(
                'ProxyListenerKinesis return_response ACTION_PUT_RECORD: ' +
                'lambda_api.process_kinesis_records event_record[',
                event_record, 'stream_name[', stream_name, ']')
            lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_PUT_RECORDS:
            event_records = []
            response_body = json.loads(to_str(response.content))
            if 'Records' in response_body:
                response_records = response_body['Records']
                records = data['Records']
                for index in range(0, len(records)):
                    record = records[index]
                    event_record = {
                        'data': record['Data'],
                        'partitionKey': record['PartitionKey'],
                        'sequenceNumber': response_records[index].get('SequenceNumber')
                    }
                    event_records.append(event_record)
                stream_name = data['StreamName']
                print(
                    'ProxyListenerKinesis return_response ACTION_PUT_RECORDS: '
                    + 'lambda_api.process_kinesis_records event_record[',
                    event_records, 'stream_name[', stream_name, ']')
                lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_UPDATE_SHARD_COUNT:
            # Currently kinesalite, which backs the Kinesis implementation for localstack, does
            # not support UpdateShardCount:
            # https://github.com/mhart/kinesalite/issues/61
            #
            # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
            # applies Kinesis resources. A Terraform run fails when this is not present.
            #
            # The code that follows just returns a successful response, bypassing the 400
            # response that kinesalite returns.
            #
            response = Response()
            response.status_code = 200
            content = {
                'CurrentShardCount': 1,
                'StreamName': data['StreamName'],
                'TargetShardCount': data['TargetShardCount']
            }
            response.encoding = 'UTF-8'
            response._content = json.dumps(content)
            print(
                'ProxyListenerKinesis return_response ACTION_UPDATE_SHARD_COUNT:  response[',
                response, ']')
            return response
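The PutRecords branch above pairs each request record with its response record by list index. The same pairing reads more naturally with zip; a minimal standalone sketch (record contents and sequence numbers are illustrative):

request_records = [
    {'Data': 'aGVsbG8=', 'PartitionKey': 'pk-1'},
    {'Data': 'd29ybGQ=', 'PartitionKey': 'pk-2'},
]
response_records = [{'SequenceNumber': '101'}, {'SequenceNumber': '102'}]
event_records = [
    {
        'data': req['Data'],
        'partitionKey': req['PartitionKey'],
        'sequenceNumber': resp.get('SequenceNumber'),
    }
    for req, resp in zip(request_records, response_records)
]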
Code Example #14
    def return_response(self, method, path, data, headers, response):
        action = headers.get('X-Amz-Target')
        data = self.decode_content(data or '{}')
        response._content = self.replace_in_encoded(response.content or '')

        records = []
        if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
            event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM
                          else event_publisher.EVENT_KINESIS_DELETE_STREAM)
            payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
            if action == ACTION_CREATE_STREAM:
                payload['s'] = data.get('ShardCount')
            event_publisher.fire_event(event_type, payload=payload)
        elif action == ACTION_PUT_RECORD:
            response_body = self.decode_content(response.content)
            # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
            event_record = {
                'approximateArrivalTimestamp': epoch_timestamp(),
                'data': data['Data'],
                'partitionKey': data['PartitionKey'],
                'sequenceNumber': response_body.get('SequenceNumber')
            }
            event_records = [event_record]
            stream_name = data['StreamName']
            lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_PUT_RECORDS:
            event_records = []
            response_body = self.decode_content(response.content)
            if 'Records' in response_body:
                response_records = response_body['Records']
                records = data['Records']
                for index in range(0, len(records)):
                    record = records[index]
                    # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
                    event_record = {
                        'approximateArrivalTimestamp': epoch_timestamp(),
                        'data': record['Data'],
                        'partitionKey': record['PartitionKey'],
                        'sequenceNumber': response_records[index].get('SequenceNumber')
                    }
                    event_records.append(event_record)
                stream_name = data['StreamName']
                lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == ACTION_UPDATE_SHARD_COUNT:
            # Currently kinesalite, which backs the Kinesis implementation for localstack, does
            # not support UpdateShardCount:
            # https://github.com/mhart/kinesalite/issues/61
            #
            # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
            # applies Kinesis resources. A Terraform run fails when this is not present.
            #
            # The code that follows just returns a successful response, bypassing the 400
            # response that kinesalite returns.
            #
            response = Response()
            response.status_code = 200
            content = {
                'CurrentShardCount': 1,
                'StreamName': data['StreamName'],
                'TargetShardCount': data['TargetShardCount']
            }
            response.encoding = 'UTF-8'
            response._content = json.dumps(content)
            return response
        elif action == ACTION_GET_RECORDS:
            sdk_v2 = self.sdk_is_v2(headers.get('User-Agent', '').split(' ')[0])
            results, encoding_type = self.decode_content(response.content, True)

            for record in results['Records']:
                if sdk_v2:
                    record['ApproximateArrivalTimestamp'] = int(record['ApproximateArrivalTimestamp'] * 1000)
                if not isinstance(record['Data'], str):
                    record['Data'] = base64.encodebytes(bytearray(record['Data']['data']))

            if encoding_type == APPLICATION_CBOR:
                response._content = cbor2.dumps(results)
            else:
                response._content = json.dumps(results)

            return response
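The decode_content helper used above is not part of this excerpt. A plausible minimal version (an assumption for illustration, not the project's actual helper) tries JSON first and falls back to CBOR, optionally reporting which encoding matched:

import json

import cbor2

APPLICATION_JSON = 'application/json'
APPLICATION_CBOR = 'application/cbor'

def decode_content(data, return_encoding=False):
    # Try plain JSON first; fall back to CBOR for binary payloads.
    try:
        result = json.loads(data if isinstance(data, str) else data.decode('utf-8'))
        encoding = APPLICATION_JSON
    except ValueError:
        result = cbor2.loads(data)
        encoding = APPLICATION_CBOR
    return (result, encoding) if return_encoding else result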
Code Example #15
    def create_delivery_stream(
        self,
        context: RequestContext,
        delivery_stream_name: DeliveryStreamName,
        delivery_stream_type: DeliveryStreamType = DeliveryStreamType.DirectPut,
        kinesis_stream_source_configuration: KinesisStreamSourceConfiguration = None,
        delivery_stream_encryption_configuration_input: DeliveryStreamEncryptionConfigurationInput = None,
        s3_destination_configuration: S3DestinationConfiguration = None,
        extended_s3_destination_configuration: ExtendedS3DestinationConfiguration = None,
        redshift_destination_configuration: RedshiftDestinationConfiguration = None,
        elasticsearch_destination_configuration: ElasticsearchDestinationConfiguration = None,
        amazonopensearchservice_destination_configuration: AmazonopensearchserviceDestinationConfiguration = None,
        splunk_destination_configuration: SplunkDestinationConfiguration = None,
        http_endpoint_destination_configuration: HttpEndpointDestinationConfiguration = None,
        tags: TagDeliveryStreamInputTagList = None,
    ) -> CreateDeliveryStreamOutput:
        region = FirehoseBackend.get()

        destinations: DestinationDescriptionList = []
        if elasticsearch_destination_configuration:
            destinations.append(
                DestinationDescription(
                    DestinationId=short_uid(),
                    ElasticsearchDestinationDescription=convert_es_config_to_desc(
                        elasticsearch_destination_configuration),
                ))
        if amazonopensearchservice_destination_configuration:
            destinations.append(
                DestinationDescription(
                    DestinationId=short_uid(),
                    AmazonopensearchserviceDestinationDescription=convert_opensearch_config_to_desc(
                        amazonopensearchservice_destination_configuration),
                ))
        if s3_destination_configuration or extended_s3_destination_configuration:
            destinations.append(
                DestinationDescription(
                    DestinationId=short_uid(),
                    S3DestinationDescription=convert_s3_config_to_desc(
                        s3_destination_configuration),
                    ExtendedS3DestinationDescription=convert_extended_s3_config_to_desc(
                        extended_s3_destination_configuration),
                ))
        if http_endpoint_destination_configuration:
            destinations.append(
                DestinationDescription(
                    DestinationId=short_uid(),
                    HttpEndpointDestinationDescription=convert_http_config_to_desc(
                        http_endpoint_destination_configuration),
                ))
        if splunk_destination_configuration:
            LOG.warning(
                "Delivery stream contains a splunk destination (which is currently not supported)."
            )
        if redshift_destination_configuration:
            LOG.warning(
                "Delivery stream contains a redshift destination (which is currently not supported)."
            )

        stream = DeliveryStreamDescription(
            DeliveryStreamName=delivery_stream_name,
            DeliveryStreamARN=firehose_stream_arn(
                stream_name=delivery_stream_name,
                account_id=context.account_id,
                region_name=context.region,
            ),
            DeliveryStreamStatus=DeliveryStreamStatus.ACTIVE,
            DeliveryStreamType=delivery_stream_type,
            HasMoreDestinations=False,
            VersionId="1",
            CreateTimestamp=datetime.now(),
            LastUpdateTimestamp=datetime.now(),
            Destinations=destinations,
            Source=convert_source_config_to_desc(
                kinesis_stream_source_configuration),
        )
        FirehoseBackend.TAGS.tag_resource(stream["DeliveryStreamARN"], tags)
        region.delivery_streams[delivery_stream_name] = stream

        # record event
        event_publisher.fire_event(
            event_publisher.EVENT_FIREHOSE_CREATE_STREAM,
            payload={"n": event_publisher.get_hash(delivery_stream_name)},
        )

        if delivery_stream_type == DeliveryStreamType.KinesisStreamAsSource:
            if not kinesis_stream_source_configuration:
                raise InvalidArgumentException(
                    "Missing delivery stream configuration")
            kinesis_stream_name = kinesis_stream_source_configuration[
                "KinesisStreamARN"].split("/")[1]
            kinesis_connector.listen_to_kinesis(
                stream_name=kinesis_stream_name,
                fh_d_stream=delivery_stream_name,
                listener_func=self._process_records,
                wait_until_started=True,
                ddb_lease_table_suffix="-firehose",
            )
        return CreateDeliveryStreamOutput(
            DeliveryStreamARN=stream["DeliveryStreamARN"])
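The KinesisStreamAsSource branch derives the source stream name from the last path segment of the stream ARN. In isolation (the ARN is illustrative):

kinesis_stream_arn = 'arn:aws:kinesis:us-east-1:000000000000:stream/my-source-stream'
kinesis_stream_name = kinesis_stream_arn.split('/')[1]
assert kinesis_stream_name == 'my-source-stream'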
Code Example #16
File: dynamodb_listener.py Project: bbc/localstack
    def return_response(self, method, path, data, headers, response):
        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        if response._content:
            # fix the table ARN (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(r'"TableArn"\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                r'"TableArn": "arn:aws:dynamodb:%s:\1"' % aws_stack.get_local_region(), to_str(response._content))
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target')
        if not action:
            return

        record = {
            'eventID': '1',
            'eventVersion': '1.0',
            'dynamodb': {
                'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': DEFAULT_REGION,
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

        if action == '%s.UpdateItem' % ACTION_PREFIX:
            updated_item = find_existing_item(data)
            if not updated_item:
                return
            record['eventName'] = 'MODIFY'
            record['dynamodb']['Keys'] = data['Key']
            record['dynamodb']['OldImage'] = ProxyListenerDynamoDB.thread_local.existing_item
            record['dynamodb']['NewImage'] = updated_item
            record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            records = []
            for table_name, requests in data['RequestItems'].items():
                for request in requests:
                    put_request = request.get('PutRequest')
                    if put_request:
                        keys = dynamodb_extract_keys(item=put_request['Item'], table_name=table_name)
                        if isinstance(keys, Response):
                            return keys
                        new_record = clone(record)
                        new_record['eventName'] = 'INSERT'
                        new_record['dynamodb']['Keys'] = keys
                        new_record['dynamodb']['NewImage'] = put_request['Item']
                        new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)
                        records.append(new_record)
        elif action == '%s.PutItem' % ACTION_PREFIX:
            existing_item = ProxyListenerDynamoDB.thread_local.existing_item
            ProxyListenerDynamoDB.thread_local.existing_item = None
            record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
            keys = dynamodb_extract_keys(item=data['Item'], table_name=data['TableName'])
            if isinstance(keys, Response):
                return keys
            record['dynamodb']['Keys'] = keys
            record['dynamodb']['NewImage'] = data['Item']
            record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
        elif action == '%s.GetItem' % ACTION_PREFIX:
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get('ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                    content['ConsumedCapacity'] = {
                        'CapacityUnits': 0.5,  # TODO hardcoded
                        'TableName': data['TableName']
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)
        elif action == '%s.DeleteItem' % ACTION_PREFIX:
            old_item = ProxyListenerDynamoDB.thread_local.existing_item
            record['eventName'] = 'REMOVE'
            record['dynamodb']['Keys'] = data['Key']
            record['dynamodb']['OldImage'] = old_item
        elif action == '%s.CreateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.UpdateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            return
        else:
            # nothing to do
            return

        if len(records) > 0 and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(data['TableName'])
            forward_to_lambda(records)
            forward_to_ddb_stream(records)
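For reference, a PutItem on a fresh key passes an INSERT record of roughly this shape to forward_to_lambda and forward_to_ddb_stream (all values are illustrative):

record = {
    'eventID': '1',
    'eventVersion': '1.0',
    'eventName': 'INSERT',
    'eventSource': 'aws:dynamodb',
    'eventSourceARN': 'arn:aws:dynamodb:us-east-1:000000000000:table/my-table',
    'awsRegion': 'us-east-1',
    'dynamodb': {
        'StreamViewType': 'NEW_AND_OLD_IMAGES',
        'Keys': {'id': {'S': 'item-1'}},
        'NewImage': {'id': {'S': 'item-1'}, 'value': {'N': '42'}},
        'SizeBytes': 45,  # len(json.dumps(NewImage))
    },
}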
Code Example #17
File: s3_listener.py Project: bbc/localstack
    def return_response(self, method, path, data, headers, response):

        bucket_name = get_bucket_name(path, headers)

        # No path-name based bucket name?  Try host-based
        hostname_parts = headers['host'].split('.')
        if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
            bucket_name = hostname_parts[0]

        # POST requests to S3 may include a success_action_redirect field,
        # which should be used to redirect a client to a new location.
        key = None
        if method == 'POST':
            key, redirect_url = multipart_content.find_multipart_redirect_url(data, headers)

            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

        parsed = urlparse.urlparse(path)

        bucket_name_in_host = headers['host'].startswith(bucket_name)

        should_send_notifications = all([
            method in ('PUT', 'POST', 'DELETE'),
            '/' in path[1:] or bucket_name_in_host,
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            bucket_name_in_host or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
            # don't send notification if url has a query part (some/path/with?query)
            # (query can be one of 'notification', 'lifecycle', 'tagging', etc)
            not parsed.query
        ])

        # get subscribers and send bucket notifications
        if should_send_notifications:
            # if we already have a good key, use it, otherwise examine the path
            if key:
                object_path = '/' + key
            elif bucket_name_in_host:
                object_path = parsed.path
            else:
                parts = parsed.path[1:].split('/', 1)
                object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]

            send_notifications(method, bucket_name, object_path)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
                else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        if response:
            # append CORS headers to response
            append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)
                response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)
                if is_bytes:
                    response._content = to_bytes(response._content)
                # fix content-type: https://github.com/localstack/localstack/issues/618
                #                   https://github.com/localstack/localstack/issues/549
                if 'text/html' in response.headers.get('Content-Type', ''):
                    response.headers['Content-Type'] = 'application/xml; charset=utf-8'

                response.headers['content-length'] = len(response._content)

            # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
            if method == 'DELETE':
                response.headers['content-length'] = len(response._content)
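The un-pretty-printing regex collapses whitespace between XML tags while the [^\?] guard preserves the required newline after the <?xml ...?> declaration. A standalone demonstration:

import re

pretty = '<?xml version="1.0"?>\n<Result>\n    <Key>test</Key>\n</Result>'
compact = re.sub(r'([^\?])>\n\s*<', r'\1><', pretty, flags=re.MULTILINE)
assert compact == '<?xml version="1.0"?>\n<Result><Key>test</Key></Result>'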
Code Example #18
File: sqs_listener.py Project: gambxxxx/localstack-1
    def return_response(self, method, path, data, headers, response,
                        request_handler):

        if method == 'POST' and path == '/':
            req_data = urlparse.parse_qs(to_str(data))
            action = req_data.get('Action', [None])[0]
            event_type = None
            queue_url = None
            if action == 'CreateQueue':
                event_type = event_publisher.EVENT_SQS_CREATE_QUEUE
                response_data = xmltodict.parse(response.content)
                if 'CreateQueueResponse' in response_data:
                    queue_url = response_data['CreateQueueResponse'][
                        'CreateQueueResult']['QueueUrl']
            elif action == 'DeleteQueue':
                event_type = event_publisher.EVENT_SQS_DELETE_QUEUE
                queue_url = req_data.get('QueueUrl', [None])[0]

            if event_type and queue_url:
                event_publisher.fire_event(
                    event_type,
                    payload={'u': event_publisher.get_hash(queue_url)})

            # patch the response and return the correct endpoint URLs
            if action in ('CreateQueue', 'GetQueueUrl', 'ListQueues'):
                content_str = content_str_original = to_str(response.content)
                new_response = Response()
                new_response.status_code = response.status_code
                new_response.headers = response.headers
                if config.USE_SSL and '<QueueUrl>http://' in content_str:
                    # return https://... if we're supposed to use SSL
                    content_str = re.sub(r'<QueueUrl>\s*http://',
                                         r'<QueueUrl>https://', content_str)
                # expose external hostname:port
                external_port = get_external_port(headers, request_handler)
                content_str = re.sub(
                    r'<QueueUrl>\s*([a-z]+)://[^<]*:([0-9]+)/([^<]*)\s*</QueueUrl>',
                    r'<QueueUrl>\1://%s:%s/\3</QueueUrl>' %
                    (HOSTNAME_EXTERNAL, external_port), content_str)
                new_response._content = content_str
                if content_str_original != new_response._content:
                    # if changes have been made, return patched response
                    new_response.headers['content-length'] = len(
                        new_response._content)
                    return new_response

            # Since the following 2 API calls are not implemented in ElasticMQ, we're mocking them
            # and letting them return an empty response
            if action == 'TagQueue':
                new_response = Response()
                new_response.status_code = 200
                new_response._content = (
                    '<?xml version="1.0"?>'
                    '<TagQueueResponse>'
                    '<ResponseMetadata>'  # noqa: W291
                    '<RequestId>{}</RequestId>'  # noqa: W291
                    '</ResponseMetadata>'  # noqa: W291
                    '</TagQueueResponse>').format(uuid.uuid4())
                return new_response
            elif action == 'ListQueueTags':
                new_response = Response()
                new_response.status_code = 200
                new_response._content = (
                    '<?xml version="1.0"?>'
                    '<ListQueueTagsResponse xmlns="{}">'
                    '<ListQueueTagsResult/>'  # noqa: W291
                    '<ResponseMetadata>'  # noqa: W291
                    '<RequestId>{}</RequestId>'  # noqa: W291
                    '</ResponseMetadata>'  # noqa: W291
                    '</ListQueueTagsResponse>').format(XMLNS_SQS, uuid.uuid4())
                return new_response
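The endpoint-patching regex above rewrites the host and port inside <QueueUrl> elements while preserving the scheme and queue path. Demonstrated in isolation (the hostname and port are illustrative):

import re

content = '<QueueUrl>http://localhost:4576/queue/my-queue</QueueUrl>'
patched = re.sub(
    r'<QueueUrl>\s*([a-z]+)://[^<]*:([0-9]+)/([^<]*)\s*</QueueUrl>',
    r'<QueueUrl>\1://%s:%s/\3</QueueUrl>' % ('sqs.example.com', 4566),
    content)
assert patched == '<QueueUrl>http://sqs.example.com:4566/queue/my-queue</QueueUrl>'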
Code Example #19
    def return_response(self, method, path, data, headers, response):
        action = headers.get("X-Amz-Target", "").split(".")[-1]
        data, encoding_type = self.decode_content(data or "{}", True)
        response._content = self.replace_in_encoded(response.content or "")

        if action in ("CreateStream", "DeleteStream"):
            event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM
                          if action == "CreateStream" else
                          event_publisher.EVENT_KINESIS_DELETE_STREAM)
            payload = {"n": event_publisher.get_hash(data.get("StreamName"))}
            if action == "CreateStream":
                payload["s"] = data.get("ShardCount")
            event_publisher.fire_event(event_type, payload=payload)
        elif action == "PutRecord":
            response_body = self.decode_content(response.content)
            # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
            event_record = {
                "approximateArrivalTimestamp": epoch_timestamp(),
                "data": data["Data"],
                "partitionKey": data["PartitionKey"],
                "sequenceNumber": response_body.get("SequenceNumber"),
            }
            event_records = [event_record]
            stream_name = data["StreamName"]
            lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == "PutRecords":
            event_records = []
            response_body = self.decode_content(response.content)
            if "Records" in response_body:
                response_records = response_body["Records"]
                records = data["Records"]
                for index in range(0, len(records)):
                    record = records[index]
                    # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
                    event_record = {
                        "approximateArrivalTimestamp": epoch_timestamp(),
                        "data": record["Data"],
                        "partitionKey": record["PartitionKey"],
                        "sequenceNumber": response_records[index].get("SequenceNumber"),
                    }
                    event_records.append(event_record)
                stream_name = data["StreamName"]
                lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == "UpdateShardCount" and config.KINESIS_PROVIDER == "kinesalite":
            # Currently kinesalite, which backs the Kinesis implementation for localstack, does
            # not support UpdateShardCount:
            # https://github.com/mhart/kinesalite/issues/61
            #
            # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
            # applies Kinesis resources. A Terraform run fails when this is not present.
            #
            # The code that follows just returns a successful response, bypassing the 400
            # response that kinesalite returns.
            #
            response = Response()
            response.status_code = 200
            content = {
                "CurrentShardCount": 1,
                "StreamName": data["StreamName"],
                "TargetShardCount": data["TargetShardCount"],
            }
            response.encoding = "UTF-8"
            response._content = json.dumps(content)
            return response
        elif action == "GetRecords":
            sdk_v2 = self.is_sdk_v2_request(headers)
            results, encoding_type = self.decode_content(
                response.content, True)

            records = results.get("Records", [])
            if not records:
                return response

            for record in records:
                if sdk_v2:
                    record["ApproximateArrivalTimestamp"] = int(
                        record["ApproximateArrivalTimestamp"])

                tmp = record["Data"]
                # "tmp" is either a base64-encoded string, or a dict {"data": <data_bytes...>}
                is_base64_encoded = isinstance(tmp, str)
                if isinstance(tmp, dict):
                    tmp = bytearray(tmp.get("data", []))

                if is_base64_encoded:
                    tmp = base64.b64decode(tmp)

                if encoding_type == APPLICATION_JSON:
                    # Remove double quotes from data written in regular JSON encoding (not for CBOR!)
                    # https://github.com/localstack/localstack/issues/3588
                    if len(tmp) >= 2 and tmp[0] == tmp[-1] == b'"'[0]:
                        tmp = tmp[1:-1]
                if encoding_type == APPLICATION_CBOR:
                    # Note: AWS Java SDK requires bytes embedded in CBOR encoding, not base64 strings!
                    tmp = to_bytes(tmp)

                if encoding_type == APPLICATION_JSON:
                    if is_base64_encoded:
                        tmp = base64.b64encode(tmp)
                    tmp = to_str(tmp)

                record["Data"] = tmp

            response._content = encode_data(results, encoding_type)
            return response

        if response.status_code >= 400:
            response_body = self.decode_content(response.content)
            if (response_body and response_body.get("__type")
                    and not headers.get(HEADER_AMZN_ERROR_TYPE)):
                response.headers[HEADER_AMZN_ERROR_TYPE] = str(
                    response_body.get("__type"))
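The double-quote check in the GetRecords branch strips the quote bytes that surround record data written through the plain-JSON path (see localstack issue 3588). In isolation:

tmp = b'"aGVsbG8="'
if len(tmp) >= 2 and tmp[0] == tmp[-1] == b'"'[0]:
    tmp = tmp[1:-1]
assert tmp == b'aGVsbG8='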
Code Example #20
    def return_response(self, method, path, data, headers, response):
        action = headers.get('X-Amz-Target', '').split('.')[-1]
        data, encoding_type = self.decode_content(data or '{}', True)
        response._content = self.replace_in_encoded(response.content or '')
        records = []
        if action in ('CreateStream', 'DeleteStream'):
            event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM
                          if action == 'CreateStream' else
                          event_publisher.EVENT_KINESIS_DELETE_STREAM)
            payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
            if action == 'CreateStream':
                payload['s'] = data.get('ShardCount')
            event_publisher.fire_event(event_type, payload=payload)
        elif action == 'PutRecord':
            response_body = self.decode_content(response.content)
            # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
            event_record = {
                'approximateArrivalTimestamp': epoch_timestamp(),
                'data': data['Data'],
                'partitionKey': data['PartitionKey'],
                'sequenceNumber': response_body.get('SequenceNumber')
            }
            event_records = [event_record]
            stream_name = data['StreamName']
            lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == 'PutRecords':
            event_records = []
            response_body = self.decode_content(response.content)
            if 'Records' in response_body:
                response_records = response_body['Records']
                records = data['Records']
                for index in range(0, len(records)):
                    record = records[index]
                    # Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
                    event_record = {
                        'approximateArrivalTimestamp': epoch_timestamp(),
                        'data': record['Data'],
                        'partitionKey': record['PartitionKey'],
                        'sequenceNumber': response_records[index].get('SequenceNumber')
                    }
                    event_records.append(event_record)
                stream_name = data['StreamName']
                lambda_api.process_kinesis_records(event_records, stream_name)
        elif action == 'UpdateShardCount':
            # Currently kinesalite, which backs the Kinesis implementation for localstack, does
            # not support UpdateShardCount:
            # https://github.com/mhart/kinesalite/issues/61
            #
            # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
            # applies Kinesis resources. A Terraform run fails when this is not present.
            #
            # The code that follows just returns a successful response, bypassing the 400
            # response that kinesalite returns.
            #
            response = Response()
            response.status_code = 200
            content = {
                'CurrentShardCount': 1,
                'StreamName': data['StreamName'],
                'TargetShardCount': data['TargetShardCount']
            }
            response.encoding = 'UTF-8'
            response._content = json.dumps(content)
            return response
        elif action == 'GetRecords':
            sdk_v2 = self.sdk_is_v2(
                headers.get('User-Agent', '').split(' ')[0])
            results, encoding_type = self.decode_content(
                response.content, True)

            records = results.get('Records', [])
            if not records:
                return response

            for record in records:
                if sdk_v2:
                    record['ApproximateArrivalTimestamp'] = int(
                        record['ApproximateArrivalTimestamp'])
                if not isinstance(record['Data'], str):
                    # Remove double quotes from data written as bytes
                    # https://github.com/localstack/localstack/issues/3588
                    tmp = bytearray(record['Data']['data'])
                    if len(tmp) >= 2 and tmp[0] == tmp[-1] == b'"'[0]:
                        tmp = tmp[1:-1]

                    if encoding_type == APPLICATION_JSON:
                        record['Data'] = to_str(base64.b64encode(tmp))
                    else:
                        record['Data'] = to_str(tmp)

                else:
                    tmp = base64.b64decode(record['Data'])
                    if len(tmp) >= 2 and tmp[0] == tmp[-1] == b'"'[0]:
                        tmp = tmp[1:-1]
                    record['Data'] = to_str(base64.b64encode(tmp))

            if encoding_type == APPLICATION_CBOR:
                response._content = cbor2.dumps(results)
            else:
                response._content = json.dumps(results)
            return response
Code Example #21
File: sqs_listener.py Project: bbc/localstack
    def return_response(self, method, path, data, headers, response, request_handler):

        if method == 'POST' and path == '/':
            req_data = urlparse.parse_qs(to_str(data))
            action = req_data.get('Action', [None])[0]
            event_type = None
            queue_url = None
            if action == 'CreateQueue':
                event_type = event_publisher.EVENT_SQS_CREATE_QUEUE
                response_data = xmltodict.parse(response.content)
                if 'CreateQueueResponse' in response_data:
                    queue_url = response_data['CreateQueueResponse']['CreateQueueResult']['QueueUrl']
            elif action == 'DeleteQueue':
                event_type = event_publisher.EVENT_SQS_DELETE_QUEUE
                queue_url = req_data.get('QueueUrl', [None])[0]

            if event_type and queue_url:
                event_publisher.fire_event(event_type, payload={'u': event_publisher.get_hash(queue_url)})

            # patch the response and return the correct endpoint URLs
            if action in ('CreateQueue', 'GetQueueUrl', 'ListQueues'):
                content_str = content_str_original = to_str(response.content)
                new_response = Response()
                new_response.status_code = response.status_code
                new_response.headers = response.headers
                if config.USE_SSL and '<QueueUrl>http://' in content_str:
                    # return https://... if we're supposed to use SSL
                    content_str = re.sub(r'<QueueUrl>\s*http://', r'<QueueUrl>https://', content_str)
                # expose external hostname:port
                external_port = get_external_port(headers, request_handler)
                content_str = re.sub(r'<QueueUrl>\s*([a-z]+)://[^<]*:([0-9]+)/([^<]*)\s*</QueueUrl>',
                    r'<QueueUrl>\1://%s:%s/\3</QueueUrl>' % (HOSTNAME_EXTERNAL, external_port), content_str)
                new_response._content = content_str
                if content_str_original != new_response._content:
                    # if changes have been made, return patched response
                    new_response.headers['content-length'] = len(new_response._content)
                    return new_response

            # Since the following 2 API calls are not implemented in ElasticMQ, we're mocking them
            # and letting them return an empty response
            if action == 'TagQueue':
                new_response = Response()
                new_response.status_code = 200
                new_response._content = (
                    '<?xml version="1.0"?>'
                    '<TagQueueResponse>'
                        '<ResponseMetadata>'  # noqa: W291
                            '<RequestId>{}</RequestId>'  # noqa: W291
                        '</ResponseMetadata>'  # noqa: W291
                    '</TagQueueResponse>'
                ).format(uuid.uuid4())
                return new_response
            elif action == 'ListQueueTags':
                new_response = Response()
                new_response.status_code = 200
                new_response._content = (
                    '<?xml version="1.0"?>'
                    '<ListQueueTagsResponse xmlns="{}">'
                        '<ListQueueTagsResult/>'  # noqa: W291
                        '<ResponseMetadata>'  # noqa: W291
                            '<RequestId>{}</RequestId>'  # noqa: W291
                        '</ResponseMetadata>'  # noqa: W291
                    '</ListQueueTagsResponse>'
                ).format(XMLNS_SQS, uuid.uuid4())
                return new_response
Code Example #22
    def return_response(self, method, path, data, headers, response):
        if path.startswith('/shell'):
            return
        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        if response._content:
            # fix the table and latest stream ARNs (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'("TableArn"|"LatestStreamArn"|"StreamArn")\s*:\s*"arn:aws:dynamodb:'
                + 'ddblocal:([^"]+)"',
                r'\1: "arn:aws:dynamodb:%s:\2"' % aws_stack.get_region(),
                to_str(response._content))
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target')
        if not action:
            return

        record = {
            'eventID': '1',
            'eventVersion': '1.0',
            'dynamodb': {
                'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': aws_stack.get_region(),
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

        if action == '%s.UpdateItem' % ACTION_PREFIX:
            if response.status_code == 200:
                updated_item = find_existing_item(data)
                if not updated_item:
                    return
                record['eventName'] = 'MODIFY'
                record['dynamodb']['Keys'] = data['Key']
                record['dynamodb']['OldImage'] = self._thread_local('existing_item')
                record['dynamodb']['NewImage'] = updated_item
                record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            records = self.prepare_batch_write_item_records(record, data)
        elif action == '%s.TransactWriteItems' % ACTION_PREFIX:
            records = self.prepare_transact_write_item_records(record, data)
        elif action == '%s.PutItem' % ACTION_PREFIX:
            if response.status_code == 200:
                existing_item = self._thread_local('existing_item')
                record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                keys = dynamodb_extract_keys(item=data['Item'],
                                             table_name=data['TableName'])
                if isinstance(keys, Response):
                    return keys
                # fix response
                if response._content == '{}':
                    response._content = json.dumps(
                        {'Attributes': data['Item']})
                    fix_headers_for_updated_response(response)
                # prepare record keys
                record['dynamodb']['Keys'] = keys
                record['dynamodb']['NewImage'] = data['Item']
                record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
                if existing_item:
                    record['dynamodb']['OldImage'] = existing_item
        elif action == '%s.GetItem' % ACTION_PREFIX:
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get(
                        'ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                    content['ConsumedCapacity'] = {
                        'CapacityUnits': 0.5,  # TODO hardcoded
                        'TableName': data['TableName']
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)
        elif action == '%s.DeleteItem' % ACTION_PREFIX:
            if response.status_code == 200:
                old_item = self._thread_local('existing_item')
                record['eventName'] = 'REMOVE'
                record['dynamodb']['Keys'] = data['Key']
                record['dynamodb']['OldImage'] = old_item
        elif action == '%s.CreateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                if response.status_code == 200:
                    content = json.loads(to_str(response._content))
                    create_dynamodb_stream(
                        data,
                        content['TableDescription'].get('LatestStreamLabel'))
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.UpdateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                if response.status_code == 200:
                    content = json.loads(to_str(response._content))
                    create_dynamodb_stream(
                        data,
                        content['TableDescription'].get('LatestStreamLabel'))
            return
        else:
            # nothing to do
            return

        if len(records) > 0 and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    data['TableName'])
            forward_to_lambda(records)
            forward_to_ddb_stream(records)
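The ARN fix at the top of this listener replaces the hardcoded "ddblocal" region that DynamoDBLocal emits. Demonstrated on a sample payload (the region and account ID are illustrative):

import re

content = '{"TableArn": "arn:aws:dynamodb:ddblocal:000000000000:table/my-table"}'
fixed = re.sub(
    r'("TableArn"|"LatestStreamArn"|"StreamArn")\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
    r'\1: "arn:aws:dynamodb:%s:\2"' % 'us-east-1',
    content)
assert fixed == '{"TableArn": "arn:aws:dynamodb:us-east-1:000000000000:table/my-table"}'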
Code Example #23
    def create_elasticsearch_domain(
        self,
        context: RequestContext,
        domain_name: DomainName,
        elasticsearch_version: ElasticsearchVersionString = None,
        elasticsearch_cluster_config: ElasticsearchClusterConfig = None,
        ebs_options: EBSOptions = None,
        access_policies: PolicyDocument = None,
        snapshot_options: SnapshotOptions = None,
        vpc_options: VPCOptions = None,
        cognito_options: CognitoOptions = None,
        encryption_at_rest_options: EncryptionAtRestOptions = None,
        node_to_node_encryption_options: NodeToNodeEncryptionOptions = None,
        advanced_options: AdvancedOptions = None,
        log_publishing_options: LogPublishingOptions = None,
        domain_endpoint_options: DomainEndpointOptions = None,
        advanced_security_options: AdvancedSecurityOptionsInput = None,
        auto_tune_options: AutoTuneOptionsInput = None,
        tag_list: TagList = None,
    ) -> CreateElasticsearchDomainResponse:
        opensearch_client = aws_stack.connect_to_service(
            "opensearch", region_name=context.region)
        # If no version is given, we set our default elasticsearch version
        engine_version = (_version_to_opensearch(elasticsearch_version)
                          if elasticsearch_version else
                          constants.ELASTICSEARCH_DEFAULT_VERSION)
        kwargs = {
            "DomainName": domain_name,
            "EngineVersion": engine_version,
            "ClusterConfig": _clusterconfig_to_opensearch(elasticsearch_cluster_config),
            "EBSOptions": ebs_options,
            "AccessPolicies": access_policies,
            "SnapshotOptions": snapshot_options,
            "VPCOptions": vpc_options,
            "CognitoOptions": cognito_options,
            "EncryptionAtRestOptions": encryption_at_rest_options,
            "NodeToNodeEncryptionOptions": node_to_node_encryption_options,
            "AdvancedOptions": advanced_options,
            "LogPublishingOptions": log_publishing_options,
            "DomainEndpointOptions": domain_endpoint_options,
            "AdvancedSecurityOptions": advanced_security_options,
            "AutoTuneOptions": auto_tune_options,
            "TagList": tag_list,
        }

        # Filter the kwargs to not set None values at all (boto doesn't like that)
        kwargs = {key: value for key, value in kwargs.items() if value is not None}

        with exception_mapper():
            domain_status = opensearch_client.create_domain(
                **kwargs)["DomainStatus"]

        # record event
        event_publisher.fire_event(
            event_publisher.EVENT_ES_CREATE_DOMAIN,
            payload={"n": event_publisher.get_hash(domain_name)},
        )

        status = _domainstatus_from_opensearch(domain_status)
        return CreateElasticsearchDomainResponse(DomainStatus=status)
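_version_to_opensearch is not included in this excerpt; a plausible minimal mapping (an assumption based on the OpenSearch API's EngineVersion format, not necessarily the project's implementation) prefixes the plain Elasticsearch version string:

def _version_to_opensearch(elasticsearch_version):
    # OpenSearch-style engine versions look like "Elasticsearch_7.10" for ES domains.
    return 'Elasticsearch_%s' % elasticsearch_version

assert _version_to_opensearch('7.10') == 'Elasticsearch_7.10'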
Code Example #24
    def return_response(self, method, path, data, headers, response):

        path = to_str(path)
        method = to_str(method)
        bucket_name = get_bucket_name(path, headers)

        # No path-name based bucket name? Try host-based
        hostname_parts = headers['host'].split('.')
        if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
            bucket_name = hostname_parts[0]

        # POST requests to S3 may include a success_action_redirect field,
        # which should be used to redirect a client to a new location.
        key = None
        if method == 'POST':
            key, redirect_url = multipart_content.find_multipart_redirect_url(
                data, headers)

            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(
                    redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(
                    response.status_code, response.headers['Location']))

        parsed = urlparse.urlparse(path)
        bucket_name_in_host = headers['host'].startswith(bucket_name)

        should_send_notifications = all([
            method in ('PUT', 'POST', 'DELETE'),
            '/' in path[1:] or bucket_name_in_host,
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            bucket_name_in_host or
            (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
            self.is_query_allowable(method, parsed.query)
        ])

        # get subscribers and send bucket notifications
        if should_send_notifications:
            # if we already have a good key, use it, otherwise examine the path
            if key:
                object_path = '/' + key
            elif bucket_name_in_host:
                object_path = parsed.path
            else:
                parts = parsed.path[1:].split('/', 1)
                object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
            version_id = response.headers.get('x-amz-version-id', None)

            send_notifications(method, bucket_name, object_path, version_id)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or
                                            len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
                          else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(
                event_type,
                payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        if response:
            reset_content_length = False

            # append CORS headers to response
            append_cors_headers(bucket_name,
                                request_method=method,
                                request_headers=headers,
                                response=response)
            append_last_modified_headers(response=response)

            # Remove body from PUT response on presigned URL
            # https://github.com/localstack/localstack/issues/1317
            if method == 'PUT' and ('X-Amz-Security-Token=' in path
                                    or 'AWSAccessKeyId=' in path):
                response._content = ''
                reset_content_length = True

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)

                append_last_modified_headers(response=response,
                                             content=response_content_str)

                # un-pretty-print the XML
                response._content = re.sub(r'([^\?])>\n\s*<',
                                           r'\1><',
                                           response_content_str,
                                           flags=re.MULTILINE)

                # update Location information in response payload
                response._content = self._update_location(
                    response._content, bucket_name)

                # convert back to bytes
                if is_bytes:
                    response._content = to_bytes(response._content)

                # fix content-type: https://github.com/localstack/localstack/issues/618
                #                   https://github.com/localstack/localstack/issues/549
                if 'text/html' in response.headers.get('Content-Type', ''):
                    response.headers['Content-Type'] = 'application/xml; charset=utf-8'

                reset_content_length = True

            # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
            if method == 'DELETE':
                reset_content_length = True

            if reset_content_length:
                response.headers['content-length'] = len(response._content)
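
The un-pretty-printing regex above can be exercised in isolation. A minimal sketch, with an illustrative (not captured) S3-style XML payload:

    import re

    pretty = ('<?xml version="1.0" encoding="UTF-8"?>\n'
              '<ListBucketResult>\n'
              '    <Name>test-bucket</Name>\n'
              '</ListBucketResult>')

    # collapse whitespace between tags; the "[^\?]" group skips the "?>" of the
    # XML declaration, so the newline after the first line survives
    compact = re.sub(r'([^\?])>\n\s*<', r'\1><', pretty, flags=re.MULTILINE)
    assert compact == ('<?xml version="1.0" encoding="UTF-8"?>\n'
                       '<ListBucketResult><Name>test-bucket</Name></ListBucketResult>')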
Code example #25
0
    def forward_request(self, method, path, data, headers):
        if method == 'OPTIONS':
            return 200

        req_data = None
        if method == 'POST' and path == '/':
            req_data = urlparse.parse_qs(to_str(data))
            # parse_qs returns single-element value lists; flatten to plain values
            req_data = {k: v[0] for k, v in req_data.items()}
            action = req_data.get('Action')
            stack_name = req_data.get('StackName')

            if action == 'CreateStack':
                event_publisher.fire_event(
                    event_publisher.EVENT_CLOUDFORMATION_CREATE_STACK,
                    payload={'n': event_publisher.get_hash(stack_name)})

            if action == 'DeleteStack':
                client = aws_stack.connect_to_service('cloudformation')
                stack_resources = client.list_stack_resources(
                    StackName=stack_name)['StackResourceSummaries']
                template_deployer.delete_stack(stack_name, stack_resources)

            if action == 'DescribeStackEvents':
                # fix an issue where moto cannot handle ARNs as stack names (or missing names)
                run_fix = not stack_name
                if stack_name:
                    if stack_name.startswith('arn:aws:cloudformation'):
                        run_fix = True
                        stack_name = re.sub(
                            r'arn:aws:cloudformation:[^:]+:[^:]+:stack/([^/]+)(/.+)?',
                            r'\1', stack_name)
                if run_fix:
                    stack_names = [stack_name] if stack_name else self._list_stack_names()
                    client = aws_stack.connect_to_service('cloudformation')
                    events = []
                    for stack_name in stack_names:
                        tmp = client.describe_stack_events(
                            StackName=stack_name)['StackEvents'][:1]
                        events.extend(tmp)
                    events = [{'member': e} for e in events]
                    response_content = '<StackEvents>%s</StackEvents>' % obj_to_xml(events)
                    return make_response('DescribeStackEvents', response_content)

        if req_data:
            if action == 'ValidateTemplate':
                return validate_template(req_data)
            if action in ['CreateStack', 'UpdateStack']:
                do_replace_url = is_real_s3_url(req_data.get('TemplateURL'))
                if do_replace_url:
                    req_data['TemplateURL'] = convert_s3_to_local_url(
                        req_data['TemplateURL'])
                modified_request = transform_template(req_data)
                if modified_request:
                    req_data.pop('TemplateURL', None)
                    req_data['TemplateBody'] = json.dumps(modified_request)
                if modified_request or do_replace_url:
                    data = urlparse.urlencode(req_data, doseq=True)
                    return Request(data=data, headers=headers, method=method)

        return True
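
The ARN-to-name rewrite used in the DescribeStackEvents fix can be checked on its own; the account ID and stack name below are made up:

    import re

    arn = 'arn:aws:cloudformation:us-east-1:000000000000:stack/my-stack/1a2b3c4d'
    name = re.sub(r'arn:aws:cloudformation:[^:]+:[^:]+:stack/([^/]+)(/.+)?', r'\1', arn)
    assert name == 'my-stack'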
Code example #26
0
    def return_response(self, method, path, data, headers, response):
        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            TABLE_DEFINITIONS[data['TableName']] = data

        if response._content:
            # fix the table ARN (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'"TableArn"\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                r'"TableArn": "arn:aws:dynamodb:%s:\1"' % aws_stack.get_local_region(),
                to_str(response._content))
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target')
        if not action:
            return

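        # skeleton of a DynamoDB Streams event record; the action-specific
        # branches below fill in eventName, Keys and the item images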
        record = {
            'eventID': '1',
            'eventVersion': '1.0',
            'dynamodb': {
                'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': DEFAULT_REGION,
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

        if action == '%s.UpdateItem' % ACTION_PREFIX:
            updated_item = find_existing_item(data)
            if not updated_item:
                return
            record['eventName'] = 'MODIFY'
            record['dynamodb']['Keys'] = data['Key']
            record['dynamodb']['OldImage'] = ProxyListenerDynamoDB.thread_local.existing_item
            record['dynamodb']['NewImage'] = updated_item
            record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
        elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
            records = []
            for table_name, requests in data['RequestItems'].items():
                for request in requests:
                    put_request = request.get('PutRequest')
                    if put_request:
                        keys = dynamodb_extract_keys(item=put_request['Item'],
                                                     table_name=table_name)
                        if isinstance(keys, Response):
                            return keys
                        new_record = clone(record)
                        new_record['eventName'] = 'INSERT'
                        new_record['dynamodb']['Keys'] = keys
                        new_record['dynamodb']['NewImage'] = put_request['Item']
                        new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)
                        records.append(new_record)
        elif action == '%s.PutItem' % ACTION_PREFIX:
            existing_item = ProxyListenerDynamoDB.thread_local.existing_item
            ProxyListenerDynamoDB.thread_local.existing_item = None
            record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
            keys = dynamodb_extract_keys(item=data['Item'],
                                         table_name=data['TableName'])
            if isinstance(keys, Response):
                return keys
            record['dynamodb']['Keys'] = keys
            record['dynamodb']['NewImage'] = data['Item']
            record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
        elif action == '%s.GetItem' % ACTION_PREFIX:
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get(
                        'ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                    content['ConsumedCapacity'] = {
                        'CapacityUnits': 0.5,  # TODO hardcoded
                        'TableName': data['TableName']
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)
        elif action == '%s.DeleteItem' % ACTION_PREFIX:
            record['eventName'] = 'REMOVE'
            record['dynamodb']['Keys'] = data['Key']
        elif action == '%s.CreateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.DeleteTable' % ACTION_PREFIX:
            event_publisher.fire_event(
                event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                payload={'n': event_publisher.get_hash(data['TableName'])})
            return
        elif action == '%s.UpdateTable' % ACTION_PREFIX:
            if 'StreamSpecification' in data:
                create_dynamodb_stream(data)
            return
        else:
            # nothing to do
            return

        if len(records) > 0 and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(
                    data['TableName'])
            forward_to_lambda(records)
            forward_to_ddb_stream(records)
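
The "ddblocal" region rewrite at the top of this listener is easy to verify standalone; 'us-east-1' stands in for whatever aws_stack.get_local_region() returns, and the account and table name are placeholders:

    import re

    content = '{"TableArn": "arn:aws:dynamodb:ddblocal:000000000000:table/demo"}'
    fixed = re.sub(r'"TableArn"\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                   r'"TableArn": "arn:aws:dynamodb:us-east-1:\1"', content)
    assert fixed == '{"TableArn": "arn:aws:dynamodb:us-east-1:000000000000:table/demo"}'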
Code example #27
0
    def return_response(self, method, path, data, headers, response):
        if path.startswith('/shell') or method == 'GET':
            return

        data = json.loads(to_str(data))

        # update table definitions
        if data and 'TableName' in data and 'KeySchema' in data:
            table_definitions = DynamoDBRegion.get().table_definitions
            table_definitions[data['TableName']] = data
        if response._content:
            # fix the table and latest stream ARNs (DynamoDBLocal hardcodes "ddblocal" as the region)
            content_replaced = re.sub(
                r'("TableArn"|"LatestStreamArn"|"StreamArn")\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
                r'\1: "arn:aws:dynamodb:%s:\2"' % aws_stack.get_region(),
                to_str(response._content)
            )
            if content_replaced != response._content:
                response._content = content_replaced
                fix_headers_for_updated_response(response)

        action = headers.get('X-Amz-Target', '')
        action = action.replace(ACTION_PREFIX, '')
        if not action:
            return
        # upgrade event version to 1.1
        record = {
            'eventID': '1',
            'eventVersion': '1.1',
            'dynamodb': {
                'ApproximateCreationDateTime': time.time(),
                # 'StreamViewType': 'NEW_AND_OLD_IMAGES',
                'SizeBytes': -1
            },
            'awsRegion': aws_stack.get_region(),
            'eventSource': 'aws:dynamodb'
        }
        records = [record]

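        # per-request cache, so repeated "streams enabled" checks for the same
        # table or stream ARN do not redo the lookup for every record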
        streams_enabled_cache = {}
        table_name = data.get('TableName')
        event_sources_or_streams_enabled = has_event_sources_or_streams_enabled(table_name, streams_enabled_cache)

        if action == 'UpdateItem':
            if response.status_code == 200 and event_sources_or_streams_enabled:
                existing_item = self._thread_local('existing_item')
                record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                record['eventID'] = short_uid()
                updated_item = find_existing_item(data)
                if not updated_item:
                    return
                record['dynamodb']['Keys'] = data['Key']
                if existing_item:
                    record['dynamodb']['OldImage'] = existing_item
                record['dynamodb']['NewImage'] = updated_item
                record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
                stream_spec = dynamodb_get_table_stream_specification(table_name=table_name)
                if stream_spec:
                    record['dynamodb']['StreamViewType'] = stream_spec['StreamViewType']

        elif action == 'BatchWriteItem':
            records, unprocessed_items = self.prepare_batch_write_item_records(record, data)
            for record in records:
                event_sources_or_streams_enabled = (event_sources_or_streams_enabled or
                    has_event_sources_or_streams_enabled(record['eventSourceARN'], streams_enabled_cache))
            if response.status_code == 200 and any(unprocessed_items):
                content = json.loads(to_str(response.content))
                table_name = list(data['RequestItems'].keys())[0]
                if table_name not in content['UnprocessedItems']:
                    content['UnprocessedItems'][table_name] = []
                for key in ['PutRequest', 'DeleteRequest']:
                    if any(unprocessed_items[key]):
                        content['UnprocessedItems'][table_name].append({key: unprocessed_items[key]})
                unprocessed = content['UnprocessedItems']
                for key in list(unprocessed.keys()):
                    if not unprocessed.get(key):
                        del unprocessed[key]

                response._content = json.dumps(content)
                fix_headers_for_updated_response(response)

        elif action == 'TransactWriteItems':
            records = self.prepare_transact_write_item_records(record, data)
            for record in records:
                event_sources_or_streams_enabled = (event_sources_or_streams_enabled or
                    has_event_sources_or_streams_enabled(record['eventSourceARN'], streams_enabled_cache))

        elif action == 'PutItem':
            if response.status_code == 200:
                keys = dynamodb_extract_keys(item=data['Item'], table_name=table_name)
                if isinstance(keys, Response):
                    return keys
                # fix response
                if response._content == '{}':
                    response._content = update_put_item_response_content(data, response._content)
                    fix_headers_for_updated_response(response)
                if event_sources_or_streams_enabled:
                    existing_item = self._thread_local('existing_item')
                    # Get stream specifications details for the table
                    stream_spec = dynamodb_get_table_stream_specification(table_name=table_name)
                    record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
                    # prepare record keys
                    record['dynamodb']['Keys'] = keys
                    record['dynamodb']['NewImage'] = data['Item']
                    record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
                    record['eventID'] = short_uid()
                    if stream_spec:
                        record['dynamodb']['StreamViewType'] = stream_spec['StreamViewType']
                    if existing_item:
                        record['dynamodb']['OldImage'] = existing_item

        elif action in ('GetItem', 'Query'):
            if response.status_code == 200:
                content = json.loads(to_str(response.content))
                # make sure we append 'ConsumedCapacity', which is properly
                # returned by dynalite, but not by AWS's DynamoDBLocal
                if 'ConsumedCapacity' not in content and data.get('ReturnConsumedCapacity') in ['TOTAL', 'INDEXES']:
                    content['ConsumedCapacity'] = {
                        'TableName': table_name,
                        'CapacityUnits': 5,             # TODO hardcoded
                        'ReadCapacityUnits': 2,
                        'WriteCapacityUnits': 3
                    }
                    response._content = json.dumps(content)
                    fix_headers_for_updated_response(response)

        elif action == 'DeleteItem':
            if response.status_code == 200 and event_sources_or_streams_enabled:
                old_item = self._thread_local('existing_item')
                record['eventID'] = short_uid()
                record['eventName'] = 'REMOVE'
                record['dynamodb']['Keys'] = data['Key']
                record['dynamodb']['OldImage'] = old_item
                record['dynamodb']['SizeBytes'] = len(json.dumps(old_item))
                # Get stream specifications details for the table
                stream_spec = dynamodb_get_table_stream_specification(table_name=table_name)
                if stream_spec:
                    record['dynamodb']['StreamViewType'] = stream_spec['StreamViewType']

        elif action == 'CreateTable':
            if 'StreamSpecification' in data:
                if response.status_code == 200:
                    content = json.loads(to_str(response._content))
                    create_dynamodb_stream(data, content['TableDescription'].get('LatestStreamLabel'))

            event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
                payload={'n': event_publisher.get_hash(table_name)})

            if data.get('Tags') and response.status_code == 200:
                table_arn = json.loads(response._content)['TableDescription']['TableArn']
                DynamoDBRegion.TABLE_TAGS[table_arn] = {tag['Key']: tag['Value'] for tag in data['Tags']}

            return

        elif action == 'DeleteTable':
            if response.status_code == 200:
                table_arn = json.loads(response._content).get('TableDescription', {}).get('TableArn')
                event_publisher.fire_event(
                    event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
                    payload={'n': event_publisher.get_hash(table_name)}
                )
                self.delete_all_event_source_mappings(table_arn)
                dynamodbstreams_api.delete_streams(table_arn)
                DynamoDBRegion.TABLE_TAGS.pop(table_arn, None)
            return

        elif action == 'UpdateTable':
            content_str = to_str(response._content or '')
            if response.status_code == 200 and 'StreamSpecification' in data:
                content = json.loads(content_str)
                create_dynamodb_stream(data, content['TableDescription'].get('LatestStreamLabel'))
            if response.status_code >= 400 and data.get('ReplicaUpdates') and 'Nothing to update' in content_str:
                table_name = data.get('TableName')
                # update local table props (replicas)
                table_properties = DynamoDBRegion.get().table_properties
                table_properties[table_name] = table_props = table_properties.get(table_name) or {}
                table_props['Replicas'] = replicas = table_props.get('Replicas') or []
                for repl_update in data['ReplicaUpdates']:
                    for key, details in repl_update.items():
                        region = details.get('RegionName')
                        if key == 'Create':
                            details['ReplicaStatus'] = details.get('ReplicaStatus') or 'ACTIVE'
                            replicas.append(details)
                        if key == 'Update':
                            replica = [r for r in replicas if r.get('RegionName') == region]
                            if replica:
                                replica[0].update(details)
                        if key == 'Delete':
                            table_props['Replicas'] = [r for r in replicas if r.get('RegionName') != region]
                # update response content
                schema = get_table_schema(table_name)
                result = {'TableDescription': schema['Table']}
                update_response_content(response, json_safe(result), 200)
            return

        elif action == 'DescribeTable':
            table_name = data.get('TableName')
            table_props = DynamoDBRegion.get().table_properties.get(table_name)
            if table_props:
                content = json.loads(to_str(response.content))
                content.get('Table', {}).update(table_props)
                update_response_content(response, content)

        elif action == 'TagResource':
            table_arn = data['ResourceArn']
            table_tags = DynamoDBRegion.TABLE_TAGS
            if table_arn not in table_tags:
                table_tags[table_arn] = {}
            table_tags[table_arn].update({tag['Key']: tag['Value'] for tag in data.get('Tags', [])})
            return

        elif action == 'UntagResource':
            table_arn = data['ResourceArn']
            for tag_key in data.get('TagKeys', []):
                DynamoDBRegion.TABLE_TAGS.get(table_arn, {}).pop(tag_key, None)
            return

        else:
            # nothing to do
            return
        if event_sources_or_streams_enabled and records and 'eventName' in records[0]:
            if 'TableName' in data:
                records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)
            # forward to kinesis stream
            forward_to_kinesis_stream(records)
            # forward to lambda and ddb_streams
            forward_to_lambda(records)
            records = self.prepare_records_to_forward_to_ddb_stream(records)
            forward_to_ddb_stream(records)
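
The ReplicaUpdates bookkeeping in the UpdateTable branch is self-contained enough to sketch as a standalone helper; apply_replica_updates is a hypothetical name, and the semantics mirror the loop above (Create appends with a default ACTIVE status, Update merges by region, Delete filters the region out):

    def apply_replica_updates(table_props, replica_updates):
        # same quirks as the listener code: "replicas" aliases the stored list,
        # and Delete rebinds table_props['Replicas'] to a filtered copy
        table_props['Replicas'] = replicas = table_props.get('Replicas') or []
        for repl_update in replica_updates:
            for key, details in repl_update.items():
                region = details.get('RegionName')
                if key == 'Create':
                    details['ReplicaStatus'] = details.get('ReplicaStatus') or 'ACTIVE'
                    replicas.append(details)
                if key == 'Update':
                    replica = [r for r in replicas if r.get('RegionName') == region]
                    if replica:
                        replica[0].update(details)
                if key == 'Delete':
                    table_props['Replicas'] = [r for r in replicas
                                               if r.get('RegionName') != region]
        return table_props

    props = apply_replica_updates({}, [{'Create': {'RegionName': 'eu-west-1'}}])
    assert props['Replicas'][0]['ReplicaStatus'] == 'ACTIVE'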