Example #1
def append_cors_headers(bucket_name, request_method, request_headers, response):
    cors = BUCKET_CORS.get(bucket_name)
    if not cors:
        return
    origin = request_headers.get('Origin', '')
    rules = cors['CORSConfiguration']['CORSRule']
    if not isinstance(rules, list):
        rules = [rules]
    for rule in rules:
        # add allow-origin header
        allowed_methods = rule.get('AllowedMethod', [])
        if request_method in allowed_methods:
            allowed_origins = rule.get('AllowedOrigin', [])
            for allowed in allowed_origins:
                if origin in allowed or re.match(allowed.replace('*', '.*'), origin):
                    response.headers['Access-Control-Allow-Origin'] = origin
                    break
        # add additional headers
        exposed_headers = rule.get('ExposeHeader', [])
        for header in exposed_headers:
            if header.lower() == 'date':
                response.headers[header] = timestamp(format='%a, %d %b %Y %H:%M:%S +0000')
            elif header.lower() == 'etag':
                response.headers[header] = md5(response._content)
            elif header.lower() in ('server', 'x-amz-id-2', 'x-amz-request-id'):
                response.headers[header] = short_uid()
            elif header.lower() == 'x-amz-delete-marker':
                response.headers[header] = 'false'
            elif header.lower() == 'x-amz-version-id':
                # TODO: check whether bucket versioning is enabled and return proper version id
                response.headers[header] = 'null'
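A rough usage sketch, assuming append_cors_headers and its BUCKET_CORS registry can be imported from the S3 listener module; the bucket name, origin, and CORS rule below are made up, and the entry shape is inferred from the lookups above.

# hypothetical import path; adjust to the actual S3 listener module
# from localstack.services.s3.s3_listener import append_cors_headers, BUCKET_CORS
from requests.models import Response

# register a CORS configuration for a test bucket (a single rule may be a dict instead of a list)
BUCKET_CORS['my-bucket'] = {
    'CORSConfiguration': {
        'CORSRule': {
            'AllowedOrigin': ['https://example.com'],
            'AllowedMethod': ['GET', 'PUT'],
            'ExposeHeader': ['ETag', 'x-amz-request-id'],
        }
    }
}

response = Response()
response._content = b'<test/>'
append_cors_headers('my-bucket', 'GET', {'Origin': 'https://example.com'}, response)
# response.headers now carries Access-Control-Allow-Origin plus the exposed headers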
Example #2
File: infra.py Project: bbc/localstack
def get_lambda_code(func_name, retries=1, cache_time=None, env=None):
    if MOCK_OBJ:
        return ''
    env = aws_stack.get_environment(env)
    if cache_time is None and env.region != REGION_LOCAL:
        cache_time = AWS_LAMBDA_CODE_CACHE_TIMEOUT
    out = cmd_lambda('get-function --function-name %s' % func_name, env, cache_time)
    out = json.loads(out)
    loc = out['Code']['Location']
    hash = md5(loc)
    folder = TMP_DOWNLOAD_FILE_PATTERN.replace('*', hash)
    filename = 'archive.zip'
    archive = '%s/%s' % (folder, filename)
    try:
        mkdir(folder)
        if not os.path.isfile(archive):
            download(loc, archive, verify_ssl=False)
        if len(os.listdir(folder)) <= 1:
            zip_path = os.path.join(folder, filename)
            unzip(zip_path, folder)
    except Exception as e:
        print('WARN: %s' % e)
        rm_rf(archive)
        if retries > 0:
            return get_lambda_code(func_name, retries=retries - 1, cache_time=1, env=env)
        else:
            print('WARNING: Unable to retrieve lambda code: %s' % e)

    # traverse subdirectories and get script sources
    result = {}
    for root, subdirs, files in os.walk(folder):
        for file in files:
            prefix = root.split(folder)[-1]
            key = '%s/%s' % (prefix, file)
            if re.match(r'.+\.py$', key) or re.match(r'.+\.js$', key):
                codefile = '%s/%s' % (root, file)
                result[key] = load_file(codefile)

    # cleanup cache
    clean_cache(file_pattern=TMP_DOWNLOAD_FILE_PATTERN,
        last_clean_time=last_cache_cleanup_time,
        max_age=TMP_DOWNLOAD_CACHE_MAX_AGE)
    # TODO: delete only if cache_time is over
    rm_rf(folder)

    return result
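A quick sketch of how the returned mapping might be consumed; 'my-handler' is a hypothetical function name and is assumed to be already deployed.

sources = get_lambda_code('my-handler')
for path, code in sources.items():
    # keys are '/<subdir>/<file>.py' or '.js' paths, values are the file contents
    print('%s: %s chars' % (path, len(code or '')))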
Example #3
def process_sqs_message(message_body,
                        message_attributes,
                        queue_name,
                        region_name=None):
    # feed message into the first listening lambda (message should only get processed once)
    try:
        queue_arn = aws_stack.sqs_queue_arn(queue_name,
                                            region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        LOG.debug('Found %s source mappings for event from SQS queue %s' %
                  (len(sources), queue_arn))
        source = next(iter(sources), None)
        if source:
            arn = source['FunctionArn']
            event = {
                'Records': [{
                    'body': message_body,
                    'receiptHandle': 'MessageReceiptHandle',
                    'md5OfBody': md5(message_body),
                    'eventSourceARN': queue_arn,
                    'eventSource': 'aws:sqs',
                    'awsRegion': region_name,
                    'messageId': str(uuid.uuid4()),
                    'attributes': {
                        'ApproximateFirstReceiveTimestamp': '{}000'.format(int(time.time())),
                        'SenderId': TEST_AWS_ACCOUNT_ID,
                        'ApproximateReceiveCount': '1',
                        'SentTimestamp': '{}000'.format(int(time.time()))
                    },
                    'messageAttributes': message_attributes,
                    'sqs': True,
                }]
            }
            run_lambda(event=event, context={}, func_arn=arn)
            return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' %
                    (e, traceback.format_exc()))
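A hedged usage sketch: 'my-queue' is a hypothetical queue that is assumed to have a Lambda event source mapping; the function only returns True when a mapping was found and the event was handed to run_lambda.

handled = process_sqs_message(
    message_body='{"hello": "world"}',
    message_attributes={},
    queue_name='my-queue',
    region_name='us-east-1')
if not handled:
    print('no event source mapping found - message stays in the queue')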
Example #4
        def add_default_props(resource_props, res_name=None):
            """ apply some fixes which otherwise cause deployments to fail """
            res_type = resource_props['Type']
            props = resource_props.get('Properties', {})

            if res_type == 'AWS::Lambda::EventSourceMapping' and not props.get('StartingPosition'):
                props['StartingPosition'] = 'LATEST'

            if res_type == 'AWS::SNS::Topic' and not props.get('TopicName'):
                props['TopicName'] = 'topic-%s' % short_uid()

            if res_type == 'AWS::SQS::Queue' and not props.get('QueueName'):
                props['QueueName'] = 'queue-%s' % short_uid()

            if res_type == 'AWS::ApiGateway::RestApi':
                props['Name'] = props.get('Name') or res_name

            # generate default names for certain resource types
            default_attrs = (('AWS::IAM::Role', 'RoleName'), ('AWS::Events::Rule', 'Name'))
            for entry in default_attrs:
                if res_type == entry[0] and not props.get(entry[1]):
                    props[entry[1]] = 'cf-%s-%s' % (stack_name, md5(canonical_json(props)))
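For illustration, a made-up resource fragment run through this helper; stack_name comes from the enclosing deployment scope.

resource = {'Type': 'AWS::SQS::Queue', 'Properties': {}}
add_default_props(resource, res_name='MyQueue')
# resource['Properties']['QueueName'] now holds a generated name such as 'queue-1a2b3c4d'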
Example #5
    def _send_message(self, path, data, req_data, headers):
        queue_url = self._queue_url(path, req_data, headers)
        queue_name = queue_url.rpartition('/')[2]
        message_body = req_data.get('MessageBody', [None])[0]
        message_attributes = self.format_message_attributes(req_data)
        region_name = extract_region_from_auth_header(headers)

        process_result = lambda_api.process_sqs_message(
            message_body,
            message_attributes,
            queue_name,
            region_name=region_name)
        if process_result:
            # If a Lambda was listening, do not add the message to the queue
            new_response = Response()
            message_attr_hash = self.get_message_attributes_md5(req_data)
            new_response._content = SUCCESSFUL_SEND_MESSAGE_XML_TEMPLATE.format(
                message_attr_hash=message_attr_hash,
                message_body_hash=md5(message_body),
                message_id=str(uuid.uuid4()))
            new_response.status_code = 200
            return new_response
Example #6
def store_delivery_log(subscriber: dict,
                       success: bool,
                       message: str,
                       message_id: str,
                       delivery: dict = None):

    log_group_name = subscriber.get("TopicArn", "").replace("arn:aws:", "").replace(":", "/")
    log_stream_name = long_uid()
    invocation_time = int(time.time() * 1000)

    delivery = not_none_or(delivery, {})
    delivery["deliveryId"] = (long_uid(), )
    delivery["destination"] = (subscriber.get("Endpoint", ""), )
    delivery["dwellTimeMs"] = 200
    if not success:
        delivery["attemps"] = 1

    delivery_log = {
        "notification": {
            "messageMD5Sum": md5(message),
            "messageId": message_id,
            "topicArn": subscriber.get("TopicArn"),
            "timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
        },
        "delivery": delivery,
        "status": "SUCCESS" if success else "FAILURE",
    }

    log_output = json.dumps(json_safe(delivery_log))

    return store_cloudwatch_logs(log_group_name, log_stream_name, log_output,
                                 invocation_time)
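A minimal caller sketch, assuming a subscriber dict that only contains the keys read above; the topic ARN, endpoint, and message ID are made up.

subscriber = {
    'TopicArn': 'arn:aws:sns:us-east-1:000000000000:my-topic',
    'Endpoint': 'http://localhost:4566/endpoint',
}
store_delivery_log(subscriber, success=True, message='hello world', message_id='msg-1')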
Example #7
def process_sqs_message(message_body, message_attributes, queue_name, region_name=None):
    # feed message into the first listening lambda (message should only get processed once)
    try:
        queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        arns = [s.get('FunctionArn') for s in sources]
        LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
        source = next(iter(sources), None)
        if not source:
            return False
        arn = source['FunctionArn']
        event = {'Records': [{
            'body': message_body,
            'receiptHandle': 'MessageReceiptHandle',
            'md5OfBody': md5(message_body),
            'eventSourceARN': queue_arn,
            'eventSource': 'aws:sqs',
            'awsRegion': region_name,
            'messageId': str(uuid.uuid4()),
            'attributes': {
                'ApproximateFirstReceiveTimestamp': '{}000'.format(int(time.time())),
                'SenderId': TEST_AWS_ACCOUNT_ID,
                'ApproximateReceiveCount': '1',
                'SentTimestamp': '{}000'.format(int(time.time()))
            },
            'messageAttributes': message_attributes,
            'sqs': True,
        }]}
        result = run_lambda(event=event, context={}, func_arn=arn)
        status_code = getattr(result, 'status_code', 200)
        if status_code >= 400:
            LOG.warning('Invoking Lambda %s from SQS message failed (%s): %s' % (arn, status_code, result.data))
            # check if we need to forward to a dead letter queue
            sqs_error_to_dead_letter_queue(queue_arn, event, result)
        return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
Example #8
File: s3.py Project: pinzon/localstack
    def get_physical_resource_id(self, attribute=None, **kwargs):
        policy = self.props.get("Policy")
        return policy and md5(canonical_json(json.loads(policy)))
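In effect the physical resource ID is the MD5 of the canonicalized policy document; a short sketch with a made-up policy, reusing the md5/canonical_json helpers seen above.

import json

policy = '{"Version": "2012-10-17", "Statement": []}'
physical_id = md5(canonical_json(json.loads(policy)))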