Example #1
    def _put_to_search_db(
        self, db_flavor, db_description, delivery_stream_name, records, unprocessed_records
    ):
        """
        sends Firehose records to an Elasticsearch or OpenSearch database
        """
        search_db_index = db_description["IndexName"]
        search_db_type = db_description.get("TypeName")
        region = aws_stack.get_region()
        domain_arn = db_description.get("DomainARN")
        cluster_endpoint = db_description.get("ClusterEndpoint")
        if cluster_endpoint is None:
            cluster_endpoint = aws_stack.get_opensearch_endpoint(domain_arn)

        db_connection = get_search_db_connection(cluster_endpoint, region)
        if db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.AllDocuments:
            s3_dest_desc = db_description.get("S3DestinationDescription")
            if s3_dest_desc:
                try:
                    self._put_records_to_s3_bucket(
                        stream_name=delivery_stream_name,
                        records=unprocessed_records,
                        s3_destination_description=s3_dest_desc,
                    )
                except Exception as e:
                    LOG.warning("Unable to backup unprocessed records to S3. Error: %s", e)
            else:
                LOG.warning("Passed S3BackupMode without S3Configuration. Cannot backup...")
        elif db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.FailedDocumentsOnly:
            # TODO support FailedDocumentsOnly as well
            LOG.warning("S3BackupMode FailedDocumentsOnly is set but currently not supported.")
        for record in records:
            obj_id = uuid.uuid4()

            data = "{}"
            # DirectPut
            if "Data" in record:
                data = base64.b64decode(record["Data"])
            # KinesisAsSource
            elif "data" in record:
                data = base64.b64decode(record["data"])

            try:
                body = json.loads(data)
            except Exception as e:
                LOG.warning(f"{db_flavor} only allows json input data!")
                raise e

            LOG.debug(
                "Publishing to {} destination. Data: {}".format(
                    db_flavor, truncate(data, max_length=300)
                )
            )
            try:
                db_connection.create(
                    index=search_db_index, doc_type=search_db_type, id=obj_id, body=body
                )
            except Exception as e:
                LOG.exception(f"Unable to put record to stream {delivery_stream_name}.")
                raise e
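
The branch above distinguishes DirectPut records, which carry a base64-encoded "Data" field, from records arriving via Kinesis, which use a lowercase "data" key. A minimal sketch of that decode step (the sample record is illustrative):

import base64
import json

# Illustrative record: DirectPut uses "Data", Kinesis-sourced records use "data";
# non-JSON payloads make json.loads() raise, which the handler logs and re-raises.
record = {"Data": base64.b64encode(b'{"user": "alice"}').decode()}
raw = record.get("Data") or record.get("data")
body = json.loads(base64.b64decode(raw))
print(body)  # {'user': 'alice'}
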
Example #2
def transform_template(req_data) -> Optional[str]:
    """only returns string when parsing SAM template, otherwise None"""
    template_body = get_template_body(req_data)
    parsed = parse_template(template_body)
    if parsed.get("Transform") == "AWS::Serverless-2016-10-31":
        policy_map = {
            # SAM Transformer expects this map to be non-empty, but apparently the content doesn't matter (?)
            "dummy": "entry"
            # 'AWSLambdaBasicExecutionRole': 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
        }

        class MockPolicyLoader(object):
            def load(self):
                return policy_map

        # Note: we need to fix boto3 region, otherwise AWS SAM transformer fails
        region_before = os.environ.get("AWS_DEFAULT_REGION")
        if boto3.session.Session().region_name is None:
            os.environ["AWS_DEFAULT_REGION"] = aws_stack.get_region()
        try:
            transformed = transform_sam(parsed, {}, MockPolicyLoader())
            return json.dumps(transformed)
        finally:
            os.environ.pop("AWS_DEFAULT_REGION", None)
            if region_before is not None:
                os.environ["AWS_DEFAULT_REGION"] = region_before
Example #3
    def return_response(self, method, path, data, headers, response):

        req_data = parse_request_data(method, path, data)
        action = req_data.get("Action")
        if action == "PutMetricAlarm":
            name = req_data.get("AlarmName")
            # add missing attribute "TreatMissingData"
            treat_missing_data = req_data.get("TreatMissingData", "ignore")
            cloudwatch_backends[aws_stack.get_region()].alarms[name].treat_missing_data = treat_missing_data
            # record tags
            arn = aws_stack.cloudwatch_alarm_arn(name)
            tags = aws_responses.extract_tags(req_data)
            TAGS.tag_resource(arn, tags)

        # fix incorrect date formats to the expected ISO format;
        # the dictionary maps each tag name to a (pattern, replacement) tuple

        regexes1 = (r"<{}>([^<]+) ([^<+]+)(\+[^<]*)?</{}>", r"<{}>\1T\2Z</{}>")
        regexes2 = (r"<{}>([^<]+) ([^<+.]+)(\.[^<]*)?</{}>",
                    r"<{}>\1T\2Z</{}>")
        timestamp_tags = {
            "AlarmConfigurationUpdatedTimestamp": regexes1,
            "StateUpdatedTimestamp": regexes1,
            "member": regexes2,
        }

        for tag, value in timestamp_tags.items():
            pattern, replacement = value
            self.fix_date_format(response, tag, pattern, replacement)
        response.headers["Content-Length"] = len(response.content)
        return response
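
fix_date_format presumably expands each (pattern, replacement) template with the tag name on both ends and applies re.sub; a self-contained sketch of the rewrite, assuming that usage:

import re

# Expanding the templates with a tag name and applying re.sub turns
# "YYYY-MM-DD hh:mm:ss+00:00" inside that tag into ISO-8601 "YYYY-MM-DDThh:mm:ssZ".
tag = "StateUpdatedTimestamp"
pattern = r"<{}>([^<]+) ([^<+]+)(\+[^<]*)?</{}>".format(tag, tag)
replacement = r"<{}>\1T\2Z</{}>".format(tag, tag)
xml = "<StateUpdatedTimestamp>2021-01-01 12:00:00+00:00</StateUpdatedTimestamp>"
print(re.sub(pattern, replacement, xml))
# <StateUpdatedTimestamp>2021-01-01T12:00:00Z</StateUpdatedTimestamp>
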
Example #4
    def return_response(self, method, path, data, headers, response):

        req_data = parse_request_data(method, path, data)
        action = req_data.get('Action')
        if action == 'PutMetricAlarm':
            name = req_data.get('AlarmName')
            treat_missing_data = req_data.get('TreatMissingData', 'ignore')
            cloudwatch_backends[aws_stack.get_region()].alarms[name].treat_missing_data = treat_missing_data

        # fix incorrect date formats to the expected ISO format;
        # the dictionary maps each tag name to a (pattern, replacement) tuple

        regexes1 = (r'<{}>([^<]+) ([^<+]+)(\+[^<]*)?</{}>', r'<{}>\1T\2Z</{}>')
        regexes2 = (r'<{}>([^<]+) ([^<+.]+)(\.[^<]*)?</{}>', r'<{}>\1T\2Z</{}>')
        timestamp_tags = {
            'AlarmConfigurationUpdatedTimestamp': regexes1,
            'StateUpdatedTimestamp': regexes1,
            'member': regexes2
        }

        for tag, value in timestamp_tags.items():
            pattern, replacement = value
            self.fix_date_format(response, tag, pattern, replacement)
        response.headers['Content-Length'] = len(response.content)
        return response
Example #5
    def _create_lambda_event_payload(self, stream_arn, records, shard_id=None):
        record_payloads = []
        for record in records:
            record_payload = {}
            for key, val in record.items():
                record_payload[first_char_to_lower(key)] = val
            # boto3 automatically decodes records in get_records(), so we must re-encode
            record_payload["data"] = to_str(base64.b64encode(record_payload["data"]))
            # convert datetime obj to timestamp
            record_payload["approximateArrivalTimestamp"] = (
                record_payload["approximateArrivalTimestamp"].timestamp() * 1000
            )
            record_payloads.append({
                "eventID": "{0}:{1}".format(shard_id, record_payload["sequenceNumber"]),
                "eventSourceARN": stream_arn,
                "eventSource": "aws:kinesis",
                "eventVersion": "1.0",
                "eventName": "aws:kinesis:record",
                "invokeIdentityArn": "arn:aws:iam::{0}:role/lambda-role".format(constants.TEST_AWS_ACCOUNT_ID),
                "awsRegion": aws_stack.get_region(),
                "kinesis": record_payload,
            })
        return {"Records": record_payloads}
Example #6
    def cf_describe_stack_events(self):
        stack_name = self._get_param('StackName')
        stack = self.cloudformation_backend.get_stack(stack_name)
        if not stack:
            raise ValidationError(
                stack_name,
                message='Unable to find stack "%s" in region %s' % (stack_name, aws_stack.get_region()),
            )
        return cf_describe_stack_events_orig(self)
Example #7
def connect_api_gateway_to_sqs(gateway_name, stage_name, queue_arn, path, region_name=None):
    resources = {}
    template = APIGATEWAY_SQS_DATA_INBOUND_TEMPLATE
    resource_path = path.replace('/', '')
    region_name = region_name or aws_stack.get_region()
    queue_name = aws_stack.sqs_queue_name(queue_arn)
    sqs_region = aws_stack.extract_region_from_arn(queue_arn) or region_name
    resources[resource_path] = [{
        'httpMethod': 'POST',
        'authorizationType': 'NONE',
        'integrations': [{
            'type': 'AWS',
            'uri': 'arn:aws:apigateway:%s:sqs:path/%s/%s' % (sqs_region, TEST_AWS_ACCOUNT_ID, queue_name),
            'requestTemplates': {'application/json': template},
        }]
    }]
    return aws_stack.create_api_gateway(
        name=gateway_name, resources=resources, stage_name=stage_name, region_name=region_name)
Example #8
def resolve_ref(stack_name, ref, resources, attribute):
    if ref == 'AWS::Region':
        return aws_stack.get_region()
    resource_status = {}
    if stack_name:
        resource_status = describe_stack_resource(stack_name, ref)
        if not resource_status:
            return
        attr_value = resource_status.get(attribute)
        if attr_value not in [None, '']:
            return attr_value
    elif ref in resources:
        resource_status = resources[ref]['__details__']
    # fetch resource details
    resource = resources.get(ref)
    resource_new = retrieve_resource_details(ref, resource_status, resources, stack_name)
    if not resource_new:
        return
    resource_type = get_resource_type(resource)
    result = extract_resource_attribute(resource_type, resource_new, attribute)
    if not result:
        LOG.warning('Unable to extract reference attribute %s from resource: %s' % (attribute, resource_new))
    return result
Example #9
def process_sqs_message(message_body, message_attributes, queue_name, region_name=None):
    # feed message into the first listening lambda (message should only get processed once)
    try:
        region_name = region_name or aws_stack.get_region()
        queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        arns = [s.get('FunctionArn') for s in sources]
        LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
        source = next(iter(sources), None)
        if not source:
            return False
        arn = source['FunctionArn']
        event = {'Records': [{
            'body': message_body,
            'receiptHandle': 'MessageReceiptHandle',
            'md5OfBody': md5(message_body),
            'eventSourceARN': queue_arn,
            'eventSource': 'aws:sqs',
            'awsRegion': region_name,
            'messageId': str(uuid.uuid4()),
            'attributes': {
                'ApproximateFirstReceiveTimestamp': '{}000'.format(int(time.time())),
                'SenderId': TEST_AWS_ACCOUNT_ID,
                'ApproximateReceiveCount': '1',
                'SentTimestamp': '{}000'.format(int(time.time()))
            },
            'messageAttributes': message_attributes,
            'sqs': True,
        }]}
        run_lambda(event=event, context={}, func_arn=arn, asynchronous=True)
        return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
Example #10
def publish_log_metrics_for_events(data):
    """Filter and publish log metrics for matching events"""
    from moto.logs.models import logs_backends

    data = data if isinstance(data, dict) else json.loads(data)
    log_events = data.get("logEvents") or []
    logs_backend = logs_backends[aws_stack.get_region()]
    metric_filters = logs_backend.filters.metric_filters
    client = aws_stack.connect_to_service("cloudwatch")
    for metric_filter in metric_filters:
        pattern = metric_filter.get("filterPattern", "")
        transformations = metric_filter.get("metricTransformations", [])
        matches = get_pattern_matcher(pattern)
        for log_event in log_events:
            if matches(pattern, log_event):
                for tf in transformations:
                    value = tf.get("metricValue") or "1"
                    if "$size" in value:
                        LOG.info("Expression not yet supported for log filter metricValue", value)
                    value = float(value) if is_number(value) else 1
                    data = [{"MetricName": tf["metricName"], "Value": value}]
                    try:
                        client.put_metric_data(Namespace=tf["metricNamespace"], MetricData=data)
                    except Exception as e:
                        LOG.info("Unable to put metric data for matching CloudWatch log events", e)
Example #11
def configure_region_for_current_request(region_name: str, service_name: str):
    """Manually configure (potentially overwrite) the region in the current request context. This may be
    used by API endpoints that are invoked directly by the user (without specifying AWS Authorization
    headers), to still enable transparent region lookup via aws_stack.get_region() ..."""

    # TODO: leaving import here for now, to avoid circular dependency
    from localstack.utils.aws import aws_stack

    request_context = get_request_context()
    if not request_context:
        LOG.info(
            "Unable to set region '%s' in undefined request context: %s",
            region_name,
            request_context,
        )
        return

    headers = request_context.headers
    auth_header = headers.get("Authorization")
    auth_header = auth_header or aws_stack.mock_aws_request_headers(service_name)["Authorization"]
    auth_header = auth_header.replace("/%s/" % aws_stack.get_region(), "/%s/" % region_name)
    try:
        headers["Authorization"] = auth_header
    except Exception as e:
        if "immutable" not in str(e):
            raise
        _context_to_update = get_proxy_request_for_thread() or request
        _context_to_update.headers = CaseInsensitiveDict({**headers, "Authorization": auth_header})
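
The region sits inside the SigV4 credential scope of the Authorization header, which is why a plain string replace is enough to retarget the request. A small sketch with an illustrative header value:

# Illustrative SigV4 header; the credential scope embeds "/<region>/"
auth = (
    "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20210101/us-east-1/s3/aws4_request, "
    "SignedHeaders=host;x-amz-date, Signature=abc123"
)
auth = auth.replace("/%s/" % "us-east-1", "/%s/" % "eu-central-1")
print(auth)  # ...Credential=AKIDEXAMPLE/20210101/eu-central-1/s3/aws4_request...
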
Example #12
def transform_template(req_data):
    template_body = get_template_body(req_data)
    parsed = template_deployer.parse_template(template_body)

    policy_map = {
        # SAM Transformer expects this map to be non-empty, but apparently the content doesn't matter (?)
        'dummy': 'entry'
        # 'AWSLambdaBasicExecutionRole': 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
    }

    class MockPolicyLoader(object):
        def load(self):
            return policy_map

    if parsed.get('Transform') == 'AWS::Serverless-2016-10-31':
        # Note: we need to fix boto3 region, otherwise AWS SAM transformer fails
        region_before = os.environ.get('AWS_DEFAULT_REGION')
        if boto3.session.Session().region_name is None:
            os.environ['AWS_DEFAULT_REGION'] = aws_stack.get_region()
        try:
            transformed = transform_sam(parsed, {}, MockPolicyLoader())
            return json.dumps(transformed)
        finally:
            os.environ.pop('AWS_DEFAULT_REGION', None)
            if region_before is not None:
                os.environ['AWS_DEFAULT_REGION'] = region_before
Example #13
def resolve_ref(stack_name, ref, resources, attribute):
    if ref == 'AWS::Region':
        return aws_stack.get_region()
    if ref == 'AWS::Partition':
        return 'aws'
    if ref == 'AWS::StackName':
        return stack_name

    # first, check stack parameters
    stack_param = get_stack_parameter(stack_name, ref)
    if stack_param is not None:
        return stack_param

    # second, resolve resource references
    resource_status = {}
    if stack_name:
        resource_status = describe_stack_resource(stack_name, ref)
        if not resource_status:
            return
        attr_value = resource_status.get(attribute)
        if attr_value not in [None, '']:
            return attr_value
    elif ref in resources:
        resource_status = resources[ref]['__details__']
    # fetch resource details
    resource_new = retrieve_resource_details(ref, resource_status, resources, stack_name)
    if not resource_new:
        return
    resource = resources.get(ref)
    resource_type = get_resource_type(resource)
    result = extract_resource_attribute(resource_type, resource_new, attribute)
    if not result:
        LOG.warning('Unable to extract reference attribute %s from resource: %s' % (attribute, resource_new))
    return result
Example #14
    def replace_in_encoded(self, data):
        if not data:
            return ''

        decoded, type_encoding = self.decode_content(data, True)

        if type_encoding == APPLICATION_JSON:
            return re.sub(r'arn:aws:kinesis:[^:]+:',
                          'arn:aws:kinesis:%s:' % aws_stack.get_region(),
                          to_str(data))

        if type_encoding == APPLICATION_CBOR:
            replaced = re.sub(r'arn:aws:kinesis:[^:]+:',
                              'arn:aws:kinesis:%s:' % aws_stack.get_region(),
                              json.dumps(decoded))
            return cbor2.dumps(json.loads(replaced))
Example #15
    def put_log_events(
        self,
        context: RequestContext,
        log_group_name: LogGroupName,
        log_stream_name: LogStreamName,
        log_events: InputLogEvents,
        sequence_token: SequenceToken = None,
    ) -> PutLogEventsResponse:
        logs_backend = logs_backends[aws_stack.get_region()]
        metric_filters = logs_backend.filters.metric_filters
        for metric_filter in metric_filters:
            pattern = metric_filter.get("filterPattern", "")
            transformations = metric_filter.get("metricTransformations", [])
            matches = get_pattern_matcher(pattern)
            for log_event in log_events:
                if matches(pattern, log_event):
                    for tf in transformations:
                        value = tf.get("metricValue") or "1"
                        if "$size" in value:
                            LOG.info("Expression not yet supported for log filter metricValue: %s", value)
                        value = float(value) if is_number(value) else 1
                        data = [{"MetricName": tf["metricName"], "Value": value}]
                        try:
                            self.cw_client.put_metric_data(Namespace=tf["metricNamespace"], MetricData=data)
                        except Exception as e:
                            LOG.info("Unable to put metric data for matching CloudWatch log events: %s", e)
        return call_moto(context)
Example #16
def publish_log_metrics_for_events(data):
    """ Filter and publish log metrics for matching events """
    from moto.logs.models import logs_backends  # TODO: create separate RegionBackend class to store state
    data = data if isinstance(data, dict) else json.loads(data)
    log_events = data.get('logEvents') or []
    logs_backend = logs_backends[aws_stack.get_region()]
    metric_filters = logs_backend.metric_filters = getattr(logs_backend, 'metric_filters', [])
    client = aws_stack.connect_to_service('cloudwatch')
    for metric_filter in metric_filters:
        pattern = metric_filter.get('filterPattern', '')
        if log_events_match_filter_pattern(pattern, log_events):
            for tf in metric_filter.get('metricTransformations', []):
                value = tf.get('metricValue') or '1'
                if '$size' in value:
                    LOG.info('Expression not yet supported for log filter metricValue: %s' % value)
                value = float(value) if is_number(value) else 1
                data = [{'MetricName': tf['metricName'], 'Value': value}]
                try:
                    client.put_metric_data(Namespace=tf['metricNamespace'],
                                           MetricData=data)
                except Exception as e:
                    LOG.info('Unable to put metric data for matching CloudWatch log events: %s' % e)
Example #17
def get_stream_info(stream_name, log_file=None, shards=None, env=None, endpoint_url=None,
        ddb_lease_table_suffix=None, env_vars=None):
    # avoid a shared mutable default argument for env_vars
    env_vars = env_vars or {}
    if not ddb_lease_table_suffix:
        ddb_lease_table_suffix = DEFAULT_DDB_LEASE_TABLE_SUFFIX
    # construct stream info
    env = aws_stack.get_environment(env)
    props_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.properties' % short_uid())
    # make sure to convert stream ARN to stream name
    stream_name = aws_stack.kinesis_stream_name(stream_name)
    app_name = '%s%s' % (stream_name, ddb_lease_table_suffix)
    stream_info = {
        'name': stream_name,
        'region': aws_stack.get_region(),
        'shards': shards,
        'properties_file': props_file,
        'log_file': log_file,
        'app_name': app_name,
        'env_vars': env_vars
    }
    # set local connection
    if aws_stack.is_local_env(env):
        stream_info['conn_kwargs'] = {
            'host': LOCALHOST,
            'port': config.PORT_KINESIS,
            'is_secure': bool(USE_SSL)
        }
    if endpoint_url:
        if 'conn_kwargs' not in stream_info:
            stream_info['conn_kwargs'] = {}
        url = urlparse(endpoint_url)
        stream_info['conn_kwargs']['host'] = url.hostname
        stream_info['conn_kwargs']['port'] = url.port
        stream_info['conn_kwargs']['is_secure'] = url.scheme == 'https'
    return stream_info
Example #18
    def associate_vpc_with_hosted_zone(
        self,
        context: RequestContext,
        hosted_zone_id: ResourceId,
        vpc: VPC,
        comment: AssociateVPCComment = None,
    ) -> AssociateVPCWithHostedZoneResponse:
        region_details = Route53Backend.get()
        # TODO: handle NoSuchHostedZone and ConflictingDomainExist
        zone_details = region_details.vpc_hosted_zone_associations.get(hosted_zone_id) or []
        hosted_zone_association = HostedZoneAssociation(
            hosted_zone_id=hosted_zone_id,
            id=short_uid(),
            vpc=vpc,
            status=ChangeStatus.INSYNC,
            submitted_at=datetime.now(),
        )
        zone_details.append(hosted_zone_association)
        vpc_id = vpc.get("VPCId")
        # update VPC info in hosted zone moto object - fixes required after https://github.com/spulec/moto/pull/4786
        hosted_zone = route53_backend.zones.get(hosted_zone_id)
        if not getattr(hosted_zone, "vpcid", None):
            hosted_zone.vpcid = vpc_id
        if not getattr(hosted_zone, "vpcregion", None):
            hosted_zone.vpcregion = aws_stack.get_region()

        region_details.vpc_hosted_zone_associations[hosted_zone_id] = zone_details
        return AssociateVPCWithHostedZoneResponse(
            ChangeInfo=ChangeInfo(Id=short_uid(), Status=ChangeStatus.INSYNC, SubmittedAt=datetime.now())
        )
Example #19
def get_sample_arn(service, resource):
    return "arn:aws:%s:%s:%s:%s" % (
        service,
        aws_stack.get_region(),
        TEST_AWS_ACCOUNT_ID,
        resource,
    )
Example #20
def handle_delete_rule(rule_name):
    region = aws_stack.get_region()
    job_id = RULE_SCHEDULED_JOBS.get(region, {}).get(rule_name)
    if job_id:
        LOG.debug('Removing scheduled Events: {} | job_id: {}'.format(rule_name, job_id))
        JobScheduler.instance().cancel_job(job_id=job_id)
Example #21
    def forward_request(self, method, path, data, headers):
        req_data = parse_request_data(method, path, data)
        action = req_data.get('Action')
        if action == 'TagResource':
            arn = req_data.get('ResourceARN')
            tags = aws_stack.extract_tags(req_data)
            TAGS.tag_resource(arn, tags)
            return aws_responses.requests_response_xml(action, {}, xmlns=XMLNS_CLOUDWATCH)
        if action == 'UntagResource':
            arn = req_data.get('ResourceARN')
            tag_names = [v for k, v in req_data.items() if k.startswith('TagKeys.member.')]
            TAGS.untag_resource(arn, tag_names)
            return aws_responses.requests_response_xml(action, {}, xmlns=XMLNS_CLOUDWATCH)
        if action == 'ListTagsForResource':
            arn = req_data.get('ResourceARN')
            tags = TAGS.list_tags_for_resource(arn)
            result = {'Tags': {'member': tags.get('Tags', [])}}
            return aws_responses.requests_response_xml(action, result, xmlns=XMLNS_CLOUDWATCH)
        if path.startswith(PATH_GET_RAW_METRICS):
            result = cloudwatch_backends[aws_stack.get_region()].metric_data
            result = [
                {'ns': r.namespace, 'n': r.name, 'v': r.value, 't': r.timestamp,
                 'd': [{'n': d.name, 'v': d.value} for d in r.dimensions]}
                for r in result
            ]
            return {'metrics': result}
        return True
Example #22
    def RestAPI_create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        props = cloudformation_json['Properties']
        name = props['Name']
        region_name = props.get('Region') or aws_stack.get_region()
        description = props.get('Description') or ''
        id = props.get('Id') or short_uid()
        return apigw_models.RestAPI(id, region_name, name, description)
Example #23
    def test_import_certificate(self, acm_client):
        certs_before = acm_client.list_certificates().get("CertificateSummaryList", [])

        with pytest.raises(Exception) as exec_info:
            acm_client.import_certificate(Certificate=b"CERT123", PrivateKey=b"KEY123")
        assert "PEM" in str(exec_info)

        private_key = ec2_utils.random_key_pair()["material"]
        result = None
        try:
            result = acm_client.import_certificate(Certificate=DIGICERT_ROOT_CERT, PrivateKey=private_key)
            assert "CertificateArn" in result

            expected_arn = "arn:aws:acm:{0}:{1}:certificate".format(aws_stack.get_region(), TEST_AWS_ACCOUNT_ID)
            acm_cert_arn = result["CertificateArn"].split("/")[0]
            assert expected_arn == acm_cert_arn

            certs_after = acm_client.list_certificates().get("CertificateSummaryList", [])
            assert len(certs_before) + 1 == len(certs_after)
        finally:
            if result is not None:
                acm_client.delete_certificate(CertificateArn=result["CertificateArn"])
Example #24
def connect_api_gateway_to_sqs(gateway_name, stage_name, queue_arn, path, region_name=None):
    resources = {}
    template = APIGATEWAY_SQS_DATA_INBOUND_TEMPLATE
    resource_path = path.replace("/", "")
    region_name = region_name or aws_stack.get_region()

    try:
        arn = parse_arn(queue_arn)
        queue_name = arn["resource"]
        sqs_region = arn["region"]
    except InvalidArnException:
        queue_name = queue_arn
        sqs_region = region_name

    resources[resource_path] = [
        {
            "httpMethod": "POST",
            "authorizationType": "NONE",
            "integrations": [
                {
                    "type": "AWS",
                    "uri": "arn:aws:apigateway:%s:sqs:path/%s/%s"
                    % (sqs_region, TEST_AWS_ACCOUNT_ID, queue_name),
                    "requestTemplates": {"application/json": template},
                }
            ],
        }
    ]
    return aws_stack.create_api_gateway(
        name=gateway_name,
        resources=resources,
        stage_name=stage_name,
        region_name=region_name,
    )
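
The try/except around parse_arn falls back to treating the argument as a plain queue name. A standalone sketch of the split an SQS queue ARN supports (format arn:aws:sqs:<region>:<account-id>:<queue-name>):

# An SQS queue ARN has a fixed colon-separated layout
queue_arn = "arn:aws:sqs:us-east-1:000000000000:my-queue"
_arn, _partition, _service, sqs_region, account_id, queue_name = queue_arn.split(":", 5)
assert (sqs_region, queue_name) == ("us-east-1", "my-queue")
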
Example #25
def get_domain_status(domain_name, deleted=False):
    status = ES_DOMAINS.get(domain_name) or {}
    return {
        'DomainStatus': {
            'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),
            'Created': True,
            'Deleted': deleted,
            'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),
            'DomainName': domain_name,
            'ElasticsearchClusterConfig': {
                'DedicatedMasterCount': 1,
                'DedicatedMasterEnabled': True,
                'DedicatedMasterType': 'm3.medium.elasticsearch',
                'InstanceCount': 1,
                'InstanceType': 'm3.medium.elasticsearch',
                'ZoneAwarenessEnabled': False
            },
            'ElasticsearchVersion': status.get('ElasticsearchVersion') or DEFAULT_ES_VERSION,
            'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name),
            'Processing': False,
            'EBSOptions': {
                'EBSEnabled': True,
                'VolumeType': 'gp2',
                'VolumeSize': 10,
                'Iops': 0
            },
            'CognitoOptions': {
                'Enabled': False
            },
        }
    }
Example #26
    def __init__(self, resource_json, region_name=None, **params):
        self.region_name = region_name or aws_stack.get_region()
        self.resource_json = resource_json
        self.resource_type = resource_json['Type']
        # properties, as defined in the template
        self.properties = resource_json.get('Properties') or {}
        # state, as determined from the deployed resource
        self.state = {}
Example #27
    def get(cls, region=None):
        if not hasattr(cls, 'REGIONS'):
            # maps region name to region backend instance
            cls.REGIONS = {}

        region = region or aws_stack.get_region()
        cls.REGIONS[region] = cls.REGIONS.get(region) or cls()
        return cls.REGIONS[region]
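
This is the lazily-created per-region backend pattern that also backs calls like Route53Backend.get() and ElasticsearchServiceBackend.get() in the examples above. A minimal self-contained sketch (class and region names are illustrative):

class MyServiceBackend:
    """One backend instance per region, created lazily on first access."""

    @classmethod
    def get(cls, region=None):
        if not hasattr(cls, "REGIONS"):
            cls.REGIONS = {}
        region = region or "us-east-1"  # stand-in for aws_stack.get_region()
        cls.REGIONS[region] = cls.REGIONS.get(region) or cls()
        return cls.REGIONS[region]

assert MyServiceBackend.get("eu-west-1") is MyServiceBackend.get("eu-west-1")
assert MyServiceBackend.get("eu-west-1") is not MyServiceBackend.get("us-east-1")
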
Example #28
    def Resource_create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        props = cloudformation_json['Properties']
        region_name = props.get('Region') or aws_stack.get_region()
        path_part = props.get('PathPart')
        api_id = props.get('RestApiId')
        parent_id = props.get('ParentId')
        id = props.get('Id') or short_uid()
        return apigw_models.Resource(id, region_name, api_id, path_part, parent_id)
Example #29
def get_table_schema(table_name):
    key = '%s/%s' % (aws_stack.get_region(), table_name)
    schema = SCHEMA_CACHE.get(key)
    if not schema:
        ddb_client = aws_stack.connect_to_service('dynamodb')
        schema = ddb_client.describe_table(TableName=table_name)
        SCHEMA_CACHE[key] = schema
    return schema
Example #30
def get_domain_status(domain_name, deleted=False):
    region = ElasticsearchServiceBackend.get()
    status = region.es_domains.get(domain_name) or {}
    cluster_cfg = status.get("ElasticsearchClusterConfig") or {}
    default_cfg = DEFAULT_ES_CLUSTER_CONFIG
    endpoint = "%s://%s:%s" % (
        get_service_protocol(),
        config.HOSTNAME_EXTERNAL,
        config.PORT_ELASTICSEARCH,
    )
    return {
        "DomainStatus": {
            "ARN": "arn:aws:es:%s:%s:domain/%s" % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),
            "Created": status.get("Created", False),
            "Deleted": deleted,
            "DomainId": "%s/%s" % (TEST_AWS_ACCOUNT_ID, domain_name),
            "DomainName": domain_name,
            "ElasticsearchClusterConfig": {
                "DedicatedMasterCount": cluster_cfg.get("DedicatedMasterCount", default_cfg["DedicatedMasterCount"]),
                "DedicatedMasterEnabled": cluster_cfg.get("DedicatedMasterEnabled", default_cfg["DedicatedMasterEnabled"]),
                "DedicatedMasterType": cluster_cfg.get("DedicatedMasterType", default_cfg["DedicatedMasterType"]),
                "InstanceCount": cluster_cfg.get("InstanceCount", default_cfg["InstanceCount"]),
                "InstanceType": cluster_cfg.get("InstanceType", default_cfg["InstanceType"]),
                "ZoneAwarenessEnabled": cluster_cfg.get("ZoneAwarenessEnabled", default_cfg["ZoneAwarenessEnabled"]),
            },
            "ElasticsearchVersion": status.get("ElasticsearchVersion") or DEFAULT_ES_VERSION,
            "Endpoint": endpoint,
            "Processing": False,
            "EBSOptions": {"EBSEnabled": True, "VolumeType": "gp2", "VolumeSize": 10, "Iops": 0},
            "CognitoOptions": {"Enabled": False},
        }
    }