def _metric(resources, log_group_resource, metric_namespace, metric_name,
            log_metric):
    patterns = log_metric.get('patterns', {})
    for pattern, value in patterns.items():
        # Sanitize the filter pattern into a CloudFormation logical ID:
        pattern_name = clean_name(pattern.replace('=', 'EQ')
                                  .replace('!', 'NOT')
                                  .replace('*', 'STAR'))
        resource_name = '%sMetricFilter%s' % (clean_name(metric_name),
                                              pattern_name)
        resources[resource_name] = {
            'Type': 'AWS::Logs::MetricFilter',
            'Properties': {
                'LogGroupName': {'Ref': log_group_resource},
                'FilterPattern': pattern,
                'MetricTransformations': [{
                    'MetricValue': value,
                    'MetricName': metric_name,
                    'MetricNamespace': metric_namespace
                }]
            }
        }

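# Usage sketch for _metric (values hypothetical; the exact logical ID
# depends on what clean_name strips):
#
#   resources = {}
#   _metric(resources, 'LogGroup', 'MyApp', 'Errors',
#           {'patterns': {'[level=ERROR]': '1'}})
#   # One AWS::Logs::MetricFilter is registered, keyed roughly as
#   # 'ErrorsMetricFilterlevelEQERROR', publishing value 1 to the
#   # MyApp/Errors metric whenever the pattern matches 'LogGroup'.
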
def _get_defaults(self, resource_name, metric):
    """
    Source:
    https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/rds-metricscollected.html
    """
    rds_dimensions = {'DBInstanceIdentifier': {'Ref': resource_name}}
    defaults = {
        'namespace': 'AWS/RDS',
        'dimensions': rds_dimensions,
        'period': '3x60',
        'statistic': 'Average'
    }

    metric_lower = clean_name(metric.lower())
    if 'creditusage' in metric_lower:
        defaults['metricName'] = 'CPUCreditUsage'
        defaults['threshold'] = '>10'
        defaults['period'] = '3x300'
    elif 'credit' in metric_lower:
        defaults['metricName'] = 'CPUCreditBalance'
        defaults['threshold'] = '<5'
        defaults['period'] = '3x300'
    elif 'cpu' in metric_lower:
        defaults['metricName'] = 'CPUUtilization'
        defaults['threshold'] = '>50'
    elif 'read' in metric_lower:
        self._io_default('Read', metric_lower, defaults)
    elif 'write' in metric_lower:
        self._io_default('Write', metric_lower, defaults)
    return defaults

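# Usage sketch: 'creditusage' and 'credit' are checked before 'cpu', so
# a metric like 'high cpu' (hypothetical) falls through to
# CPUUtilization:
#
#   self._get_defaults('DbMain', 'high cpu')
#   # -> {'namespace': 'AWS/RDS',
#   #     'dimensions': {'DBInstanceIdentifier': {'Ref': 'DbMain'}},
#   #     'period': '3x60', 'statistic': 'Average',
#   #     'metricName': 'CPUUtilization', 'threshold': '>50'}
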
def ingress_resource(name, **kwargs):
    # Closure over sg_ref, protocol, start_port, end_port, and the
    # shared ingress_properties/ingress_resources of the enclosing
    # scope.
    kwargs.update(ingress_properties)
    resource_name = 'Ingress%s%s%s%sto%s' % (sg_ref, clean_name(name),
                                             protocol, start_port,
                                             end_port)
    ingress_resources[resource_name] = {
        'Type': 'AWS::EC2::SecurityGroupIngress',
        'Properties': kwargs
    }

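# Usage sketch: with, say, sg_ref='ElbSg', protocol='tcp',
# start_port=80, end_port=80 in the enclosing scope (all hypothetical),
# calling ingress_resource('Lb') would register an
# 'IngressElbSgLbtcp80to80' AWS::EC2::SecurityGroupIngress resource.
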
def add_caches(self, app_region, template):
    if not app_region.caches:
        logger.debug('No caches specified.')
        return

    app_name = app_region.app.name
    orbit_name = app_region.app.orbit.name
    resources = template['Resources']

    # Get UserData components:
    user_data = self._lc_user_data(resources)

    # Find the breadcrumb for the cache map:
    cache_intro = user_data.index('"caches":{') + 1

    added_caches = 0
    for name, params in app_region.caches.items():
        # How many replicas?
        replicas = self._replicas(params)
        if replicas is None:
            logger.warning('Cache "%s" has invalid "replicas".', name)
            continue

        default_automatic_failover = replicas > 0
        automatic_failover = params.get('automatic_failover',
                                        default_automatic_failover)

        instance_type = self._instance_type(params, automatic_failover)
        redis_version = params.get('version', REDIS_VERSION)

        cache_resource = 'Cache%s' % clean_name(name)
        cache_desc = '%s for %s in %s' % (name, app_name, orbit_name)
        logger.debug('Creating cache "%s".', name)

        # Security group for cache:
        cache_sg_resource = '%sSg' % cache_resource
        resources[cache_sg_resource] = {
            'Type': 'AWS::EC2::SecurityGroup',
            'Properties': {
                'GroupDescription': cache_desc,
                'VpcId': {'Ref': 'VpcId'},
                'SecurityGroupIngress': [{
                    'IpProtocol': 'tcp',
                    'FromPort': REDIS_PORT,
                    'ToPort': REDIS_PORT,
                    'SourceSecurityGroupId': {'Ref': 'Sg'}
                }]
            }
        }

        # Cache:
        resources[cache_resource] = {
            'Type': 'AWS::ElastiCache::ReplicationGroup',
            'Properties': {
                'AutomaticFailoverEnabled': automatic_failover,
                'AutoMinorVersionUpgrade': True,
                'CacheNodeType': instance_type,
                'CacheSubnetGroupName': {'Ref': 'PrivateCacheSubnetGroup'},
                'Engine': 'redis',
                'EngineVersion': redis_version,
                'NumCacheClusters': (1 + replicas),
                'Port': REDIS_PORT,
                'ReplicationGroupDescription': cache_desc,
                'SecurityGroupIds': [{'Ref': cache_sg_resource}]
            }
        }

        # Inject a labeled reference to this cache replication group:
        # Read this backwards, and note the trailing comma.
        user_data.insert(cache_intro, ',')
        user_data.insert(cache_intro, '"')
        user_data.insert(cache_intro, {'Ref': cache_resource})
        user_data.insert(cache_intro, '"%s":"' % name)
        added_caches += 1

        self._add_client_resources(resources, app_region, 6379, params,
                                   cache_sg_resource)

    # If we added any caches, remove trailing comma:
    if added_caches:
        resources['CachePolicy'] = {
            'DependsOn': 'Role',
            'Type': 'AWS::IAM::Policy',
            'Properties': {
                'PolicyName': 'DescribeCacheEndpoints',
                'Roles': [{'Ref': 'Role'}],
                'PolicyDocument': {
                    'Statement': [{
                        'Effect': 'Allow',
                        'Action': 'elasticache:DescribeReplicationGroups',
                        'Resource': '*'
                    }]
                }
            }
        }
        del user_data[cache_intro + (4 * added_caches) - 1]

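# Sketch of the UserData injection above (illustrative). Each cache
# contributes four items at cache_intro; read in document order once
# all of the backwards inserts land:
#
#   ['"caches":{',
#    '"mycache":"', {'Ref': 'CacheMycache'}, '"', ',',   # one cache
#    '}', ...]
#
# Deleting index cache_intro + (4 * added_caches) - 1 strips the last
# comma, so the rendered JSON is: "caches":{"mycache":"<group-id>"}
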
def add_endpoints(self, template, name, params):
    url = params.get('url')
    if not url:
        logger.warning('Slack endpoint %s is missing "url".', name)
        return ACTIONS_NONE

    resources = template['Resources']

    # This may intersect with other decorators/features; that's fine.
    resources['LambdaRole'] = {
        'Type': 'AWS::IAM::Role',
        'Properties': {
            'AssumeRolePolicyDocument': {
                'Version': '2012-10-17',
                'Statement': [{
                    'Effect': 'Allow',
                    'Principal': {'Service': ['lambda.amazonaws.com']},
                    'Action': ['sts:AssumeRole']
                }]
            },
            'Path': '/',
            'Policies': [{
                'PolicyName': 'root',
                'PolicyDocument': {
                    'Version': '2012-10-17',
                    'Statement': [{
                        'Effect': 'Allow',
                        'Action': [
                            'logs:CreateLogGroup',
                            'logs:CreateLogStream',
                            'logs:PutLogEvents'
                        ],
                        'Resource': 'arn:aws:logs:*:*:*'
                    }]
                }
            }]
        }
    }

    # Upload to S3:
    slack_path = urlparse(url).path
    bucket, key = self._lambda_uploader.upload('sns-to-slack.js',
                                               {'__PATH__': slack_path})

    topic_resource = self.resource_name(name)
    resource_base = clean_name(name)
    function_resource = 'EndpointSlack%sFunction' % resource_base
    resources[function_resource] = {
        'Type': 'AWS::Lambda::Function',
        'Properties': {
            'Handler': 'index.handler',
            'Role': {'Fn::GetAtt': ['LambdaRole', 'Arn']},
            'Timeout': '3',
            'Code': {
                'S3Bucket': bucket,
                'S3Key': key
            },
            'Runtime': 'nodejs'
        }
    }

    resources['EndpointSlack%sPermission' % resource_base] = {
        'Type': 'AWS::Lambda::Permission',
        'DependsOn': function_resource,
        'Properties': {
            'FunctionName': {'Fn::GetAtt': [function_resource, 'Arn']},
            'Action': 'lambda:InvokeFunction',
            'Principal': 'sns.amazonaws.com',
            'SourceArn': {'Ref': topic_resource}
        }
    }

    resources[topic_resource] = {
        'Type': 'AWS::SNS::Topic',
        'Properties': {
            'Subscription': [{
                'Endpoint': {'Fn::GetAtt': [function_resource, 'Arn']},
                'Protocol': 'lambda'
            }],
            'DisplayName': {'Fn::Join': [' ', [
                {'Ref': 'AWS::StackName'},
                'CloudWatchSlackAlarms'
            ]]}
        }
    }
    return ACTIONS_OK_ALARM

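# Wiring sketch for the resources above: a CloudWatch alarm action
# publishes to the SNS topic, SNS invokes the Lambda function (allowed
# by the Permission resource), and the uploaded sns-to-slack.js
# presumably relays the message to the Slack webhook path substituted
# in via '__PATH__'. Only the path of `url` is baked into the function
# source.
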
@staticmethod
def resource_name(name):
    # Called as self.resource_name(name) in add_endpoints; must not
    # bind an instance.
    return 'EndpointSlack%sTopic' % clean_name(name)

@staticmethod
def resource_name(name):
    return 'EndpointScale%sPolicy' % clean_name(name)

def add_rds(self, app_region, template):
    if not app_region.databases:
        logger.debug('No databases specified.')
        return

    params = template['Parameters']
    resources = template['Resources']

    app = app_region.app
    app_name = app.name
    orbit_name = app.orbit.name

    user_data = self._lc_user_data(resources)
    db_intro = user_data.index('"databases":{') + 1

    added_databases = 0
    secret_params = {}
    iam_statements = []
    for name, db_params in app_region.databases.items():
        password_label = 'rds:%s' % name
        rds_resource = 'Db%s' % clean_name(name)

        db_global = db_params.get('global')
        if db_global and db_global != app_region.region:
            # If connecting to a global DB, query for stored password:
            encrypted, _ = self._passwords.get_password(app_region,
                                                        password_label,
                                                        generate=False)
            if not encrypted:
                continue

            rds_id = self._rds_id(app, db_global, rds_resource)
            if not rds_id:
                continue

            iam_statements.append({
                'Effect': 'Allow',
                'Action': 'rds:DescribeDBInstances',
                'Resource': {'Fn::Join': ['', [
                    'arn:aws:rds:%s:' % db_global,
                    {'Ref': 'AWS::AccountId'},
                    ':db:%s' % rds_id
                ]]}
            })

            user_data.insert(db_intro, ',')
            user_data.insert(db_intro, ',"region": "%s"}' % db_global)
            user_data.insert(db_intro,
                             '","password": %s' % encrypted.json())
            user_data.insert(db_intro, rds_id)
            user_data.insert(db_intro, '"%s":{"name":"' % name)
            added_databases += 1
            continue

        db_type = db_params.get('type', 'postgres')
        db_version = db_params.get('version',
                                   DEFAULT_VERSIONS.get(db_type))
        if not db_version:
            logger.warning('Database "%s" has invalid "version".', name)
            continue
        db_port = db_params.get('port', DEFAULT_PORTS.get(db_type))
        if not db_port:
            logger.warning('Database "%s" has invalid "port".', name)
            continue

        instance_type = self._instance_type(db_params)
        multi_az = bool_param(db_params, 'multi_az', False)
        encrypted = bool_param(db_params, 'encrypted', False)
        storage_size = db_params.get('size', '5')
        storage_type = db_params.get('storage_type', 'gp2')
        storage_iops = db_params.get('iops', None)
        db_username = db_params.get('username', name)
        public = (db_global == app_region.region or
                  bool_param(db_params, 'public', False))
        db_subnet_group = '%sRdsSubnetGroup' % (public and 'Public'
                                                or 'Private')

        rds_desc = '%s for %s in %s' % (name, app_name, orbit_name)
        logger.debug('Creating database "%s".', name)

        # Create a parameter for the database password:
        password_param = '%sPassword' % rds_resource
        params[password_param] = {
            'Type': 'String',
            'Description': 'Password for database %s' % name,
            'NoEcho': True
        }

        # Security group for database:
        rds_sg_resource = '%sSg' % rds_resource
        resources[rds_sg_resource] = {
            'Type': 'AWS::EC2::SecurityGroup',
            'Properties': {
                'GroupDescription': rds_desc,
                'VpcId': {'Ref': 'VpcId'},
                'SecurityGroupIngress': [{
                    'IpProtocol': 'tcp',
                    'FromPort': db_port,
                    'ToPort': db_port,
                    'SourceSecurityGroupId': {'Ref': 'Sg'}
                }]
            }
        }

        rds_params = {
            'AllocatedStorage': storage_size,
            'AllowMajorVersionUpgrade': False,
            'AutoMinorVersionUpgrade': True,
            'DBInstanceClass': instance_type,
            'DBName': name,
            'DBSubnetGroupName': {'Ref': db_subnet_group},
            'Engine': db_type,
            'EngineVersion': db_version,
            'MasterUsername': db_username,
            'MasterUserPassword': {'Ref': password_param},
            'MultiAZ': multi_az,
            'Port': db_port,
            'PubliclyAccessible': public,
            'StorageEncrypted': encrypted,
            'StorageType': storage_type,
            'VPCSecurityGroups': [{'Ref': rds_sg_resource}]
        }
        if storage_iops:
            rds_params['Iops'] = storage_iops
            if storage_type != 'io1':
                logger.warning('Overriding "storage_type" of "%s": '
                               '"iops" requires io1.', name)
                rds_params['StorageType'] = 'io1'

        # Workaround for the instance_type default not supporting
        # encryption. Other t2s fail, but at least that's the user's
        # fault.
        if encrypted and instance_type == 'db.t2.micro':
            logger.warning('Overriding "instance_type" of "%s": '
                           '"encrypted" requires t2.large.', name)
            rds_params['DBInstanceClass'] = 'db.t2.large'

        resources[rds_resource] = {
            'Type': 'AWS::RDS::DBInstance',
            'Properties': rds_params
        }

        encrypted, plaintext_func = \
            self._passwords.get_password(app_region, password_label)

        # If hosting a global DB, store the password in each region:
        if db_global:
            region_clients = []
            for other_region, other_app_region in app.regions.items():
                if app_region.region == other_region:
                    continue
                self._passwords.set_password(other_app_region,
                                             password_label,
                                             plaintext_func)
                region_clients.append(other_region)

            # Inject other regions into 'clients' list:
            db_clients = db_params.get('clients')
            if db_clients is None:
                db_params['clients'] = region_clients
            else:
                db_clients += region_clients

        iam_statements.append({
            'Effect': 'Allow',
            'Action': 'rds:DescribeDBInstances',
            'Resource': {'Fn::Join': ['', [
                'arn:aws:rds:%s:' % app_region.region,
                {'Ref': 'AWS::AccountId'},
                ':db:',
                {'Ref': rds_resource}
            ]]}
        })

        # Inject a labeled reference to this database instance:
        # Read this backwards, and note the trailing comma.
        user_data.insert(db_intro, ',')
        user_data.insert(db_intro,
                         ',"region": "%s"}' % app_region.region)
        user_data.insert(db_intro,
                         '","password": %s' % encrypted.json())
        user_data.insert(db_intro, {'Ref': rds_resource})
        user_data.insert(db_intro, '"%s":{"name":"' % name)
        added_databases += 1

        self._add_client_resources(resources, app_region, db_port,
                                   db_params, rds_sg_resource)
        secret_params[password_param] = plaintext_func

        rds_alarms = db_params.get('alarms', {})
        self._alarms.add_rds_alarms(app_region, resources, rds_alarms,
                                    rds_resource)

    if iam_statements:
        resources['RdsPolicy'] = {
            'DependsOn': 'Role',
            'Type': 'AWS::IAM::Policy',
            'Properties': {
                'PolicyName': 'DescribeRdsDatabases',
                'Roles': [{'Ref': 'Role'}],
                'PolicyDocument': {
                    'Statement': iam_statements
                }
            }
        }

    if added_databases:
        # Remove the trailing comma after the last database entry:
        del user_data[db_intro + (5 * added_databases) - 1]
    return secret_params

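# Sketch of a databases entry this method consumes (keys are the ones
# looked up above; values hypothetical):
#
#   app_region.databases = {
#       'main': {
#           'type': 'postgres',    # default engine
#           'version': '9.4',      # default: DEFAULT_VERSIONS[type]
#           'size': '10',          # AllocatedStorage; default '5'
#           'multi_az': True,      # default False
#           'encrypted': True,     # bumps db.t2.micro to db.t2.large
#           'alarms': {}           # forwarded to add_rds_alarms
#       }
#   }
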
@staticmethod
def resource_name(name):
    return 'EndpointEmail%sTopic' % clean_name(name)

def _build_alarm(self, name, params, endpoint_resources, resources,
                 custom_namespace=True, resource_name=''):
    metric = params.get('metric')
    if not metric:
        logger.warning('Trigger %s is missing "metric".', name)
        return None

    defaults = self._get_defaults(resource_name, metric)
    if not defaults:
        namespace = params.get('namespace')
        if not namespace or not custom_namespace:
            logger.warning('Trigger %s has invalid "metric".', name)
            return None
        defaults = {
            'namespace': namespace,
            'metricName': metric,
            'dimensions': params.get('dimensions')
        }

    endpoints = self._get_endpoints(params)
    if not endpoints:
        logger.warning('Trigger %s is missing "endpoints".', name)
        return None

    alarm, insufficient, ok = self._get_endpoint_actions(
        endpoints, endpoint_resources, name)
    if not alarm and not insufficient and not ok:
        logger.warning('Trigger %s has no valid "endpoints".', name)
        return None

    threshold_raw = self._get_param(params, defaults, 'threshold')
    operator, thresh = self._parse_threshold(threshold_raw)
    if not operator or thresh is None:
        logger.warning('Trigger %s has invalid "threshold".', name)
        return None

    period_raw = self._get_param(params, defaults, 'period')
    periods, period = self._parse_period(period_raw)
    if not periods or not period:
        logger.warning('Trigger %s has invalid "period".', name)
        return None

    alarm_description = 'Alarm %s' % name
    alarm_stat = self._get_param(params, defaults, 'statistic')
    if not alarm_stat:
        logger.warning('Trigger %s has invalid "statistic".', name)
        return None

    alarm_properties = {
        'ActionsEnabled': 'true',
        'AlarmDescription': alarm_description,
        'Namespace': defaults['namespace'],
        'MetricName': defaults['metricName'],
        'ComparisonOperator': operator,
        'EvaluationPeriods': periods,
        'Period': period,
        'Statistic': alarm_stat,
        'Threshold': thresh
    }
    if alarm:
        alarm_properties['AlarmActions'] = alarm
    if insufficient:
        alarm_properties['InsufficientDataActions'] = insufficient
    if ok:
        alarm_properties['OKActions'] = ok

    dimensions = defaults.get('dimensions')
    if dimensions:
        alarm_properties['Dimensions'] = [{'Name': k, 'Value': v}
                                          for k, v in dimensions.items()]

    trigger_name = 'Alarm%s%s' % (resource_name, clean_name(name))
    resources[trigger_name] = {
        'Type': 'AWS::CloudWatch::Alarm',
        'Properties': alarm_properties
    }

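# The _parse_* helpers aren't shown here; judging from the defaults in
# _get_defaults ('>50', '3x300'), a plausible reading (assumed, not
# confirmed by this file) is:
#
#   _parse_threshold('>50')  # -> ('GreaterThanThreshold', 50)
#   _parse_period('3x300')   # -> (3, 300): 3 evaluation periods of
#                            #    300 seconds each
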
@staticmethod
def resource_name(name):
    return 'EndpointPagerDuty%sTopic' % clean_name(name)
