def _create_cloud_watch_trigger_from_meta(topic_name, trigger_meta, region):
    """Wire an SNS topic up as a target of a CloudWatch Events rule.

    The rule name is taken from ``trigger_meta['target_rule']``; the topic
    is also granted permission to be invoked by the events service.
    """
    validate_params(topic_name, trigger_meta, ['target_rule'])
    rule = trigger_meta['target_rule']
    sns_service = CONN.sns(region)
    arn = sns_service.get_topic_arn(topic_name)
    CONN.cw_events(region).add_rule_target(rule, arn)
    sns_service.allow_service_invoke(arn, 'events.amazonaws.com')
    _LOG.info('SNS topic %s subscribed to cloudwatch rule %s', topic_name,
              rule)
def _remove_sns_application(arn, config):
    """Remove an SNS platform application, tolerating absence.

    :type arn: str
    :type config: dict  # expects a 'resource_name' key
    """
    # region is the 4th colon-separated component of an AWS ARN
    region = arn.split(':')[3]
    application_name = config['resource_name']
    try:
        CONN.sns(region).remove_application_by_arn(arn)
        _LOG.info('SNS application %s was removed.', application_name)
    except ClientError as e:
        exception_type = e.response['Error']['Code']
        if exception_type == 'ResourceNotFoundException':
            # already gone - removal is effectively successful
            # (warning() replaces the deprecated warn() alias)
            _LOG.warning('SNS application %s is not found', application_name)
        else:
            # bare raise preserves the original traceback
            raise
def _create_sns_topic_from_meta(name, meta, region):
    """Create an SNS topic (idempotently) and attach its event sources.

    :type name: str
    :type meta: dict  # may carry an 'event_sources' list of trigger metas
    :type region: str
    :returns: description dict produced by describe_sns
    """
    arn = CONN.sns(region).get_topic_arn(name)
    if arn:
        # warning() + lazy %-args for consistency with the rest of the file
        _LOG.warning('%s sns topic exists in region %s.', name, region)
        return describe_sns(name=name, meta=meta, region=region, arn=arn)
    arn = CONN.sns(region).create_topic(name)
    event_sources = meta.get('event_sources')
    if event_sources:
        for trigger_meta in event_sources:
            # dispatch on resource_type; unknown types raise KeyError loudly
            trigger_type = trigger_meta['resource_type']
            func = CREATE_TRIGGER[trigger_type]
            func(name, trigger_meta, region)
    _LOG.info('SNS topic %s in region %s created.', name, region)
    return describe_sns(name=name, meta=meta, region=region, arn=arn)
def _subscribe_lambda_to_sns_topic(lambda_arn, topic_name, region):
    """Subscribe a lambda to an SNS topic and grant SNS invoke rights.

    If the lambda's resource policy has hit its size limit, the permission
    is skipped (logged) while the subscription itself is kept.
    """
    sns_service = CONN.sns(region)
    topic_arn = sns_service.subscribe(lambda_arn, topic_name, 'lambda')
    try:
        CONN.lambda_conn().add_invocation_permission(
            lambda_arn, 'sns.amazonaws.com', source_arn=topic_arn)
    except ClientError:
        # best-effort: policy size cap reached, permission not added
        _LOG.warn('The final access policy size for lambda {} is reached. '
                  'The limit is 20480 bytes. '
                  'Invocation permission was not added.'.format(lambda_arn))
def describe_sns_from_meta(name, meta):
    """Describe the SNS topic *name* in every region where it exists.

    :returns: list of {topic_arn: description} dicts, one per region found
    """
    region_args = create_args_for_multi_region([{
        'name': name,
        'meta': meta
    }], ALL_REGIONS)
    found = []
    for arg in region_args:
        region = arg['region']
        arn = CONN.sns(region).get_topic_arn(name)
        if not arn:
            # topic absent in this region - skip it
            continue
        attributes = CONN.sns(region).get_topic_attributes(arn)
        if attributes:
            found.append((arn, attributes))
    return [{arn: build_description_obj(attributes, name, meta)}
            for arn, attributes in found]
def _create_platform_application_from_meta(name, meta, region):
    """Create an SNS platform application (idempotently).

    :type name: str
    :type meta: dict  # must carry 'platform' and 'attributes'
    :type region: str
    :returns: description dict produced by describe_sns_application
    """
    required_parameters = ['platform', 'attributes']
    validate_params(name, meta, required_parameters)
    arn = CONN.sns(region).get_platform_application(name)
    if arn:
        # warning() + lazy %-args for consistency with the rest of the file
        _LOG.warning('%s SNS platform application exists in region %s.',
                     name, region)
        return describe_sns_application(name, meta, region, arn)
    platform = meta['platform']
    atrbts = meta['attributes']
    try:
        arn = CONN.sns(region).create_platform_application(name=name,
                                                           platform=platform,
                                                           attributes=atrbts)
    except ClientError as e:
        exception_type = e.response['Error']['Code']
        if exception_type == 'InvalidParameterException':
            # race: someone created it between our check and create call
            _LOG.warning('SNS application %s already exists.', name)
        else:
            # bare raise preserves the original traceback
            raise
    _LOG.info('SNS platform application %s in region %s has been created.',
              name, region)
    return describe_sns_application(name, meta, region, arn)
def _create_alarm_from_meta(name, meta):
    """Create alarm resource in AWS Cloud via meta description.

    :type name: str
    :type meta: dict
    :returns: description dict produced by describe_alarm
    """
    required_parameters = [
        'metric_name', 'namespace', 'period', 'threshold',
        'evaluation_periods', 'comparison_operator', 'statistic'
    ]
    validate_params(name, meta, required_parameters)
    if _CW_METRIC.is_alarm_exists(name):
        # warning() replaces the deprecated warn() alias
        _LOG.warning('%s alarm exists.', name)
        return describe_alarm(name, meta)
    params = dict(alarm_name=name,
                  metric_name=meta['metric_name'],
                  namespace=meta['namespace'],
                  period=meta['period'],
                  evaluation_periods=meta['evaluation_periods'],
                  threshold=meta['threshold'],
                  statistic=meta['statistic'],
                  comparison_operator=meta['comparison_operator'])
    sns_topics = meta.get('sns_topics')
    if sns_topics:
        # resolve each configured topic name to its ARN (comprehension
        # replaces the manual append loop)
        sns_topic_arns = [CONN.sns().get_topic_arn(topic)
                          for topic in sns_topics]
        if sns_topic_arns:
            params['alarm_actions'] = sns_topic_arns
    _CW_METRIC.put_metric_alarm(**params)
    # lazy %-args for consistency with the file's other log calls
    _LOG.info('Created alarm %s.', name)
    return describe_alarm(name, meta)
def _create_ebs_app_env_from_meta(name, meta):
    """Create an Elastic Beanstalk application, environment and app version.

    Builds up ``env_settings`` from the meta (notification topic, key pair,
    instance/service roles, image, VPC/subnet/security-group wiring), then
    creates the application, the environment, uploads an app version from
    the deploy bucket and deploys it.

    :type name: str
    :type meta: dict  # expects env_settings, ec2_key_pair, ec2_role,
                      # ebs_service_role, stack, env_name, tier and the
                      # deployment-package S3 key under S3_PATH_NAME
    :returns: description dict produced by describe_ebs
    """
    response = _EBS_CONN.describe_applications([name])
    if response:
        # application already present - return its description instead
        _LOG.warn('%s EBS app exists.', name)
        return describe_ebs(name, meta, response[0])
    env_settings = meta['env_settings']
    topic_name = meta.get('notification_topic')
    # check topic exists
    if topic_name:
        topic_arn = CONN.sns().get_topic_arn(topic_name)
        if topic_arn:
            env_settings.append({
                "OptionName": "Notification Topic ARN",
                "Namespace": "aws:elasticbeanstalk:sns:topics",
                "Value": "{0}".format(topic_arn)
            })
        else:
            raise AssertionError('Cant find notification '
                                 'topic {0} for EBS.'.format(topic_name))
    # check key pair exists
    key_pair_name = meta['ec2_key_pair']
    if _EC2_CONN.if_key_pair_exists(key_pair_name):
        env_settings.append({
            "OptionName": "KeyName",
            "ResourceName": "AWSEBAutoScalingLaunchConfiguration",
            "Namespace": "aws:cloudformation:template:resource:property",
            "Value": key_pair_name
        })
    else:
        raise AssertionError('Specified key pair '
                             'does not exist: {0}.'.format(key_pair_name))
    # check ec2 role exists
    iam_role = meta['ec2_role']
    if _IAM_CONN.check_if_role_exists(iam_role):
        env_settings.append({
            "OptionName": "IamInstanceProfile",
            "ResourceName": "AWSEBAutoScalingLaunchConfiguration",
            "Namespace": "aws:autoscaling:launchconfiguration",
            "Value": iam_role
        })
    else:
        raise AssertionError(
            'Specified iam role does not exist: {0}.'.format(iam_role))
    # check service role exists
    iam_role = meta['ebs_service_role']
    if _IAM_CONN.check_if_role_exists(iam_role):
        env_settings.append({
            "OptionName": "ServiceRole",
            "Namespace": "aws:elasticbeanstalk:environment",
            "Value": iam_role
        })
    else:
        raise AssertionError('Specified iam role '
                             'does not exist: {0}.'.format(iam_role))
    image_id = meta.get('image_id')
    if image_id:
        env_settings.append({
            "OptionName": "ImageId",
            "ResourceName": "AWSEBAutoScalingLaunchConfiguration",
            "Namespace": "aws:autoscaling:launchconfiguration",
            "Value": image_id
        })
    else:
        _LOG.warn('Image id is not specified.')
    # check that desired solution stack exists
    stack = meta['stack']
    available_stacks = _EBS_CONN.describe_available_solutions_stack_names()
    if stack not in available_stacks:
        raise AssertionError('No solution stack named {0} found.'
                             ' Available:\n{1}'.format(stack,
                                                       available_stacks))
    # NOTE(review): this yields the whole option *dict*, not an id string;
    # downstream it is used as a vpc-id filter value - looks like a bug,
    # presumably option['Value'] was intended. Confirm before changing.
    vpc_id = next(
        (option for option in env_settings
         if option['OptionName'] == 'VPCId'), None)
    if not vpc_id:
        vpc_id = _EC2_CONN.get_default_vpc_id()
        _LOG.info('Default vpc id %s', vpc_id)
    if vpc_id:
        _LOG.debug('Will use vpc %s', vpc_id)
        subnets = _EC2_CONN.list_subnets(filters=[{
            'Name': 'vpc-id',
            'Values': [vpc_id]
        }])
        _LOG.debug('Found subnets for %s vpc: %s', vpc_id, subnets)
        if subnets:
            _LOG.info('Will attach default %s vpc to env', vpc_id)
            _add_subnets_info(env_settings, subnets, vpc_id)
        sg_id = _EC2_CONN.get_sg_id(group_name='default', vpc_id=vpc_id)
        if sg_id:
            _LOG.debug('Found default sg with id %s', sg_id)
            env_settings.append({
                "OptionName": "SecurityGroups",
                "Namespace": "aws:autoscaling:launchconfiguration",
                "Value": sg_id
            })
    # env names must be unique - suffix with the current epoch second
    env_name = meta["env_name"] + str(int(time()))
    # wait up to 180s for any previous application with the same name to
    # finish deleting before creating a new one
    # NOTE(review): tight polling loop with no sleep - hammers the API;
    # consider adding a short delay per iteration
    start = time()
    end = start + 180
    while end > time():
        describe_app_result = _EBS_CONN.describe_applications([name])
        if not describe_app_result:
            break
    # create APP
    response = _EBS_CONN.create_application(name)
    _LOG.info('Created EBS app %s.', name)
    # create ENV
    _EBS_CONN.create_environment(app_name=name,
                                 env_name=env_name,
                                 option_settings=env_settings,
                                 tier=meta['tier'],
                                 solution_stack_name=stack)
    key = meta[S3_PATH_NAME]
    if not CONN.s3().is_file_exists(CONFIG.deploy_target_bucket, key):
        raise AssertionError('Deployment package does not exist in '
                             '{0} bucket'.format(CONFIG.deploy_target_bucket))
    # create VERSION
    version_label = env_name + str(uuid1())
    _EBS_CONN.create_app_version(app_name=name,
                                 version_label=version_label,
                                 s3_bucket=CONFIG.deploy_target_bucket,
                                 s3_key=key)
    _LOG.debug('Waiting for beanstalk env %s', env_name)
    # wait for env creation
    # NOTE(review): same tight polling pattern, no sleep between calls
    start = time()
    status = {}
    end = start + 360  # end in 6 min
    while end > time():
        status = _EBS_CONN.describe_environment_health(env_name=env_name,
                                                       attr_names=['Status'])
        if status['Status'] == 'Ready':
            _LOG.info('Launching env took %s.', time() - start)
            break
    if status['Status'] != 'Ready':
        # env did not become Ready in time; deployment proceeds anyway
        _LOG.error('Env status: %s. Failed to create env.', status)
    # deploy new app version
    _EBS_CONN.deploy_env_version(name, env_name, version_label)
    _LOG.info('Created environment for %s.', name)
    # NOTE(review): 'response' here is create_application's raw return,
    # whereas the early-exit path passes a describe_applications entry -
    # verify describe_ebs accepts both shapes
    return describe_ebs(name, meta, response)
def describe_sns_application(name, meta, region, arn=None):
    """Build an {arn: description} dict for an SNS platform application.

    Resolves the ARN by name when it is not supplied by the caller.
    """
    sns_service = CONN.sns(region)
    resolved_arn = arn or sns_service.get_platform_application(name)
    attributes = sns_service.get_platform_application_attributes(resolved_arn)
    return {resolved_arn: build_description_obj(attributes, name, meta)}
def describe_sns(name, meta, region, arn=None):
    """Build an {arn: description} dict for an SNS topic.

    Resolves the topic ARN by name when it is not supplied by the caller.
    """
    sns_service = CONN.sns(region)
    resolved_arn = arn or sns_service.get_topic_arn(name)
    attributes = sns_service.get_topic_attributes(resolved_arn)
    return {resolved_arn: build_description_obj(attributes, name, meta)}
def _subscribe_lambda_to_sns_topic(lambda_arn, topic_name, region):
    """Subscribe a lambda to an SNS topic and grant SNS invoke rights.

    NOTE(review): this is a duplicate definition - an earlier version of
    this function exists in the file with ClientError handling; being the
    later definition, this one shadows it. The guard is restored here so
    the effective behavior keeps the policy-size fallback; the duplicate
    itself should be removed once confirmed unintentional.
    """
    topic_arn = CONN.sns(region).subscribe(lambda_arn, topic_name, 'lambda')
    try:
        CONN.lambda_conn().add_invocation_permission(lambda_arn,
                                                     'sns.amazonaws.com',
                                                     source_arn=topic_arn)
    except ClientError:
        # best-effort: policy size cap reached, permission not added
        _LOG.warn('The final access policy size for lambda {} is reached. '
                  'The limit is 20480 bytes. '
                  'Invocation permission was not added.'.format(lambda_arn))