def create_s3deploy_properties(self, stage, action, info):
    """Build the CodePipeline Action properties dict for an S3.Deploy action.

    Creates CFN Parameters for the target bucket name, the Extract flag and
    (optionally) the object key, resolves the bucket's owning account to pick
    the cross-account delegate Role, and collects InputArtifacts from the
    action's '<stage>.<action>' references.
    """
    base_name = stage.name + action.name
    s3_deploy_bucket_name_param = self.create_cfn_parameter(
        param_type='String',
        name=self.create_cfn_logical_id('S3DeployBucketName' + base_name),
        description='The name of the S3 bucket to deploy to.',
        # paco.ref string suffixed with '.name' is resolved by the Parameter machinery
        value=action.bucket + '.name',
    )
    s3_deploy_extract_param = self.create_cfn_parameter(
        param_type='String',
        name=self.create_cfn_logical_id('S3DeployExtract' + base_name),
        description='Boolean indicating whether the deployment artifact will be extracted.',
        value=action.extract,
    )
    # ObjectKey is optional: default to the AWS::NoValue pseudo parameter
    s3_deploy_object_key_param = 'AWS::NoValue'
    if action.object_key != None:
        s3_deploy_object_key_param = self.create_cfn_parameter(
            param_type='String',
            name=self.create_cfn_logical_id('S3DeployObjectKey' + base_name),
            description='S3 object key to store the deployment artifact as.',
            value=action.object_key,
        )
    # The account that owns the target bucket selects the delegate RoleArn below
    bucket = get_model_obj_from_ref(action.bucket, self.paco_ctx.project)
    account = get_model_obj_from_ref(bucket.account, self.paco_ctx.project)
    input_artifacts = []
    for artifact in action.input_artifacts:
        # input_artifacts entries are '<stage>.<action>' strings
        stage_name, action_name = artifact.split('.')
        source_action = self.pipeline.stages[stage_name][action_name]
        input_name = '{}Artifact{}{}'.format(
            ACTION_MAP[source_action.type]['Name'],
            stage_name,
            action_name,
        )
        input_artifacts.append(
            troposphere.codepipeline.InputArtifacts(Name=input_name))
    return {
        'Configuration': {
            'BucketName': troposphere.Ref(s3_deploy_bucket_name_param),
            'Extract': troposphere.Ref(s3_deploy_extract_param),
            'ObjectKey': troposphere.Ref(s3_deploy_object_key_param),
        },
        'InputArtifacts': input_artifacts,
        'RoleArn': troposphere.Ref(self.s3deploy_buckets[account.name]),
        #'RunOrder': troposphere.If('ManualApprovalIsEnabled', 2, 1)
    }
def init_stack_groups(self):
    """Create a CloudTrailStackGroup per account for every trail.

    The account that owns the trail's S3 bucket is moved to the front of the
    list, because the first account provisioned receives the S3 bucket.
    """
    for trail in self.cloudtrail.trails.values():
        accounts = trail.get_accounts()
        s3_bucket_account = get_model_obj_from_ref(trail.s3_bucket_account, self.paco_ctx.project)
        # bucket-owning account first, everyone else after, original order preserved
        ordered_accounts = [acct for acct in accounts if acct.name == s3_bucket_account.name]
        ordered_accounts += [acct for acct in accounts if acct.name != s3_bucket_account.name]
        for acct in ordered_accounts:
            acct_ctx = self.paco_ctx.get_account_context(account_name=acct.name)
            self.stack_grps.append(
                CloudTrailStackGroup(
                    self.paco_ctx,
                    acct_ctx,
                    self.cloudtrail,
                    self,
                    accounts,
                    account_default_region=acct.region,
                )
            )
def init_stack_groups(self):
    """Create a CloudTrailStackGroup per account for every trail.

    The account that owns the trail's S3 bucket is moved to the front of
    the list (the first account provisioned gets the S3 bucket); that same
    account also holds the KMS Key when enable_kms_encryption is on.
    """
    for trail in self.cloudtrail.trails.values():
        accounts = trail.get_accounts()
        # re-organize the list so that the s3_bucket_account is the first on the list
        # as the first account gets the S3 bucket
        s3_bucket_account = get_model_obj_from_ref(trail.s3_bucket_account, self.paco_ctx.project)
        ordered_accounts = []
        for account in accounts:
            if s3_bucket_account.name == account.name:
                # S3 Bucket account is also the KMS Key account if that's enabled
                account._kms_key_account = False
                if trail.enable_kms_encryption == True:
                    account._kms_key_account = True
                ordered_accounts.append(account)
        for account in accounts:
            if s3_bucket_account.name != account.name:
                # all non-bucket accounts never hold the KMS Key
                account._kms_key_account = False
                ordered_accounts.append(account)
        for account in ordered_accounts:
            account_ctx = self.paco_ctx.get_account_context(account_name=account.name)
            cloudtrail_stack_grp = CloudTrailStackGroup(
                self.paco_ctx,
                account_ctx,
                self.cloudtrail,
                self,
                accounts,
                account_default_region=account.region,
                kms_key_account=account._kms_key_account,
            )
            self.stack_grps.append(cloudtrail_stack_grp)
def get_account(self):
    """Return the Account object that this resource is provisioned to."""
    environment_region = get_parent_by_interface(self, schemas.IEnvironmentRegion)
    paco_project = get_parent_by_interface(self, schemas.IProject)
    account_ref = environment_region.network.aws_account
    return get_model_obj_from_ref(account_ref, paco_project)
def create_image_definitions_artifact_cache(self, hook, pipeline):
    "Create a cache id for the imageDefinitions service name"
    # the cache id is the ECS service name of the first ECS.Deploy action
    for deploy_action in pipeline.deploy.values():
        if deploy_action.type != 'ECS.Deploy':
            continue
        ecs_service = get_model_obj_from_ref(deploy_action.service, self.paco_ctx.project)
        return ecs_service.name
def resolve_ref(self, ref):
    """Resolve an '...arn' ref to the stack of its RegionContainer; None otherwise."""
    if ref.last_part != 'arn':
        return None
    # drop the trailing two parts to address the region container itself
    container_ref = 'paco.ref ' + '.'.join(ref.parts[:-2])
    region_container = get_model_obj_from_ref(container_ref, self.paco_ctx.project)
    return region_container.stack
def load_app_in_account_region(
    parent,
    account,
    region,
    app_name,
    app_config,
    project=None,
    monitor_config=None,
    read_file_path='not set',
):
    """
    Load an Application from config into an AccountContainer and RegionContainer.
    Account can be a paco.ref but then the Paco Project must be supplied too.

    Returns the newly created Application object.
    """
    account_name = account
    if is_ref(account):
        account_name = get_model_obj_from_ref(account, project).name
    # create the AccountContainer/RegionContainer hierarchy on first use
    if account_name not in parent:
        parent[account_name] = AccountContainer(account_name, parent)
    if region not in parent[account_name]:
        parent[account_name][region] = RegionContainer(region, parent[account_name])
    app = Application(app_name, parent[account_name][region])
    parent[account_name][region][app_name] = app
    if project is None:
        # locate the Project by walking up the model tree
        # (get_parent_by_interface called with its default interface — assumed IProject, confirm)
        project = get_parent_by_interface(parent)
    apply_attributes_from_config(
        app,
        app_config,
        lookup_config=monitor_config,
        read_file_path=read_file_path,
        resource_registry=project.resource_registry,
    )
    return app
def __init__(
    self,
    stack,
    paco_ctx,
    sns_topic_list
):
    """CloudFormation template that subscribes a Lambda to a list of SNS Topics.

    sns_topic_list entries are either paco.refs to SNS Topic resources or
    raw SNS Topic ARN strings.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    self.set_aws_name('LambdaSNSSubs', self.resource_group_name, self.resource_name)
    awslambda = self.awslambda = self.stack.resource
    self.init_template('Lambda SNS Subscriptions')

    # if not enabled finish with only empty placeholder
    if not self.awslambda.is_enabled():
        return

    # Permissions
    # SNS Topic Lambda permissions and subscription
    lambda_arn_param = self.create_cfn_parameter(
        name='LambdaFunctionArn',
        param_type='String',
        # fixed copy-paste: this Parameter carries the Lambda's ARN, not an SNS Topic ARN
        description='The Arn of the Lambda function to subscribe.',
        value=self.awslambda.paco_ref + '.arn'
    )
    idx = 1
    for sns_topic in sns_topic_list:
        # SNS Topic Arn parameters
        if is_ref(sns_topic):
            sns_topic_value = sns_topic + '.arn'
            sns_topic_obj = get_model_obj_from_ref(sns_topic, self.paco_ctx.project)
            region_name = sns_topic_obj.region_name
        else:
            # raw Topic ARN: the region is the 4th ':'-delimited field
            sns_topic_value = sns_topic
            region_name = sns_topic.split(':')[3]
        param_name = 'SNSTopicArn%d' % idx
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='An SNS Topic ARN to grant permission to.',
            value=sns_topic_value
        )
        # SNS Topic subscription
        troposphere.sns.SubscriptionResource(
            title=param_name + 'Subscription',
            template=self.template,
            Endpoint=troposphere.Ref(lambda_arn_param),
            Protocol='lambda',
            TopicArn=troposphere.Ref(param_name),
            Region=region_name
        )
        idx += 1
def add_record_set(self, account_ctx, region, resource, dns, record_set_type, enabled=True, resource_records=None, alias_dns_name=None, alias_hosted_zone_id=None, stack_group=None, async_stack_provision=False, config_ref=None):
    """Add a Route53RecordSet stack to the supplied stack_group.

    The stack is provisioned into the account owning the hosted zone when
    dns.hosted_zone is a paco.ref; when async_stack_provision is True the
    stack is provisioned asynchronously and waited on last.
    Raises PacoException when no stack_group is given (legacy path).
    """
    record_set_config = {
        'enabled': enabled,
        'dns': dns,
        'alias_dns_name': alias_dns_name,
        'alias_hosted_zone_id': alias_hosted_zone_id,
        'record_set_type': record_set_type,
        'resource_records': resource_records
    }
    if stack_group == None:
        # I don't believe this case happens anymore, and it doesn't
        # look like it does anything.
        raise PacoException(PacoErrorCode.Unknown)
        #record_set_stack_group = Route53RecordSetStackGroup(
        #    self.paco_ctx, account_ctx, self
        #)
        #record_set_stack_group.add_new_stack(
        #    region,
        #    resource,
        #    Route53RecordSet,
        #    extra_context={'record_set_config': record_set_config, 'record_set_name': dns.domain_name}
        #)
    else:
        # use the hosted zone's owning account when the zone is a paco.ref
        stack_account_ctx = account_ctx
        if is_ref(dns.hosted_zone):
            hosted_zone_obj = get_model_obj_from_ref(dns.hosted_zone, self.paco_ctx.project)
            stack_account_ctx = self.paco_ctx.get_account_context(account_ref=hosted_zone_obj.account)
        stack_orders = None
        if async_stack_provision == True:
            stack_orders = [StackOrder.PROVISION, StackOrder.WAITLAST]
        stack_group.add_new_stack(
            region,
            resource,
            Route53RecordSet,
            account_ctx=stack_account_ctx,
            stack_orders=stack_orders,
            extra_context={
                'record_set_config': record_set_config,
                'record_set_name': dns.domain_name
            }
        )
def account_name(self):
    """Name of the account this object belongs to.

    Resolved from an enclosing AccountContainer, or failing that from the
    EnvironmentRegion's network configuration.
    """
    container = get_parent_by_interface(self, schemas.IAccountContainer)
    if container is not None:
        return container.name
    env_region = get_parent_by_interface(self, schemas.IEnvironmentRegion)
    project = get_parent_by_interface(self)
    if env_region is not None:
        account = get_model_obj_from_ref(env_region.network.aws_account, project)
        return account.name
    raise AttributeError('Could not determine account for {}'.format(self.name))
def init_s3_deploy_roles(self):
    "Create Role for every account with an S3.Deploy bucket to allow access to all S3 Bucket(s) for S3.Deploy Actions in that account"
    for account_ref in self.s3deploy_bucket_refs.keys():
        account = get_model_obj_from_ref(account_ref, self.paco_ctx.project)
        # collect '<bucket-arn>' and '<bucket-arn>/*' for every deploy bucket in this account
        bucket_arns = []
        for ref in self.s3deploy_bucket_refs[account_ref].keys():
            bucket_arn = self.paco_ctx.get_ref(ref + '.arn')
            bucket_arns.append(bucket_arn)
            bucket_arns.append(bucket_arn + '/*')
        # delegate Role assumable by the pipeline account, with access to the
        # deploy buckets, the artifacts bucket and the pipeline's KMS CMK
        role_dict = {
            'assume_role_policy': {'effect': 'Allow', 'aws': [ self.pipeline_account_ctx.get_id() ]},
            'instance_profile': False,
            'path': '/',
            'role_name': 'S3Deploy',
            'enabled': True,
            'policies': [{
                'name': 'DeploymentPipeline',
                'statement': [
                    {'effect': 'Allow',
                     'action': ['s3:*'],
                     'resource': bucket_arns,
                    },
                    {'effect': 'Allow',
                     'action': ['s3:*'],
                     'resource': [self.artifacts_bucket_meta['arn'], self.artifacts_bucket_meta['arn'] + '/*']
                    },
                    {'effect': 'Allow',
                     'action': 'kms:*',
                     # resolved by the CMKArn template Parameter added below
                     'resource': ["!Ref CMKArn"]
                    },
                ]
            }],
        }
        role_name = 's3deploydelegate_{}'.format(account.name)
        role = models.iam.Role(role_name, self.pipeline)
        role.apply_config(role_dict)

        iam_ctl = self.paco_ctx.get_controller('IAM')
        role_id = self.gen_iam_role_id(self.res_id, role_name)
        # grant this delegate Role access via the artifacts bucket policy
        self.artifacts_bucket_policy_resource_arns.append("paco.sub '${%s}'" % (role.paco_ref + '.arn'))

        # IAM Roles Parameters
        iam_role_params = [{
            'key': 'CMKArn',
            'value': self.pipeline.paco_ref + '.kms.arn',
            'type': 'String',
            'description': 'DeploymentPipeline KMS Key Arn'
        }]
        iam_ctl.add_role(
            account_ctx=self.paco_ctx.get_account_context(account_ref),
            region=self.aws_region,
            resource=self.resource,
            role=role,
            iam_role_id=role_id,
            stack_group=self.stack_group,
            stack_tags=self.stack_tags,
            template_params=iam_role_params,
        )
        self.s3deploy_delegate_role_arns[account_ref] = iam_ctl.role_arn(role.paco_ref_parts)
def get_accounts(self):
    """
    Resolve the locations field for all accounts.
    If locations is empty, then all accounts are returned.
    """
    # bug fix: project must be resolved BEFORE the empty-locations early
    # return — previously it was assigned after that return, so the
    # locations == [] path raised UnboundLocalError. The sibling
    # CloudTrail.get_accounts resolves project first; now consistent.
    project = get_parent_by_interface(self, schemas.IProject)
    if self.locations == []:
        return project['accounts'].values()
    accounts = []
    for location in self.locations:
        account = references.get_model_obj_from_ref(location.account, project)
        accounts.append(account)
    return accounts
def get_accounts(self):
    """
    Resolve the CloudTrail.accounts field to a list of IAccount objects
    from the model. If the field is empty, then all accounts are returned.
    """
    project = get_parent_by_interface(self, schemas.IProject)
    if self.accounts == []:
        # no explicit accounts: every account in the Project
        return project['accounts'].values()
    return [
        references.get_model_obj_from_ref(account_ref, project)
        for account_ref in self.accounts
    ]
def get_account(self):
    """Return the Account object that this resource is provisioned to."""
    project = get_parent_by_interface(self, schemas.IProject)
    region = get_parent_by_interface(self, schemas.IRegionContainer)
    if schemas.IEnvironmentRegion.providedBy(region):
        # NetEnv resources: the account comes from the environment's network config
        return get_model_obj_from_ref(region.network.aws_account, project)
    # Service resources: the account is named by the enclosing AccountContainer
    account_cont = get_parent_by_interface(self, schemas.IAccountContainer)
    return project.accounts[account_cont.name]
def get_controller(self, controller_type, command=None, model_obj=None, model_paco_ref=None):
    """Gets a controller by name and calls .init() on it with any controller args"""
    controller_type = controller_type.lower()
    controller = None
    if model_obj == None and model_paco_ref != None:
        # only a ref was supplied: resolve the model object from it
        model_obj = references.get_model_obj_from_ref(model_paco_ref, self.project)
    if controller_type != 'service':
        # regular controllers are created once and cached by type
        if controller_type in self.controllers:
            controller = self.controllers[controller_type]
        if controller == None:
            controller = paco.controllers.klass[controller_type](self)
            self.controllers[controller_type] = controller
    else:
        # Service controllers are looked up by the service name embedded
        # in the model object's paco.ref (second ref part)
        service_name = model_obj.paco_ref_list[1]
        if service_name.lower() not in self.services:
            message = "Could not find Service: {}".format(service_name)
            raise StackException(PacoErrorCode.Unknown, message = message)
        controller = self.services[service_name.lower()]
    controller.init(command, model_obj)
    return controller
def gen_state_config(self, source):
    """Build the notification state config dict for an EventsRule source.

    NOTE(review): the body is nested under the monitoring-enabled check
    (inferred from the flattened original — confirm); when monitoring is
    absent or disabled this returns None.
    """
    monitoring = self.resource.monitoring
    if monitoring != None and monitoring.is_enabled() == True:
        notifications = None
        if monitoring.notifications != None and len(monitoring.notifications.keys()) > 0:
            notifications = monitoring.notifications
        else:
            # fall back to the Application-level notifications
            app_config = get_parent_by_interface(self.resource, schemas.IApplication)
            notifications = app_config.notifications
        source_obj = get_model_obj_from_ref(source, self.paco_ctx.project)
        state_config = {'type': source_obj.type, 'notifications': []}
        if notifications == None or len(notifications.keys()) <= 0:
            return state_config
        # Store the Notification state for this EventRule
        if self.resource.event_pattern == None:
            return state_config
        if source_obj.type == 'CodeBuild.Build':
            state_config['project_name'] = source_obj._stack.template.get_project_name()
        state_config['notifications'] = {}
        for group_id in notifications.keys():
            notify_group = notifications[group_id]
            state_config['notifications'][group_id] = {}
            state_config['notifications'][group_id]['severity'] = notify_group.severity
            state_config['notifications'][group_id]['groups'] = []
            state_config['notifications'][group_id]['groups'].extend(notify_group.groups)
            state_config['notifications'][group_id]['slack_channels'] = []
            state_config['notifications'][group_id]['slack_channels'].extend(notify_group.slack_channels)
        return state_config
def create_image_definitions_artifact(self, hook, pipeline):
    "Create an imageDefinitions file"
    # Build the ECR image URI from the pipeline's ECR.Source action.
    # NOTE(review): ecr_uri is unbound if the pipeline has no ECR.Source
    # action — presumably callers guarantee one exists; confirm.
    for action in pipeline.source.values():
        if action.type == 'ECR.Source':
            ecr_uri = f"{self.pipeline_account_ctx.get_id()}.dkr.ecr.{self.aws_region}.amazonaws.com/{action.repository}:{action.image_tag}"
    for action in pipeline.deploy.values():
        if action.type == 'ECS.Deploy':
            service = get_model_obj_from_ref(action.service, self.paco_ctx.project)
            # imagedefinitions.json payload consumed by CodePipeline ECS deploy
            file_contents = f"""[
    {{
        "name": "{service.name}",
        "imageUri": "{ecr_uri}"
    }}
]
"""
            # Upload to S3
            s3_ctl = self.paco_ctx.get_controller('S3')
            bucket_name = self.artifacts_bucket_meta['name']
            s3_key = self.pipeline._stack.get_name() + '-imagedef.zip'

            # create temp zip file
            orig_cwd = os.getcwd()
            work_path = pathlib.Path(self.paco_ctx.build_path)
            work_path = work_path / 'DeploymentPipeline' / self.app.paco_ref_parts / self.pipeline_account_ctx.get_name()
            work_path = work_path / self.aws_region / self.app.name / self.resource.group_name / self.resource.name / 'ImageDefinitions'
            zip_path = work_path / 'zip'
            pathlib.Path(zip_path).mkdir(parents=True, exist_ok=True)
            os.chdir(zip_path)
            image_def_path = zip_path / 'imagedefinitions.json'
            with open(image_def_path, "w") as output_fd:
                output_fd.write(file_contents)
            archive_path = work_path / 'imagedef'
            shutil.make_archive(archive_path, 'zip', zip_path)
            os.chdir(orig_cwd)
            s3_client = self.account_ctx.get_aws_client('s3')
            s3_client.upload_file(str(archive_path) + '.zip', bucket_name, s3_key)
def __init__(self, stack, paco_ctx):
    """CloudWatch EventsRule CloudFormation template.

    Builds a Rule with either a ScheduleExpression or an EventPattern,
    wires Targets (Lambda, CodeBuild, SNS notification params) and creates
    a TargetInvocationRole when the target type needs one.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    eventsrule = stack.resource
    config_ref = eventsrule.paco_ref_parts
    self.set_aws_name('EventsRule', self.resource_group_name, self.resource_name)
    self.notification_groups = {}

    # Init a Troposphere template
    self.init_template('CloudWatch EventsRule')

    if eventsrule.is_enabled() == False:
        return

    # Parameters
    schedule_expression_param = None
    if eventsrule.schedule_expression:
        schedule_expression_param = self.create_cfn_parameter(
            param_type='String',
            name='ScheduleExpression',
            description='ScheduleExpression for the Event Rule.',
            value=eventsrule.schedule_expression,
        )
    description_param = self.create_cfn_parameter(
        param_type='String',
        name='EventDescription',
        description='Description for the Event Rule.',
        value=eventsrule.description,
    )

    # Monitoring Target
    monitoring = self.resource.monitoring
    if monitoring != None and monitoring.is_enabled() == True:
        notifications = None
        if monitoring.notifications != None and len(monitoring.notifications.keys()) > 0:
            notifications = monitoring.notifications
        else:
            # fall back to the Application-level notifications
            app_config = get_parent_by_interface(self.resource, schemas.IApplication)
            notifications = app_config.notifications
        if notifications != None and len(notifications.keys()) > 0:
            # Create the CF Param for the SNS ARN we need to Publish to
            notify_param_cache = []
            for notify_group_name in notifications.keys():
                for sns_group_name in notifications[notify_group_name].groups:
                    notify_param = self.create_notification_param(sns_group_name)
                    # Only append if the are unique
                    if notify_param not in notify_param_cache:
                        eventsrule.targets.append(notify_param)
                        notify_param_cache.append(notify_param)

    # Targets
    targets = []
    self.target_params = {}
    target_invocation_role_resource = None
    for index in range(0, len(eventsrule.targets)):
        target = eventsrule.targets[index]
        # Target Parameters
        target_name = 'Target{}'.format(index)

        # Target CFN Parameters
        # Check if we already have a parameter object
        target_policy_actions = None
        if isinstance(target, troposphere.Parameter):
            # notification params created above are already Parameters
            self.target_params[target_name + 'Arn'] = target
        else:
            self.target_params[target_name + 'Arn'] = self.create_cfn_parameter(
                param_type='String',
                name=target_name + 'Arn',
                description=target_name + ' Arn for the Events Rule.',
                value=target.target + '.arn',
            )
            # If the target is a reference, get the target object from the model
            # to check what type of resource we need to configure for
            target_ref = Reference(target.target)
            if target_ref.parts[-1] == 'project' and target_ref.parts[-3] == 'build':
                # strip the trailing '.project' to address the CodeBuild action itself
                codebuild_target_ref = f'paco.ref {".".join(target_ref.parts[:-1])}'
                target_model_obj = get_model_obj_from_ref(codebuild_target_ref, self.paco_ctx.project)
            else:
                target_model_obj = get_model_obj_from_ref(target.target, self.paco_ctx.project)

            # Lambda Policy Actions
            if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(target_model_obj):
                # CodeBuild Project
                target_policy_actions = [awacs.codebuild.StartBuild]
            elif schemas.ILambda.providedBy(target_model_obj):
                # Lambda Function
                target_policy_actions = [awacs.awslambda.InvokeFunction]

        self.target_params[target_name] = self.create_cfn_parameter(
            param_type='String',
            name=target_name,
            description=target_name + ' for the Event Rule.',
            value=target_name,
        )

        # IAM Role Polcies by Resource type
        if target_policy_actions != None:
            # IAM Role Resources to allow Event to invoke Target
            target_invocation_role_resource = troposphere.iam.Role(
                'TargetInvocationRole',
                AssumeRolePolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[awacs.sts.AssumeRole],
                            Principal=Principal('Service', ['events.amazonaws.com'])
                        )
                    ],
                ),
                Policies=[
                    troposphere.iam.Policy(
                        PolicyName="TargetInvocation",
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Effect=Allow,
                                    Action=target_policy_actions,
                                    Resource=[
                                        troposphere.Ref(self.target_params[target_name + 'Arn'])
                                    ],
                                )
                            ]
                        )
                    )
                ],
            )
            self.template.add_resource(target_invocation_role_resource)

        # Create Target CFN Resources
        cfn_export_dict = {
            'Arn': troposphere.Ref(self.target_params[target_name + 'Arn']),
            'Id': troposphere.Ref(self.target_params[target_name])
        }
        if target_invocation_role_resource != None:
            cfn_export_dict['RoleArn'] = troposphere.GetAtt(target_invocation_role_resource, 'Arn')
        if hasattr(target, 'input_json') and target.input_json != None:
            cfn_export_dict['Input'] = target.input_json

        # Events Rule Targets
        targets.append(cfn_export_dict)

    # Events Rule Resource
    # The Name is needed so that a Lambda can be created and it's Lambda ARN output
    # can be supplied as a Parameter to this Stack and a Lambda Permission can be
    # made with the Lambda. Avoids circular dependencies.
    name = create_event_rule_name(eventsrule)
    if eventsrule.enabled_state:
        enabled_state = 'ENABLED'
    else:
        enabled_state = 'DISABLED'
    events_rule_dict = {
        'Name': name,
        'Description': troposphere.Ref(description_param),
        'Targets': targets,
        'State': enabled_state
    }
    if target_invocation_role_resource != None:
        events_rule_dict['RoleArn'] = troposphere.GetAtt(target_invocation_role_resource, 'Arn')
    if schedule_expression_param != None:
        events_rule_dict['ScheduleExpression'] = troposphere.Ref(schedule_expression_param)
    elif eventsrule.event_pattern != None:
        # resolve paco.refs in the event pattern source list to concrete values
        source_value_list = []
        project_name_list = []
        for pattern_source in eventsrule.event_pattern.source:
            if is_ref(pattern_source):
                source_obj = get_model_obj_from_ref(pattern_source, self.paco_ctx.project)
                if schemas.IDeploymentPipelineBuildCodeBuild.providedBy(source_obj):
                    source_value_list.append('aws.codebuild')
                    project_name_list.append(source_obj._stack.template.get_project_name())
                else:
                    raise InvalidEventsRuleEventPatternSource(pattern_source)
            else:
                source_value_list.append(pattern_source)
        if len(project_name_list) > 0:
            eventsrule.event_pattern.detail['project-name'] = project_name_list
        event_pattern_dict = {
            'source': source_value_list,
            'detail-type': utils.obj_to_dict(eventsrule.event_pattern.detail_type),
            'detail': utils.obj_to_dict(eventsrule.event_pattern.detail),
        }
        # dump/load round-trip — presumably normalizes model types into
        # plain Python containers for troposphere; confirm
        event_pattern_yaml = yaml.dump(event_pattern_dict)
        events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)
    else:
        # Defaults to a CodePipeline events rule
        event_pattern_yaml = """
source:
  - aws.codepipeline
detail-type:
  - 'CodePipeline Pipeline Execution State Change'
detail:
  state:
    - STARTED
"""
        events_rule_dict['EventPattern'] = yaml.load(event_pattern_yaml)

    event_rule_resource = troposphere.events.Rule.from_dict('EventRule', events_rule_dict)
    if target_invocation_role_resource != None:
        event_rule_resource.DependsOn = target_invocation_role_resource
    self.template.add_resource(event_rule_resource)

    # Outputs
    self.create_output(
        title="EventRuleId",
        value=troposphere.Ref(event_rule_resource),
        ref=config_ref + '.id',
    )
    self.create_output(
        title="EventRuleArn",
        value=troposphere.GetAtt(event_rule_resource, "Arn"),
        ref=config_ref + '.arn',
    )
def __init__(self, stack, paco_ctx, role):
    """IoT Analytics pipeline CloudFormation template.

    Creates Channel -> Pipeline (chained activities) -> Datastore resources
    plus Dataset resources with optional S3 content delivery, and Outputs
    for each resource name.
    """
    super().__init__(stack, paco_ctx)
    self.set_aws_name('IoTAnalyticsPipeline', self.resource_group_name, self.resource_name)
    iotap = self.resource

    # Init Troposphere template
    self.init_template('IoT Analytics pipeline')
    if not iotap.is_enabled():
        return

    # Role ARN for IoT
    role_arn_param = self.create_cfn_parameter(
        param_type='String',
        name='IoTRoleArn',
        description='IoT Topic Rule Service Role ARN',
        value=role.get_arn(),
    )

    # Channel Resource
    iotchannel_logical_id = 'IoTAnalyticsChannel'
    cfn_export_dict = {}
    if iotap.channel_storage.bucket == None:
        # service-managed storage: retention is configurable
        channel_storage_dict = {'ServiceManagedS3': {}}
        cfn_export_dict['RetentionPeriod'] = convert_expire_to_cfn_dict(
            iotap.channel_storage.expire_events_after_days)
    else:
        # customer-managed S3 bucket storage
        channel_bucket_param = self.create_cfn_parameter(
            param_type='String',
            name='IoTAnalyticsChannelBucketName',
            description='IoT Analytics Channel storage bucket name',
            value=iotap.channel_storage.bucket + '.name',
        )
        channel_storage_dict = {
            'CustomerManagedS3': {
                'Bucket': troposphere.Ref(channel_bucket_param),
                'KeyPrefix': iotap.channel_storage.key_prefix,
                'RoleArn': troposphere.Ref(role_arn_param),
            }
        }
    cfn_export_dict['ChannelStorage'] = channel_storage_dict
    iot_channel_resource = troposphere.iotanalytics.Channel.from_dict(
        iotchannel_logical_id, cfn_export_dict)
    self.template.add_resource(iot_channel_resource)
    self.create_output(
        title='ChannelName',
        description='IoT Analytics Channel name',
        value=troposphere.Ref(iot_channel_resource),
        ref=self.resource.paco_ref_parts + '.channel.name',
    )

    # Datastore Resource
    # NOTE(review): the channel logical-id variable is reused here for the
    # Datastore logical id
    iotchannel_logical_id = 'IoTAnalyticsDatastore'
    cfn_export_dict = {}
    if iotap.datastore_storage.bucket == None:
        datastore_storage_dict = {'ServiceManagedS3': {}}
        cfn_export_dict['RetentionPeriod'] = convert_expire_to_cfn_dict(
            iotap.datastore_storage.expire_events_after_days)
    else:
        datastore_bucket_param = self.create_cfn_parameter(
            param_type='String',
            name='IoTAnalyticsDatastoreBucketName',
            description='IoT Analytics Datastore storage bucket name',
            value=iotap.datastore_storage.bucket + '.name',
        )
        datastore_storage_dict = {
            'CustomerManagedS3': {
                'Bucket': troposphere.Ref(datastore_bucket_param),
                'KeyPrefix': iotap.datastore_storage.key_prefix,
                'RoleArn': troposphere.Ref(role_arn_param),
            }
        }
    cfn_export_dict['DatastoreStorage'] = datastore_storage_dict
    if iotap.datastore_name != None:
        cfn_export_dict['DatastoreName'] = iotap.datastore_name
    iotap_datastore_resource = troposphere.iotanalytics.Datastore.from_dict(
        iotchannel_logical_id, cfn_export_dict)
    iotap_datastore_resource.DependsOn = iot_channel_resource
    self.template.add_resource(iotap_datastore_resource)
    self.create_output(
        title='DatastoreName',
        description='IoT Analytics Datastore name',
        value=troposphere.Ref(iotap_datastore_resource),
        ref=self.resource.paco_ref_parts + '.datastore.name',
    )

    # Pipeline Resource
    iotpipeline_logical_id = 'IoTAnalyticsPipeline'
    cfn_export_dict = {}
    cfn_export_dict['PipelineActivities'] = []
    idx = 0
    activity_list = list(iotap.pipeline_activities.values())

    # start with a Channel activity
    if len(activity_list) == 0:
        next_name = "DatastoreActivity"
    else:
        next_name = activity_list[idx].name + "Activity"
    cfn_export_dict['PipelineActivities'].append({
        'Channel': {
            'Name': "ChannelActivity",
            'ChannelName': troposphere.Ref(iot_channel_resource),
            'Next': next_name,
        }
    })

    # each activity chains to the following one; the last chains to the Datastore
    for activity in iotap.pipeline_activities.values():
        if len(activity_list) == idx + 1:
            next_name = 'DatastoreActivity'
        else:
            next_name = activity_list[idx + 1].name + "Activity"
        if activity.activity_type == 'lambda':
            lambda_param = self.create_cfn_parameter(
                param_type='String',
                name=f'LambdaFunction{idx}',
                description=f'IoT Analytics Lambda for Activity {idx}',
                value=activity.function + '.arn',
            )
            if not activity.batch_size:
                activity.batch_size = 1
            activity_dict = {
                'Lambda': {
                    # extract the function name (7th ':'-field, index 6) from the Lambda ARN
                    'LambdaName': troposphere.Join('', [
                        '', troposphere.Select(
                            6, troposphere.Split(':', troposphere.Ref(lambda_param)))
                    ]),
                    'BatchSize': activity.batch_size,
                    'Name': activity.name + "Activity",
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'add_attributes':
            activity_dict = {
                'AddAttributes': {
                    'Name': activity.name + "Activity",
                    'Attributes': activity.attributes,
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'remove_attributes':
            activity_dict = {
                'RemoveAttributes': {
                    'Name': activity.name + "Activity",
                    'Attributes': activity.attribute_list,
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'select_attributes':
            activity_dict = {
                'SelectAttributes': {
                    'Name': activity.name + "Activity",
                    'Attributes': activity.attribute_list,
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'filter':
            activity_dict = {
                'Filter': {
                    'Name': activity.name + "Activity",
                    'Filter': activity.filter,
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'math':
            activity_dict = {
                'Math': {
                    'Name': activity.name + "Activity",
                    'Attribute': activity.attribute,
                    'Math': activity.math,
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'device_registry_enrich':
            activity_dict = {
                'DeviceRegistryEnrich': {
                    'Name': activity.name + "Activity",
                    'Attribute': activity.attribute,
                    'ThingName': activity.thing_name,
                    'Next': next_name,
                }
            }
        elif activity.activity_type == 'device_shadow_enrich':
            activity_dict = {
                'DeviceShadowEnrich': {
                    'Name': activity.name + "Activity",
                    'Attribute': activity.attribute,
                    'ThingName': activity.thing_name,
                    'Next': next_name,
                }
            }
        cfn_export_dict['PipelineActivities'].append(activity_dict)
        idx += 1

    # finish with a Datastore activity
    cfn_export_dict['PipelineActivities'].append({
        'Datastore': {
            'Name': "DatastoreActivity",
            'DatastoreName': troposphere.Ref(iotap_datastore_resource),
        }
    })
    iotpipeline_resource = troposphere.iotanalytics.Pipeline.from_dict(
        iotpipeline_logical_id,
        cfn_export_dict,
    )
    iotpipeline_resource.DependsOn = [iot_channel_resource, iotap_datastore_resource]
    self.template.add_resource(iotpipeline_resource)
    self.create_output(
        title='PipelineName',
        description='IoT Analytics Pipeline name',
        value=troposphere.Ref(iotpipeline_resource),
        ref=self.resource.paco_ref_parts + '.pipeline.name',
    )

    # Datasets
    for dataset in iotap.datasets.values():
        iotdataset_logical_id = self.create_cfn_logical_id(f'IoTDataset{dataset.name}')
        cfn_export_dict = {}
        cfn_export_dict['Actions'] = []
        if dataset.query_action != None:
            cfn_export_dict['Actions'].append({
                'ActionName': dataset.name,
                'QueryAction': {
                    'Filters': dataset.query_action.filters,
                    'SqlQuery': dataset.query_action.sql_query,
                }
            })
        else:
            # ToDo: container_action
            pass
        cfn_export_dict['ContentDeliveryRules'] = []
        for delivery_rule in dataset.content_delivery_rules.values():
            delivery_dict = {
                'Destination': {},
                # 'EntryName': delivery_rule.name,
            }
            if delivery_rule.s3_destination != None:
                bucket = get_model_obj_from_ref(delivery_rule.s3_destination.bucket, self.paco_ctx.project)
                delivery_dict['Destination']['S3DestinationConfiguration'] = {
                    'Bucket': bucket.get_aws_name(),
                    'Key': delivery_rule.s3_destination.key,
                    'RoleArn': troposphere.Ref(role_arn_param),
                }
            cfn_export_dict['ContentDeliveryRules'].append(delivery_dict)
        cfn_export_dict['RetentionPeriod'] = convert_expire_to_cfn_dict(
            dataset.expire_events_after_days)
        if dataset.version_history != None:
            if dataset.version_history == 0:
                # 0 means keep an unlimited number of versions
                cfn_export_dict['VersioningConfiguration'] = {'Unlimited': True}
            else:
                cfn_export_dict['VersioningConfiguration'] = {
                    'MaxVersions': dataset.version_history,
                    'Unlimited': False
                }
        iot_dataset_resource = troposphere.iotanalytics.Dataset.from_dict(
            iotdataset_logical_id, cfn_export_dict)
        iot_dataset_resource.DependsOn = iotap_datastore_resource
        self.template.add_resource(iot_dataset_resource)
        self.create_output(
            title=f'{dataset.name}DatasetName',
            description=f'IoT Analytics Dataset {dataset.name}',
            value=troposphere.Ref(iot_dataset_resource),
            ref=self.resource.paco_ref_parts + '.dataset.' + dataset.name + '.name',
        )
def init_lb(self, aws_name, template_title):
    """Build the CloudFormation template for an ELBv2 Load Balancer.

    Generates Parameters, the LoadBalancer resource, TargetGroups,
    Listeners (with SSL certificates and rules), optional legacy Route53
    RecordSets and Stack Outputs. When the LB is disabled, only an empty
    template is set. Also registers non-legacy DNS Alias records with the
    Route53 controller.
    """
    self.set_aws_name(aws_name, self.resource_group_name, self.lb_config.name)
    self.network = self.lb_config.env_region_obj.network
    # Init Troposphere template
    self.init_template(template_title)
    # Disabled LB: emit the (empty) template and stop
    if not self.lb_config.is_enabled():
        return self.set_template()

    # Parameters
    if self.lb_config.is_enabled():
        lb_enable = 'true'
    else:
        lb_enable = 'false'
    lb_is_enabled_param = self.create_cfn_parameter(
        param_type='String',
        name='LBEnabled',
        description='Enable the LB in this template',
        value=lb_enable)
    vpc_stack = self.env_ctx.get_vpc_stack()
    vpc_param = self.create_cfn_parameter(
        param_type='String',
        name='VPC',
        description='VPC ID',
        value=StackOutputParam('VPC', vpc_stack, 'VPC', self))
    lb_region = self.env_ctx.region
    # alb/nlb selects the regional ELB hosted zone id
    if self.lb_config.type == 'LBApplication':
        lb_type = 'alb'
    else:
        lb_type = 'nlb'
    lb_hosted_zone_id_param = self.create_cfn_parameter(
        param_type='String',
        name='LBHostedZoneId',
        description='The Regonal AWS Route53 Hosted Zone ID',
        value=self.lb_hosted_zone_id(lb_type, lb_region))
    # 32 Characters max
    # <proj>-<env>-<app>-<lb.name>
    # TODO: Limit each name item to 7 chars
    # Name collision risk:, if unique identifying characrtes are truncated
    # - Add a hash?
    # - Check for duplicates with validating template
    load_balancer_name = self.create_resource_name_join(
        name_list=[
            self.env_ctx.netenv.name,
            self.env_ctx.env.name,
            self.app_id,
            self.resource_group_name,
            self.lb_config.name
        ],
        separator='',
        camel_case=True,
        filter_id='EC2.ElasticLoadBalancingV2.LoadBalancer.Name')
    load_balancer_name_param = self.create_cfn_parameter(
        param_type='String',
        name='LoadBalancerName',
        description='The name of the load balancer',
        value=load_balancer_name)
    scheme_param = self.create_cfn_parameter(
        param_type='String',
        min_length=1,
        max_length=128,
        name='Scheme',
        description='Specify internal to create an internal load balancer with a DNS name that resolves to private IP addresses or internet-facing to create a load balancer with a publicly resolvable DNS name, which resolves to public IP addresses.',
        value=self.lb_config.scheme)
    # Segment SubnetList is a Segment stack Output based on availability zones
    subnet_list_ref = self.network.vpc.segments[self.lb_config.segment].paco_ref + '.subnet_id_list'
    subnet_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='SubnetList',
        description='A list of subnets where the LBs instances will be provisioned',
        value=subnet_list_ref,
    )
    # Security Groups (only ALBs carry security groups; NLBs do not)
    if self.lb_config.type == 'LBApplication':
        sg_group_list = []
        sg_group_list.extend(self.lb_config.security_groups)
        # allow registered hooks to contribute extra security groups
        for hook in SECURITY_GROUPS_HOOKS:
            env_config = get_parent_by_interface(self.lb_config, IEnvironmentRegion)
            vpc_id = self.paco_ctx.get_ref(f'{env_config.network.vpc.paco_ref}.id').get_outputs_value('VPC')
            hook_sg_list = hook(self.lb_config, self.account_ctx, self.aws_region, vpc_id)
            sg_group_list.extend(hook_sg_list)
        security_group_list_param = self.create_cfn_ref_list_param(
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            name='SecurityGroupList',
            description='A List of security groups to attach to the LB',
            value=sg_group_list,
            ref_attribute='id')
    idle_timeout_param = self.create_cfn_parameter(
        param_type='String',
        name='IdleTimeoutSecs',
        description='The idle timeout value, in seconds.',
        value=self.lb_config.idle_timeout_secs)

    # Conditions
    self.template.add_condition(
        "LBIsEnabled",
        troposphere.Equals(troposphere.Ref(lb_is_enabled_param), "true"))

    # Resources
    # LoadBalancer
    load_balancer_logical_id = 'LoadBalancer'
    cfn_export_dict = {}
    cfn_export_dict['Name'] = troposphere.Ref(load_balancer_name_param)
    if self.lb_config.type == 'LBApplication':
        lb_v2_type = 'application'
    else:
        lb_v2_type = 'network'
    cfn_export_dict['Type'] = lb_v2_type
    cfn_export_dict['Scheme'] = troposphere.Ref(scheme_param)
    cfn_export_dict['Subnets'] = troposphere.Ref(subnet_list_param)
    # Application Load Balancer Logic
    lb_attributes = []
    if self.lb_config.type == 'LBApplication':
        cfn_export_dict['SecurityGroups'] = troposphere.Ref(security_group_list_param)
        lb_attributes.append({
            'Key': 'idle_timeout.timeout_seconds',
            'Value': troposphere.Ref(idle_timeout_param)
        })
    if self.lb_config.enable_access_logs:
        # ToDo: automatically create a bucket when access_logs_bucket is not set
        s3bucket = get_model_obj_from_ref(self.lb_config.access_logs_bucket, self.paco_ctx.project)
        lb_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': 'true'})
        lb_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': s3bucket.get_bucket_name()})
        if self.lb_config.access_logs_prefix:
            lb_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.lb_config.access_logs_prefix})
    cfn_export_dict['LoadBalancerAttributes'] = lb_attributes
    lb_resource = troposphere.elasticloadbalancingv2.LoadBalancer.from_dict(
        load_balancer_logical_id, cfn_export_dict)
    lb_resource.Condition = "LBIsEnabled"
    self.template.add_resource(lb_resource)

    # Target Groups
    # sorted() keeps logical id / output ordering deterministic across runs
    for target_group_name, target_group in sorted(self.lb_config.target_groups.items()):
        if target_group.is_enabled() == False:
            continue
        target_group_id = self.create_cfn_logical_id(target_group_name)
        target_group_logical_id = 'TargetGroup' + target_group_id
        cfn_export_dict = {}
        # legacy stacks keep an explicit TargetGroup name; new stacks let CFN generate it
        if self.paco_ctx.legacy_flag('target_group_name_2019_10_29') == True:
            name = self.create_resource_name_join(
                name_list=[load_balancer_name, target_group_id],
                separator='',
                camel_case=True,
                hash_long_names=True,
                filter_id='EC2.ElasticLoadBalancingV2.TargetGroup.Name',
            )
        else:
            name = troposphere.Ref('AWS::NoValue')
        cfn_export_dict['Name'] = name
        cfn_export_dict['HealthCheckIntervalSeconds'] = target_group.health_check_interval
        cfn_export_dict['HealthCheckTimeoutSeconds'] = target_group.health_check_timeout
        cfn_export_dict['HealthyThresholdCount'] = target_group.healthy_threshold
        cfn_export_dict['HealthCheckProtocol'] = target_group.health_check_protocol
        # HTTP Health Checks
        if target_group.health_check_protocol in ['HTTP', 'HTTPS']:
            cfn_export_dict['HealthCheckPath'] = target_group.health_check_path
            cfn_export_dict['Matcher'] = {'HttpCode': target_group.health_check_http_code}
        if target_group.health_check_port != 'traffic-port':
            cfn_export_dict['HealthCheckPort'] = target_group.health_check_port
        if target_group.port != None:
            cfn_export_dict['Port'] = target_group.port
        cfn_export_dict['Protocol'] = target_group.protocol
        cfn_export_dict['UnhealthyThresholdCount'] = target_group.unhealthy_threshold
        cfn_export_dict['TargetGroupAttributes'] = [{
            'Key': 'deregistration_delay.timeout_seconds',
            'Value': str(target_group.connection_drain_timeout)
        }]
        # TODO: Preserve Client IP
        # if self.lb_config.type == 'LBNetwork':
        #     cfn_export_dict['TargetGroupAttributes'].append({
        #         'Key': 'preserve_client_ip.enabled', 'Value': 'false'
        #     })
        cfn_export_dict['VpcId'] = troposphere.Ref(vpc_param)
        if target_group.target_type != 'instance':
            cfn_export_dict['TargetType'] = target_group.target_type
        target_group_resource = troposphere.elasticloadbalancingv2.TargetGroup.from_dict(
            target_group_logical_id, cfn_export_dict)
        self.template.add_resource(target_group_resource)

        # Target Group Outputs
        target_group_ref = '.'.join([self.lb_config.paco_ref_parts, 'target_groups', target_group_name])
        target_group_arn_ref = '.'.join([target_group_ref, 'arn'])
        self.create_output(
            title='TargetGroupArn' + target_group_id,
            value=troposphere.Ref(target_group_resource),
            ref=target_group_arn_ref)
        target_group_name_ref = '.'.join([target_group_ref, 'name'])
        self.create_output(
            title='TargetGroupName' + target_group_id,
            value=troposphere.GetAtt(target_group_resource, 'TargetGroupName'),
            ref=target_group_name_ref)
        self.create_output(
            title='TargetGroupFullName' + target_group_id,
            value=troposphere.GetAtt(target_group_resource, 'TargetGroupFullName'),
            ref=target_group_ref + '.fullname')

    # Listeners
    for listener_name, listener in self.lb_config.listeners.items():
        logical_listener_name = self.create_cfn_logical_id('Listener' + listener_name)
        # NOTE(review): this takes the model object's export dict directly and
        # mutates it below — presumably cfn_export_dict returns a fresh dict
        # per access; verify against the model property.
        cfn_export_dict = listener.cfn_export_dict
        # Listener - Default Actions
        if listener.redirect != None:
            action = {
                'Type': 'redirect',
                'RedirectConfig': {
                    'Port': str(listener.redirect.port),
                    'Protocol': listener.redirect.protocol,
                    'StatusCode': 'HTTP_301'
                }
            }
        else:
            target_group_id = self.create_cfn_logical_id(listener.target_group)
            action = {
                'Type': 'forward',
                'TargetGroupArn': troposphere.Ref('TargetGroup' + target_group_id)
            }
        cfn_export_dict['DefaultActions'] = [action]
        cfn_export_dict['LoadBalancerArn'] = troposphere.Ref(lb_resource)
        # Listener - SSL Certificates
        # The first certificate is set inline on the Listener; any additional
        # certificates are attached via a ListenerCertificate resource below.
        ssl_cert_param_obj_list = []
        unique_listener_cert_name = ""
        if len(listener.ssl_certificates) > 0 and self.lb_config.is_enabled():
            if listener.ssl_policy != '':
                cfn_export_dict['SslPolicy'] = listener.ssl_policy
            cfn_export_dict['Certificates'] = []
            for ssl_cert_idx in range(0, len(listener.ssl_certificates)):
                ssl_cert_param = self.create_cfn_parameter(
                    param_type='String',
                    name='SSLCertificateIdL%sC%d' % (listener_name, ssl_cert_idx),
                    description='The Arn of the SSL Certificate to associate with this Load Balancer',
                    value=listener.ssl_certificates[ssl_cert_idx] + ".arn")
                if ssl_cert_idx == 0:
                    cfn_export_dict['Certificates'] = [{
                        'CertificateArn': troposphere.Ref(ssl_cert_param)
                    }]
                else:
                    unique_listener_cert_name = f'{unique_listener_cert_name}{listener.ssl_certificates[ssl_cert_idx]}'
                    ssl_cert_param_obj_list.append(
                        troposphere.elasticloadbalancingv2.Certificate(
                            CertificateArn=troposphere.Ref(ssl_cert_param)))
        listener_resource = troposphere.elasticloadbalancingv2.Listener.from_dict(
            logical_listener_name, cfn_export_dict)
        self.template.add_resource(listener_resource)

        # ListenerCertificates
        if len(ssl_cert_param_obj_list) > 0:
            # hash the concatenated cert refs so the logical id changes when the cert set changes
            unique_listener_cert_name = utils.md5sum(str_data=unique_listener_cert_name)
            logical_listener_cert_name = self.create_cfn_logical_id_join([
                logical_listener_name, 'Certificate', unique_listener_cert_name
            ])
            troposphere.elasticloadbalancingv2.ListenerCertificate(
                title=logical_listener_cert_name,
                template=self.template,
                Certificates=ssl_cert_param_obj_list,
                ListenerArn=troposphere.Ref(listener_resource))

        # Listener - Rules
        if listener.rules != None:
            for rule_name, rule in listener.rules.items():
                if rule.enabled == False:
                    continue
                logical_rule_name = self.create_cfn_logical_id(rule_name)
                cfn_export_dict = {}
                rule_conditions = []
                if rule.rule_type == "forward":
                    logical_target_group_id = self.create_cfn_logical_id('TargetGroup' + rule.target_group)
                    cfn_export_dict['Actions'] = [{
                        'Type': 'forward',
                        'TargetGroupArn': troposphere.Ref(logical_target_group_id)
                    }]
                    if rule.host != None:
                        rule_conditions.append({
                            'Field': 'host-header',
                            'Values': [rule.host]
                        })
                    if len(rule.path_pattern) > 0:
                        rule_conditions.append({
                            'Field': 'path-pattern',
                            'Values': rule.path_pattern
                        })
                elif rule.rule_type == "redirect":
                    redirect_config = {
                        'Type': 'redirect',
                        'RedirectConfig': {
                            'Host': rule.redirect_host,
                            'StatusCode': 'HTTP_301'
                        }
                    }
                    if rule.redirect_path != None:
                        redirect_config['RedirectConfig']['Path'] = rule.redirect_path
                    cfn_export_dict['Actions'] = [redirect_config]
                    rule_conditions.append({
                        'Field': 'host-header',
                        'Values': [rule.host]
                    })
                    if len(rule.path_pattern) > 0:
                        rule_conditions.append({
                            'Field': 'path-pattern',
                            'Values': rule.path_pattern
                        })
                cfn_export_dict['Conditions'] = rule_conditions
                cfn_export_dict['ListenerArn'] = troposphere.Ref(logical_listener_name)
                cfn_export_dict['Priority'] = rule.priority
                logical_listener_rule_name = self.create_cfn_logical_id_join(
                    str_list=[logical_listener_name, 'Rule', logical_rule_name])
                listener_rule_resource = troposphere.elasticloadbalancingv2.ListenerRule.from_dict(
                    logical_listener_rule_name, cfn_export_dict)
                listener_rule_resource.Condition = "LBIsEnabled"
                self.template.add_resource(listener_rule_resource)

    # Record Sets (legacy: created inside this template; otherwise the
    # Route53 controller manages them — see the end of this method)
    if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16'):
        record_set_index = 0
        for lb_dns in self.lb_config.dns:
            if self.lb_config.is_dns_enabled() == True:
                hosted_zone_param = self.create_cfn_parameter(
                    param_type='String',
                    description='LB DNS Hosted Zone ID',
                    name='HostedZoneID%d' % (record_set_index),
                    value=lb_dns.hosted_zone + '.id')
                cfn_export_dict = {}
                cfn_export_dict['HostedZoneId'] = troposphere.Ref(hosted_zone_param)
                cfn_export_dict['Name'] = lb_dns.domain_name
                cfn_export_dict['Type'] = 'A'
                cfn_export_dict['AliasTarget'] = {
                    'DNSName': troposphere.GetAtt(lb_resource, 'DNSName'),
                    'HostedZoneId': troposphere.GetAtt(lb_resource, 'CanonicalHostedZoneID')
                }
                record_set_resource = troposphere.route53.RecordSet.from_dict(
                    'RecordSet' + str(record_set_index), cfn_export_dict)
                record_set_resource.Condition = "LBIsEnabled"
                self.template.add_resource(record_set_resource)
                record_set_index += 1

    if self.enabled == True:
        self.create_output(
            title='LoadBalancerArn',
            value=troposphere.Ref(lb_resource),
            ref=self.lb_config.paco_ref_parts + '.arn')
        self.create_output(
            title='LoadBalancerName',
            value=troposphere.GetAtt(lb_resource, 'LoadBalancerName'),
            ref=self.lb_config.paco_ref_parts + '.name')
        self.create_output(
            title='LoadBalancerFullName',
            value=troposphere.GetAtt(lb_resource, 'LoadBalancerFullName'),
            ref=self.lb_config.paco_ref_parts + '.fullname')
        self.create_output(
            title='LoadBalancerCanonicalHostedZoneID',
            value=troposphere.GetAtt(lb_resource, 'CanonicalHostedZoneID'),
            ref=self.lb_config.paco_ref_parts + '.canonicalhostedzoneid')
        self.create_output(
            title='LoadBalancerDNSName',
            value=troposphere.GetAtt(lb_resource, 'DNSName'),
            ref=self.lb_config.paco_ref_parts + '.dnsname',
        )
        # Non-legacy DNS: register Alias records through the Route53 controller
        if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == False:
            route53_ctl = self.paco_ctx.get_controller('route53')
            for lb_dns in self.lb_config.dns:
                if self.lb_config.is_dns_enabled() == True:
                    alias_dns_ref = self.lb_config.paco_ref + '.dnsname'
                    alias_hosted_zone_ref = self.lb_config.paco_ref + '.canonicalhostedzoneid'
                    hosted_zone = get_model_obj_from_ref(lb_dns.hosted_zone, self.paco_ctx.project)
                    account_ctx = self.paco_ctx.get_account_context(account_ref=hosted_zone.account)
                    route53_ctl.add_record_set(
                        account_ctx,
                        self.aws_region,
                        self.lb_config,
                        enabled=self.lb_config.is_enabled(),
                        dns=lb_dns,
                        record_set_type='Alias',
                        alias_dns_name=alias_dns_ref,
                        alias_hosted_zone_id=alias_hosted_zone_ref,
                        stack_group=self.stack.stack_group,
                        async_stack_provision=True,
                        config_ref=self.lb_config.paco_ref_parts + '.dns')
def init_resource(self):
    """Provision the IAM Role and stack for an IoT Analytics pipeline.

    Builds an IAM Role assumable by iotanalytics.amazonaws.com that grants
    read/write access to every S3 bucket referenced by the channel storage,
    datastore storage and dataset content delivery rules, registers the Role
    with the IAM controller, then adds the IoTAnalyticsPipeline stack to the
    stack group (the Role is handed to the template via extra_context).
    """
    # Create a Role for the IoT Topic Rule to assume
    role_name = "iot_analytics"
    # add needed Statements to the Policy
    statements = []
    # pipeline buckets: dict keyed by bucket paco.ref, used as an
    # insertion-ordered set to de-duplicate bucket references
    bucket_refs = {}
    if self.resource.channel_storage.bucket != None:
        bucket_refs[self.resource.channel_storage.bucket] = None
    if self.resource.datastore_storage.bucket != None:
        bucket_refs[self.resource.datastore_storage.bucket] = None
    for dataset in self.resource.datasets.values():
        for delivery_rule in dataset.content_delivery_rules.values():
            if delivery_rule.s3_destination != None:
                bucket_refs[delivery_rule.s3_destination.bucket] = None
    # one Allow statement per distinct bucket: the bucket itself and its objects
    for bucket_ref in bucket_refs.keys():
        bucket = get_model_obj_from_ref(bucket_ref, self.paco_ctx.project)
        statements.append({
            'effect': 'Allow',
            'action': [
                "s3:GetBucketLocation",
                "s3:GetObject",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads",
                "s3:ListMultipartUploadParts",
                "s3:AbortMultipartUpload",
                "s3:PutObject",
                "s3:DeleteObject",
            ],
            # fixed: were no-placeholder f-strings concatenated with '+';
            # values are identical
            'resource': [
                f"arn:aws:s3:::{bucket.get_aws_name()}",
                f"arn:aws:s3:::{bucket.get_aws_name()}/*",
            ],
        })
    role_dict = {
        'enabled': self.resource.is_enabled(),
        'path': '/',
        'role_name': "IoTAnalytics",
        'assume_role_policy': {
            'effect': 'Allow',
            'service': ['iotanalytics.amazonaws.com']
        },
    }
    if len(statements) > 0:
        role_dict['policies'] = [{
            'name': 'IoTTopicRule',
            'statement': statements
        }]
    role = Role(role_name, self.resource)
    role.apply_config(role_dict)
    iam_role_id = self.gen_iam_role_id(self.resource.name, role_name)
    iam_ctl = self.paco_ctx.get_controller('IAM')
    iam_ctl.add_role(
        region=self.aws_region,
        resource=self.resource,
        role=role,
        iam_role_id=iam_role_id,
        stack_group=self.stack_group,
        stack_tags=self.stack_tags)
    self.stack_group.add_new_stack(
        self.aws_region,
        self.resource,
        paco.cftemplates.iotanalyticspipeline.IoTAnalyticsPipeline,
        stack_tags=self.stack_tags,
        extra_context={'role': role},
    )
def init_cloud_command(
    command_name,
    paco_ctx,
    verbose,
    nocache,
    yes,
    disable_validation,
    quiet_changes_only,
    config_scope,
    home
):
    """Initialize a Paco cloud command from CLI options.

    Copies the CLI flags onto the PacoContext, resolves the Paco home
    directory, validates the CONFIG_SCOPE string (raising InvalidPacoScope
    with a usage message for bad scopes), loads the Paco project, resolves
    the scope to a model object and prints a summary of it.

    Returns a (controller_type, obj) tuple: the controller name derived
    from the scope and the selected model object.
    """
    # copy CLI flags onto the context so downstream code can read them
    paco_ctx.verbose = verbose
    paco_ctx.nocache = nocache
    paco_ctx.yes = yes
    paco_ctx.disable_validation = disable_validation
    paco_ctx.quiet_changes_only = quiet_changes_only
    paco_ctx.command = command_name
    init_paco_home_option(paco_ctx, home)
    if not paco_ctx.home:
        raise InvalidPacoHome('Paco configuration directory needs to be specified with either --home or PACO_HOME environment variable.')

    # Inform about invalid scopes before trying to load the Paco project
    scopes = config_scope.split('.')
    if scopes[0] not in ('accounts', 'netenv', 'resource', 'service'):
        raise InvalidPacoScope(
"""'{}' is not a valid top-level CONFIG_SCOPE for '{}'.
This must start with one of: accounts, netenv, resource or service.

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""".format(scopes[0], config_scope)
        )
    if config_scope.startswith('accounts.'):
        raise InvalidPacoScope(
"""The accounts scope can only refer to the top-level 'accounts' and applies account actions
to all accounts listed in the organization_account_ids: field in the 'accounts/master.yaml' file.

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
        )
    if config_scope.startswith('netenv'):
        parts = config_scope.split('.')
        if len(parts) < 3:
            raise InvalidPacoScope(
"""A netenv CONFIG_SCOPE must specify a minimum of a NetworkEnvironment name and Environment name, for example:

netenv.mynet.dev
netenv.mynet.prod
netenv.mynet.prod.us-west-2
netenv.mynet.test.us-west-2.applications.myapp
netenv.mynet.test.us-west-2.applications.myapp.groups.cicd
netenv.mynet.test.us-west-2.applications.myapp.groups.servers.resources.web

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
            )
    if config_scope.startswith('resource'):
        parts = config_scope.split('.')
        if len(parts) == 1:
            raise InvalidPacoScope(
"""A resource CONFIG_SCOPE must specify a minimum of a global Resource type, for example:

resource.codecommit
resource.ec2

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
            )
    if config_scope.lower().startswith('resource.codecommit'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""A CodeCommit Resource CONFIG_SCOPE can only apply to all CodeCommit repos:

resource.codecommit

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
            )
    if config_scope.lower().startswith('resource.snstopics') or config_scope.lower().startswith('resource.notificationgroups'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""An SNSTopics resource CONFIG_SCOPE can only apply to all SNS Topics:

resource.snstopics

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
            )
    if config_scope.lower().startswith('resource.route53'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""A Route 53 resource CONFIG_SCOPE can only apply to all Route 53 configuration:

resource.route53

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
            )
    if config_scope.lower().startswith('resource.s3'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""A S3 resource CONFIG_SCOPE can only apply to all S3 Buckets:

resource.s3

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

"""
            )

    # suppress noisy warnings emitted while loading the project
    import warnings
    warnings.simplefilter("ignore")
    paco_ctx.load_project()

    # resource.snstopics is an alias for resource.notificationgroups
    if config_scope.startswith('resource.snstopics'):
        config_scope = 'resource.notificationgroups' + config_scope[len('resource.snstopics'):]

    # the controller name is the second scope part for resources,
    # the first part otherwise
    scope_parts = config_scope.split('.')
    if scope_parts[0] == 'resource':
        controller_type = scope_parts[1]
    else:
        controller_type = scope_parts[0]

    # Locate a model object and summarize it
    paco_ref = 'paco.ref {}'.format(config_scope)
    obj = get_model_obj_from_ref(paco_ref, paco_ctx.project)
    print('Object selected to {}:'.format(command_name))
    print(' Name: {}'.format(getattr(obj, 'name', 'unnamed')))
    print(' Type: {}'.format(obj.__class__.__name__))
    if getattr(obj, 'title', None):
        print(' Title: {}'.format(obj.title))
    if hasattr(obj, 'paco_ref_parts'):
        print(' Reference: {}'.format(obj.paco_ref_parts))
    return controller_type, obj
def __init__(self, stack, paco_ctx, role):
    """Build the CloudFormation template for an AWS Backup Vault.

    Generates the BackupVault resource (with optional SNS notifications),
    its Outputs, and for each plan in the vault a BackupPlan resource plus
    one BackupSelection per selection (tag-based and/or resource-based).

    :param stack: the Stack whose .resource is the BackupVault model object
    :param paco_ctx: the PacoContext
    :param role: the Backup service Role, or None.
        NOTE(review): service_role_arn_param is only bound when role is not
        None, but it is referenced unconditionally for every plan selection
        below — a vault with plan selections and role=None would raise
        NameError. Confirm callers always pass a role when plans exist.
    """
    vault = stack.resource
    super().__init__(stack, paco_ctx)
    self.set_aws_name(vault.name)
    self.init_template('Backup Vault: ' + vault.name)
    self.paco_ctx.log_action_col("Init", "Backup", "Vault")
    # disabled vault: leave the template empty
    if not vault.is_enabled():
        return

    # Service Role ARN parameter
    if role != None:
        service_role_arn_param = self.create_cfn_parameter(
            param_type='String',
            name='ServiceRoleArn',
            description='The Backup service Role to assume',
            value=role.get_arn())

    # BackupVault resource
    cfn_export_dict = {}
    cfn_export_dict['BackupVaultName'] = vault.name

    # BackupVault Notifications
    if vault.notification_events:
        # resolve the SNS Topic ARN computed for this account/region
        notification_paco_ref = self.paco_ctx.project['resource']['sns'].computed[
            self.account_ctx.name][stack.aws_region][vault.notification_group].paco_ref + '.arn'
        # hash the ref into the Parameter name so it stays a valid logical id
        param_name = 'Notification{}'.format(utils.md5sum(str_data=notification_paco_ref))
        notification_param = self.create_cfn_parameter(
            param_type='String',
            name=param_name,
            description='SNS Topic to notify',
            value=notification_paco_ref,
            min_length=1,  # prevent borked empty values from breaking notification
        )
        cfn_export_dict['Notifications'] = {
            'BackupVaultEvents': vault.notification_events,
            'SNSTopicArn': troposphere.Ref(notification_param)
        }

    vault_logical_id = 'BackupVault'
    vault_resource = troposphere.backup.BackupVault.from_dict(
        vault_logical_id, cfn_export_dict)
    self.template.add_resource(vault_resource)

    # BackupVault Outputs
    self.create_output(
        title='BackupVaultName',
        value=troposphere.GetAtt(vault_resource, 'BackupVaultName'),
        ref=vault.paco_ref_parts + '.name',
    )
    self.create_output(
        title='BackupVaultArn',
        value=troposphere.GetAtt(vault_resource, 'BackupVaultArn'),
        ref=vault.paco_ref_parts + '.arn')

    # BackupPlans
    for plan in vault.plans.values():
        # PlanRules
        rules_list = []
        for rule in plan.plan_rules:
            rule_dict = {
                'RuleName': rule.title_or_name,
                'TargetBackupVault': vault.name
            }
            if rule.schedule_expression:
                rule_dict['ScheduleExpression'] = rule.schedule_expression
            # only emit a Lifecycle block when at least one setting is present
            if rule.lifecycle_delete_after_days != None or rule.lifecycle_move_to_cold_storage_after_days != None:
                lifecycle_dict = {}
                if rule.lifecycle_delete_after_days != None:
                    lifecycle_dict['DeleteAfterDays'] = rule.lifecycle_delete_after_days
                if rule.lifecycle_move_to_cold_storage_after_days != None:
                    lifecycle_dict['MoveToColdStorageAfterDays'] = rule.lifecycle_move_to_cold_storage_after_days
                rule_dict['Lifecycle'] = lifecycle_dict
            rules_list.append(rule_dict)
        cfn_export_dict = {
            'BackupPlan': {
                'BackupPlanName': plan.name,
                'BackupPlanRule': rules_list
            }
        }
        plan_logical_id = self.create_cfn_logical_id('BackupPlan' + plan.name)
        plan_resource = troposphere.backup.BackupPlan.from_dict(
            plan_logical_id, cfn_export_dict)
        plan_resource.DependsOn = [vault_resource.title]
        self.template.add_resource(plan_resource)

        # PlanSelection resources
        idx = 0
        for selection in plan.selections:
            cfn_export_dict = {
                'BackupPlanId': troposphere.Ref(plan_resource),
                'BackupSelection': {}
            }
            # Tag-based selections
            tags_list = []
            for tag in selection.tags:
                tag_dict = {}
                tag_dict['ConditionKey'] = tag.condition_key
                tag_dict['ConditionType'] = tag.condition_type
                tag_dict['ConditionValue'] = tag.condition_value
                tags_list.append(tag_dict)
            if tags_list:
                cfn_export_dict['BackupSelection']['ListOfTags'] = tags_list
            if selection.title:
                cfn_export_dict['BackupSelection']['SelectionName'] = "Selection: " + selection.title
            # Resource-based selections
            if selection.resources:
                resource_arns = []
                for paco_ref in selection.resources:
                    from paco.models.references import get_model_obj_from_ref
                    obj = get_model_obj_from_ref(paco_ref, self.paco_ctx.project)
                    resource_arns.append(obj.get_arn())
                cfn_export_dict['BackupSelection']['Resources'] = resource_arns
            # Role (see NOTE in the docstring: requires role != None)
            cfn_export_dict['BackupSelection']['IamRoleArn'] = troposphere.Ref(service_role_arn_param)
            selection_logical_id = self.create_cfn_logical_id(
                'BackupPlan{}Selection{}'.format(plan.name, idx))
            selection_resource = troposphere.backup.BackupSelection.from_dict(
                selection_logical_id, cfn_export_dict)
            selection_resource.DependsOn = [plan_resource.title]
            self.template.add_resource(selection_resource)
            idx += 1
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group, stack_tags, env_ctx, app_id, grp_id, cdapp, role):
    """Build the CloudFormation template for a CodeDeploy Application.

    Generates the CodeDeploy Application resource, one DeploymentGroup per
    enabled deployment group (with optional S3 revision location and
    AutoScalingGroup membership) and any user-defined IAM Policies attached
    to the CodeDeploy service Role.

    Fixes applied:
    - the user-defined Policy referenced an undefined name
      ``service_role_resource``; the only role value in scope is
      ``service_role_arn_param`` and it is now referenced instead.
      NOTE(review): AWS::IAM::Policy ``Roles`` expects role *names* while
      this Parameter carries the role ARN — confirm the intended value.
    - ``deploy_group_resource.DependsOn = policy_resource`` replaced the
      DependsOn list (dropping the dependency on the Application) with a
      bare resource object; it now appends the policy's title.
    """
    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        enabled=cdapp.is_enabled(),
        config_ref=cdapp.paco_ref_parts,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
        stack_group=stack_group,
        stack_tags=stack_tags)
    self.env_ctx = env_ctx
    self.set_aws_name('CodeDeployApplication', grp_id, cdapp.name)
    self.init_template('CodeDeploy Application')
    self.res_name_prefix = self.create_resource_name_join(
        name_list=[self.env_ctx.get_aws_name(), app_id, grp_id, cdapp.name],
        separator='-',
        camel_case=True)

    # Service Role ARN parameter
    service_role_arn_param = self.create_cfn_parameter(
        param_type='String',
        name='ServiceRoleArn',
        description='The codedeploy service Role to assume.',
        value=role.get_arn(),
    )

    # CodeDeploy Application
    cdapp_resource = troposphere.codedeploy.Application(
        'CodeDeployApplication',
        ComputePlatform=cdapp.compute_platform)
    self.template.add_resource(cdapp_resource)

    # DeploymentGroup resources
    for deploy_group in cdapp.deployment_groups.values():
        if not deploy_group.is_enabled():
            continue
        # Deployment configuration
        deploy_group_logical_id = self.create_cfn_logical_id('DeploymentGroup' + deploy_group.name)
        deployment_dict = {
            'Description': deploy_group.title_or_name,
        }
        if deploy_group.ignore_application_stop_failures:
            deployment_dict['IgnoreApplicationStopFailures'] = deploy_group.ignore_application_stop_failures
        if deploy_group.revision_location_s3:
            s3bucket = get_model_obj_from_ref(
                deploy_group.revision_location_s3.bucket,
                self.paco_ctx.project)
            deployment_dict['Revision'] = {
                'S3Location': {
                    'Bucket': s3bucket.get_aws_name(),
                    'Key': deploy_group.revision_location_s3.key,
                },
                'RevisionType': 'S3'
            }
            if deploy_group.revision_location_s3.bundle_type:
                deployment_dict['Revision']['S3Location']['BundleType'] = deploy_group.revision_location_s3.bundle_type
        cfn_export_dict = {
            'Deployment': deployment_dict,
            'ApplicationName': troposphere.Ref(cdapp_resource),
            'ServiceRoleArn': troposphere.Ref(service_role_arn_param),
        }
        if deploy_group.autoscalinggroups:
            cfn_export_dict['AutoScalingGroups'] = []
            for asg_ref in deploy_group.autoscalinggroups:
                asg = get_model_obj_from_ref(asg_ref, self.paco_ctx.project)
                cfn_export_dict['AutoScalingGroups'].append(asg.get_aws_name())
        deploy_group_resource = troposphere.codedeploy.DeploymentGroup.from_dict(
            deploy_group_logical_id, cfn_export_dict)
        self.template.add_resource(deploy_group_resource)
        deploy_group_resource.DependsOn = []
        deploy_group_resource.DependsOn.append(cdapp_resource.title)

        # User-defined Policies
        for policy in deploy_group.role_policies:
            policy_name = self.create_resource_name_join(
                name_list=[self.res_name_prefix, 'CodeDeploy', deploy_group.name, policy.name],
                separator='-',
                filter_id='IAM.Policy.PolicyName',
                hash_long_names=True,
                camel_case=True)
            statement_list = []
            for statement in policy.statement:
                action_list = []
                for action in statement.action:
                    # split 'service:ActionName' into awacs Action parts
                    action_parts = action.split(':')
                    action_list.append(Action(action_parts[0], action_parts[1]))
                statement_list.append(
                    Statement(
                        Effect=statement.effect,
                        Action=action_list,
                        Resource=statement.resource))
            policy_resource = troposphere.iam.PolicyType(
                title=self.create_cfn_logical_id('CodeDeployPolicy' + policy.name, camel_case=True),
                PolicyName=policy_name,
                PolicyDocument=PolicyDocument(Statement=statement_list, ),
                # fixed: was Ref(service_role_resource), an undefined name
                Roles=[troposphere.Ref(service_role_arn_param)])
            self.template.add_resource(policy_resource)
            # fixed: was `DependsOn = policy_resource`, which clobbered the list
            deploy_group_resource.DependsOn.append(policy_resource.title)

    # All done, let's go home!
    self.set_template()
def script_manager_ecr_deploy(self, ecr_deploy_group, asg_dict, asg_config, template):
    """Grant an ASG's instance Role the IAM permissions for ECR deploy /
    ECS release-phase scripts and tag the ASG with the target ECS ids.

    For each ecr_deploy entry with release-phase ECS commands: collects the
    source/dest ECR repository ARNs and adds pull/push statements; for each
    ECS command adds Parameters for the cluster/service, appends
    PACO_CB_RP_ECS_* tags to the ASG dict, and adds SSM SendCommand and ECS
    access statements scoped to the cluster (de-duplicated per cluster).
    Finally attaches all statements as one ManagedPolicy to the instance
    Role.

    NOTE(review): several Sid strings use f-prefixes with no placeholders,
    'ECSRelasePhase...' Sids are misspelled, and the fixed Sids
    'ScriptManagerECRDeployPull'/'Push' would repeat (invalid policy) if
    more than one ecr_deploy entry qualifies — confirm and clean up.
    """
    policy_statements = []
    for ecr_deploy_name in ecr_deploy_group.keys():
        ecr_deploy = ecr_deploy_group[ecr_deploy_name]
        if ecr_deploy == None:
            continue
        if ecr_deploy and len(ecr_deploy.release_phase.ecs) > 0:
            # Build pull (source) and push (dest) repository ARN lists
            pull_repos = []
            push_repos = []
            for repository in ecr_deploy.repositories:
                source_ecr_obj = get_model_obj_from_ref(repository.source_repo, self.paco_ctx.project)
                source_env = get_parent_by_interface(source_ecr_obj, schemas.IEnvironmentRegion)
                source_account_id = self.paco_ctx.get_ref(source_env.network.aws_account+".id")
                dest_ecr_obj = get_model_obj_from_ref(repository.dest_repo, self.paco_ctx.project)
                dest_env = get_parent_by_interface(dest_ecr_obj, schemas.IEnvironmentRegion)
                dest_account_id = self.paco_ctx.get_ref(dest_env.network.aws_account+".id")
                pull_repo_arn = f'arn:aws:ecr:{source_env.region}:{source_account_id}:repository/{source_ecr_obj.repository_name}'
                push_repo_arn = f'arn:aws:ecr:{dest_env.region}:{dest_account_id}:repository/{dest_ecr_obj.repository_name}'
                pull_repos.append(pull_repo_arn)
                push_repos.append(push_repo_arn)
            policy_statements.append(
                Statement(
                    Sid=f'ScriptManagerECRDeployPull',
                    Effect=Allow,
                    Action=[
                        Action('ecr', 'GetDownloadUrlForLayer'),
                        Action('ecr', 'BatchGetImage'),
                    ],
                    Resource=pull_repos
                )
            )
            policy_statements.append(
                Statement(
                    Sid=f'ScriptManagerECRDeployPush',
                    Effect=Allow,
                    Action=[
                        Action('ecr', 'GetDownloadUrlForLayer'),
                        Action('ecr', 'BatchCheckLayerAvailability'),
                        Action('ecr', 'PutImage'),
                        Action('ecr', 'InitiateLayerUpload'),
                        Action('ecr', 'UploadLayerPart'),
                        Action('ecr', 'CompleteLayerUpload'),
                    ],
                    Resource=push_repos
                )
            )
            # per-cluster statements are added only once per distinct cluster
            iam_cluster_cache = []
            idx = 0
            for command in ecr_deploy.release_phase.ecs:
                service_obj = get_model_obj_from_ref(command.service, self.paco_ctx.project)
                ecs_services_obj = get_parent_by_interface(service_obj, schemas.IECSServices)
                ecs_release_phase_cluster_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseClusterArn{idx}',
                    description=f'ECS Release Phase Cluster Arn {idx}',
                    value=ecs_services_obj.cluster + '.arn'
                )
                ecs_release_phase_cluster_name_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseClusterName{idx}',
                    description=f'ECS Release Phase Cluster Name {idx}',
                    value=ecs_services_obj.cluster + '.name'
                )
                # NOTE(review): description below says 'Cluster Name' but this
                # parameter carries the *service* name — looks copy-pasted.
                ecs_release_phase_service_name_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseServiceName{idx}',
                    description=f'ECS Release Phase Cluster Name {idx}',
                    value=command.service + '.name'
                )
                # tag the ASG so instance scripts can discover the target cluster/service
                ecs_cluster_asg_tag = troposphere.autoscaling.Tag(
                    f'PACO_CB_RP_ECS_CLUSTER_ID_{idx}',
                    troposphere.Ref(ecs_release_phase_cluster_name_param),
                    True
                )
                ecs_service_asg_tag = troposphere.autoscaling.Tag(
                    f'PACO_CB_RP_ECS_SERVICE_ID_{idx}',
                    troposphere.Ref(ecs_release_phase_service_name_param),
                    True
                )
                asg_dict['Tags'].append(ecs_cluster_asg_tag)
                asg_dict['Tags'].append(ecs_service_asg_tag)
                if ecs_services_obj.cluster not in iam_cluster_cache:
                    # SSM SendCommand restricted to instances tagged with this cluster name
                    policy_statements.append(
                        Statement(
                            Sid=f'ECSReleasePhaseSSMSendCommand{idx}',
                            Effect=Allow,
                            Action=[
                                Action('ssm', 'SendCommand'),
                            ],
                            Resource=[
                                'arn:aws:ec2:*:*:instance/*'
                            ],
                            Condition=Condition(
                                StringLike({
                                    'ssm:resourceTag/Paco-ECSCluster-Name': troposphere.Ref(ecs_release_phase_cluster_name_param)
                                })
                            )
                        )
                    )
                    # ECS access restricted to this cluster via ecs:cluster condition
                    policy_statements.append(
                        Statement(
                            Sid=f'ECSRelasePhaseClusterAccess{idx}',
                            Effect=Allow,
                            Action=[
                                Action('ecs', 'DescribeServices'),
                                Action('ecs', 'RunTask'),
                                Action('ecs', 'StopTask'),
                                Action('ecs', 'DescribeContainerInstances'),
                                Action('ecs', 'ListTasks'),
                                Action('ecs', 'DescribeTasks'),
                            ],
                            Resource=[
                                '*'
                            ],
                            Condition=Condition(
                                StringEquals({
                                    'ecs:cluster': troposphere.Ref(ecs_release_phase_cluster_arn_param)
                                })
                            )
                        )
                    )
                    iam_cluster_cache.append(ecs_services_obj.cluster)
                idx += 1
    # NOTE(review): the resource below looks like an incomplete ARN
    # (empty region/account, trailing '/') — confirm the intended automation
    # definition ARN.
    policy_statements.append(
        Statement(
            Sid='ECSReleasePhaseSSMAutomationExecution',
            Effect=Allow,
            Action=[
                Action('ssm', 'StartAutomationExecution'),
                Action('ssm', 'StopAutomationExecution'),
                Action('ssm', 'GetAutomationExecution'),
            ],
            Resource=[
                'arn:aws:ssm:::automation-definition/'
            ]
        )
    )
    # ECS Policies
    policy_statements.append(
        Statement(
            Sid='ECSRelasePhaseECS',
            Effect=Allow,
            Action=[
                Action('ecs', 'DescribeTaskDefinition'),
                Action('ecs', 'DeregisterTaskDefinition'),
                Action('ecs', 'RegisterTaskDefinition'),
                Action('ecs', 'ListTagsForResource'),
                Action('ecr', 'DescribeImages')
            ],
            Resource=[
                '*'
            ]
        )
    )
    # allow invoking the paco_ecs_docker_exec SSM Document in this account/region
    policy_statements.append(
        Statement(
            Sid=f'ECSReleasePhaseSSMSendCommandDocument',
            Effect=Allow,
            Action=[
                Action('ssm', 'SendCommand'),
            ],
            Resource=[
                f'arn:aws:ssm:{self.aws_region}:{self.account_ctx.get_id()}:document/paco_ecs_docker_exec'
            ]
        )
    )
    policy_statements.append(
        Statement(
            Sid='ECSReleasePhaseSSMCore',
            Effect=Allow,
            Action=[
                Action('ssm', 'ListDocuments'),
                Action('ssm', 'ListDocumentVersions'),
                Action('ssm', 'DescribeDocument'),
                Action('ssm', 'GetDocument'),
                Action('ssm', 'DescribeInstanceInformation'),
                Action('ssm', 'DescribeDocumentParameters'),
                Action('ssm', 'CancelCommand'),
                Action('ssm', 'ListCommands'),
                Action('ssm', 'ListCommandInvocations'),
                Action('ssm', 'DescribeAutomationExecutions'),
                Action('ssm', 'DescribeInstanceProperties'),
                Action('ssm', 'GetCommandInvocation'),
                Action('ec2', 'DescribeInstanceStatus'),
                Action('ecr', 'GetAuthorizationToken')
            ],
            Resource=[
                '*'
            ]
        )
    )
    # NOTE(review): iam:PassRole on '*' is very broad — consider scoping.
    policy_statements.append(
        Statement(
            Sid='IAMPassRole',
            Effect=Allow,
            Action=[
                Action('iam', 'passrole')
            ],
            Resource=[
                '*'
            ]
        )
    )
    # attach everything as a single ManagedPolicy on the instance Role
    ecs_release_phase_project_policy_res = troposphere.iam.ManagedPolicy(
        title='ECSReleasePhase',
        PolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=policy_statements
        ),
        Roles=[self.instance_iam_role_name]
    )
    template.add_resource(ecs_release_phase_project_policy_res)
def set_ecr_repositories_statements(self, ecr_repositories, template, policy_name_prefix, roles):
    """Grant `roles` access to ECR repositories via AWS::IAM::Policy resources.

    For every distinct repository referenced by `ecr_repositories`, a CFN
    Parameter holding that repository's ARN is created. Then one Policy per
    permission entry is added to `template`, granting Pull, Push or
    PushAndPull actions against the repository plus `ecr:GetAuthorizationToken`.

    :param ecr_repositories: iterable of ECR permission model objects (may be None/empty)
    :param template: troposphere Template the Policy resources are added to
    :param policy_name_prefix: prefix used when building each PolicyName
    :param roles: list of IAM Role names the policies attach to
    """
    # nothing to do when there are no ECR permissions at all
    if not ecr_repositories:
        return
    pull_actions = [
        Action('ecr', 'GetDownloadUrlForLayer'),
        Action('ecr', 'BatchGetImage'),
    ]
    push_actions = [
        Action('ecr', 'GetDownloadUrlForLayer'),
        Action('ecr', 'BatchCheckLayerAvailability'),
        Action('ecr', 'PutImage'),
        Action('ecr', 'InitiateLayerUpload'),
        Action('ecr', 'UploadLayerPart'),
        Action('ecr', 'CompleteLayerUpload'),
    ]
    # Dispatch table replaces the previous if/elif chain. An unrecognized
    # permission value falls through and grants only GetAuthorizationToken,
    # exactly as before.
    actions_by_permission = {
        'Pull': pull_actions,
        'Push': push_actions,
        'PushAndPull': pull_actions + push_actions,
    }
    # one ARN Parameter per distinct repository
    ecr_params = {}
    for ecr_permission in ecr_repositories:
        ecr_repo = get_model_obj_from_ref(ecr_permission.repository, self.paco_ctx.project)
        if ecr_repo.paco_ref not in ecr_params:
            param_name = ecr_repo.create_cfn_logical_id()
            ecr_repo_name_param = self.create_cfn_parameter(
                param_type='String',
                name=f'{param_name}ARN',
                description='The ARN of the ECR repository',
                value=ecr_repo.paco_ref + '.arn',
            )
            ecr_params[ecr_repo.paco_ref] = ecr_repo_name_param
    # one AWS::IAM::Policy per permission entry
    for index, ecr_permission in enumerate(ecr_repositories):
        perm_name = f'PacoEcr{index}'
        policy_name = self.create_resource_name_join(
            name_list=[policy_name_prefix, perm_name],
            separator='-',
            filter_id='IAM.Policy.PolicyName',
            hash_long_names=True,
            camel_case=True
        )
        # every policy may obtain an ECR auth token
        statement_list = [
            Statement(
                Effect='Allow',
                Action=[
                    Action('ecr', 'GetAuthorizationToken'),
                ],
                Resource=['*'],
            ),
        ]
        ecr_repo = get_model_obj_from_ref(ecr_permission.repository, self.paco_ctx.project)
        repo_actions = actions_by_permission.get(ecr_permission.permission)
        if repo_actions is not None:
            statement_list.append(
                Statement(
                    Effect='Allow',
                    Action=repo_actions,
                    Resource=[
                        troposphere.Ref(ecr_params[ecr_repo.paco_ref])
                    ],
                )
            )
        troposphere.iam.PolicyType(
            title=self.create_cfn_logical_id('CodeBuildProjectPolicy' + perm_name, camel_case=True),
            template=template,
            PolicyName=policy_name,
            PolicyDocument=PolicyDocument(
                Statement=statement_list,
            ),
            Roles=roles
        )
def init_cloud_command(command_name, paco_ctx, verbose, nocache, yes, warn, disable_validation, quiet_changes_only, hooks_only, config_scope, home):
    """Applies cloud options and verifies that the command is sane. Loads the model and reports on it.

    Copies the CLI flags onto `paco_ctx`, resolves and validates PACO_HOME,
    rejects malformed CONFIG_SCOPE values *before* loading the (expensive)
    Paco project, optionally enforces git-branch/environment mapping, and
    finally resolves the scope to a model object.

    Returns:
        (controller_type, obj): the controller name derived from the scope
        and the model object the scope refers to.

    Raises:
        InvalidPacoHome: no --home flag and no PACO_HOME environment variable.
        InvalidPacoScope: CONFIG_SCOPE does not name a valid scope.
        InvalidVersionControl: enforce_branch_environments checks fail.
    """
    # carry the CLI options on the context object for downstream commands
    paco_ctx.verbose = verbose
    paco_ctx.nocache = nocache
    paco_ctx.yes = yes
    paco_ctx.warn = warn
    paco_ctx.disable_validation = disable_validation
    paco_ctx.quiet_changes_only = quiet_changes_only
    paco_ctx.hooks_only = hooks_only
    paco_ctx.command = command_name
    paco_ctx.config_scope = config_scope
    init_paco_home_option(paco_ctx, home)
    if not paco_ctx.home:
        raise InvalidPacoHome('Paco configuration directory needs to be specified with either --home or PACO_HOME environment variable.')
    load_paco_config_options(paco_ctx)

    # Inform about invalid scopes before trying to load the Paco project
    scopes = config_scope.split('.')
    if scopes[0] not in ('accounts', 'netenv', 'resource', 'service'):
        raise InvalidPacoScope(
"""'{}' is not a valid top-level CONFIG_SCOPE for '{}'. This must start with one of: accounts, netenv, resource or service.

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""".format(scopes[0], config_scope))
    # accounts scope may only be the bare top-level 'accounts'
    if config_scope.startswith('accounts.'):
        raise InvalidPacoScope(
"""The accounts scope can only refer to the top-level 'accounts' and applies account actions to all accounts listed in the organization_account_ids: field in the 'accounts/master.yaml' file.

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")
    # netenv scope needs at least netenv.<name>.<environment>
    if config_scope.startswith('netenv'):
        parts = config_scope.split('.')
        if len(parts) < 3:
            raise InvalidPacoScope(
"""A netenv CONFIG_SCOPE must specify a minimum of a NetworkEnvironment name and Environment name, for example:

netenv.mynet.dev
netenv.mynet.prod
netenv.mynet.prod.us-west-2
netenv.mynet.test.us-west-2.applications.myapp
netenv.mynet.test.us-west-2.applications.myapp.groups.cicd
netenv.mynet.test.us-west-2.applications.myapp.groups.servers.resources.web

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")
    # resource scope needs at least a Resource type
    if config_scope.startswith('resource'):
        parts = config_scope.split('.')
        if len(parts) == 1:
            raise InvalidPacoScope(
"""A resource CONFIG_SCOPE must specify a minimum of a global Resource type, for example:

resource.codecommit
resource.ec2

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")
    # the following global resources do not allow scoping below the type itself
    if config_scope.lower().startswith('resource.codecommit'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""A CodeCommit Resource CONFIG_SCOPE can only apply to all CodeCommit repos:

resource.codecommit

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")
    if config_scope.lower().startswith('resource.snstopics'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""An SNSTopics resource CONFIG_SCOPE can only apply to all SNS Topics:

resource.snstopics

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")
    if config_scope.lower().startswith('resource.route53'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""A Route 53 resource CONFIG_SCOPE can only apply to all Route 53 configuration:

resource.route53

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")
    if config_scope.lower().startswith('resource.s3'):
        parts = config_scope.split('.')
        if len(parts) > 2:
            raise InvalidPacoScope(
"""A S3 resource CONFIG_SCOPE can only apply to all S3 Buckets:

resource.s3

See the Paco CLI config scope docs at https://www.paco-cloud.io/en/latest//cli.html#config-scope

""")

    paco_ctx.load_project()

    # Perform VCS checks if enforce_branch_environments is enabled
    if paco_ctx.project.version_control.enforce_branch_environments:
        vc_config = paco_ctx.project.version_control
        # Import git and test if it can find a valid git
        try:
            from git import Repo as GitRepo
            from git.exc import InvalidGitRepositoryError
        except ImportError:
            raise InvalidVersionControl("""This Paco project has version_control.enforce_branch_environments enabled in it's project.yaml file.
Could not find a git executable. Either disable or git must be included in your $PATH or set via $GIT_PYTHON_GIT_EXECUTABLE.""")
        try:
            repo = GitRepo(paco_ctx.home, search_parent_directories=False)
            branch_name = repo.active_branch.name
        except InvalidGitRepositoryError:
            raise InvalidVersionControl("""This Paco project has version_control.enforce_branch_environments enabled in it's project.yaml file.
This Paco project is not under version control? Either put the project into a git repo or disable enforce_branch_environments.""")
        except TypeError:
            # GitPython raises TypeError from active_branch in detached-HEAD state
            raise InvalidVersionControl("""This Paco project has version_control.enforce_branch_environments enabled in it's project.yaml file.
Unable to retrieve the current git branch name. This can occur when git is in a detached-head state.
Either disable enforce_branch_environments or change your git state.""")
        # set-up override mappings
        mappings = {}
        for mapping in vc_config.git_branch_environment_mappings:
            environment, branch = mapping.split(':')
            mappings[environment] = branch
        # check branch vs netenv environment to see if they match
        if config_scope.startswith('netenv.'):
            env_name = config_scope.split('.')[2]
            if env_name in mappings:
                expected_branch_name = mappings[env_name]
            else:
                expected_branch_name = vc_config.environment_branch_prefix + env_name
            if expected_branch_name != branch_name:
                raise InvalidVersionControl("""This Paco project has version_control.enforce_branch_environments enabled in it's project.yaml file.
Expected to be on branch named '{}' for environment '{}', but the active branch is '{}'.""".format(expected_branch_name, env_name, branch_name))
        # or if outside a netenv check against the global environment name
        else:
            expected_branch_name = vc_config.environment_branch_prefix + vc_config.global_environment_name
            if branch_name != expected_branch_name:
                raise InvalidVersionControl("""This Paco project has version_control.enforce_branch_environments enabled in it's project.yaml file.
Expected to be on branch named '{}' for a change with a global scope of '{}', but the active branch is '{}'.""".format(expected_branch_name, config_scope, branch_name))

    # controller type comes from the second scope part for global resources,
    # otherwise from the first part (accounts/netenv/service)
    scope_parts = config_scope.split('.')
    if scope_parts[0] == 'resource':
        controller_type = scope_parts[1]
    else:
        controller_type = scope_parts[0]
    # resolve the scope to the model object it refers to
    paco_ref = 'paco.ref {}'.format(config_scope)
    obj = get_model_obj_from_ref(paco_ref, paco_ctx.project)
    return controller_type, obj
def init_action_s3_deploy(self, stage, action):
    "Initialize an IAM Role stack to allow access to the S3 Bucket for the action"
    # Register the action's bucket under its owning account so a deploy Role
    # can later be created per account; the value is filled in elsewhere.
    deploy_bucket = get_model_obj_from_ref(action.bucket, self.paco_ctx.project)
    account_buckets = self.s3deploy_bucket_refs.setdefault(deploy_bucket.account, {})
    account_buckets[action.bucket] = None
def __init__(
    self,
    stack,
    paco_ctx,
):
    """Create the CloudFormation template for a Lambda Function resource.

    Builds CFN Parameters from the Paco Lambda model, the Function resource
    itself (code from an S3 bucket, an inline ZipFile, or a Paco-managed
    artifact directory), an optional SimpleDB cache domain, invoke
    Permissions (SNS, S3 notifications, Events Rules, IoT Analytics),
    LogGroups plus a ManagedPolicy letting the function write to them, and
    Outputs for the function name and ARN.
    """
    super().__init__(
        stack,
        paco_ctx,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    account_ctx = stack.account_ctx
    aws_region = stack.aws_region
    self.set_aws_name('Lambda', self.resource_group_name, self.resource_name)
    awslambda = self.awslambda = self.stack.resource
    self.init_template('Lambda Function')

    # if not enabled finish with only empty placeholder
    if not awslambda.is_enabled():
        return

    # Parameters
    # registered for template visibility only; the sdb_cache flag itself is
    # read directly from the model below
    self.create_cfn_parameter(
        name='EnableSDBCache',
        param_type='String',
        description='Boolean indicating whether an SDB Domain will be created to be used as a cache.',
        value=awslambda.sdb_cache
    )
    function_description_param = self.create_cfn_parameter(
        name='FunctionDescription',
        param_type='String',
        description='A description of the Lamdba Function.',
        value=awslambda.description
    )
    handler_param = self.create_cfn_parameter(
        name='Handler',
        param_type='String',
        description='The name of the function to call upon execution.',
        value=awslambda.handler
    )
    runtime_param = self.create_cfn_parameter(
        name='Runtime',
        param_type='String',
        description='The name of the runtime language.',
        value=awslambda.runtime
    )
    role_arn_param = self.create_cfn_parameter(
        name='RoleArn',
        param_type='String',
        description='The execution role for the Lambda Function.',
        value=awslambda.iam_role.get_arn()
    )
    role_name_param = self.create_cfn_parameter(
        name='RoleName',
        param_type='String',
        description='The execution role name for the Lambda Function.',
        value=awslambda.iam_role.resolve_ref_obj.role_name
    )
    memory_size_param = self.create_cfn_parameter(
        name='MemorySize',
        param_type='Number',
        description="The amount of memory that your function has access to. Increasing the function's" + \
        " memory also increases its CPU allocation. The default value is 128 MB. The value must be a multiple of 64 MB.",
        value=awslambda.memory_size
    )
    reserved_conc_exec_param = self.create_cfn_parameter(
        name='ReservedConcurrentExecutions',
        param_type='Number',
        description='The number of simultaneous executions to reserve for the function.',
        value=awslambda.reserved_concurrent_executions
    )
    timeout_param = self.create_cfn_parameter(
        name='Timeout',
        param_type='Number',
        description='The amount of time that Lambda allows a function to run before stopping it. ',
        value=awslambda.timeout
    )
    layers_param = self.create_cfn_parameter(
        name='Layers',
        param_type='CommaDelimitedList',
        description='List of up to 5 Lambda Layer ARNs.',
        value=','.join(awslambda.layers)
    )

    # create the Lambda resource
    cfn_export_dict = {
        'Description': troposphere.Ref(function_description_param),
        'Handler': troposphere.Ref(handler_param),
        'MemorySize': troposphere.Ref(memory_size_param),
        'Runtime': troposphere.Ref(runtime_param),
        'Role': troposphere.Ref(role_arn_param),
        'Timeout': troposphere.Ref(timeout_param),
    }
    if awslambda.reserved_concurrent_executions:
        # BUGFIX: a trailing comma here previously turned the value into a
        # one-element tuple instead of a Ref
        cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param)
    if len(awslambda.layers) > 0:
        # BUGFIX: same trailing-comma tuple bug as above
        cfn_export_dict['Layers'] = troposphere.Ref(layers_param)

    # Lambda VPC
    if awslambda.vpc_config is not None:
        vpc_security_group = self.create_cfn_ref_list_param(
            name='VpcSecurityGroupIdList',
            param_type='List<AWS::EC2::SecurityGroup::Id>',
            description='VPC Security Group Id List',
            value=awslambda.vpc_config.security_groups,
            ref_attribute='id',
        )
        # Segment SubnetList is a Segment stack Output based on availability zones
        segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list'
        subnet_list_param = self.create_cfn_parameter(
            name='VpcSubnetIdList',
            param_type='List<AWS::EC2::Subnet::Id>',
            description='VPC Subnet Id List',
            value=segment_ref
        )
        cfn_export_dict['VpcConfig'] = {
            'SecurityGroupIds': troposphere.Ref(vpc_security_group),
            'SubnetIds': troposphere.Ref(subnet_list_param),
        }

    # Code object: S3 Bucket, inline ZipFile or deploy artifact?
    if awslambda.code.s3_bucket:
        if awslambda.code.s3_bucket.startswith('paco.ref '):
            value = awslambda.code.s3_bucket + ".name"
        else:
            value = awslambda.code.s3_bucket
        s3bucket_param = self.create_cfn_parameter(
            name='CodeS3Bucket',
            description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.",
            param_type='String',
            value=value
        )
        s3key_param = self.create_cfn_parameter(
            name='CodeS3Key',
            description="The Amazon S3 key of the deployment package.",
            param_type='String',
            value=awslambda.code.s3_key
        )
        cfn_export_dict['Code'] = {
            'S3Bucket': troposphere.Ref(s3bucket_param),
            'S3Key': troposphere.Ref(s3key_param),
        }
    else:
        zip_path = Path(awslambda.code.zipfile)
        if zip_path.is_file():
            # a single file is embedded inline in the template
            cfn_export_dict['Code'] = {
                'ZipFile': zip_path.read_text()
            }
        elif zip_path.is_dir():
            # get S3Bucket/S3Key or if it does not exist, it will create the bucket and artifact
            # and then upload the artifact
            bucket_name, artifact_name = init_lambda_code(
                self.paco_ctx.paco_buckets,
                self.stack.resource,
                awslambda.code.zipfile,
                self.stack.account_ctx,
                self.stack.aws_region,
            )
            s3bucket_param = self.create_cfn_parameter(
                name='CodeS3Bucket',
                description="The Paco S3 Bucket for configuration",
                param_type='String',
                value=bucket_name
            )
            s3key_param = self.create_cfn_parameter(
                name='CodeS3Key',
                description="The Lambda code artifact S3 Key.",
                param_type='String',
                value=artifact_name
            )
            cfn_export_dict['Code'] = {
                'S3Bucket': troposphere.Ref(s3bucket_param),
                'S3Key': troposphere.Ref(s3key_param),
            }

    # Environment variables
    var_export = {}
    if awslambda.environment is not None and awslambda.environment.variables is not None:
        for var in awslambda.environment.variables:
            # Parameter logical ids may not contain underscores
            name = var.key.replace('_', '')
            env_param = self.create_cfn_parameter(
                name='EnvVar{}'.format(name),
                param_type='String',
                description='Env var for {}'.format(name),
                value=var.value,
            )
            var_export[var.key] = troposphere.Ref(env_param)
    if awslambda.sdb_cache == True:
        var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain')
    if len(awslambda.log_group_names) > 0:
        # Add PACO_LOG_GROUPS Environment Variable
        paco_log_groups = [
            prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            for loggroup_name in awslambda.log_group_names
        ]
        paco_log_groups_param = self.create_cfn_parameter(
            name='EnvVariablePacoLogGroups',
            param_type='String',
            description='Env var for Paco Log Groups',
            value=','.join(paco_log_groups),
        )
        var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param)
    cfn_export_dict['Environment'] = {'Variables': var_export}

    # Lambda resource
    self.awslambda_resource = troposphere.awslambda.Function.from_dict(
        'Function',
        cfn_export_dict
    )
    self.template.add_resource(self.awslambda_resource)

    # SDB Cache with SDB Domain and SDB Domain Policy resources
    if awslambda.sdb_cache == True:
        sdb_domain_resource = troposphere.sdb.Domain(
            title='LambdaSDBCacheDomain',
            template=self.template,
            Description="Lambda Function Domain"
        )
        sdb_policy = troposphere.iam.Policy(
            title='LambdaSDBCacheDomainPolicy',
            template=self.template,
            PolicyName='SDBDomain',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sdb", "*")],
                        Resource=[
                            troposphere.Sub(
                                'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}',
                                DomainName=troposphere.Ref('LambdaSDBCacheDomain')
                            )
                        ],
                    )
                ],
            ),
            # NOTE(review): this passes the role *ARN* while the LogGroup
            # ManagedPolicy below uses the role *name* — IAM Roles properties
            # expect role names; confirm whether this should be
            # troposphere.Ref(role_name_param).
            Roles=troposphere.Ref(role_arn_param)
        )
        sdb_policy.DependsOn = sdb_domain_resource
        self.awslambda_resource.DependsOn = sdb_domain_resource

    # Permissions
    # SNS Topic Lambda permissions and subscription
    for idx, sns_topic_ref in enumerate(awslambda.sns_topics, start=1):
        # SNS Topic Arn parameters
        param_name = 'SNSTopicArn%d' % idx
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='An SNS Topic ARN to grant permission to.',
            value=sns_topic_ref + '.arn'
        )
        # Lambda permission
        troposphere.awslambda.Permission(
            title=param_name + 'Permission',
            template=self.template,
            Action="lambda:InvokeFunction",
            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Principal='sns.amazonaws.com',
            SourceArn=troposphere.Ref(param_name),
        )
        # SNS Topic subscription
        sns_topic = get_model_obj_from_ref(sns_topic_ref, self.paco_ctx.project)
        troposphere.sns.SubscriptionResource(
            title=param_name + 'Subscription',
            template=self.template,
            Endpoint=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
            Protocol='lambda',
            TopicArn=troposphere.Ref(param_name),
            Region=sns_topic.region_name
        )

    # Lambda permissions for connected Paco resources
    app = get_parent_by_interface(awslambda, schemas.IApplication)
    for obj in get_all_nodes(app):
        # S3 Bucket notification permission(s)
        if schemas.IS3Bucket.providedBy(obj):
            seen = {}
            if hasattr(obj, 'notifications'):
                if hasattr(obj.notifications, 'lambdas'):
                    for lambda_notif in obj.notifications.lambdas:
                        if lambda_notif.function == awslambda.paco_ref:
                            # yes, this Lambda gets notification from this S3Bucket
                            group = get_parent_by_interface(obj, schemas.IResourceGroup)
                            s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                            if s3_logical_name not in seen:
                                troposphere.awslambda.Permission(
                                    title='S3Bucket' + s3_logical_name,
                                    template=self.template,
                                    Action="lambda:InvokeFunction",
                                    FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                    Principal='s3.amazonaws.com',
                                    SourceArn='arn:aws:s3:::' + obj.get_bucket_name(),
                                )
                                seen[s3_logical_name] = True
        # Events Rule permission(s)
        if schemas.IEventsRule.providedBy(obj):
            seen = {}
            for target in obj.targets:
                target_ref = Reference(target.target)
                target_ref.set_account_name(account_ctx.get_name())
                target_ref.set_region(aws_region)
                lambda_ref = Reference(awslambda.paco_ref)
                if target_ref.raw == lambda_ref.raw:
                    # yes, the Events Rule has a Target that is this Lambda
                    group = get_parent_by_interface(obj, schemas.IResourceGroup)
                    eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                    if eventsrule_logical_name not in seen:
                        rule_name = create_event_rule_name(obj)
                        source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(
                            aws_region,
                            account_ctx.id,
                            rule_name
                        )
                        troposphere.awslambda.Permission(
                            title='EventsRule' + eventsrule_logical_name,
                            template=self.template,
                            Action="lambda:InvokeFunction",
                            FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                            Principal='events.amazonaws.com',
                            SourceArn=source_arn,
                        )
                        seen[eventsrule_logical_name] = True
        # IoT Analytics permission(s)
        if schemas.IIoTAnalyticsPipeline.providedBy(obj):
            seen = {}
            for activity in obj.pipeline_activities.values():
                if activity.activity_type == 'lambda':
                    target_ref = Reference(activity.function)
                    target_ref.set_account_name(account_ctx.get_name())
                    target_ref.set_region(aws_region)
                    lambda_ref = Reference(awslambda.paco_ref)
                    if target_ref.raw == lambda_ref.raw:
                        # yes, the IoT Analytics Lambda Activity has a ref to this Lambda
                        group = get_parent_by_interface(obj, schemas.IResourceGroup)
                        iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_')
                        if iotap_logical_name not in seen:
                            rule_name = create_event_rule_name(obj)
                            troposphere.awslambda.Permission(
                                title='IoTAnalyticsPipeline' + iotap_logical_name,
                                template=self.template,
                                Action="lambda:InvokeFunction",
                                FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
                                Principal='iotanalytics.amazonaws.com',
                            )
                            seen[iotap_logical_name] = True

    # Log group(s)
    # function name is derived from the Function ARN at deploy time
    loggroup_function_name = troposphere.Join(
        '', [
            '/aws/lambda/',
            troposphere.Select(
                6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn'))
            )
        ]
    )
    loggroup_resources = []
    loggroup_resources.append(
        self.add_log_group(loggroup_function_name, 'lambda')
    )
    if len(awslambda.log_group_names) > 0:
        # Additional App-specific LogGroups
        for loggroup_name in awslambda.log_group_names:
            # Add LogGroup to the template
            prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
            loggroup_resources.append(
                self.add_log_group(prefixed_loggroup_name)
            )

    # LogGroup permissions
    log_group_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            '*'
        ])
    ]
    log_stream_arns = [
        troposphere.Join(':', [
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group',
            loggroup_function_name,
            'log-stream',
            '*'
        ])
    ]
    for loggroup_name in awslambda.log_group_names:
        prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag)
        log_group_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*'
        )
        log_stream_arns.append(
            f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*'
        )
    loggroup_policy_resource = troposphere.iam.ManagedPolicy(
        title='LogGroupManagedPolicy',
        PolicyDocument=Policy(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Sid='AllowLambdaModifyLogStreams',
                    Effect=Allow,
                    Action=[
                        Action("logs", "CreateLogStream"),
                        Action("logs", "DescribeLogStreams"),
                    ],
                    Resource=log_group_arns,
                ),
                Statement(
                    Sid='AllowLambdaPutLogEvents',
                    Effect=Allow,
                    Action=[
                        Action("logs", "PutLogEvents"),
                    ],
                    Resource=log_stream_arns,
                ),
            ],
        ),
        Roles=[troposphere.Ref(role_name_param)],
    )
    loggroup_policy_resource.DependsOn = loggroup_resources
    self.template.add_resource(loggroup_policy_resource)

    # Outputs
    self.create_output(
        title='FunctionName',
        value=troposphere.Ref(self.awslambda_resource),
        ref=awslambda.paco_ref_parts + '.name',
    )
    self.create_output(
        title='FunctionArn',
        value=troposphere.GetAtt(self.awslambda_resource, 'Arn'),
        ref=awslambda.paco_ref_parts + '.arn',
    )
def __init__(self, stack, paco_ctx, env_ctx):
    """Build the CloudFormation template for an ElastiCache replication group.

    Creates security group and subnet Parameters, a SubnetGroup, the
    ReplicationGroup resource, endpoint Outputs and — when DNS is enabled —
    Route 53 CNAME record sets pointing at the primary endpoint.
    """
    elasticache_config = stack.resource
    config_ref = elasticache_config.paco_ref_parts
    super().__init__(stack, paco_ctx)
    self.set_aws_name('ElastiCache', self.resource_group_name, self.resource.name, elasticache_config.engine)

    # Troposphere Template Generation
    self.init_template('ElastiCache: {} - {}'.format(
        elasticache_config.engine,
        elasticache_config.engine_version
    ))

    # if disabled then leave an empty placeholder and finish
    if not elasticache_config.is_enabled():
        return

    # Security Groups
    vpc_sg_list = []
    for sg_ref in elasticache_config.security_groups:
        ref = Reference(sg_ref)
        sg_param_name = self.create_cfn_logical_id('SecurityGroupId' + ref.parts[-2] + ref.parts[-1])
        sg_param = self.create_cfn_parameter(
            name=sg_param_name,
            param_type='String',
            description='VPC Security Group Id',
            value=sg_ref + '.id',
        )
        vpc_sg_list.append(troposphere.Ref(sg_param))

    # Subnet Ids
    subnet_ids_param = self.create_cfn_parameter(
        name='SubnetIdList',
        param_type='List<String>',
        description='List of Subnet Ids to provision ElastiCache nodes',
        value=elasticache_config.segment + '.subnet_id_list',
    )

    # ElastiCache Subnet Group
    subnet_group_dict = {
        'Description': troposphere.Ref('AWS::StackName'),
        'SubnetIds': troposphere.Ref(subnet_ids_param)
    }
    subnet_group_res = troposphere.elasticache.SubnetGroup.from_dict(
        'SubnetGroup', subnet_group_dict)
    self.template.add_resource(subnet_group_res)

    # ElastiCache Resource
    elasticache_dict = elasticache_config.cfn_export_dict
    elasticache_dict['SecurityGroupIds'] = vpc_sg_list
    elasticache_dict['CacheSubnetGroupName'] = troposphere.Ref(subnet_group_res)
    # fall back to the stack name when no description is configured
    if elasticache_config.description:
        elasticache_dict['ReplicationGroupDescription'] = elasticache_config.description
    else:
        elasticache_dict['ReplicationGroupDescription'] = troposphere.Ref('AWS::StackName')
    cfn_cache_cluster_name = 'ReplicationGroup'
    cache_cluster_res = troposphere.elasticache.ReplicationGroup.from_dict(
        cfn_cache_cluster_name, elasticache_dict)
    self.template.add_resource(cache_cluster_res)

    # Outputs
    self.create_output(
        title='PrimaryEndPointAddress',
        description='ElastiCache PrimaryEndpoint Address',
        value=troposphere.GetAtt(cache_cluster_res, 'PrimaryEndPoint.Address'),
        ref=config_ref + ".primaryendpoint.address"
    )
    self.create_output(
        title='PrimaryEndPointPort',
        description='ElastiCache PrimaryEndpoint Port',
        value=troposphere.GetAtt(cache_cluster_res, 'PrimaryEndPoint.Port'),
        ref=config_ref + ".primaryendpoint.port"
    )
    self.create_output(
        title='ReadEndPointAddresses',
        description='ElastiCache ReadEndpoint Addresses',
        value=troposphere.GetAtt(cache_cluster_res, 'ReadEndPoint.Addresses'),
        ref=config_ref + ".readendpoint.addresses"
    )
    self.create_output(
        title='ReadEndPointPorts',
        description='ElastiCache ReadEndpoint Ports',
        value=troposphere.GetAtt(cache_cluster_res, 'ReadEndPoint.Ports'),
        ref=config_ref + ".readendpoint.ports",
    )

    # Route 53: one CNAME record per dns entry, aliasing the primary endpoint.
    # The enabled check is loop-invariant so it is evaluated once, not per entry.
    route53_ctl = self.paco_ctx.get_controller('route53')
    if self.resource.is_dns_enabled():
        for ec_dns in self.resource.dns:
            hosted_zone = get_model_obj_from_ref(ec_dns.hosted_zone, self.paco_ctx.project)
            account_ctx = self.paco_ctx.get_account_context(account_ref=hosted_zone.account)
            route53_ctl.add_record_set(
                account_ctx,
                self.aws_region,
                self.resource,
                enabled=self.resource.is_enabled(),
                dns=ec_dns,
                record_set_type='CNAME',
                resource_records=[f'{self.resource.paco_ref}.primaryendpoint.address'],
                stack_group=self.stack.stack_group,
                async_stack_provision=True,
                config_ref=self.resource.paco_ref_parts + '.dns'
            )