def create_ec2_instance_role(template, name, managed_policy_arns=None, policies=None):
    """Add an EC2-assumable IAM Role plus a matching InstanceProfile to *template*.

    Args:
        template: troposphere ``Template`` the resources are added to.
        name: logical-name prefix; resources become ``<name>Role`` and
            ``<name>Profile``.
        managed_policy_arns: optional list of managed-policy ARN strings.
        policies: optional list of inline ``iam.Policy`` objects.

    Returns:
        dict: ``{'role': <Role resource>, 'profile': <InstanceProfile resource>}``.
    """
    role_name = name + "Role"
    cfnrole = Role(
        role_name,
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    # Only EC2 instances may assume this role.
                    Principal=Principal("Service", ["ec2.amazonaws.com"]),
                )
            ]
        ),
    )
    # Attach optional properties only when supplied so the generated
    # template omits empty lists.
    if policies:
        cfnrole.Policies = policies
    if managed_policy_arns:
        cfnrole.ManagedPolicyArns = managed_policy_arns
    cfnrole = template.add_resource(cfnrole)

    profile_name = name + 'Profile'
    cfninstanceprofile = template.add_resource(InstanceProfile(
        profile_name,
        Roles=[Ref(cfnrole)],
    ))
    # FIX: previously returned the profile's *name* string under 'profile'
    # while 'role' held the resource object. Return the InstanceProfile
    # resource for consistency; troposphere's Ref()/GetAtt() accept a
    # resource object as well as its title, so Ref()-based callers keep
    # working.
    return {'role': cfnrole, 'profile': cfninstanceprofile}
) ], )) ud = Base64( Join('\n', [ "#!/bin/bash", "sudo yum install --enablerepo=epel -y git", "pip install ansible", AnsiblePullCmd, "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format( AnsiblePullCmd) ])) t.add_resource( Role("Role", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ec2.amazonaws.com"])) ]))) t.add_resource( InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")])) t.add_resource( ec2.Instance("instance", ImageId="ami-1b316af0", InstanceType="t2.micro", SecurityGroups=[Ref("SecurityGroup")], KeyName=Ref("KeyPair"), UserData=ud, IamInstanceProfile=Ref("InstanceProfile")))
Role( "LambdaExecutionRole", Path="/", Policies=[ Policy( PolicyName="root", PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": ["logs:*"], "Resource": "arn:aws:logs:*:*:*", "Effect": "Allow" }, ] }), ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["lambda.amazonaws.com"] } }] }, ManagedPolicyArns=[ "arn:aws:iam::aws:policy/AmazonKinesisReadOnlyAccess", "arn:aws:iam::aws:policy/service-role/AWSLambdaRole" ]))
def create_pipeline_template(config) -> Template:
    """Build a CodePipeline CD template: GitHub source -> CodeBuild build.

    Creates the artifact S3 bucket, service roles for CloudFormation and
    CodePipeline, the CodeBuild project, the two-stage pipeline, and a
    GitHub webhook that triggers the Source stage on push.

    Args:
        config: pipeline configuration (currently unused in this body —
            NOTE(review): confirm whether it should feed parameter defaults).

    Returns:
        Template: the fully-populated troposphere template.
    """
    t = Template()
    # GitHub connection parameters; the OAuth token doubles as the webhook
    # HMAC secret below.
    github_token = t.add_parameter(Parameter("GithubToken", Type="String"))
    github_owner = t.add_parameter(
        Parameter("GitHubOwner",
                  Type='String',
                  Default='aiengines',
                  AllowedPattern="[A-Za-z0-9-_]+"))
    github_repo = t.add_parameter(
        Parameter("GitHubRepo",
                  Type='String',
                  Default='codebuild_pipeline_skeleton',
                  AllowedPattern="[A-Za-z0-9-_]+"))
    github_branch = t.add_parameter(
        Parameter("GitHubBranch",
                  Type='String',
                  Default='master',
                  AllowedPattern="[A-Za-z0-9-_]+"))
    # Bucket holding pipeline artifacts passed between stages.
    artifact_store_s3_bucket = t.add_resource(Bucket("S3Bucket", ))
    # Role CloudFormation assumes when deploying on behalf of the pipeline.
    # NOTE(review): AdministratorAccess is very broad — consider scoping down.
    cloudformationrole = t.add_resource(
        Role("CloudformationRole",
             AssumeRolePolicyDocument=PolicyDocument(
                 Version="2012-10-17",
                 Statement=[
                     Statement(
                         Effect=Allow,
                         Action=[AssumeRole],
                         Principal=Principal("Service",
                                             ["cloudformation.amazonaws.com"]))
                 ]),
             ManagedPolicyArns=['arn:aws:iam::aws:policy/AdministratorAccess'
                                ]))
    # Role the pipeline itself runs under.
    codepipelinerole = t.add_resource(
        Role("CodePipelineRole",
             AssumeRolePolicyDocument=PolicyDocument(Statement=[
                 Statement(Effect=Allow,
                           Action=[AssumeRole],
                           Principal=Principal("Service",
                                               ["codepipeline.amazonaws.com"]))
             ]),
             ManagedPolicyArns=['arn:aws:iam::aws:policy/AdministratorAccess'
                                ]))
    codebuild_project = t.add_resource(create_codebuild_project(t))
    pipeline = t.add_resource(
        Pipeline(
            "CDPipeline",
            ArtifactStore=ArtifactStore(
                Type="S3", Location=Ref(artifact_store_s3_bucket)),
            # DisableInboundStageTransitions = [
            #     DisableInboundStageTransitions(
            #         StageName = "Release",
            #         Reason = "Disabling the transition until "
            #                  "integration tests are completed"
            #     )
            # ],
            RestartExecutionOnUpdate=True,
            RoleArn=codepipelinerole.GetAtt('Arn'),
            Stages=[
                # Stage 1: pull source from GitHub. Polling is disabled
                # because the webhook below pushes changes instead.
                Stages(Name="Source",
                       Actions=[
                           Actions(Name="SourceAction",
                                   ActionTypeId=ActionTypeId(
                                       Category="Source",
                                       Owner="ThirdParty",
                                       Provider="GitHub",
                                       Version="1",
                                   ),
                                   OutputArtifacts=[
                                       OutputArtifacts(Name="GitHubSourceCode")
                                   ],
                                   Configuration={
                                       'Owner': Ref(github_owner),
                                       'Repo': Ref(github_repo),
                                       'Branch': Ref(github_branch),
                                       'PollForSourceChanges': False,
                                       'OAuthToken': Ref(github_token)
                                   },
                                   RunOrder="1")
                       ]),
                # Stage 2: build with the CodeBuild project created above.
                Stages(Name="Build",
                       Actions=[
                           Actions(Name="BuildAction",
                                   ActionTypeId=ActionTypeId(
                                       Category="Build",
                                       Owner="AWS",
                                       Provider="CodeBuild",
                                       Version="1"),
                                   InputArtifacts=[
                                       InputArtifacts(Name="GitHubSourceCode")
                                   ],
                                   OutputArtifacts=[
                                       OutputArtifacts(Name="BuildArtifacts")
                                   ],
                                   Configuration={
                                       'ProjectName': Ref(codebuild_project),
                                   },
                                   RunOrder="1")
                       ]),
            ],
        ))
    # GitHub -> CodePipeline webhook; '{Branch}' is substituted by
    # CodePipeline from the target action's Branch configuration.
    t.add_resource(
        Webhook("GitHubWebHook",
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=WebhookAuthConfiguration(
                    SecretToken=Ref(github_token)),
                Filters=[
                    WebhookFilterRule(JsonPath='$.ref',
                                      MatchEquals='refs/heads/{Branch}')
                ],
                TargetPipeline=Ref(pipeline),
                TargetAction='Source',
                TargetPipelineVersion=pipeline.GetAtt('Version')))
    return t
Role("PipelineRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["codepipeline.amazonaws.com"])) ]), Path="/", Policies=[ IAMPolicy(PolicyName="HelloworldCodePipeline", PolicyDocument={ "Statement": [ { "Effect": "Allow", "Action": "cloudformation:*", "Resource": "*" }, { "Effect": "Allow", "Action": "codebuild:*", "Resource": "*" }, { "Effect": "Allow", "Action": "codepipeline:*", "Resource": "*" }, { "Effect": "Allow", "Action": "ecr:*", "Resource": "*" }, { "Effect": "Allow", "Action": "ecs:*", "Resource": "*" }, { "Effect": "Allow", "Action": "iam:*", "Resource": "*" }, { "Effect": "Allow", "Action": "s3:*", "Resource": "*" }, ], }), ]))
def __init__(self, lambda_names):
    """Build an API Gateway stack fronting the given Lambda functions.

    Creates the REST API (from a swagger body), an invocation role, a
    custom-resource Lambda that creates API GW deployments, the deployment
    custom resource itself, and a URL output.

    Args:
        lambda_names: iterable of Lambda function names; a URI parameter is
            added for each (sorted for stable template ordering).
    """
    self.t = Template()
    self.lambda_uris = []
    # Base policy every Lambda-facing role attaches (passed in by the caller).
    lambda_iam_policy_arn = self.t.add_parameter(Parameter(
        "LambdaIAMPolicyARN",
        Description="ARN of the base IAM policy for Lambda functions",
        Type="String"
    ))
    apigw_stage_name = self.t.add_parameter(Parameter(
        "APIGWStageName",
        Description="Stage name for API Gateway deployment",
        Type="String"
    ))
    # NOTE(review): this parameter is not referenced later in this method —
    # presumably consumed inside get_swagger(); verify.
    json_mapping_template = self.t.add_parameter(Parameter(
        "MappingTemplate",
        Description="Mapping template for request body, uri and body params, and stage variables",
        Default=MAPPING_TEMPLATE,
        Type="String"
    ))
    # One URI parameter per Lambda, in deterministic (sorted) order.
    for l in sorted(lambda_names):
        self.add_lambda_uri_parameters(l)
    # Role API Gateway assumes to invoke the backing Lambdas.
    # NOTE(review): apigw_role is not referenced again here — presumably
    # wired up via the swagger body; verify.
    apigw_role = self.t.add_resource(Role(
        "APIGWRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Action": ["sts:AssumeRole"],
                "Effect": "Allow",
                "Principal": {
                    "Service": ["apigateway.amazonaws.com"]
                }
            }]
        },
        Policies=[Policy(
            PolicyName="APIGateway",
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": ["lambda:InvokeFunction", "iam:PassRole"],
                        "Resource": "*",
                    },
                ]
            })
        ],
    ))
    # REST API defined by an inline swagger document.
    apigw = self.t.add_resource(RestApi(
        "APIGW",
        Body=self.get_swagger()
    ))
    # Execution role for the custom-resource Lambda below; it may only POST
    # new deployments of this specific REST API.
    custom_apigw_deployment_role = self.t.add_resource(Role(
        "CustomAPIGWDeploymentRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Action": ["sts:AssumeRole"],
                "Effect": "Allow",
                "Principal": {
                    "Service": ["lambda.amazonaws.com"]
                }
            }]
        },
        ManagedPolicyArns=[
            Ref(lambda_iam_policy_arn),
        ],
        Policies=[Policy(
            PolicyName="CreateDeployment",
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": ["apigateway:POST"],
                        "Resource": [
                            Join('', [
                                "arn:aws:apigateway:",
                                Ref('AWS::Region'),
                                "::/restapis/",
                                Ref(apigw),
                                "/deployments",
                            ]),
                        ],
                    },
                ]
            })
        ],
    ))
    # Lambda backing the custom resource that triggers API GW deployments.
    # NOTE(review): the f-string has no placeholders; a plain string literal
    # would do.
    self.custom_apigw_deployment_lambda = self.t.add_resource(Function(
        f"CustomAPIGWDeploymentLambda",
        FunctionName="CustomAPIGWDeploymentLambda",
        Handler="index.handler",
        Runtime="python3.6",
        Role=GetAtt(custom_apigw_deployment_role, "Arn"),
        Code=Code(
            ZipFile=self.get_custom_apigw_deployment_code()
        )
    ))
    # Custom resource: re-deploys the API whenever its inputs (stage name,
    # Lambda URIs) change.
    apigw_deployment = self.t.add_resource(CustomAPIGWDeployment(
        "APIGWDeployment",
        ServiceToken=GetAtt(self.custom_apigw_deployment_lambda, "Arn"),
        RestApiId=Ref(apigw),
        StageName=Ref(apigw_stage_name),
        LambdaUris=self.lambda_uris
    ))
    # Public invoke URL of the deployed stage.
    self.t.add_output(Output(
        "URL",
        Value=Join('', [
            'https://', Ref(apigw), '.execute-api.', Ref('AWS::Region'),
            '.amazonaws.com/', Ref(apigw_stage_name),
        ]),
        Description="API Gateway stage's URL"
    ))
], VpcId=Ref("VpcId"))) t.add_resource(Cluster('ECSCluster', )) t.add_resource( Role( 'EcsClusterRole', ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM', 'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly', 'arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role', 'arn:aws:iam::aws:policy/CloudWatchFullAccess' ], AssumeRolePolicyDocument={ 'Version': '2012-10-17', 'Statement': [{ 'Action': 'sts:AssumeRole', 'Principal': { 'Service': 'ec2.amazonaws.com' }, 'Effect': 'Allow', }] })) t.add_resource( InstanceProfile( 'EC2InstanceProfile', Roles=[Ref('EcsClusterRole')], ))
t.add_mapping("Region2Principal", get_map('region2principal')) solution_stack_name = t.add_parameter( Parameter("SolutionStackName", Description= "ElasticBeanstalk configuration to run the API in. This should \ be set to the latest version of the Python 3.6 container.", Type="String", Default="64bit Amazon Linux 2018.03 v2.7.3 running Python 3.6")) role = t.add_resource( Role("HyP3ApiWebServerRole", AssumeRolePolicyDocument=get_ec2_assume_role_policy( FindInMap("Region2Principal", Ref("AWS::Region"), "EC2Principal")), Path="/", ManagedPolicyArns=[ "arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier" ])) instance_profile = t.add_resource( InstanceProfile("WebServerInstanceProfile", Path="/", Roles=[Ref(role)])) app = t.add_resource( Application("HyP3Api", ApplicationName=Sub("${StackName}-hyp3-api", StackName=Ref('AWS::StackName')), Description=("AWS Elastic Beanstalk API for " "interacting with the HyP3 system"))) app_version = t.add_resource(
Default="karthik-lab", ConstraintDescription="can contain only ASCII characters.", )) ec2role = t.add_resource( Role("Ec2Roletest", RoleName="eksworker", ManagedPolicyArns=[ "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Sid": "", "Effect": "Allow", "Principal": { "Service": ["ssm.amazonaws.com", "ec2.amazonaws.com"] }, "Action": "sts:AssumeRole" }] }, Path="/")) StackName = Ref('AWS::StackName') region = Ref('AWS::Region') clustername = Ref(ClusterName) SSMEC2 = t.add_resource( InstanceProfile("SSMEC2",
'PolicyCloudwatch', PolicyName='Cloudwatch', PolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': ['cloudwatch:*'], 'Resource': '*', 'Effect': 'Allow'}]}, Roles=[Ref('EcsClusterRole')], )) # Role our EC2 instance will take on to work with ECR, ECS and CloudWatch EcsClusterRole = t.add_resource(Role( 'EcsClusterRole', RoleName='EcsClusterRole', Path='/', ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/AmazonEC2ContainerServiceFullAccess' ], AssumeRolePolicyDocument={'Version': '2012-10-17', 'Statement': [{'Action': 'sts:AssumeRole', 'Principal': {'Service': 'ec2.amazonaws.com'}, 'Effect': 'Allow', }]} )) # Linking our EC2 with the Role EC2InstanceProfile = t.add_resource(InstanceProfile( 'EC2InstanceProfile', Path='/', Roles=[Ref('EcsClusterRole')], )) # Amazon EC2 Launch Configuration
from troposphere.codebuild import (Artifacts, Environment, Project, Source) from troposphere.iam import Role t = Template() t.add_description("Effective DevOps in AWS: CodeBuild - Helloworld container") t.add_resource( Role("ServiceRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["codebuild.amazonaws.com"])) ]), Path="/", ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/AWSCodePipelineReadOnlyAccess', 'arn:aws:iam::aws:policy/AWSCodeBuildDeveloperAccess', 'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser', 'arn:aws:iam::aws:policy/AmazonS3FullAccess', 'arn:aws:iam::aws:policy/CloudWatchLogsFullAccess' ])) environment = Environment( ComputeType='BUILD_GENERAL1_SMALL', Image='aws/codebuild/docker:1.12.1', Type='LINUX_CONTAINER', EnvironmentVariables=[ { 'Name': 'REPOSITORY_NAME',
def add_resources(self):
    """Add the full EKS cluster stack to ``self.template``.

    Creates, in dependency order: the control-plane security group, the EKS
    cluster role and cluster, the worker-node security group and IAM
    role/profile, the SG rules wiring control plane <-> workers, and the
    worker-node launch configuration + auto-scaling group.
    """
    # Security group for the EKS control plane ENIs.
    self.EKSControlPlaneSG = self.template.add_resource(
        ec2.SecurityGroup(
            "EKSControlPlaneSG",
            GroupDescription=
            "Allow communication between WorkerNodes and EKS",
            VpcId=Ref(self.SharedServicesVpcId),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-SS-EksControlPlane-SG"),
        ))
    # Service role the EKS control plane assumes.
    self.EKSClusterRole = self.template.add_resource(
        Role(
            "EKSClusterRole",
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[AssumeRole],
                          Principal=Principal("Service",
                                              ["eks.amazonaws.com"]))
            ]),
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
                "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
            ],
        ))
    # The EKS cluster itself, placed in the shared-services public subnets.
    self.EKSCluster = self.template.add_resource(
        eks.Cluster(
            "EKSCluster",
            DependsOn=["EKSControlPlaneSG", "EKSClusterRole"],
            Name=self.environment_parameters["ClientEnvironmentKey"] +
            "-SS-EKS",
            RoleArn=GetAtt(self.EKSClusterRole, "Arn"),
            Version=Ref(self.EksClusterVersion),
            ResourcesVpcConfig=eks.ResourcesVpcConfig(
                SecurityGroupIds=[Ref(self.EKSControlPlaneSG)],
                SubnetIds=[
                    Ref(self.SharedServicesPubSubnet1),
                    Ref(self.SharedServicesPubSubnet2)
                ],
            ),
        ))
    # Worker-node security group; the kubernetes.io/cluster tag marks it as
    # owned by this cluster so EKS can manage it.
    self.WorkerNodeEc2SG = self.template.add_resource(
        ec2.SecurityGroup(
            "WorkerNodeEc2SG",
            DependsOn=["EKSCluster"],
            GroupDescription=
            "Allow communication between WorkerNodes and EKS",
            VpcId=Ref(self.SharedServicesVpcId),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-SS-EksWorkerNodes-Ec2SG") + Tags({
                     "kubernetes.io/cluster/" +
                     self.environment_parameters["ClientEnvironmentKey"] +
                     "-SS-EKS":
                     "owned"
                 }),
        ))
    # Instance role for worker nodes (standard EKS worker managed policies).
    self.WorkerNodeInstanceRole = self.template.add_resource(
        Role(
            "WorkerNodeInstanceRole",
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[AssumeRole],
                          Principal=Principal("Service",
                                              ["ec2.amazonaws.com"]))
            ]),
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
                "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
                "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
            ],
        ))
    self.WorkerNodeInstanceProfile = self.template.add_resource(
        InstanceProfile(
            "WorkerNodeInstanceProfile",
            Path="/",
            Roles=[Ref(self.WorkerNodeInstanceRole)],
        ))
    # Workers may talk to each other on any protocol/port (self-referencing
    # all-traffic rule).
    self.WorkerNodeEc2SGIngress = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "WorkerNodeEc2SGIngress",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.WorkerNodeEc2SG),
            IpProtocol="-1",
            FromPort=0,
            ToPort=65535,
            SourceSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    # Control plane -> workers on ephemeral ports (kubelet etc.).
    self.WorkerNodeEc2SGIngressFromEKSControlPlane = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "WorkerNodeEc2SGIngressFromEKSControlPlane",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.WorkerNodeEc2SG),
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
            SourceSecurityGroupId=Ref(self.EKSControlPlaneSG),
        ))
    # Matching egress rule on the control-plane SG for the same port range.
    self.EksControlPlaneEgressToWorkerNodes = self.template.add_resource(
        ec2.SecurityGroupEgress(
            "EksControlPlaneEgressToWorkerNodes",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.EKSControlPlaneSG),
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
            DestinationSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    # HTTPS in both directions between control plane and workers
    # (API server and webhook traffic).
    self.WorkerNodeEc2SG443IngressFromEKSControlPlane = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "WorkerNodeEc2SG443IngressFromEKSControlPlane",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.WorkerNodeEc2SG),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
            SourceSecurityGroupId=Ref(self.EKSControlPlaneSG),
        ))
    self.EKSControlPlaneSG443IngressFromWorkerNode = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "EKSControlPlaneSG443IngressFromWorkerNode",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.EKSControlPlaneSG),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
            SourceSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    self.EksControlPlane443EgressToWorkerNodes = self.template.add_resource(
        ec2.SecurityGroupEgress(
            "EksControlPlane443EgressToWorkerNodes",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.EKSControlPlaneSG),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
            DestinationSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    # Launch configuration for worker nodes; the user data runs the EKS
    # bootstrap script to join the cluster by name.
    self.WorkerNodeLaunchConfiguration = self.template.add_resource(
        LaunchConfiguration(
            "WorkerNodeLaunchConfiguration",
            ImageId=Ref(self.WorkerNodeImageId),
            InstanceType=Ref(self.WorkerNodeInstanceType),
            IamInstanceProfile=Ref(self.WorkerNodeInstanceProfile),
            KeyName=Ref(self.WorkerNodeKeyName),
            SecurityGroups=[Ref(self.WorkerNodeEc2SG)],
            UserData=Base64(
                Join('', [
                    "#!/bin/bash \n",
                    # NOTE(review): the next two literals are implicitly
                    # concatenated (no comma) — harmless here since the Join
                    # separator is '', but easy to misread.
                    "set -o xtrace \n"
                    "ClusterName=\"" +
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-SS-EKS" + "\" \n",
                    "BootstrapArguments=\"" "\" \n",
                    "/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments} \n"
                ]))))
    # Auto-scaling group spanning the private subnets; tags are propagated
    # to instances (third AutoScalingTag argument = True).
    self.WorkerNodeAutoScalingGroup = self.template.add_resource(
        AutoScalingGroup(
            "WorkerNodeAutoscalingGroup",
            AutoScalingGroupName=self.
            environment_parameters["ClientEnvironmentKey"] +
            "-SS-EksWorkerNodeAutoScalingGroup",
            LaunchConfigurationName=Ref(
                self.WorkerNodeLaunchConfiguration),
            MaxSize=Ref(self.WorkerNodeASGGroupMaxSize),
            MinSize=Ref(self.WorkerNodeASGGroupMinSize),
            DesiredCapacity=Ref(self.WorkerNodeASGGroupDesiredSize),
            HealthCheckType=Ref(self.WorkerNodeASGHealthCheckType),
            HealthCheckGracePeriod=Ref(
                self.WorkerNodeASGHealthCheckGracePeriod),
            Cooldown=Ref(self.WorkerNodeASGCoolDown),
            VPCZoneIdentifier=[
                Ref(self.SharedServicesPrivSubnet1),
                Ref(self.SharedServicesPrivSubnet2)
            ],
            Tags=[
                AutoScalingTag(
                    "Name",
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-SS-EKS" + "-WorkerNodeGroup-Node", True),
                AutoScalingTag(
                    "kubernetes.io/cluster/" +
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-SS-EKS", "owned", True),
                AutoScalingTag(
                    "Environment",
                    self.environment_parameters["EnvironmentName"], True),
                AutoScalingTag(
                    "ResourceOwner",
                    self.environment_parameters["ResourceOwner"], True),
                AutoScalingTag(
                    "ClientCode",
                    self.environment_parameters["ClientEnvironmentKey"],
                    True),
            ],
        ))
def add_scaling(spot_fleet, template, cluster_name):
    """Add spot-fleet auto-scaling resources for one fleet to *template*.

    Creates an SSM parameter recording the last scale time, an IAM role for
    the scaler Lambda, the scaler Lambda itself, a 1-minute CloudWatch Events
    cron rule, and the permission letting the rule invoke the Lambda.

    Args:
        spot_fleet: dict describing the fleet; its 'name' keys the resource
            logical names.
        template: troposphere Template to add the resources to.
        cluster_name: ECS cluster name, used to reference the cleanup Lambda.
    """
    # Last-scaled timestamp, namespaced per cluster/version in SSM.
    ssm_param = Parameter(
        'Scale{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))),
        Type="String",
        Value="0",
        Name=Sub("/ecs-maestro/${ClusterName}/${Version}/scaletime")
    )
    template.add_resource(ssm_param)
    function_name = sanitize_cfn_resource_name(cluster_name)
    # Execution role: read CloudWatch metrics, modify the spot fleet,
    # manage ECS instances/tasks, and read/write the SSM namespace above.
    autoscaling_role = Role(
        "AutoscalingRole",
        AssumeRolePolicyDocument={
            "Statement": [{
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {"Service": "lambda.amazonaws.com"},
            }]
        },
        Policies=[
            Policy(
                PolicyName="ec2-spot-fleet-scaler",
                PolicyDocument={
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": [
                            "cloudwatch:Get*",
                            "ec2:DescribeSpotFleetRequests",
                            "ec2:ModifySpotFleetRequest",
                            "logs:*",
                            "ecs:ListContainerInstances",
                            "ecs:Update*",
                            "ecs:ListTasks",
                            "s3:GetEncryptionConfiguration"
                        ],
                        "Resource": "*"
                    }, {
                        "Effect": "Allow",
                        "Action": [
                            "ssm:Get*",
                            "ssm:Put*",
                            "ssm:Delete*"
                        ],
                        "Resource": [
                            {"Fn::Sub": "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/ecs-maestro/${ClusterName}/*"}
                        ]
                    }]
                }
            ),
            # Allows the scaler to invoke the per-cluster ASG cleanup Lambda.
            Policy(
                PolicyName="DeleteStack",
                PolicyDocument={
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": [
                            "lambda:InvokeFunction",
                        ],
                        "Resource": [
                            {"Fn::Sub": "arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:"+function_name+"ASGCleanupLambda"}]
                    }]
                }
            )
        ]
    )
    template.add_resource(autoscaling_role)
    # Scaler Lambda; code comes from the deployment bundle in S3 and all
    # tuning knobs arrive via environment variables (Sub'd stack params).
    scaling_lambda = Function(
        'ScalingLambda{}'.format(sanitize_cfn_resource_name(spot_fleet.get('name'))),
        Code=Code(
            S3Bucket=Sub("${S3Bucket}"),
            S3Key=Sub("${S3Prefix}/deployment.zip")
        ),
        Handler="scaling.scale_spot.lambda_handler",
        Role=GetAtt(autoscaling_role, "Arn"),
        Environment=Environment(
            Variables={
                "CLUSTER_NAME": Sub("${ClusterName}"),
                "SPOT_FLEET": Ref(
                    "SpotFleet{}".format(
                        sanitize_cfn_resource_name(
                            spot_fleet.get('name')
                        )
                    )
                ),
                "STATUS": Sub("${Status}"),
                "VERSION": Sub("${Version}"),
                "SCALE_IN_THRESHOLD": Sub("${SpotTaskThresholdIn}"),
                "SCALE_OUT_THRESHOLD": Sub("${SpotTaskThresholdOut}"),
                "MAX_WEIGHT": Sub("${SpotMaxWeight}"),
                "MIN_WEIGHT": Sub("${SpotMinWeight}")
            }
        ),
        Timeout=900,  # max Lambda timeout; scaling may wait on draining
        MemorySize=128,
        Runtime="python3.7",
    )
    template.add_resource(scaling_lambda)
    # Fire the scaler every minute.
    CronScaling = Rule(
        "CronScaling{}".format(
            sanitize_cfn_resource_name(spot_fleet.get('name'))
        ),
        ScheduleExpression="rate(1 minute)",
        Description="Cron for cluster stats",
        Targets=[
            Target(
                Id="1",
                Arn=GetAtt(scaling_lambda, "Arn"))
        ]
    )
    template.add_resource(CronScaling)
    # Let CloudWatch Events invoke the scaler Lambda.
    ScalingPerm = Permission(
        "ScalePerm{}".format(
            sanitize_cfn_resource_name(spot_fleet.get('name'))
        ),
        Action="lambda:InvokeFunction",
        FunctionName=GetAtt(scaling_lambda, "Arn"),
        Principal="events.amazonaws.com",
        SourceArn=GetAtt(CronScaling, "Arn")
    )
    template.add_resource(ScalingPerm)
Role("LambdaExecutionRole", Path="/", Policies=[ Policy( PolicyName="LambdaGzToSnappy", PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"], "Resource": [ Join(":", [ "arn", "aws", "s3", "", "", Join("", [ Join("/", [ Ref(inputBucketName), Ref(inputKeyPrefix) ]), "*" ]) ]), Join(":", [ "arn", "aws", "s3", "", "", Join("", [ Join("/", [ Ref(outputBucketName), Ref(outputKeyPrefix) ]), "*" ]) ]) ] }, { "Action": ["logs:*"], "Resource": "arn:aws:logs:*:*:*", "Effect": "Allow" }] }) ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["lambda.amazonaws.com"] } }] }))
Vpc = t.add_parameter( Parameter('Vpc', ConstraintDescription='Must be a valid VPC ID.', Type='AWS::EC2::VPC::Id')) BatchServiceRole = t.add_resource( Role( 'BatchServiceRole', Path='/', Policies=[], ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole', ], AssumeRolePolicyDocument={ 'Statement': [{ 'Action': ['sts:AssumeRole'], 'Effect': 'Allow', 'Principal': { 'Service': ['batch.amazonaws.com'] } }] }, )) BatchInstanceRole = t.add_resource( Role( 'BatchInstanceRole', Path='/', Policies=[], ManagedPolicyArns=[
# IAM role for the Elastic Beanstalk web-server EC2 instances.
# The assume-role principal comes from the Region2Principal mapping
# (region-specific EC2 service principal). Inline policies grant:
# EB bucket access, X-Ray tracing, EB CloudWatch log shipping, ECS/ELB
# management, and read access to ECR images.
web_server_role = Role(
    "WebServerRole",
    template=template,  # troposphere attaches the resource on construction
    AssumeRolePolicyDocument=Policy(Statement=[
        Statement(Effect=Allow,
                  Action=[AssumeRole],
                  Principal=Principal("Service", [
                      FindInMap("Region2Principal", Ref("AWS::Region"),
                                "EC2Principal")
                  ]))
    ]),
    Path="/",
    Policies=[
        # Shared policies defined elsewhere in this file.
        assets_management_policy,
        logging_policy,
        # Read/list/put on the Elastic Beanstalk staging buckets.
        iam.Policy(
            PolicyName="EBBucketAccess",
            PolicyDocument=dict(Statement=[
                dict(
                    Effect="Allow",
                    Action=[
                        "s3:Get*",
                        "s3:List*",
                        "s3:PutObject",
                    ],
                    Resource=[
                        "arn:aws:s3:::elasticbeanstalk-*",
                        "arn:aws:s3:::elasticbeanstalk-*/*",
                    ],
                )
            ], ),
        ),
        # Allow the X-Ray daemon to publish traces.
        iam.Policy(
            PolicyName="EBXRayAccess",
            PolicyDocument=dict(Statement=[
                dict(
                    Effect="Allow",
                    Action=[
                        "xray:PutTraceSegments",
                        "xray:PutTelemetryRecords",
                    ],
                    Resource="*",
                )
            ], ),
        ),
        # Ship instance logs to the EB-managed log groups only.
        iam.Policy(
            PolicyName="EBCloudWatchLogsAccess",
            PolicyDocument=dict(Statement=[
                dict(
                    Effect="Allow",
                    Action=[
                        "logs:PutLogEvents",
                        "logs:CreateLogStream",
                    ],
                    Resource=
                    "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*",
                )
            ], ),
        ),
        # Full ECS + ELB management (multicontainer Docker platform).
        iam.Policy(
            PolicyName="ECSManagementPolicy",
            PolicyDocument=dict(Statement=[
                dict(
                    Effect="Allow",
                    Action=[
                        "ecs:*",
                        "elasticloadbalancing:*",
                    ],
                    Resource="*",
                )
            ], ),
        ),
        # Pull-only access to ECR (awacs ecr action constants).
        iam.Policy(
            PolicyName='ECRManagementPolicy',
            PolicyDocument=dict(Statement=[
                dict(
                    Effect='Allow',
                    Action=[
                        ecr.GetAuthorizationToken,
                        ecr.GetDownloadUrlForLayer,
                        ecr.BatchGetImage,
                        ecr.BatchCheckLayerAvailability,
                    ],
                    Resource="*",
                )
            ], ),
        ),
    ])
def main():
    """Generates the CloudFormation template"""
    template = Template()
    template.add_version('2010-09-09')
    template.add_description(
        'This template deploys an ECS cluster to the ' +
        'provided VPC and subnets using an Auto Scaling Group')
    # Parameters
    # EnvironmentName
    env_name_param = template.add_parameter(
        Parameter(
            'EnvironmentName',
            Type='String',
            Description=
            'An environment name that will be prefixed to resource names',
        ))
    # InstanceType — restricted to the t2 family.
    instance_type_param = template.add_parameter(
        Parameter(
            'InstanceType',
            Type='String',
            Default='t2.nano',
            Description=
            'Which instance type should we use to build the ECS cluster?',
            AllowedValues=[
                't2.nano',
                't2.micro',
                't2.small',
                't2.medium',
                't2.large',
                't2.xlarge',
                't2.2xlarge',
            ],
        ))
    # ClusterSize — used for min, max, and desired below.
    cluster_size_param = template.add_parameter(
        Parameter(
            'ClusterSize',
            Type='Number',
            Description='How many ECS hosts do you want to initially deploy?',
            Default='1',
        ))
    # VPC
    template.add_parameter(
        Parameter(
            'VPC',
            Type='AWS::EC2::VPC::Id',
            Description=
            'Choose which VPC this ECS cluster should be deployed to',
        ))
    # Subnets
    subnets_param = template.add_parameter(
        Parameter(
            'Subnets',
            Type='List<AWS::EC2::Subnet::Id>',
            Description=
            'Choose which subnets this ECS cluster should be deployed to',
        ))
    # SecurityGroup
    sg_param = template.add_parameter(
        Parameter(
            'SecurityGroup',
            Type='AWS::EC2::SecurityGroup::Id',
            Description=
            'Select the Security Group to use for the ECS cluster hosts',
        ))
    # Mappings
    # AWSRegionToAMI — ECS-optimized AMI per region.
    template.add_mapping(
        'AWSRegionToAMI',
        {
            'us-east-1': {
                'AMI': 'ami-a58760b3'
            },
            'us-east-2': {
                'AMI': 'ami-a6e4bec3'
            },
            'us-west-1': {
                'AMI': 'ami-74cb9b14'
            },
            'us-west-2': {
                'AMI': 'ami-5b6dde3b'
            },
            'eu-west-1': {
                'AMI': 'ami-e3fbd290'
            },
            'eu-west-2': {
                'AMI': 'ami-77f6fc13'
            },
            'eu-central-1': {
                'AMI': 'ami-38dc1157'
            },
            'ap-northeast-1': {
                'AMI': 'ami-30bdce57'
            },
            'ap-southeast-1': {
                'AMI': 'ami-9f75ddfc'
            },
            'ap-southeast-2': {
                'AMI': 'ami-cf393cac'
            },
            'ca-central-1': {
                'AMI': 'ami-1b01b37f'
            },
        },
    )
    # Resources
    # Instance role for ECS hosts: ECS agent registration, log streaming,
    # and ECR image pulls.
    ecs_role = template.add_resource(
        Role(
            'ECSRole',
            Path='/',
            RoleName=Sub('${EnvironmentName}-ECSRole-${AWS::Region}'),
            AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action('sts', 'AssumeRole')],
                    Principal=awacs.aws.Principal('Service',
                                                  ['ec2.amazonaws.com']),
                ),
            ]),
            Policies=[
                Policy(
                    PolicyName='ecs-service',
                    PolicyDocument=awacs.aws.Policy(Statement=[
                        awacs.aws.Statement(
                            Effect=awacs.aws.Allow,
                            Action=[
                                awacs.aws.Action('ecs', 'CreateCluster'),
                                awacs.aws.Action(
                                    'ecs', 'DeregisterContainerInstance'),
                                awacs.aws.Action('ecs',
                                                 'DiscoverPollEndpoint'),
                                awacs.aws.Action('ecs', 'Poll'),
                                awacs.aws.Action('ecs',
                                                 'RegisterContainerInstance'),
                                awacs.aws.Action('ecs',
                                                 'StartTelemetrySession'),
                                awacs.aws.Action('ecs', 'Submit*'),
                                awacs.aws.Action('logs', 'CreateLogStream'),
                                awacs.aws.Action(
                                    'ecr', 'BatchCheckLayerAvailability'),
                                awacs.aws.Action('ecr', 'BatchGetImage'),
                                awacs.aws.Action('ecr',
                                                 'GetDownloadUrlForLayer'),
                                awacs.aws.Action('ecr',
                                                 'GetAuthorizationToken'),
                            ],
                            Resource=['*'],
                        ),
                    ], ),
                ),
            ],
        ))
    ecs_instance_profile = template.add_resource(
        InstanceProfile(
            'ECSInstanceProfile',
            Path='/',
            Roles=[Ref(ecs_role)],
        ))
    # ECSCluster
    ecs_cluster = template.add_resource(
        Cluster(
            'ECSCluster',
            ClusterName=Ref(env_name_param),
        ))
    # cfn-init metadata: registers the host with the cluster and configures
    # cfn-hup so metadata updates re-run cfn-init automatically.
    instance_metadata = Metadata(
        Init({
            'config':
            InitConfig(
                commands={
                    '01_add_instance_to_cluster': {
                        'command':
                        Join('', [
                            '#!/bin/bash\n', 'echo ECS_CLUSTER=',
                            Ref(ecs_cluster), ' >> /etc/ecs/ecs.config'
                        ])
                    },
                },
                files=InitFiles({
                    '/etc/cfn/cfn-hup.conf':
                    InitFile(
                        mode='000400',
                        owner='root',
                        group='root',
                        content=Join('', [
                            '[main]\n', 'stack=',
                            Ref('AWS::StackId'), '\n', 'region=',
                            Ref('AWS::Region'), '\n'
                        ]),
                    ),
                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                    InitFile(
                        mode='000400',
                        owner='root',
                        group='root',
                        content=Join('', [
                            '[cfn-auto-reloader-hook]\n',
                            'triggers=post.update\n',
                            'path=Resources.ContainerInstances.Metadata.AWS::CloudFormation::Init\n'
                            'action=/opt/aws/bin/cfn-init -v --region ',
                            Ref('AWS::Region'), ' --stack ',
                            Ref('AWS::StackId'),
                            ' --resource ECSLaunchConfiguration\n'
                        ]),
                    )
                }),
                services=InitServices({
                    'cfn-hup':
                    InitService(enabled='true',
                                ensureRunning='true',
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ])
                }),
            )
        }))
    # Launch configuration: run cfn-init, then signal the ASG.
    ecs_launch_config = template.add_resource(
        LaunchConfiguration(
            'ECSLaunchConfiguration',
            ImageId=FindInMap('AWSRegionToAMI', Ref('AWS::Region'), 'AMI'),
            InstanceType=Ref(instance_type_param),
            SecurityGroups=[Ref(sg_param)],
            IamInstanceProfile=Ref(ecs_instance_profile),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum install -y aws-cfn-bootstrap\n',
                    '/opt/aws/bin/cfn-init -v --region ',
                    Ref('AWS::Region'),
                    ' --stack ',
                    Ref('AWS::StackName'),
                    ' --resource ECSLaunchConfiguration\n',
                    '/opt/aws/bin/cfn-signal -e $? --region ',
                    Ref('AWS::Region'),
                    ' --stack ',
                    Ref('AWS::StackName'),
                    ' --resource ECSAutoScalingGroup\n',
                ])),
            Metadata=instance_metadata,
        ))
    # ECSAutoScalingGroup: fixed-size group (min == max == desired) with
    # rolling updates one instance at a time.
    template.add_resource(
        AutoScalingGroup(
            'ECSAutoScalingGroup',
            VPCZoneIdentifier=Ref(subnets_param),
            LaunchConfigurationName=Ref(ecs_launch_config),
            MinSize=Ref(cluster_size_param),
            MaxSize=Ref(cluster_size_param),
            DesiredCapacity=Ref(cluster_size_param),
            Tags=ASTags(Name=(Sub('${EnvironmentName} ECS host'), True)),
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Timeout='PT15M'), ),
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MinInstancesInService='1',
                    MaxBatchSize='1',
                    PauseTime='PT15M',
                    WaitOnResourceSignals=True,
                )),
        ))
    # Output
    template.add_output(
        Output(
            'Cluster',
            Description='A reference to the ECS cluster',
            Value=Ref(ecs_cluster),
        ))
    print(template.to_json())
def __init__(self):
    """Build the ec2.template test fixture: an EC2 instance plus an IAM
    role/profile granting it full access to a parameterized S3 bucket,
    with default test-parameter values registered for each parameter.
    """
    self.name = 'ec2.template'
    self.template = Template()
    self.template.add_version("2010-09-09")
    # Test harness: groups of parameter values this template is tested with.
    self.test_parameter_groups = TestParameterGroups()
    default_test_params = TestParameterGroup()
    self.test_parameter_groups.add(default_test_params)
    Environment = self.template.add_parameter(Parameter(
        "Environment",
        Default="Development",
        Type="String",
        Description="Application environment",
        AllowedValues=["Development", "Integration",
                       "PreProduction", "Production", "Staging", "Test"],
    ))
    default_test_params.add(TestParameter("Environment", "Integration"))
    Bucket = self.template.add_parameter(Parameter(
        "S3Bucket",
        Type="String",
        Description="S3 Bucket",
    ))
    # Test value resolves to the Arn of an S3Bucket fixture.
    default_test_params.add(TestParameter("S3Bucket", "Arn", S3Bucket()))
    ImageId = self.template.add_parameter(Parameter(
        "ImageId",
        Type="String",
        Description="Image Id"
    ))
    default_test_params.add(TestParameter("ImageId", "ami-6869aa05"))
    # The instance under test.
    self.template.add_resource(Instance(
        "EC2Instance",
        Tags=Tags(
            Name=Ref("AWS::StackName"),
            ServiceProvider="Rackspace",
            Environment=Ref(Environment),
        ),
        InstanceType="t2.small",
        ImageId=Ref(ImageId),
    ))
    # Inline policy granting the instance full access to the test bucket.
    EC2Policy = Policy(
        PolicyName="EC2_S3_Access",
        PolicyDocument={
            "Statement": [{
                "Effect": "Allow",
                "Action": "s3:*",
                "Resource": Ref(Bucket)
            }]
        })
    EC2InstanceRole = self.template.add_resource(Role(
        "EC2InstanceRole",
        AssumeRolePolicyDocument={
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": ["ec2.amazonaws.com"]
                },
                "Action": ["sts:AssumeRole"]
            }]
        },
        Path="/",
        Policies=[EC2Policy],
    ))
    # Profile linking the role to EC2 instances.
    # NOTE(review): the EC2Instance above does not reference this profile
    # via IamInstanceProfile — verify whether that wiring is intentional.
    self.template.add_resource(InstanceProfile(
        "EC2InstanceProfile",
        Path="/",
        Roles=[Ref(EC2InstanceRole)]
    ))
# Build the full application template: secret/API-key parameters, a
# pay-per-request DynamoDB table (retained on delete), a nested website
# stack, one shared Lambda execution role, and four Lambda functions
# (Smite API, Twitter API, table exporter, update checker) each with a
# dedicated log group. Returns the assembled troposphere Template.
# NOTE(review): the role carries several AWS FullAccess managed policies —
# broader than least-privilege; confirm this is acceptable for this stack.
def create_template(website_template, website_parameters): template = Template() runtime = template.add_parameter( Parameter("LambdaRuntime", Default="python3.7", Type="String")) smite_developer_id = template.add_parameter( Parameter("SmiteDeveloperId", Type="String")) smite_auth_key = template.add_parameter( Parameter("SmiteAuthKey", Type="String", NoEcho=True)) twitter_consumer_key = template.add_parameter( Parameter("TwitterConsumerKey", Type="String")) twitter_consumer_secret = template.add_parameter( Parameter("TwitterConsumerSecret", Type="String", NoEcho=True)) twitter_access_key = template.add_parameter( Parameter("TwitterAccessKey", Type="String")) twitter_access_secret = template.add_parameter( Parameter("TwitterAccessSecret", Type="String", NoEcho=True)) table = template.add_resource( Table( "StorageTable", AttributeDefinitions=[ AttributeDefinition(AttributeName="key", AttributeType="N") ], KeySchema=[KeySchema(AttributeName="key", KeyType="HASH")], BillingMode="PAY_PER_REQUEST", DeletionPolicy="Retain", )) website = template.add_resource( Stack("Website", TemplateURL=website_template, Parameters=website_parameters)) role = template.add_resource( Role( "LambdaRole", AssumeRolePolicyDocument=get_lambda_assumerole_policy(), ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess", "arn:aws:iam::aws:policy/AmazonS3FullAccess", "arn:aws:iam::aws:policy/AWSLambdaFullAccess", ], )) smite_api_function = template.add_resource( Function( "SmiteApiFunction", Code=Code(ZipFile=inspect.getsource(smite)), Handler="index.handler", MemorySize=256, Timeout=30, Runtime=Ref(runtime), Role=GetAtt(role, "Arn"), Environment=Environment( Variables={ smite.Config.SMITE_DEVELOPER_ID.name: Ref( smite_developer_id), smite.Config.SMITE_AUTH_KEY.name: Ref(smite_auth_key), }), )) smite_api_logs = template.add_resource( log_group_for_function(smite_api_function)) twitter_api_function = 
# Twitter API Lambda (source inlined via inspect.getsource) plus the table
# exporter (which extracts the bucket name from the website stack's
# ContentBucketArn output via Split/Select) and the update-check function
# wired to the ARNs of the other three Lambdas.
template.add_resource( Function( "TwitterApiFunction", Code=Code(ZipFile=inspect.getsource(twitter)), Handler="index.handler", MemorySize=256, Timeout=30, Runtime=Ref(runtime), Role=GetAtt(role, "Arn"), Environment=Environment( Variables={ twitter.Config.TWITTER_CONSUMER_KEY.name: Ref(twitter_consumer_key), twitter.Config.TWITTER_CONSUMER_SECRET.name: Ref(twitter_consumer_secret), twitter.Config.TWITTER_ACCESS_KEY.name: Ref(twitter_access_key), twitter.Config.TWITTER_ACCESS_SECRET.name: Ref(twitter_access_secret), }), )) twitter_api_logs = template.add_resource( log_group_for_function(twitter_api_function)) table_export_function = template.add_resource( Function( "TableExportFunction", Code=Code(ZipFile=packmodule.pack(inspect.getsource(exporter))), Handler="index.handler", MemorySize=512, Timeout=30, Runtime=Ref(runtime), Role=GetAtt(role, "Arn"), Environment=Environment( Variables={ exporter.Config.DDB_TABLE_NAME.name: Ref(table), exporter.Config.S3_BUCKET_NAME.name: Select( 5, Split(":", GetAtt(website, "Outputs.ContentBucketArn"))), exporter.Config.SMITE_API_LAMBDA_ARN.name: GetAtt(smite_api_function, "Arn"), }), )) table_export_logs = template.add_resource( log_group_for_function(table_export_function)) update_check_function = template.add_resource( Function( "UpdateCheckFunction", Code=Code(ZipFile=inspect.getsource(updater)), Handler="index.handler", MemorySize=256, Timeout=30, Runtime=Ref(runtime), Role=GetAtt(role, "Arn"), Environment=Environment( Variables={ updater.Config.TWITTER_API_LAMBDA_ARN.name: GetAtt(twitter_api_function, "Arn"), updater.Config.SMITE_API_LAMBDA_ARN.name: GetAtt(smite_api_function, "Arn"), updater.Config.TABLE_EXPORT_LAMBDA_ARN.name: GetAtt(table_export_function, "Arn"), updater.Config.DDB_TABLE_NAME.name: Ref(table), }), )) update_check_logs = template.add_resource( log_group_for_function(update_check_function)) update_check_rule = template.add_resource( Rule( "UpdateCheckRule", ScheduleExpression="rate(5 minutes)", Targets=[ 
# Five-minute schedule rule targeting the update checker; DependsOn forces
# log groups to exist first. The Permission lets EventBridge invoke it.
Target( Id=Ref(update_check_function), Arn=GetAtt(update_check_function, "Arn"), ) ], DependsOn=[ update_check_logs, table_export_logs, smite_api_logs, twitter_api_logs, ], )) template.add_resource( Permission( "UpdateCheckPermission", Action="lambda:InvokeFunction", FunctionName=Ref(update_check_function), SourceArn=GetAtt(update_check_rule, "Arn"), Principal="events.amazonaws.com", )) return template
# FRAGMENT: the leading ") ))" closes a resource begun before this view,
# and the trailing Pipeline("PortfolioPipeline", call continues past it.
# Visible here: a CodePipeline service role with an inline policy granting
# broad cloudformation/codedeploy/codepipeline/iam/s3 access.
# NOTE(review): "iam:*" on Resource "*" is highly privileged — confirm.
) )) template.add_resource(Role( "PortfolioPipelineRole", AssumeRolePolicyDocument=Policy( Statement=[ Statement( Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["codepipeline.amazonaws.com"]) ) ] ), Policies=[ IAMPolicy( PolicyName="PortfolioCodePipeline", PolicyDocument={ "Statement": [ {"Effect": "Allow", "Action": "cloudformation:*", "Resource": "*"}, {"Effect": "Allow", "Action": "codedeploy:*", "Resource": "*"}, {"Effect": "Allow", "Action": "codepipeline:*", "Resource": "*"}, {"Effect": "Allow", "Action": "iam:*", "Resource": "*"}, {"Effect": "Allow", "Action": "s3:*", "Resource": "*"} ] } ) ] )) template.add_resource(Pipeline( "PortfolioPipeline",
# Build an "AWS Exploitation Lab" template: key/SSH-CIDR/instance-type
# parameters, an EC2 role (ReadOnlyAccess) and a privileged variant that
# can also create/modify EC2 snapshots, a 10.0.0.0/16 VPC with one public
# and one private subnet, and the routing/gateway plumbing.
# NOTE(review): AllowedPattern="(\d{1,3})..." is a non-raw string — the \d
# escapes trigger invalid-escape-sequence warnings on modern Python;
# should be a raw string r"..." (comment only here; no code change).
def generate_template(service_name): t = Template() t.add_version('2010-09-09') t.add_description("""\ AWS CloudFormation Template for AWS Exploitation Lab """) t.add_mapping("PublicRegionMap", ami_public_mapping) t.add_mapping("PrivateRegionMap", ami_private_mapping) keyname_param = t.add_parameter( Parameter( 'KeyName', ConstraintDescription= 'must be the name of an existing EC2 KeyPair.', Description= 'Name of an existing EC2 KeyPair to enable SSH access to \ the instance', Type='AWS::EC2::KeyPair::KeyName', )) sshlocation_param = t.add_parameter( Parameter( 'SSHLocation', Description= ' The IP address range that can be used to SSH to the EC2 \ instances', Type='String', MinLength='9', MaxLength='18', Default='0.0.0.0/0', AllowedPattern= "(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."), )) instanceType_param = t.add_parameter( Parameter( 'InstanceType', Type='String', Description='WebServer EC2 instance type', Default='t2.micro', AllowedValues=[ 't2.micro', 't2.small', 't2.medium', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', ], ConstraintDescription='must be a valid EC2 instance type.', )) ref_stack_id = Ref('AWS::StackId') ec2_role = t.add_resource( Role("%sEC2Role" % service_name, AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Action=[awacs.aws.Action("sts", "AssumeRole")], Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])) ]))) ec2_role.ManagedPolicyArns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] ec2_snapshot_policy_document = awacs.aws.Policy(Statement=[ awacs.aws.Statement(Sid="PermitEC2Snapshots", Effect=awacs.aws.Allow, Action=[ awacs.aws.Action("ec2", "CreateSnapshot"), awacs.aws.Action("ec2", "ModifySnapshotAttribute"), ], Resource=["*"]) ]) ec2_snapshot_policy = Policy(PolicyName="EC2SnapshotPermissions", PolicyDocument=ec2_snapshot_policy_document) priv_ec2_role = t.add_resource( 
# Privileged EC2 role (snapshot policy + ReadOnlyAccess), instance
# profiles for both roles, VPC + public/private subnets, internet gateway,
# and a route table whose default route goes out through the gateway.
Role("%sPrivEC2Role" % service_name, AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Action=[awacs.aws.Action("sts", "AssumeRole")], Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"])) ]), Policies=[ec2_snapshot_policy])) priv_ec2_role.ManagedPolicyArns = [ "arn:aws:iam::aws:policy/ReadOnlyAccess" ] VPC_ref = t.add_resource( VPC('VPC', CidrBlock='10.0.0.0/16', Tags=Tags(Application=ref_stack_id))) instanceProfile = t.add_resource( InstanceProfile("InstanceProfile", InstanceProfileName="%sInstanceRole" % (service_name), Roles=[Ref(ec2_role)])) privInstanceProfile = t.add_resource( InstanceProfile("PrivInstanceProfile", InstanceProfileName="%sPrivInstanceRole" % (service_name), Roles=[Ref(priv_ec2_role)])) public_subnet = t.add_resource( Subnet('%sSubnetPublic' % service_name, MapPublicIpOnLaunch=True, CidrBlock='10.0.1.0/24', VpcId=Ref(VPC_ref), Tags=Tags(Application=ref_stack_id, Name="%sSubnet_public" % (service_name)))) private_subnet = t.add_resource( Subnet('%sSubnetPrivate' % service_name, MapPublicIpOnLaunch=False, CidrBlock='10.0.2.0/24', VpcId=Ref(VPC_ref), Tags=Tags(Application=ref_stack_id, Name="%sSubnet_private" % (service_name)))) internetGateway = t.add_resource( InternetGateway('InternetGateway', Tags=Tags(Application=ref_stack_id, Name="%sInternetGateway" % service_name))) gatewayAttachment = t.add_resource( VPCGatewayAttachment('AttachGateway', VpcId=Ref(VPC_ref), InternetGatewayId=Ref(internetGateway))) routeTable = t.add_resource( RouteTable('RouteTable', VpcId=Ref(VPC_ref), Tags=Tags(Application=ref_stack_id, Name="%sRouteTable" % service_name))) route = t.add_resource( Route( 'Route', DependsOn='AttachGateway', GatewayId=Ref('InternetGateway'), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(routeTable), )) # Only associate this Route Table with the public subnet subnetRouteTableAssociation = t.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTableAssociation', 
# Security group (SSH restricted to SSHLocation; 80/1080/443 open; all TCP
# within 10.0.0.0/8), then a public instance (public IP, public userdata)
# and a private instance attached to the privileged instance profile.
SubnetId=Ref(public_subnet), RouteTableId=Ref(routeTable), )) instanceSecurityGroup = t.add_resource( SecurityGroup( 'InstanceSecurityGroup', GroupDescription='%sSecurityGroup' % service_name, SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='1080', ToPort='1080', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='443', ToPort='443', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='0', ToPort='65535', CidrIp="10.0.0.0/8"), ], VpcId=Ref(VPC_ref), )) public_instance = t.add_resource( Instance( "Public%sInstance" % service_name, ImageId=FindInMap("PublicRegionMap", Ref("AWS::Region"), "AMI"), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(public_subnet)) ], UserData=Base64(public_instance_userdata), Tags=Tags(Application=ref_stack_id, Name='%sPublicInstance' % (service_name)))) private_instance = t.add_resource( Instance( "Private%sInstance" % service_name, ImageId=FindInMap("PrivateRegionMap", Ref("AWS::Region"), "AMI"), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(private_subnet)) ], UserData=Base64(private_instance_userdata), Tags=Tags(Application=ref_stack_id, Name='%sPrivateInstance' % (service_name)), IamInstanceProfile="%sPrivInstanceRole" % (service_name))) outputs = [] outputs.append( Output( "PublicIP", Description="IP Address of Public Instance", Value=GetAtt(public_instance, "PublicIp"), )) t.add_output(outputs) # Set up S3 Bucket and CloudTrail S3Bucket = 
# CloudTrail log bucket (Retain on delete) with a bucket policy letting
# cloudtrail.amazonaws.com PutObject / GetBucketAcl, then a multi-region
# trail (global service events included) that depends on the policy.
# Returns the template as JSON text.
t.add_resource(Bucket("S3Bucket", DeletionPolicy="Retain")) S3PolicyDocument = awacs.aws.PolicyDocument( Id='EnforceServersideEncryption', Version='2012-10-17', Statement=[ awacs.aws.Statement( Sid='PermitCTBucketPut', Action=[s3.PutObject], Effect=awacs.aws.Allow, Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[Join('', [s3.ARN(''), Ref(S3Bucket), "/*"])], ), awacs.aws.Statement( Sid='PermitCTBucketACLRead', Action=[s3.GetBucketAcl], Effect=awacs.aws.Allow, Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[Join('', [s3.ARN(''), Ref(S3Bucket)])], ) ]) S3BucketPolicy = t.add_resource( BucketPolicy("BucketPolicy", PolicyDocument=S3PolicyDocument, Bucket=Ref(S3Bucket), DependsOn=[S3Bucket])) myTrail = t.add_resource( Trail( "CloudTrail", IsLogging=True, S3BucketName=Ref(S3Bucket), DependsOn=["BucketPolicy"], )) myTrail.IsMultiRegionTrail = True myTrail.IncludeGlobalServiceEvents = True return t.to_json()
def init_cloud(args):
    """Build the CloudFormation template for the Kala stack.

    Creates an SQS queue, an S3 bucket, EC2 security groups (HTTP/HTTPS in,
    plus Postgres access from the app group), an encrypted Postgres RDS
    instance, and an IAM role + instance profile granting the instance full
    access to the bucket and the queue.

    :param args: parsed CLI namespace providing sqs_name, s3_name,
        kala_security_group, database_security_group, rds_* settings,
        production (bool, drives MultiAZ) and iam_role.
    :return: the assembled troposphere ``Template``.
    """
    template = Template()

    queue = template.add_resource(
        Queue(
            "{0}".format(args.sqs_name),
            QueueName="{0}".format(args.sqs_name),
        ))

    bucket = template.add_resource(
        Bucket("{0}".format(args.s3_name),
               BucketName="{0}".format(args.s3_name)))

    # App-facing group: HTTP/HTTPS open to the world.
    kala_security_group = template.add_resource(
        ec2.SecurityGroup(
            "{0}".format(args.kala_security_group),
            GroupName="{0}".format(args.kala_security_group),
            GroupDescription="Enable HTTP and HTTPS access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="443",
                    ToPort="443",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    # Database group: Postgres reachable only from the app group.
    database_security_group = template.add_resource(
        ec2.SecurityGroup(
            "{0}".format(args.database_security_group),
            GroupName="{0}".format(args.database_security_group),
            GroupDescription="Enable Database access for the security groups",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="5432",
                    ToPort="5432",
                    SourceSecurityGroupName=Ref(kala_security_group),
                ),
            ]))

    database = template.add_resource(
        rds.DBInstance(
            "{0}".format(args.rds_instance_name),
            DBInstanceIdentifier="{0}".format(args.rds_instance_name),
            DBName=args.rds_name,
            # BUG FIX: these previously read "******".format(...), which
            # ignores its argument and sets the credential to the literal
            # string "******". Use the supplied CLI values instead.
            MasterUsername="{0}".format(args.rds_username),
            MasterUserPassword="{0}".format(args.rds_password),
            AllocatedStorage=args.rds_allocated_storage,
            DBInstanceClass=args.rds_instance_class,
            Engine="postgres",
            MultiAZ=args.production,
            StorageEncrypted=True,
            VPCSecurityGroups=[GetAtt(database_security_group, "GroupId")]))

    s3_policy = PolicyDocument(
        Version="2012-10-17",
        Id="{0}Policy".format(args.s3_name),
        Statement=[
            Statement(Effect="Allow",
                      Action=[S3Action("*")],
                      Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])]),
        ])
    # BUG FIX: the SQS policy Id previously reused args.s3_name
    # (copy/paste); derive it from the queue name.
    sqs_policy = PolicyDocument(
        Version="2012-10-17",
        Id="{0}Policy".format(args.sqs_name),
        Statement=[
            Statement(Effect="Allow",
                      Action=[SQSAction("*")],
                      Resource=[GetAtt(queue, "Arn")])
        ])

    # EC2-assumable role carrying both inline policies, exposed to
    # instances through an instance profile.
    role = Role(
        '{0}Role'.format(args.iam_role),
        RoleName='{0}Role'.format(args.iam_role),
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Action": "sts:AssumeRole",
                "Effect": "Allow",
                "Principal": {
                    "Service": "ec2.amazonaws.com"
                }
            }]
        },
        Policies=[
            Policy(PolicyName="KalaS3Policy", PolicyDocument=s3_policy),
            Policy(PolicyName="KalaSQSPolicy", PolicyDocument=sqs_policy)
        ])
    template.add_resource(role)
    template.add_resource(
        InstanceProfile("{0}InstanceProfile".format(args.iam_role),
                        Roles=[Ref(role)],
                        InstanceProfileName="{0}InstanceProfile".format(
                            args.iam_role)))
    return template
def build_codebuild_role(config,
                         template=None,
                         project_name: str = None,
                         **kwargs) -> Ref:
    """
    Build a role with a CodeBuild managed policy.

    Adds an IAM role that codebuild.amazonaws.com can assume, with inline
    policies for S3 artifact storage/retrieval, CloudWatch log writing
    (scoped to this project's CodeBuild log group), and CodeCommit
    read/pull access. The role's logical name is also exported as a
    template Output.

    :param config: config object exposing ``get(section, key)``; must
        provide Global/aws_region and CFNRole/account_number.
    :param template: troposphere Template to add resources to; a fresh one
        is created when omitted. (Previously this was a shared mutable
        default ``Template()`` — every no-arg call accumulated into the
        same module-level object.)
    :param project_name: required; used for the role name and log-group ARN.
    :return: ``Ref`` to the created Role resource.
    """
    assert project_name
    if template is None:
        template = Template()
    role_name = project_name + 'Role'
    region = config.get("Global", "aws_region")
    account_number = config.get("CFNRole", "account_number")

    # Create a policy to Allow CodeBuild to write to s3 for Artifact
    # storage/retrieval. This should be an AWS Managed Policy, but here we are.
    policies = [
        Policy(
            # FIX: was f"CodeBuildArtifactPolicy" — an f-string with no
            # placeholders (lint F541); a plain literal is identical.
            PolicyName="CodeBuildArtifactPolicy",
            PolicyDocument=PolicyDocument(Statement=[
                Statement(Effect=Allow,
                          Action=[
                              Action("s3", "PutObject"),
                              Action("s3", "GetObject"),
                              Action("s3", "GetObjectVersion"),
                              Action("s3", "GetBucketAcl"),
                              Action("s3", "GetBucketLocation")
                          ],
                          Resource=[
                              "arn:aws:s3:::s2n-build-artifacts/*",
                          ]),
                # Logs restricted to this project's CodeBuild log group.
                Statement(
                    Effect=Allow,
                    Action=[
                        Action("logs", "CreateLogGroup"),
                        Action("logs", "CreateLogStream"),
                        Action("logs", "PutLogEvents")
                    ],
                    Resource=[
                        "arn:aws:logs:{region}:{account_number}:log-group:/aws/codebuild/{project}:*"
                        .format(region=region,
                                account_number=account_number,
                                project=project_name),
                    ]),
                # Read-only CodeCommit access plus GitPull for source cloning.
                Statement(
                    Effect=Allow,
                    Action=[
                        Action("codecommit", "BatchGet*"),
                        Action("codecommit", "BatchDescribe*"),
                        Action("codecommit", "Describe*"),
                        Action("codecommit",
                               "EvaluatePullRequestApprovalRules"),
                        Action("codecommit", "Get*"),
                        Action("codecommit", "List*"),
                        Action("codecommit", "GitPull"),
                    ],
                    Resource=["*"],
                ),
            ]))
    ]

    # NOTE: By default CodeBuild manages the policies for this role. If you
    # delete a CFN stack and try to recreate the project or make changes to
    # it when the CodeBuild managed Policy still exists, you'll see an error
    # in the UI: `The policy is attached to 0 entities but it must be
    # attached to a single role`. (CFN fails with fail to update.) Orphaned
    # policies created by CodeBuild will have CodeBuildBasePolicy prepended
    # to them; search for policies with this name and no role and delete to
    # clear the error.
    role_id = template.add_resource(
        Role(role_name,
             Path='/',
             Description='Policy created by CloudFormation.',
             Policies=policies,
             AssumeRolePolicyDocument=PolicyDocument(Statement=[
                 Statement(Effect=Allow,
                           Action=[AssumeRole],
                           Principal=Principal("Service",
                                               ["codebuild.amazonaws.com"]))
             ])))
    template.add_output([Output(role_name, Value=Ref(role_id))])
    return Ref(role_id)
# FRAGMENT: Role(...) expression whose enclosing call/assignment starts
# before this view. A Lambda execution role (DynamoDB stream + read-only +
# VPC-access managed policies) with an inline policy covering the work S3
# buckets, Redshift describe/credentials actions, ENI management for VPC
# Lambdas, logs:*, and dynamodb:* on the pVideoDashboard table's ARN.
Role( "pVideoDashboardLambdaRole" + suffixcf, RoleName="pVideoDashboardLambdaRole" + suffixcf, ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole", "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" ], Path="/", Policies=[ Policy( "pVideoDashboardPolicy" + suffixcf, PolicyName="pVideoDashboardPolicy" + suffixcf, PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["s3:*"], "Effect": "Allow", "Resource": [ Join("", ["arn:aws:s3:::", s3_bucket_name, "/*"]), Join("", [ "arn:aws:s3:::", s3_bucket_name + suffix_work, "/*" ]) ] }, { "Effect": "Allow", "Action": [ "redshift:DescribeHsmConfigurations", "redshift:DescribeClusterSecurityGroups", "redshift:DescribeEventSubscriptions", "redshift:DescribeOrderableClusterOptions", "redshift:DescribeEvents", "redshift:DescribeHsmClientCertificates", "redshift:ViewQueriesInConsole", "redshift:DescribeTags", "redshift:DescribeClusterParameterGroups", "redshift:DescribeDefaultClusterParameters", "redshift:DescribeEventCategories", "redshift:DescribeClusterSubnetGroups", "redshift:DescribeReservedNodeOfferings", "redshift:DescribeSnapshotCopyGrants", "redshift:DescribeReservedNodes", "redshift:DescribeClusterVersions", "redshift:DescribeClusterSnapshots", "redshift:DescribeClusters", "redshift:DescribeResize", "redshift:DescribeLoggingStatus", "redshift:GetClusterCredentials", "redshift:DescribeTableRestoreStatus", "redshift:DescribeClusterParameters" ], "Resource": ["*"] }, { "Effect": "Allow", "Action": [ "ec2:CreateNetworkInterface", "ec2:DescribeNetworkInterfaces", "ec2:DeleteNetworkInterface" ], "Resource": ["*"] }, { "Effect": "Allow", "Action": ["logs:*"], "Resource": ["*"] }, { "Effect": "Allow", "Action": ["dynamodb:*"], "Resource": [ Join("", [ "", GetAtt("pVideoDashboard" + suffixcf, "Arn") ]) ] }] }, ) ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": 
# Trust policy tail: lambda.amazonaws.com may sts:AssumeRole.
["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["lambda.amazonaws.com"] } }] }, ))
# FRAGMENT: Role(...) expression (enclosing call starts before this view).
# Shared CodePipeline/CodeBuild execution role: KMS decrypt on the project
# key, Lambda invoke on sageDispatch, S3 object access on the input/output/
# pipeline buckets, CodeCommit archive operations on the Repository, build
# start + ECR auth + iam:PassRole, ECR push/pull on the ML registry, and
# CloudWatch Logs writes. Trusts both codepipeline and codebuild services.
Role( "CodepipelineExecutionRole", Path="/", Policies=[ Policy( PolicyName="CodepipelineExecutionRole", PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["kms:Decrypt"], "Resource": GetAtt('projectkey', "Arn"), "Effect": "Allow" }, { "Action": ["lambda:listfunctions"], "Resource": "*", "Effect": "Allow" }, { "Action": ["lambda:invokefunction", "lambda:listfunctions"], "Resource": [GetAtt('sageDispatch', "Arn")], "Effect": "Allow" }, { "Action": [ "s3:ListBucket", "s3:GetBucketPolicy", "s3:GetObjectAcl", "s3:PutObjectAcl", "s3:DeleteObject", "s3:GetObject", "s3:PutObject", "s3:PutObjectTagging" ], "Resource": [ Join('', [GetAtt("InputBucket", "Arn"), "/*"]), Join('', [GetAtt("OutputBucket", "Arn"), "/*"]), Join('', [GetAtt("CodePipelineBucket", "Arn"), "/*"]) ], "Effect": "Allow" }, { "Action": [ "codecommit:CancelUploadArchive", "codecommit:GetBranch", "codecommit:GetCommit", "codecommit:GetUploadArchiveStatus", "codecommit:UploadArchive" ], "Resource": [GetAtt("Repository", "Arn")], "Effect": "Allow" }, { "Action": [ "codebuild:BatchGetBuilds", "codebuild:StartBuild", "ecr:GetAuthorizationToken", "iam:PassRole" ], "Resource": "*", "Effect": "Allow" }, { "Action": [ "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:PutImage", "ecr:InitiateLayerUpload", "ecr:UploadLayerPart", "ecr:CompleteLayerUpload" ], "Resource": Join('', [ 'arn:aws:ecr:', Ref('regionparameter'), ':', Ref('accountparameter'), ':repository/', Ref('mldockerregistrynameparameter') ]), "Effect": "Allow" }, { "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", "logs:DescribeLogStreams" ], "Resource": ["arn:aws:logs:*:*:*"], "Effect": "Allow" }] }) ], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": { "Service": ["codepipeline.amazonaws.com", "codebuild.amazonaws.com"] } }] }, ))
# FRAGMENT: opens with the tail of a Route resource begun before this
# view, and batch_service_role's Role(...) is truncated at the end.
# Complete here: ecs_instance_role — EC2-assumable, with the ECS-for-EC2
# service managed policy plus AmazonS3FullAccess, tagged with the
# service tag parameter.
GatewayId=Ref(internet_gateway), ) # ------------------------------------------------------------------------------ # Identity and Access Management (IAM) Resources # ------------------------------------------------------------------------------ ecs_instance_role = Role( title="ECSInstanceRole", Policies=[], ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role", "arn:aws:iam::aws:policy/AmazonS3FullAccess", ], AssumeRolePolicyDocument={ "Statement": [{ "Action": ["sts:AssumeRole"], "Effect": "Allow", "Sid": "", "Principal": { "Service": ["ec2.amazonaws.com"] } }] }, Tags=[Tag("service", Ref(aws_service_tag))], ) batch_service_role = Role( title="BatchServiceRole", Policies=[], ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole", ],
# FRAGMENT: tail of an ECS Cluster resource at the head; the trailing
# TaskDefinition continues past this view. Complete here: the HelloWorld
# task execution role — ecs-tasks trust (2008-10-17 policy version) with
# the standard AmazonECSTaskExecutionRolePolicy and a fixed RoleName.
Cluster("HelloWorldECSCluster", ClusterName=Join("-", [Ref(parameters["Project"]), "cluster"]))) ### ECS Task Definition IAM Role ### resources["HelloWorldTaskExecutionRole"] = template.add_resource( Role( "HelloWorldTaskExecutionRole", AssumeRolePolicyDocument={ "Version": "2008-10-17", "Statement": [{ "Sid": "", "Effect": "Allow", "Principal": { "Service": "ecs-tasks.amazonaws.com" }, "Action": "sts:AssumeRole" }] }, ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" ], Path="/", RoleName="HelloWorldTaskExecutionRole")) ### ECS Task Definition ### resources["HelloWorldTaskDef"] = template.add_resource( TaskDefinition("HelloWorldTaskDef", ContainerDefinitions=[
# FRAGMENT: Role(...) expression (enclosing call/assignment not visible).
# EC2-assumable role with two inline policies: s3:GetObject on the
# DeployBucket (via Sub substitution) and cloudwatch:PutMetricData on *.
Role( "EC2Role", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": [ "ec2.amazonaws.com" ] }, "Action": [ "sts:AssumeRole" ] } ] }, Path="/", Policies=[ Policy( PolicyName="s3-policy", PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": [ "s3:GetObject" ], "Resource": Sub("arn:aws:s3:::${DeployBucket}/*"), "Effect": "Allow" } ] }), Policy( PolicyName="root", PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:PutMetricData" ], "Resource": "*" } ] }) ] )
# FRAGMENT: leading ") )" closes a resource begun before this view.
# Visible: an instance profile referencing "EcsClusterRole" by logical
# name (declared just below it — legal in CFN, since Ref resolves at
# deploy time), the SSM-enabled EC2 role itself, and the final JSON dump.
) ) EC2InstanceProfile = t.add_resource( InstanceProfile( "EC2InstanceProfile", Path="/", Roles=[Ref("EcsClusterRole")], ) ) EcsClusterRole = t.add_resource( Role( "EcsClusterRole", Path="/", ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"], AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Action": "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect": "Allow", } ], }, ) ) print(t.to_json())
# Top-level script section for an Alexa Metrolink skill: Version/AppId
# parameters, a "Bootstrapping" condition (Version == "0") that switches
# the Lambda between a placeholder inline zip and a versioned S3 artifact,
# a code bucket, and a lambda-assumable execution role.
# NOTE(review): the role has no logging policy attached — the function
# will not be able to write CloudWatch logs; confirm this is intended.
template = Template() version = template.add_parameter( Parameter("Version", Type="String", Default="0")) app_id = template.add_parameter(Parameter("AlexaApplicationId", Type="String")) template.add_condition("Bootstrapping", Equals(Ref(version), "0")) bucket = template.add_resource(Bucket("MetrolinkSkillLambdaBucket", )) lambda_role = template.add_resource( Role( "MetrolinkSkillExecutionRole", Path='/', AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Principal=Principal('Service', ['lambda.amazonaws.com']), Action=[Action('sts', 'AssumeRole')]) ]))) skill = template.add_resource( Function("MetrolinkSkill", Code=If( "Bootstrapping", Code(ZipFile="placeholder"), Code(S3Bucket=Ref(bucket), S3Key=Join( '/', [Ref(version), 'alexa_metrolink_skill.zip']))), Environment=Environment(Variables={'ALEXA_APP_ID': Ref(app_id)}), Handler='alexa_metrolink_skill.handle_request', Role=GetAtt(lambda_role, 'Arn'), Runtime='python3.9'))
# FRAGMENT: Role(...) expression (enclosing call/assignment not visible).
# Lambda execution role whose inline policy grants: read/list on the
# shared-resources bucket (ARN parameter) and full object CRUD under it,
# logs + ENI management for VPC execution, Get/Put/Query/Update on the two
# company DynamoDB tables, and logs:* on all log ARNs.
Role( "Role", AssumeRolePolicyDocument=Policy( Version="2012-10-17", Statement=[ Statement(Effect=Allow, Action=[Action("sts", "AssumeRole")], Principal=Principal("Service", ["lambda.amazonaws.com"])) ]), Policies=[ iamPolicy( PolicyName="Policy", PolicyDocument=Policy(Statement=[ Statement( Effect=Allow, Action=[Action("s3", "Get*"), Action("s3", "List*")], Resource=[ Ref(shared_resources_bucket_arn), ]), Statement(Effect=Allow, Action=[ Action("logs", "CreateLogGroup"), Action("logs", "CreateLogStream"), Action("logs", "PutLogEvents"), Action("ec2", "CreateNetworkInterface"), Action("ec2", "DescribeNetworkInterfaces"), Action("ec2", "DeleteNetworkInterface") ], Resource=["*"]), Statement(Effect=Allow, Action=[ Action("s3", "Get*"), Action("s3", "List*"), Action("s3", "Put*"), Action("s3", "Delete*") ], Resource=[ Join( "", [Ref(shared_resources_bucket_arn), "/*"]) ]), Statement(Effect=Allow, Action=[GetItem, PutItem, Query, UpdateItem], Resource=[GetAtt(companyTable, "Arn")]), Statement(Effect=Allow, Action=[GetItem, PutItem, Query, UpdateItem], Resource=[GetAtt(companyTableAN, "Arn")]), Statement(Effect="Allow", Action=[Action("logs", "*")], Resource=["arn:aws:logs:*:*:*"]) ])) ]))