def generate_cf(self): """ Create Cloud Formation Template from user supplied or default config file :return json string """ ## read machine list from the config file ## machines = self._readConfig(self.configdata) if 'error' in machines: return machines template = Template() template.add_description( "%s: [%s]" % (self.owner,", ".join(self.machinelist)) ) ## convert the params into cloud formation instance object ## for subnet in machines: for mclass in machines[subnet]: machine = machines[subnet][mclass] instance = self._set_instance_value(machine,mclass,self.subnet[subnet]) template.add_resource(instance) intrecordset = self._set_internal_resource_record(mclass) template.add_resource(intrecordset) if subnet == 'public': pubrecordset = self._set_public_resource_record(mclass) template.add_resource(pubrecordset) ## this magic function turn it to jason formatted template ## return template.to_json(),self.envname
def _template_init(self):
    """Return a fresh troposphere Template pinned to the 2010-09-09 format version."""
    template = Template()
    template.add_version("2010-09-09")
    return template
class CloudFormationTemplate(CloudTemplate):
    """Renders self.source['instance_groups'] into a CloudFormation template
    with one EC2 instance per group plus the standard instance outputs."""

    def __init__(self):
        # Bug fix: super() must be given the class it is called from, not the
        # parent. super(CloudTemplate, self).__init__() looked up the MRO
        # *after* CloudTemplate, skipping CloudTemplate.__init__ entirely.
        super(CloudFormationTemplate, self).__init__()

    def generate(self):
        """Build the template and return it rendered as a JSON string.

        Side effect: self.template ends up holding the JSON string (not the
        Template object), preserving the original contract for callers.
        """
        self.template = Template()
        for instance in self.source['instance_groups']:
            image_id = instance['image_id']
            instance_type = instance['type']
            key_pair = instance['key_pair']
            name = instance['name']
            ec2_instance = self.template.add_resource(ec2.Instance(
                "Ec2Instance",
                ImageId=image_id,
                InstanceType=instance_type,
                KeyName=key_pair,
                SecurityGroups=[name],
                UserData=Base64("80")
            ))
            self.template.add_output([
                Output(
                    "InstanceId",
                    Description="InstanceId of the newly created EC2 instance",
                    Value=Ref(ec2_instance),
                ),
                Output(
                    "AZ",
                    Description="Availability Zone of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "AvailabilityZone"),
                ),
                Output(
                    "PublicIP",
                    Description="Public IP address of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                ),
                Output(
                    "PrivateIP",
                    Description="Private IP address of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PrivateIp"),
                ),
                Output(
                    "PublicDNS",
                    Description="Public DNSName of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PublicDnsName"),
                ),
                Output(
                    "PrivateDNS",
                    Description="Private DNSName of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PrivateDnsName"),
                ),
            ])
        self.template = self.template.to_json()
        return self.template
def main():
    """Build and print a template containing a single Postgres RDS leaf."""
    template = Template()
    db_settings = {
        'db_instance_type': 'db.t2.micro',
        'db_engine': 'postgres',
        'db_port': '5432',
        'db_name': 'myDb',
        'db_hdd_size': '5',
        'db_snapshot_id': None,
        'db_backup_window': None,
        'db_backup_retention': None,
        'db_maintenance_window': None,
        'db_storage_type': 'gp2',
        'owner': '*****@*****.**',
    }
    DatabaseLeaf(leaf_title='MyDb',
                 tree_name='tree',
                 template=template,
                 database_config=DatabaseConfig(**db_settings),
                 availability_zones=['ap-southeast-2a',
                                     'ap-southeast-2b',
                                     'ap-southeast-2c'])
    print(template.to_json(indent=2, separators=(',', ': ')))
def test_mutualexclusion(self):
    """Setting mutually exclusive properties must make to_json() raise ValueError."""
    template = Template()
    conflicting = FakeAWSObject('fake', callcorrect=True, singlelist=[10])
    template.add_resource(conflicting)
    with self.assertRaises(ValueError):
        template.to_json()
def to_cloudformation_template(self):
    """Render this object and each of its dollops into a troposphere Template."""
    result = Template()
    result.description = self.description
    for dollop in self.dollops:
        dollop.to_cloudformation_template(result)
    return result
def test_simple_table(self):
    """A SimpleTable with no optional properties should serialize cleanly."""
    template = Template()
    template.add_resource(SimpleTable("SomeTable"))
    template.to_json()
def test_s3_filter(self):
    """A Function with an S3 event using prefix/suffix key filters serializes cleanly."""
    upload_event = S3Event(
        'FileUpload',
        Bucket="bucket",
        Events=['s3:ObjectCreated:*'],
        Filter=Filter(S3Key=S3Key(
            Rules=[
                Rules(Name="prefix", Value="upload/"),
                Rules(Name="suffix", Value=".txt"),
            ],
        )),
    )
    processor = Function(
        "ProcessorFunction",
        Handler='process_file.handler',
        CodeUri='.',
        Runtime='python3.6',
        Policies='AmazonS3FullAccess',
        Events={'FileUpload': upload_event},
    )
    template = Template()
    template.add_resource(processor)
    template.to_json()
def main():
    """Build and print a template containing a single scheduled test Lambda leaf."""
    template = Template()
    lambda_settings = {
        'lambda_s3_bucket': 'smallest-bucket-in-history',
        'lambda_s3_key': 'test_lambda.zip',
        'lambda_description': 'test function',
        'lambda_function_name': 'test_lambda',
        'lambda_handler': 'test_lambda.lambda_handler',
        'lambda_memory_size': 128,
        'lambda_role_arn': 'arn:aws:iam::123456789:role/lambda_basic_vpc_execution_with_s3',
        'lambda_runtime': 'python2.7',
        'lambda_timeout': 1,
        'lambda_schedule': 'rate(5 minutes)',
    }
    # Test Lambda
    LambdaLeaf(leaf_title='MyLambda',
               template=template,
               dependencies=['app1:80'],
               lambda_config=LambdaConfig(**lambda_settings),
               availability_zones=['ap-southeast-2a',
                                   'ap-southeast-2b',
                                   'ap-southeast-2c'],
               public_cidr={'name': 'PublicIp', 'cidr': '0.0.0.0/0'},
               tree_name='tree')
    print(template.to_json(indent=2, separators=(',', ': ')))
def test_add_or_get_returns_with_out_adding_duplicate(self):
    """get_or_add_parameter must return the existing parameter without duplicating it."""
    t = Template()
    p = Parameter("param", Type="String", Default="foo")
    t.add_parameter(p)
    result = t.get_or_add_parameter(p)
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(t.parameters["param"], p)
    self.assertEqual(result, p)
    self.assertEqual(len(t.parameters), 1)
def test_s3_bucket_accelerate_configuration(self):
    """The AccelerationStatus setting must appear in the rendered bucket JSON."""
    template = Template()
    template.add_resource(
        Bucket("s3Bucket",
               AccelerateConfiguration=AccelerateConfiguration(
                   AccelerationStatus="Enabled")))
    rendered = template.to_json()
    self.assertIn('"AccelerationStatus": "Enabled"', rendered)
def create_services(name='services', sns_name='cfSns', sqs_name='cfSqs'):
    """Assemble and return the microservices stack (SNS topic + SQS queue pair)."""
    template = Template()
    template.add_description("microservices stack")
    create_sns_sqs(template, sns_name + name, sqs_name + name)
    return template
def test_api_no_definition(self):
    """An Api carrying only StageName (no definition) still serializes."""
    template = Template()
    template.add_resource(Api("SomeApi", StageName='test'))
    template.to_json()
def test_no_required(self):
    """Serializing a Stack that lacks its required properties raises ValueError."""
    template = Template()
    template.add_resource(Stack("mystack"))
    with self.assertRaises(ValueError):
        template.to_json()
def main():
    """
    Create a troposphere template holding a single S3 bucket and its
    associated CloudTrail, then print the rendered JSON.
    """
    cf_template = Template()
    Cloudtrail('MyCloud', cf_template)
    rendered = cf_template.to_json(indent=2, separators=(',', ': '))
    print(rendered)
def __init__(self):
    """Initialize the base template, then force every section to an OrderedDict
    so that rendered output preserves insertion order."""
    TropoTemplate.__init__(self)
    self.ordered_resources = OrderedDict()
    for section in ("metadata", "conditions", "mappings",
                    "outputs", "parameters", "resources"):
        setattr(self, section, OrderedDict())
def test_required_api_definitionuri(self):
    """An Api whose definition comes from DefinitionUri serializes cleanly."""
    template = Template()
    template.add_resource(
        Api("SomeApi",
            StageName='test',
            DefinitionUri='s3://bucket/swagger.yml'))
    template.to_json()
def test_required_api_definitionbody(self):
    """An Api whose definition comes from an inline DefinitionBody serializes cleanly."""
    template = Template()
    template.add_resource(
        Api("SomeApi",
            StageName='test',
            DefinitionBody=self.swagger))
    template.to_json()
def test_required(self):
    """A Stack carrying all required properties serializes without error."""
    template = Template()
    template.add_resource(Stack(
        "mystack",
        DefaultInstanceProfileArn="instancearn",
        Name="myopsworksname",
        ServiceRoleArn="arn",
    ))
    template.to_json()
def test_ne(self):
    """Templates differing in description, metadata, resources, and outputs compare unequal."""
    def build(tag):
        # Build a template whose every section is tagged with the suffix.
        tmpl = Template(Description='foo' + tag, Metadata='bar' + tag)
        tmpl.add_resource(Bucket('Baz' + tag))
        tmpl.add_output(Output('qux' + tag, Value='qux' + tag))
        return tmpl

    self.assertNotEqual(build('1'), build('2'))
def test_required_function(self):
    """A serverless Function with Handler, Runtime, and CodeUri serializes cleanly."""
    template = Template()
    template.add_resource(Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
    ))
    template.to_json()
def sceptre_handler(sceptre_user_data):
    """Build a VPC with an attached internet gateway and return the stack JSON."""
    template = Template()

    cidr_param = template.add_parameter(Parameter(
        "CidrBlock",
        Type="String",
        Default="10.0.0.0/16",
    ))

    new_vpc = template.add_resource(VPC(
        "VirtualPrivateCloud",
        CidrBlock=Ref(cidr_param),
        InstanceTenancy="default",
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
    ))

    gateway = template.add_resource(InternetGateway("InternetGateway"))
    template.add_resource(VPCGatewayAttachment(
        "IGWAttachment",
        VpcId=Ref(new_vpc),
        InternetGatewayId=Ref(gateway),
    ))

    template.add_output(Output(
        "VpcId",
        Description="New VPC ID",
        Value=Ref(new_vpc),
    ))

    return template.to_json()
def test_optional_auto_publish_alias(self):
    """The optional AutoPublishAlias property is accepted and serializes cleanly."""
    template = Template()
    template.add_resource(Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
        AutoPublishAlias="alias",
    ))
    template.to_json()
def main():
    """Build a three-AZ VPC with one public and one private subnet per AZ, then print it."""
    template = Template()
    zones = ['ap-southeast-2a', 'ap-southeast-2b', 'ap-southeast-2c']

    vpc = Ref(template.add_resource(ec2.VPC('MyVPC', CidrBlock='10.0.0.0/16')))
    public_rt = template.add_resource(
        ec2.RouteTable('MyUnitPublicRouteTable', VpcId=vpc))
    private_rt = template.add_resource(
        ec2.RouteTable('MyUnitPrivateRouteTable', VpcId=vpc))

    # Public subnets use 10.0.1-3.0/24; private subnets use 10.0.101-103.0/24.
    public_subnets = [
        Subnet(template=template, route_table=public_rt, az=az,
               cidr='10.0.{}.0/24'.format(idx), vpc=vpc, is_public=True)
        for idx, az in enumerate(zones, start=1)
    ]
    private_subnets = [
        Subnet(template=template, route_table=private_rt, az=az,
               cidr='10.0.{}.0/24'.format(100 + idx), vpc=vpc, is_public=False)
        for idx, az in enumerate(zones, start=1)
    ]

    print(template.to_json(indent=2, separators=(',', ': ')))
def test_required_api_both(self):
    """Supplying both DefinitionUri and DefinitionBody is invalid and must raise."""
    template = Template()
    template.add_resource(Api(
        "SomeApi",
        StageName='test',
        DefinitionUri='s3://bucket/swagger.yml',
        DefinitionBody=self.swagger,
    ))
    with self.assertRaises(ValueError):
        template.to_json()
def main():
    """
    Create a troposphere template holding one DynamoDB table keyed on
    Name (HASH) and Version (RANGE), then print the rendered JSON.
    """
    template = Template()
    attributes = [
        {'ddb_name': 'Name', 'ddb_atttype': 'S', 'ddb_keytype': 'HASH'},
        {'ddb_name': 'Version', 'ddb_atttype': 'S', 'ddb_keytype': 'RANGE'},
    ]
    DynamoDB(ddb_title='MyDynamoDB',
             ddb_att_dict=attributes,
             template=template)
    print(template.to_json(indent=2, separators=(',', ': ')))
def test_exclusive(self):
    """A fully-specified classic Lambda Function serializes without error."""
    handler = Function(
        "AMIIDLookup",
        Handler="index.handler",
        Role=GetAtt("LambdaExecutionRole", "Arn"),
        Code=Code(S3Bucket="lambda-functions", S3Key="amilookup.zip"),
        Runtime="nodejs",
        Timeout="25",
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_valid_data(self):
    """A container definition built from a dict round-trips into a valid TaskDefinition."""
    t = Template()
    cd = ecs.ContainerDefinition.from_dict("mycontainer", self.d)
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(cd.Links[0], "containerA")
    td = ecs.TaskDefinition(
        "taskdef",
        ContainerDefinitions=[cd],
        Volumes=[ecs.Volume(Name="myvol")],
        TaskRoleArn=Ref(iam.Role("myecsrole"))
    )
    t.add_resource(td)
    t.to_json()
def test_s3_location(self):
    """CodeUri given as an S3Location object serializes cleanly."""
    code_location = S3Location(Bucket="mybucket", Key="mykey")
    template = Template()
    template.add_resource(Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri=code_location,
    ))
    template.to_json()
def setup_resources():
    """ Create generic testing data """
    # NOTE: populates the module-level `template` and `vpc` fixtures that
    # the surrounding tests share.
    global template
    global vpc
    template = Template()
    # Minimal VPC with DNS support/hostnames enabled; troposphere accepts
    # the string 'true' for these boolean properties.
    vpc = template.add_resource(ec2.VPC('MyVPC',
                                        CidrBlock='10.0.0.0/16',
                                        EnableDnsSupport='true',
                                        EnableDnsHostnames='true'))
from troposphere import Template from troposphere.codedeploy import ( AutoRollbackConfiguration, DeploymentGroup, DeploymentStyle, Ec2TagFilters, Ec2TagSet, Ec2TagSetListObject, ElbInfoList, LoadBalancerInfo, OnPremisesInstanceTagFilters, ) template = Template() template.add_version('2010-09-09') auto_rollback_configuration = AutoRollbackConfiguration( Enabled=True, Events=['DEPLOYMENT_FAILURE']) deployment_style = DeploymentStyle(DeploymentOption='WITH_TRAFFIC_CONTROL') elb_info_list = ElbInfoList(Name='DemoLoadBalancer') load_balancer_info = LoadBalancerInfo(ElbInfoList=[elb_info_list]) deployment_group = DeploymentGroup( "DemoDeploymentGroup", ApplicationName='DemoApplication', AutoRollbackConfiguration=auto_rollback_configuration, DeploymentStyle=deployment_style, LoadBalancerInfo=load_balancer_info,
import troposphere.msk as msk from troposphere import Template t = Template() t.add_resource( msk.Cluster( "TestCluster", ClusterName="MyMskCluster", KafkaVersion="2.1.0", NumberOfBrokerNodes=3, EnhancedMonitoring="PER_BROKER", BrokerNodeGroupInfo=msk.BrokerNodeGroupInfo( BrokerAZDistribution="DEFAULT", InstanceType="kafka.m5.large", SecurityGroups=["sg-c73ebda3"], StorageInfo=msk.StorageInfo(EBSStorageInfo=msk.EBSStorageInfo( VolumeSize=100)), ClientSubnets=[ "subnet-ce49ff7bcd", "subnet-2541a68474", "subnet-1d6b6f39da", ], ), EncryptionInfo=msk.EncryptionInfo( EncryptionAtRest=msk.EncryptionAtRest( DataVolumeKMSKeyId="ReplaceWithKmsKeyArn"), EncryptionInTransit=msk.EncryptionInTransit( ClientBroker="TLS", InCluster=True, ),
class EnvironmentTemplate:
    """Builds the per-environment "Service VPC" CloudFormation template:
    VPC, internet gateway + attachment, and a network ACL with one inbound
    (HTTPS) and one outbound rule. Configuration comes from
    config/config.ini via MyParser."""

    def __init__(self, Env=os.environ.get('ENV', 'Development')):
        # Env defaults to the ENV environment variable (or 'Development').
        # NOTE(review): the default is evaluated once at import time — later
        # changes to os.environ['ENV'] do not affect it.
        self.env = Env
        url_config = os.getcwd() + '/config/config.ini'
        p = MyParser()
        self.config = p.readconfig(url_config, self.env)
        self.template = Template()
        self.template.set_description("Service VPC")
        self.template.set_metadata({
            "DependsOn": [],
            "Environment": Env,
            "StackName": "%s-VPC" % Env
        })
        # Populated by create_vpc() / create_gateway(); create_vpc() must be
        # called first since the other builders reference self.vpc.
        self.vpc = None
        self.gateway = None
        self.gateway_attachment = None

    def __set_tags(self, name):
        """Private helper: build the standard Environment/Name tag list for a resource."""
        tags = [{
            "Key": "Environment",
            "Value": self.env
        }, {
            "Key": "Name",
            "Value": "%s-%s" % (self.env, name)
        }]
        return tags

    def create_gateway(self, name=GATEWAY):
        """Create the internet gateway, export its id, and attach it to the VPC."""
        self.gateway = self.template.add_resource(
            ec2.InternetGateway(name, Tags=self.__set_tags("InternetGateway")))
        self.template.add_output(Output(
            GATEWAY_ID,
            Value=self.gateway.Ref(),
        ))
        # Requires self.vpc, i.e. create_vpc() must already have run.
        self.gateway_attachment = self.template.add_resource(
            ec2.VPCGatewayAttachment(
                VPC_GATEWAYATTACHMENT,
                VpcId=self.vpc.Ref(),
                InternetGatewayId=self.gateway.Ref(),
            ))

    def create_network(self):
        """Create the VPC network access list plus its inbound and outbound rules."""
        self.vpc_nw_acl = self.template.add_resource(
            ec2.NetworkAcl(VPC_NETWORK_ACCESS_LIST,
                           VpcId=self.vpc.Ref(),
                           Tags=self.__set_tags("NetworkAcl")))
        # Inbound: allow TCP 443 (HTTPS) from anywhere.
        self.inbound_rule = self.template.add_resource(
            ec2.NetworkAclEntry(VPC_NETWORK_ACL_INBOUND_RULE,
                                NetworkAclId=self.vpc_nw_acl.Ref(),
                                RuleNumber=100,
                                Protocol="6",
                                PortRange=PortRange(To="443", From="443"),
                                Egress="false",
                                RuleAction="allow",
                                CidrBlock="0.0.0.0/0"))
        # Outbound: allow all TCP to anywhere (no port range restriction).
        self.outbound_rule = self.template.add_resource(
            ec2.NetworkAclEntry(VPC_NETWORK_ACL_OUTBOUND_RULE,
                                NetworkAclId=self.vpc_nw_acl.Ref(),
                                RuleNumber=200,
                                Protocol="6",
                                Egress="true",
                                RuleAction="allow",
                                CidrBlock="0.0.0.0/0"))

    def create_vpc(self, name=VPC_NAME):
        """Create the VPC for this template and export its id."""
        self.vpc = self.template.add_resource(
            ec2.VPC(name,
                    CidrBlock=self.config['vpc_cidrblock'],
                    EnableDnsSupport=True,
                    EnableDnsHostnames=True,
                    InstanceTenancy="default",
                    Tags=self.__set_tags("ServiceVPC")))
        # Just about everything needs this, so storing it on the object
        self.template.add_output(Output(VPC_ID, Value=self.vpc.Ref()))

    def write_to_file(self, filename):
        """Write the template as pretty-printed, key-sorted JSON to `filename`."""
        with open(filename, 'w') as f:
            f.write(
                json.dumps(json.loads(self.template.to_json()),
                           indent=2,
                           sort_keys=True))
ApplicationName = "nodeserver"
ApplicationPort = "3000"

GithubAccount = "EffectiveDevOpsWithAWS"
GithubAnsibleURL = "https://github.com/{}/ansible".format(GithubAccount)

# Command baked into the instance: pull the Ansible repo and apply the
# application playbook against localhost.
AnsiblePullCmd = \
    "/usr/local/bin/ansible-pull -U {} {}.yml -i localhost".format(
        GithubAnsibleURL,
        ApplicationName
    )

# The caller's current public IP, as a CIDR — used to restrict SSH ingress.
PublicCidrIp = str(ip_network(get_ip()))

t = Template()

t.add_description("Effective DevOps in AWS: HelloWorld web application")

t.add_parameter(
    Parameter(
        "KeyPair",
        Description="Name of an existing EC2 KeyPair to SSH",
        Type="AWS::EC2::KeyPair::KeyName",
        ConstraintDescription="must be the name of an existing EC2 KeyPair.",
    ))

# Declare the VPC parameter (translated from Korean: "VPC 선언")
t.add_parameter(Parameter("VpcId", Type="AWS::EC2::VPC::Id", Description="VPC"))
def getTest(t: Template, inputArt: str, stage: str) -> Action:
    """Register the test-stage CodeBuild role and project on the template and
    return the pipeline action that runs it against `inputArt`."""
    build_role = t.add_resource(getBuildRole(stage))
    spec = getBuildSpec(stage)
    project = t.add_resource(getCodeBuild(build_role, stage, spec))
    return buildCfWithDockerAction(project, inputArt)
def create_cloudformation_stack(args): print("Hello AWS!") # Connect to EC2. Get a cloudformation client session = boto3.Session(profile_name='f_project') client = session.client('cloudformation', region_name='sa-east-1') stack_name = args.stack_name ssh_key = args.ssh_key_name sec_group_name = 'TesisSecurityGroup' # Create stack template. cloudformation_template = Template() # Add parameters -> SSH key ssh_key_parameter = cloudformation_template.add_parameter( ssh_parameter(ssh_key)) cloudformation_template.add_output( cf_output("SSHKey", "SSH Key to log into instances", 'KeyName')) # Add roles and policies (bucket) policy_name = 'RolePolicies' role_name = 'InstanceRole' profile_name = 'InstanceProfile' ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') cloudformation_template, subnet = set_cloudformation_settings( cloudformation_template, ref_stack_id) # Add security group ssh_sec_group = add_ssh_security_group(sec_group_name) cloudformation_template.add_resource(ssh_sec_group) # Read the environment information from the config file cfg_parser = ConfigObj(CFG_FILE) instance_size = cfg_parser['aws_config']['INSTANCE_SIZE'] bucket_name = cfg_parser['aws_config']['BUCKET'] # Add bucket access policies cloudformation_template.add_resource( allow_bucket_access_role(role_name)) # 1 cloudformation_template.add_resource( bucket_access_policy(policy_name, role_name, bucket_name)) # 2 cloudformation_template.add_resource( instance_profile_bucket(profile_name, role_name)) # 3 for instance_id in cfg_parser["Instances"]: instance = cfg_parser["Instances"][instance_id] name = instance['name'] print("Instance name:", name) ami_id = instance['ami_id'] ip = instance['ip'] bootstrap_file = instance['local_bootstrap_file'] bootstrap_path = os.path.join(os.getcwd(), BOOTSTRAP_FOLDER, name, bootstrap_file) aws_instance = EnvInstance(name) aws_instance.create_instance_template( ami_id, instance_size, ip, ssh_key_parameter, 
bootstrap_path, sec_group_name, # TODO: unused subnet) aws_instance.set_bucket_access(role_name, profile_name, bucket_name) aws_instance.add_to_security_group(ssh_sec_group) # aws_instance.add_to_security_group(ref_stack_id) cloudformation_template.add_resource(aws_instance) cloudformation_template.add_output( cf_output("%sInstance" % name, "%s: IP %s" % (name, ip), name)) print("Instance added to template!") try: # Create stack client.create_stack(StackName=stack_name, TemplateBody=cloudformation_template.to_json(), Capabilities=['CAPABILITY_IAM']) # Wait until stack is created while client.describe_stacks( StackName=stack_name )["Stacks"][0]["StackStatus"] != StackState.created: # Add timeout -> and delete stack print("Creating Environment...") time.sleep(15) else: print("CloudFormation Stack created") except ClientError: formatted_lines = traceback.format_exc().splitlines() print(traceback.format_exc()) print(formatted_lines[0]) print(formatted_lines[-1]) print("CloudFormation Stack could not be created!")
def test_resource(self):
    """Adding the same resource twice must raise ValueError."""
    template = Template()
    duplicate = FakeAWSObject('fake', callcorrect=True)
    template.add_resource(duplicate)
    with self.assertRaises(ValueError):
        template.add_resource(duplicate)
def test_output(self):
    """Adding the same output twice must raise ValueError."""
    template = Template()
    duplicate = Output("MyOutput", Value="myvalue")
    template.add_output(duplicate)
    with self.assertRaises(ValueError):
        template.add_output(duplicate)
def test_parameter(self):
    """Adding the same parameter twice must raise ValueError."""
    template = Template()
    duplicate = Parameter("MyParameter", Type="String")
    template.add_parameter(duplicate)
    with self.assertRaises(ValueError):
        template.add_parameter(duplicate)
def test_invalid_parameter_property_in_template(self):
    """AllowedPattern is not valid for a Number parameter; to_json() must raise."""
    template = Template()
    template.add_parameter(
        Parameter("BasicNumber", Type="Number", AllowedPattern=".*"))
    with self.assertRaises(ValueError):
        template.to_json()
def test_badrequired(self):
    """An ec2 Instance missing its required properties fails validation on to_json()."""
    with self.assertRaises(ValueError):
        template = Template()
        template.add_resource(Instance('ec2instance'))
        template.to_json()
def test_mutualexclusion(self):
    """Serializing a resource with mutually exclusive properties raises ValueError."""
    template = Template()
    template.add_resource(
        FakeAWSObject('fake', callcorrect=True, singlelist=[10]))
    with self.assertRaises(ValueError):
        template.to_json()
from troposphere import Template, cloudfront, constants, ImportValue, Sub, Join, Parameter, Ref, Output, GetAtt, \ Equals, AWS_NO_VALUE, If, route53, FindInMap, AWS_REGION import custom_resources.acm import custom_resources.cloudformation import cfnutils.mappings import cfnutils.output template = Template() custom_resources.use_custom_resources_stack_name_parameter(template) authorizer_stack = template.add_parameter( Parameter( "AuthorizerStack", Type=constants.STRING, Default="authorizer", Description="Authorizer stack to import from", )) template.set_parameter_label(authorizer_stack, "Authorizer stack") param_authorizer_lae_arn = template.add_parameter( Parameter( "AuthorizerLaeParam", Type="AWS::SSM::Parameter::Value<String>", Default='/authorizer/lae-arn', Description="Parameter name to get Lambda@Edge ARN from", )) template.set_parameter_label(param_authorizer_lae_arn, "Authorizer Lambda@Edge parameter") param_label = template.add_parameter(
ApplicationName = "helloworld" ApplicationPort = "3000" GithubAccount = "Abdul-Aziz-T" GithubAnsibleURL = "https://github.com/{}/Ansible".format(GithubAccount) AnsiblePullCmd = \ "/usr/local/bin/ansible-pull -U {} {}.yml -i localhost".format( GithubAnsibleURL, ApplicationName ) PublicCidrIp = str(ip_network(get_ip())) t = Template() t.add_description("Effective DevOps in AWS: HelloWorld web application") t.add_parameter(Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair" )) t.add_resource(ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort), SecurityGroupIngress=[ ec2.SecurityGroupRule(
def generate(env='pilot'):
    """Build the spider pipeline template for the given environment prefix:
    an SQS work queue (with DLQ), a Lambda worker, an SNS topic fed by an
    S3 source bucket, the glue policies, and a results bucket.

    :param env: environment name used to prefix every resource name
    :return: the template rendered as a JSON string
    """
    template = Template()
    template.set_version("2010-09-09")
    # (Commented-out ExistingVPC / ExistingSecurityGroups parameters removed;
    # they were never referenced by the live resources below.)

    # --- Lambda sizing parameters -----------------------------------------
    param_spider_lambda_memory_size = template.add_parameter(
        Parameter(
            'SpiderLambdaMemorySize',
            Type=NUMBER,
            Description='Amount of memory to allocate to the Lambda Function',
            Default='128',
            AllowedValues=MEMORY_VALUES
        )
    )
    param_spider_lambda_timeout = template.add_parameter(
        Parameter(
            'SpiderLambdaTimeout',
            Type=NUMBER,
            Description='Timeout in seconds for the Lambda function',
            Default='60'
        )
    )

    # --- Task queue + dead-letter queue (14-day retention each) -----------
    spider_tasks_queue_dlq_name = f'{env}-spider-tasks-dlq'
    spider_tasks_queue_dlq = template.add_resource(
        Queue(
            "SpiderTasksDLQ",
            QueueName=spider_tasks_queue_dlq_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
        )
    )
    spider_tasks_queue_name = f"{env}-spider-tasks"
    spider_tasks_queue = template.add_resource(
        Queue(
            "SpiderTasksQueue",
            QueueName=spider_tasks_queue_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
            VisibilityTimeout=300,
            # After 2 failed receives, messages move to the DLQ.
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(spider_tasks_queue_dlq, "Arn"),
                maxReceiveCount=2,
            ),
            DependsOn=[spider_tasks_queue_dlq],
        )
    )

    # --- Execution role: broad logs/s3/sqs access, assumable by Lambda ----
    spider_lambda_role = template.add_resource(
        Role(
            "SpiderLambdaRole",
            Path="/",
            Policies=[
                Policy(
                    PolicyName="root",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Id="root",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("logs", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("s3", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("sqs", "*")
                                ]
                            ),
                        ]
                    ),
                )
            ],
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service":
                            ["lambda.amazonaws.com"]
                    }
                }]
            },
        )
    )

    # --- Worker Lambda; code is deployed from S3 (inline ZipFile disabled) -
    spider_file_path = './spider/index.js'
    spider_code = open(spider_file_path, 'r').readlines()
    spider_lambda = template.add_resource(
        Function(
            "SpiderLambda",
            Code=Code(
                S3Bucket='spider-lambda',
                S3Key=f'{env}.zip',
                # ZipFile=Join("", spider_code)
            ),
            Handler="index.handler",
            Role=GetAtt(spider_lambda_role, "Arn"),
            Runtime="nodejs12.x",
            Layers=['arn:aws:lambda:us-east-1:342904801388:layer:spider-node-browser:1'],
            MemorySize=Ref(param_spider_lambda_memory_size),
            Timeout=Ref(param_spider_lambda_timeout),
            DependsOn=[spider_tasks_queue],
        )
    )
    # (Commented-out AllSecurityGroups / SecurityGroup custom-resource
    # experiment removed; it referenced the deleted parameters above.)

    # --- SNS topic that fans S3 events into the task queue ----------------
    source_sns_name = f'{env}-source-sns-topic'
    source_sns_topic = template.add_resource(
        Topic(
            "SNSSource",
            TopicName=source_sns_name,
            Subscription=[
                Subscription(
                    Endpoint=GetAtt(spider_tasks_queue, "Arn"),
                    Protocol='sqs',
                )
            ],
            DependsOn=[spider_tasks_queue]
        )
    )
    # Allow S3 to publish into the topic.
    source_sns_topic_policy = template.add_resource(
        TopicPolicy(
            "SourceForwardingTopicPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowS3PutMessageInSNS",
                Statement=[
                    Statement(
                        Sid="AllowS3PutMessages",
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Effect=Allow,
                        Action=[
                            Action("sns", "Publish"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Topics=[Ref(source_sns_topic)],
        )
    )
    # Allow SNS to deliver into the task queue.
    sns_sqs_policy = template.add_resource(
        QueuePolicy(
            "AllowSNSPutMessagesInSQS",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowSNSPutMessagesInSQS",
                Statement=[
                    Statement(
                        Sid="AllowSNSPutMessagesInSQS2",
                        Principal=Principal("*"),
                        Effect=Allow,
                        Action=[
                            Action("sqs", "SendMessage"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Queues=[Ref(spider_tasks_queue)],
            DependsOn=[spider_tasks_queue],
        )
    )

    # Buckets
    source_bucket_name = f'{env}-source-bucket'
    source_bucket = template.add_resource(
        Bucket(
            "SourceBucket",
            BucketName=source_bucket_name,
            # Every object creation notifies the SNS topic above.
            NotificationConfiguration=NotificationConfiguration(
                TopicConfigurations=[
                    TopicConfigurations(
                        Topic=Ref(source_sns_topic),
                        Event="s3:ObjectCreated:*",
                    )
                ],
            ),
            DependsOn=[source_sns_topic_policy],
        )
    )
    results_bucket_name = f'{env}-results-bucket'
    results_bucket = template.add_resource(
        Bucket(
            "ResultsBucket",
            BucketName=results_bucket_name,
        )
    )

    # Lambda trigger
    template.add_resource(
        EventSourceMapping(
            "TriggerLambdaSpiderFromSQS",
            EventSourceArn=GetAtt(spider_tasks_queue, "Arn"),
            FunctionName=Ref(spider_lambda),
            BatchSize=1,  # Default process tasks one by one
        )
    )

    return template.to_json()
) from troposphere.cloudwatch import ( Alarm, MetricDimension ) from troposphere.ecs import Cluster from troposphere.iam import ( InstanceProfile, Role ) PublicCidrIp = str(ip_network(get_ip())) t = Template() t.add_description("Ganesh DevOps in AWS: ECS Cluster") t.add_parameter(Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_parameter(Parameter( "VpcId", Type="AWS::EC2::VPC::Id", Description="VPC" )) t.add_parameter(Parameter(
def create_cft():
    """Create the CFT that provisions the merch cube user and its managed policy."""
    cft = Template()
    cft.add_version("2010-09-09")
    cft.add_description("CFT to create merch cube user")

    # Parameters
    for param in parameters:
        cft.add_parameter(param)

    # Maps
    cft.add_mapping("EnvGroupToEnv", env_group_to_env)

    # Resources
    cft.add_resource(managed_policy)
    cft.add_resource(merch_cube_user)

    return cft
from troposphere import Parameter, Output, Template
from troposphere import Base64, FindInMap, GetAtt, Ref, Join, If, Tags, Equals
from troposphere import cloudformation
from troposphere.cloudformation import WaitCondition, WaitConditionHandle
from troposphere.ec2 import Instance
from troposphere.ec2 import VPCGatewayAttachment, NetworkInterfaceProperty, SecurityGroup, SubnetRouteTableAssociation, RouteTable, Route, Subnet, InternetGateway, VPC, EIP
from troposphere.iam import AccessKey, User, Policy
from troposphere.route53 import RecordSetType
import awacs.aws

t = Template()

# Header
t.add_description("Perforce Helix Deployment for EC2")
# Header End

# Metadata
t.add_metadata({
    "Comments": "Perforce Helix Deployment for EC2",
    "LastUpdated": "Sep 14th 2016",
    "UpdatedBy": "Graeme Rich",
    "Version": "2016.1",
})
# Metadata End

# Conditions
# Bug fix: the original assignment ended with a stray trailing comma, which
# made ProdNotify a 1-tuple wrapping add_condition()'s return value instead
# of the condition itself; any later use (e.g. If(ProdNotify, ...)) would
# then receive the wrong object.
ProdNotify = t.add_condition("ProdNotify",
                             Equals(Ref("EnvironmentType"), "Production"))
return Sub(str.join(':', [ "arn", "aws", "apigateway", "${AWS::Region}", "lambda", "path/2015-03-31/functions/${fn}/invocations" ]), fn=GetAtt(lambda_resource, "Arn")) def executeapi_arn(api_resource, path): return Sub(str.join(':', [ 'arn', 'aws', 'execute-api', '${AWS::Region}', '${AWS::AccountId}', '${api}/' + path ]), api=Ref(api_resource)) template = Template() devices_table = template.add_resource( dynamodb.Table( 'DevicesTable', TableName='Devices', KeySchema=[ dynamodb.KeySchema(AttributeName='key', KeyType='HASH'), ], AttributeDefinitions=[ dynamodb.AttributeDefinition(AttributeName='key', AttributeType='S'), ], BillingMode='PAY_PER_REQUEST', ))
# Python script to generate the cloudformation template json file # This is not strictly needed, but it takes the pain out of writing a # cloudformation template by hand. It also allows for DRY approaches # to maintaining cloudformation templates. from troposphere import Ref, Template, Parameter, Output, Join, GetAtt, Tags import troposphere.ec2 as ec2 import configuration t = Template() t.add_description('VPC Script') def createCouchbaseVPC(t): couchbaseVPC = t.add_resource( ec2.VPC( 'VPC', CidrBlock='10.0.0.0/16', EnableDnsSupport='true', EnableDnsHostnames='true', Tags=Tags(Name=Join( '', ['vpc-scalabilty-', Ref('AWS::Region')])))) return couchbaseVPC def createCouchbaseInternetGateway(t): couchbaseInternetGateway = t.add_resource( ec2.InternetGateway( 'GATEWAY', Tags=Tags(
from troposphere import Template from troposphere.firehose import ( BufferingHints, CloudWatchLoggingOptions, CopyCommand, DeliveryStream, EncryptionConfiguration, KMSEncryptionConfig, RedshiftDestinationConfiguration, S3Configuration, ) t = Template() t.add_version('2010-09-09') t.add_description('Sample Kinesis Firehose Delivery Stream') t.add_resource( DeliveryStream( 'MyDeliveryStream', DeliveryStreamName='MyDeliveryStream', RedshiftDestinationConfiguration=RedshiftDestinationConfiguration( CloudWatchLoggingOptions=CloudWatchLoggingOptions( Enabled=True, LogGroupName='my-log-group', LogStreamName='my-log-stream', ), ClusterJDBCURL= 'jdbc:redshift://my-redshift-db.asdf.us-west-2.redshift.amazonaws.com:5432/mydb', # noqa CopyCommand=CopyCommand( CopyOptions="JSON 'auto'", DataTableColumns='mycol',
AutoScalingGroup, LaunchConfiguration, ScalingPolicy, ) from troposphere.cloudwatch import ( Alarm, MetricDimension, ) from awacs.sts import AssumeRole ApplicationPort = "6000" PublicCidrIp = str(ip_network(get_ip())) t = Template() t.add_description("Appserver Template") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_parameter(Parameter("VpcId", Type="AWS::EC2::VPC::Id", Description="VPC")) t.add_parameter(
# Sample template: a Client VPN endpoint authenticated against an AWS
# Directory Service directory, emitted as CloudFormation JSON on stdout.
from troposphere import Template, Tags
import troposphere.ec2 as ec2

template = Template()

# Connecting clients are authenticated against a directory-service
# directory (no mutual-TLS option configured).
directory_auth = ec2.ClientAuthenticationRequest(
    Type="directory-service-authentication",
    ActiveDirectory=ec2.DirectoryServiceAuthenticationRequest(
        DirectoryId="d-926example"),
)

# Tag the endpoint itself at creation time.
endpoint_tags = ec2.TagSpecifications(
    ResourceType="client-vpn-endpoint",
    Tags=Tags(Purpose="Production"),
)

server_certificate = ("arn:aws:acm:us-east-1:111122223333:certificate/"
                      "12345678-1234-1234-1234-123456789012")

template.add_resource(
    ec2.ClientVpnEndpoint(
        "myClientVpnEndpoint",
        AuthenticationOptions=[directory_auth],
        ClientCidrBlock="10.0.0.0/22",
        # Connection logging is explicitly disabled.
        ConnectionLogOptions=ec2.ConnectionLogOptions(Enabled=False),
        Description="My Client VPN Endpoint",
        DnsServers=["11.11.0.1"],
        ServerCertificateArn=server_certificate,
        TagSpecifications=[endpoint_tags],
        TransportProtocol="udp",
    ))

print(template.to_json())
#!/usr/bin/env python
"""Generate a template with three EC2 instances sharing the same
parameters, mappings and security group.

The previous version passed bare, undefined Python names to Ref() and
FindInMap() (the inline comment admitted it was non-working), so the
script raised NameError before emitting any YAML.  Ref()/FindInMap()
accept the *logical names* of template parameters, mappings and
resources as strings, which is what is used here.  The inner lookup is
also corrected to the conventional AWSInstanceType2Arch mapping —
looking up an instance type in AWSRegionArch2AMI could never resolve.

Note: the referenced parameters (InstanceType, KeyName), the mappings
(AWSInstanceType2Arch, AWSRegionArch2AMI) and the InstanceSecurityGroup
resource still have to be added to the template for it to validate.
"""
from troposphere import Base64, FindInMap, GetAtt
from troposphere import Parameter, Output, Ref, Template
import troposphere.ec2 as ec2

template = Template()

# Three identical instances with unique logical IDs (Ec2Instance1..3).
for i in range(3):
    ec2_instance = template.add_resource(
        ec2.Instance(
            "Ec2Instance{}".format(i + 1),
            # Region/arch-specific AMI lookup: first map the instance
            # type to an architecture, then map (region, arch) to AMI.
            ImageId=FindInMap(
                "AWSRegionArch2AMI",
                Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref("InstanceType"), "Arch")),
            InstanceType=Ref("InstanceType"),
            KeyName=Ref("KeyName"),
            SecurityGroups=[Ref("InstanceSecurityGroup")]))

print(template.to_yaml())
# Example Network with a NAT Gateway from troposphere import GetAtt, Output, Parameter, Ref, Template, ec2 t = Template() t.set_description( "AWS CloudFormation Sample Template NatGateway: Sample template showing " "how to create a public NAT gateway. " "**WARNING** This template creates an Amazon NAT gateway. " "You will be billed for the AWS resources used if you create " "a stack from this template.") vpc_cidr = t.add_parameter( Parameter( "VPCCIDR", Default="172.18.0.0/16", Description="The IP address space for this VPC, in CIDR notation", Type="String", )) public_subnet = t.add_parameter( Parameter( "PublicSubnetCidr", Type="String", Description="Public Subnet CIDR", Default="172.18.0.0/22", )) private_subnet = t.add_parameter( Parameter(
def generate_queues_template(QueueNamePrefix, Environment):
    """Build a CloudFormation template for an encrypted messaging queue.

    The template contains a customer-managed KMS key (plus alias), a
    dead-letter queue, a primary SQS queue encrypted with that key and
    redriving to the DLQ after 10 receives, and exported outputs for the
    queue and key ARNs.

    Args:
        QueueNamePrefix: base name for the queue resources.
        Environment: environment suffix appended to the queue names.

    Returns:
        The populated troposphere Template.
    """
    queue_name = f'{QueueNamePrefix}-{Environment}'
    dlq_name = f'{QueueNamePrefix}DLQ-{Environment}'

    template = Template(Description='A template for a messaging queue')
    template.version = '2010-09-09'

    # The account root keeps unrestricted control of the key.
    root_statement = Statement(
        Sid='Enable IAM User Permissions',
        Effect=Allow,
        Principal=AWSPrincipal(
            Sub('arn:aws:iam::${AWS::AccountId}:root')),
        Action=[KmsAction(All)],
        Resource=AllResources)

    # Named administrators may manage the key's lifecycle.
    admin_action_names = (
        'Create*', 'Describe*', 'Enable*', 'List*', 'Put*', 'Update*',
        'Revoke*', 'Disable*', 'Get*', 'Delete*',
        'ScheduleKeyDeletion', 'CancelKeyDeletion')
    admin_statement = Statement(
        Sid='Allow access for Key Administrators',
        Effect=Allow,
        Principal=AWSPrincipal(
            [Sub(f'{USER}/frank'), Sub(f'{USER}/moonunit')]),
        Action=[KmsAction(name) for name in admin_action_names],
        Resource=AllResources)

    kms_key = template.add_resource(
        Key('KMSKey',
            Description=f'KMS Key for encrypting {queue_name}',
            Enabled=True,
            EnableKeyRotation=True,
            KeyPolicy=Policy(
                Version='2012-10-17',
                Statement=[root_statement, admin_statement])))

    template.add_resource(
        Alias('KMSKeyAlias',
              AliasName=f'alias/{queue_name}',
              TargetKeyId=Ref(kms_key)))

    dead_letter_queue = template.add_resource(
        Queue('DeadLetterQueue',
              QueueName=dlq_name,
              MaximumMessageSize=262144,  # 256KiB
              MessageRetentionPeriod=1209600,  # 14 days
              VisibilityTimeout=30))

    # Primary queue: KMS-encrypted, redrives to the DLQ after 10 failed
    # receives.
    template.add_resource(
        Queue('PrimaryQueue',
              QueueName=queue_name,
              MaximumMessageSize=262144,  # 256KiB
              MessageRetentionPeriod=1209600,  # 14 days
              VisibilityTimeout=30,
              RedrivePolicy=RedrivePolicy(
                  deadLetterTargetArn=GetAtt(dead_letter_queue.title, 'Arn'),
                  maxReceiveCount=10),
              KmsMasterKeyId=Ref(kms_key),
              KmsDataKeyReusePeriodSeconds=300))

    template.add_output([
        Output('QueueArn',
               Description=f'ARN of {queue_name} Queue',
               Value=GetAtt('PrimaryQueue', 'Arn'),
               Export=Export(Name(Sub('${AWS::StackName}:PrimaryQueueArn')))),
        Output('KmsKeyArn',
               Description=f'KMS Key ARN for {queue_name} Queue',
               Value=GetAtt('KMSKey', 'Arn'),
               Export=Export(Name(Sub('${AWS::StackName}:KmsKeyArn'))))
    ])
    return template
#!/usr/bin/env python from troposphere import GetAtt, Template, Ref, Output, Parameter, AWSObject from troposphere.awslambda import Code, Function from troposphere.iam import Role from troposphere.iam import Policy as IAM_Policy from awacs.aws import Policy, Allow, Action, Principal, Statement template = Template() s3_bucket_param = Parameter( 'S3BucketName', Type='String', Description='The bucket where the lambda code is uploaded') s3_key_param = Parameter( 'S3KeyName', Type='String', Description='The full name of the s3 key containing the lambda zip') assume_role_policy_document = Policy(Statement=[ Statement( Effect=Allow, Action=[Action('sts', 'AssumeRole')], Principal=Principal('Service', 'lambda.amazonaws.com'), ) ]) lambda_policy = IAM_Policy( PolicyName='ECSPortAllocator', PolicyDocument=Policy(Statement=[ Statement(Effect=Allow,
from troposphere import sqs, s3
from troposphere import GetAtt, Ref
from troposphere import Template, Parameter
from troposphere.awslambda import Function, Code, Environment, TracingConfig, EventSourceMapping, Permission
from troposphere.sqs import Queue, QueuePolicy, RedrivePolicy
from troposphere.iam import Role, Policy
import regex
import time

# Object that will generate the template
t = Template()


# Gather input and validate correct datatype
# NOTE(review): function continues past the end of this chunk, so only
# documentation is added here; the issues flagged below need fixing in
# the full definition.
def validate_user_input(type, prompt, max_ = 0, min_ = 1):  # pick int between min_ and max_
    # Loop until user submits a valid response
    while True:
        # Collect users input to custom questions
        response = input(prompt)
        # If there actually is a response then validate
        # NOTE(review): `is not` tests identity, and `or` makes this
        # condition always true — presumably `response != ''` (with
        # `and`) was intended; confirm and fix in the full definition.
        if (response is not None or response is not ''):
            # If the answer is a string or the response to a yes or no then...
            if(type == "String" or type == 'y/n'):
                # Check the response for any special characters as they are usually not allowed
                ## In the cases that they are, have user input a code that refers to that character
                # NOTE(review): module imports `regex` but this calls
                # `re.search` — NameError unless `re` is imported
                # elsewhere.  Also, the POSIX class "[[:punct:]]" is not
                # supported by Python's `re` module (it is by `regex`).
                check_for_punc = re.search("[[:punct:]]", response)
                # If there is punctuation, return to collecting input
                if (check_for_punc):
                    print("Please no punctuation")
                    continue
                # If there is no punctuation and the response is yes/no
                if (type == 'y/n'):
from awacs.sts import AssumeRole

ApplicationName = "jenkins"
ApplicationPort = 8080

# Source of the ansible playbooks pulled onto the instance at boot.
GithubAccount = "srrk"
GithubAnsibleURL = "https://github.com/{}/ansible-workspace".format(
    GithubAccount)

# ansible-pull re-checks the repository every 60 seconds and applies the
# <ApplicationName>.yml playbook locally.
AnsiblePullCommand = \
    "/usr/local/bin/ansible-pull -U {} {}.yml -i localhost --sleep 60".format(
        GithubAnsibleURL,
        ApplicationName
    )

# Restrict inbound access to the current caller's public address.
# NOTE(review): ip_network/get_ip are imported above this chunk.
PublicCidrIp = str(ip_network(get_ip()))

t = Template()

t.add_description("Effective Devops in AWS : HelloWorld Web Application")

t.add_parameter(
    Parameter(
        "KeyPair",
        Description="Name of an existing EC2 keypair to SSH",
        Type="AWS::EC2::KeyPair::KeyName",
        ConstraintDescription="must be the name of an existing EC2 keypair"))

# NOTE(review): the security-group declaration continues past the end
# of this chunk.
t.add_resource(
    ec2.SecurityGroup(
        "SecurityGroup",
        GroupDescription="Allow SSH and TCP/{}".format(ApplicationPort),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
def create_vpc_template():
    """Generate a two-AZ public VPC template and write it to ./vpc.yml.

    The template contains a VPC (CIDR parameterized), two public subnets
    in us-east-1a/b, an internet gateway with attachment, a shared route
    table associated with both subnets, and a default route to the IGW.

    Returns:
        None.  Side effect: writes the rendered YAML to ./vpc.yml.
    """
    template = Template()

    # CIDR ranges are parameters so a stack can override the defaults.
    vpc_cidr = template.add_parameter(parameter=Parameter(
        title='VpcCidr', Type='String', Default='192.168.0.0/16'))
    subnet_cidr_a = template.add_parameter(parameter=Parameter(
        title='SubnetCidr1', Type='String', Default='192.168.1.0/24'))
    subnet_cidr_b = template.add_parameter(parameter=Parameter(
        title='SubnetCidr2', Type='String', Default='192.168.2.0/24'))

    vpc = template.add_resource(resource=VPC(
        title='SampleVpc', CidrBlock=Ref(vpc_cidr), EnableDnsHostnames=True))

    igw = template.add_resource(resource=InternetGateway(title='SampleIgw'))
    # Kept as a named resource: the default route below must declare an
    # explicit dependency on this attachment.
    attachment = template.add_resource(resource=VPCGatewayAttachment(
        title='SampleAttachment', VpcId=Ref(vpc), InternetGatewayId=Ref(igw)))

    subnet_a = template.add_resource(
        resource=Subnet(title='SampleSubnetA',
                        AvailabilityZone='us-east-1a',
                        CidrBlock=Ref(subnet_cidr_a),
                        MapPublicIpOnLaunch=True,
                        VpcId=Ref(vpc)))
    subnet_b = template.add_resource(
        resource=Subnet(title='SampleSubnetB',
                        AvailabilityZone='us-east-1b',
                        CidrBlock=Ref(subnet_cidr_b),
                        MapPublicIpOnLaunch=True,
                        VpcId=Ref(vpc)))

    # NOTE: logical IDs keep the original 'SampleRoteTable*' spelling so
    # stacks already built from this template do not see renamed IDs (a
    # rename would replace the resources on update).
    route_table = template.add_resource(
        resource=RouteTable(title='SampleRoteTable', VpcId=Ref(vpc)))
    template.add_resource(resource=SubnetRouteTableAssociation(
        title='SampleRoteTableAssociationA',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet_a)))
    template.add_resource(resource=SubnetRouteTableAssociation(
        title='SampleRoteTableAssociationB',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet_b)))

    # Fix: a route whose GatewayId is an IGW must not be created before
    # the gateway is attached to the VPC, or stack creation can fail
    # intermittently — make the ordering explicit with DependsOn.
    template.add_resource(resource=Route(title='SampleRoute',
                                         DependsOn=attachment.title,
                                         DestinationCidrBlock='0.0.0.0/0',
                                         GatewayId=Ref(igw),
                                         RouteTableId=Ref(route_table)))

    with open('./vpc.yml', mode='w') as file:
        file.write(template.to_yaml())