def GenerateGlobalLayer():
    """Build the CloudFormation template for the Global layer.

    Declares the StackName parameter, the crontab schedule DynamoDB table,
    and an output exposing the table name.

    :return: the assembled troposphere Template
    :rtype: Template
    """
    t = Template()
    t.add_description("""\
Global Layer
""")

    t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: StepGlobals)",
            Type="String",
            Default="StepGlobals",
        ))

    # Keyword arguments + dynamodb.KeySchema: the positional calling
    # convention and the dynamodb.Key class were removed from troposphere's
    # dynamodb module; this matches the style used elsewhere in the codebase.
    crontab_table = t.add_resource(
        dynamodb.Table(
            "scheduleTable",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName="taskname",
                                             AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName="taskname", KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))

    t.add_output([
        Output(
            "crontabtablename",
            Description="Crontab Table Name",
            Value=Ref(crontab_table),
        )
    ])
    return t
def inject_to(self, t):
    """Add this table definition (and its autoscaling) to template ``t``.

    Builds the attribute definitions and key schema from ``self.hash_key``
    and the optional ``self.range_key`` (each a (name, type) pair), then
    registers the table resource and returns the template.
    """
    # The hash key is mandatory; start both lists with it.
    attr_defs = [
        dynamodb.AttributeDefinition(AttributeName=self.hash_key[0],
                                     AttributeType=self.hash_key[1])
    ]
    schema = [
        dynamodb.KeySchema(AttributeName=self.hash_key[0], KeyType="HASH")
    ]

    # A range key is optional; append it when configured.
    if self.range_key is not None:
        rk = self.range_key
        attr_defs.append(
            dynamodb.AttributeDefinition(AttributeName=rk[0],
                                         AttributeType=rk[1]))
        schema.append(
            dynamodb.KeySchema(AttributeName=rk[0], KeyType="RANGE"))

    table = t.add_resource(
        dynamodb.Table(
            self.label,
            AttributeDefinitions=attr_defs,
            KeySchema=schema,
            ProvisionedThroughput=self.get_provisioned_throughput(t)))
    self.configure_autoscaling(t, table)
    return t
def define_table(table, template):
    """
    Function to create the DynamoDB table resource

    :param table:
    :type table: ecs_composex.common.compose_resources.Table
    """
    props = import_record_properties(table.properties, dynamodb.Table)
    # Stamp composex bookkeeping metadata and tags onto the resource.
    props["Metadata"] = metadata
    props["Tags"] = Tags(
        Name=table.name,
        ResourceName=table.logical_name,
        CreatedByComposex=True,
        RootStackName=Ref(ROOT_STACK_NAME),
    )
    table.cfn_resource = dynamodb.Table(table.logical_name, **props)
    if table.scaling:
        add_autoscaling(table, template)
    table.init_outputs()
    table.generate_outputs()
    add_resource(template, table.cfn_resource)
    add_outputs(template, table.outputs)
def words_table():
    """Return the WordsTable resource: user (hash) / word (range) keys."""
    key_schema = [
        dynamodb.KeySchema(AttributeName='user', KeyType='HASH'),
        dynamodb.KeySchema(AttributeName='word', KeyType='RANGE'),
    ]
    throughput = dynamodb.ProvisionedThroughput(ReadCapacityUnits=1,
                                                WriteCapacityUnits=1)
    return dynamodb.Table(
        'WordsTable',
        AttributeDefinitions=words_attrdefs(),
        GlobalSecondaryIndexes=words_gsis(),
        KeySchema=key_schema,
        ProvisionedThroughput=throughput)
def create_table(self, table_name, table_config, stream_enabled):
    """Add a DynamoDB table to the template plus its name/stream outputs.

    ``table_config`` is passed straight through as the Table's properties.
    """
    template = self.template
    template.add_resource(dynamodb.Table(table_name, **table_config))
    if stream_enabled:
        # Only tables with streams enabled expose a StreamArn attribute.
        template.add_output(
            Output(table_name + "StreamArn",
                   Value=GetAtt(table_name, "StreamArn")))
    template.add_output(Output(table_name + "Name", Value=Ref(table_name)))
def add_api_token_table(self):
    """Register the ApiTokens table: on-demand billing, SSE enabled."""
    token_attr = dynamodb.AttributeDefinition(AttributeName='token',
                                              AttributeType='S')
    token_key = dynamodb.KeySchema(AttributeName='token', KeyType='HASH')
    self._api_token_table = self.add_resource(
        dynamodb.Table(
            'ApiTokens',
            TableName=Sub('${AWS::StackName}ApiTokens'),
            AttributeDefinitions=[token_attr],
            BillingMode='PAY_PER_REQUEST',
            KeySchema=[token_key],
            SSESpecification=dynamodb.SSESpecification(SSEEnabled=True)))
def r_table(self):
    """Build the DynamoDbTable resource from ``self.conf`` settings."""
    name = self.conf['table_name']
    key = self.conf['table_key']
    return dynamodb.Table(
        'DynamoDbTable',
        TableName=name,
        BillingMode='PAY_PER_REQUEST',
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(AttributeName=key,
                                         AttributeType='S'),
        ],
        KeySchema=[dynamodb.KeySchema(AttributeName=key, KeyType='HASH')])
def cf_resources(cls):
    """produce CF template"""
    from troposphere import dynamodb

    # On-demand table keyed by id (hash) + created_at (range).
    table = dynamodb.Table(
        cls._TABLE_NAME,
        TableName=cls._TABLE_NAME,
        BillingMode='PAY_PER_REQUEST',
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(AttributeName='id',
                                         AttributeType='S'),
            dynamodb.AttributeDefinition(AttributeName='created_at',
                                         AttributeType='S'),
        ],
        KeySchema=[
            dynamodb.KeySchema(AttributeName='id', KeyType='HASH'),
            dynamodb.KeySchema(AttributeName='created_at', KeyType='RANGE'),
        ])
    return [table]
def define_table(table_name, table_res_name, table_definition):
    """
    Function to create the DynamoDB table resource

    :param table_name:
    :param str table_res_name:
    :param table_definition:
    :return: the DynamoDB Table
    :rtype: dynamodb.Table
    """
    required_keys = ["AttributeDefinitions", "KeySchema"]
    properties = table_definition["Properties"]
    # Fail fast when either mandatory property is missing.
    missing = [key for key in required_keys if key not in properties]
    if missing:
        raise KeyError("You must at least specify properties", required_keys)

    table_props = {
        "AttributeDefinitions": define_attributes_definition(
            properties["AttributeDefinitions"]
        ),
        "KeySchema": define_key_schema(properties["KeySchema"]),
        "ProvisionedThroughput": define_provisioned_throughput(properties),
        "LocalSecondaryIndexes": define_local_secondary_index(properties),
        "PointInTimeRecoverySpecification": define_pit_spec(properties),
        "SSESpecification": define_sse_spec(properties),
        "TimeToLiveSpecification": define_ttl_spec(properties),
        "StreamSpecification": define_stream_spec(properties),
        "GlobalSecondaryIndexes": define_global_sec_indexes(properties),
        # BillingMode is optional; omit it via AWS::NoValue when unset.
        "BillingMode": properties["BillingMode"]
        if keyisset("BillingMode", properties)
        else Ref(AWS_NO_VALUE),
        "Tags": Tags(
            Name=table_name,
            ResourceName=table_res_name,
            CreatedByComposex=True,
            RootStackName=Ref(ROOT_STACK_NAME),
        ),
        "Metadata": metadata,
    }
    return dynamodb.Table(table_res_name, **table_props)
# ============================================================================ db = rds.DBInstance( 'Postgres', DBInstanceClass='db.t2.micro', Engine='postgres', AllocatedStorage="5", DBName=Ref(db_name), MasterUsername=Ref(db_user), MasterUserPassword=Ref(db_password), ) # ============================================================================ # Dynamodb # ============================================================================ dynamo_db = dynamodb.Table( 'DynamoDBLog', TableName='DynamoDBLog', AttributeDefinitions=[dynamodb.AttributeDefinition( AttributeName='request_id', AttributeType='S', )], KeySchema=[dynamodb.KeySchema( AttributeName='request_id', KeyType='HASH', )], ProvisionedThroughput=dynamodb.ProvisionedThroughput( ReadCapacityUnits=5, WriteCapacityUnits=5, ) )
Type=constants.STRING, AllowedValues=['true', 'false'], Default='false', Description="overwrite", )) custom_resources.use_custom_resources_stack_name_parameter(template) table1 = template.add_resource(dynamodb.Table( "Table1", BillingMode="PAY_PER_REQUEST", AttributeDefinitions=[ dynamodb.AttributeDefinition( AttributeName="key", AttributeType="S", ) ], KeySchema=[ dynamodb.KeySchema( AttributeName="key", KeyType="HASH", ) ], )) template.add_output(Output( "Table1Name", Value=Ref(table1), )) table2 = template.add_resource(dynamodb.Table( "Table2",
def create_template(self):
    """Create template (main function called by Stacker).

    Declares the Terraform remote-state backend resources: a DynamoDB lock
    table, a versioned S3 state bucket, and a managed IAM policy granting
    the permissions the Terraform s3 backend requires.
    """
    template = self.template
    variables = self.get_variables()
    self.template.add_version('2010-09-09')
    self.template.add_description('Terraform State Resources')

    # Conditions
    # A name variable counts as "omitted" when empty or literally
    # 'undefined'; in that case CloudFormation generates the name.
    for i in ['BucketName', 'TableName']:
        template.add_condition(
            "%sOmitted" % i,
            Or(Equals(variables[i].ref, ''),
               Equals(variables[i].ref, 'undefined')))

    # Resources
    # DynamoDB table used by Terraform for state locking.
    terraformlocktable = template.add_resource(
        dynamodb.Table(
            'TerraformStateTable',
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName='LockID',
                                             AttributeType='S')
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH')
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2, WriteCapacityUnits=2),
            TableName=If('TableNameOmitted', NoValue,
                         variables['TableName'].ref)))
    template.add_output(
        Output('%sName' % terraformlocktable.title,
               Description='Name of DynamoDB table for Terraform state',
               Value=terraformlocktable.ref()))

    # Versioned, private S3 bucket that stores the state files; old
    # versions expire after 90 days.
    terraformstatebucket = template.add_resource(
        s3.Bucket(
            'TerraformStateBucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted', NoValue,
                          variables['BucketName'].ref),
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('%sName' % terraformstatebucket.title,
               Description='Name of bucket storing Terraform state',
               Value=terraformstatebucket.ref()))
    template.add_output(
        Output('%sArn' % terraformstatebucket.title,
               Description='Arn of bucket storing Terraform state',
               Value=terraformstatebucket.get_att('Arn')))

    # Managed policy covering the S3 + DynamoDB actions the Terraform s3
    # backend documents as required.
    managementpolicy = template.add_resource(
        iam.ManagedPolicy(
            'ManagementPolicy',
            Description='Managed policy for Terraform state management.',
            Path='/',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[terraformstatebucket.get_att('Arn')]),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join('', [
                                terraformstatebucket.get_att('Arn'),
                                '/*'
                            ])
                        ]),
                    Statement(Action=[
                        awacs.dynamodb.GetItem, awacs.dynamodb.PutItem,
                        awacs.dynamodb.DeleteItem
                    ],
                              Effect=Allow,
                              Resource=[terraformlocktable.get_att('Arn')])
                ])))
    template.add_output(
        Output('PolicyArn',
               Description='Managed policy Arn',
               Value=managementpolicy.ref()))
from troposphere import dynamodb

# Table with a composite primary key: uuid (hash) + lambda_name (range),
# provisioned at 2 RCU / 2 WCU.
dynamodb_table = dynamodb.Table(
    "DynamoDBTable",
    AttributeDefinitions=[
        dynamodb.AttributeDefinition(AttributeName="uuid", AttributeType="S"),
        dynamodb.AttributeDefinition(
            AttributeName="lambda_name", AttributeType="S"
        ),
    ],
    KeySchema=[
        dynamodb.KeySchema(AttributeName="uuid", KeyType="HASH"),
        dynamodb.KeySchema(AttributeName="lambda_name", KeyType="RANGE"),
    ],
    ProvisionedThroughput=dynamodb.ProvisionedThroughput(
        ReadCapacityUnits=2, WriteCapacityUnits=2
    ),
)
def GenerateSerializerLayer():
    """Build the CloudFormation template for a Serializer layer.

    Declares the serializer parameters, the serializer's DynamoDB table,
    and the ECS task definition that runs the serializer container, and
    outputs the task definition id.

    :return: the assembled troposphere Template
    :rtype: Template
    """
    t = Template()
    t.add_description("""\
Serializer Layer
""")

    serializername_param = t.add_parameter(
        Parameter(
            "SerializerName",
            Description="Serializer Name (default: SampleSerializer)",
            Type="String",
            Default="SampleSerializer",
        ))
    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))
    t.add_parameter(
        Parameter(
            "ClusterId",
            Type="String",
            Description="ClusterId to run the serializer on",
        ))
    docker_id_param = t.add_parameter(
        Parameter("DockerId",
                  Description="DockerId (default: centos:latest)",
                  Type="String",
                  Default="centos:latest"))
    t.add_parameter(
        Parameter(
            "ExecutionRole",
            Description="Lambda Execution Role",
            Type="String",
        ))
    hashkeyname_param = t.add_parameter(
        Parameter(
            "HaskKeyElementName",
            Description="HashType PrimaryKey Name (default: id)",
            Type="String",
            AllowedPattern="[a-zA-Z0-9]*",
            MinLength="1",
            MaxLength="2048",
            ConstraintDescription="must contain only alphanumberic characters",
            Default="id"))
    dirtylist_param = t.add_parameter(
        Parameter("DirtyList", Description="DirtyList Table Name",
                  Type="String"))
    runlog_param = t.add_parameter(
        Parameter("RunLog", Description="RunLog Table Name", Type="String"))
    canonical_prefix_param = t.add_parameter(
        Parameter("CanonicalPrefix",
                  Description="Canonical Tables Prefix",
                  Type="String",
                  Default="CANON_"))

    # Keyword arguments + dynamodb.KeySchema: the positional calling
    # convention and the dynamodb.Key class were removed from troposphere's
    # dynamodb module.
    serializer_table = t.add_resource(
        dynamodb.Table(
            "sampleSerializerTable",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName=Ref(hashkeyname_param), AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName=Ref(hashkeyname_param),
                                   KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))

    task_definition = t.add_resource(
        ecs.TaskDefinition(
            'TaskDefinition',
            ContainerDefinitions=[
                ecs.ContainerDefinition(
                    Name=Join('', [Ref(stackname_param), "Task"]),
                    Image=Ref(docker_id_param),
                    # Wire the serializer container to the layer's tables
                    # and naming conventions via environment variables.
                    Environment=[
                        ecs.Environment(Name="LAYER",
                                        Value=Ref(stackname_param)),
                        ecs.Environment(Name="SERIALIZER",
                                        Value=Ref(serializername_param)),
                        ecs.Environment(Name="SERIALIZER_TABLE",
                                        Value=Ref(serializer_table)),
                        ecs.Environment(Name="CANONICAL_PREFIX",
                                        Value=Ref(canonical_prefix_param)),
                        ecs.Environment(Name="DIRTYLIST",
                                        Value=Ref(dirtylist_param)),
                        ecs.Environment(Name="RUNLOG",
                                        Value=Ref(runlog_param))
                    ],
                    Memory=512,
                )
            ],
            Volumes=[],
        ))

    t.add_output([
        Output(
            "taskdefinitionid",
            Description="Task Definition Id",
            Value=Ref(task_definition),
        )
    ])
    return t
def GenerateStepPublisherLayer():
    """Build the CloudFormation template for the StepScheduler layer.

    Declares the scheduler infrastructure: the container-launcher Lambda,
    the town-clock SNS topic that triggers it, the scheduler and ECS
    cluster EC2 hosts with their security groups, the DirtyList/RunLog
    DynamoDB tables, and outputs for the created resources.

    :return: the assembled troposphere Template
    :rtype: Template
    """
    t = Template()
    t.add_description("""\
StepScheduler Layer
""")

    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))
    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))
    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))
    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))
    scheduler_ami_id_param = t.add_parameter(
        Parameter(
            "SchedulerAmiId",
            Description="Scheduler server AMI ID (default: ami-a10897d6)",
            Type="String",
            Default="ami-a10897d6"))
    cluster_ami_id_param = t.add_parameter(
        Parameter("ClusterAmiId",
                  Description="Cluster server AMI ID (default: ami-3db4ca4a)",
                  Type="String",
                  Default="ami-3db4ca4a"))
    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))
    hashkeyname_param = t.add_parameter(
        Parameter(
            "HaskKeyElementName",
            Description="HashType PrimaryKey Name (default: id)",
            Type="String",
            AllowedPattern="[a-zA-Z0-9]*",
            MinLength="1",
            MaxLength="2048",
            ConstraintDescription="must contain only alphanumberic characters",
            Default="id"))
    crontab_tablename_param = t.add_parameter(
        Parameter(
            "CrontabTablename",
            Description="Crontab Table Name",
            Type="String",
        ))
    containerlauncher_param = t.add_parameter(
        Parameter(
            "Containerlauncher",
            Description=
            "Container Launcher zip file (default: containerLauncher-1.0.zip)",
            Type="String",
            Default="containerLauncher-1.0.zip"))
    zipfileversion_param = t.add_parameter(
        Parameter(
            "ZipfileVersion",
            Description="Container Launcher zip file version",
            Type="String",
        ))

    # --------- Lambda Container Launcher
    lambda_function = t.add_resource(
        Function(
            "containerLauncher",
            Code=Code(
                S3Bucket="hackathon-glueteam-lambda",
                S3Key=Ref(containerlauncher_param),
                S3ObjectVersion=Ref(zipfileversion_param),
            ),
            Description=Join('',
                             [Ref(stackname_param), " container Launcher"]),
            MemorySize=256,
            Handler="com.philips.glueteam.DockerLauncher::myHandler",
            Runtime="java8",
            Timeout=60,
            Role=Join('', [
                "arn:aws:iam::",
                Ref("AWS::AccountId"), ":role/",
                Ref(iam_role_param)
            ]),
        ))
    # SNS topic whose periodic publishes invoke the launcher Lambda.
    townclock_topic = t.add_resource(
        sns.Topic(
            "TownClock",
            Subscription=[
                sns.Subscription(Endpoint=GetAtt("containerLauncher", "Arn"),
                                 Protocol="lambda"),
            ],
        ))

    # --------- Scheduler instance
    scheduler_sg = t.add_resource(
        ec2.SecurityGroup(
            'SchedulerSG',
            GroupDescription='Security group for Scheduler host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "SchedulerSG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="8080",
                    ToPort="8080",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    cluster = t.add_resource(ecs.Cluster("ECSCluster"))
    # The scheduler host installs a cron job that publishes the "town
    # clock" tick to SNS every 2 minutes.
    scheduler_host = t.add_resource(
        ec2.Instance(
            'SchedulerHost',
            ImageId=Ref(scheduler_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    AssociatePublicIpAddress=True,
                    SubnetId=Select(0, Ref(subnets)),
                    DeleteOnTermination=True,
                    GroupSet=[
                        Ref(scheduler_sg),
                    ],
                    DeviceIndex=0,
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "Scheduler"]),
                      Id=Join("", [Ref(stackname_param), "Scheduler"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    'sns_topic_arn="',
                    Ref(townclock_topic),
                    '"\n',
                    'region="',
                    Ref("AWS::Region"),
                    '"\n',
                    'crontab_tablename="',
                    Ref(crontab_tablename_param),
                    '"\n',
                    'ecs_clustername="',
                    Ref(cluster),
                    '"\n',
                    'publish_source=https://raw.githubusercontent.com/hngkr/hackathon/master/ansible/files/unreliable-town-clock-publish\n',
                    'publish=/usr/local/bin/unreliable-town-clock-publish\n',
                    'curl -s --location --retry 10 -o $publish $publish_source\n',
                    'chmod +x $publish\n',
                    'cat <<EOF >/etc/cron.d/unreliable-town-clock\n',
                    '*/2 * * * * ec2-user $publish "$sns_topic_arn" "$region" "$crontab_tablename" "$ecs_clustername"\n',
                    'EOF\n',
                ])),
        ))

    cluster_sg = t.add_resource(
        ec2.SecurityGroup(
            'ClusterSG',
            GroupDescription='Security group for Cluster host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "ClusterSG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    # The cluster host registers itself with the ECS cluster via
    # /etc/ecs/ecs.config.
    cluster_host = t.add_resource(
        ec2.Instance(
            'ClusterHost',
            ImageId=Ref(cluster_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            # TODO: Should have multiple separate iam roles for townclock / clusterhost
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    AssociatePublicIpAddress=True,
                    SubnetId=Select(0, Ref(subnets)),
                    DeleteOnTermination=True,
                    GroupSet=[
                        Ref(cluster_sg),
                    ],
                    DeviceIndex=0,
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "ClusterNode"]),
                      Id=Join("", [Ref(stackname_param), "ClusterNode"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'mkdir /etc/ecs\n',
                    'cat <<EOF >/etc/ecs/ecs.config\n',
                    'ECS_CLUSTER=',
                    Ref(cluster),
                    '\n',
                    'EOF\n',
                ])),
        ))

    # --------- Expected DynamoDB Tables
    # Keyword arguments + dynamodb.KeySchema: the positional calling
    # convention and the dynamodb.Key class were removed from troposphere's
    # dynamodb module.
    dirtylist_table = t.add_resource(
        dynamodb.Table(
            "DirtyList",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName=Ref(hashkeyname_param), AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName=Ref(hashkeyname_param),
                                   KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))
    runlog_table = t.add_resource(
        dynamodb.Table(
            "RunLog",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName=Ref(hashkeyname_param), AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName=Ref(hashkeyname_param),
                                   KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))

    # --------- Outputs
    t.add_output(
        [Output(
            "clusterid",
            Description="Cluster Id",
            Value=Ref(cluster),
        )])
    t.add_output([
        Output(
            "dirtylist",
            Description="DirtyList Tablename",
            Value=Ref(dirtylist_table),
        )
    ])
    t.add_output([
        Output(
            "runlog",
            Description="Runlog Tablename",
            Value=Ref(runlog_table),
        )
    ])
    t.add_output([
        Output(
            "lambdafunctionname",
            Description="Lambda Function Name",
            Value=Ref(lambda_function),
        )
    ])
    t.add_output([
        Output(
            "clocktowertopicarn",
            Description="Clock Tower Topic Arn",
            Value=Ref(townclock_topic),
        )
    ])
    return t
return Sub(str.join(':', [ 'arn', 'aws', 'execute-api', '${AWS::Region}', '${AWS::AccountId}', '${api}/' + path ]), api=Ref(api_resource)) template = Template() devices_table = template.add_resource( dynamodb.Table( 'DevicesTable', TableName='Devices', KeySchema=[ dynamodb.KeySchema(AttributeName='key', KeyType='HASH'), ], AttributeDefinitions=[ dynamodb.AttributeDefinition(AttributeName='key', AttributeType='S'), ], BillingMode='PAY_PER_REQUEST', )) restapi = template.add_resource( apigateway.RestApi('DevicesApi', Name='DevicesApi', Description='Devices API.', BinaryMediaTypes=['application/vnd.pasbox.octets'])) authorizer_lambda = authorizer.generate(template) authorizer_credentials = template.add_resource(