def inject_to(self, t):
    """Add this table (plus its autoscaling configuration) to template *t*.

    The key layout is taken from ``self.hash_key`` and, when set,
    ``self.range_key`` (each a ``(name, type)`` pair).  Returns the mutated
    template so calls can be chained.
    """
    # (attribute_name, attribute_type, key_type) for every key element.
    key_parts = [(self.hash_key[0], self.hash_key[1], "HASH")]
    if self.range_key is not None:
        key_parts.append((self.range_key[0], self.range_key[1], "RANGE"))

    attr_defs = [
        dynamodb.AttributeDefinition(AttributeName=name, AttributeType=kind)
        for name, kind, _ in key_parts
    ]
    schema = [
        dynamodb.KeySchema(AttributeName=name, KeyType=key_type)
        for name, _, key_type in key_parts
    ]

    table = t.add_resource(
        dynamodb.Table(
            self.label,
            AttributeDefinitions=attr_defs,
            KeySchema=schema,
            ProvisionedThroughput=self.get_provisioned_throughput(t)))
    self.configure_autoscaling(t, table)
    return t
def words_attrdefs():
    """Return the attribute definitions for the words table.

    Every attribute is string-typed ('S').
    """
    attribute_names = (
        'word',
        'reminder',
        'user',
        'user/forgetful',
        'user/showed',
    )
    return [
        dynamodb.AttributeDefinition(AttributeName=name, AttributeType='S')
        for name in attribute_names
    ]
def __init__(self, title, **kwargs):
    """Configure the credential-store DynamoDB table resource.

    Key layout: 'name' (HASH) + 'version' (RANGE), both strings, with
    minimal 1/1 provisioned throughput.  The table name is derived from
    the EnvShort substitution variable.
    """
    super().__init__(title, **kwargs)
    hash_attr, range_attr = 'name', 'version'
    self.AttributeDefinitions = [
        ddb.AttributeDefinition(AttributeName=attr, AttributeType='S')
        for attr in (hash_attr, range_attr)
    ]
    self.KeySchema = [
        ddb.KeySchema(AttributeName=hash_attr, KeyType='HASH'),
        ddb.KeySchema(AttributeName=range_attr, KeyType='RANGE'),
    ]
    self.ProvisionedThroughput = ddb.ProvisionedThroughput(
        ReadCapacityUnits=1, WriteCapacityUnits=1)
    self.TableName = Sub('credential-store-${EnvShort}')
def GenerateGlobalLayer():
    """Build the CloudFormation template for the global layer.

    Defines the crontab/schedule DynamoDB table ('taskname' string HASH
    key, 1/1 provisioned throughput) and exports its generated name.

    Returns:
        troposphere.Template: the assembled template.
    """
    t = Template()
    t.add_description("""\
Global Layer
""")
    t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: StepGlobals)",
            Type="String",
            Default="StepGlobals",
        ))
    crontab_table = t.add_resource(
        dynamodb.Table(
            "scheduleTable",
            # Keyword arguments and KeySchema are used for consistency with
            # the other tables in this file; the previous positional-argument
            # form and the dynamodb.Key helper are not supported by current
            # troposphere releases.
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName="taskname",
                                             AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName="taskname", KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))
    t.add_output([
        Output(
            "crontabtablename",
            Description="Crontab Table Name",
            Value=Ref(crontab_table),
        )
    ])
    return t
def define_attributes_definition(attribute_definitions):
    """Convert raw attribute specs into troposphere AttributeDefinitions.

    Each entry in *attribute_definitions* must be a mapping with
    'AttributeName' and 'AttributeType' keys (a KeyError propagates if
    either is missing).
    """
    return [
        dynamodb.AttributeDefinition(
            AttributeName=spec["AttributeName"],
            AttributeType=spec["AttributeType"],
        )
        for spec in attribute_definitions
    ]
def cf_resources(cls):
    """Produce the CloudFormation resources for this table.

    Returns a single-element list holding an on-demand DynamoDB table
    keyed by ('id' HASH, 'created_at' RANGE), both strings.
    """
    from troposphere import dynamodb

    key_layout = [('id', 'HASH'), ('created_at', 'RANGE')]
    table = dynamodb.Table(
        cls._TABLE_NAME,
        TableName=cls._TABLE_NAME,
        BillingMode='PAY_PER_REQUEST',
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(AttributeName=attr,
                                         AttributeType='S')
            for attr, _ in key_layout
        ],
        KeySchema=[
            dynamodb.KeySchema(AttributeName=attr, KeyType=key_type)
            for attr, key_type in key_layout
        ])
    return [table]
def add_api_token_table(self):
    """Register the ApiTokens table on this template/stack.

    On-demand billing, server-side encryption enabled, keyed by a single
    string HASH attribute 'token'.  The created resource is stored on
    ``self._api_token_table``.
    """
    token_attr = 'token'
    table = dynamodb.Table(
        'ApiTokens',
        TableName=Sub('${AWS::StackName}ApiTokens'),
        BillingMode='PAY_PER_REQUEST',
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(AttributeName=token_attr,
                                         AttributeType='S')
        ],
        KeySchema=[
            dynamodb.KeySchema(AttributeName=token_attr, KeyType='HASH')
        ],
        SSESpecification=dynamodb.SSESpecification(SSEEnabled=True))
    self._api_token_table = self.add_resource(table)
def r_table(self):
    """Return the DynamoDB table resource described by ``self.conf``.

    Reads 'table_name' and 'table_key' from the config; the key attribute
    is a string HASH key and billing is on-demand.
    """
    name = self.conf['table_name']
    key = self.conf['table_key']
    return dynamodb.Table(
        'DynamoDbTable',
        TableName=name,
        BillingMode='PAY_PER_REQUEST',
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(AttributeName=key,
                                         AttributeType='S'),
        ],
        KeySchema=[dynamodb.KeySchema(AttributeName=key, KeyType='HASH')])
# ============================================================================ db = rds.DBInstance( 'Postgres', DBInstanceClass='db.t2.micro', Engine='postgres', AllocatedStorage="5", DBName=Ref(db_name), MasterUsername=Ref(db_user), MasterUserPassword=Ref(db_password), ) # ============================================================================ # Dynamodb # ============================================================================ dynamo_db = dynamodb.Table( 'DynamoDBLog', TableName='DynamoDBLog', AttributeDefinitions=[dynamodb.AttributeDefinition( AttributeName='request_id', AttributeType='S', )], KeySchema=[dynamodb.KeySchema( AttributeName='request_id', KeyType='HASH', )], ProvisionedThroughput=dynamodb.ProvisionedThroughput( ReadCapacityUnits=5, WriteCapacityUnits=5, ) )
# Operator-facing toggle; only 'true'/'false' are accepted, defaulting to
# 'false'.
overwrite = template.add_parameter(Parameter(
    "Overwrite",
    Type=constants.STRING,
    AllowedValues=['true', 'false'],
    Default='false',
    Description="overwrite",
))
custom_resources.use_custom_resources_stack_name_parameter(template)

# On-demand table with a single string HASH attribute named "key".
table1 = template.add_resource(dynamodb.Table(
    "Table1",
    BillingMode="PAY_PER_REQUEST",
    AttributeDefinitions=[
        dynamodb.AttributeDefinition(
            AttributeName="key",
            AttributeType="S",
        )
    ],
    KeySchema=[
        dynamodb.KeySchema(
            AttributeName="key",
            KeyType="HASH",
        )
    ],
))
# Expose the generated table name to stack consumers.
template.add_output(Output(
    "Table1Name",
    Value=Ref(table1),
))
def create_template(self):
    """Create template (main function called by Stacker).

    Builds the Terraform remote-state backend resources: the DynamoDB lock
    table, the versioned state S3 bucket, and a managed IAM policy granting
    the permissions Terraform's S3 backend requires.  Outputs the table
    name, bucket name/ARN, and policy ARN.
    """
    template = self.template
    variables = self.get_variables()
    # NOTE(review): add_version/add_description are the older troposphere
    # method names; newer releases prefer set_version/set_description —
    # confirm against the pinned troposphere version.
    self.template.add_version('2010-09-09')
    self.template.add_description('Terraform State Resources')

    # Conditions: a name counts as "omitted" when it is empty or the
    # literal string 'undefined', letting CloudFormation auto-generate it.
    for i in ['BucketName', 'TableName']:
        template.add_condition(
            "%sOmitted" % i,
            Or(Equals(variables[i].ref, ''),
               Equals(variables[i].ref, 'undefined')))

    # Resources
    # Lock table keyed by the string HASH attribute 'LockID' (the key name
    # Terraform's S3 backend expects), 2/2 provisioned throughput.
    terraformlocktable = template.add_resource(
        dynamodb.Table(
            'TerraformStateTable',
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName='LockID',
                                             AttributeType='S')
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH')
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2, WriteCapacityUnits=2),
            TableName=If('TableNameOmitted', NoValue,
                         variables['TableName'].ref)))
    template.add_output(
        Output('%sName' % terraformlocktable.title,
               Description='Name of DynamoDB table for Terraform state',
               Value=terraformlocktable.ref()))
    # Private, versioned state bucket; non-current object versions expire
    # after 90 days.
    terraformstatebucket = template.add_resource(
        s3.Bucket(
            'TerraformStateBucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted', NoValue,
                          variables['BucketName'].ref),
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('%sName' % terraformstatebucket.title,
               Description='Name of bucket storing Terraform state',
               Value=terraformstatebucket.ref()))
    template.add_output(
        Output('%sArn' % terraformstatebucket.title,
               Description='Arn of bucket storing Terraform state',
               Value=terraformstatebucket.get_att('Arn')))
    managementpolicy = template.add_resource(
        iam.ManagedPolicy(
            'ManagementPolicy',
            Description='Managed policy for Terraform state management.',
            Path='/',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[terraformstatebucket.get_att('Arn')]),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join('', [
                                terraformstatebucket.get_att('Arn'), '/*'
                            ])
                        ]),
                    Statement(Action=[
                        awacs.dynamodb.GetItem, awacs.dynamodb.PutItem,
                        awacs.dynamodb.DeleteItem
                    ],
                              Effect=Allow,
                              Resource=[terraformlocktable.get_att('Arn')])
                ])))
    template.add_output(
        Output('PolicyArn',
               Description='Managed policy Arn',
               Value=managementpolicy.ref()))
from troposphere import dynamodb

# Table keyed by ('uuid' HASH, 'lambda_name' RANGE), both strings, with 2/2
# provisioned read/write capacity.
dynamodb_table = dynamodb.Table(
    "DynamoDBTable",
    AttributeDefinitions=[
        dynamodb.AttributeDefinition(AttributeName="uuid", AttributeType="S"),
        dynamodb.AttributeDefinition(
            AttributeName="lambda_name", AttributeType="S"
        ),
    ],
    KeySchema=[
        dynamodb.KeySchema(AttributeName="uuid", KeyType="HASH"),
        dynamodb.KeySchema(AttributeName="lambda_name", KeyType="RANGE"),
    ],
    ProvisionedThroughput=dynamodb.ProvisionedThroughput(
        ReadCapacityUnits=2, WriteCapacityUnits=2
    ),
)
def GenerateSerializerLayer():
    """Build the CloudFormation template for the serializer layer.

    Declares the serializer's DynamoDB table and an ECS task definition
    whose container receives the layer/serializer/table names via
    environment variables.  Exports the task definition id.

    Returns:
        troposphere.Template: the assembled template.
    """
    t = Template()
    t.add_description("""\
Serializer Layer
""")
    serializername_param = t.add_parameter(
        Parameter(
            "SerializerName",
            Description="Serializer Name (default: SampleSerializer)",
            Type="String",
            Default="SampleSerializer",
        ))
    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))
    clusterid_param = t.add_parameter(
        Parameter(
            "ClusterId",
            Type="String",
            Description="ClusterId to run the serializer on",
        ))
    docker_id_param = t.add_parameter(
        Parameter("DockerId",
                  Description="DockerId (default: centos:latest)",
                  Type="String",
                  Default="centos:latest"))
    execution_role_param = t.add_parameter(
        Parameter(
            "ExecutionRole",
            Description="Lambda Execution Role",
            Type="String",
        ))
    # NOTE: "HaskKeyElementName" is misspelled but kept as-is; other
    # templates pass the parameter under this exact name.
    hashkeyname_param = t.add_parameter(
        Parameter(
            "HaskKeyElementName",
            Description="HashType PrimaryKey Name (default: id)",
            Type="String",
            AllowedPattern="[a-zA-Z0-9]*",
            MinLength="1",
            MaxLength="2048",
            ConstraintDescription="must contain only alphanumberic characters",
            Default="id"))
    dirtylist_param = t.add_parameter(
        Parameter("DirtyList", Description="DirtyList Table Name",
                  Type="String"))
    runlog_param = t.add_parameter(
        Parameter("RunLog", Description="RunLog Table Name", Type="String"))
    canonical_prefix_param = t.add_parameter(
        Parameter("CanonicalPrefix",
                  Description="Canonical Tables Prefix",
                  Type="String",
                  Default="CANON_"))
    serializer_table = t.add_resource(
        dynamodb.Table(
            "sampleSerializerTable",
            # Keyword arguments and KeySchema are used for consistency with
            # the other tables in this file; the previous positional form and
            # the dynamodb.Key helper are not supported by current
            # troposphere releases.
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName=Ref(hashkeyname_param),
                    AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName=Ref(hashkeyname_param),
                                   KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))
    task_definition = t.add_resource(
        ecs.TaskDefinition(
            'TaskDefinition',
            ContainerDefinitions=[
                ecs.ContainerDefinition(
                    Name=Join('', [Ref(stackname_param), "Task"]),
                    Image=Ref(docker_id_param),
                    # The container discovers its configuration entirely via
                    # environment variables.
                    Environment=[
                        ecs.Environment(Name="LAYER",
                                        Value=Ref(stackname_param)),
                        ecs.Environment(Name="SERIALIZER",
                                        Value=Ref(serializername_param)),
                        ecs.Environment(Name="SERIALIZER_TABLE",
                                        Value=Ref(serializer_table)),
                        ecs.Environment(Name="CANONICAL_PREFIX",
                                        Value=Ref(canonical_prefix_param)),
                        ecs.Environment(Name="DIRTYLIST",
                                        Value=Ref(dirtylist_param)),
                        ecs.Environment(Name="RUNLOG",
                                        Value=Ref(runlog_param))
                    ],
                    Memory=512,
                )
            ],
            Volumes=[],
        ))
    t.add_output([
        Output(
            "taskdefinitionid",
            Description="Task Definition Id",
            Value=Ref(task_definition),
        )
    ])
    return t
def GenerateStepPublisherLayer():
    """Build the CloudFormation template for the StepScheduler layer.

    Creates the container-launcher Lambda, the SNS "town clock" topic that
    invokes it, the scheduler EC2 host (publishing clock ticks via cron),
    an ECS cluster plus a cluster EC2 host, and the DirtyList / RunLog
    DynamoDB tables.  Exports the cluster id, table names, Lambda name,
    and topic ARN.

    Returns:
        troposphere.Template: the assembled template.
    """
    t = Template()
    t.add_description("""\
StepScheduler Layer
""")
    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))
    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))
    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))
    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))
    scheduler_ami_id_param = t.add_parameter(
        Parameter(
            "SchedulerAmiId",
            Description="Scheduler server AMI ID (default: ami-a10897d6)",
            Type="String",
            Default="ami-a10897d6"))
    cluster_ami_id_param = t.add_parameter(
        Parameter("ClusterAmiId",
                  Description="Cluster server AMI ID (default: ami-3db4ca4a)",
                  Type="String",
                  Default="ami-3db4ca4a"))
    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))
    # NOTE: "HaskKeyElementName" is misspelled but kept as-is; the other
    # layer templates use the parameter under this exact name.
    hashkeyname_param = t.add_parameter(
        Parameter(
            "HaskKeyElementName",
            Description="HashType PrimaryKey Name (default: id)",
            Type="String",
            AllowedPattern="[a-zA-Z0-9]*",
            MinLength="1",
            MaxLength="2048",
            ConstraintDescription="must contain only alphanumberic characters",
            Default="id"))
    crontab_tablename_param = t.add_parameter(
        Parameter(
            "CrontabTablename",
            Description="Crontab Table Name",
            Type="String",
        ))
    containerlauncher_param = t.add_parameter(
        Parameter(
            "Containerlauncher",
            Description=
            "Container Launcher zip file (default: containerLauncher-1.0.zip)",
            Type="String",
            Default="containerLauncher-1.0.zip"))
    zipfileversion_param = t.add_parameter(
        Parameter(
            "ZipfileVersion",
            Description="Container Launcher zip file version",
            Type="String",
        ))

    # --------- Lambda Container Launcher
    lambda_function = t.add_resource(
        Function(
            "containerLauncher",
            Code=Code(
                S3Bucket="hackathon-glueteam-lambda",
                S3Key=Ref(containerlauncher_param),
                S3ObjectVersion=Ref(zipfileversion_param),
            ),
            Description=Join('',
                             [Ref(stackname_param), " container Launcher"]),
            MemorySize=256,
            Handler="com.philips.glueteam.DockerLauncher::myHandler",
            Runtime="java8",
            Timeout=60,
            Role=Join('', [
                "arn:aws:iam::",
                Ref("AWS::AccountId"), ":role/",
                Ref(iam_role_param)
            ]),
        ))
    # Topic whose periodic "clock tick" messages trigger the launcher Lambda.
    townclock_topic = t.add_resource(
        sns.Topic(
            "TownClock",
            Subscription=[
                sns.Subscription(Endpoint=GetAtt("containerLauncher", "Arn"),
                                 Protocol="lambda"),
            ],
        ))

    # --------- Scheduler instance
    scheduler_sg = t.add_resource(
        ec2.SecurityGroup(
            'SchedulerSG',
            GroupDescription='Security group for Scheduler host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "SchedulerSG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="8080",
                    ToPort="8080",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    cluster = t.add_resource(ecs.Cluster("ECSCluster", ))
    # The scheduler host installs a cron job that publishes clock ticks to
    # the TownClock topic every 2 minutes.
    scheduler_host = t.add_resource(
        ec2.Instance(
            'SchedulerHost',
            ImageId=Ref(scheduler_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    AssociatePublicIpAddress=True,
                    SubnetId=Select(0, Ref(subnets)),
                    DeleteOnTermination=True,
                    GroupSet=[
                        Ref(scheduler_sg),
                    ],
                    DeviceIndex=0,
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "Scheduler"]),
                      Id=Join("", [Ref(stackname_param), "Scheduler"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    'sns_topic_arn="',
                    Ref(townclock_topic),
                    '"\n',
                    'region="',
                    Ref("AWS::Region"),
                    '"\n',
                    'crontab_tablename="',
                    Ref(crontab_tablename_param),
                    '"\n',
                    'ecs_clustername="',
                    Ref(cluster),
                    '"\n',
                    'publish_source=https://raw.githubusercontent.com/hngkr/hackathon/master/ansible/files/unreliable-town-clock-publish\n',
                    'publish=/usr/local/bin/unreliable-town-clock-publish\n',
                    'curl -s --location --retry 10 -o $publish $publish_source\n',
                    'chmod +x $publish\n',
                    'cat <<EOF >/etc/cron.d/unreliable-town-clock\n',
                    '*/2 * * * * ec2-user $publish "$sns_topic_arn" "$region" "$crontab_tablename" "$ecs_clustername"\n',
                    'EOF\n',
                ])),
        ))
    cluster_sg = t.add_resource(
        ec2.SecurityGroup(
            'ClusterSG',
            GroupDescription='Security group for Cluster host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "ClusterSG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
            ]))
    # The cluster host only registers itself with the ECS cluster on boot.
    cluster_host = t.add_resource(
        ec2.Instance(
            'ClusterHost',
            ImageId=Ref(cluster_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            # TODO: Should have multiple separate iam roles for townclock / clusterhost
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    AssociatePublicIpAddress=True,
                    SubnetId=Select(0, Ref(subnets)),
                    DeleteOnTermination=True,
                    GroupSet=[
                        Ref(cluster_sg),
                    ],
                    DeviceIndex=0,
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "ClusterNode"]),
                      Id=Join("", [Ref(stackname_param), "ClusterNode"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'mkdir /etc/ecs\n',
                    'cat <<EOF >/etc/ecs/ecs.config\n',
                    'ECS_CLUSTER=',
                    Ref(cluster),
                    '\n',
                    'EOF\n',
                ])),
        ))

    # --------- Expected DynamoDB Tables
    # Keyword arguments and KeySchema are used for consistency with the other
    # tables in this file; the previous positional form and the dynamodb.Key
    # helper are not supported by current troposphere releases.
    dirtylist_table = t.add_resource(
        dynamodb.Table(
            "DirtyList",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName=Ref(hashkeyname_param),
                    AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName=Ref(hashkeyname_param),
                                   KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))
    runlog_table = t.add_resource(
        dynamodb.Table(
            "RunLog",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName=Ref(hashkeyname_param),
                    AttributeType="S"),
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName=Ref(hashkeyname_param),
                                   KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)))

    # --------- Outputs
    t.add_output(
        [Output(
            "clusterid",
            Description="Cluster Id",
            Value=Ref(cluster),
        )])
    t.add_output([
        Output(
            "dirtylist",
            Description="DirtyList Tablename",
            Value=Ref(dirtylist_table),
        )
    ])
    t.add_output([
        Output(
            "runlog",
            Description="Runlog Tablename",
            Value=Ref(runlog_table),
        )
    ])
    t.add_output([
        Output(
            "lambdafunctionname",
            Description="Lambda Function Name",
            Value=Ref(lambda_function),
        )
    ])
    t.add_output([
        Output(
            "clocktowertopicarn",
            Description="Clock Tower Topic Arn",
            Value=Ref(townclock_topic),
        )
    ])
    return t
def prep_config(raw_config):
    """Translate a raw DynamoDB config dict into troposphere Table kwargs.

    AttributeDefinitions, KeySchema and ProvisionedThroughput are required;
    GlobalSecondaryIndexes, LocalSecondaryIndexes and StreamSpecification
    are optional.  Raises KeyError when attribute definitions are missing
    and ValueError when either index list exceeds the AWS limit.
    """
    def _prep_index(index_config):
        # Normalize one GSI/LSI spec in place (this mutates the raw dict,
        # exactly as the original per-index loop did).
        index_config["KeySchema"] = prep_schemata(index_config)
        index_config["Projection"] = prep_projection(index_config)
        index_config["ProvisionedThroughput"] = prep_throughput(index_config)
        return index_config

    prepped_config = {}
    util.check_properties(
        raw_config,
        [
            "AttributeDefinitions",
            "GlobalSecondaryIndexes",
            "KeySchema",
            "LocalSecondaryIndexes",
            "ProvisionedThroughput",
            "StreamSpecification",
        ],
        "DynamoDB",
    )

    # AttributeDefinitions are required, so raise a KeyError if this doesn't
    # work.
    try:
        config_attributes = raw_config["AttributeDefinitions"]
    except KeyError:
        raise KeyError(
            "Attribute definitions are required for the creation of a " +
            "DynamoDB table.")
    prepped_config["AttributeDefinitions"] = [
        dynamodb.AttributeDefinition(**attr) for attr in config_attributes
    ]

    if "GlobalSecondaryIndexes" in raw_config:
        gsi_configs = raw_config["GlobalSecondaryIndexes"]
        # AWS limits us to 5 GSIs. Check for that and bail if there's more.
        if len(gsi_configs) > MAX_GSI_VALUE:
            raise ValueError(
                "A DynamoDB table can only have a maximum of 5 global " +
                "secondary indexes.")
        prepped_config["GlobalSecondaryIndexes"] = [
            dynamodb.GlobalSecondaryIndex(**_prep_index(spec))
            for spec in gsi_configs
        ]

    prepped_config["KeySchema"] = prep_schemata(raw_config)

    if "LocalSecondaryIndexes" in raw_config:
        lsi_configs = raw_config["LocalSecondaryIndexes"]
        # Another limit of 5. Check and bail if more than that.
        if len(lsi_configs) > MAX_LSI_VALUE:
            raise ValueError(
                "A DynamoDB table can only have a maximum of 5 local " +
                "secondary indexes.")
        prepped_config["LocalSecondaryIndexes"] = [
            dynamodb.LocalSecondaryIndex(**_prep_index(spec))
            for spec in lsi_configs
        ]

    prepped_config["ProvisionedThroughput"] = prep_throughput(raw_config)

    if "StreamSpecification" in raw_config:
        prepped_config["StreamSpecification"] = dynamodb.StreamSpecification(
            **raw_config["StreamSpecification"])

    return prepped_config
'${api}/' + path ]), api=Ref(api_resource)) template = Template() devices_table = template.add_resource( dynamodb.Table( 'DevicesTable', TableName='Devices', KeySchema=[ dynamodb.KeySchema(AttributeName='key', KeyType='HASH'), ], AttributeDefinitions=[ dynamodb.AttributeDefinition(AttributeName='key', AttributeType='S'), ], BillingMode='PAY_PER_REQUEST', )) restapi = template.add_resource( apigateway.RestApi('DevicesApi', Name='DevicesApi', Description='Devices API.', BinaryMediaTypes=['application/vnd.pasbox.octets'])) authorizer_lambda = authorizer.generate(template) authorizer_credentials = template.add_resource( iam.Role('DevicesApiAuthorizerCredentials', RoleName='DevicesApiAuthorizerCredentials',