def GenerateGlobalLayer():
    t = Template()
    t.add_description("""\
Global Layer
""")

    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: StepGlobals)",
            Type="String",
            Default="StepGlobals",
        ))

    crontab_table = t.add_resource(
        dynamodb.Table(
            "scheduleTable",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition("taskname", "S"),
            ],
            KeySchema=[dynamodb.Key("taskname", "HASH")],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(1, 1)))

    t.add_output([
        Output(
            "crontabtablename",
            Description="Crontab Table Name",
            Value=Ref(crontab_table),
        )
    ])

    return t
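# Hedged usage sketch (the __main__ guard is an assumption, not from the
# original source): troposphere templates built by generators such as
# GenerateGlobalLayer() are typically rendered to CloudFormation JSON with
# Template.to_json().
if __name__ == "__main__":
    print(GenerateGlobalLayer().to_json())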
def prep_throughput(config):
    try:
        return dynamodb.ProvisionedThroughput(
            **config["ProvisionedThroughput"])
    except KeyError:
        raise KeyError(
            "ProvisionedThroughput values are required for the creation of "
            + "a DynamoDB table or index.")
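# Hedged usage sketch (the config shape is an assumption): the helper simply
# unpacks the "ProvisionedThroughput" mapping into keyword arguments, so the
# keys must match the CloudFormation property names exactly.
throughput = prep_throughput({
    "ProvisionedThroughput": {
        "ReadCapacityUnits": 5,
        "WriteCapacityUnits": 5,
    }
})
# A config without the key re-raises KeyError with the explanatory message:
# prep_throughput({})  # KeyError: "ProvisionedThroughput values are required ..."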
def words_table():
    return dynamodb.Table(
        'WordsTable',
        AttributeDefinitions=words_attrdefs(),
        GlobalSecondaryIndexes=words_gsis(),
        KeySchema=[
            dynamodb.KeySchema(AttributeName='user', KeyType='HASH'),
            dynamodb.KeySchema(AttributeName='word', KeyType='RANGE'),
        ],
        ProvisionedThroughput=dynamodb.ProvisionedThroughput(
            ReadCapacityUnits=1, WriteCapacityUnits=1))
def define_provisioned_throughput(properties):
    if keyisset("ProvisionedThroughput", properties):
        props = properties["ProvisionedThroughput"]
        return dynamodb.ProvisionedThroughput(
            ReadCapacityUnits=int(props["ReadCapacityUnits"])
            if keyisset("ReadCapacityUnits", props) else Ref(AWS_NO_VALUE),
            WriteCapacityUnits=int(props["WriteCapacityUnits"])
            if keyisset("WriteCapacityUnits", props) else Ref(AWS_NO_VALUE),
        )
    return Ref(AWS_NO_VALUE)
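# Hedged usage sketch (the example property dicts are assumptions; relies on
# the module's existing Ref/AWS_NO_VALUE/keyisset imports): when one capacity
# value is missing it is emitted as Ref(AWS::NoValue), so CloudFormation drops
# that property instead of receiving an invented default.
read_only = {"ProvisionedThroughput": {"ReadCapacityUnits": "10"}}
throughput = define_provisioned_throughput(read_only)
# -> ReadCapacityUnits=10, WriteCapacityUnits=Ref(AWS::NoValue)

no_throughput = {}  # no ProvisionedThroughput block at all
assert isinstance(define_provisioned_throughput(no_throughput), Ref)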
def words_gsis():
    return [
        dynamodb.GlobalSecondaryIndex(
            IndexName='user_showed_reminder',
            KeySchema=[
                dynamodb.KeySchema(AttributeName='user/showed',
                                   KeyType='HASH'),
                dynamodb.KeySchema(AttributeName='reminder',
                                   KeyType='RANGE'),
            ],
            Projection=dynamodb.Projection(ProjectionType='ALL'),
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)),
        dynamodb.GlobalSecondaryIndex(
            IndexName='user_forgetful_reminder',
            KeySchema=[
                dynamodb.KeySchema(AttributeName='user/forgetful',
                                   KeyType='HASH'),
                dynamodb.KeySchema(AttributeName='reminder',
                                   KeyType='RANGE'),
            ],
            Projection=dynamodb.Projection(ProjectionType='ALL'),
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=1, WriteCapacityUnits=1)),
    ]
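# words_attrdefs() is called by words_table() above but is not shown; the
# version below is a hypothetical sketch (the 'S' attribute types are
# assumptions). DynamoDB requires every attribute named in the table KeySchema
# or in any GSI KeySchema to be listed in AttributeDefinitions, so the five
# key attributes used above would all need entries here.
def words_attrdefs():
    return [
        dynamodb.AttributeDefinition(AttributeName='user', AttributeType='S'),
        dynamodb.AttributeDefinition(AttributeName='word', AttributeType='S'),
        dynamodb.AttributeDefinition(AttributeName='user/showed',
                                     AttributeType='S'),
        dynamodb.AttributeDefinition(AttributeName='user/forgetful',
                                     AttributeType='S'),
        dynamodb.AttributeDefinition(AttributeName='reminder',
                                     AttributeType='S'),
    ]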
def __init__(self, title, **kwargs):
    super().__init__(title, **kwargs)
    self.AttributeDefinitions = [
        ddb.AttributeDefinition(AttributeName='name', AttributeType='S'),
        ddb.AttributeDefinition(AttributeName='version', AttributeType='S')
    ]
    self.KeySchema = [
        ddb.KeySchema(AttributeName='name', KeyType='HASH'),
        ddb.KeySchema(AttributeName='version', KeyType='RANGE')
    ]
    self.ProvisionedThroughput = ddb.ProvisionedThroughput(
        ReadCapacityUnits=1, WriteCapacityUnits=1)
    self.TableName = Sub('credential-store-${EnvShort}')
# ============================================================================
db = rds.DBInstance(
    'Postgres',
    DBInstanceClass='db.t2.micro',
    Engine='postgres',
    AllocatedStorage="5",
    DBName=Ref(db_name),
    MasterUsername=Ref(db_user),
    MasterUserPassword=Ref(db_password),
)

# ============================================================================
# Dynamodb
# ============================================================================
dynamo_db = dynamodb.Table(
    'DynamoDBLog',
    TableName='DynamoDBLog',
    AttributeDefinitions=[dynamodb.AttributeDefinition(
        AttributeName='request_id',
        AttributeType='S',
    )],
    KeySchema=[dynamodb.KeySchema(
        AttributeName='request_id',
        KeyType='HASH',
    )],
    ProvisionedThroughput=dynamodb.ProvisionedThroughput(
        ReadCapacityUnits=5,
        WriteCapacityUnits=5,
    )
)
def create_template(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()
    self.template.add_version('2010-09-09')
    self.template.add_description('Terraform State Resources')

    # Conditions
    for i in ['BucketName', 'TableName']:
        template.add_condition(
            "%sOmitted" % i,
            Or(Equals(variables[i].ref, ''),
               Equals(variables[i].ref, 'undefined')))

    # Resources
    terraformlocktable = template.add_resource(
        dynamodb.Table(
            'TerraformStateTable',
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName='LockID',
                                             AttributeType='S')
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH')
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2,
                WriteCapacityUnits=2),
            TableName=If('TableNameOmitted',
                         NoValue,
                         variables['TableName'].ref)))
    template.add_output(
        Output('%sName' % terraformlocktable.title,
               Description='Name of DynamoDB table for Terraform state',
               Value=terraformlocktable.ref()))

    terraformstatebucket = template.add_resource(
        s3.Bucket(
            'TerraformStateBucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted',
                          NoValue,
                          variables['BucketName'].ref),
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('%sName' % terraformstatebucket.title,
               Description='Name of bucket storing Terraform state',
               Value=terraformstatebucket.ref()))
    template.add_output(
        Output('%sArn' % terraformstatebucket.title,
               Description='Arn of bucket storing Terraform state',
               Value=terraformstatebucket.get_att('Arn')))

    managementpolicy = template.add_resource(
        iam.ManagedPolicy(
            'ManagementPolicy',
            Description='Managed policy for Terraform state management.',
            Path='/',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[terraformstatebucket.get_att('Arn')]),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join('', [terraformstatebucket.get_att('Arn'),
                                      '/*'])
                        ]),
                    Statement(
                        Action=[awacs.dynamodb.GetItem,
                                awacs.dynamodb.PutItem,
                                awacs.dynamodb.DeleteItem],
                        Effect=Allow,
                        Resource=[terraformlocktable.get_att('Arn')])
                ])))
    template.add_output(
        Output('PolicyArn',
               Description='Managed policy Arn',
               Value=managementpolicy.ref()))
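# Hedged context sketch (assumed, not part of the original snippet; the class
# name is hypothetical): a Stacker blueprint whose create_template() looks like
# the method above normally declares its inputs in a VARIABLES mapping, and the
# CFNString type is what makes variables['BucketName'].ref resolve to a
# CloudFormation parameter reference.
from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import CFNString


class TerraformState(Blueprint):
    """Blueprint sketch; create_template() would be the method shown above."""

    VARIABLES = {
        'BucketName': {'type': CFNString,
                       'description': 'Terraform state bucket name',
                       'default': ''},
        'TableName': {'type': CFNString,
                      'description': 'Terraform lock table name',
                      'default': ''},
    }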
def get_provisioned_throughput(self, t):
    return dynamodb.ProvisionedThroughput(
        ReadCapacityUnits=self.min_read_capacity,
        WriteCapacityUnits=self.min_write_capacity)
def GenerateSerializerLayer():
    t = Template()
    t.add_description("""\
Serializer Layer
""")

    serializername_param = t.add_parameter(
        Parameter(
            "SerializerName",
            Description="Serializer Name (default: SampleSerializer)",
            Type="String",
            Default="SampleSerializer",
        ))

    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))

    clusterid_param = t.add_parameter(
        Parameter(
            "ClusterId",
            Type="String",
            Description="ClusterId to run the serializer on",
        ))

    docker_id_param = t.add_parameter(
        Parameter(
            "DockerId",
            Description="DockerId (default: centos:latest)",
            Type="String",
            Default="centos:latest"))

    execution_role_param = t.add_parameter(
        Parameter(
            "ExecutionRole",
            Description="Lambda Execution Role",
            Type="String",
        ))

    hashkeyname_param = t.add_parameter(
        Parameter(
            "HaskKeyElementName",
            Description="HashType PrimaryKey Name (default: id)",
            Type="String",
            AllowedPattern="[a-zA-Z0-9]*",
            MinLength="1",
            MaxLength="2048",
            ConstraintDescription="must contain only alphanumeric characters",
            Default="id"))

    dirtylist_param = t.add_parameter(
        Parameter(
            "DirtyList",
            Description="DirtyList Table Name",
            Type="String"))

    runlog_param = t.add_parameter(
        Parameter(
            "RunLog",
            Description="RunLog Table Name",
            Type="String"))

    canonical_prefix_param = t.add_parameter(
        Parameter(
            "CanonicalPrefix",
            Description="Canonical Tables Prefix",
            Type="String",
            Default="CANON_"))

    serializer_table = t.add_resource(
        dynamodb.Table(
            "sampleSerializerTable",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(Ref(hashkeyname_param), "S"),
            ],
            KeySchema=[dynamodb.Key(Ref(hashkeyname_param), "HASH")],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(1, 1)))

    task_definition = t.add_resource(
        ecs.TaskDefinition(
            'TaskDefinition',
            ContainerDefinitions=[
                ecs.ContainerDefinition(
                    Name=Join('', [Ref(stackname_param), "Task"]),
                    Image=Ref(docker_id_param),
                    Environment=[
                        ecs.Environment(Name="LAYER",
                                        Value=Ref(stackname_param)),
                        ecs.Environment(Name="SERIALIZER",
                                        Value=Ref(serializername_param)),
                        ecs.Environment(Name="SERIALIZER_TABLE",
                                        Value=Ref(serializer_table)),
                        ecs.Environment(Name="CANONICAL_PREFIX",
                                        Value=Ref(canonical_prefix_param)),
                        ecs.Environment(Name="DIRTYLIST",
                                        Value=Ref(dirtylist_param)),
                        ecs.Environment(Name="RUNLOG",
                                        Value=Ref(runlog_param))
                    ],
                    Memory=512,
                )
            ],
            Volumes=[],
        ))

    t.add_output([
        Output(
            "taskdefinitionid",
            Description="Task Definition Id",
            Value=Ref(task_definition),
        )
    ])

    return t
def GenerateStepPublisherLayer():
    t = Template()
    t.add_description("""\
StepScheduler Layer
""")

    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))

    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))

    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list of SubnetIds for public subnets in the region "
                "and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))

    keypair_param = t.add_parameter(
        Parameter(
            "KeyPair",
            Description="Name of an existing EC2 KeyPair to enable SSH "
                        "access to the instance",
            Type="String",
            Default="glueteam"))

    scheduler_ami_id_param = t.add_parameter(
        Parameter(
            "SchedulerAmiId",
            Description="Scheduler server AMI ID (default: ami-a10897d6)",
            Type="String",
            Default="ami-a10897d6"))

    cluster_ami_id_param = t.add_parameter(
        Parameter(
            "ClusterAmiId",
            Description="Cluster server AMI ID (default: ami-3db4ca4a)",
            Type="String",
            Default="ami-3db4ca4a"))

    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))

    hashkeyname_param = t.add_parameter(
        Parameter(
            "HaskKeyElementName",
            Description="HashType PrimaryKey Name (default: id)",
            Type="String",
            AllowedPattern="[a-zA-Z0-9]*",
            MinLength="1",
            MaxLength="2048",
            ConstraintDescription="must contain only alphanumeric characters",
            Default="id"))

    crontab_tablename_param = t.add_parameter(
        Parameter(
            "CrontabTablename",
            Description="Crontab Table Name",
            Type="String",
        ))

    containerlauncher_param = t.add_parameter(
        Parameter(
            "Containerlauncher",
            Description="Container Launcher zip file "
                        "(default: containerLauncher-1.0.zip)",
            Type="String",
            Default="containerLauncher-1.0.zip"))

    zipfileversion_param = t.add_parameter(
        Parameter(
            "ZipfileVersion",
            Description="Container Launcher zip file version",
            Type="String",
        ))

    # --------- Lambda Container Launcher

    lambda_function = t.add_resource(
        Function(
            "containerLauncher",
            Code=Code(
                S3Bucket="hackathon-glueteam-lambda",
                S3Key=Ref(containerlauncher_param),
                S3ObjectVersion=Ref(zipfileversion_param),
            ),
            Description=Join('', [Ref(stackname_param),
                                  " container Launcher"]),
            MemorySize=256,
            Handler="com.philips.glueteam.DockerLauncher::myHandler",
            Runtime="java8",
            Timeout=60,
            Role=Join('', [
                "arn:aws:iam::",
                Ref("AWS::AccountId"),
                ":role/",
                Ref(iam_role_param)
            ]),
        ))

    townclock_topic = t.add_resource(
        sns.Topic(
            "TownClock",
            Subscription=[
                sns.Subscription(Endpoint=GetAtt("containerLauncher", "Arn"),
                                 Protocol="lambda"),
            ],
        ))

    # --------- Scheduler instance

    scheduler_sg = t.add_resource(
        ec2.SecurityGroup(
            'SchedulerSG',
            GroupDescription='Security group for Scheduler host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "SchedulerSG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="8080",
                    ToPort="8080",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    cluster = t.add_resource(ecs.Cluster("ECSCluster"))

    scheduler_host = t.add_resource(
        ec2.Instance(
            'SchedulerHost',
            ImageId=Ref(scheduler_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    AssociatePublicIpAddress=True,
                    SubnetId=Select(0, Ref(subnets)),
                    DeleteOnTermination=True,
                    GroupSet=[
                        Ref(scheduler_sg),
                    ],
                    DeviceIndex=0,
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "Scheduler"]),
                      Id=Join("", [Ref(stackname_param), "Scheduler"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    'sns_topic_arn="', Ref(townclock_topic), '"\n',
                    'region="', Ref("AWS::Region"), '"\n',
                    'crontab_tablename="', Ref(crontab_tablename_param),
                    '"\n',
                    'ecs_clustername="', Ref(cluster), '"\n',
                    'publish_source=https://raw.githubusercontent.com/hngkr/hackathon/master/ansible/files/unreliable-town-clock-publish\n',
                    'publish=/usr/local/bin/unreliable-town-clock-publish\n',
                    'curl -s --location --retry 10 -o $publish $publish_source\n',
                    'chmod +x $publish\n',
                    'cat <<EOF >/etc/cron.d/unreliable-town-clock\n',
                    '*/2 * * * * ec2-user $publish "$sns_topic_arn" "$region" "$crontab_tablename" "$ecs_clustername"\n',
                    'EOF\n',
                ])),
        ))

    cluster_sg = t.add_resource(
        ec2.SecurityGroup(
            'ClusterSG',
            GroupDescription='Security group for Cluster host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "ClusterSG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    cluster_host = t.add_resource(
        ec2.Instance(
            'ClusterHost',
            ImageId=Ref(cluster_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            # TODO: Should have multiple separate iam roles for
            # townclock / clusterhost
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    AssociatePublicIpAddress=True,
                    SubnetId=Select(0, Ref(subnets)),
                    DeleteOnTermination=True,
                    GroupSet=[
                        Ref(cluster_sg),
                    ],
                    DeviceIndex=0,
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "ClusterNode"]),
                      Id=Join("", [Ref(stackname_param), "ClusterNode"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'mkdir /etc/ecs\n',
                    'cat <<EOF >/etc/ecs/ecs.config\n',
                    'ECS_CLUSTER=', Ref(cluster), '\n',
                    'EOF\n',
                ])),
        ))

    # --------- Expected DynamoDB Tables

    dirtylist_table = t.add_resource(
        dynamodb.Table(
            "DirtyList",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(Ref(hashkeyname_param), "S"),
            ],
            KeySchema=[dynamodb.Key(Ref(hashkeyname_param), "HASH")],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(1, 1)))

    runlog_table = t.add_resource(
        dynamodb.Table(
            "RunLog",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(Ref(hashkeyname_param), "S"),
            ],
            KeySchema=[dynamodb.Key(Ref(hashkeyname_param), "HASH")],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(1, 1)))

    # --------- Outputs

    t.add_output([
        Output(
            "clusterid",
            Description="Cluster Id",
            Value=Ref(cluster),
        )
    ])

    t.add_output([
        Output(
            "dirtylist",
            Description="DirtyList Tablename",
            Value=Ref(dirtylist_table),
        )
    ])

    t.add_output([
        Output(
            "runlog",
            Description="Runlog Tablename",
            Value=Ref(runlog_table),
        )
    ])

    t.add_output([
        Output(
            "lambdafunctionname",
            Description="Lambda Function Name",
            Value=Ref(lambda_function),
        )
    ])

    t.add_output([
        Output(
            "clocktowertopicarn",
            Description="Clock Tower Topic Arn",
            Value=Ref(townclock_topic),
        )
    ])

    return t