def set_up(self, name, *, sourcedir, resources=None, __opts__): if resources is None: resources = {} resgen = ResourceGenerator(resources) bucket = get_lambda_bucket(resource=self) bobj = s3.BucketObject( f'{name}-code', bucket=bucket.id, source=build_zip_package(sourcedir, resgen), **opts(parent=self), ) role = generate_role( f'{name}-role', { rname: (res, ...) # Ask for basic RW permissions (not manage) for rname, res in resources.items() }, **opts(parent=self)) return { 'bucket': bucket, 'object': bobj, 'role': role, '_resources': list(resources.values()), # This should only be used internally }
def generate_role(name, resources, **ropts): role = iam.Role( f'{name}', assume_role_policy={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": "sts:AssumeRole", "Principal": { "Service": "lambda.amazonaws.com", } }] }, **ropts, ) iam.RolePolicyAttachment(f'{name}-base', role=role, policy_arn=BASIC_POLICY.arn, **opts(parent=role)) if resources: iam.RolePolicy( f'{name}-policy', role=role, policy={ "Version": "2012-10-17", # FIXME: Reduce this "Statement": [{ "Effect": "Allow", "Action": "*", # FIXME: More reasonable permissions "Resource": pulumi.Output.all( *[res[0].arn for res in resources.values()]) }] }, **opts(parent=role)) return role
def get_lambda_bucket(region=None, resource=None): """ Gets the shared bucket for lambda packages for the given region """ if resource is not None: region = get_region(resource) if region not in _lambda_buckets: _lambda_buckets[region] = s3.Bucket( f'lambda-bucket-{region}', region=region, versioning={ 'enabled': True, }, # FIXME: Life cycle rules for expiration **opts(region=region), ) return _lambda_buckets[region]
def AwsgiHandler(self, name, zone, domain, package, func, __opts__, **lambdaargs): """ Define a handler to accept requests, using awsgi """ func = package.function(f"{name}-function", func, **lambdaargs, **opts(parent=self)) invoke_policy = lambda_.Permission( f'{name}-function-permission', function=func, action='lambda:InvokeFunction', principal='elasticloadbalancing.amazonaws.com', **opts(parent=func)) netinfo = get_public_subnets(opts=__opts__) @netinfo.apply def vpc_id(info): vpc, subnets, is_v6 = info return vpc.id @netinfo.apply def netstack(info): vpc, subnets, is_v6 = info return 'dualstack' if is_v6 else 'ipv4' @netinfo.apply def subnet_ids(info): vpc, subnets, is_v6 = info return [sn.id for sn in subnets] cert = Certificate(f"{name}-cert", domain=domain, zone=zone, **opts(parent=self)) # TODO: Cache this sg = ec2.SecurityGroup(f"{name}-sg", vpc_id=vpc_id, ingress=[ { 'from_port': 80, 'to_port': 80, 'protocol': "tcp", 'cidr_blocks': ['0.0.0.0/0'], }, { 'from_port': 443, 'to_port': 443, 'protocol': "tcp", 'cidr_blocks': ['0.0.0.0/0'], }, { 'from_port': 80, 'to_port': 80, 'protocol': "tcp", 'ipv6_cidr_blocks': ['::/0'], }, { 'from_port': 443, 'to_port': 443, 'protocol': "tcp", 'ipv6_cidr_blocks': ['::/0'], }, ], egress=[ { 'from_port': 0, 'to_port': 0, 'protocol': "-1", 'cidr_blocks': ['0.0.0.0/0'], }, { 'from_port': 0, 'to_port': 0, 'protocol': "-1", 'ipv6_cidr_blocks': ['::/0'], }, ], **opts(parent=self)) alb = elb.LoadBalancer(f"{name}-alb", load_balancer_type='application', subnets=subnet_ids, ip_address_type=netstack, security_groups=[sg], enable_http2=True, **opts(parent=self)) target = elb.TargetGroup( f"{name}-target", target_type='lambda', lambda_multi_value_headers_enabled= False, # AWSGI does not support this yet health_check={ 'enabled': True, 'path': '/', 'matcher': '200-299', 'interval': 30, 'timeout': 5, }, **opts(parent=self)) elb.TargetGroupAttachment(f"{name}-target-func", target_group_arn=target.arn, target_id=func.arn, **opts(depends_on=[invoke_policy], parent=self)) elb.Listener(f"{name}-http", load_balancer_arn=alb.arn, port=80, protocol='HTTP', default_actions=[{ 'type': 'forward', 'target_group_arn': target.arn, }], **opts(parent=self)) elb.Listener(f"{name}-https", load_balancer_arn=alb.arn, port=443, protocol='HTTPS', ssl_policy='ELBSecurityPolicy-TLS-1-2-Ext-2018-06', certificate_arn=cert.cert_arn, default_actions=[{ 'type': 'forward', 'target_group_arn': target.arn, }], **opts(parent=self)) a_aaaa( f"{name}-record", name=domain, zone_id=zone.zone_id, aliases=[ { 'name': alb.dns_name, 'zone_id': alb.zone_id, 'evaluate_target_health': True, }, ], **opts(parent=self), )
import pulumi from putils import opts from staticsite import StaticSite from pulumi_aws import route53 config = pulumi.Config('castle') zone = route53.get_zone(name='dingbots.dev') # Create an AWS resource (S3 Bucket) site = StaticSite( 'MainSite', domain=config.require('domain'), zone=zone, content_dir='www', **opts(), ) pulumi.export('website', site.url)
import pulumi from putils import opts from deplumi import Package, AwsgiHandler from pulumi_aws import route53 config = pulumi.Config('castle') zone = route53.get_zone(name='dingbots.dev') clank = Package('Clank', sourcedir='clank', resources={}, **opts()) api_domain = f'api.{config.require("domain")}' AwsgiHandler( 'ClankService', domain=api_domain, zone=zone, package=clank, func='__main__:main', environment={ 'variables': { 'github_client_id': config.get('github-client-id'), # OAuth Client ID 'github_client_secret': config.get('github-client-secret'), # OAuth Client Secret 'github_app_id': config.get('github-app-id'), # Numeric App ID 'github_private_key': config.get('github-private-key'), # Signs JWTs for API authn 'github_secret': config.get('github-secret'), # github->app hook verify },
import pulumi from putils import opts, find_zone from deplumi import Package, AwsgiHandler config = pulumi.Config('cardboard') basedomain = config.require("domain") apiserv = Package( 'Api', sourcedir='apiserv', resources={ }, **opts() ) api_domain = f'api.{basedomain}' AwsgiHandler( 'ApiService', domain=api_domain, package=apiserv, func='__lambda__:main', timeout=6, # seconds environment={ 'variables': { 'github_client_id': config.get('github-client-id'), # OAuth Client ID 'github_client_secret': config.get('github-client-secret'), # OAuth Client Secret 'github_app_id': config.get('github-app-id'), # Numeric App ID 'github_private_key': config.get('github-private-key'), # Signs JWTs for API authn 'github_webhook_secret': config.get('github-webhook-secret'), # github->app hook verify },
def StaticSite(self, name, domain, zone, content_dir, __opts__): """ A static site, at the given domain with the contents of the given directory. Uses S3, CloudFront, ACM, and Route53. """ web_bucket = s3.Bucket( f'{name}-bucket', bucket=domain, website={ "index_document": "index.html", "errorDocument": "404.html", }, acl='public-read', website_domain=domain, **opts(parent=self), ) for absp, relp in walk(content_dir): mime_type, _ = mimetypes.guess_type(str(absp)) s3.BucketObject( f'{name}-{relp!s}', key=str(relp), bucket=web_bucket.id, source=FileAsset(str(absp)), content_type=mime_type, **opts(parent=web_bucket), ) bucket_name = web_bucket.id s3.BucketPolicy( f"{name}-policy", bucket=bucket_name, policy=bucket_name.apply(public_read_policy_for_bucket), **opts(parent=web_bucket), ) cert = Certificate( f"{name}-cert", domain=domain, zone=zone, **opts(parent=self, region='us-east-1'), ) distro = cloudfront.Distribution( f"{name}-dist", enabled=True, # Alternate aliases the CloudFront distribution can be reached at, in addition to https://xxxx.cloudfront.net. # Required if you want to access the distribution via config.targetDomain as well. aliases=[domain], is_ipv6_enabled=True, # We only specify one origin for this distribution, the S3 content bucket. origins=[ { "originId": web_bucket.arn, "domainName": web_bucket.website_endpoint, "customOriginConfig": { # Amazon S3 doesn't support HTTPS connections when using an S3 bucket configured as a website endpoint. "originProtocolPolicy": "http-only", "httpPort": 80, "httpsPort": 443, "originSslProtocols": ["TLSv1.2"], }, }, ], default_root_object="index.html", # A CloudFront distribution can configure different cache behaviors based on the request path. # Here we just specify a single, default cache behavior which is just read-only requests to S3. default_cache_behavior={ "targetOriginId": web_bucket.arn, "viewerProtocolPolicy": "redirect-to-https", "allowedMethods": ["GET", "HEAD", "OPTIONS"], "cachedMethods": ["GET", "HEAD", "OPTIONS"], "forwardedValues": { "cookies": {"forward": "none"}, "queryString": False, }, "minTtl": 0, "defaultTtl": 10*60, "maxTtl": 10*60, }, # "All" is the most broad distribution, and also the most expensive. # "100" is the least broad, and also the least expensive. price_class="PriceClass_100", # You can customize error responses. When CloudFront recieves an error from the origin (e.g. # S3 or some other web service) it can return a different error code, and return the # response for a different resource. custom_error_responses=[ {"errorCode": 404, "responseCode": 404, "responsePagePath": "/404.html"}, ], restrictions={ "geoRestriction": { "restrictionType": "none", }, }, viewer_certificate={ "acmCertificateArn": cert.cert_arn, "sslSupportMethod": "sni-only", }, # loggingConfig: { # bucket: logsBucket.bucketDomainName, # includeCookies: false, # prefix: `${config.targetDomain}/`, # }, **opts(parent=self), ) a_aaaa( f"{name}-record", name=domain, zone_id=zone.zone_id, aliases=[ { 'name': distro.domain_name, 'zone_id': distro.hosted_zone_id, 'evaluate_target_health': True, }, ], **opts(parent=self), ) return { 'url': f'https://{domain}/' }