def test_sec_groups_tags(self):
    """Verify a security group built by aws_sg carries the mandatory tags.

    Builds a throwaway SG against a testing provider, then asserts (inside a
    Pulumi Output callback) that the resource has a non-empty tag dict with
    both 'Name' and 'Createdby' keys.
    """
    aws_config_testing = aws.Provider(
        'aws_testing', region='us-west-2', profile='testing_profile')
    security_group_testing = aws_sg.aws_sg(aws_config_testing)
    rule_alb_ingress_testing = [{
        'description': 'allow_https',
        'fromPort': 443,
        'toPort': 443,
        'protocol': 'tcp',
        'cidrBlocks': ['0.0.0.0/0'],
    }]
    rule_alb_egress_testing = [{
        'description': 'all_traffic',
        'fromPort': 0,
        'toPort': 0,
        'protocol': '-1',
        'cidrBlocks': ['0.0.0.0/0'],
    }]
    # Fixed local-name typo: was `sg_tesging`.
    sg_testing = security_group_testing.create_sg(
        'alb', 'vpc-123456', rule_alb_ingress_testing, rule_alb_egress_testing)

    def check_tags(args):
        urn, tags = args
        self.assertIsNotNone(tags, f'server {urn} must have tags')
        self.assertIn('Name', tags, f'server {urn} must have a name tag')
        # BUG FIX: this failure message previously claimed a "name tag"
        # was missing even though the check is for the Createdby tag.
        self.assertIn('Createdby', tags,
                      f'server {urn} must have a Createdby tag')

    return pulumi.Output.all(sg_testing.urn, sg_testing.tags).apply(check_tags)
class TestingWithMocks(unittest.TestCase):
    """Pulumi runtime tests for the security-group and ALB components."""

    # NOTE(review): this class-level provider appears unused — each test
    # builds its own identically-named provider below. Confirm before removing.
    aws_config_testing = aws.Provider(
        'aws_testing', region='us-west-2', profile='testing_profile')

    # Test if the service has tags and a name tag.
    @pulumi.runtime.test
    def test_sec_groups_tags(self):
        aws_config_testing = aws.Provider(
            'aws_testing', region='us-west-2', profile='testing_profile')
        security_group_testing = aws_sg.aws_sg(aws_config_testing)
        rule_alb_ingress_testing = [{
            'description': 'allow_https',
            'fromPort': 443,
            'toPort': 443,
            'protocol': 'tcp',
            'cidrBlocks': ['0.0.0.0/0'],
        }]
        rule_alb_egress_testing = [{
            'description': 'all_traffic',
            'fromPort': 0,
            'toPort': 0,
            'protocol': '-1',
            'cidrBlocks': ['0.0.0.0/0'],
        }]
        # Fixed local-name typo: was `sg_tesging`.
        sg_testing = security_group_testing.create_sg(
            'alb', 'vpc-123456', rule_alb_ingress_testing,
            rule_alb_egress_testing)

        def check_tags(args):
            urn, tags = args
            self.assertIsNotNone(tags, f'server {urn} must have tags')
            self.assertIn('Name', tags, f'server {urn} must have a name tag')
            # BUG FIX: message previously claimed a "name tag" was missing
            # even though the check is for the Createdby tag.
            self.assertIn('Createdby', tags,
                          f'server {urn} must have a Createdby tag')

        return pulumi.Output.all(sg_testing.urn, sg_testing.tags).apply(check_tags)

    # Testing alb listener method
    @pulumi.runtime.test
    def test_listener_method(self):
        aws_config_testing = aws.Provider(
            'aws_testing', region='us-west-2', profile='testing_profile')
        alb_testing = aws_alb.aws_alb(
            'alb_testing',
            ['subnet-123456', 'subnet-78011'],
            False,
            ['sg-123456'],
            aws_config_testing)
        # create_alb() must run before create_listener(); keep the binding for
        # clarity even though the value is unused.
        create_alb_testing = alb_testing.create_alb()
        listener_test2 = alb_testing.create_listener(
            'HTTPs', 80, ['redirect', 443, 'HTTPS'])

        def check_arn(args):
            urn, arn = args
            # BUG FIX: the original asserted the same condition twice and
            # left a debug print() behind.
            self.assertIsNotNone(arn, f'the listener {urn} must exist')

        return pulumi.Output.all(listener_test2.urn, listener_test2.arn).apply(check_arn)
def get_provider_for_region(region):
    """Return an AWS provider for *region*, creating and caching one per region.

    When the module-level PROVIDER override is set (localstack testing) it is
    returned unconditionally, regardless of the requested region.
    """
    if PROVIDER is not None:
        # Using localstack
        return PROVIDER
    assert isinstance(region, str)
    cached = _provider_cache.get(region)
    if cached is None:
        cached = pulumi_aws.Provider(
            region,
            # profile=pulumi_aws.config.profile,  # FIXME
            region=region,
        )
        _provider_cache[region] = cached
    return cached
def __init__(self, name: str, account_id: Input[str], account_name: Input[str],
             access_role_name: Input[str], username: Input[str],
             user_policy_arn: Input[str], password: Input[str],
             opts: ResourceOptions = None):
    """Provision an IAM user inside a member account by assuming its access role.

    Creates a provider that assumes the account's access role, then an IAM
    user, a console login profile, and a policy attachment under that
    provider. Exposes console_url, username and password as outputs.
    """
    super().__init__("nuage/aws:organizations:AWSOrganizationAccountUser",
                     name, {}, opts)

    # ARN of the cross-account role assumed inside the member account.
    role_arn = Output.all(account_id, access_role_name).apply(
        lambda parts: f"arn:aws:iam::{parts[0]}:role/{parts[1]}")

    account_provider = aws.Provider("freelance-account-provider",
                                    assume_role={"role_arn": role_arn})

    iam_user = iam.User(
        "freelance-account-user",
        # User name is "<account-name>-<username>".
        name=Output.all(account_name, username).apply(
            lambda parts: f"{parts[0]}-{parts[1]}"),
        opts=pulumi.ResourceOptions(provider=account_provider))

    # Console login profile is handled by a dynamic provider that itself
    # assumes the account role.
    dynamic_providers.iam.UserLoginProfile(
        "freelance-account-user-login-profile",
        username=iam_user.name,
        password=password,
        assume_role=dynamic_providers.iam.AssumeRole(role_arn=role_arn))

    dynamic_providers_attachment = iam.UserPolicyAttachment(
        "freelance-account-user_UserAccessRole",
        policy_arn=user_policy_arn,
        user=iam_user.name,
        opts=pulumi.ResourceOptions(provider=account_provider))

    # Sign-in URL for the member account's console.
    self.console_url = Output.all(account_id).apply(
        lambda parts: f"https://{parts[0]}.signin.aws.amazon.com/console")
    self.username = iam_user.name
    self.password = password
    self.register_outputs({})
def configure_dns(domain: str, zone_id: pulumi.Input):
    """Issue and validate an ACM certificate for *domain*, then bind it to
    an API Gateway custom domain with a Route53 alias A record.

    Returns the aws.apigateway.DomainName resource.
    """
    # SSL Cert must be created in us-east-1 unrelated to where the API is deployed.
    east_provider = aws.Provider("aws-provider-us-east-1", region="us-east-1")

    # Request ACM certificate
    certificate = aws.acm.Certificate(
        "ssl-cert",
        domain_name=domain,
        validation_method="DNS",
        opts=ResourceOptions(provider=east_provider))

    # Create DNS record to prove to ACM that we own the domain
    validation_record = aws.route53.Record(
        "ssl-cert-validation-dns-record",
        zone_id=zone_id,
        name=certificate.domain_validation_options.apply(
            lambda dvo: dvo[0].resource_record_name),
        type=certificate.domain_validation_options.apply(
            lambda dvo: dvo[0].resource_record_type),
        records=[certificate.domain_validation_options.apply(
            lambda dvo: dvo[0].resource_record_value)],
        ttl=10 * 60)

    # Wait for the certificate validation to succeed
    validated_certificate = aws.acm.CertificateValidation(
        "ssl-cert-validation",
        certificate_arn=certificate.arn,
        validation_record_fqdns=[validation_record.fqdn],
        opts=ResourceOptions(provider=east_provider))

    # Configure API Gateway to be able to use domain name & certificate
    api_domain_name = aws.apigateway.DomainName(
        "api-domain-name",
        certificate_arn=validated_certificate.certificate_arn,
        domain_name=domain)

    # Create DNS record aliasing the domain to the API's CloudFront endpoint.
    aws.route53.Record(
        "api-dns",
        zone_id=zone_id,
        type="A",
        name=domain,
        aliases=[aws.route53.RecordAliasArgs(
            name=api_domain_name.cloudfront_domain_name,
            evaluate_target_health=False,
            zone_id=api_domain_name.cloudfront_zone_id)])

    return api_domain_name
def test_listener_method(self):
    """Verify create_listener yields a listener whose ARN resolves.

    Builds an ALB against a testing provider, attaches a redirect listener,
    and asserts (inside a Pulumi Output callback) that the listener ARN
    is not None.
    """
    aws_config_testing = aws.Provider(
        'aws_testing', region='us-west-2', profile='testing_profile')
    alb_testing = aws_alb.aws_alb(
        'alb_testing',
        ['subnet-123456', 'subnet-78011'],
        False,
        ['sg-123456'],
        aws_config_testing)
    # create_alb() must run before create_listener(); keep the binding for
    # clarity even though the value is unused.
    create_alb_testing = alb_testing.create_alb()
    listener_test2 = alb_testing.create_listener(
        'HTTPs', 80, ['redirect', 443, 'HTTPS'])

    def check_arn(args):
        urn, arn = args
        # BUG FIX: the original asserted the same condition twice and left a
        # debug print() behind.
        self.assertIsNotNone(arn, f'the listener {urn} must exist')

    return pulumi.Output.all(listener_test2.urn, listener_test2.arn).apply(check_arn)
import pulumi import pulumi_aws as aws name_base = "kafka-play" vpc_cidr = "10.0.0.0/16" az1_subnet_cidr = "10.0.1.0/24" az2_subnet_cidr = "10.0.2.0/24" reg = "us-east-2" az1 = reg+"a" az1 = reg+"b" ubuntu_ami = "ami-0b51ab7c28f4bf5a6" # being lazy for now and hardcoding Ohio Ubuntu 18.04 ami from Canonical # Custom provider based on settings above region = aws.Provider(reg, region=reg) # VPC for the stack vpc = aws.ec2.Vpc(name_base+"-vpc", cidr_block=vpc_cidr, tags={"Name":name_base+"-vpc"}) gw = aws.ec2.InternetGateway(name_base+"-gw", tags={ "Name": name_base+"-gw" }, vpc_id=vpc.id) routes=[ { "cidr_block": vpc_cidr,
# BUG FIX: `aws` is referenced below (aws.Provider, aws.s3.Bucket) but was
# never imported in this file's import block.
import pulumi_aws as aws
from pulumi_aws.config.vars import region
from pulumi import Config, ResourceOptions, export


def require_region():
    """
    require_region fetches the AWS region, requiring that it exists.
    if it does not exist, an exception is raised.
    """
    if not region:
        raise Exception('No AWS region has been configured')
    return region


config = Config()
role_to_assume_arn = config.require('roleToAssumeARN')

# Provider that performs all operations under the assumed privileged role.
provider = aws.Provider('privileged', assume_role={
    'role_arn': role_to_assume_arn,
    'session_name': 'PulumiSession',
    'externalId': 'PulumiApplication',
}, region=require_region())

# Creates an AWS resource (S3 Bucket)
bucket = aws.s3.Bucket('my-bucket', opts=ResourceOptions(provider=provider))

# Exports the DNS name of the bucket
export('bucket_name', bucket.bucket_domain_name)
    # Remaining entries of the shared resource-tag dict (the literal is
    # opened above this chunk).
    'CreatedOn': creation_date,
    'Owner': cfg.require('owner'),
    'PulumiProject': pulumi.get_project(),
    'PulumiStack': pulumi.get_stack(),
    # require_secret keeps the value encrypted in Pulumi state.
    'Customer': cfg.require_secret('customer')
}

opts = pulumi.ResourceOptions()
# In local-mode, point the AWS provider at a localstack S3 endpoint with
# dummy credentials and every remote validation/metadata check disabled, so
# no real AWS account is contacted.
if cfg.get_bool("local-mode"):
    opts.provider = aws.Provider(resource_name="localstack",
                                 access_key="integration-testing",
                                 secret_key="integration-testing",
                                 region="us-east-1",
                                 endpoints=[{
                                     "s3": "http://localhost:4572"
                                 }],
                                 skip_credentials_validation=True,
                                 s3_force_path_style=True,
                                 skip_metadata_api_check=True,
                                 skip_requesting_account_id=True,
                                 skip_region_validation=True)

# Provision an AWS S3 Bucket
bucket = aws.s3.Bucket(resource_name=bucket_name,
                       force_destroy=False,
                       tags=tags,
                       opts=opts)

# Export the name of the S3 bucket
pulumi.export('s3_bucket_name', bucket.id)
        # Tail of a BucketObject call whose opening is above this chunk.
        content_type=mime_type,
        source=FileAsset(filepath),
        opts=ResourceOptions(parent=content_bucket))

# Crawl the web content root path and convert the file paths to S3 object resources.
crawl_directory(web_contents_root_path, bucket_object_converter)

# TTL (seconds) used for DNS/caching settings below.
TEN_MINUTES = 60 * 10

# Provision a certificate if the arn is not provided via configuration.
if certificate_arn is None:
    # CloudFront is in us-east-1 and expects the ACM certificate to also be in us-east-1.
    # So, we create an east_region provider specifically for these operations.
    east_region = pulumi_aws.Provider('east',
                                      profile=pulumi_aws.config.profile,
                                      region='us-east-1')

    # Get a certificate for our website domain name.
    certificate = pulumi_aws.acm.Certificate(
        'certificate',
        domain_name=target_domain,
        validation_method='DNS',
        opts=ResourceOptions(provider=east_region))

    # Find the Route 53 hosted zone so we can create the validation record.
    subdomain, parent_domain = get_domain_and_subdomain(target_domain)
    hzid = pulumi_aws.route53.get_zone(name=parent_domain).id

    # Create a validation record to prove that we own the domain.
    # (Call continues beyond this chunk.)
    cert_validation_domain = pulumi_aws.route53.Record(
"""An AWS Python Pulumi program""" import pulumi from pulumi_aws import s3 import pulumi_aws as aws import pulumi_eks as eks base_name = "tushar" useast2ohio = aws.Provider("useast2ohio", region="us-east-2") prov_cluster = eks.Cluster(base_name, instance_type="t2.micro", provider_credential_opts=aws.config.profile, opts=pulumi.ResourceOptions(provider=useast2ohio)) pulumi.export("Cluster_name", prov_cluster.name) pulumi.export("kubeconfig", prov_cluster.kubeconfig)
from aws_components.dynamodb import aws_dynamodb # from pulumi_docker import Image, ImageRegistry, DockerBuild # Pulumi configs config = pulumi.Config() # ARNs policy_ecs = 'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy' # load env variables load_dotenv() # Provider for testing aws_config_testing = aws.Provider('aws_testing', region='us-west-2', profile='testing_profile') ##### creating custom aws provider aws_config = aws.Provider('aws', region=config.require('aws_region'), profile=config.require('aws_profile')) Azs_Availables = aws.get_availability_zones( state="available", opts=pulumi.ResourceOptions(provider=aws_config)) # Networking creation cidr_public = ['10.11.1.0/24', '10.11.2.0/24'] cidr_private = ['10.11.100.0/24', '10.11.101.0/24']
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.

import pulumi_aws as aws
from pulumi import Config, ResourceOptions, export

config = Config()
role_to_assume_arn = config.require('roleToAssumeARN')
aws_config = Config('aws')

# Settings for the role the privileged provider assumes.
# session name can contain only the following special characters =,.@-
# if any other special character is used, an error stating that the role
# cannot be assumed will be returned
assume_role_settings = {
    'role_arn': role_to_assume_arn,
    'session_name': 'PulumiSession',
    'externalId': 'PulumiApplication',
}

# Provider that performs all operations under the assumed role.
provider = aws.Provider(
    'privileged',
    assume_role=assume_role_settings,
    region=aws_config.require('region'))

# Creates an AWS resource (S3 Bucket)
bucket = aws.s3.Bucket('my-bucket', opts=ResourceOptions(provider=provider))

# Exports the DNS name of the bucket
export('bucket_name', bucket.bucket_domain_name)
def _get_aws_provider(self, region: str) -> pulumi_aws.Provider:
    """Return a pulumi_aws.Provider for *region*.

    The provider for the default region is named "default" to preserve
    backward compatibility; any other region's provider is named after the
    region itself.
    """
    if region == self.model.aws.default_region:
        provider_name = "default"
    else:
        provider_name = region
    return pulumi_aws.Provider(provider_name, region=region)
import pulumi
import pulumi_aws as aws

# Explicit us-west-2 provider for the bucket below.
provider = aws.Provider("provider", region="us-west-2")

# Options: pin the provider, record an explicit dependency on it, protect
# the bucket from deletion, and ignore drift on the bucket name and the
# first lifecycle rule.
bucket_options = pulumi.ResourceOptions(
    provider=provider,
    depends_on=[provider],
    protect=True,
    ignore_changes=[
        "bucket",
        "lifecycleRules[0]",
    ])

bucket1 = aws.s3.Bucket("bucket1", opts=bucket_options)
# AWS provider wired to a local "localstack" stack: each service endpoint is
# redirected to its localstack port on localhost, mock credentials are
# supplied, and all remote validation/metadata checks are disabled so no real
# AWS account is ever contacted.
PROVIDER = pulumi_aws.Provider(
    "localstack",
    skip_credentials_validation=True,
    skip_metadata_api_check=True,
    s3_force_path_style=True,  # localstack serves S3 at path-style URLs
    access_key="mockAccessKey",
    secret_key="mockSecretKey",
    region='us-east-1',
    endpoints=[{
        'apigateway': "http://localhost:4567",
        'cloudformation': "http://localhost:4581",
        'cloudwatch': "http://localhost:4582",
        'cloudwatchlogs': "http://localhost:4586",
        'dynamodb': "http://localhost:4569",
        # "DynamoDBStreams": "http://localhost:4570",
        # "Elasticsearch": "http://localhost:4571",
        'es': "http://localhost:4578",
        'firehose': "http://localhost:4573",
        'iam': "http://localhost:4593",
        'kinesis': "http://localhost:4568",
        'kms': "http://localhost:4584",
        'lambda': "http://localhost:4574",
        'redshift': "http://localhost:4577",
        'route53': "http://localhost:4580",
        's3': "http://localhost:4572",
        'ses': "http://localhost:4579",
        # "StepFunctions": "http://localhost:4585",
        'sns': "http://localhost:4575",
        'sqs': "http://localhost:4576",
        'ssm': "http://localhost:4583",
        'sts': "http://localhost:4592",
    }],
)
# BUG FIX: os.getenv is used below but `os` was never imported.
import os

import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

project_name = pulumi.get_project()

# For CI testing only: used to set profileName to alternate AWS_PROFILE envvar.
if not os.getenv("ALT_AWS_PROFILE"):
    raise Exception("ALT_AWS_PROFILE must be set")

# AWS named profile to use.
profile_name = os.getenv("ALT_AWS_PROFILE")

# Create an AWS provider instance using the named profile creds
# and current region.
aws_provider = aws.Provider("aws-provider",
                            profile=profile_name,
                            region=aws.get_region().name)

# Define the AWS provider credential opts to configure the cluster's
# kubeconfig auth.
kubeconfig_opts = eks.KubeconfigOptionsArgs(profile_name=profile_name)

# Create the cluster using the AWS provider and credential opts.
cluster = eks.Cluster(project_name,
                      provider_credential_opts=kubeconfig_opts,
                      opts=pulumi.ResourceOptions(provider=aws_provider))

# Export the cluster kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)