def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AwsProvider(self, 'Aws', region='us-east-1')

    # create EC2 instance
    helloInstance = Instance(self, 'hello',
                             ami="ami-0742b4e673072066f",
                             instance_type="t2.micro",
                             tags={
                                 "Name": "Provisioned by CDKTF",
                                 "user": "******"
                             })

    # create S3 bucket
    bucket = S3Bucket(self, 'my_bucket',
                      bucket="nathan.bekenov.labs")

    # Outputs
    public_ip = TerraformOutput(self, 'hello_public_ip',
                                value=helloInstance.public_ip)
    bucket_name = TerraformOutput(self, 'hello_bucket_name',
                                  value=bucket.bucket)
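# A minimal sketch of how a stack like the one above is typically synthesized.
# The class name "HelloStack" is an assumption for illustration; the snippet
# above only shows the stack's __init__ body.
from cdktf import App

app = App()
HelloStack(app, "hello-stack")
app.synth()
# After synthesis, `cdktf deploy` applies the generated configuration.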
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AwsProvider(self, 'Aws', region='us-west-2')

    my_vpc = Vpc(self, 'MyVpc',
                 name='my-vpc',
                 cidr='10.0.0.0/16',
                 azs=['us-west-2a', 'us-west-2b', 'us-west-2c'],
                 private_subnets=['10.0.1.0/24', '10.0.2.0/24', '10.0.3.0/24'],
                 public_subnets=['10.0.101.0/24', '10.0.102.0/24', '10.0.103.0/24'],
                 enable_nat_gateway=True)

    my_eks = Eks(self, 'MyEks',
                 cluster_name='my-eks',
                 subnets=Token().as_list(my_vpc.private_subnets_output),
                 vpc_id=Token().as_string(my_vpc.vpc_id_output),
                 manage_aws_auth=False,
                 cluster_version='1.17')

    TerraformOutput(self, 'cluster_endpoint',
                    value=my_eks.cluster_endpoint_output)
    TerraformOutput(self, 'create_user_arn',
                    value=DataAwsCallerIdentity(self, 'current').arn)
def _tf_outputs(self):
    TerraformOutput(self, 'cluster_endpoint',
                    value=self.eks_cluster.cluster_endpoint_output)
    TerraformOutput(self, 'create_user_arn',
                    value=DataAwsCallerIdentity(self, 'current').arn)
    TerraformOutput(self, 'kubeconfig',
                    value=self.eks_cluster.kubeconfig_output)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    NullProvider(self, "null")
    Resource(self, "null-resource")

    fixture = TerraformAsset(self, 'fixture',
                             path=os.path.abspath("./fixture.txt"),
                             asset_hash="hash")
    fixtures = TerraformAsset(self, 'fixtures',
                              path=os.path.abspath("./fixtures"),
                              asset_hash="hash",
                              type=AssetType.ARCHIVE)

    TerraformOutput(self, 'fixture-output', value=fixture.path)
    TerraformOutput(self, 'fixtures-output', value=fixtures.path)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    # define resources here
    features = AzurermProviderFeatures()
    provider = AzurermProvider(self, 'azure', features=[features])

    node_pool = KubernetesClusterDefaultNodePool(
        name='default', node_count=1, vm_size='Standard_D2_v2')

    # resource_group = ResourceGroup(
    #     self, name='gepp', location='East US', id='')
    resource_group = ResourceGroupConfig(name='gepp', location='East US')

    identity = KubernetesClusterIdentity(type='SystemAssigned')

    cluster = KubernetesCluster(
        self, 'gepp-kube-cluster',
        name='gepp-kube-cluster',
        default_node_pool=[node_pool],
        dns_prefix='gepp',
        location=resource_group.location,
        resource_group_name=resource_group.name,
        identity=[identity],
        tags={"generated": "gepp"})

    kubeconfig = TerraformOutput(self, 'kubeconfig',
                                 value=cluster.kube_config_raw,
                                 sensitive=True)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    region = 'us-east-1'
    bucket_name = 'cdktest-cmclaughlin'

    AwsProvider(self, 'aws', region=region)

    website = S3BucketWebsite(index_document='index.html',
                              error_document='error.html')
    bucket = S3Bucket(self, 'bucket',
                      bucket=bucket_name,
                      region=region,
                      website=[website])

    S3BucketObject(self, 'upload',
                   bucket=bucket_name,
                   key='index.html',
                   source='../index.html',
                   acl='public-read',
                   content_type='text/html',
                   depends_on=[bucket])

    TerraformOutput(self, 'endpoint', value=bucket.website_endpoint)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AwsProvider(self, 'Aws', region='eu-central-1')

    # bucketname = 'cdktest-wiwa'
    # TerraformAwsModulesS3BucketAws(self, 'bucket', bucket=bucketname)

    my_vpc = TerraformAwsModulesVpcAws(self, id='vpc',
                                       name="test-vpc",
                                       cidr="10.0.0.0/16",
                                       azs=["eu-central-1a"],
                                       public_subnets=["10.0.101.0/24"])

    newInstance = Instance(self, 'pythondemo',
                           ami="ami-0a02ee601d742e89f",
                           instance_type="t2.micro",
                           availability_zone="eu-central-1a",
                           associate_public_ip_address=True,
                           tags={"Name": "Python-Demo-updated"},
                           user_data=user_data,
                           # subnet_id expects a single ID; the module output is a
                           # list, so pick the first public subnet
                           # (requires `from cdktf import Fn`)
                           subnet_id=Fn.element(my_vpc.public_subnets_output, 0))

    TerraformOutput(self, 'pythondemo_public_ip', value=newInstance.public_ip)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    # define resources here
    features = AzurermProviderFeatures()
    provider = AzurermProvider(self, 'azure', features=[features])

    node_pool = KubernetesClusterDefaultNodePool(
        name='default', node_count=1, vm_size='${var.instance_type}')

    # resource_group = ResourceGroup(
    #     self, name='gepp', location='East US', id='')
    resource_group = ResourceGroupConfig(
        name='${var.resource_group}', location='${var.cluster_location}')

    identity = KubernetesClusterIdentity(type='SystemAssigned')

    cluster = KubernetesCluster(
        self, cluster_name,
        name=cluster_name,
        default_node_pool=[node_pool],
        dns_prefix=cluster_dns_prefix,
        location=resource_group.location,
        resource_group_name=resource_group.name,
        identity=[identity],
        tags={"generated": "gepp"})

    kubeconfig = TerraformOutput(self, 'kubeconfig',
                                 value=cluster.kube_config_raw,
                                 sensitive=True)

    self.add_override(
        path='variable',
        value={
            "cluster_size": {
                "description": "Number of nodes that will be in default pool",
                "type": "number",
                "default": 3
            },
            "instance_type": {
                "description": "Instance type",
                "type": "string",
                "default": instance_type
            },
            "cluster_location": {
                "description": "Location of the cluster",
                "type": "string",
                "default": cluster_location
            },
            "resource_group": {
                "description": "Azure resource group name for cluster to be created in",
                "type": "string",
                "default": resource_group_name
            }
        })
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    # define resources here
    loca = "West Europe"
    add_space = ["10.12.0.0/27"]
    rg_name = "example-rg"
    tag = {"ENV": "Dev", "PROJECT": "AZ_TF"}

    AzurermProvider(self, "Azurerm", features=[{}])

    example_rg = ResourceGroup(self, 'example-rg',
                               name=rg_name,
                               location=loca,
                               tags=tag)

    example_vnet = VirtualNetwork(self, 'example_vnet',
                                  depends_on=[example_rg],
                                  name="example_vnet",
                                  location=loca,
                                  address_space=add_space,
                                  resource_group_name=Token().as_string(example_rg.name),
                                  tags=tag)

    TerraformOutput(self, 'vnet_id', value=example_vnet.id)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    resourcedetails = parseresourceyaml()
    print("after reading from the yaml", resourcedetails)
    print("resource region is", resourcedetails["customerName"]["region"])

    # AwsProvider(self, 'Aws', region='us-east-1')
    AwsProvider(self, 'Aws', region=resourcedetails["customerName"]["region"])

    helloInstance = Instance(self, 'hello',
                             ami="ami-2757f631",
                             instance_type="t2.micro",
                             tags={
                                 "Name": "Provisioned by Python",
                                 "Creator": "CDKTF-Python"
                             })

    Vpc(self, 'CustomVpc',
        name='custom-vpc',
        cidr='10.0.0.0/16',
        azs=["us-east-1a", "us-east-1b"],
        public_subnets=["10.0.1.0/24", "10.0.2.0/24"])

    TerraformOutput(self, 'hello_public_ip', value=helloInstance.public_ip)
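# A hypothetical sketch of the parseresourceyaml() helper used above; the file
# name "resources.yaml" is an assumption, and only the
# {"customerName": {"region": ...}} shape is implied by the snippet.
import yaml  # PyYAML


def parseresourceyaml(path: str = "resources.yaml") -> dict:
    # Load per-customer settings, e.g.:
    # customerName:
    #   region: us-east-1
    with open(path) as fh:
        return yaml.safe_load(fh)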
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    LocalBackend(self, path="terraform.tfstate")
    NullProvider(self, "null")

    resource = Resource(self, "null-resource")
    resource.add_override('triggers', {'cluster_instance_ids': 'foo'})

    self.add_override('terraform.backend', {
        'remote': {
            'organization': 'test',
            'workspaces': {
                'name': 'test'
            }
        }
    })

    TerraformOutput(self, "computed",
                    value=Fn.element(
                        Fn.merge([{
                            "id": resource.id
                        }, {
                            "value": "123"
                        }]), 1))
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    user_id = DataAwsCallerIdentity(self, "userId")
    AwsProvider(self, 'Aws', region=region)

    role = IamRole(
        self, "basic_lambda_role",
        description="Basic Lambda Execution Role",
        name=f"{stack_prefix}lambda_role",
        assume_role_policy='{"Version": "2012-10-17", "Statement": [{"Action": "sts:AssumeRole", "Principal": {"Service": "lambda.amazonaws.com"}, "Effect": "Allow", "Sid": ""}]}'
    )

    api = ApiGatewayRestApi(self, "api-gateway", name="rest-api")
    resource = ApiGatewayResource(self, "api-gateway-resource",
                                  rest_api_id=api.id,
                                  parent_id=api.root_resource_id,
                                  path_part="notes")

    get_notes = add_gateway_method(self, "GET", "get_notes_handler", api, resource, role.arn, user_id)
    add_note = add_gateway_method(self, "POST", "add_note_handler", api, resource, role.arn, user_id)
    delete_note = add_gateway_method(self, "DELETE", "delete_note_handler", api, resource, role.arn, user_id)

    deployment = ApiGatewayDeployment(
        self, f"{stack_prefix}api-gateway-deployment",
        rest_api_id=api.id,
        stage_name="dev",
        stage_description="Production Environment",
        depends_on=get_notes + add_note + delete_note)

    bucket_name = 'eladr-terraform-cdk-demo-bucket'
    bucket = S3Bucket(self, 's3_bucket', bucket=bucket_name, force_destroy=True)
    bucket.policy = f'{{"Version": "2012-10-17", "Statement": [{{"Action": "s3:*", "Resource": "arn:aws:s3:::{bucket_name}/*", "Principal": {{"AWS": "{role.arn}"}}, "Effect": "Allow", "Sid": ""}}]}}'

    TerraformOutput(self, "endpoint", value=deployment.invoke_url)
    TerraformOutput(self, "bucket-arn", value=bucket.arn)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AwsProvider(self, 'Aws', region='us-west-1')

    helloInstance = Instance(
        self, 'hello',
        ami="ami-031b673f443c2172c",
        instance_type="t2.micro",
    )

    TerraformOutput(self, 'hello_public_ip', value=helloInstance.public_ip)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AwsProvider(self, 'Aws', region='us-east-1')

    helloInstance = Instance(self, 'hello',
                             ami="ami-2757f631",
                             instance_type="t2.micro",
                             subnet_id="subnet-0a9820d8725d1ca85")

    TerraformOutput(self, 'hello_public_ip', value=helloInstance.public_ip)
def __init__(self, scope: Construct, ns, project_id) -> None:
    super().__init__(scope, ns)

    GoogleProvider(self, id=project_id, region="us-central1", project=project_id)

    code_bucket = StorageBucket(self, "bucket",
                                name="terraform-cf-zip-files",
                                project=project_id)

    asset = TerraformAsset(self, "cloud-function-asset",
                           path=os.path.join(
                               os.path.abspath(os.path.dirname(__name__)), "function"),
                           type=AssetType.ARCHIVE)

    code_object = StorageBucketObject(self, "archive",
                                      name=asset.file_name,
                                      bucket=code_bucket.name,
                                      source=asset.path)

    cloud_function = CloudfunctionsFunction(self, 'addition-function',
                                            name="addition",
                                            project=project_id,
                                            region="us-central1",
                                            runtime="python38",
                                            available_memory_mb=128,
                                            source_archive_bucket=code_bucket.name,
                                            source_archive_object=code_object.name,
                                            trigger_http=True,
                                            entry_point="main")

    invoker = CloudfunctionsFunctionIamBinding(self, "invoker",
                                               project=cloud_function.project,
                                               region=cloud_function.region,
                                               cloud_function=cloud_function.name,
                                               members=["allUsers"],
                                               role="roles/cloudfunctions.invoker")

    url = TerraformOutput(self, "cloud-function-url",
                          value=cloud_function.https_trigger_url)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    # define resources here
    instanceUserData = '#!/bin/bash\r\n' \
        'echo "Hello, World From Python Form Terraform CDK " > index.html\r\n' \
        'nohup busybox httpd -f -p 80 &\r\n'

    AwsProvider(self, 'Aws', region='us-east-1')

    ingress_allow = SecurityGroupIngress(cidr_blocks=['0.0.0.0/0'],
                                         ipv6_cidr_blocks=[],
                                         protocol='tcp',
                                         from_port=80,
                                         to_port=80,
                                         description="Allow",
                                         prefix_list_ids=[],
                                         security_groups=[],
                                         self_attribute=False)
    egress_allow = SecurityGroupEgress(cidr_blocks=['0.0.0.0/0'],
                                       ipv6_cidr_blocks=[],
                                       protocol='-1',
                                       from_port=0,
                                       to_port=0,
                                       prefix_list_ids=[],
                                       security_groups=[],
                                       self_attribute=False)

    secGroup = SecurityGroup(self, 'web_server',
                             name="allow_web_traffic",
                             ingress=[ingress_allow],
                             egress=[egress_allow])

    instance = Instance(
        self, "hello",
        ami="ami-2757f631",
        instance_type="t2.micro",
        vpc_security_group_ids=[Token.as_string(secGroup.id)],
        user_data=instanceUserData,
        # tags must be a map of key/value pairs, not a list
        tags={"Name": "Terraform-CDK WebServer"})

    TerraformOutput(self, 'public_dns', value=instance.public_dns)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    LocalBackend(self, path="terraform.tfstate")
    DockerProvider(self, "provider")

    docker_image = Image(self, 'nginxImage',
                         name='nginx:latest',
                         keep_locally=False)

    # Simple References
    container = Container(self, 'nginxContainer',
                          name='nginx-python-cdktf',
                          image=docker_image.repo_digest,
                          ports=[{
                              'internal': 80,
                              'external': 8000
                          }],
                          privileged=False)

    # Single-item References
    TerraformOutput(self, "containerCapAdd", value=container.capabilities.add)

    # Direct Mutation
    docker_image.keep_locally = True

    # Reference Mutation
    container.privileged = docker_image.keep_locally
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AzurermProvider(self, "Azurerm", features={})

    resource_group = ResourceGroup(self, vars.rg_name,
                                   name=vars.rg_name,
                                   location=vars.location,
                                   tags=vars.tag)

    aks_cluster = KubernetesCluster(self, vars.k8s_cluster_name,
                                    name=vars.k8s_cluster_name,
                                    location=vars.location,
                                    resource_group_name=Token().as_string(resource_group.name),
                                    dns_prefix=vars.stack_name,
                                    default_node_pool={
                                        "name": "default",
                                        "node_count": 2,
                                        "vm_size": "standard_d2_v4"
                                    },
                                    identity={"type": "SystemAssigned"},
                                    tags=vars.tag)

    cog_account = CognitiveAccount(self, vars.cog_name,
                                   name=vars.cog_name,
                                   location=vars.location,
                                   resource_group_name=Token().as_string(resource_group.name),
                                   kind="TextAnalytics",
                                   sku_name="F0",
                                   custom_subdomain_name=vars.cog_name,
                                   public_network_access_enabled=True,
                                   tags=vars.tag)

    container = ContainerRegistry(self, vars.container_reg_name,
                                  name=vars.container_reg_name,
                                  location=vars.location,
                                  resource_group_name=Token().as_string(resource_group.name),
                                  sku="Basic")

    client_config = DataAzurermClientConfig(self, "current")

    key_vault = KeyVault(
        self, vars.key_vault_name,
        name=vars.key_vault_name,
        location=vars.location,
        resource_group_name=Token().as_string(resource_group.name),
        tenant_id=client_config.tenant_id,
        sku_name="premium",
        access_policy=[
            KeyVaultAccessPolicy(tenant_id=client_config.tenant_id,
                                 object_id=client_config.object_id,
                                 key_permissions=["Create", "Get"],
                                 secret_permissions=[
                                     "Set", "Get", "Delete", "Purge", "Recover"
                                 ])
        ])

    cognitive_endpoint_secret = KeyVaultSecret(self, vars.secret_endpoint,
                                               name=vars.secret_endpoint,
                                               value=cog_account.endpoint,
                                               key_vault_id=key_vault.id)
    cognitive_key_secret = KeyVaultSecret(self, vars.secret_key,
                                          name=vars.secret_key,
                                          value=cog_account.primary_access_key,
                                          key_vault_id=key_vault.id)

    TerraformOutput(self, 'resource_group', value=resource_group.id)
    TerraformOutput(self, 'container_id', value=container.id)
    TerraformOutput(self, 'container_url', value=container.login_server)
    TerraformOutput(self, 'cognitive_endpoint_secret_id', value=cognitive_endpoint_secret.id)
    TerraformOutput(self, 'cognitive_key_secret_id', value=cognitive_key_secret.id)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AzurermProvider(self, "Azurerm", features={})

    resource_group = ResourceGroup(self, vars.rg_name,
                                   name=vars.rg_name,
                                   location=vars.location,
                                   tags=vars.tag)

    storage_account = StorageAccount(self, vars.storage_name,
                                     name=vars.storage_name,
                                     location=vars.location,
                                     resource_group_name=Token().as_string(resource_group.name),
                                     account_tier="Standard",
                                     account_kind="Storage",
                                     account_replication_type="LRS")

    containers = [
        "azure-webjobs-hosts", "azure-webjobs-secrets", "scm-releases"
    ]
    for container in containers:
        StorageContainer(self, container,
                         name=container,
                         storage_account_name=storage_account.name,
                         container_access_type="private")

    app_insights = ApplicationInsights(
        self, vars.ap_name,
        name=vars.ap_name,
        location=vars.location,
        resource_group_name=Token().as_string(resource_group.name),
        application_type="web")

    service_plan = AppServicePlan(self, vars.sp_name,
                                  name=vars.sp_name,
                                  location=vars.location,
                                  resource_group_name=Token().as_string(resource_group.name),
                                  kind="FunctionApp",
                                  reserved=True,
                                  sku={
                                      "tier": "Dynamic",
                                      "size": "Y1"
                                  })

    FunctionApp(
        self, vars.function_name,
        name=vars.function_name,
        location=vars.location,
        resource_group_name=Token().as_string(resource_group.name),
        app_service_plan_id=service_plan.id,
        storage_account_name=Token().as_string(storage_account.name),
        storage_account_access_key=storage_account.primary_access_key,
        https_only=True,
        version="~4",
        os_type="linux",
        app_settings={
            "FUNCTIONS_WORKER_RUNTIME": "python",
            "APPINSIGHTS_INSTRUMENTATIONKEY": app_insights.instrumentation_key,
            "AzureWebJobsStorage": storage_account.primary_access_key,
            "AZURE_LANGUAGE_ENDPOINT": vars.azure_language_endpoint,
            "AZURE_LANGUAGE_KEY": vars.azure_language_key
        },
        site_config={
            "linux_fx_version": "Python|3.8",
            "ftps_state": "Disabled"
        })

    cog_account = CognitiveAccount(self, vars.cog_name,
                                   name=vars.cog_name,
                                   location=vars.location,
                                   resource_group_name=Token().as_string(resource_group.name),
                                   kind="TextAnalytics",
                                   sku_name="F0",
                                   custom_subdomain_name=vars.cog_name,
                                   public_network_access_enabled=True,
                                   tags=vars.tag)

    TerraformOutput(self, 'cognitive_endpoint', value=cog_account.endpoint)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    AwsProvider(self, 'Aws', region='us-east-1')

    tags = {
        "CreateBy": "cdktf-samples-python",
        "SampleFrom": "https://github.com/shazi7804/cdktf-samples-python"
    }

    armAmi = DataAwsAmi(self, 'amazon-arm-linux',
                        most_recent=True,
                        owners=["amazon"],
                        filter=[{
                            "name": "root-device-type",
                            "values": ["ebs"]
                        }, {
                            "name": "virtualization-type",
                            "values": ["hvm"]
                        }, {
                            "name": "name",
                            "values": ["amzn2-ami-hvm-2.0.20200722.0-arm64*"]
                        }])

    # define resources here
    vpc = Vpc(self, 'vpc',
              enable_dns_hostnames=True,
              cidr_block='10.0.0.0/16',
              tags=tags)

    igw = InternetGateway(self, 'internetGateway',
                          vpc_id=Token().as_string(vpc.id),
                          tags=tags)

    subnet = Subnet(self, 'subnet',
                    vpc_id=Token().as_string(vpc.id),
                    cidr_block="10.0.0.0/24",
                    availability_zone="us-east-1a",
                    map_public_ip_on_launch=True,
                    tags=tags)

    routeTable = DefaultRouteTable(
        self, 'routeTable',
        default_route_table_id=Token().as_string(vpc.default_route_table_id),
        tags=tags)

    route = Route(self, 'route',
                  route_table_id=Token().as_string(routeTable.id),
                  destination_cidr_block="0.0.0.0/0",
                  gateway_id=Token().as_string(igw.id))

    # instance resources
    sg = SecurityGroup(self, 'bastionSecurityGroup',
                       name="bastion-sg",
                       vpc_id=Token().as_string(vpc.id),
                       tags=tags)

    sgInboundRule = SecurityGroupRule(self, 'bastionInbound',
                                      type="ingress",
                                      cidr_blocks=["0.0.0.0/0"],
                                      from_port=22,
                                      to_port=22,
                                      # SSH runs over TCP; "ssh" is not a valid protocol value
                                      protocol="tcp",
                                      security_group_id=Token().as_string(sg.id))

    sgOutboundRule = SecurityGroupRule(self, 'bastionOutbound',
                                       type="egress",
                                       cidr_blocks=["0.0.0.0/0"],
                                       from_port=0,
                                       to_port=65535,
                                       protocol="-1",
                                       security_group_id=Token().as_string(sg.id))

    # reading the JSON policy used for the STS assume-role policy
    with open('templates/ec2_assume_role_policy.json') as data:
        sts_assume_policy = json.load(data)

    role = IamRole(self, 'bastionRole',
                   assume_role_policy=json.dumps(sts_assume_policy))

    # iterating through config to create policy attachment objects
    manage_policies = {
        "ssm": 'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM',
        "s3": 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
    }
    for policy, arn in manage_policies.items():
        IamRolePolicyAttachment(self,
                                'bastion-{0}-attachment'.format(policy),
                                role=role.id,
                                policy_arn=arn)

    instance_profile = IamInstanceProfile(self, 'instanceProfile', role=role.id)

    bastion = Instance(self, 'bastion',
                       ami=armAmi.id,
                       instance_type="t4g.nano",
                       subnet_id=Token().as_string(subnet.id),
                       vpc_security_group_ids=[Token().as_string(sg.id)],
                       iam_instance_profile=instance_profile.id,
                       tags=tags)

    TerraformOutput(self, 'bastion_public_ip', value=bastion.public_ip)
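# A minimal sketch of what templates/ec2_assume_role_policy.json is assumed to
# contain (the file itself is not shown above): a standard EC2 trust policy
# that lets instances assume the bastion role.
EC2_ASSUME_ROLE_POLICY = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "ec2.amazonaws.com"},
        "Action": "sts:AssumeRole"
    }]
}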
def __init__(self, scope: Construct, ns: str, *, auth_dict: dict,
             k8s_stack_variable: OptionsK8Stack):
    keys = list(auth_dict.keys())
    access_key = auth_dict['access_key'] if check_keys(
        key='access_key', key_list=keys) else None
    key_data = auth_dict['key_data'] if check_keys(
        key='key_data', key_list=keys) else None
    subscription_id = auth_dict['subscription_id'] if check_keys(
        key='subscription_id', key_list=keys) else None
    client_id = auth_dict['client_id'] if check_keys(
        key='client_id', key_list=keys) else None
    client_secret = auth_dict['client_secret'] if check_keys(
        key='client_secret', key_list=keys) else None
    tenant_id = auth_dict['tenant_id'] if check_keys(
        key='tenant_id', key_list=keys) else None

    # ######## App Variables ###########
    # keys = list(k8s_stack_variable.keys())
    var_tags = k8s_stack_variable.tags
    var_rg_name = k8s_stack_variable.rg_name
    var_vm_size = k8s_stack_variable.vm_size
    var_dns_prefix = k8s_stack_variable.dns_prefix
    common_code_dir = k8s_stack_variable.common_code_dir

    super().__init__(scope, ns)

    # ##### Terraform Variables ########
    tf_key_data = TerraformVariable(self, 'key_data', type='string',
                                    default=key_data)
    tf_access_key = TerraformVariable(self, 'access_key', type='string',
                                      default=access_key)
    tf_location = TerraformVariable(self, 'location', type='string',
                                    default='West Europe')
    tf_storage_resource_group_name = TerraformVariable(
        self, 'storage_resource_group_name', type='string',
        default='Prateek-Test')
    tf_resource_group_name = TerraformVariable(self, 'resource_group_name',
                                               type='string',
                                               default=var_rg_name)
    tf_storage_account_name = TerraformVariable(
        self, 'storage_account_name', type='string',
        default=config('storage_account_name'))
    tf_container_name = TerraformVariable(self, 'container_name',
                                          type='string', default='tfstate')
    tf_storage_tfstate_key = TerraformVariable(
        self, 'storage_tfstate_key', type='string',
        default='prod.terraform.tfstate.prateek-vm2')
    tf_node_count = TerraformVariable(self, 'node_count', type='number', default=1)
    tf_min_count = TerraformVariable(self, 'min_count', type='number', default=1)
    tf_max_count = TerraformVariable(self, 'max_count', type='number', default=2)
    tf_max_pod = TerraformVariable(self, 'max_pod', type='number', default=20)

    features = AzurermProviderFeatures()
    AzurermProvider(self, 'azure',
                    features=[features],
                    subscription_id=subscription_id,
                    client_id=client_id,
                    client_secret=client_secret,
                    tenant_id=tenant_id)

    # TerraformModule(self, 'common_module', source='../{0}'.format(common_code_dir))

    node_pool = KubernetesClusterDefaultNodePool(
        name='default',
        node_count=tf_node_count.number_value,
        vm_size=var_vm_size)

    resource_group = ResourceGroup(self, 'azure-rg',
                                   name=var_rg_name,
                                   location=tf_location.string_value)

    identity = KubernetesClusterIdentity(type='SystemAssigned')

    linux_profile = KubernetesClusterLinuxProfile(
        admin_username='******',
        ssh_key=[
            KubernetesClusterLinuxProfileSshKey(key_data=tf_key_data.string_value)
        ])

    cluster = KubernetesCluster(
        self, 'my-kube-cluster',
        name='my-kube-cluster',
        default_node_pool=[node_pool],
        dns_prefix=var_dns_prefix,
        location=resource_group.location,
        resource_group_name=resource_group.name,
        node_resource_group="{0}-nodes".format(resource_group.name),
        identity=[identity],
        linux_profile=[linux_profile],
        network_profile=[
            KubernetesClusterNetworkProfile(network_plugin='azure')
        ],
        addon_profile=[
            KubernetesClusterAddonProfile(
                kube_dashboard=[
                    KubernetesClusterAddonProfileKubeDashboard(enabled=True)
                ],
                oms_agent=[
                    KubernetesClusterAddonProfileOmsAgent(
                        enabled=True,
                        log_analytics_workspace_id='test')
                ])
        ],
        role_based_access_control=[
            KubernetesClusterRoleBasedAccessControl(enabled=True)
        ],
        tags=var_tags)

    kube_config = cluster.kube_config_raw

    File(self, 'kube-config',
         filename=os.path.join(os.curdir, '..', 'generated_files'),
         content=kube_config)

    TerraformOutput(self, 'kube_config', value=kube_config, sensitive=True)

    cluster_node_pool = KubernetesClusterNodePool(
        self, "k8sNodePool",
        kubernetes_cluster_id=cluster.id,
        name='k8snodepool',
        node_count=tf_node_count.number_value,
        vm_size=var_vm_size,
        enable_auto_scaling=True,
        min_count=tf_min_count.number_value,
        max_count=tf_max_count.number_value,
        max_pods=tf_max_pod.number_value,
        lifecycle=TerraformResourceLifecycle(create_before_destroy=True,
                                             ignore_changes=['node_count']))

    # RoleAssignment(self, "network_contributer", scope=resource_group.id,
    #                principal_id=identity.principal_id,
    #                role_definition_name='Network Contributor')
    # RoleAssignment(self, "kubectl_pull", scope=resource_group.id,
    #                principal_id=cluster.kubelet_identity(index='0').object_id,
    #                role_definition_name='AcrPull')
    # ############ Removed Temporarily ####################################

    k8s_provider = KubernetesProvider(
        self, 'k8s',
        load_config_file=False,
        host=cluster.kube_config(index='0').host,
        client_key=add_base64decode(cluster.kube_config(index='0').client_key),
        client_certificate=add_base64decode(
            cluster.kube_config(index='0').client_certificate),
        cluster_ca_certificate=add_base64decode(
            cluster.kube_config(index='0').cluster_ca_certificate))

    helm_provider = HelmProvider(
        self, 'helm',
        kubernetes=[
            HelmProviderKubernetes(
                load_config_file=False,
                host=cluster.kube_config(index='0').host,
                client_key=add_base64decode(
                    cluster.kube_config(index='0').client_key),
                client_certificate=add_base64decode(
                    cluster.kube_config(index='0').client_certificate),
                cluster_ca_certificate=add_base64decode(
                    cluster.kube_config(index='0').cluster_ca_certificate))
        ])

    # Add traefik and cert-manager to expose services over HTTPS.
    traefik_ns_metadata = NamespaceMetadata(name='traefik',
                                            labels={
                                                'created_by': 'PythonCDK',
                                                'location': 'eastus',
                                                'resource_group': var_rg_name
                                            })
    traefik_ns = Namespace(self, 'traefik-ns', metadata=[traefik_ns_metadata])

    helm_traefik2_value = '''
additionalArguments:
  - "--entrypoints.websecure.http.tls"
  - "--providers.kubernetesingress=true"
  - "--providers.kubernetesIngress.ingressClass=traefik"
  - "--ping"
  - "--metrics.prometheus"
ports:
  web:
    redirectTo: websecure
'''

    helm_traefik2_release = Release(
        self, 'traefik2',
        name='traefik',
        repository='https://containous.github.io/traefik-helm-chart',
        chart='traefik',
        namespace='traefik',
        values=[helm_traefik2_value])

    cert_manager_ns_metadata = NamespaceMetadata(name='cert-manager',
                                                 labels={
                                                     'created_by': 'PythonCDK',
                                                     'location': 'westeurope',
                                                     'resource_group': var_rg_name
                                                 })
    cert_manager_ns = Namespace(self, 'cert-manager-ns',
                                metadata=[cert_manager_ns_metadata])
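# Hypothetical sketches of the check_keys() and add_base64decode() helpers the
# stack above relies on; neither is shown in the original, so the exact
# implementations are assumptions based on how they are called.
from cdktf import Fn


def check_keys(key: str, key_list: list) -> bool:
    # True when the auth dict actually provides the given key.
    return key in key_list


def add_base64decode(value: str) -> str:
    # Wrap a kube_config attribute in Terraform's base64decode() at synth time;
    # one plausible implementation uses cdktf's Fn helper.
    return Fn.base64decode(value)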
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    with open("webext.sh", "r") as f:
        scriptStr = f.read()

    instanceUserData = '#! /bin/bash\r\n' \
        'sudo apt-get update\r\n' \
        'sudo apt-get install -y apache2\r\n' \
        'sudo systemctl start apache2\r\n' \
        'sudo systemctl enable apache2\r\n' \
        'echo "<h1>Azure Linux VM with Web Server</h1>" | sudo tee /var/www/html/index.html'

    scriptStr_string_bytes = scriptStr.encode("ascii")
    base64_bytes = base64.b64encode(scriptStr_string_bytes)
    base64_string = base64_bytes.decode("ascii")
    script = '{ "script" : "' + base64_string + '" }'

    AzurermProvider(self, 'AzureRm',
                    features=[{}],
                    subscription_id="00000000-0000-0000-0000-000000000000",
                    tenant_id="00000000-0000-0000-0000-000000000000",
                    client_secret="00000000-0000-0000-0000-000000000000",
                    client_id="00000000-0000-0000-0000-000000000000")
    RandomProvider(self, "rnd")

    rnd_pass = Password(self, "rnd_pass",
                        length=16,
                        min_upper=2,
                        min_lower=2,
                        min_special=2,
                        number=True,
                        special=True,
                        override_special="!@#$%&")

    rg = ResourceGroup(self, "group",
                       name="rg-terraform-cdk-py",
                       location="eastus")

    vnet = VirtualNetwork(self, 'TfVnet',
                          location=rg.location,
                          address_space=['10.0.0.0/16'],
                          name='terraform-cdk-ts-vnet',
                          resource_group_name=rg.name)

    subnet = Subnet(self, 'TfVnetSubNet',
                    virtual_network_name=vnet.name,
                    address_prefix='10.0.0.0/24',
                    name='terraform-cdk-ts-sub',
                    resource_group_name=rg.name)

    nsg = NetworkSecurityGroup(self, "TfVnetSecurityGroup",
                               location=rg.location,
                               name='terraform-cdk-ts-nsg',
                               resource_group_name=rg.name,
                               security_rule=[
                                   NetworkSecurityGroupSecurityRule(
                                       name="AllowWEB",
                                       description="Allow web",
                                       priority=1000,
                                       direction="Inbound",
                                       access="Allow",
                                       protocol="Tcp",
                                       source_port_range="*",
                                       destination_port_range="80",
                                       source_address_prefix="Internet",
                                       destination_address_prefix="*"),
                                   NetworkSecurityGroupSecurityRule(
                                       name="SSH",
                                       priority=1001,
                                       direction="Inbound",
                                       access="Allow",
                                       protocol="Tcp",
                                       source_port_range="*",
                                       destination_port_range="22",
                                       source_address_prefix="*",
                                       destination_address_prefix="*")
                               ])

    SubnetNetworkSecurityGroupAssociation(
        self, "TfVNetSecGrpAssc",
        subnet_id=subnet.id,
        network_security_group_id=nsg.id)

    web_vm_ip = PublicIp(self, "TfWebVmIp",
                         location=rg.location,
                         resource_group_name=rg.name,
                         allocation_method="Static",
                         name="terraform-cdk-ts-web-ip")

    nic = NetworkInterface(self, "TfNic",
                           location=rg.location,
                           name="terraform-cdk-ts-nic",
                           resource_group_name=rg.name,
                           ip_configuration=[
                               NetworkInterfaceIpConfiguration(
                                   name="internal",
                                   private_ip_address_allocation="Dynamic",
                                   public_ip_address_id=web_vm_ip.id,
                                   subnet_id=subnet.id)
                           ])

    webvm = LinuxVirtualMachine(
        self, "TfWebVM",
        location=rg.location,
        name="terraform-cdk-ts-web-vm",
        resource_group_name=rg.name,
        network_interface_ids=[nic.id],
        admin_username="******",
        size="Standard_B2s",
        computer_name="web-tfcdk-ts-vm",
        admin_password=rnd_pass.result,
        source_image_reference=[
            LinuxVirtualMachineSourceImageReference(offer="UbuntuServer",
                                                    publisher="Canonical",
                                                    sku="18.04-LTS",
                                                    version="latest")
        ],
        os_disk=[
            LinuxVirtualMachineOsDisk(caching="ReadWrite",
                                      storage_account_type="Standard_LRS")
        ],
        disable_password_authentication=False)

    VirtualMachineExtension(self, "TfWebVMExt",
                            virtual_machine_id=webvm.id,
                            name="webvmext",
                            publisher="Microsoft.Azure.Extensions",
                            type="CustomScript",
                            type_handler_version="2.0",
                            settings=script)

    # output the static public IP address rather than the resource object
    TerraformOutput(self, 'webserver_ip', value=web_vm_ip.ip_address)
def __init__(self, scope: Construct, ns: str):
    super().__init__(scope, ns)

    # VPC
    self.devopsVpc = Vpc(
        self,
        "devopsVPC",
        cidr_block="10.0.0.0/16",
        # whether to assign an IPv6 CIDR block
        assign_generated_ipv6_cidr_block=False,
        # whether to use dedicated-tenancy instances
        instance_tenancy="default",
        enable_dns_hostnames=True,
        enable_dns_support=True,
        # enable_classiclink = False,
        # enable_classiclink_dns_support = False,
        tags={"Name": "devopsVPC"},
    )

    # Subnet
    self.devopsPri_Subnet = []
    self.devopsPri_Subnet.append(
        Subnet(
            self,
            "devopsPri_Subnet1",
            # availability zones: 2a, 2b, 2c, 2d
            availability_zone="ap-northeast-2a",
            cidr_block="10.0.0.0/18",
            vpc_id=self.devopsVpc.id,
            assign_ipv6_address_on_creation=False,
            tags={"Name": "devopsPri_Subnet1"},
        )
    )
    self.devopsPri_Subnet.append(
        Subnet(
            self,
            "devopsPri_Subnet2",
            availability_zone="ap-northeast-2c",
            cidr_block="10.0.128.0/18",
            vpc_id=self.devopsVpc.id,
            assign_ipv6_address_on_creation=False,
            tags={"Name": "devopsPri_Subnet2"},
        )
    )

    self.devopsPub_Subnet = []
    self.devopsPub_Subnet.append(
        Subnet(
            self,
            "devopsPub_Subnet1",
            availability_zone="ap-northeast-2a",
            cidr_block="10.0.64.0/18",
            vpc_id=self.devopsVpc.id,
            assign_ipv6_address_on_creation=False,
            # For Public IP
            map_public_ip_on_launch=True,
            # depends_on = [devopsIG.id],
            tags={"Name": "devopsPub_subnet1"},
        )
    )
    self.devopsPub_Subnet.append(
        Subnet(
            self,
            "devopsPub_Subnet2",
            availability_zone="ap-northeast-2c",
            cidr_block="10.0.192.0/18",
            vpc_id=self.devopsVpc.id,
            assign_ipv6_address_on_creation=False,
            # For Public IP
            map_public_ip_on_launch=True,
            # depends_on = [devopsIG.id],
            tags={"Name": "devopsPub_subnet2"},
        )
    )

    self.devopsEIP = Eip(self, "devopsEIP", vpc=True, tags={"Name": "devopsEIP"})

    self.devopsNATG = NatGateway(
        self,
        "devopsNATG",
        allocation_id=self.devopsEIP.id,
        subnet_id=self.devopsPub_Subnet[0].id,
        tags={"Name": "devopsNATG"},
    )

    self.devopsIG = InternetGateway(
        self, "devopsIG", vpc_id=self.devopsVpc.id, tags={"Name": "devopsIG"}
    )

    self.devopsPriRouteTable = RouteTable(
        self,
        "devops_PriRT",
        vpc_id=self.devopsVpc.id,
        route=[
            RouteTableRoute(
                # Destination arg
                cidr_block="0.0.0.0/0",
                # Target arg
                nat_gateway_id=self.devopsNATG.id,
                carrier_gateway_id=None,
                destination_prefix_list_id=None,
            )
        ],
        tags={"Name": "devops_PriRT"},
    )

    devopsPriRouteTable_Association = []
    for a in range(0, 2):
        devopsPriRouteTable_Association.append(
            RouteTableAssociation(
                self,
                "DevopsPriRouteTableAssociation" + str(a),
                route_table_id=self.devopsPriRouteTable.id,
                subnet_id=self.devopsPri_Subnet[a].id,
            )
        )

    self.devopsPubRouteTable = RouteTable(
        self,
        "devops_PubRT",
        vpc_id=self.devopsVpc.id,
        route=[
            RouteTableRoute(
                # Destination arg
                cidr_block="0.0.0.0/0",
                # Target arg
                gateway_id=self.devopsIG.id,
            ),
        ],
        tags={"Name": "devops_PubRT"},
    )

    devopsPubRouteTable_Association = []
    for a in range(0, 2):
        devopsPubRouteTable_Association.append(
            RouteTableAssociation(
                self,
                "DevopsPubRouteTableAssociation" + str(a),
                route_table_id=self.devopsPubRouteTable.id,
                subnet_id=self.devopsPub_Subnet[a].id,
            )
        )

    # Security Group
    self.devopsSG = SecurityGroup(
        self,
        "devopsSG",
        name="devopsSG",
        description="Security Group for devops_proj",
        vpc_id=self.devopsVpc.id,
        ingress=[
            SecurityGroupIngress(
                description="For HTTP",
                from_port=80,
                to_port=80,
                protocol="tcp",
                # Source: the remote peer's IP range
                cidr_blocks=["0.0.0.0/0"],
                # "ipv6_cidr_blocks" : ""
            ),
            SecurityGroupIngress(
                description="For SSH",
                from_port=22,
                to_port=22,
                protocol="tcp",
                cidr_blocks=["0.0.0.0/0"],
                # "ipv6_cidr_blocks" : ""
            ),
        ],
        egress=[
            # Allow All
            SecurityGroupEgress(
                from_port=0,
                to_port=0,
                protocol="-1",
                cidr_blocks=["0.0.0.0/0"],
                ipv6_cidr_blocks=["::/0"],
            )
        ],
    )

    self.devopsACL = NetworkAcl(
        self,
        "devopsACL",
        vpc_id=self.devopsVpc.id,
        ingress=[
            NetworkAclIngress(
                # Rule Number, Ordering
                rule_no=100,
                # Action: allow / deny
                action="allow",
                cidr_block="0.0.0.0/0",
                from_port=80,
                to_port=80,
                protocol="tcp",
            )
        ],
        egress=[
            NetworkAclEgress(
                rule_no=200,
                action="allow",
                cidr_block="10.0.0.0/18",
                from_port=443,
                to_port=443,
                protocol="tcp",
            )
        ],
        tags={"Name": "devopsACL"},
    )

    TerraformOutput(self, "vpc_id", description="VPC ID", value=self.devopsVpc.id)
    TerraformOutput(self, "vpc_cidr", value=self.devopsVpc.cidr_block)

    pub_id = []
    for a in self.devopsPub_Subnet:
        pub_id.append(a.id)
    TerraformOutput(self, "pub_subnet_id", value=pub_id)

    TerraformOutput(self, "sg_id", value=self.devopsSG.id)