# AKS managed cluster with a single system-mode node pool. SSH admin access
# uses the generated key pair; the control plane authenticates to Azure with
# the AD service principal created alongside it.
managed_cluster = containerservice.ManagedCluster(
    managed_cluster_name,
    resource_group_name=resource_group.name,
    dns_prefix=resource_group.name,
    kubernetes_version="1.18.14",
    enable_rbac=True,
    # Pin the auto-created node resource group to a predictable name.
    node_resource_group=f"MC_azure-native-go_{managed_cluster_name}_westus",
    agent_pool_profiles=[{
        "count": 3,
        "max_pods": 110,
        "mode": "System",
        "name": "agentpool",
        "node_labels": {},
        "os_disk_size_gb": 30,
        "os_type": "Linux",
        "type": "VirtualMachineScaleSets",
        "vm_size": "Standard_DS2_v2",
    }],
    linux_profile={
        "admin_username": "******",
        "ssh": {"public_keys": [{"key_data": ssh_key.public_key_openssh}]},
    },
    service_principal_profile={
        "client_id": ad_app.application_id,
        "secret": ad_sp_password.value,
    },
)
# AKS cluster sized from stack configuration (node_count / node_size /
# k8s_version), with the Kubernetes dashboard addon enabled.
k8s_cluster = containerservice.ManagedCluster(
    'cluster',
    resource_group_name=resource_group.name,
    dns_prefix=resource_group.name,
    kubernetes_version=k8s_version,
    enable_rbac=True,
    node_resource_group='node-resource-group',
    addon_profiles={'KubeDashboard': {'enabled': True}},
    agent_pool_profiles=[{
        'count': node_count,
        'max_pods': 20,
        'mode': 'System',
        'name': 'agentpool',
        'node_labels': {},
        'os_disk_size_gb': 30,
        'os_type': 'Linux',
        'type': 'VirtualMachineScaleSets',
        'vm_size': node_size,
    }],
    linux_profile={
        'admin_username': admin_username,
        'ssh': {'publicKeys': [{'keyData': ssh_public_key}]},
    },
    service_principal_profile={
        'client_id': ad_app.application_id,
        'secret': ad_sp_password.value,
    },
)
# Azure CNI networking: nodes draw pod IPs from the delegated subnet, so the
# cluster creation must wait for the subnet role assignment to exist.
network_settings = {
    "network_plugin": "azure",
    "service_cidr": "10.10.0.0/16",
    "dns_service_ip": "10.10.0.10",
    "docker_bridge_cidr": "172.17.0.1/16",
}

# Single system-mode pool attached to the pre-created subnet.
system_pool = {
    "name": "type1",
    "mode": "System",
    "count": 2,
    "vm_size": "Standard_B2ms",
    "os_type": containerservice.OSType.LINUX,
    "max_pods": 110,
    "vnet_subnet_id": subnet.id,
}

aks = containerservice.ManagedCluster(
    f"{prefix}-aks",
    resource_group_name=rg.name,
    location=rg.location,
    dns_prefix="dns",
    kubernetes_version="1.23.5",
    enable_rbac=True,
    agent_pool_profiles=[system_pool],
    network_profile=network_settings,
    linux_profile={
        "admin_username": "******",
        "ssh": {"public_keys": [{"key_data": ssh_public_key}]},
    },
    service_principal_profile={
        "client_id": app.application_id,
        "secret": sppwd.value,
    },
    opts=ResourceOptions(depends_on=[subnet_assignment]),
)
# Create one AKS cluster per entry in `aks_cluster_configs`.
# Each config dict supplies "name", "location", "node_count" and "node_size";
# all clusters share the same AD service principal and SSH public key.
cluster_names = []
for cluster_config in aks_cluster_configs:
    cluster = containerservice.ManagedCluster(
        "aksCluster-{}".format(cluster_config["name"]),
        resource_group_name=resource_group.name,
        location=cluster_config["location"],
        kubernetes_version="1.18.14",
        # NOTE(review): every cluster in the loop gets the same dns_prefix —
        # confirm that is intended, since prefixes usually need to be unique.
        dns_prefix="{}-kube".format(pulumi.get_stack()),
        linux_profile=containerservice.ContainerServiceLinuxProfileArgs(
            admin_username="******",
            ssh=containerservice.ContainerServiceSshConfigurationArgs(
                public_keys=[
                    containerservice.ContainerServiceSshPublicKeyArgs(
                        key_data=ssh_public_key,
                    )
                ],
            ),
        ),
        service_principal_profile=containerservice.ManagedClusterServicePrincipalProfileArgs(
            client_id=ad_app.application_id,
            secret=ad_sp_password.value),
        agent_pool_profiles=[
            containerservice.ManagedClusterAgentPoolProfileArgs(
                name="aksagentpool",
                mode=containerservice.AgentPoolMode.SYSTEM,
                count=cluster_config["node_count"],
                vm_size=cluster_config["node_size"],
            )
        ],
    )
    cluster_names.append(cluster.name)

# Fix: Output.all takes varargs. Passing the list object itself made the
# exported value a nested list ([[name1, ...]]); unpacking the Outputs
# yields a flat list of cluster names.
pulumi.export("aks_cluster_names", pulumi.Output.all(*cluster_names))
# Private AKS cluster: the API server has no public endpoint
# (enable_private_cluster), and the cluster authenticates to Azure with a
# system-assigned managed identity instead of a service principal.
aks_cluster = containerservice.ManagedCluster(
    resource_name=(prefix_name + "-aks"),
    resource_group_name=resource_group.name,
    dns_prefix=prefix_name,
    enable_rbac=True,
    addon_profiles={},
    api_server_access_profile=containerservice.ManagedClusterAPIServerAccessProfileArgs(
        enable_private_cluster=True),
    identity=containerservice.ManagedClusterIdentityArgs(
        type=containerservice.ResourceIdentityType.SYSTEM_ASSIGNED),
    agent_pool_profiles=[
        containerservice.ManagedClusterAgentPoolProfileArgs(
            name="nodepool1",
            mode="System",
            count=1,
            vm_size="Standard_D2s_v4",
            os_type="Linux",
            type="VirtualMachineScaleSets",
            enable_node_public_ip=False,
            vnet_subnet_id=aks_subnet.id,
        )
    ],
    linux_profile=containerservice.ContainerServiceLinuxProfileArgs(
        admin_username="******",
        ssh=containerservice.ContainerServiceSshConfigurationArgs(
            public_keys=[
                containerservice.ContainerServiceSshPublicKeyArgs(key_data=ssh_key)
            ],
        ),
    ),
    network_profile=containerservice.ContainerServiceNetworkProfileArgs(
        network_plugin="azure",
        load_balancer_sku="standard",
        outbound_type="loadBalancer"),
    sku=containerservice.ManagedClusterSKUArgs(name="Basic", tier="Free"),
)
def __init__(self, name: str, args: ClusterArgs, opts: ResourceOptions = None):
    """Create an AKS cluster component.

    Provisions an SSH key pair, an AD application/service principal (used as
    the cluster's identity), and the managed cluster itself, all parented to
    this component. Exposes the decoded kubeconfig as `self.kubeconfig`
    (marked secret).
    """
    # Leave this line. You can modify 'custom:resource:Cluster' if you want.
    super().__init__('custom:resource:Cluster', name, {}, opts)
    # Create the resources.
    # Be sure to set ResourceOptions(parent=self) and prefix anything you
    # want to return as an output with "self."
    # Example:
    # resource_group = resources.ResourceGroup('rg', opts=ResourceOptions(parent=self))
    # self.rg_name = resource_group.name

    ### AKS Cluster Related Resources
    # SSH key pair generated in-stack; only the public half is handed to AKS.
    generated_key_pair = PrivateKey(f'{name}-ssh-key',
                                    algorithm='RSA',
                                    rsa_bits=4096,
                                    opts=ResourceOptions(parent=self))
    ssh_public_key = generated_key_pair.public_key_openssh
    # AD application + service principal that the cluster authenticates as.
    ad_app = azuread.Application('app',
                                 display_name='app',
                                 opts=ResourceOptions(parent=self))
    ad_sp = azuread.ServicePrincipal('service-principal',
                                     application_id=ad_app.application_id,
                                     opts=ResourceOptions(parent=self))
    # Password comes from args; far-future end date so it does not expire.
    ad_sp_password = azuread.ServicePrincipalPassword(
        'sp-pwd',
        service_principal_id=ad_sp.id,
        value=args.password,
        end_date='2099-01-01T00:00:00Z',
        opts=ResourceOptions(parent=self))
    k8s_cluster = containerservice.ManagedCluster(
        f'{name}-k8s',
        resource_group_name=args.resource_group_name,
        addon_profiles={
            'KubeDashboard': {
                'enabled': True,
            },
        },
        agent_pool_profiles=[{
            'count': args.node_count,
            'max_pods': 20,
            'mode': 'System',
            'name': 'agentpool',
            'node_labels': {},
            'os_disk_size_gb': 30,
            'os_type': 'Linux',
            'type': 'VirtualMachineScaleSets',
            'vm_size': args.node_size,
        }],
        dns_prefix=args.resource_group_name,
        enable_rbac=True,
        kubernetes_version=args.k8s_version,
        linux_profile={
            'admin_username': args.admin_username,
            'ssh': {
                'publicKeys': [{
                    'keyData': ssh_public_key,
                }],
            },
        },
        node_resource_group='node-resource-group',
        service_principal_profile={
            'client_id': ad_app.application_id,
            'secret': ad_sp_password.value,
        },
        opts=ResourceOptions(parent=self))
    # Obtaining the kubeconfig from an Azure K8s cluster requires the
    # "list_managed_cluster_user_credentials" function.
    # That function requires values that are not known until the resources
    # are created, hence the use of "apply()" to wait for them before
    # calling it.
    # NOTE(review): the lambda parameter `args` shadows the method's `args`
    # argument — it works (only args[0]/args[1] are read inside), but
    # consider renaming for clarity.
    creds = pulumi.Output.all(
        args.resource_group_name,
        k8s_cluster.name).apply(
            lambda args: containerservice.list_managed_cluster_user_credentials(
                resource_group_name=args[0],
                resource_name=args[1]))
    # "list_managed_cluster_user_credentials" returns an array of base64
    # encoded kubeconfigs. Decode the first one for our cluster, and mark it
    # as a secret so Pulumi treats it accordingly.
    self.kubeconfig = pulumi.Output.secret(
        creds.kubeconfigs[0].value.apply(
            lambda enc: base64.b64decode(enc).decode()))
    ### End of Cluster Related Resources

    # End with this. It is used for display purposes.
    self.register_outputs({})
# System node pool pinned to subnet1 so pods receive IPs from that subnet
# (azure network plugin below).
system_pool = {
    "count": 3,
    "max_pods": 110,
    "mode": "System",
    "name": "agentpool",
    "node_labels": {},
    "os_disk_size_gb": 30,
    "os_type": "Linux",
    "type": "VirtualMachineScaleSets",
    "vm_size": "Standard_DS2_v2",
    "vnet_subnet_id": subnet1.id,
}

# AKS managed cluster using Azure CNI in transparent mode, Free tier.
managed_cluster = containerservice.ManagedCluster(
    managed_cluster_name,
    resource_group_name=resource_group.name,
    dns_prefix=resource_group.name,
    kubernetes_version="1.18.14",
    enable_rbac=True,
    agent_pool_profiles=[system_pool],
    linux_profile={
        "admin_username": "******",
        "ssh": {"public_keys": [{"key_data": ssh_key.public_key_openssh}]},
    },
    service_principal_profile={
        "client_id": ad_app.application_id,
        "secret": ad_sp_password.value,
    },
    network_profile=containerservice.ContainerServiceNetworkProfileArgs(
        network_plugin="azure",
        network_mode="transparent"),
    sku=containerservice.ManagedClusterSKUArgs(name="Basic", tier="Free"),
)