Example #1
    def engine_status(cls, **kwargs):
        try:
            credentials = ServicePrincipalCredentials(
                client_id=kwargs.get('client_id'),
                secret=kwargs.get('secret'),
                tenant=kwargs.get('tenant'))
        except AuthenticationError:
            logger.exception(
                'Invalid credentials for {} Azure Provisioner'.format(
                    cls.name))
            return config.get('PROVISIONER_ERROR_STATE')
        except Exception:
            logger.exception('{} Azure Provisioner validation failed.'.format(
                cls.name))
            return config.get('PROVISIONER_UNKNOWN_STATE')
        client = ContainerServiceClient(credentials,
                                        kwargs.get('subscription_id'))
        try:
            list(
                client.managed_clusters.list_by_resource_group(
                    kwargs.get('resource_group_name')))
        except CloudError as e:
            logger.exception(
                'Invalid parameters for {} Azure Provisioner: {}'.format(
                    cls.name, e.message))
            return config.get('PROVISIONER_ERROR_STATE')
        except Exception:
            logger.exception('{} Azure Provisioner validation failed.'.format(
                cls.name))
            return config.get('PROVISIONER_UNKNOWN_STATE')
        return config.get('PROVISIONER_OK_STATE')

    def containerservice_client(self):
        self.log('Getting container service client')
        if not self._containerservice_client:
            self._containerservice_client = ContainerServiceClient(
                self.azure_credentials, self.subscription_id)
            self._register('Microsoft.ContainerService')
        return self._containerservice_client
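
A call to this validator might look like the sketch below; the keyword names match those read above, while the provisioner class name and all values are placeholders rather than code from the original project.

# Hypothetical invocation; every value here is a placeholder.
state = AzureProvisioner.engine_status(
    client_id='<app-id>',
    secret='<client-secret>',
    tenant='<tenant-id>',
    subscription_id='<subscription-id>',
    resource_group_name='my-resource-group')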
Example #3
    def start(self):
        credentials, subscription_id = self._get_credentials()

        # Cluster name
        cluster_name = self.config.get("cluster", None)
        if _is_none_or_blank(cluster_name):
            cluster_name = self.cluster_name
            logging.info("Using same cluster name as DSS: {}".format(cluster_name))

        # Resource group
        resource_group = self.config.get('resourceGroup', None)
        if _is_none_or_blank(resource_group):
            metadata = get_instance_metadata()
            resource_group = metadata["compute"]["resourceGroupName"]
            logging.info("Using same resource group as DSS: {}".format(resource_group))

        clusters_client = ContainerServiceClient(credentials, subscription_id)

        # Get kubeconfig 
        logging.info("Fetching kubeconfig for cluster %s in %s", cluster_name, resource_group)
        def do_fetch():
            return clusters_client.managed_clusters.list_cluster_admin_credentials(resource_group, cluster_name)
        get_credentials_result = run_and_process_cloud_error(do_fetch)
        kube_config_content = get_credentials_result.kubeconfigs[0].value.decode('utf8')
        kube_config_path = os.path.join(os.getcwd(), 'kube_config')
        with open(kube_config_path, 'w') as f:
            f.write(kube_config_content)
        overrides = make_overrides(self.config, yaml.safe_load(kube_config_content), kube_config_path)
        
        # Get other cluster info
        def do_inspect():
            return clusters_client.managed_clusters.get(resource_group, cluster_name)
        get_cluster_result = run_and_process_cloud_error(do_inspect)

        return [overrides, {'kube_config_path': kube_config_path, 'cluster': get_cluster_result.as_dict()}]
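
Several examples on this page call helpers such as _is_none_or_blank and run_and_process_cloud_error that are defined elsewhere in their projects. A minimal sketch of what they might look like, assuming the track-1 CloudError type (the plugins' real helpers may differ):

from msrestazure.azure_exceptions import CloudError

def _is_none_or_blank(value):
    # Treat None, blank strings, and empty dicts as "not configured".
    if value is None:
        return True
    if isinstance(value, str):
        return value.strip() == ''
    if isinstance(value, dict):
        return len(value) == 0
    return False

def run_and_process_cloud_error(fn):
    # Run an SDK call, rewrapping Azure errors with a readable message.
    try:
        return fn()
    except CloudError as e:
        raise Exception('Azure API call failed: {}'.format(e.message))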
Example #4
    def __init__(self,
                 azure_config,
                 logger,
                 api_version=constants.API_VER_MANAGED_CLUSTER):
        super(ManagedCluster, self).__init__(azure_config)
        self.logger = logger
        self.client = ContainerServiceClient(self.credentials,
                                             self.subscription_id)

    def __init__(self,
                 azure_config,
                 logger,
                 api_version=constants.API_VER_CONTAINER):
        super(ContainerService, self).__init__(azure_config)
        self.logger = logger
        self.client = ContainerServiceClient(self.credentials,
                                             self.subscription_id)
Example #6
    def __init__(self, config: Configuration):
        super().__init__(config)
        credentials = ServicePrincipalCredentials(config.az_client(),
                                                  config.secret(),
                                                  tenant=config.az_tenant())
        self.client = ContainerServiceClient(credentials,
                                             config.az_subscription())
        self.rg = config.az_resource_group()
        self.aks = config.aks()
Example #7
File: aks.py Project: epcim/kqueen
    def _get_client(self):
        """
        Initialize Azure client
        Construct service account credentials using the service account key file

        """
        credentials = ServicePrincipalCredentials(client_id=self.client_id, secret=self.secret, tenant=self.tenant)
        subscription_id = self.subscription_id
        client = ContainerServiceClient(credentials, subscription_id)

        return client
Example #8
def initiate_container_service_client():
    subscription_id = os.environ.get("ARM_SUBSCRIPTION_ID", None)

    # The Python SDK needs different env var names than the Terraform provider
    for envname in ("TENANT_ID", "CLIENT_SECRET", "CLIENT_ID"):
        azure_name = f"AZURE_{envname}"
        if azure_name not in os.environ:
            os.environ[azure_name] = os.environ[f"ARM_{envname}"]

    return ContainerServiceClient(credential=EnvironmentCredential(),
                                  subscription_id=subscription_id)
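
EnvironmentCredential then reads AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET, hence the renaming loop above. A usage sketch with placeholder values:

import os
from azure.identity import EnvironmentCredential
from azure.mgmt.containerservice import ContainerServiceClient

# Placeholder values; in practice these come from the Terraform environment.
os.environ.setdefault("ARM_SUBSCRIPTION_ID", "<subscription-id>")
os.environ.setdefault("ARM_TENANT_ID", "<tenant-id>")
os.environ.setdefault("ARM_CLIENT_ID", "<app-id>")
os.environ.setdefault("ARM_CLIENT_SECRET", "<client-secret>")

client = initiate_container_service_client()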
Example #9
    def start(self):
        connection_info = self.config.get("connectionInfo", {})
        connection_info_secret = self.plugin_config.get("connectionInfo", {})
        subscription_id = connection_info.get('subscriptionId', None)
        if _is_none_or_blank(subscription_id):
            raise Exception('Subscription must be defined')

        credentials = get_credentials_from_connection_info(
            connection_info, connection_info_secret)
        clusters_client = ContainerServiceClient(credentials, subscription_id)

        resource_group_name = self.config.get('resourceGroup', None)
        if _is_none_or_blank(resource_group_name):
            raise Exception(
                "A resource group to put the cluster in is required")

        cluster_name = self.config.get('cluster', self.cluster_name)

        logging.info("Fetching kubeconfig for cluster %s in %s" %
                     (cluster_name, resource_group_name))

        def do_fetch():
            return clusters_client.managed_clusters.list_cluster_admin_credentials(
                resource_group_name, cluster_name)

        get_credentials_result = run_and_process_cloud_error(do_fetch)

        kube_config_content = get_credentials_result.kubeconfigs[
            0].value.decode('utf8')

        kube_config_path = os.path.join(os.getcwd(), 'kube_config')
        with open(kube_config_path, 'w') as f:
            f.write(kube_config_content)

        overrides = make_overrides(self.config,
                                   yaml.safe_load(kube_config_content),
                                   kube_config_path)

        def do_inspect():
            return clusters_client.managed_clusters.get(
                resource_group_name, cluster_name)

        get_cluster_result = run_and_process_cloud_error(do_inspect)

        return [
            overrides, {
                'kube_config_path': kube_config_path,
                'cluster': get_cluster_result.as_dict()
            }
        ]
    def __init__(self,
                 logger,
                 credentials,
                 group_name,
                 container_service_name,
                 container_params=None):
        self.group_name = group_name
        self.container_service_name = container_service_name
        self.logger = logger
        # Avoid a shared mutable default argument.
        self.container_params = container_params or {}
        self.resource_verify = bool(credentials.get('endpoint_verify', True))
        super(ContainerService, self).__init__(credentials)
        self.client = ContainerServiceClient(
            self.credentials, str(credentials['subscription_id']))

        self.logger.info("Use subscription: {}".format(
            credentials['subscription_id']))
Example #11
def get_cluster_from_connection_info(config, plugin_config):
    """
    Return a ContainerServiceClient after authenticating using the connection info.
    """
    
    connection_info = config.get("connectionInfo", {})
    connection_info_secret = plugin_config.get("connectionInfo", {})
    subscription_id = connection_info.get('subscriptionId', None)
    if _is_none_or_blank(subscription_id):
        raise Exception('Subscription must be defined')

    credentials = get_credentials_from_connection_info(connection_info, connection_info_secret)
    clusters_client = ContainerServiceClient(credentials, subscription_id)
            
    # credit this cluster to Dataiku
    # clusters_client.config.add_user_agent('pid-fd3813c7-273c-5eec-9221-77323f62a148')

    return clusters_client
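
get_credentials_from_connection_info is defined elsewhere in the plugin; a plausible sketch, assuming the connection info carries service principal fields (the key names here are guesses):

from azure.common.credentials import ServicePrincipalCredentials

def get_credentials_from_connection_info(connection_info, connection_info_secret):
    # Key names are assumptions; the secret dict may override the plain one.
    client_id = connection_info.get('clientId')
    tenant_id = connection_info.get('tenantId')
    password = connection_info_secret.get('password') or connection_info.get('password')
    return ServicePrincipalCredentials(client_id=client_id,
                                       secret=password,
                                       tenant=tenant_id)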
Example #12
    def stop(self, data):
        connection_info = self.config.get("connectionInfo", {})
        connection_info_secret = self.plugin_config.get("connectionInfo", {})
        subscription_id = connection_info.get('subscriptionId', None)
        if _is_none_or_blank(subscription_id):
            raise Exception('Subscription must be defined')

        credentials = get_credentials_from_connection_info(
            connection_info, connection_info_secret)
        clusters_client = ContainerServiceClient(credentials, subscription_id)

        resource_group_name = self.config.get('resourceGroup', None)
        if _is_none_or_blank(resource_group_name):
            raise Exception(
                "A resource group to put the cluster in is required")

        logging.info("Fetching kubeconfig for cluster %s in %s" %
                     (self.cluster_name, resource_group_name))

        def do_delete():
            return clusters_client.managed_clusters.delete(
                resource_group_name, self.cluster_name)

        delete_result = run_and_process_cloud_error(do_delete)

        # delete returns void, so we poll until the cluster is really gone
        gone = False
        while not gone:
            time.sleep(5)
            try:
                cluster = clusters_client.managed_clusters.get(
                    resource_group_name, self.cluster_name)
                if cluster.provisioning_state.lower() != 'deleting':
                    logging.info(
                        "Cluster is not deleting anymore, must be deleted now (state = %s)"
                        % cluster.provisioning_state)
                    # Exit the loop; otherwise this would spin forever.
                    gone = True
            except Exception as e:
                logging.info("Could not get cluster, should be gone (%s)" %
                             str(e))
                gone = True
Example #13
def get_cluster_from_connection_info(config, plugin_config):
    """
    Return a ContainerServiceClient after authenticating using the connection info.
    """
    connection_info = config.get("connectionInfo", None)
    connection_info_secret = plugin_config.get("connectionInfo", None)
    if not _is_none_or_blank(connection_info) or not _is_none_or_blank(
            connection_info_secret):
        logging.warning(
            "Using legacy authentication fields. Clear them to use the new ones."
        )
        credentials = get_credentials_from_connection_info(
            connection_info, connection_info_secret)
        subscription_id = connection_info.get('subscriptionId', None)
    else:
        connection_info_v2 = config.get("connectionInfoV2",
                                        {"identityType": "default"})
        credentials, _ = get_credentials_from_connection_infoV2(
            connection_info_v2)
        subscription_id = get_subscription_id(connection_info_v2)
    clusters_client = ContainerServiceClient(credentials, subscription_id)
    return clusters_client
Example #14
 def __init__(self) -> None:
     try:
         assert self.AZURE_SUBSCRIPTION_ID is not None
         assert self.AZURE_TENANT_ID is not None
         assert self.AZURE_CLIENT_ID is not None
         assert self.AZURE_CLIENT_SECRET is not None
         credential = ClientSecretCredential(
             tenant_id=self.AZURE_TENANT_ID,
             client_id=self.AZURE_CLIENT_ID,
             client_secret=self.AZURE_CLIENT_SECRET,
         )
         container_client = ContainerServiceClient(
             credential=credential,
             subscription_id=self.AZURE_SUBSCRIPTION_ID,
         )
         self.container_client = container_client
     except Exception as e:
         logger.error(
             "Failed to create ContainerServiceClient for the api server. "
             "Make sure you've set the AZURE_SUBSCRIPTION_ID AZURE_TENANT_ID "
             "AZURE_CLIENT_ID AZURE_CLIENT_SECRET environment variables.\n"
             f"Full exception:\n{e}"
         )
Example #15
def azure(project):

    # LOCATION is referenced below but was defined outside this snippet;
    # 'eastus' matches the location used for the resource group.
    LOCATION = 'eastus'

    credentials = ServicePrincipalCredentials(
        client_id='df13d748-8207-4bde-b2a7-f8e19fc13d7b',
        secret='24dee47b-a64a-45df-aae2-fba6da19a162',
        tenant='9ce5569a-4466-451f-b8c2-8c22b5ce353a')
    subscription_id = 'b0f2aac6-ea25-4d2d-89ec-3c1156f933a5'

    resource_client = ResourceManagementClient(credentials, subscription_id)
    container_client = ContainerServiceClient(credentials, subscription_id)
    resource_group_name = 'test-cluster'

    output = resource_client.resource_groups.create_or_update(
        resource_group_name, {'location': 'eastus'})

    print(LOCATION)

    #create_cluster = deploy_azure_k8s(container_client, GROUP_NAME, LOCATION)

    clusters = container_client.managed_clusters.list()

    print(clusters.__dict__)

    for c in clusters:
        #        pprint(c)
        #        pprint(c.__dict__)
        #        pprint(c.properties.__dict__)
        #        pprint(c.properties.service_principal_profile.__dict__)
        #        print("ACCESS_PROFILE")
        #        pprint(c.properties.access_profiles.__dict__)
        #        pprint(c.properties.access_profiles.cluster_admin.__dict__)
        print("KUBECONFIG")
        #        import base64
        #        import pdb;pdb.set_trace()
        #        access_profiles = c.properties.access_profiles.as_dict()
        #        access_profile = access_profiles.get('cluster_admin')
        #        encoded_kubeconfig = access_profile.get("kube_config")
        #        print(base64.b64decode(encoded_kubeconfig).decode(encoding='UTF-8'))
        print("LINUX PROFILE")
        pprint(c.properties.linux_profile.__dict__)
        pprint(c.properties.linux_profile.ssh.__dict__)
        print("PUBLIC ssh")
        pprint(type(c.properties.linux_profile.ssh.public_keys))
        for l in c.properties.linux_profile.ssh.public_keys:
            print(l)
        print("AGENT POOL")


        # for a in c.properties.agent_pool_profiles:
        #     print(a)

    c = container_client.managed_clusters.get(resource_group_name,
                                              "test-k8s-cluster")
    access_profiles = c.properties.access_profiles.as_dict()
    state = c.properties.provisioning_state
    print(state)
    access_profile = access_profiles.get('cluster_admin')
    encoded_kubeconfig = access_profile.get("kube_config")
    print("KUBECONFIG")
    kubeconfig = base64.b64decode(encoded_kubeconfig).decode(encoding='UTF-8')
    print(kubeconfig)
Example #16
def get_container_client():
    subscription_id = handler.serviceaccount
    credentials = get_credentials()
    container_client = ContainerServiceClient(credentials, subscription_id)
    return container_client
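
Both handler.serviceaccount and get_credentials() come from the surrounding project. A minimal sketch of a get_credentials() that would satisfy this client, assuming the service principal settings live in environment variables:

import os
from azure.common.credentials import ServicePrincipalCredentials

def get_credentials():
    # Assumption: the service principal is configured via environment variables.
    return ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID'])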
Example #17
    def stop(self, data):
        credentials, _, _ = self._get_credentials()

        # Do NOT use the conf but the actual values from the cluster here
        cluster_resource_id = data["cluster"]["id"]
        _, _, subscription_id, _, resource_group, _, _, _, cluster_name = cluster_resource_id.split(
            "/")
        clusters_client = ContainerServiceClient(credentials, subscription_id)

        # Try to detach from ACR if required. It is not mandatory but if not done, it would pollute
        # the ACR with multiple invalid role attachments and consume attachment quotas
        node_resource_group = data["cluster"]["node_resource_group"]
        acr_attachment = data.get("acr_attachment", None)
        if not _is_none_or_blank(acr_attachment):
            logging.info(
                "Cluster has an ACR attachment, check managed identity")
            cluster_identity_profile = data["cluster"]["identity_profile"]
            kubelet_mi_resource_id = cluster_identity_profile[
                "kubeletidentity"].get("resource_id", None)
            if kubelet_mi_resource_id is not None:
                _, _, mi_subscription_id, _, mi_resource_group, _, _, _, mi_name = kubelet_mi_resource_id.split(
                    "/")
                if mi_resource_group == node_resource_group:
                    logging.info(
                        "Cluster has an AKS managed kubelet identity, try to detach"
                    )
                    authorization_client = AuthorizationManagementClient(
                        credentials, acr_attachment["subscription_id"])
                    try:
                        authorization_client.role_assignments.delete_by_id(
                            acr_attachment["role_assignment"]["id"])
                    except ResourceNotFoundError as e:
                        logging.warning(
                            "It looks like the ACR role assignment doesn't exist. Ignoring this step"
                        )

        # Detach Vnet like ACR
        vnet_attachment = data.get("vnet_attachment", None)
        if not _is_none_or_blank(vnet_attachment):
            logging.info(
                "Cluster has a VNet attachment, check managed identity")
            if "role_assignment" in vnet_attachment:
                logging.info(
                    "Cluster has an AKS managed kubelet identity, try to detach"
                )
                authorization_client = AuthorizationManagementClient(
                    credentials, vnet_attachment["subscription_id"])
                try:
                    authorization_client.role_assignments.delete_by_id(
                        vnet_attachment["role_assignment"]["id"])
                except ResourceNotFoundError as e:
                    logging.warning(
                        "It looks like the VNet role assignment doesn't exist. Ignoring this step"
                    )

        def do_delete():
            future = clusters_client.managed_clusters.begin_delete(
                resource_group, cluster_name)
            return future.result()

        delete_result = run_and_process_cloud_error(do_delete)

        # delete returns void, so we poll until the cluster is really gone
        gone = False
        while not gone:
            time.sleep(5)
            try:
                cluster = clusters_client.managed_clusters.get(
                    resource_group, cluster_name)
                if cluster.provisioning_state.lower() != 'deleting':
                    logging.info(
                        "Cluster is not deleting anymore, must be deleted now (state = %s)"
                        % cluster.provisioning_state)
                    # Exit the loop; otherwise this would spin forever.
                    gone = True
            # other exceptions should not be ignored
            except ResourceNotFoundError as e:
                logging.info(
                    "Cluster doesn't seem to exist anymore, considering it deleted"
                )
                gone = True
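
The tuple unpacking of cluster_resource_id above relies on the fixed shape of an ARM resource ID, which for a managed cluster always has nine slash-separated segments. A worked example with a placeholder ID:

# /subscriptions/<sub>/resourceGroups/<rg>/providers/<namespace>/<type>/<name>
rid = ("/subscriptions/00000000-0000-0000-0000-000000000000"
       "/resourcegroups/my-rg/providers/Microsoft.ContainerService"
       "/managedClusters/my-aks")
_, _, subscription_id, _, resource_group, _, _, _, cluster_name = rid.split("/")
# subscription_id == "00000000-0000-0000-0000-000000000000"
# resource_group == "my-rg", cluster_name == "my-aks"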
Example #18
    def start(self):
        """
        Build the create cluster request.
        """
        credentials, subscription_id, managed_identity_id = self._get_credentials(
        )

        # Fetch metadata about the instance
        metadata = get_instance_metadata()

        # Resource group
        resource_group = self.config.get('resourceGroup', None)
        dss_host_resource_group = metadata["compute"]["resourceGroupName"]
        if _is_none_or_blank(resource_group):
            resource_group = dss_host_resource_group
            logging.info(
                "Using same resource group as DSS: {}".format(resource_group))

        # Location
        location = self.config.get('location', None)
        if _is_none_or_blank(location):
            location = metadata["compute"]["location"]
            logging.info("Using same location as DSS: {}".format(location))

        # Consistency checks
        if _is_none_or_blank(resource_group):
            raise Exception(
                "A resource group to put the cluster in is required")
        if _is_none_or_blank(location):
            raise Exception("A location to put the cluster in is required")

        # AKS Client
        clusters_client = None

        # Credit the cluster to DATAIKU
        if os.environ.get("DISABLE_AZURE_USAGE_ATTRIBUTION", "0") == "1":
            logging.info("Azure usage attribution is disabled")
            clusters_client = ContainerServiceClient(credentials,
                                                     subscription_id)
        else:
            policy = UserAgentPolicy()
            policy.add_user_agent('pid-fd3813c7-273c-5eec-9221-77323f62a148')
            clusters_client = ContainerServiceClient(credentials,
                                                     subscription_id,
                                                     user_agent_policy=policy)

        # check that the cluster doesn't exist yet, otherwise azure will try to update it
        # and will almost always fail
        try:
            existing = clusters_client.managed_clusters.get(
                resource_group, self.cluster_name)
            if existing is not None:
                raise Exception(
                    "A cluster with name %s in resource group %s already exists"
                    % (self.cluster_name, resource_group))
        except CloudError as e:
            logging.info("Cluster doesn't seem to exist yet")
        except ResourceNotFoundError as e:
            logging.info("Cluster doesn't seem to exist yet")

        cluster_builder = ClusterBuilder(clusters_client)
        cluster_builder.with_name(self.cluster_name)
        cluster_builder.with_dns_prefix("{}-dns".format(self.cluster_name))
        cluster_builder.with_resource_group(resource_group)
        cluster_builder.with_location(location)
        cluster_builder.add_tags(self.config.get("tags", None))
        cluster_builder.with_linux_profile()  # default is None
        cluster_builder.with_network_profile(
            service_cidr=self.config.get("serviceCIDR", None),
            dns_service_ip=self.config.get("dnsServiceIP", None),
            load_balancer_sku=self.config.get("loadBalancerSku", None),
            outbound_type=self.config.get("outboundType", None),
            network_plugin=self.config.get("networkPlugin"),
            docker_bridge_cidr=self.config.get("dockerBridgeCidr"))

        if self.config.get("useCustomNodeResourceGroup", False):
            cluster_builder.with_node_resource_group(
                self.config.get("nodeResourceGroup"))

        # Cluster identity
        connection_info = self.config.get("connectionInfo", None)
        cluster_identity_legacy_use_distinct_sp = self.config.get(
            "useDistinctSPForCluster", False)
        cluster_identity_legacy_sp = self.config.get("clusterServicePrincipal",
                                                     None)
        cluster_identity_type = None
        cluster_identity = None
        if not _is_none_or_blank(
                connection_info) or cluster_identity_legacy_use_distinct_sp:
            logging.warning(
                "Using legacy options to configure cluster identity. Clear them to use the new ones."
            )
            if not cluster_identity_legacy_use_distinct_sp and not _is_none_or_blank(
                    connection_info):
                cluster_sp = connection_info
            elif cluster_identity_legacy_use_distinct_sp and not _is_none_or_blank(
                    cluster_identity_legacy_sp):
                cluster_sp = self.config.get("clusterServicePrincipal")
            else:
                raise Exception(
                    "Legacy options are not complete enough to determine cluster identity settings"
                )
            cluster_builder.with_cluster_sp_legacy(
                cluster_service_principal_connection_info=cluster_sp)
        else:
            cluster_identity = self.config.get(
                "clusterIdentity", {"identityType": "managed-identity"})
            cluster_identity_type = cluster_identity.get(
                "identityType", "managed-identity")
            if cluster_identity_type == "managed-identity":
                if cluster_identity.get("inheritDSSIdentity", True):
                    logging.info(
                        "Need to inspect Managed Identity infos from Azure")
                    if metadata is None:
                        metadata = get_instance_metadata()
                    vm_resource_group = metadata["compute"][
                        "resourceGroupName"]
                    vm_name = metadata["compute"]["name"]
                    compute_client = ComputeManagementClient(
                        credentials, subscription_id)
                    vm = compute_client.virtual_machines.get(
                        vm_resource_group, vm_name)
                    # No choice here but to use the first one
                    if managed_identity_id is None:
                        managed_identity_id = next(
                            iter(vm.identity.user_assigned_identities.keys()))
                    for managed_identity_resource_id, managed_identity_properties in vm.identity.user_assigned_identities.items(
                    ):
                        if managed_identity_id == managed_identity_resource_id or managed_identity_id == managed_identity_properties.client_id:
                            break
                    logging.info("Found managed identity id {}".format(
                        managed_identity_resource_id))
                    cluster_builder.with_managed_identity(
                        managed_identity_resource_id)
                    cluster_builder.with_kubelet_identity(
                        managed_identity_resource_id,
                        managed_identity_properties.client_id,
                        managed_identity_properties.principal_id)
                else:
                    control_plane_mi = None if cluster_identity.get(
                        "useAKSManagedIdentity", True
                    ) else cluster_identity["controlPlaneUserAssignedIdentity"]
                    cluster_builder.with_managed_identity(control_plane_mi)
                    if control_plane_mi is None:
                        logging.info(
                            "Configure cluster with system managed identity.")
                    else:
                        logging.info(
                            "Configure cluster with user assigned identity: {}"
                            .format(control_plane_mi))
                    if not cluster_identity.get("useAKSManagedKubeletIdentity",
                                                True):
                        kubelet_mi = cluster_identity[
                            "kubeletUserAssignedIdentity"]
                        _, _, mi_subscription_id, _, mi_resource_group, _, _, _, mi_name = kubelet_mi.split(
                            "/")
                        msiclient = ManagedServiceIdentityClient(
                            AzureIdentityCredentialAdapter(credentials),
                            mi_subscription_id)
                        mi = msiclient.user_assigned_identities.get(
                            mi_resource_group, mi_name)
                        cluster_builder.with_kubelet_identity(
                            kubelet_mi, mi.client_id, mi.principal_id)
                        logging.info(
                            "Configure kubelet identity with user assigned identity resourceId=\"{}\", clientId=\"{}\", objectId=\"{}\""
                            .format(kubelet_mi, mi.client_id, mi.principal_id))
            elif cluster_identity_type == "service-principal":
                cluster_builder.with_cluster_sp(cluster_identity["clientId"],
                                                cluster_identity["password"])
                logging.info("Configure cluster with service principal")
            else:
                raise Exception(
                    "Cluster identity type \"{}\" is unknown".format(
                        cluster_identity_type))

        # Fail fast for non existing ACRs to avoid drama in case of failure AFTER cluster is created
        acr_role_id = None
        authorization_client = None
        if cluster_identity_type is not None and cluster_identity is not None:
            if cluster_identity_type == "managed-identity" and cluster_identity.get(
                    "useAKSManagedKubeletIdentity",
                    True) and not cluster_identity.get("inheritDSSIdentity",
                                                       True):
                acr_name = cluster_identity.get("attachToACRName", None)
                if not _is_none_or_blank(acr_name):
                    # build acr scope
                    acr_identifier_splitted = acr_name.split('/')
                    acr_subscription_id = subscription_id
                    acr_resource_group = resource_group
                    if 9 == len(acr_identifier_splitted):
                        _, _, acr_subscription_id, _, acr_resource_group, _, _, _, acr_name = acr_identifier_splitted
                    elif 2 == len(acr_identifier_splitted):
                        acr_resource_group, acr_name = acr_identifier_splitted

                    authorization_client = AuthorizationManagementClient(
                        credentials, acr_subscription_id)
                    acr_scope = "/subscriptions/{acr_subscription_id}/resourceGroups/{acr_resource_group}/providers/Microsoft.ContainerRegistry/registries/{acr_name}".format(
                        **locals())
                    try:
                        acr_roles = list(
                            authorization_client.role_definitions.list(
                                acr_scope, "roleName eq 'AcrPull'"))
                    except ResourceNotFoundError as e:
                        raise Exception(
                            "ACR {} not found. Check it exists and you are Owner of it."
                            .format(acr_scope))
                    if 0 == len(acr_roles):
                        raise Exception(
                            "Could not find the AcrPull role on the ACR {}. Check you are Owner of it."
                            .format(acr_scope))
                    else:
                        acr_role_id = acr_roles[0].id
                        logging.info("ACR pull role id: %s", acr_role_id)

                    # Try to run a fake role assignment. Depending on the failure type we know if we are Owner or not
                    try:
                        fake_role_assignment = authorization_client.role_assignments.create(
                            scope=acr_scope,
                            role_assignment_name=str(uuid.uuid4()),
                            parameters={
                                "properties": {
                                    "role_definition_id":
                                    acr_role_id,
                                    "principal_id":
                                    "00000000-0000-0000-0000-000000000000",
                                },
                            },
                        )
                    except HttpResponseError as e:
                        if e.reason == "Forbidden" and "AuthorizationFailed" in str(
                                e.error):
                            raise Exception(
                                "Cannot create role assignments on ACR {}. Check that you are Owner of it or provide an existing Kubelet identity."
                                .format(acr_scope))
                        elif e.reason == "Bad Request" and "PrincipalNotFound" in str(
                                e.error):
                            logging.info(
                                "Fake role assignment on ACR looks ok. Identity should be allowed to assign roles in further steps."
                            )
                        else:
                            raise
                    except Exception:
                        raise

        # Sanity check for node pools
        node_pool_vnets = set()
        for idx, node_pool_conf in enumerate(self.config.get("nodePools", [])):
            node_pool_builder = cluster_builder.get_node_pool_builder()
            nodepool_vnet = node_pool_conf.get("vnet", None)
            nodepool_subnet = node_pool_conf.get("subnet", None)
            vnet, _ = node_pool_builder.resolve_network(
                inherit_from_host=node_pool_conf.get(
                    "useSameNetworkAsDSSHost"),
                cluster_vnet=nodepool_vnet,
                cluster_subnet=nodepool_subnet,
                connection_info=connection_info,
                credentials=credentials,
                resource_group=resource_group,
                dss_host_resource_group=dss_host_resource_group)
            node_pool_vnets.add(vnet)

        if 1 < len(node_pool_vnets):
            raise Exception(
                "Node pools must all share the same vnet. Current node pools configuration yields vnets {}."
                .format(",".join(node_pool_vnets)))
        elif 0 == len(node_pool_vnets):
            raise Exception(
                "You cannot deploy a cluster without any node pool.")

        # Check role assignments for vnet like on ACR for fail fast if not doable
        vnet_id = node_pool_vnets.pop()
        if not vnet_id.startswith("/"):
            vnet_name = vnet_id
            vnet_id = "/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Network/virtualNetworks/{vnet_name}".format(
                **locals())
        vnet_role_id = None
        if cluster_identity_type is not None and cluster_identity is not None:
            if cluster_identity_type == "managed-identity" and cluster_identity.get(
                    "useAKSManagedIdentity",
                    True) and not cluster_identity.get("inheritDSSIdentity",
                                                       True):
                authorization_client = AuthorizationManagementClient(
                    credentials, subscription_id)
                try:
                    vnet_roles = list(
                        authorization_client.role_definitions.list(
                            vnet_id, "roleName eq 'Contributor'"))
                except ResourceNotFoundError as e:
                    raise Exception(
                        "Vnet {} not found. Check it exists and you are Owner of it."
                        .format(vnet_id))
                if 0 == len(vnet_roles):
                    raise Exception(
                        "Could not find the Contributor role on the vnet {}. Check you are Owner of it."
                        .format(vnet_id))
                else:
                    vnet_role_id = vnet_roles[0].id
                    logging.info("Vnet contributor role id: %s", acr_role_id)
                    # Try to run a fake role assignment. Depending on the failure type we know if we are Owner or not
                    try:
                        fake_role_assignment = authorization_client.role_assignments.create(
                            scope=vnet_id,
                            role_assignment_name=str(uuid.uuid4()),
                            parameters={
                                "properties": {
                                    "role_definition_id":
                                    vnet_role_id,
                                    "principal_id":
                                    "00000000-0000-0000-0000-000000000000",
                                },
                            },
                        )
                    except HttpResponseError as e:
                        if e.reason == "Forbidden" and "AuthorizationFailed" in str(
                                e.error):
                            raise Exception(
                                "Cannot create role assignments on VNet {}. Check that you are Owner of it or provide an existing Control Plane identity."
                                .format(vnet_id))
                        elif e.reason == "Bad Request" and "PrincipalNotFound" in str(
                                e.error):
                            logging.info(
                                "Fake role assignment on Vnet looks ok. Identity should be allowed to assign roles in further steps."
                            )
                        else:
                            raise
                    except Exception:
                        raise

        # Access level
        if self.config.get("privateAccess"):
            cluster_builder.with_private_access(
                self.config.get("privateAccess"))

        cluster_builder.with_cluster_version(
            self.config.get("clusterVersion", None))

        # Node pools
        for idx, node_pool_conf in enumerate(self.config.get("nodePools", [])):
            node_pool_builder = cluster_builder.get_node_pool_builder()
            node_pool_builder.with_idx(idx)
            node_pool_builder.with_vm_size(node_pool_conf.get("vmSize", None))
            vnet = node_pool_conf.get("vnet", None)
            subnet = node_pool_conf.get("subnet", None)
            node_pool_builder.with_network(
                inherit_from_host=node_pool_conf.get(
                    "useSameNetworkAsDSSHost"),
                cluster_vnet=vnet,
                cluster_subnet=subnet,
                connection_info=connection_info,
                credentials=credentials,
                resource_group=resource_group,
                dss_host_resource_group=dss_host_resource_group)

            node_pool_builder.with_availability_zones(
                use_availability_zones=node_pool_conf.get(
                    "useAvailabilityZones", True))

            node_pool_builder.with_node_count(
                enable_autoscaling=node_pool_conf.get("autoScaling", False),
                num_nodes=node_pool_conf.get("numNodes", None),
                min_num_nodes=node_pool_conf.get("minNumNodes", None),
                max_num_nodes=node_pool_conf.get("maxNumNodes", None))

            node_pool_builder.with_mode(
                mode=node_pool_conf.get("mode", "Automatic"),
                system_pods_only=node_pool_conf.get("systemPodsOnly", True))

            node_pool_builder.with_disk_size_gb(
                disk_size_gb=node_pool_conf.get("osDiskSizeGb", 0))
            node_pool_builder.with_node_labels(
                node_pool_conf.get("labels", None))
            node_pool_builder.with_node_taints(
                node_pool_conf.get("taints", None))
            node_pool_builder.add_tags(self.config.get("tags", None))
            node_pool_builder.add_tags(node_pool_conf.get("tags", None))
            node_pool_builder.build()
            cluster_builder.with_node_pool(
                node_pool=node_pool_builder.agent_pool_profile)

        # Run creation
        logging.info("Start creation of cluster")

        def do_creation():
            cluster_create_op = cluster_builder.build()
            return cluster_create_op.result()

        create_result = run_and_process_cloud_error(do_creation)
        logging.info("Cluster creation finished")

        # Attach to ACR
        acr_attachment = {}
        if cluster_identity_type is not None and cluster_identity is not None:
            if cluster_identity_type == "managed-identity" and cluster_identity.get(
                    "useAKSManagedKubeletIdentity",
                    True) and not cluster_identity.get("inheritDSSIdentity",
                                                       True):
                kubelet_mi_object_id = create_result.identity_profile.get(
                    "kubeletidentity").object_id
                logging.info("Kubelet Managed Identity object id: %s",
                             kubelet_mi_object_id)
                if not _is_none_or_blank(acr_role_id):
                    logging.info("Assign ACR pull role id %s to %s",
                                 acr_role_id, kubelet_mi_object_id)
                    role_assignment = authorization_client.role_assignments.create(
                        scope=acr_scope,
                        role_assignment_name=str(uuid.uuid4()),
                        parameters={
                            "properties": {
                                "role_definition_id": acr_role_id,
                                "principal_id": kubelet_mi_object_id,
                            },
                        },
                    )
                    acr_attachment.update({
                        "name":
                        acr_name,
                        "resource_group":
                        acr_resource_group,
                        "subscription_id":
                        acr_subscription_id,
                        "resource_id":
                        acr_scope,
                        "role_assignment":
                        role_assignment.as_dict(),
                    })

        # Attach to VNET to allow LoadBalancers creation
        vnet_attachment = {}
        if cluster_identity_type is not None and cluster_identity is not None:
            if cluster_identity_type == "managed-identity" and cluster_identity.get(
                    "useAKSManagedIdentity",
                    True) and not cluster_identity.get("inheritDSSIdentity",
                                                       True):
                # And here we are blocked because we can't get the principal id of a System Assigned Managed Id easily
                control_plane_object_id = create_result.identity.principal_id
                logging.info("Control Plane Managed Identity object id: %s",
                             control_plane_object_id)
                if not _is_none_or_blank(vnet_role_id):
                    logging.info("Assign Vnet contributolr role id %s to %s",
                                 vnet_role_id, control_plane_object_id)
                    vnet_role_assignment = authorization_client.role_assignments.create(
                        scope=vnet_id,
                        role_assignment_name=str(uuid.uuid4()),
                        parameters={
                            "properties": {
                                "role_definition_id": vnet_role_id,
                                "principal_id": control_plane_object_id,
                            },
                        },
                    )
                    vnet_attachment.update({
                        "subscription_id":
                        subscription_id,
                        "resource_id":
                        vnet_id,
                        "role_assignment":
                        vnet_role_assignment.as_dict(),
                    })

        logging.info("Fetching kubeconfig for cluster {} in {}...".format(
            self.cluster_name, resource_group))

        def do_fetch():
            return clusters_client.managed_clusters.list_cluster_admin_credentials(
                resource_group, self.cluster_name)

        get_credentials_result = run_and_process_cloud_error(do_fetch)
        kube_config_content = get_credentials_result.kubeconfigs[
            0].value.decode("utf8")
        logging.info("Writing kubeconfig file...")
        kube_config_path = os.path.join(os.getcwd(), "kube_config")
        with open(kube_config_path, 'w') as f:
            f.write(kube_config_content)

        overrides = make_overrides(
            self.config,
            yaml.safe_load(kube_config_content),
            kube_config_path,
            acr_name=None
            if _is_none_or_blank(acr_attachment) else acr_attachment["name"],
        )

        return [
            overrides, {
                "kube_config_path": kube_config_path,
                "cluster": create_result.as_dict(),
                "acr_attachment": acr_attachment,
                "vnet_attachment": vnet_attachment
            }
        ]
Example #19
    def start(self):
        """
        Build the create cluster request.
        """

        connection_info = self.config.get("connectionInfo", {})
        connection_info_secret = self.plugin_config.get("connectionInfo", {})
        credentials = get_credentials_from_connection_info(
            connection_info, connection_info_secret)
        subscription_id = connection_info.get('subscriptionId', None)
        resource_group = self.config.get('resourceGroup', None)

        clusters_client = ContainerServiceClient(credentials, subscription_id)

        # Credit the cluster to DATAIKU
        if os.environ.get("DISABLE_AZURE_USAGE_ATTRIBUTION", "0") == "1":
            logging.info("Azure usage attribution is disabled")
        else:
            clusters_client.config.add_user_agent(
                'pid-fd3813c7-273c-5eec-9221-77323f62a148')

        resource_group_name = self.config.get('resourceGroup', None)
        # TODO: Auto detection
        #if _is_none_or_blank(resource_group_name):
        #    resource_group_name = vm_infos.get('resource_group_name', None)
        if _is_none_or_blank(resource_group_name):
            raise Exception(
                "A resource group to put the cluster in is required")

        location = self.config.get('location', None)
        # TODO: Auto detection
        #if _is_none_or_blank(location):
        #    location = vm_infos.get('location', None)
        if _is_none_or_blank(location):
            raise Exception("A location to put the cluster in is required")

        # check that the cluster doesn't exist yet, otherwise azure will try to update it
        # and will almost always fail
        try:
            existing = clusters_client.managed_clusters.get(
                resource_group_name, self.cluster_name)
            if existing is not None:
                raise Exception(
                    "A cluster with name %s in resource group %s already exists"
                    % (self.cluster_name, resource_group_name))
        except CloudError as e:
            logging.info("Cluster doesn't seem to exist yet")

        cluster_builder = ClusterBuilder(clusters_client)
        cluster_builder.with_name(self.cluster_name)
        cluster_builder.with_dns_prefix("{}-dns".format(self.cluster_name))
        cluster_builder.with_resource_group(resource_group)
        cluster_builder.with_location(self.config.get("location", None))
        cluster_builder.with_linux_profile()  # default is None
        cluster_builder.with_network_profile(
            service_cidr=self.config.get("serviceCIDR", None),
            dns_service_ip=self.config.get("dnsServiceIP", None),
            load_balancer_sku=self.config.get("loadBalancerSku", None))

        if self.config.get("useDistinctSPForCluster", False):
            cluster_sp = self.config.get("clusterServicePrincipal")
        else:
            cluster_sp = connection_info
        cluster_builder.with_cluster_sp(
            cluster_service_principal_connection_info=cluster_sp)

        cluster_builder.with_cluster_version(
            self.config.get("clusterVersion", None))

        for idx, node_pool_conf in enumerate(self.config.get("nodePools", [])):
            node_pool_builder = cluster_builder.get_node_pool_builder()
            node_pool_builder.with_idx(idx)
            node_pool_builder.with_vm_size(node_pool_conf.get("vmSize", None))
            vnet = node_pool_conf.get("vnet", None)
            subnet = node_pool_conf.get("subnet", None)
            node_pool_builder.with_network(
                inherit_from_host=node_pool_conf.get(
                    "useSameNetworkAsDSSHost"),
                cluster_vnet=vnet,
                cluster_subnet=subnet,
                connection_info=connection_info,
                credentials=credentials,
                resource_group=resource_group)

            node_pool_builder.with_node_count(
                enable_autoscaling=node_pool_conf.get("autoScaling", False),
                num_nodes=node_pool_conf.get("numNodes", None),
                min_num_nodes=node_pool_conf.get("minNumNodes", None),
                max_num_nodes=node_pool_conf.get("maxNumNodes", None))

            node_pool_builder.with_disk_size_gb(
                disk_size_gb=node_pool_conf.get("osDiskSizeGb", 0))
            node_pool_builder.build()
            cluster_builder.with_node_pool(
                node_pool=node_pool_builder.agent_pool_profile)

        def do_creation():
            cluster_create_op = cluster_builder.build()
            return cluster_create_op.result()

        create_result = run_and_process_cloud_error(do_creation)

        logging.info("Fetching kubeconfig for cluster {} in {}...".format(
            self.cluster_name, resource_group))

        def do_fetch():
            return clusters_client.managed_clusters.list_cluster_admin_credentials(
                resource_group, self.cluster_name)

        get_credentials_result = run_and_process_cloud_error(do_fetch)
        kube_config_content = get_credentials_result.kubeconfigs[
            0].value.decode("utf8")
        logging.info("Writing kubeconfig file...")
        kube_config_path = os.path.join(os.getcwd(), "kube_config")
        with open(kube_config_path, 'w') as f:
            f.write(kube_config_content)

        overrides = make_overrides(self.config,
                                   yaml.safe_load(kube_config_content),
                                   kube_config_path)

        return [
            overrides, {
                "kube_config_path": kube_config_path,
                "cluster": create_result.as_dict()
            }
        ]
Example #20
def main():
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    CLIENT_ID = os.environ.get("CLIENT_ID", None)
    CLIENT_SECRET = os.environ.get("CLIENT_SECRET", None)
    GROUP_NAME = "testgroupx"
    MANAGED_CLUSTERS = "managed_clustersxxyyzz"
    AZURE_LOCATION = "eastus"

    # Create client
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(), subscription_id=SUBSCRIPTION_ID)
    containerservice_client = ContainerServiceClient(
        credential=DefaultAzureCredential(), subscription_id=SUBSCRIPTION_ID)
    # - init depended client -
    # - end -

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME, {"location": AZURE_LOCATION})

    # - init depended resources -
    # - end -

    # Create managed clusters
    managed_clusters = containerservice_client.managed_clusters.begin_create_or_update(
        GROUP_NAME, MANAGED_CLUSTERS, {
            "dns_prefix":
            "akspythonsdk",
            "agent_pool_profiles": [{
                "name": "aksagent",
                "count": 1,
                "vm_size": "Standard_DS2_v2",
                "max_pods": 110,
                "min_count": 1,
                "max_count": 100,
                "os_type": "Linux",
                "type": "VirtualMachineScaleSets",
                "enable_auto_scaling": True,
                "mode": "System",
            }],
            "service_principal_profile": {
                "client_id": CLIENT_ID,
                "secret": CLIENT_SECRET
            },
            "location":
            AZURE_LOCATION
        }).result()
    print("Create managed clusters:\n{}".format(managed_clusters))

    # Get managed clusters
    managed_clusters = containerservice_client.managed_clusters.get(
        GROUP_NAME, MANAGED_CLUSTERS)
    print("Get managed clusters:\n{}".format(managed_clusters))

    # Update managed clusters
    managed_clusters = containerservice_client.managed_clusters.begin_update_tags(
        GROUP_NAME, MANAGED_CLUSTERS, {
            "tags": {
                "tier": "testing",
                "archv3": ""
            }
        }).result()
    print("Update managed clusters:\n{}".format(managed_clusters))

    # Delete managed clusters
    managed_clusters = containerservice_client.managed_clusters.begin_delete(
        GROUP_NAME, MANAGED_CLUSTERS).result()
    print("Delete managed clusters.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(GROUP_NAME).result()
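
The begin_* methods return an azure.core LROPoller, and .result() blocks until the operation completes. To poll without blocking, a small sketch:

import time

poller = containerservice_client.managed_clusters.begin_delete(
    GROUP_NAME, MANAGED_CLUSTERS)
while not poller.done():
    # Check the long-running operation periodically instead of blocking.
    time.sleep(10)
print("Deletion finished with status: {}".format(poller.status()))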
Example #21
def aksauth_connect(cmd, resource_group, cluster_name, tenant, username, password):

    subscription = get_subscription_id(cmd.cli_ctx)
    
    authority_url = ('https://login.microsoftonline.com/' + tenant)
    context = adal.AuthenticationContext(
        authority_url, api_version=1.0,
        )

    LOGGER.info("Authenticating to AAD using ARM Resource and Default Client ID")

    #Create credentials object from our adal username and password flow. 
    credentials = AdalAuthentication(
        context.acquire_token_with_username_password,
        'https://management.azure.com/',
        username,
        password,
        '04b07795-8ddb-461a-bbee-02f9e1bf7b46'
    )

    LOGGER.info("Getting Kubeconfig Skeleton")
    #Get the skeleton kubeconfig
    client = ContainerServiceClient(credentials, subscription)

    credentialResults = client.managed_clusters.list_cluster_user_credentials(resource_group, cluster_name)

    LOGGER.info("Write Kubeconfig Skeleton to temp file")
    #Write skeleton kubeconfig to temp file
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8'))
        additional_file.flush()
    finally:
        additional_file.close()

    LOGGER.info("Load Kubeconfig Skeleton to dict")
    #Open skeleton kubeconfig into a dict and extract server, client and context name
    with open(temp_path) as file:
        kconfig = yaml.load(file, Loader=yaml.FullLoader)
        apiServer = kconfig.get('users')[0].get('user').get('auth-provider').get('config').get('apiserver-id')
        clientId = kconfig.get('users')[0].get('user').get('auth-provider').get('config').get('client-id')
        contextName = kconfig.get('contexts')[0].get('name')

    os.remove(temp_path)

    LOGGER.info("Authenticate with client on behalf of user to API Server App")
    #Generate access token, refresh token, expiry details using client and server
    token = context.acquire_token_with_username_password(
        resource=apiServer,
        username=username,
        password=password,
        client_id=clientId)

    LOGGER.info("Subbing in User Identity details into Kubeconfig dict")
    #Sub in above into kubeconfig dict
    kconfig['users'][0]['user']['auth-provider']['config']['access-token']=token['accessToken']
    kconfig['users'][0]['user']['auth-provider']['config']['refresh-token']=token['refreshToken']
    kconfig['users'][0]['user']['auth-provider']['config']['expires-in']=str(token['expiresIn'])
    kconfig['users'][0]['user']['auth-provider']['config']['expires-on']=token['expiresOn']

    LOGGER.info("Write Kubeconfig dict to temp file")
    #Write kubeconfig dict to temp file
    fd1, temp_path1 = tempfile.mkstemp()
    with open(temp_path1, 'w') as file:
        yaml.dump(kconfig, file)

    #Get default kubeconfig location
    kconfigPath = str(Path.home()) + os.sep + '.kube' + os.sep + 'config'

    LOGGER.info("Check if ddefault Kubeconfig exists")
    #Create empty kubeconfig and path if it doesn't exist
    if not os.path.exists(kconfigPath):
        os.makedirs(os.path.dirname(kconfigPath), exist_ok=True)
        with open(kconfigPath, "w") as f:
            f.write("")

    LOGGER.info("merge_kubernetes_configurations temp kubeconfig with default kuebconfig")
    #Reuse Azure's method to merge our new kubeconfig with the existing one
    merge_kubernetes_configurations(kconfigPath, temp_path1, True, contextName)
Example #22
def main():
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    AGENT_POOL = "agent_poolxxyyzz"
    AGENT_POOL_NAME = "aksagent"
    CLIENT_ID = os.environ.get("CLIENT_ID", None)
    CLIENT_SECRET = os.environ.get("CLIENT_SECRET", None)
    AZURE_LOCATION = "eastus"

    # Create client
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    containerservice_client = ContainerServiceClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    # - init depended client -
    # - end -

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {"location": AZURE_LOCATION}
    )

    # - init depended resources -
    # - end -

    # Create managed clusters
    managed_clusters = containerservice_client.managed_clusters.begin_create_or_update(
        GROUP_NAME,
        AGENT_POOL,
        {
            "dns_prefix": "akspythonsdk",
            "agent_pool_profiles": [
                {
                    "name": "aksagent",
                    "count": 1,
                    "vm_size": "Standard_DS2_v2",
                    "max_pods": 110,
                    "min_count": 1,
                    "max_count": 100,
                    "os_type": "Linux",
                    "type": "VirtualMachineScaleSets",
                    "enable_auto_scaling": True,
                    "mode": "System",
                }
            ],
            "service_principal_profile": {
                "client_id": CLIENT_ID,
                "secret": CLIENT_SECRET
            },
            "location": AZURE_LOCATION
        }
    ).result()
    # Create agent pool
    for i in range(10):
        try:
            agent_pool = containerservice_client.agent_pools.begin_create_or_update(
                GROUP_NAME,
                AGENT_POOL,
                AGENT_POOL_NAME,
                {
                    "orchestrator_version": "",
                    "count": "3",
                    "vm_size": "Standard_DS2_v2",
                    "os_type": "Linux",
                    "type": "VirtualMachineScaleSets",
                    "mode": "System",
                    "availability_zones": [
                        "1",
                        "2",
                        "3"
                    ],
                    "node_taints": []
                }
            ).result()
        except Exception:
            # The cluster may still be provisioning; wait and retry.
            time.sleep(30)
        else:
            break
    print("Create agent pool:\n{}".format(agent_pool))

    # Get agent pool
    agent_pool = containerservice_client.agent_pools.get(
        GROUP_NAME,
        AGENT_POOL,
        AGENT_POOL_NAME
    )
    print("Get agent pool:\n{}".format(agent_pool))

    # Delete Group
    resource_client.resource_groups.begin_delete(
        GROUP_NAME
    ).result()
Example #23
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.containerservice import ContainerServiceClient
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
import os

tenant_id = os.environ.get('TENANT')
application_id = os.environ.get('CLIENT_ID_JENKINS')   
application_secret = os.environ.get('CLIENT_SECRET_JENKINS') 
subscription_id = 'b72ab7b7-723f-4b18-b6f6-03b0f2c6a1bb' # os.environ.get('SUBSCRIPTION_ID')

credentials = ServicePrincipalCredentials(
    client_id = application_id,
    secret = application_secret,
    tenant = tenant_id,
)

container_client = ContainerServiceClient(credentials, subscription_id)
instance_client = ContainerInstanceManagementClient(credentials, subscription_id)

clusters = container_client.managed_clusters.list()

for cluster in clusters:
    print(cluster)
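
Note that managed_clusters.list() returns a paged iterator that is consumed as you loop; materialize it with list() if you need the results more than once:

clusters = list(container_client.managed_clusters.list())
print("Found {} clusters".format(len(clusters)))
for cluster in clusters:
    print(cluster.name)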