Пример #1
0
 def raise_event(self, name, **kwargs):
     '''Dispatch the event `name` to every registered handler.'''
     summary = truncate_text(str(kwargs), width=500)
     logger.debug("Application event '%s' with event data %s", name, summary)
     # Snapshot the handler list: a handler may (un)register handlers
     # for this event while we are iterating.
     handlers = list(self._event_handlers[name])
     for handler in handlers:
         handler(**kwargs)
Пример #2
0
 def raise_event(self, name, **kwargs):
     '''Raise the event `name`, invoking each subscribed callback with kwargs.'''
     logger.debug("Application event '%s' with event data %s", name,
                  truncate_text(str(kwargs), width=500))
     # Iterate over an immutable snapshot so callbacks can safely
     # modify the underlying handler list.
     for callback in tuple(self._event_handlers[name]):
         callback(**kwargs)
Пример #3
0
 def test_truncate_text_negative_width(self):
     '''A negative width must be rejected with ValueError.'''
     self.assertRaises(ValueError, truncate_text, 'string to shorten', width=-1)
Пример #4
0
 def test_truncate_text_not_needed(self):
     '''Text already within the width limit is returned unchanged.'''
     original = 'string to shorten'
     self.assertEqual(original, truncate_text(original, width=100))
Пример #5
0
 def test_truncate_text(self):
     '''Overlong text is cut down to the width, ending in " [...]".'''
     result = truncate_text('string to shorten', width=10)
     self.assertEqual('stri [...]', result)
Пример #6
0
 def test_truncate_text_negative_width(self):
     # Width below zero is an error, not a silent no-op.
     text = 'string to shorten'
     with self.assertRaises(ValueError):
         truncate_text(text, width=-1)
Пример #7
0
 def test_truncate_text_not_needed(self):
     # Nothing is trimmed when the text already fits the budget.
     self.assertEqual('string to shorten',
                      truncate_text('string to shorten', width=100))
Пример #8
0
 def test_truncate_text(self):
     # 10-char budget: 4 characters kept plus the ' [...]' marker.
     shortened = truncate_text('string to shorten', width=10)
     self.assertEqual('stri [...]', shortened)
Пример #9
0
def aks_create(
        cmd,
        client,
        resource_group_name,
        name,
        ssh_key_value,  # pylint: disable=too-many-locals
        dns_name_prefix=None,
        location=None,
        admin_username="******",
        kubernetes_version='',
        node_vm_size="Standard_DS2_v2",
        node_osdisk_size=0,
        node_count=3,
        service_principal=None,
        client_secret=None,
        no_ssh_key=False,
        disable_rbac=None,
        enable_rbac=None,
        enable_vmss=None,
        skip_subnet_role_assignment=False,
        enable_cluster_autoscaler=False,
        network_plugin=None,
        pod_cidr=None,
        service_cidr=None,
        dns_service_ip=None,
        docker_bridge_address=None,
        enable_addons=None,
        workspace_resource_id=None,
        min_count=None,
        max_count=None,
        vnet_subnet_id=None,
        max_pods=0,
        aad_client_app_id=None,
        aad_server_app_id=None,
        aad_server_app_secret=None,
        aad_tenant_id=None,
        tags=None,
        generate_ssh_keys=False,  # pylint: disable=unused-argument
        no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Assembles a ``ManagedCluster`` model from the supplied options —
    agent pool, optional Linux/SSH profile, service-principal profile,
    optional network/addon/AAD profiles — and submits it through
    ``client.managed_clusters.create_or_update``, retrying while the
    newly created service principal replicates in Active Directory.

    Raises:
        CLIError: if the SSH public key is invalid or non-existent
            (unless ``no_ssh_key`` is set), or if both ``--disable-rbac``
            and ``--enable-rbac`` were supplied.
        CloudError: re-raised from the service for failures other than
            the AAD-replication "not found in Active Directory tenant".

    Returns:
        The result of ``sdk_no_wait`` for the create_or_update call —
        presumably a poller when ``no_wait`` is False and the raw
        response otherwise (TODO confirm against sdk_no_wait's contract).
    """
    # Validate the SSH public key up front. is_valid_ssh_rsa_public_key may
    # raise TypeError on a non-string value, hence catching both types; an
    # empty/invalid key is normalized to ValueError so one CLIError covers all.
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(
                    ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError(
                'Provided ssh key ({}) is invalid or non-existent'.format(
                    shortened_key))

    subscription_id = _get_subscription_id(cmd.cli_ctx)
    # Derive a DNS prefix from the cluster name / resource group when the
    # caller did not supply one.
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name,
                                                  subscription_id)

    # Default the cluster location to the resource group's location.
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name='nodepool1',  # Must be 12 chars or less before ACS RP adds to it
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        vnet_subnet_id=vnet_subnet_id,
        # max_pods=0 (the default) means "unset": let the service decide.
        max_pods=int(max_pods) if max_pods else None)

    if enable_vmss:
        agent_pool_profile.type = "VirtualMachineScaleSets"
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)

    # Validates the autoscaler flag against min/max bounds and applies them
    # to the agent pool profile (see _check_cluster_autoscaler_flag).
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count,
                                   max_count, node_count, agent_pool_profile)

    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(
            admin_username=admin_username, ssh=ssh_config)

    # Resolve service-principal credentials; presumably the helper creates a
    # new SP when none was supplied — see _ensure_aks_service_principal.
    principal_obj = _ensure_aks_service_principal(
        cmd.cli_ctx,
        service_principal=service_principal,
        client_secret=client_secret,
        subscription_id=subscription_id,
        dns_name_prefix=dns_name_prefix,
        location=location,
        name=name)
    service_principal_profile = ManagedClusterServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"))

    # Grant the SP "Network Contributor" on the custom subnet, unless the
    # caller opted out or an assignment already exists. Failure is non-fatal:
    # we only warn, since the caller may lack Owner rights.
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        if not _add_role_assignment(
                cmd.cli_ctx, 'Network Contributor', service_principal,
                scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')

    # Only build a network profile when at least one networking option is set;
    # otherwise leave it None so the service applies its defaults.
    network_profile = None
    if any([
            network_plugin, pod_cidr, service_cidr, dns_service_ip,
            docker_bridge_address
    ]):
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address)

    addon_profiles = _handle_addons_args(cmd, enable_addons, subscription_id,
                                         resource_group_name, {},
                                         workspace_resource_id)
    # The monitoring addon additionally needs Container Insights wired to a
    # Log Analytics workspace.
    if 'omsagent' in addon_profiles:
        _ensure_container_insights_for_monitoring(cmd,
                                                  addon_profiles['omsagent'])
    # Only build an AAD profile when at least one AAD option is set.
    aad_profile = None
    if any([
            aad_client_app_id, aad_server_app_id, aad_server_app_secret,
            aad_tenant_id
    ]):
        aad_profile = ManagedClusterAADProfile(
            client_app_id=aad_client_app_id,
            server_app_id=aad_server_app_id,
            server_app_secret=aad_server_app_secret,
            tenant_id=aad_tenant_id)

    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError(
            'specify either "--disable-rbac" or "--enable-rbac", not both.')

    # NOTE(review): enable_rbac is only consulted in the mutual-exclusion
    # check above; RBAC is on by default and turned off only by disable_rbac.
    mc = ManagedCluster(location=location,
                        tags=tags,
                        dns_prefix=dns_name_prefix,
                        kubernetes_version=kubernetes_version,
                        enable_rbac=False if disable_rbac else True,
                        agent_pool_profiles=[agent_pool_profile],
                        linux_profile=linux_profile,
                        service_principal_profile=service_principal_profile,
                        network_profile=network_profile,
                        addon_profiles=addon_profiles,
                        aad_profile=aad_profile)

    # Due to SPN replication latency, we do a few retries here
    # (up to max_retry attempts, sleeping 3s between them). Only CloudErrors
    # whose message mentions the SP not yet being found in the AAD tenant are
    # retried; anything else is re-raised immediately.
    max_retry = 30
    # Placeholder exception, re-raised if every retry attempt is consumed.
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return sdk_no_wait(no_wait,
                               client.managed_clusters.create_or_update,
                               resource_group_name=resource_group_name,
                               resource_name=name,
                               parameters=mc)
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception