def kubernetes_exception_handler(
        ex,
        fault_type,
        summary,
        error_message='Error occurred while connecting to the kubernetes cluster: ',
        message_for_unauthorized_request='The user does not have required privileges on the kubernetes cluster to deploy Azure Arc enabled Kubernetes agents. Please ensure you have cluster admin privileges on the cluster to onboard.',
        message_for_not_found='The requested kubernetes resource was not found.',
        raise_error=True):
    telemetry.set_user_fault()
    if isinstance(ex, ApiException):
        status_code = ex.status
        if status_code == 403:
            logger.warning(message_for_unauthorized_request)
        if status_code == 404:
            logger.warning(message_for_not_found)
        if raise_error:
            telemetry.set_exception(exception=ex,
                                    fault_type=fault_type,
                                    summary=summary)
            raise CLIInternalError(error_message + "\nError Response: " +
                                   str(ex.body))
    else:
        if raise_error:
            telemetry.set_exception(exception=ex,
                                    fault_type=fault_type,
                                    summary=summary)
            raise CLIInternalError(error_message + "\nError: " + str(ex))
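A minimal usage sketch for the handler above; the kubernetes client object and the fault-type string below are illustrative placeholders, not part of the original.

def list_namespaces(api_instance):
    # api_instance is assumed to be a kubernetes.client.CoreV1Api instance
    try:
        return api_instance.list_namespace()
    except Exception as e:
        # logs a hint for 403/404 ApiExceptions and re-raises as CLIInternalError with telemetry
        kubernetes_exception_handler(e, 'List_Namespace_Fault_Type',
                                     summary='Unable to list namespaces from the cluster')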
Example #2
def auto_register(func, *args, **kwargs):
    import copy
    from azure.core.polling._poller import LROPoller
    from azure.core.exceptions import HttpResponseError

    # kwargs will be modified in the SDK
    kwargs_backup = copy.deepcopy(kwargs)
    try:
        res = func(*args, **kwargs)
        if isinstance(res, LROPoller):
            # polling the result to handle the case when target subscription is not registered
            return res.result()
        return res

    except HttpResponseError as ex:
        # source subscription is not registered
        if ex.error and ex.error.code == 'SubscriptionNotRegistered':
            if register_provider():
                return func(*args, **kwargs_backup)
            raise CLIInternalError('Registration failed, please manually run command '
                                   '`az provider register -n Microsoft.ServiceLinker` to register the provider.')
        # target subscription is not registered, raw check
        if ex.error and ex.error.code == 'UnauthorizedResourceAccess' and 'not registered' in ex.error.message:
            if 'parameters' in kwargs_backup and 'target_id' in kwargs_backup.get('parameters'):
                segments = parse_resource_id(kwargs_backup.get('parameters').get('target_id'))
                target_subs = segments.get('subscription')
                # double check whether target subscription is registered
                if not provider_is_registered(target_subs):
                    if register_provider(target_subs):
                        return func(*args, **kwargs_backup)
                    raise CLIInternalError('Registration failed, please manually run command '
                                           '`az provider register -n Microsoft.ServiceLinker --subscription {}` '
                                           'to register the provider.'.format(target_subs))
        raise ex
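A hedged sketch of calling auto_register around a long-running ServiceLinker SDK operation; the client, ids, and parameter payload below are illustrative assumptions rather than the original call site.

parameters = {'target_id': target_id, 'auth_info': auth_info}  # assumed minimal linker payload
linker = auto_register(
    client.linker.begin_create_or_update,  # assumed azure-mgmt-servicelinker LRO method
    resource_uri=source_id,
    linker_name=linker_name,
    parameters=parameters)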
def check_remote_source_code(source_location):
    lower_source_location = source_location.lower()

    # git
    if lower_source_location.startswith(
            "git@") or lower_source_location.startswith("git://"):
        return source_location

    # http
    if lower_source_location.startswith("https://") or lower_source_location.startswith("http://") \
       or lower_source_location.startswith("github.com/"):
        isVSTS = any(url in lower_source_location
                     for url in TASK_VALID_VSTS_URLS)
        if isVSTS or re.search(r"\.git(?:#.+)?$", lower_source_location):
            # git url must contain ".git" or be from VSTS/Azure DevOps.
            # This is because Azure DevOps doesn't follow the standard git server convention of putting
            # .git at the end of their URLs, so we have to special case them.
            return source_location
        if not lower_source_location.startswith("github.com/"):
            # Others are tarball
            if requests.head(source_location).status_code < 400:
                return source_location
            raise CLIInternalError(
                "'{}' doesn't exist.".format(source_location))

    # oci
    if lower_source_location.startswith("oci://"):
        return source_location
    raise CLIInternalError("'{}' doesn't exist.".format(source_location))
Example #4
def generate_random_string(length=5, prefix='', lower_only=False, ensure_complexity=False):
    '''Generate a random string
    :param length: the length of generated random string, not including the prefix
    :param prefix: the prefix string
    :param lower_only: ensure the generated string only includes lower case characters
    :param ensure_complexity: ensure the generated string satisfies complexity requirements
    '''
    import random
    import string

    if lower_only and ensure_complexity:
        raise CLIInternalError('lower_only and ensure_complexity cannot both be set to True')
    if ensure_complexity and length < 8:
        raise CLIInternalError('ensure_complexity needs length >= 8')

    character_set = string.ascii_letters + string.digits
    if lower_only:
        character_set = string.ascii_lowercase

    while True:
        randstr = '{}{}'.format(prefix, ''.join(random.sample(character_set, length)))
        lowers = [c for c in randstr if c.islower()]
        uppers = [c for c in randstr if c.isupper()]
        numbers = [c for c in randstr if c.isnumeric()]
        if not ensure_complexity or (lowers and uppers and numbers):
            break

    return randstr
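For example (the generated suffix shown is illustrative):

# 8 random characters after the prefix, guaranteed to mix lower case, upper case and digits
name = generate_random_string(length=8, prefix='db-', ensure_complexity=True)
# e.g. 'db-aK3xT9qP'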
Example #5
def get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood=None, release_train_dogfood=None):
    # Setting uri
    get_chart_location_url = "{}/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(config_dp_endpoint, 'azure-arc-k8sagents')
    release_train = os.getenv('RELEASETRAIN') if os.getenv('RELEASETRAIN') else 'stable'
    if dp_endpoint_dogfood:
        get_chart_location_url = "{}/azure-arc-k8sagents/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(dp_endpoint_dogfood)
        if release_train_dogfood:
            release_train = release_train_dogfood
    uri_parameters = ["releaseTrain={}".format(release_train)]
    resource = cmd.cli_ctx.cloud.endpoints.active_directory_resource_id

    # Sending request
    try:
        r = send_raw_request(cmd.cli_ctx, 'post', get_chart_location_url, uri_parameters=uri_parameters, resource=resource)
    except Exception as e:
        telemetry.set_exception(exception=e, fault_type=consts.Get_HelmRegistery_Path_Fault_Type,
                                summary='Error while fetching helm chart registry path')
        raise CLIInternalError("Error while fetching helm chart registry path: " + str(e))
    if r.content:
        try:
            return r.json().get('repositoryPath')
        except Exception as e:
            telemetry.set_exception(exception=e, fault_type=consts.Get_HelmRegistery_Path_Fault_Type,
                                    summary='Error while fetching helm chart registry path')
            raise CLIInternalError("Error while fetching helm chart registry path from JSON response: " + str(e))
    else:
        telemetry.set_exception(exception='No content in response', fault_type=consts.Get_HelmRegistery_Path_Fault_Type,
                                summary='No content in acr path response')
        raise CLIInternalError("No content was found in helm registry path response.")
Example #6
def await_github_action(cmd, token, repo, branch, name, resource_group_name, timeout_secs=1200):
    from .custom import show_github_action
    from ._clients import PollingAnimation

    start = datetime.utcnow()
    animation = PollingAnimation()
    animation.tick()

    github_repo = get_github_repo(token, repo)

    gh_action_status = "InProgress"
    while gh_action_status == "InProgress":
        time.sleep(SHORT_POLLING_INTERVAL_SECS)
        animation.tick()
        gh_action_status = safe_get(show_github_action(cmd, name, resource_group_name), "properties", "operationState")
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to be created.")
        animation.flush()
    if gh_action_status == "Failed":
        raise CLIInternalError("The Github Action creation failed.")  # TODO ask backend team for a status url / message

    workflow = None
    while workflow is None:
        animation.tick()
        time.sleep(SHORT_POLLING_INTERVAL_SECS)
        workflow = get_workflow(github_repo, name)
        animation.flush()

        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to start.")

    runs = workflow.get_runs()
    while runs is None or not [r for r in runs if r.status in ('queued', 'in_progress')]:
        time.sleep(SHORT_POLLING_INTERVAL_SECS)
        runs = workflow.get_runs()
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to be started.")
    runs = [r for r in runs if r.status in ('queued', 'in_progress')]
    runs.sort(key=lambda r: r.created_at, reverse=True)
    run = runs[0]  # run with the latest created_at date that's either in progress or queued
    logger.warning(f"Github action run: https://github.com/{repo}/actions/runs/{run.id}")
    logger.warning("Waiting for deployment to complete...")
    run_id = run.id
    status = run.status
    while status in ('queued', 'in_progress'):
        time.sleep(LONG_POLLING_INTERVAL_SECS)
        animation.tick()
        status = github_repo.get_workflow_run(run_id).status
        animation.flush()
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to complete.")

    animation.flush()  # needed to clear the animation from the terminal
    run = github_repo.get_workflow_run(run_id)
    if run.status != "completed" or run.conclusion != "success":
        raise ValidationError("Github action build or deployment failed. "
                              f"Please see https://github.com/{repo}/actions/runs/{run.id} for more details")
Example #7
 def __init__(self, cmd: AzCliCommand, raw_parameters: BaseAKSParamDict,
              models: BaseAKSModels, decorator_mode: DecoratorMode):
     if not isinstance(raw_parameters, BaseAKSParamDict):
         raise CLIInternalError(
             "Unexpected raw_parameters object with type '{}'.".format(
                 type(raw_parameters)))
     if not validate_decorator_mode(decorator_mode):
         raise CLIInternalError(
             "Unexpected decorator_mode '{}' with type '{}'.".format(
                 decorator_mode, type(decorator_mode)))
     self.cmd = cmd
     self.raw_param = raw_parameters
     self.models = models
     self.decorator_mode = decorator_mode
     self.intermediates = dict()
Example #8
 def __call__(self, parser, namespace, values, option_string=None):
     try:
         rule_id = _get_rule_id(values[0])
         baseline_row_values = values[1:]
         super(AppendBaselines, self).__call__(parser, namespace, (rule_id, baseline_row_values), option_string)
     except ValueError:
         raise CLIInternalError("Unexpected error")
def _archive_file_recursively(tar, name, arcname, parent_ignored,
                              parent_matching_rule_index, ignore_check):
    # create a TarInfo object from the file
    tarinfo = tar.gettarinfo(name, arcname)

    if tarinfo is None:
        raise CLIInternalError("tarfile: unsupported type {}".format(name))

    # check if the file/dir is ignored
    ignored, matching_rule_index = ignore_check(tarinfo, parent_ignored,
                                                parent_matching_rule_index)

    if not ignored:
        # append the tar header and data to the archive
        if tarinfo.isreg():
            with open(name, "rb") as f:
                tar.addfile(tarinfo, f)
        else:
            tar.addfile(tarinfo)

    # even if the dir is ignored, its child items can still be included, so continue to scan
    if tarinfo.isdir():
        for f in os.listdir(name):
            _archive_file_recursively(
                tar,
                os.path.join(name, f),
                os.path.join(arcname, f),
                parent_ignored=ignored,
                parent_matching_rule_index=matching_rule_index,
                ignore_check=ignore_check)
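A minimal driver for the recursive helper above, with a pass-through ignore_check; real callers use a .dockerignore-style matcher, so the lambda below is only a placeholder.

import os
import tarfile

def archive_source(source_dir, tar_file_path):
    with tarfile.open(tar_file_path, "w:gz") as tar:
        _archive_file_recursively(
            tar, source_dir, os.path.basename(source_dir),
            parent_ignored=False, parent_matching_rule_index=-1,
            # ignore_check(tarinfo, parent_ignored, parent_rule_index) -> (ignored, rule_index)
            ignore_check=lambda tarinfo, ignored, rule_index: (False, rule_index))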
def _get_login_account_principal_id(cli_ctx):
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core._profile import Profile, _USER_ENTITY, _USER_TYPE, _SERVICE_PRINCIPAL, _USER_NAME
    from azure.graphrbac import GraphRbacManagementClient
    profile = Profile(cli_ctx=cli_ctx)
    cred, _, tenant_id = profile.get_login_credentials(
        resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
    client = GraphRbacManagementClient(
        cred,
        tenant_id,
        base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
    active_account = profile.get_subscription()
    assignee = active_account[_USER_ENTITY][_USER_NAME]
    result = None
    try:
        if active_account[_USER_ENTITY][_USER_TYPE] == _SERVICE_PRINCIPAL:
            result = list(
                client.service_principals.list(
                    filter=f"servicePrincipalNames/any(c:c eq '{assignee}')"))
        else:
            result = [client.signed_in_user.get()]
    except GraphErrorException as ex:
        logger.warning("Graph query error %s", ex)
    if not result:
        raise CLIInternalError((
            f"Failed to retrieve principal id for '{assignee}', which is needed to create a "
            f"role assignment. Consider using '--principal-ids' to bypass the lookup"
        ))

    return result[0].object_id
Example #11
def delete_arc_agents(release_namespace, kube_config, kube_context,
                      configuration):
    cmd_helm_delete = [
        "helm", "delete", "azure-arc", "--namespace", release_namespace
    ]
    if kube_config:
        cmd_helm_delete.extend(["--kubeconfig", kube_config])
    if kube_context:
        cmd_helm_delete.extend(["--kube-context", kube_context])
    response_helm_delete = Popen(cmd_helm_delete, stdout=PIPE, stderr=PIPE)
    _, error_helm_delete = response_helm_delete.communicate()
    if response_helm_delete.returncode != 0:
        error_msg = error_helm_delete.decode("ascii")
        if 'forbidden' in error_msg \
                or 'Error: warning: Hook pre-delete' in error_msg \
                or 'Error: timed out waiting for the condition' in error_msg:
            telemetry.set_user_fault()
        telemetry.set_exception(
            exception=error_msg,
            fault_type=consts.Delete_HelmRelease_Fault_Type,
            summary='Unable to delete helm release')
        raise CLIInternalError(
            "Error occurred while cleaning up arc agents. " +
            "Helm release deletion failed: " + error_msg +
            " Please run 'helm delete azure-arc' to ensure that the release is deleted."
        )
    ensure_namespace_cleanup(configuration)
Example #12
    def get_token(self, *scopes, **kwargs):  # pylint:disable=unused-argument
        logger.debug(
            "AdalAuthentication.get_token invoked by Track 2 SDK with scopes=%s",
            scopes)

        _, token, full_token, _ = self._get_token(
            _try_scopes_to_resource(scopes))

        # NEVER use expiresIn (expires_in) as the token is cached and expiresIn will be already out-of date
        # when being retrieved.

        # User token entry sample:
        # {
        #     "tokenType": "Bearer",
        #     "expiresOn": "2020-11-13 14:44:42.492318",
        #     "resource": "https://management.core.windows.net/",
        #     "userId": "*****@*****.**",
        #     "accessToken": "eyJ0eXAiOiJKV...",
        #     "refreshToken": "0.ATcAImuCVN...",
        #     "_clientId": "04b07795-8ddb-461a-bbee-02f9e1bf7b46",
        #     "_authority": "https://login.microsoftonline.com/54826b22-38d6-4fb2-bad9-b7b93a3e9c5a",
        #     "isMRRT": True,
        #     "expiresIn": 3599
        # }

        # Service Principal token entry sample:
        # {
        #     "tokenType": "Bearer",
        #     "expiresIn": 3599,
        #     "expiresOn": "2020-11-12 13:50:47.114324",
        #     "resource": "https://management.core.windows.net/",
        #     "accessToken": "eyJ0eXAiOiJKV...",
        #     "isMRRT": True,
        #     "_clientId": "22800c35-46c2-4210-b8a7-d8c3ec3b526f",
        #     "_authority": "https://login.microsoftonline.com/54826b22-38d6-4fb2-bad9-b7b93a3e9c5a"
        # }
        if 'expiresOn' in full_token:
            import datetime
            expires_on_timestamp = int(
                _timestamp(
                    datetime.datetime.strptime(full_token['expiresOn'],
                                               '%Y-%m-%d %H:%M:%S.%f')))
            return AccessToken(token, expires_on_timestamp)

        # Cloud Shell (Managed Identity) token entry sample:
        # {
        #     "access_token": "eyJ0eXAiOiJKV...",
        #     "refresh_token": "",
        #     "expires_in": "2106",
        #     "expires_on": "1605686811",
        #     "not_before": "1605682911",
        #     "resource": "https://management.core.windows.net/",
        #     "token_type": "Bearer"
        # }
        if 'expires_on' in full_token:
            return AccessToken(token, int(full_token['expires_on']))

        from azure.cli.core.azclierror import CLIInternalError
        raise CLIInternalError(
            "No expiresOn or expires_on is available in the token entry.")
Example #13
def _get_new_identity_type_for_assign(app, system_assigned, user_assigned):
    new_identity_type = None

    if app.identity and app.identity.type:
        new_identity_type = app.identity.type
    else:
        new_identity_type = models_20220301preview.ManagedIdentityType.NONE

    if system_assigned:
        if new_identity_type in (
                models_20220301preview.ManagedIdentityType.USER_ASSIGNED,
                models_20220301preview.ManagedIdentityType.
                SYSTEM_ASSIGNED_USER_ASSIGNED):
            new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
        else:
            new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED

    if user_assigned:
        if new_identity_type in (
                models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED,
                models_20220301preview.ManagedIdentityType.
                SYSTEM_ASSIGNED_USER_ASSIGNED):
            new_identity_type = models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
        else:
            new_identity_type = models_20220301preview.ManagedIdentityType.USER_ASSIGNED

    if not new_identity_type or new_identity_type == models_20220301preview.ManagedIdentityType.NONE:
        raise CLIInternalError(
            "Internal error: invalid new identity type:{}.".format(
                new_identity_type))

    return new_identity_type
Example #14
def _register_resource_provider(cmd, resource_provider):
    from azure.mgmt.resource.resources.models import ProviderRegistrationRequest, ProviderConsentDefinition

    logger.warning(f"Registering resource provider {resource_provider} ...")
    properties = ProviderRegistrationRequest(third_party_provider_consent=ProviderConsentDefinition(consent_to_authorization=True))

    client = providers_client_factory(cmd.cli_ctx)
    try:
        client.register(resource_provider, properties=properties)
        # wait for registration to finish
        timeout_secs = 120
        registration = _is_resource_provider_registered(cmd, resource_provider)
        start = datetime.utcnow()
        while not registration:
            registration = _is_resource_provider_registered(cmd, resource_provider)
            time.sleep(SHORT_POLLING_INTERVAL_SECS)
            if (datetime.utcnow() - start).seconds >= timeout_secs:
                raise CLIInternalError(f"Timed out while waiting for the {resource_provider} resource provider to be registered.")

    except Exception as e:
        msg = ("This operation requires requires registering the resource provider {0}. "
               "We were unable to perform that registration on your behalf: "
               "Server responded with error message -- {1} . "
               "Please check with your admin on permissions, "
               "or try running registration manually with: az provider register --wait --namespace {0}")
        raise ValidationError(resource_provider, msg.format(e.args)) from e
Example #15
 def __init__(self, param_dict):
     if not isinstance(param_dict, dict):
         raise CLIInternalError(
             "Unexpected param_dict object with type '{}'.".format(
                 type(param_dict)))
     self.__store = param_dict.copy()
     self.__count = {}
Example #16
def _get_user_identity_payload_for_remove(new_identity_type,
                                          user_identity_list_to_remove):
    """
    :param new_identity_type: ManagedIdentityType
    :param user_identity_list_to_remove: None, an empty list, or a list of user-assigned managed identity resource ids to remove.
    :return: None, or a non-empty dict mapping each user-assigned managed identity resource id to None
    """
    user_identity_payload = {}
    if new_identity_type in (
            models_20220301preview.ManagedIdentityType.USER_ASSIGNED,
            models_20220301preview.ManagedIdentityType.
            SYSTEM_ASSIGNED_USER_ASSIGNED):
        # an empty list means remove all user-assigned managed identities
        if user_identity_list_to_remove is not None and len(
                user_identity_list_to_remove) == 0:
            raise CLIInternalError(
                "When removing all user-assigned managed identities, "
                "the target identity type should not be {}.".format(
                    new_identity_type))
        # non-empty list
        elif user_identity_list_to_remove:
            for id in user_identity_list_to_remove:
                user_identity_payload[id] = None

    if not user_identity_payload:
        user_identity_payload = None

    return user_identity_payload
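For instance, removing a single user-assigned identity from an app that keeps its system-assigned identity (the resource id is a placeholder):

identity_id = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/my-rg/'
               'providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity')
payload = _get_user_identity_payload_for_remove(
    models_20220301preview.ManagedIdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED,
    [identity_id])
# payload == {identity_id: None}; mapping the id to None asks the service to drop that assignment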
Example #17
    def provision(self):
        '''Create the target resource, and return the parameters for connection creation.
        '''
        target_type = self._get_target_type()
        creation_steps = AddonConfig.get(target_type).get('create')

        logger.warning('Start creating a new %s', target_type.value)
        for cnt, step in enumerate(creation_steps):
            # apply parameters to format the command
            cmd = step.format(**self._params)
            try:
                run_cli_cmd(cmd)
            except CLIInternalError as err:
                logger.warning('Creation failed, start rolling back')
                self.rollback(cnt)
                raise CLIInternalError(
                    'Provision failed, please create the target resource manually '
                    'and then create the connection. Error details: {}'.format(
                        str(err)))

        target_id = self.get_target_id()
        logger.warning('Created, the resource id is: %s', target_id)

        auth_info = self.get_auth_info()
        logger.warning('The auth info used to create connection is: %s',
                       str(auth_info))

        return target_id, auth_info
Example #18
def user_confirmation(message, yes=False):
    if yes:
        return
    try:
        if not prompt_y_n(message):
            raise ManualInterrupt('Operation cancelled.')
    except NoTTYException:
        raise CLIInternalError('Unable to prompt for confirmation as no tty available. Use --yes.')
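Typical usage from a command that exposes a --yes flag (the command body is a placeholder):

def delete_widget(widget_name, yes=False):
    user_confirmation(
        "Are you sure you want to delete '{}'?".format(widget_name), yes=yes)
    # reached only when the user confirmed or --yes was passed
    ...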
def upload_source_code(cmd, client, registry_name, resource_group_name,
                       source_location, tar_file_path, docker_file_path,
                       docker_file_in_tar):
    _pack_source_code(source_location, tar_file_path, docker_file_path,
                      docker_file_in_tar)

    size = os.path.getsize(tar_file_path)
    unit = 'GiB'
    for S in ['Bytes', 'KiB', 'MiB', 'GiB']:
        if size < 1024:
            unit = S
            break
        size = size / 1024.0

    logger.info("Uploading archived source code from '%s'...", tar_file_path)
    upload_url = None
    relative_path = None
    try:
        source_upload_location = client.get_build_source_upload_url(
            resource_group_name, registry_name)
        upload_url = source_upload_location.upload_url
        relative_path = source_upload_location.relative_path
    except (AttributeError, CloudError) as e:
        raise CLIInternalError(
            "Failed to get a SAS URL to upload context. Error: {}".format(
                e.message)) from e

    if not upload_url:
        raise CLIInternalError("Failed to get a SAS URL to upload context.")

    account_name, endpoint_suffix, container_name, blob_name, sas_token = get_blob_info(
        upload_url)
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE,
                               'blob#BlockBlobService')
    BlockBlobService(
        account_name=account_name,
        sas_token=sas_token,
        endpoint_suffix=endpoint_suffix,
        # Increase socket timeout from default of 20s for clients with slow network connection.
        socket_timeout=300).create_blob_from_path(
            container_name=container_name,
            blob_name=blob_name,
            file_path=tar_file_path)
    logger.info("Sending context ({0:.3f} {1}) to registry: {2}...".format(
        size, unit, registry_name))
    return relative_path
Example #20
    def _validate_subnet(cmd, namespace):
        subnet = getattr(namespace, key)

        if not is_valid_resource_id(subnet):
            if not namespace.vnet:
                raise RequiredArgumentMissingError(
                    f"Must specify --vnet if --{key.replace('_', '-')} is not an id."
                )

            validate_vnet(cmd, namespace)

            subnet = namespace.vnet + '/subnets/' + subnet
            setattr(namespace, key, subnet)

        parts = parse_resource_id(subnet)

        if parts['subscription'] != get_subscription_id(cmd.cli_ctx):
            raise InvalidArgumentValueError(
                f"--{key.replace('_', '-')} subscription '{parts['subscription']}' must equal cluster subscription."
            )

        if parts['namespace'].lower() != 'microsoft.network':
            raise InvalidArgumentValueError(
                f"--{key.replace('_', '-')} namespace '{parts['namespace']}' must equal Microsoft.Network."
            )

        if parts['type'].lower() != 'virtualnetworks':
            raise InvalidArgumentValueError(
                f"--{key.replace('_', '-')} type '{parts['type']}' must equal virtualNetworks."
            )

        if parts['last_child_num'] != 1:
            raise InvalidArgumentValueError(
                f"--{key.replace('_', '-')} '{subnet}' must have one child.")

        if 'child_namespace_1' in parts:
            raise InvalidArgumentValueError(
                f"--{key.replace('_', '-')} '{subnet}' must not have child namespace."
            )

        if parts['child_type_1'].lower() != 'subnets':
            raise InvalidArgumentValueError(
                f"--{key.replace('_', '-')} child type '{subnet}' must equal subnets."
            )

        client = get_mgmt_service_client(cmd.cli_ctx,
                                         ResourceType.MGMT_NETWORK)
        try:
            client.subnets.get(parts['resource_group'], parts['name'],
                               parts['child_name_1'])
        except Exception as err:
            if isinstance(err, ResourceNotFoundError):
                raise InvalidArgumentValueError(
                    f"Invald --{key.replace('_', '-')}, error when getting '{subnet}': {str(err)}"
                ) from err
            raise CLIInternalError(
                f"Unexpected error when getting subnet '{subnet}': {str(err)}"
            ) from err
    def _ensure_agentpool(self, agentpool: AgentPool) -> None:
        """Internal function to ensure that the incoming `agentpool` object is valid and the same as the attached
        `agentpool` object in the context.

        If the incoming `agentpool` is not valid or is inconsistent with the `agentpool` in the context, raise a
        CLIInternalError.

        :return: None
        """
        if not isinstance(agentpool, self.models.AgentPool):
            raise CLIInternalError(
                "Unexpected agentpool object with type '{}'.".format(
                    type(agentpool)))

        if self.context.agentpool != agentpool:
            raise CLIInternalError(
                "Inconsistent state detected. The incoming `agentpool` "
                "is not the same as the `agentpool` in the context.")
Example #22
def flatten(dd, separator='.', prefix=''):
    try:
        if isinstance(dd, dict):
            return {prefix + separator + k if prefix else k: v for kk, vv in dd.items() for k, v in flatten(vv, separator, kk).items()}
        else:
            return {prefix: dd}
    except Exception as e:
        telemetry.set_exception(exception=e, fault_type=consts.Error_Flattening_User_Supplied_Value_Dict,
                                summary='Error while flattening the user supplied helm values dict')
        raise CLIInternalError("Error while flattening the user supplied helm values dict")
Example #23
def format_styled_text(styled_text):
    # https://python-prompt-toolkit.readthedocs.io/en/stable/pages/printing_text.html#style-text-tuples
    formatted_parts = []

    for text in styled_text:
        # str can also be indexed, bypassing IndexError, so explicitly check if the type is tuple
        if not (isinstance(text, tuple) and len(text) == 2):
            from azure.cli.core.azclierror import CLIInternalError
            raise CLIInternalError("Invalid styled text. It should be a list of 2-element tuples.")

        style = text[0]
        if style not in THEME:
            from azure.cli.core.azclierror import CLIInternalError
            raise CLIInternalError("Invalid style. Only use pre-defined style in Style enum.")

        formatted_parts.append(THEME[text[0]] + text[1])

    # Reset control sequence
    formatted_parts.append(Fore.RESET)
    return ''.join(formatted_parts)
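An illustrative call, assuming THEME is keyed by the Style enum from azure.cli.core.style:

from azure.cli.core.style import Style

print(format_styled_text([
    (Style.WARNING, 'Certificate expires soon. '),
    (Style.ACTION, 'Run the renew command to extend it.'),
]))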
Example #24
def pull_helm_chart(registry_path, kube_config, kube_context):
    cmd_helm_chart_pull = ["helm", "chart", "pull", registry_path]
    if kube_config:
        cmd_helm_chart_pull.extend(["--kubeconfig", kube_config])
    if kube_context:
        cmd_helm_chart_pull.extend(["--kube-context", kube_context])
    response_helm_chart_pull = subprocess.Popen(cmd_helm_chart_pull, stdout=PIPE, stderr=PIPE)
    _, error_helm_chart_pull = response_helm_chart_pull.communicate()
    if response_helm_chart_pull.returncode != 0:
        telemetry.set_exception(exception=error_helm_chart_pull.decode("ascii"), fault_type=consts.Pull_HelmChart_Fault_Type,
                                summary='Unable to pull helm chart from the registry')
        raise CLIInternalError("Unable to pull helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_pull.decode("ascii"))
    def set_up_http_proxy_config(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up http proxy config for the ManagedCluster object.

        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )

        mc.http_proxy_config = self.context.get_http_proxy_config()
        return mc
    def set_up_pod_security_policy(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up pod security policy for the ManagedCluster object.

        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )

        mc.enable_pod_security_policy = self.context.get_enable_pod_security_policy()
        return mc
    def set_up_node_resource_group(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up node resource group for the ManagedCluster object.

        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )

        mc.node_resource_group = self.context.get_node_resource_group()
        return mc
Example #28
def register_command_group(name,
                           is_preview=False,
                           is_experimental=False,
                           hide=False,
                           redirect=None,
                           expiration=None):
    """This decorator is used to register an AAZCommandGroup as a cli command group.
    A registered AAZCommandGroup will be added into module's command group table.
    """
    if is_preview and is_experimental:
        raise CLIInternalError(
            PREVIEW_EXPERIMENTAL_CONFLICT_ERROR.format(name))
    deprecated_info = {}
    if hide:
        deprecated_info['hide'] = hide
    if redirect:
        deprecated_info['redirect'] = f'az {redirect}'
    if expiration:
        deprecated_info['expiration'] = expiration

    def decorator(cls):
        assert issubclass(cls, AAZCommandGroup)
        cls.AZ_NAME = name
        short_summary, long_summary, _ = _parse_cls_doc(cls)
        cls.AZ_HELP = {
            "type": "group",
            "short-summary": short_summary,
            "long-summary": long_summary
        }

        # the only way to load command group help in knack is by _load_from_file
        # TODO: change knack to load AZ_HELP directly
        import yaml
        from knack.help_files import helps
        helps[name] = yaml.safe_dump(cls.AZ_HELP)

        if is_preview:
            cls.AZ_PREVIEW_INFO = partial(PreviewItem,
                                          target=f'az {name}',
                                          object_type='command group')
        if is_experimental:
            cls.AZ_EXPERIMENTAL_INFO = partial(ExperimentalItem,
                                               target=f'az {name}',
                                               object_type='command group')
        if deprecated_info:
            cls.AZ_DEPRECATE_INFO = partial(Deprecated,
                                            target=f'az {name}',
                                            object_type='command group',
                                            **deprecated_info)
        return cls

    return decorator
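A sketch of applying the decorator to an AAZCommandGroup subclass; the group name and class below are placeholders:

@register_command_group("demo widget", is_preview=True)
class WidgetCommandGroup(AAZCommandGroup):
    """Manage demo widgets."""

# the class docstring feeds the group's short summary shown by `az demo widget --help`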
Example #29
    def _validate_subnet(cmd, namespace):
        subnet = getattr(namespace, key)

        if not is_valid_resource_id(subnet):
            if not namespace.vnet:
                raise RequiredArgumentMissingError(
                    'Must specify --vnet if --%s is not an id.' %
                    key.replace('_', '-'))

            validate_vnet(cmd, namespace)

            subnet = namespace.vnet + '/subnets/' + subnet
            setattr(namespace, key, subnet)

        parts = parse_resource_id(subnet)

        if parts['subscription'] != get_subscription_id(cmd.cli_ctx):
            raise InvalidArgumentValueError(
                "--%s subscription '%s' must equal cluster subscription." %
                (key.replace('_', '-'), parts["subscription"]))

        if parts['namespace'].lower() != 'microsoft.network':
            raise InvalidArgumentValueError(
                "--%s namespace '%s' must equal Microsoft.Network." %
                (key.replace('_', '-'), parts["namespace"]))

        if parts['type'].lower() != 'virtualnetworks':
            raise InvalidArgumentValueError(
                "--%s type '%s' must equal virtualNetworks." %
                (key.replace('_', '-'), parts["type"]))

        if parts['last_child_num'] != 1:
            raise InvalidArgumentValueError("--%s '%s' must have one child." %
                                            (key.replace('_', '-'), subnet))

        if 'child_namespace_1' in parts:
            raise InvalidArgumentValueError(
                "--%s '%s' must not have child namespace." %
                (key.replace('_', '-'), subnet))

        if parts['child_type_1'].lower() != 'subnets':
            raise InvalidArgumentValueError(
                "--%s child type '%s' must equal subnets." %
                (key.replace('_', '-'), subnet))

        client = get_mgmt_service_client(cmd.cli_ctx,
                                         ResourceType.MGMT_NETWORK)
        try:
            client.subnets.get(parts['resource_group'], parts['name'],
                               parts['child_name_1'])
        except CloudError as err:
            raise CLIInternalError(err.message) from err
def handle_raw_exception(e):
    import json

    stringErr = str(e)

    if "{" in stringErr and "}" in stringErr:
        jsonError = stringErr[stringErr.index("{"):stringErr.rindex("}") + 1]
        jsonError = json.loads(jsonError)

        if 'error' in jsonError:
            jsonError = jsonError['error']

            if 'code' in jsonError and 'message' in jsonError:
                code = jsonError['code']
                message = jsonError['message']
                raise CLIInternalError('({}) {}'.format(code, message))
        elif "Message" in jsonError:
            message = jsonError["Message"]
            raise CLIInternalError(message)
        elif "message" in jsonError:
            message = jsonError["message"]
            raise CLIInternalError(message)
    raise e
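Typical usage, re-raising an SDK/HTTP error whose string form carries a JSON error payload (the client call is a placeholder):

def create_containerapp(client, resource_group_name, name, containerapp_def):
    try:
        return client.container_apps.begin_create_or_update(
            resource_group_name, name, containerapp_def).result()
    except Exception as e:
        # surfaces '(<code>) <message>' as CLIInternalError when a JSON error body is present, otherwise re-raises
        handle_raw_exception(e)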