Example #1
def validate_activation_mode(activation_mode):
    """Validate activation mode parameters"""
    if activation_mode not in [None, 'SharedProcess', 'ExclusiveProcess']:
        raise CLIError('Invalid activation mode specified')
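A standalone sketch of the allow-list pattern this validator uses; CLIError is stubbed here (it normally comes from knack.util) so the snippet runs on its own:

class CLIError(Exception):
    """Stand-in for knack.util.CLIError, only for this sketch."""

ALLOWED_ACTIVATION_MODES = (None, 'SharedProcess', 'ExclusiveProcess')

def check_activation_mode(activation_mode):
    # Allow-list check: None means "not specified" and is accepted.
    if activation_mode not in ALLOWED_ACTIVATION_MODES:
        raise CLIError('Invalid activation mode specified')

check_activation_mode('SharedProcess')     # accepted
try:
    check_activation_mode('SharedProces')  # typo is rejected
except CLIError as err:
    print(err)                             # Invalid activation mode specified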
Example #2
def create_storage_account(cmd,
                           resource_group_name,
                           account_name,
                           sku=None,
                           location=None,
                           kind=None,
                           tags=None,
                           custom_domain=None,
                           encryption_services=None,
                           access_tier=None,
                           https_only=None,
                           enable_files_aadds=None,
                           bypass=None,
                           default_action=None,
                           assign_identity=False,
                           enable_large_file_share=None,
                           enable_files_adds=None,
                           domain_name=None,
                           net_bios_domain_name=None,
                           forest_name=None,
                           domain_guid=None,
                           domain_sid=None,
                           azure_storage_sid=None,
                           enable_hierarchical_namespace=None,
                           encryption_key_type_for_table=None,
                           encryption_key_type_for_queue=None,
                           routing_choice=None,
                           publish_microsoft_endpoints=None,
                           publish_internet_endpoints=None,
                           require_infrastructure_encryption=None,
                           allow_blob_public_access=None,
                           min_tls_version=None,
                           allow_shared_key_access=None):
    StorageAccountCreateParameters, Kind, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
        cmd.get_models('StorageAccountCreateParameters', 'Kind', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
                       'Encryption', 'NetworkRuleSet')
    scf = storage_client_factory(cmd.cli_ctx)
    if kind is None:
        logger.warning(
            "The default kind for created storage account will change to 'StorageV2' from 'Storage' "
            "in the future")
    params = StorageAccountCreateParameters(sku=Sku(name=sku),
                                            kind=Kind(kind),
                                            location=location,
                                            tags=tags,
                                            encryption=Encryption())
    # TODO: remove this part when server side remove the constraint
    if encryption_services is None:
        params.encryption.services = {'blob': {}}

    if custom_domain:
        params.custom_domain = CustomDomain(name=custom_domain,
                                            use_sub_domain=None)
    if encryption_services:
        params.encryption = Encryption(services=encryption_services)
    if access_tier:
        params.access_tier = AccessTier(access_tier)
    if assign_identity:
        params.identity = Identity(type='SystemAssigned')
    if https_only is not None:
        params.enable_https_traffic_only = https_only
    if enable_hierarchical_namespace is not None:
        params.is_hns_enabled = enable_hierarchical_namespace

    AzureFilesIdentityBasedAuthentication = cmd.get_models(
        'AzureFilesIdentityBasedAuthentication')
    if enable_files_aadds is not None:
        params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
            directory_service_options='AADDS' if enable_files_aadds else 'None'
        )
    if enable_files_adds is not None:
        ActiveDirectoryProperties = cmd.get_models('ActiveDirectoryProperties')
        if enable_files_adds:  # enable AD
            if not (domain_name and net_bios_domain_name and forest_name
                    and domain_guid and domain_sid and azure_storage_sid):
                raise CLIError(
                    "To enable ActiveDirectoryDomainServicesForFile, user must specify all of: "
                    "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                    "--azure_storage_sid arguments in Azure Active Directory Properties Argument group."
                )

            active_directory_properties = ActiveDirectoryProperties(
                domain_name=domain_name,
                net_bios_domain_name=net_bios_domain_name,
                forest_name=forest_name,
                domain_guid=domain_guid,
                domain_sid=domain_sid,
                azure_storage_sid=azure_storage_sid)
            # TODO: Enabling AD will automatically disable AADDS. Maybe we should throw error message

            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='AD',
                active_directory_properties=active_directory_properties)

        else:  # disable AD
            if domain_name or net_bios_domain_name or forest_name or domain_guid or domain_sid or azure_storage_sid:  # pylint: disable=too-many-boolean-expressions
                raise CLIError(
                    "To disable ActiveDirectoryDomainServicesForFile, user can't specify any of: "
                    "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                    "--azure_storage_sid arguments in Azure Active Directory Properties Argument group."
                )

            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='None')

    if enable_large_file_share:
        LargeFileSharesState = cmd.get_models('LargeFileSharesState')
        params.large_file_shares_state = LargeFileSharesState("Enabled")

    if NetworkRuleSet and (bypass or default_action):
        if bypass and not default_action:
            raise CLIError(
                'incorrect usage: --default-action ACTION [--bypass SERVICE ...]'
            )
        params.network_rule_set = NetworkRuleSet(bypass=bypass,
                                                 default_action=default_action,
                                                 ip_rules=None,
                                                 virtual_network_rules=None)

    if encryption_key_type_for_table is not None or encryption_key_type_for_queue is not None:
        EncryptionServices = cmd.get_models('EncryptionServices')
        EncryptionService = cmd.get_models('EncryptionService')
        params.encryption = Encryption()
        params.encryption.services = EncryptionServices()
        if encryption_key_type_for_table is not None:
            table_encryption_service = EncryptionService(
                enabled=True, key_type=encryption_key_type_for_table)
            params.encryption.services.table = table_encryption_service
        if encryption_key_type_for_queue is not None:
            queue_encryption_service = EncryptionService(
                enabled=True, key_type=encryption_key_type_for_queue)
            params.encryption.services.queue = queue_encryption_service

    if any([
            routing_choice, publish_microsoft_endpoints,
            publish_internet_endpoints
    ]):
        RoutingPreference = cmd.get_models('RoutingPreference')
        params.routing_preference = RoutingPreference(
            routing_choice=routing_choice,
            publish_microsoft_endpoints=publish_microsoft_endpoints,
            publish_internet_endpoints=publish_internet_endpoints)
    if allow_blob_public_access is not None:
        params.allow_blob_public_access = allow_blob_public_access

    if require_infrastructure_encryption:
        params.encryption.require_infrastructure_encryption = require_infrastructure_encryption

    if min_tls_version:
        params.minimum_tls_version = min_tls_version

    if allow_shared_key_access is not None:
        params.allow_shared_key_access = allow_shared_key_access

    return scf.storage_accounts.begin_create(resource_group_name, account_name,
                                             params)
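A hypothetical call site (resource names are placeholders and cmd is supplied by the CLI framework): begin_create follows the track-2 Azure SDK long-running-operation convention, so the caller blocks on the returned poller to get the finished account.

poller = create_storage_account(cmd, 'my-rg', 'mystorageacct',
                                sku='Standard_LRS', location='westus',
                                kind='StorageV2')
account = poller.result()   # waits for provisioning to complete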
Example #3
def _get_credentials(cli_ctx,
                     registry_name,
                     resource_group_name,
                     username,
                     password,
                     only_refresh_token,
                     repository=None,
                     artifact_repository=None,
                     permission=None):
    """Try to get AAD authorization tokens or admin user credentials.
    :param str registry_name: The name of container registry
    :param str resource_group_name: The name of resource group
    :param str username: The username used to log into the container registry
    :param str password: The password used to log into the container registry
    :param bool only_refresh_token: Whether to ask for only refresh token, or for both refresh and access tokens
    :param str repository: Repository for which the access token is requested
    :param str artifact_repository: Artifact repository for which the access token is requested
    :param str permission: The requested permission on the repository, '*' or 'pull'
    """
    # 1. if username was specified, verify that password was also specified
    if username:
        # Try to use the pre-defined login server suffix to construct login server from registry name.
        # This is to avoid a management server request if username/password are already provided.
        # In all other cases, including the suffix not defined, login server will be obtained from server.
        login_server_suffix = get_login_server_suffix(cli_ctx)
        if login_server_suffix:
            login_server = '{}{}'.format(registry_name, login_server_suffix)
        else:
            registry, _ = get_registry_by_name(cli_ctx, registry_name,
                                               resource_group_name)
            login_server = registry.login_server

        if not password:
            try:
                password = prompt_pass(msg='Password: ')
            except NoTTYException:
                raise CLIError(
                    'Please specify both username and password in non-interactive mode.'
                )

        return login_server, username, password

    registry, resource_group_name = get_registry_by_name(
        cli_ctx, registry_name, resource_group_name)
    login_server = registry.login_server

    # 2. if we don't yet have credentials, attempt to get a refresh token
    if not password and registry.sku.name in MANAGED_REGISTRY_SKU:
        try:
            password = _get_aad_token(cli_ctx, login_server,
                                      only_refresh_token, repository,
                                      artifact_repository, permission)
            return login_server, EMPTY_GUID, password
        except CLIError as e:
            logger.warning(
                "Unable to get AAD authorization tokens with message: %s",
                str(e))

    # 3. if we still don't have credentials, attempt to get the admin credentials (if enabled)
    if not password and registry.admin_user_enabled:
        try:
            cred = cf_acr_registries(cli_ctx).list_credentials(
                resource_group_name, registry_name)
            username = cred.username
            password = cred.passwords[0].value
            return login_server, username, password
        except CLIError as e:
            logger.warning(
                "Unable to get admin user credentials with message: %s",
                str(e))

    # 4. if we still don't have credentials, prompt the user
    if not password:
        try:
            username = prompt('Username: ')
            password = prompt_pass(msg='Password: ')
            return login_server, username, password
        except NoTTYException:
            raise CLIError(
                'Unable to authenticate using AAD or admin login credentials. '
                'Please specify both username and password in non-interactive mode.'
            )

    return login_server, None, None
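The function tries four sources in order: explicit username/password, an AAD token, the registry's admin credentials, and finally an interactive prompt. The same fallback shape, distilled with hypothetical strategy callables:

def first_credential(strategies):
    # Try each credential source in order; the first one that returns a
    # value wins, mirroring the username -> AAD -> admin -> prompt chain.
    for get_credential in strategies:
        credential = get_credential()
        if credential is not None:
            return credential
    raise RuntimeError('no credential source succeeded')

login = first_credential([
    lambda: None,                                           # e.g. AAD fetch failed
    lambda: ('myregistry.azurecr.io', 'admin', 's3cret'),   # admin creds hit
])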
Example #4
def retention_validator(ns):
    if ns.backup_retention:
        val = ns.backup_retention
        if not 7 <= val <= 35:
            raise CLIError(
                'incorrect usage: --backup-retention. Range is 7 to 35 days.')
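A quick usage sketch, assuming retention_validator and CLIError are importable: validators receive the parsed-argument namespace, so types.SimpleNamespace can stand in for it.

from types import SimpleNamespace

retention_validator(SimpleNamespace(backup_retention=14))   # in range, passes
try:
    retention_validator(SimpleNamespace(backup_retention=40))
except CLIError as err:
    print(err)   # incorrect usage: --backup-retention. Range is 7 to 35 days.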
Example #5
def delete_api_key(client, application, resource_group_name, api_key):
    existing_key = list(filter(lambda result: result.name == api_key, client.list(resource_group_name, application)))
    if existing_key != []:
        return client.delete(resource_group_name, application, existing_key[0].id.split('/')[-1])
    raise CLIError('--api-key provided but key not found for deletion.')
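The filter/list combination materializes every key before testing the first match. An equivalent sketch (hypothetical name delete_api_key_v2) short-circuits with next() instead:

def delete_api_key_v2(client, application, resource_group_name, api_key):
    # next() stops at the first matching key rather than building the full list.
    match = next((result for result in client.list(resource_group_name, application)
                  if result.name == api_key), None)
    if match is None:
        raise CLIError('--api-key provided but key not found for deletion.')
    return client.delete(resource_group_name, application, match.id.split('/')[-1])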
Example #6
def set_key(cmd,
            key,
            name=None,
            label=None,
            content_type=None,
            tags=None,
            value=None,
            yes=False,
            connection_string=None):
    connection_string = resolve_connection_string(cmd, name, connection_string)
    azconfig_client = AzconfigClient(connection_string)

    retry_times = 3
    retry_interval = 1

    label = label if label and label != ModifyKeyValueOptions.empty_label else None
    for i in range(0, retry_times):
        try:
            retrieved_kv = azconfig_client.get_keyvalue(
                key, QueryKeyValueOptions(label))
        except HTTPException as exception:
            raise CLIError(str(exception))

        if retrieved_kv is None:
            set_kv = KeyValue(key, value, label, tags, content_type)
        else:
            set_kv = KeyValue(
                key=key,
                label=label,
                value=retrieved_kv.value if value is None else value,
                content_type=retrieved_kv.content_type
                if content_type is None else content_type,
                tags=retrieved_kv.tags if tags is None else tags)
            set_kv.etag = retrieved_kv.etag

        verification_kv = {
            "key": set_kv.key,
            "label": set_kv.label,
            "content_type": set_kv.content_type,
            "value": set_kv.value,
            "tags": set_kv.tags
        }

        entry = json.dumps(verification_kv,
                           indent=2,
                           sort_keys=True,
                           ensure_ascii=False)
        confirmation_message = "Are you sure you want to set the key: \n" + entry + "\n"
        user_confirmation(confirmation_message, yes)

        try:
            if set_kv.etag is None:
                return azconfig_client.add_keyvalue(set_kv, ModifyKeyValueOptions())
            return azconfig_client.update_keyvalue(set_kv, ModifyKeyValueOptions())
        except HTTPException as exception:
            if exception.status == StatusCodes.PRECONDITION_FAILED:
                logger.debug(
                    'Retrying setting %s times with exception: concurrent setting operations',
                    i + 1)
                time.sleep(retry_interval)
            else:
                raise CLIError(str(exception))
        except Exception as exception:
            raise CLIError(str(exception))
    raise CLIError(
        "Fail to set the key '{}' due to a conflicting operation.".format(key))
Example #7
def set_keyvault(cmd,
                 key,
                 secret_identifier,
                 name=None,
                 label=None,
                 tags=None,
                 yes=False,
                 connection_string=None):
    connection_string = resolve_connection_string(cmd, name, connection_string)
    azconfig_client = AzconfigClient(connection_string)

    keyvault_ref_value = json.dumps({"uri": secret_identifier},
                                    ensure_ascii=False,
                                    separators=(',', ':'))
    retry_times = 3
    retry_interval = 1

    label = label if label and label != ModifyKeyValueOptions.empty_label else None
    for i in range(0, retry_times):
        try:
            retrieved_kv = azconfig_client.get_keyvalue(
                key, QueryKeyValueOptions(label))
        except HTTPException as exception:
            raise CLIError(str(exception))

        if retrieved_kv is None:
            set_kv = KeyValue(key, keyvault_ref_value, label, tags,
                              KeyVaultConstants.KEYVAULT_CONTENT_TYPE)
        else:
            logger.warning(
                "This operation will result in overwriting existing key whose value is: %s",
                retrieved_kv.value)
            set_kv = KeyValue(
                key=key,
                label=label,
                value=keyvault_ref_value,
                content_type=KeyVaultConstants.KEYVAULT_CONTENT_TYPE,
                tags=retrieved_kv.tags if tags is None else tags)
            set_kv.etag = retrieved_kv.etag

        verification_kv = {
            "key": set_kv.key,
            "label": set_kv.label,
            "content_type": set_kv.content_type,
            "value": set_kv.value,
            "tags": set_kv.tags
        }
        entry = json.dumps(verification_kv,
                           indent=2,
                           sort_keys=True,
                           ensure_ascii=False)
        confirmation_message = "Are you sure you want to set the keyvault reference: \n" + entry + "\n"
        user_confirmation(confirmation_message, yes)

        try:
            if set_kv.etag is None:
                return azconfig_client.add_keyvalue(set_kv, ModifyKeyValueOptions())
            return azconfig_client.update_keyvalue(set_kv, ModifyKeyValueOptions())
        except HTTPException as exception:
            if exception.status == StatusCodes.PRECONDITION_FAILED:
                logger.debug(
                    'Retrying setting %s times with exception: concurrent setting operations',
                    i + 1)
                time.sleep(retry_interval)
            else:
                raise CLIError(str(exception))
        except Exception as exception:
            raise CLIError(str(exception))
    raise CLIError(
        "Failed to set the keyvault reference '{}' due to a conflicting operation."
        .format(key))
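The value written for a Key Vault reference is plain JSON carrying the secret URI, paired with a dedicated content type. This standalone sketch rebuilds the value exactly as the command does (the URI is a placeholder):

import json

secret_identifier = 'https://myvault.vault.azure.net/secrets/mysecret'  # placeholder
keyvault_ref_value = json.dumps({"uri": secret_identifier},
                                ensure_ascii=False, separators=(',', ':'))
print(keyvault_ref_value)   # {"uri":"https://myvault.vault.azure.net/secrets/mysecret"}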
Example #8
def validate_streaming_job_start(namespace):
    from knack.util import CLIError
    if namespace.output_start_mode == 'CustomTime' and namespace.output_start_time is None:
        raise CLIError(
            'usage error: --output-start-time is required when --output-start-mode is CustomTime'
        )
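Example #9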
def create(
    cmd,
    configuration_file=None,
    product_id=None,
    device_type=None,
    attestation_type=None,
    certificate_path=None,
    connection_string=None,
    endorsement_key=None,
    badge_type=BadgeType.IotDevice.value,
    validation_type=ValidationType.test.value,
    models=None,
    skip_provisioning=False,
    base_url=None,
):
    if attestation_type == AttestationType.x509.value and not certificate_path:
        raise CLIError("If attestation type is x509, certificate path is required")
    if attestation_type == AttestationType.tpm.value and not endorsement_key:
        raise CLIError("If attestation type is TPM, endorsement key is required")
    if badge_type == BadgeType.Pnp.value and not models:
        raise CLIError("If badge type is Pnp, models is required")
    if badge_type == BadgeType.IotEdgeCompatible.value and not all(
        [connection_string, attestation_type == AttestationType.connectionString.value]
    ):
        raise CLIError(
            "Connection string is required for Edge Compatible modules testing"
        )
    if badge_type != BadgeType.IotEdgeCompatible.value and (
        connection_string or attestation_type == AttestationType.connectionString.value
    ):
        raise CLIError(
            "Connection string is only available for Edge Compatible modules testing"
        )
    if validation_type != ValidationType.test.value and not product_id:
        raise CLIError(
            "Product Id is required for validation type {}".format(validation_type)
        )
    if not any(
        [
            configuration_file,
            all([device_type, attestation_type, badge_type]),
        ]
    ):
        raise CLIError(
            "If configuration file is not specified, attestation and device definition parameters must be specified"
        )
    test_configuration = (
        _create_from_file(configuration_file)
        if configuration_file
        else _build_test_configuration(
            product_id=product_id,
            device_type=device_type,
            attestation_type=attestation_type,
            certificate_path=certificate_path,
            endorsement_key=endorsement_key,
            badge_type=badge_type,
            connection_string=connection_string,
            models=models,
            validation_type=validation_type
        )
    )

    ap = AICSProvider(cmd, base_url)

    provisioning = not skip_provisioning
    test_data = ap.create_test(
        test_configuration=test_configuration, provisioning=provisioning
    )

    return test_data
Example #10
def _validate_parameters(repository, image):
    if bool(repository) == bool(image):
        raise CLIError('Usage error: --image IMAGE | --repository REPOSITORY')
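bool(repository) == bool(image) rejects both the neither-given and the both-given cases in a single comparison, i.e. exclusive-or. A quick truth-table check:

for repository, image in [(None, None), ('repo', 'img'), ('repo', None), (None, 'img')]:
    verdict = 'rejected' if bool(repository) == bool(image) else 'accepted'
    print(repository, image, verdict)
# None None rejected / repo img rejected / repo None accepted / None img accepted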
Example #11
def _legacy_delete(cmd,
                   registry_name,
                   repository,
                   tag=None,
                   manifest=None,
                   resource_group_name=None,
                   username=None,
                   password=None,
                   yes=False):
    _, resource_group_name = validate_managed_registry(cmd.cli_ctx,
                                                       registry_name,
                                                       resource_group_name,
                                                       DELETE_NOT_SUPPORTED)

    login_server, username, password = get_access_credentials(
        cli_ctx=cmd.cli_ctx,
        registry_name=registry_name,
        resource_group_name=resource_group_name,
        username=username,
        password=password,
        repository=repository,
        permission='*')

    _INVALID = "Please specify either a tag name with --tag or a manifest digest with --manifest."

    # If manifest is not specified
    if manifest is None:
        if not tag:
            user_confirmation(
                "Are you sure you want to delete the repository '{}' "
                "and all images under it?".format(repository), yes)
            path = '/v2/_acr/{}/repository'.format(repository)
        else:
            logger.warning(
                "This command is deprecated. The new command for this operation "
                "is 'az acr repository untag --name %s --image %s:%s'.",
                registry_name, repository, tag)
            user_confirmation(
                "Are you sure you want to delete the tag '{}:{}'?".format(
                    repository, tag), yes)
            path = '/v2/_acr/{}/tags/{}'.format(repository, tag)
    # If --manifest is specified as a flag
    elif not manifest:
        # Raise if --tag is empty
        if not tag:
            raise CLIError(_INVALID)
        logger.warning(
            "This command is deprecated. The new command for this operation "
            "is 'az acr repository delete --name %s --image %s:%s'.",
            registry_name, repository, tag)
        manifest = _delete_manifest_confirmation(login_server=login_server,
                                                 username=username,
                                                 password=password,
                                                 repository=repository,
                                                 tag=tag,
                                                 manifest=manifest,
                                                 yes=yes)
        path = '/v2/{}/manifests/{}'.format(repository, manifest)
    # If --manifest is specified with a value
    else:
        # Raise if --tag is not empty
        if tag:
            raise CLIError(_INVALID)
        logger.warning(
            "This command is deprecated. The new command for this operation "
            "is 'az acr repository delete --name %s --image %s@%s'.",
            registry_name, repository, manifest)
        manifest = _delete_manifest_confirmation(login_server=login_server,
                                                 username=username,
                                                 password=password,
                                                 repository=repository,
                                                 tag=tag,
                                                 manifest=manifest,
                                                 yes=yes)
        path = '/v2/{}/manifests/{}'.format(repository, manifest)

    return request_data_from_registry(http_method='delete',
                                      login_server=login_server,
                                      path=path,
                                      username=username,
                                      password=password)[0]
Example #12
def acr_repository_delete(cmd,
                          registry_name,
                          repository=None,
                          image=None,
                          tag=None,
                          manifest=None,
                          resource_group_name=None,
                          username=None,
                          password=None,
                          yes=False):
    _validate_parameters(repository, image)

    # Check if this is a legacy command. --manifest can be used as a flag so None is checked.
    if repository and (tag or manifest is not None):
        return _legacy_delete(cmd=cmd,
                              registry_name=registry_name,
                              repository=repository,
                              tag=tag,
                              manifest=manifest,
                              resource_group_name=resource_group_name,
                              username=username,
                              password=password,
                              yes=yes)

    # At this point the specified command must not be a legacy command so we process it as a new command.
    # If --tag/--manifest are specified with --repository, it's a legacy command handled above.
    # If --tag/--manifest are specified with --image, error out here.
    if tag:
        raise CLIError(
            "The parameter --tag is redundant and deprecated. Please use --image to delete an image."
        )
    if manifest is not None:
        raise CLIError(
            "The parameter --manifest is redundant and deprecated. Please use --image to delete an image."
        )

    _, resource_group_name = validate_managed_registry(cmd.cli_ctx,
                                                       registry_name,
                                                       resource_group_name,
                                                       DELETE_NOT_SUPPORTED)

    if image:
        # If --image is specified, repository/tag/manifest must be empty.
        repository, tag, manifest = _parse_image_name(image, allow_digest=True)

    login_server, username, password = get_access_credentials(
        cli_ctx=cmd.cli_ctx,
        registry_name=registry_name,
        resource_group_name=resource_group_name,
        username=username,
        password=password,
        repository=repository,
        permission='*')

    if tag or manifest:
        manifest = _delete_manifest_confirmation(login_server=login_server,
                                                 username=username,
                                                 password=password,
                                                 repository=repository,
                                                 tag=tag,
                                                 manifest=manifest,
                                                 yes=yes)
        path = '/v2/{}/manifests/{}'.format(repository, manifest)
    else:
        user_confirmation(
            "Are you sure you want to delete the repository '{}' "
            "and all images under it?".format(repository), yes)
        path = '/v2/_acr/{}/repository'.format(repository)

    return request_data_from_registry(http_method='delete',
                                      login_server=login_server,
                                      path=path,
                                      username=username,
                                      password=password)[0]
Example #13
def storage_blob_download_batch(client,
                                source,
                                destination,
                                source_container_name,
                                pattern=None,
                                dryrun=False,
                                progress_callback=None,
                                max_connections=2):
    def _download_blob(blob_service, container, destination_folder,
                       normalized_blob_name, blob_name):
        # TODO: try catch IO exception
        destination_path = os.path.join(destination_folder,
                                        normalized_blob_name)
        destination_folder = os.path.dirname(destination_path)
        if not os.path.exists(destination_folder):
            mkdir_p(destination_folder)

        blob = blob_service.get_blob_to_path(
            container,
            blob_name,
            destination_path,
            max_connections=max_connections,
            progress_callback=progress_callback)
        return blob.name

    source_blobs = collect_blobs(client, source_container_name, pattern)
    blobs_to_download = {}
    for blob_name in source_blobs:
        # remove starting path separator and normalize
        normalized_blob_name = normalize_blob_file_path(None, blob_name)
        if normalized_blob_name in blobs_to_download:
            raise CLIError(
                'Multiple blobs with download path: `{}`. As a solution, use the `--pattern` parameter '
                'to select for a subset of blobs to download OR utilize the `storage blob download` '
                'command instead to download individual blobs.'.format(
                    normalized_blob_name))
        blobs_to_download[normalized_blob_name] = blob_name

    if dryrun:
        logger = get_logger(__name__)
        logger.warning('download action: from %s to %s', source, destination)
        logger.warning('    pattern %s', pattern)
        logger.warning('  container %s', source_container_name)
        logger.warning('      total %d', len(source_blobs))
        logger.warning(' operations')
        for b in source_blobs:
            logger.warning('  - %s', b)
        return []

    # Tell progress reporter to reuse the same hook
    if progress_callback:
        progress_callback.reuse = True

    results = []
    for index, blob_normed in enumerate(blobs_to_download):
        # add blob name and number to progress message
        if progress_callback:
            progress_callback.message = '{}/{}: "{}"'.format(
                index + 1, len(blobs_to_download),
                blobs_to_download[blob_normed])
        results.append(
            _download_blob(client, source_container_name, destination,
                           blob_normed, blobs_to_download[blob_normed]))

    # end progress hook
    if progress_callback:
        progress_callback.hook.end()

    return results
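The duplicate check above exists because normalization is lossy: two distinct blob names can map to the same local path. The same defensive mapping in isolation, with a hypothetical normalize callable:

def build_download_map(blob_names, normalize):
    # Map normalized (local) names back to original blob names, refusing
    # to continue when two blobs would land on the same file.
    mapping = {}
    for name in blob_names:
        local = normalize(name)
        if local in mapping:
            raise ValueError('multiple blobs map to the same path: {}'.format(local))
        mapping[local] = name
    return mapping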
Example #14
def query_work_items(wiql=None,
                     id=None,
                     path=None,
                     organization=None,
                     project=None,
                     detect=None):  # pylint: disable=redefined-builtin
    """Query for a list of work items.
    :param wiql: The query in Work Item Query Language format.  Ignored if --id or --path is specified.
    :type wiql: str
    :param id: The UUID of an existing query.  Required unless --path or --wiql are specified.
    :type id: str
    :param path: The path of an existing query.  Ignored if --id is specified.
    :type path: str
    :param organization: Azure DevOps organization URL. Example: https://dev.azure.com/MyOrganizationName/
    :type organization: str
    :param project: Name or ID of the team project.
    :type project: str
    :param detect: When 'On' unsupplied arg values will be detected from the current working
                   directory's repo.
    :type detect: str
    :rtype: :class:`<WorkItem> <work-item-tracking.v4_0.models.WorkItem>`
    """
    try:
        if wiql is None and path is None and id is None:
            raise CLIError(
                "Either the --wiql, --id, or --path argument must be specified."
            )
        organization, project = resolve_instance_and_project(
            detect=detect,
            organization=organization,
            project=project,
            project_required=False)
        client = get_work_item_tracking_client(organization)
        if id is None and path is not None:
            if project is None:
                raise CLIError(
                    "The --project argument must be specified for this query.")
            query = client.get_query(project=project, query=path)
            id = query.id
        if id is not None:
            query_result = client.query_by_id(id=id)
        else:
            wiql_object = Wiql()
            wiql_object.query = wiql
            query_result = client.query_by_wiql(wiql=wiql_object)
        if query_result.work_items:
            _last_query_result[
                _LAST_QUERY_RESULT_KEY] = query_result  # store query result for table view
            safety_buffer = 100  # a buffer in the max url length to protect going over the limit
            remaining_url_length = 2048 - safety_buffer
            remaining_url_length -= len(organization)
            # following subtracts relative url, the asof parameter and beginning of id and field parameters.
            # asof value length will vary, but this should be the longest possible
            remaining_url_length -=\
                len('/_apis/wit/workItems?ids=&fields=&asOf=2017-11-07T17%3A05%3A34.06699999999999999Z')
            fields = []
            fields_length_in_url = 0
            if query_result.columns:
                for field_ref in query_result.columns:
                    fields.append(field_ref.reference_name)
                    if fields_length_in_url > 0:
                        fields_length_in_url += 3  # add 3 for %2C delimiter
                    fields_length_in_url += len(
                        uri_quote(field_ref.reference_name))
                    if fields_length_in_url > 800:
                        logger.info(
                            "Not retrieving all fields due to max url length.")
                        break
            remaining_url_length -= fields_length_in_url
            max_work_items = 1000
            work_items_batch_size = 200
            current_batch = []
            work_items = []
            work_item_url_length = 0
            for work_item_ref in query_result.work_items:
                if len(work_items) >= max_work_items:
                    logger.info("Only retrieving the first %s work items.",
                                max_work_items)
                    break
                if work_item_url_length > 0:
                    work_item_url_length += 3  # add 3 for %2C delimiter
                work_item_url_length += len(str(work_item_ref.id))
                current_batch.append(work_item_ref.id)

                if remaining_url_length - work_item_url_length <= 0 or len(
                        current_batch) == work_items_batch_size:
                    # url is near max length, go ahead and send first request for details.
                    # url can go over by an id length because we have a safety buffer
                    current_batched_items = client.get_work_items(
                        ids=current_batch,
                        as_of=query_result.as_of,
                        fields=fields)
                    for work_item in current_batched_items:
                        work_items.append(work_item)
                    current_batch = []
                    work_item_url_length = 0

            if current_batch:
                current_batched_items = client.get_work_items(
                    ids=current_batch, as_of=query_result.as_of, fields=fields)
                for work_item in current_batched_items:
                    work_items.append(work_item)
            # put items in the same order they appeared in the initial query results
            work_items = sorted(work_items,
                                key=_get_sort_key_from_last_query_results)
            return work_items
        return None
    except VstsServiceError as ex:
        raise CLIError(ex)
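The batching logic keeps each follow-up request URL under the 2048-character ceiling by subtracting the fixed parts up front and flushing a batch whenever the remaining budget is spent. The same idea in isolation (budget numbers are illustrative):

def batch_ids_by_url_budget(ids, budget, batch_size=200):
    # Yield batches of ids whose %2C-delimited list fits the URL-length budget;
    # like the code above, a batch may exceed it by one id (the safety buffer).
    batch, used = [], 0
    for item in ids:
        cost = len(str(item)) + (3 if batch else 0)   # 3 for the %2C delimiter
        if batch and (used + cost > budget or len(batch) == batch_size):
            yield batch
            batch, used = [], 0
            cost = len(str(item))
        batch.append(item)
        used += cost
    if batch:
        yield batch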
Example #15
    def load_command_table(self, command_loader):
        """Load a command table into our parser."""
        # If we haven't already added a subparser, we
        # better do it.
        cmd_tbl = command_loader.command_table
        grp_tbl = command_loader.command_group_table
        if not self.subparsers:
            sp = self.add_subparsers(dest='_command_package')
            sp.required = True
            self.subparsers = {(): sp}

        for command_name, metadata in cmd_tbl.items():
            subparser = self._get_subparser(command_name.split(), grp_tbl)
            deprecate_info = metadata.deprecate_info
            if not subparser or (deprecate_info and deprecate_info.expired()):
                continue

            command_verb = command_name.split()[-1]
            # To work around http://bugs.python.org/issue9253, we artificially add any new
            # parsers we add to the "choices" section of the subparser.
            subparser.choices[command_verb] = command_verb

            # inject command_module designer's help formatter -- default is HelpFormatter
            fc = metadata.formatter_class or argparse.HelpFormatter

            command_parser = subparser.add_parser(command_verb,
                                                  description=metadata.description,
                                                  parents=self.parents,
                                                  conflict_handler='error',
                                                  help_file=metadata.help,
                                                  formatter_class=fc,
                                                  cli_help=self.cli_help,
                                                  _command_source=metadata.command_source)
            self.subparser_map[command_name] = command_parser
            command_parser.cli_ctx = self.cli_ctx
            command_validator = metadata.validator
            argument_validators = []
            argument_groups = {}
            for _, arg in metadata.arguments.items():
                # don't add deprecated arguments to the parser
                deprecate_info = arg.type.settings.get('deprecate_info', None)
                if deprecate_info and deprecate_info.expired():
                    continue

                if arg.validator:
                    argument_validators.append(arg.validator)
                try:
                    if arg.arg_group:
                        try:
                            group = argument_groups[arg.arg_group]
                        except KeyError:
                            # group not found so create
                            group_name = '{} Arguments'.format(arg.arg_group)
                            group = command_parser.add_argument_group(arg.arg_group, group_name)
                            argument_groups[arg.arg_group] = group
                        param = AzCliCommandParser._add_argument(group, arg)
                    else:
                        param = AzCliCommandParser._add_argument(command_parser, arg)
                except argparse.ArgumentError as ex:
                    raise CLIError("command authoring error for '{}': '{}' {}".format(
                        command_name, ex.args[0].dest, ex.message))  # pylint: disable=no-member
                param.completer = arg.completer
                param.deprecate_info = arg.deprecate_info
                param.preview_info = arg.preview_info
                param.experimental_info = arg.experimental_info
            command_parser.set_defaults(
                func=metadata,
                command=command_name,
                _cmd=metadata,
                _command_validator=command_validator,
                _argument_validators=argument_validators,
                _parser=command_parser)
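Example #16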
def update(
    cmd,
    test_id,
    configuration_file=None,
    attestation_type=None,
    certificate_path=None,
    connection_string=None,
    endorsement_key=None,
    badge_type=None,
    models=None,
    base_url=None,
):
    provisioning = False
    # verify required parameters for various options
    if attestation_type == AttestationType.x509.value and not certificate_path:
        raise CLIError("If attestation type is x509, certificate path is required")
    if attestation_type == AttestationType.tpm.value and not endorsement_key:
        raise CLIError("If attestation type is tpm, endorsement key is required")
    if badge_type == BadgeType.Pnp.value and not models:
        raise CLIError("If badge type is Pnp, models is required")
    if badge_type == BadgeType.IotEdgeCompatible.value and not all(
        [connection_string, attestation_type == AttestationType.connectionString.value]
    ):
        raise CLIError(
            "Connection string is required for Edge Compatible modules testing"
        )
    if badge_type != BadgeType.IotEdgeCompatible.value and (
        connection_string or attestation_type == AttestationType.connectionString.value
    ):
        raise CLIError(
            "Connection string is only available for Edge Compatible modules testing"
        )
    ap = AICSProvider(cmd, base_url)
    if configuration_file:
        test_configuration = _create_from_file(configuration_file)
        return ap.update_test(
            test_id=test_id,
            test_configuration=test_configuration,
            provisioning=provisioning,
        )

    if not any([attestation_type, badge_type, models]):
        raise CLIError(
            "Configuration file, attestation information, or device configuration must be specified"
        )

    test_configuration = ap.show_test(test_id=test_id)

    provisioning_configuration = test_configuration["provisioningConfiguration"]
    registration_id = provisioning_configuration["dpsRegistrationId"]

    # change attestation
    if attestation_type:
        # reset the provisioningConfiguration
        test_configuration["provisioningConfiguration"] = {
            "type": attestation_type,
            "dpsRegistrationId": registration_id,
        }
        provisioning = True
        if attestation_type == AttestationType.symmetricKey.value:
            test_configuration["provisioningConfiguration"][
                "symmetricKeyEnrollmentInformation"
            ] = {}
        elif attestation_type == AttestationType.tpm.value:
            test_configuration["provisioningConfiguration"][
                "tpmEnrollmentInformation"
            ] = {"endorsementKey": endorsement_key}
        elif attestation_type == AttestationType.x509.value:
            test_configuration["provisioningConfiguration"][
                "x509EnrollmentInformation"
            ] = {
                "base64EncodedX509Certificate": _read_certificate_from_file(
                    certificate_path
                )
            }
        elif attestation_type == AttestationType.connectionString.value:
            test_configuration["provisioningConfiguration"][
                "deviceConnectionString"
            ] = connection_string

    # reset PnP models
    badge_config = test_configuration["certificationBadgeConfigurations"]

    if (
        badge_type == BadgeType.Pnp.value
        or badge_config[0]["type"].lower() == BadgeType.Pnp.value.lower()
    ) and models:
        models_array = _process_models_directory(models)
        test_configuration["certificationBadgeConfigurations"] = [
            {"type": BadgeType.Pnp.value, "digitalTwinModelDefinitions": models_array}
        ]
    elif badge_type:
        test_configuration["certificationBadgeConfigurations"] = [{"type": badge_type}]

    return ap.update_test(
        test_id=test_id,
        test_configuration=test_configuration,
        provisioning=provisioning,
    )
Example #17
def export_config(
        cmd,
        destination,
        name=None,
        connection_string=None,
        label=None,
        key=None,
        prefix="",  # prefix to remove
        yes=False,
        skip_features=False,
        # to-file parameters
        path=None,
        format_=None,
        separator=None,
        naming_convention='pascal',
        # to-config-store parameters
        dest_name=None,
        dest_connection_string=None,
        dest_label=None,
        preserve_labels=False,
        # to-app-service parameters
        appservice_account=None):
    src_features = []
    dest_features = []
    dest_kvs = []
    destination = destination.lower()
    format_ = format_.lower() if format_ else None
    naming_convention = naming_convention.lower()

    if destination == 'appconfig':
        if dest_label is not None and preserve_labels:
            raise CLIError(
                "Export failed! Please provide only one of these arguments: '--dest-label' or '--preserve-labels'. See 'az appconfig kv export -h' for examples."
            )
        if preserve_labels:
            # We need dest_label to be the same as label for preview later.
            # This will have no effect on label while writing to config store
            # as we check preserve_labels again before labelling KVs.
            dest_label = label

    # fetch key values from user's configstore
    src_kvs = __read_kv_from_config_store(cmd,
                                          name=name,
                                          connection_string=connection_string,
                                          key=key,
                                          label=label,
                                          prefix_to_remove=prefix)

    # We need to separate KV from feature flags
    __discard_features_from_retrieved_kv(src_kvs)

    if not skip_features:
        # Get all Feature flags with matching label
        if destination == 'file':
            if format_ == 'properties':
                skip_features = True
            else:
                # src_features is a list of FeatureFlag objects
                src_features = list_feature(
                    cmd,
                    feature='*',
                    label=QueryKeyValueCollectionOptions.empty_label
                    if label is None else label,
                    name=name,
                    connection_string=connection_string,
                    all_=True)
        elif destination == 'appconfig':
            # src_features is a list of FeatureFlag objects
            src_features = list_feature(
                cmd,
                feature='*',
                label=QueryKeyValueCollectionOptions.empty_label
                if label is None else label,
                name=name,
                connection_string=connection_string,
                all_=True)

    # if customer needs preview & confirmation
    if not yes:
        if destination == 'appconfig':
            # dest_kvs contains features and KV that match the label
            dest_kvs = __read_kv_from_config_store(
                cmd,
                name=dest_name,
                connection_string=dest_connection_string,
                key=None,
                label=dest_label)
            __discard_features_from_retrieved_kv(dest_kvs)

            if not skip_features:
                # Append all features to dest_features list
                dest_features = list_feature(
                    cmd,
                    feature='*',
                    label=QueryKeyValueCollectionOptions.empty_label
                    if dest_label is None else dest_label,
                    name=dest_name,
                    connection_string=dest_connection_string,
                    all_=True)

        elif destination == 'appservice':
            dest_kvs = __read_kv_from_app_service(
                cmd, appservice_account=appservice_account)

        # generate preview and wait for user confirmation
        need_kv_change = __print_preview(
            old_json=__serialize_kv_list_to_comparable_json_object(
                keyvalues=dest_kvs, level=destination),
            new_json=__serialize_kv_list_to_comparable_json_object(
                keyvalues=src_kvs, level=destination))

        need_feature_change = False
        if src_features:
            need_feature_change = __print_features_preview(
                old_json=__serialize_feature_list_to_comparable_json_object(
                    features=dest_features),
                new_json=__serialize_feature_list_to_comparable_json_object(
                    features=src_features))

        if not need_kv_change and not need_feature_change:
            return

        user_confirmation("Do you want to continue? \n")

    # export to destination
    if destination == 'file':
        __write_kv_and_features_to_file(file_path=path,
                                        key_values=src_kvs,
                                        features=src_features,
                                        format_=format_,
                                        separator=separator,
                                        skip_features=skip_features,
                                        naming_convention=naming_convention)
    elif destination == 'appconfig':
        __write_kv_and_features_to_config_store(
            cmd,
            key_values=src_kvs,
            features=src_features,
            name=dest_name,
            connection_string=dest_connection_string,
            label=dest_label,
            preserve_labels=preserve_labels)
    elif destination == 'appservice':
        __write_kv_to_app_service(cmd,
                                  key_values=src_kvs,
                                  appservice_account=appservice_account)
Example #18
def process_ts_create_or_update_namespace(namespace):
    from azure.cli.core.commands.validators import validate_tags
    validate_tags(namespace)
    if namespace.template_file and not os.path.isfile(namespace.template_file):
        raise CLIError('Please enter a valid file path')
Example #19
def import_config(
        cmd,
        source,
        name=None,
        connection_string=None,
        label=None,
        prefix="",  # prefix to add
        yes=False,
        skip_features=False,
        # from-file parameters
        path=None,
        format_=None,
        separator=None,
        depth=None,
        # from-configstore parameters
        src_name=None,
        src_connection_string=None,
        src_key=None,
        src_label=None,
        preserve_labels=False,
        # from-appservice parameters
        appservice_account=None):
    src_features = []
    dest_features = []
    dest_kvs = []
    source = source.lower()
    format_ = format_.lower() if format_ else None

    # fetch key values from source
    if source == 'file':
        src_kvs = __read_kv_from_file(file_path=path,
                                      format_=format_,
                                      separator=separator,
                                      prefix_to_add=prefix,
                                      depth=depth)

        if not skip_features:
            # src_features is a list of KeyValue objects
            src_features = __read_features_from_file(file_path=path,
                                                     format_=format_)

    elif source == 'appconfig':
        if label is not None and preserve_labels:
            raise CLIError(
                "Import failed! Please provide only one of these arguments: '--label' or '--preserve-labels'. See 'az appconfig kv import -h' for examples."
            )
        if preserve_labels:
            # We need label to be the same as src_label for preview later.
            # This will have no effect on label while writing to config store
            # as we check preserve_labels again before labelling KVs.
            label = src_label

        src_kvs = __read_kv_from_config_store(
            cmd,
            name=src_name,
            connection_string=src_connection_string,
            key=src_key,
            label=src_label,
            prefix_to_add=prefix)
        # We need to separate KV from feature flags
        __discard_features_from_retrieved_kv(src_kvs)

        if not skip_features:
            # Get all Feature flags with matching label
            all_features = __read_kv_from_config_store(
                cmd,
                name=src_name,
                connection_string=src_connection_string,
                key=FeatureFlagConstants.FEATURE_FLAG_PREFIX + '*',
                label=src_label)
            for feature in all_features:
                if feature.content_type == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE:
                    src_features.append(feature)

    elif source == 'appservice':
        src_kvs = __read_kv_from_app_service(
            cmd, appservice_account=appservice_account, prefix_to_add=prefix)

    # if customer needs preview & confirmation
    if not yes:
        # fetch key values from user's configstore
        dest_kvs = __read_kv_from_config_store(
            cmd,
            name=name,
            connection_string=connection_string,
            key=None,
            label=label)
        __discard_features_from_retrieved_kv(dest_kvs)

        # generate preview and wait for user confirmation
        need_kv_change = __print_preview(
            old_json=__serialize_kv_list_to_comparable_json_object(
                keyvalues=dest_kvs, level=source),
            new_json=__serialize_kv_list_to_comparable_json_object(
                keyvalues=src_kvs, level=source))

        need_feature_change = False
        if src_features and not skip_features:
            # Append all features to dest_features list
            all_features = __read_kv_from_config_store(
                cmd,
                name=name,
                connection_string=connection_string,
                key=FeatureFlagConstants.FEATURE_FLAG_PREFIX + '*',
                label=label)
            for feature in all_features:
                if feature.content_type == FeatureFlagConstants.FEATURE_FLAG_CONTENT_TYPE:
                    dest_features.append(feature)

            need_feature_change = __print_features_preview(
                old_json=
                __serialize_features_from_kv_list_to_comparable_json_object(
                    keyvalues=dest_features),
                new_json=
                __serialize_features_from_kv_list_to_comparable_json_object(
                    keyvalues=src_features))

        if not need_kv_change and not need_feature_change:
            return

        user_confirmation("Do you want to continue? \n")

    # append all feature flags to src_kvs list
    src_kvs.extend(src_features)

    # import into configstore
    __write_kv_and_features_to_config_store(
        cmd,
        key_values=src_kvs,
        name=name,
        connection_string=connection_string,
        label=label,
        preserve_labels=preserve_labels)
Example #20
def _validate_template_spec_out(namespace):
    _validate_template_spec(namespace)
    if namespace.output_folder and not os.path.isdir(namespace.output_folder):
        raise CLIError('Please enter a valid output folder')
Example #21
def restore_key(cmd,
                datetime,
                key=None,
                name=None,
                label=None,
                connection_string=None,
                yes=False):

    connection_string = resolve_connection_string(cmd, name, connection_string)
    azconfig_client = AzconfigClient(connection_string)

    if label == '':
        label = QueryKeyValueCollectionOptions.empty_label

    query_option_then = QueryKeyValueCollectionOptions(key_filter=key,
                                                       label_filter=label,
                                                       query_datetime=datetime)
    query_option_now = QueryKeyValueCollectionOptions(key_filter=key,
                                                      label_filter=label)

    try:
        restore_keyvalues = azconfig_client.get_keyvalues(query_option_then)
        current_keyvalues = azconfig_client.get_keyvalues(query_option_now)
        kvs_to_restore, kvs_to_modify, kvs_to_delete = __compare_kvs_for_restore(
            restore_keyvalues, current_keyvalues)

        if not yes:
            need_change = __print_restore_preview(kvs_to_restore,
                                                  kvs_to_modify, kvs_to_delete)
            if need_change is False:
                logger.debug(
                    'Canceling the restore operation based on user selection.')
                return

        keys_to_restore = len(kvs_to_restore) + len(kvs_to_modify) + len(
            kvs_to_delete)
        restored_so_far = 0

        for kv in chain(kvs_to_restore, kvs_to_modify):
            try:
                azconfig_client.set_keyvalue(kv, ModifyKeyValueOptions())
                restored_so_far += 1
            except HTTPException as exception:
                logger.error('Error while setting the keyvalue: %s', kv)
                logger.error('Failed after restoring %d out of %d keys',
                             restored_so_far, keys_to_restore)
                raise CLIError(str(exception))
        for kv in kvs_to_delete:
            try:
                azconfig_client.delete_keyvalue(kv, ModifyKeyValueOptions())
                restored_so_far += 1
            except HTTPException as exception:
                logger.error('Error while deleting the keyvalue: %s', kv)
                logger.error('Failed after restoring %d out of %d keys',
                             restored_so_far, keys_to_restore)
                raise CLIError(str(exception))

        logger.debug('Successfully restored %d out of %d keys',
                     restored_so_far, keys_to_restore)
    except Exception as exception:
        raise CLIError(str(exception))
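The heavy lifting in restore_key is __compare_kvs_for_restore, which partitions key-values by comparing the point-in-time snapshot against the current state. A simplified sketch of that comparison, assuming plain dicts keyed by (key, label) rather than the key-value objects the real private helper works on:

def compare_for_restore(then, now):
    # Present at the restore point but deleted since -> recreate.
    to_restore = [v for k, v in then.items() if k not in now]
    # Present in both but changed since -> overwrite with the old value.
    to_modify = [v for k, v in then.items() if k in now and now[k] != v]
    # Created after the restore point -> delete.
    to_delete = [v for k, v in now.items() if k not in then]
    return to_restore, to_modify, to_delete

then = {('color', None): 'red'}
now = {('color', None): 'blue', ('size', None): 'L'}
print(compare_for_restore(then, now))  # ([], ['red'], ['L'])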
Example #22
def update_extension_index(extensions):
    import re
    import tempfile

    from .util import get_ext_metadata, get_whl_from_url

    ext_repos = get_ext_repo_paths()
    index_path = next((x for x in find_files(ext_repos, 'index.json')
                       if 'azure-cli-extensions' in x), None)
    if not index_path:
        raise CLIError(
            "Unable to find 'index.json' in your extension repos. Have "
            "you cloned 'azure-cli-extensions' and added it to you repo "
            "sources with `azdev extension repo add`?")

    NAME_REGEX = r'.*/([^/]*)-\d+\.\d+\.\d+'

    for extension in extensions:
        # Get extension WHL from URL
        if not extension.endswith('.whl') or not extension.startswith(
                'https:'):
            raise ValueError(
                'usage error: only URL to a WHL file currently supported.')

        # TODO: extend to consider other options
        ext_path = extension

        # Extract the extension name
        try:
            extension_name = re.findall(NAME_REGEX, ext_path)[0]
            extension_name = extension_name.replace('_', '-')
        except IndexError:
            raise ValueError('unable to parse extension name')

        # TODO: Update this!
        extensions_dir = tempfile.mkdtemp()
        ext_dir = tempfile.mkdtemp(dir=extensions_dir)
        whl_cache_dir = tempfile.mkdtemp()
        whl_cache = {}
        ext_file = get_whl_from_url(ext_path, extension_name, whl_cache_dir,
                                    whl_cache)

        with open(index_path, 'r') as infile:
            curr_index = json.loads(infile.read())

        entry = {
            'downloadUrl': ext_path,
            'sha256Digest': _get_sha256sum(ext_file),
            'filename': ext_path.split('/')[-1],
            'metadata': get_ext_metadata(ext_dir, ext_file, extension_name)
        }

        if extension_name not in curr_index['extensions']:
            logger.info("Adding '%s' to index...", extension_name)
            curr_index['extensions'][extension_name] = [entry]
        else:
            logger.info("Updating '%s' in index...", extension_name)
            curr_index['extensions'][extension_name].append(entry)

        # update index and write back to file
        with open(index_path, 'w') as outfile:
            outfile.write(json.dumps(curr_index, indent=4, sort_keys=True))
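For reference, each entry written to index.json by the loop above has this shape (the values here are hypothetical):

entry = {
    'downloadUrl': 'https://example.org/myext-0.1.0-py3-none-any.whl',
    'sha256Digest': '<sha256 hex digest of the wheel>',
    'filename': 'myext-0.1.0-py3-none-any.whl',
    'metadata': {'name': 'myext', 'version': '0.1.0'}  # full wheel metadata in practice
}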
Example #23
def acr_task_create(
        cmd,  # pylint: disable=too-many-locals
        client,
        task_name,
        registry_name,
        context_path,
        file=None,
        cmd_value=None,
        git_access_token=None,
        image_names=None,
        status='Enabled',
        platform=None,
        cpu=DEFAULT_CPU,
        timeout=DEFAULT_TIMEOUT_IN_SEC,
        values=None,
        source_trigger_name='defaultSourceTriggerName',
        commit_trigger_enabled=True,
        pull_request_trigger_enabled=True,
        schedule=None,
        branch='master',
        no_push=False,
        no_cache=False,
        arg=None,
        secret_arg=None,
        set_value=None,
        set_secret=None,
        base_image_trigger_name='defaultBaseimageTriggerName',
        base_image_trigger_enabled=True,
        base_image_trigger_type='Runtime',
        update_trigger_endpoint=None,
        update_trigger_payload_type='Default',
        resource_group_name=None,
        assign_identity=None,
        target=None,
        auth_mode=None):

    registry, resource_group_name = get_registry_by_name(
        cmd.cli_ctx, registry_name, resource_group_name)

    if context_path.lower() == NULL_CONTEXT:
        context_path = None
        commit_trigger_enabled = False
        pull_request_trigger_enabled = False

    if (commit_trigger_enabled
            or pull_request_trigger_enabled) and not git_access_token:
        raise CLIError(
            "If source control trigger is enabled [--commit-trigger-enabled] or "
            "[--pull-request-trigger-enabled] --git-access-token must be provided."
        )

    if cmd_value and file:
        raise CLIError("Task can be created with either "
                       "--cmd myCommand -c /dev/null or "
                       "-f myFile -c myContext, but not both.")

    step = create_task_step(context_path=context_path,
                            cmd=cmd,
                            file=file,
                            image_names=image_names,
                            values=values,
                            git_access_token=git_access_token,
                            set_value=set_value,
                            set_secret=set_secret,
                            no_push=no_push,
                            no_cache=no_cache,
                            arg=arg,
                            secret_arg=secret_arg,
                            target=target,
                            cmd_value=cmd_value,
                            timeout=timeout)

    SourceControlType = cmd.get_models('SourceControlType')
    source_control_type = SourceControlType.visual_studio_team_service.value
    if context_path is not None and 'GITHUB.COM' in context_path.upper():
        source_control_type = SourceControlType.github.value

    source_triggers = None
    source_trigger_events = _get_trigger_event_list_put(
        cmd, commit_trigger_enabled, pull_request_trigger_enabled)
    # if source_trigger_events contains any event types we assume they are enabled
    if source_trigger_events:
        SourceTrigger, SourceProperties, AuthInfo, TriggerStatus = cmd.get_models(
            'SourceTrigger', 'SourceProperties', 'AuthInfo', 'TriggerStatus')
        source_triggers = [
            SourceTrigger(source_repository=SourceProperties(
                source_control_type=source_control_type,
                repository_url=context_path,
                branch=branch,
                source_control_auth_properties=AuthInfo(
                    token=git_access_token,
                    token_type=DEFAULT_TOKEN_TYPE,
                    scope='repo')),
                          source_trigger_events=source_trigger_events,
                          status=TriggerStatus.enabled.value,
                          name=source_trigger_name)
        ]

    timer_triggers = None
    if schedule:
        timer_triggers = build_timers_info(cmd, schedule)

    base_image_trigger = None
    if base_image_trigger_enabled:
        BaseImageTrigger, TriggerStatus = cmd.get_models(
            'BaseImageTrigger', 'TriggerStatus')
        base_image_trigger = BaseImageTrigger(
            base_image_trigger_type=base_image_trigger_type,
            status=TriggerStatus.enabled.value,
            name=base_image_trigger_name,
            update_trigger_endpoint=update_trigger_endpoint,
            update_trigger_payload_type=update_trigger_payload_type)

    platform_os, platform_arch, platform_variant = get_validate_platform(
        cmd, platform)

    Task, PlatformProperties, AgentProperties, TriggerProperties = cmd.get_models(
        'Task', 'PlatformProperties', 'AgentProperties', 'TriggerProperties')

    identity = None
    if assign_identity is not None:
        identity = _build_identities_info(cmd, assign_identity)

    task_create_parameters = Task(
        identity=identity,
        location=registry.location,
        step=step,
        platform=PlatformProperties(os=platform_os,
                                    architecture=platform_arch,
                                    variant=platform_variant),
        status=status,
        timeout=timeout,
        agent_configuration=AgentProperties(cpu=cpu),
        trigger=TriggerProperties(source_triggers=source_triggers,
                                  timer_triggers=timer_triggers,
                                  base_image_trigger=base_image_trigger),
        credentials=get_custom_registry_credentials(cmd=cmd,
                                                    auth_mode=auth_mode))

    try:
        return client.create(resource_group_name=resource_group_name,
                             registry_name=registry_name,
                             task_name=task_name,
                             task_create_parameters=task_create_parameters)
    except ValidationError as e:
        raise CLIError(e)
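The source-control-type selection above reduces to a case-insensitive substring check on the context URL; a standalone sketch:

def detect_source_control(context_path):
    # Anything hosted on github.com is treated as GitHub; everything else
    # falls back to Visual Studio Team Services (Azure DevOps).
    if context_path and 'GITHUB.COM' in context_path.upper():
        return 'Github'
    return 'VisualStudioTeamService'

print(detect_source_control('https://github.com/org/repo.git'))      # Github
print(detect_source_control('https://dev.azure.com/org/_git/repo'))  # VisualStudioTeamService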
Example #24
def update_storage_account(cmd,
                           instance,
                           sku=None,
                           tags=None,
                           custom_domain=None,
                           use_subdomain=None,
                           encryption_services=None,
                           encryption_key_source=None,
                           encryption_key_vault_properties=None,
                           access_tier=None,
                           https_only=None,
                           enable_files_aadds=None,
                           assign_identity=False,
                           bypass=None,
                           default_action=None):
    StorageAccountUpdateParameters, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
        cmd.get_models('StorageAccountUpdateParameters', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
                       'Encryption', 'NetworkRuleSet')

    domain = instance.custom_domain
    if custom_domain is not None:
        domain = CustomDomain(name=custom_domain)
        if use_subdomain is not None:
            domain.use_sub_domain_name = use_subdomain == 'true'

    encryption = instance.encryption
    if not encryption and any((encryption_services, encryption_key_source,
                               encryption_key_vault_properties)):
        encryption = Encryption()
    if encryption_services:
        encryption.services = encryption_services
    if encryption_key_source:
        encryption.key_source = encryption_key_source
    if encryption_key_vault_properties:
        if encryption.key_source != 'Microsoft.Keyvault':
            raise ValueError(
                'Specify `--encryption-key-source=Microsoft.Keyvault` to configure key vault properties.'
            )
        encryption.key_vault_properties = encryption_key_vault_properties

    params = StorageAccountUpdateParameters(
        sku=Sku(name=sku) if sku is not None else instance.sku,
        tags=tags if tags is not None else instance.tags,
        custom_domain=domain,
        encryption=encryption,
        access_tier=AccessTier(access_tier)
        if access_tier is not None else instance.access_tier,
        enable_https_traffic_only=https_only
        if https_only is not None else instance.enable_https_traffic_only)
    if enable_files_aadds is not None:
        AzureFilesIdentityBasedAuthentication = cmd.get_models(
            'AzureFilesIdentityBasedAuthentication')
        params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
            directory_service_options='AADDS' if enable_files_aadds else 'None'
        )
    if assign_identity:
        params.identity = Identity()

    if NetworkRuleSet:
        acl = instance.network_rule_set
        if acl:
            if bypass:
                acl.bypass = bypass
            if default_action:
                acl.default_action = default_action
        elif default_action:
            acl = NetworkRuleSet(bypass=bypass,
                                 virtual_network_rules=None,
                                 ip_rules=None,
                                 default_action=default_action)
        elif bypass:
            from knack.util import CLIError
            raise CLIError(
                'incorrect usage: --default-action ACTION [--bypass SERVICE ...]'
            )
        params.network_rule_set = acl

    return params
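update_storage_account follows the common azure-cli generic-update convention: every omitted flag falls back to the value already on instance, so the returned params describe a full desired state. The pattern in isolation:

def merge(new, old):
    # Keep the existing value whenever the caller did not pass a new one.
    return new if new is not None else old

print(merge(None, 'Standard_LRS'))           # Standard_LRS (flag omitted)
print(merge('Premium_LRS', 'Standard_LRS'))  # Premium_LRS (flag provided)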
Example #25
def update_storage_account(cmd,
                           instance,
                           sku=None,
                           tags=None,
                           custom_domain=None,
                           use_subdomain=None,
                           encryption_services=None,
                           encryption_key_source=None,
                           encryption_key_version=None,
                           encryption_key_name=None,
                           encryption_key_vault=None,
                           access_tier=None,
                           https_only=None,
                           enable_files_aadds=None,
                           assign_identity=False,
                           bypass=None,
                           default_action=None,
                           enable_large_file_share=None,
                           enable_files_adds=None,
                           domain_name=None,
                           net_bios_domain_name=None,
                           forest_name=None,
                           domain_guid=None,
                           domain_sid=None,
                           azure_storage_sid=None,
                           routing_choice=None,
                           publish_microsoft_endpoints=None,
                           publish_internet_endpoints=None,
                           allow_blob_public_access=None,
                           min_tls_version=None,
                           allow_shared_key_access=None):
    StorageAccountUpdateParameters, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
        cmd.get_models('StorageAccountUpdateParameters', 'Sku', 'CustomDomain', 'AccessTier', 'Identity', 'Encryption',
                       'NetworkRuleSet')

    domain = instance.custom_domain
    if custom_domain is not None:
        domain = CustomDomain(name=custom_domain)
        if use_subdomain is not None:
            domain.use_sub_domain_name = use_subdomain == 'true'

    encryption = instance.encryption
    if not encryption and any(
        (encryption_services, encryption_key_source, encryption_key_name,
         encryption_key_vault, encryption_key_version is not None)):
        encryption = Encryption()
    if encryption_services:
        encryption.services = encryption_services

    if encryption_key_source:
        encryption.key_source = encryption_key_source

    KeySource = cmd.get_models('KeySource')
    if encryption.key_source == KeySource.microsoft_keyvault:
        if encryption.key_vault_properties is None:
            KeyVaultProperties = cmd.get_models('KeyVaultProperties')
            encryption.key_vault_properties = KeyVaultProperties()
    else:
        if any([
                encryption_key_name, encryption_key_vault,
                encryption_key_version
        ]):
            raise ValueError(
                'Specify `--encryption-key-source=Microsoft.Keyvault` to configure key vault properties.'
            )
        if encryption.key_vault_properties is not None:
            encryption.key_vault_properties = None

    if encryption_key_name:
        encryption.key_vault_properties.key_name = encryption_key_name
    if encryption_key_vault:
        encryption.key_vault_properties.key_vault_uri = encryption_key_vault
    if encryption_key_version is not None:
        encryption.key_vault_properties.key_version = encryption_key_version

    params = StorageAccountUpdateParameters(
        sku=Sku(name=sku) if sku is not None else instance.sku,
        tags=tags if tags is not None else instance.tags,
        custom_domain=domain,
        encryption=encryption,
        access_tier=AccessTier(access_tier)
        if access_tier is not None else instance.access_tier,
        enable_https_traffic_only=https_only
        if https_only is not None else instance.enable_https_traffic_only)
    AzureFilesIdentityBasedAuthentication = cmd.get_models(
        'AzureFilesIdentityBasedAuthentication')
    if enable_files_aadds is not None:
        if enable_files_aadds:  # enable AADDS
            origin_storage_account = get_storage_account_properties(
                cmd.cli_ctx, instance.id)
            if origin_storage_account.azure_files_identity_based_authentication and \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options == 'AD':
                raise CLIError(
                    "The Storage account already enabled ActiveDirectoryDomainServicesForFile, "
                    "please disable it by running this cmdlets with \"--enable-files-adds false\" "
                    "before enable AzureActiveDirectoryDomainServicesForFile.")
            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='AADDS')
        else:  # Only disable AADDS and keep others unchanged
            origin_storage_account = get_storage_account_properties(
                cmd.cli_ctx, instance.id)
            if not origin_storage_account.azure_files_identity_based_authentication or \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options\
                    == 'AADDS':
                params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                    directory_service_options='None')
            else:
                params.azure_files_identity_based_authentication = \
                    origin_storage_account.azure_files_identity_based_authentication

    if enable_files_adds is not None:
        ActiveDirectoryProperties = cmd.get_models('ActiveDirectoryProperties')
        if enable_files_adds:  # enable AD
            if not (domain_name and net_bios_domain_name and forest_name
                    and domain_guid and domain_sid and azure_storage_sid):
                raise CLIError(
                    "To enable ActiveDirectoryDomainServicesForFile, user must specify all of: "
                    "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                    "--azure_storage_sid arguments in Azure Active Directory Properties Argument group."
                )
            origin_storage_account = get_storage_account_properties(
                cmd.cli_ctx, instance.id)
            if origin_storage_account.azure_files_identity_based_authentication and \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options \
                    == 'AADDS':
                raise CLIError(
                    "The Storage account already enabled AzureActiveDirectoryDomainServicesForFile, "
                    "please disable it by running this cmdlets with \"--enable-files-aadds false\" "
                    "before enable ActiveDirectoryDomainServicesForFile.")
            active_directory_properties = ActiveDirectoryProperties(
                domain_name=domain_name,
                net_bios_domain_name=net_bios_domain_name,
                forest_name=forest_name,
                domain_guid=domain_guid,
                domain_sid=domain_sid,
                azure_storage_sid=azure_storage_sid)
            # TODO: Enabling AD will automatically disable AADDS. Maybe we should throw error message

            params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                directory_service_options='AD',
                active_directory_properties=active_directory_properties)

        else:  # disable AD
            if domain_name or net_bios_domain_name or forest_name or domain_guid or domain_sid or azure_storage_sid:
                raise CLIError(
                    "To disable ActiveDirectoryDomainServicesForFile, user can't specify any of: "
                    "--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
                    "--azure_storage_sid arguments in Azure Active Directory Properties Argument group."
                )
            # Only disable AD and keep others unchanged
            origin_storage_account = get_storage_account_properties(
                cmd.cli_ctx, instance.id)
            if not origin_storage_account.azure_files_identity_based_authentication or \
                    origin_storage_account.azure_files_identity_based_authentication.directory_service_options == 'AD':
                params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
                    directory_service_options='None')
            else:
                params.azure_files_identity_based_authentication = \
                    origin_storage_account.azure_files_identity_based_authentication

    if assign_identity:
        params.identity = Identity(type='SystemAssigned')
    if enable_large_file_share:
        LargeFileSharesState = cmd.get_models('LargeFileSharesState')
        params.large_file_shares_state = LargeFileSharesState("Enabled")
    if NetworkRuleSet:
        acl = instance.network_rule_set
        if acl:
            if bypass:
                acl.bypass = bypass
            if default_action:
                acl.default_action = default_action
        elif default_action:
            acl = NetworkRuleSet(bypass=bypass,
                                 virtual_network_rules=None,
                                 ip_rules=None,
                                 default_action=default_action)
        elif bypass:
            raise CLIError(
                'incorrect usage: --default-action ACTION [--bypass SERVICE ...]'
            )
        params.network_rule_set = acl

    if hasattr(params, 'routing_preference') and any([
            routing_choice, publish_microsoft_endpoints,
            publish_internet_endpoints
    ]):
        if params.routing_preference is None:
            RoutingPreference = cmd.get_models('RoutingPreference')
            params.routing_preference = RoutingPreference()
        if routing_choice is not None:
            params.routing_preference.routing_choice = routing_choice
        if publish_microsoft_endpoints is not None:
            params.routing_preference.publish_microsoft_endpoints = publish_microsoft_endpoints
        if publish_internet_endpoints is not None:
            params.routing_preference.publish_internet_endpoints = publish_internet_endpoints

    if allow_blob_public_access is not None:
        params.allow_blob_public_access = allow_blob_public_access
    if min_tls_version:
        params.minimum_tls_version = min_tls_version

    if allow_shared_key_access is not None:
        params.allow_shared_key_access = allow_shared_key_access

    return params
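Taken together, the AADDS and AD branches above enforce mutual exclusion between the two directory services. A condensed decision summary (a sketch of the branch logic, not exhaustive of every preserved-state case):

# --enable-files-aadds true  -> directory_service_options = 'AADDS' (error if 'AD' is already enabled)
# --enable-files-aadds false -> 'None' if currently 'AADDS' or unset; otherwise the existing setting is kept
# --enable-files-adds true   -> 'AD' (requires all six domain arguments; error if 'AADDS' is already enabled)
# --enable-files-adds false  -> 'None' if currently 'AD' or unset (domain arguments are rejected); otherwise kept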
Example #26
def cli_cosmosdb_update(client,
                        resource_group_name,
                        account_name,
                        locations=None,
                        tags=None,
                        default_consistency_level=None,
                        max_staleness_prefix=None,
                        max_interval=None,
                        ip_range_filter=None,
                        enable_automatic_failover=None,
                        capabilities=None,
                        enable_virtual_network=None,
                        virtual_network_rules=None,
                        enable_multiple_write_locations=None,
                        disable_key_based_metadata_write_access=None,
                        enable_public_network=None,
                        enable_analytical_storage=None,
                        backup_interval=None,
                        backup_retention=None):
    """Update an existing Azure Cosmos DB database account. """
    existing = client.get(resource_group_name, account_name)

    update_consistency_policy = False
    if max_interval is not None or \
            max_staleness_prefix is not None or \
            default_consistency_level is not None:
        update_consistency_policy = True

    if max_staleness_prefix is None:
        max_staleness_prefix = existing.consistency_policy.max_staleness_prefix

    if max_interval is None:
        max_interval = existing.consistency_policy.max_interval_in_seconds

    if default_consistency_level is None:
        default_consistency_level = existing.consistency_policy.default_consistency_level

    consistency_policy = None
    if update_consistency_policy:
        consistency_policy = ConsistencyPolicy(default_consistency_level=default_consistency_level,
                                               max_staleness_prefix=max_staleness_prefix,
                                               max_interval_in_seconds=max_interval)

    public_network_access = None
    if enable_public_network is not None:
        public_network_access = 'Enabled' if enable_public_network else 'Disabled'

    backup_policy = None
    if backup_interval is not None or backup_retention is not None:
        if isinstance(existing.backup_policy, PeriodicModeBackupPolicy):
            periodic_mode_properties = PeriodicModeProperties(
                backup_interval_in_minutes=backup_interval,
                backup_retention_interval_in_hours=backup_retention
            )
            backup_policy = existing.backup_policy
            backup_policy.periodic_mode_properties = periodic_mode_properties
        else:
            raise CLIError(
                'backup-interval and backup-retention can only be set for accounts with a periodic backup policy.')

    params = DatabaseAccountUpdateParameters(
        locations=locations,
        tags=tags,
        consistency_policy=consistency_policy,
        ip_rules=ip_range_filter,
        is_virtual_network_filter_enabled=enable_virtual_network,
        enable_automatic_failover=enable_automatic_failover,
        capabilities=capabilities,
        virtual_network_rules=virtual_network_rules,
        enable_multiple_write_locations=enable_multiple_write_locations,
        disable_key_based_metadata_write_access=disable_key_based_metadata_write_access,
        public_network_access=public_network_access,
        enable_analytical_storage=enable_analytical_storage,
        backup_policy=backup_policy)
    async_docdb_update = client.update(resource_group_name, account_name, params)
    docdb_account = async_docdb_update.result()
    docdb_account = client.get(resource_group_name, account_name)  # Workaround
    return docdb_account
Example #27
def update_export_configuration(cmd,
                                client,
                                application,
                                resource_group_name,
                                export_id,
                                record_types=None,
                                dest_account=None,
                                dest_container=None,
                                dest_sas=None,
                                dest_sub_id=None,
                                dest_type=None,
                                is_enabled=None):
    from .vendored_sdks.mgmt_applicationinsights.models import ApplicationInsightsComponentExportRequest

    export_config_request = ApplicationInsightsComponentExportRequest(
        record_types=', '.join(record_types) if record_types else None,
        is_enabled=is_enabled,
    )

    if dest_sub_id is not None or dest_account is not None or dest_container is not None:
        if not dest_sas:
            raise CLIError(
                "The SAS token for the destination storage container required."
            )
        pre_config = get_export_configuration(client, application,
                                              resource_group_name, export_id)
        if dest_sub_id is None:
            dest_sub_id = pre_config.destination_storage_subscription_id
        if dest_account is None:
            if dest_sub_id != pre_config.destination_storage_subscription_id:
                raise CLIError(
                    "The destination storage account name required.")
            dest_account = pre_config.storage_name
        if dest_container is None:
            dest_container = pre_config.container_name
        if dest_type is None:
            dest_type = 'Blob'

        sc_op = get_mgmt_service_client(
            cmd.cli_ctx,
            ResourceType.MGMT_STORAGE,
            subscription_id=dest_sub_id).storage_accounts
        storage_accounts = list(sc_op.list())
        storage_account = None
        for x in storage_accounts:
            if x.name.lower() == dest_account.lower():
                storage_account = x
                break

        if not storage_account:
            raise CLIError(
                "Destination storage account {} does not exist, "
                "use 'az storage account list' to get storage account list".
                format(dest_account))

        dest_address = getattr(storage_account.primary_endpoints,
                               dest_type.lower(), '')
        dest_address += dest_container + '?' + dest_sas
        export_config_request.destination_type = dest_type
        export_config_request.destination_address = dest_address
        export_config_request.destination_storage_subscription_id = dest_sub_id
        export_config_request.destination_storage_location_id = storage_account.primary_location
        export_config_request.destination_account_id = storage_account.id

    return client.update(resource_group_name, application, export_id,
                         export_config_request)
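The destination address handed to Application Insights is simply the storage endpoint for the chosen destination type, followed by the container and the SAS query string. With hypothetical values:

primary_blob_endpoint = 'https://myaccount.blob.core.windows.net/'  # storage_account.primary_endpoints.blob
dest_container = 'exports'
dest_sas = 'sv=2020-08-04&sig=...'
dest_address = primary_blob_endpoint + dest_container + '?' + dest_sas
print(dest_address)  # https://myaccount.blob.core.windows.net/exports?sv=2020-08-04&sig=...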
Example #28
def _create_database_account(client,
                             resource_group_name,
                             account_name,
                             locations=None,
                             tags=None,
                             kind=DatabaseAccountKind.global_document_db.value,
                             default_consistency_level=None,
                             max_staleness_prefix=100,
                             max_interval=5,
                             ip_range_filter=None,
                             enable_automatic_failover=None,
                             capabilities=None,
                             enable_virtual_network=None,
                             virtual_network_rules=None,
                             enable_multiple_write_locations=None,
                             disable_key_based_metadata_write_access=None,
                             key_uri=None,
                             enable_public_network=None,
                             enable_analytical_storage=None,
                             enable_free_tier=None,
                             server_version=None,
                             is_restore_request=None,
                             restore_source=None,
                             restore_timestamp=None,
                             backup_policy_type=None,
                             backup_interval=None,
                             backup_retention=None,
                             databases_to_restore=None,
                             arm_location=None):
    """Create a new Azure Cosmos DB database account."""
    consistency_policy = None
    if default_consistency_level is not None:
        consistency_policy = ConsistencyPolicy(default_consistency_level=default_consistency_level,
                                               max_staleness_prefix=max_staleness_prefix,
                                               max_interval_in_seconds=max_interval)

    if not locations:
        locations = [Location(location_name=arm_location, failover_priority=0, is_zone_redundant=False)]

    public_network_access = None
    if enable_public_network is not None:
        public_network_access = 'Enabled' if enable_public_network else 'Disabled'

    api_properties = {}
    if kind == DatabaseAccountKind.mongo_db.value:
        api_properties['ServerVersion'] = server_version
    elif server_version is not None:
        raise CLIError('server-version is a valid argument only when kind is MongoDB.')

    create_mode = 'Default'
    if is_restore_request is not None:
        create_mode = 'Restore' if is_restore_request else 'Default'

    properties = None
    if create_mode == 'Restore':
        if restore_source is None or restore_timestamp is None:
            raise CLIError('restore-source and restore-timestamp should be provided for a restore request.')
        restore_parameters = RestoreParameters(
            restore_mode='PointInTime',
            restore_source=restore_source,
            restore_timestamp_in_utc=restore_timestamp
        )
        if databases_to_restore is not None:
            logger.debug(databases_to_restore)
            restore_parameters.databases_to_restore = databases_to_restore
        logger.debug(restore_parameters)
        properties = RestoreReqeustDatabaseAccountCreateUpdateProperties(
            locations=locations,
            consistency_policy=consistency_policy,
            ip_rules=ip_range_filter,
            is_virtual_network_filter_enabled=enable_virtual_network,
            enable_automatic_failover=enable_automatic_failover,
            capabilities=capabilities,
            virtual_network_rules=virtual_network_rules,
            enable_multiple_write_locations=enable_multiple_write_locations,
            disable_key_based_metadata_write_access=disable_key_based_metadata_write_access,
            key_vault_key_uri=key_uri,
            public_network_access=public_network_access,
            api_properties=api_properties,
            enable_analytical_storage=enable_analytical_storage,
            enable_free_tier=enable_free_tier,
            restore_parameters=restore_parameters
        )
    else:
        properties = DefaultRequestDatabaseAccountCreateUpdateProperties(
            locations=locations,
            consistency_policy=consistency_policy,
            ip_rules=ip_range_filter,
            is_virtual_network_filter_enabled=enable_virtual_network,
            enable_automatic_failover=enable_automatic_failover,
            capabilities=capabilities,
            virtual_network_rules=virtual_network_rules,
            enable_multiple_write_locations=enable_multiple_write_locations,
            disable_key_based_metadata_write_access=disable_key_based_metadata_write_access,
            key_vault_key_uri=key_uri,
            public_network_access=public_network_access,
            api_properties=api_properties,
            enable_analytical_storage=enable_analytical_storage,
            enable_free_tier=enable_free_tier
        )

    backup_policy = None
    if backup_policy_type is not None:
        if backup_policy_type.lower() == 'periodic':
            backup_policy = PeriodicModeBackupPolicy()
            # Only attach periodic-mode properties when an interval or
            # retention was actually provided; otherwise the policy keeps
            # its defaults.
            if backup_interval is not None or backup_retention is not None:
                periodic_mode_properties = PeriodicModeProperties(
                    backup_interval_in_minutes=backup_interval,
                    backup_retention_interval_in_hours=backup_retention
                )
                backup_policy.periodic_mode_properties = periodic_mode_properties
        elif backup_policy_type.lower() == 'continuous':
            backup_policy = ContinuousModeBackupPolicy()
        else:
            raise CLIError('backup-policy-type argument is invalid.')
        properties.backup_policy = backup_policy
    elif backup_interval is not None or backup_retention is not None:
        backup_policy = PeriodicModeBackupPolicy()
        periodic_mode_properties = PeriodicModeProperties(
            backup_interval_in_minutes=backup_interval,
            backup_retention_interval_in_hours=backup_retention
        )
        backup_policy.periodic_mode_properties = periodic_mode_properties
        properties.backup_policy = backup_policy

    params = DatabaseAccountCreateUpdateParameters(
        location=arm_location,
        properties=properties,
        tags=tags,
        kind=kind)

    async_docdb_create = client.create_or_update(resource_group_name, account_name, params)
    docdb_account = async_docdb_create.result()
    docdb_account = client.get(resource_group_name, account_name)  # Workaround
    return docdb_account
Example #29
def request_data_from_registry(http_method,
                               login_server,
                               path,
                               username,
                               password,
                               result_index=None,
                               json_payload=None,
                               data_payload=None,
                               params=None,
                               retry_times=3,
                               retry_interval=5):
    if http_method not in ALLOWED_HTTP_METHOD:
        raise ValueError("Allowed http method: {}".format(ALLOWED_HTTP_METHOD))

    if json_payload and data_payload:
        raise ValueError(
            "Only one of json_payload and data_payload can be specified.")

    if http_method in ['get', 'delete'] and (json_payload or data_payload):
        raise ValueError(
            "An empty payload is required for http method: {}".format(
                http_method))

    if http_method in ['patch', 'put'] and not (json_payload or data_payload):
        raise ValueError(
            "A non-empty payload is required for http method: {}".format(
                http_method))

    url = 'https://{}{}'.format(login_server, path)
    headers = get_authorization_header(username, password)

    for i in range(retry_times):
        error_message = None
        try:
            response = requests.request(
                method=http_method,
                url=url,
                headers=headers,
                params=params,
                json=json_payload,
                data=data_payload,
                verify=(not should_disable_connection_verify()))
            log_registry_response(response)

            if response.status_code == 200:
                result = response.json()[result_index] if result_index else response.json()
                next_link = response.headers.get('link')
                return result, next_link
            elif response.status_code in (201, 202):
                result = None
                try:
                    result = response.json()[result_index] if result_index else response.json()
                except ValueError:
                    logger.debug('Response is empty or is not valid JSON.')
                return result, None
            elif response.status_code == 204:
                return None, None
            elif response.status_code == 401:
                raise CLIError(
                    parse_error_message('Authentication required.', response))
            elif response.status_code == 404:
                raise CLIError(
                    parse_error_message('The requested data does not exist.',
                                        response))
            else:
                raise Exception(
                    parse_error_message(
                        'Could not {} the requested data.'.format(http_method),
                        response))
        except CLIError:
            raise
        except Exception as e:  # pylint: disable=broad-except
            error_message = str(e)
            logger.debug('Retrying %s with exception %s', i + 1, error_message)
            time.sleep(retry_interval)

    raise CLIError(error_message)
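Because request_data_from_registry returns (result, next_link), callers can page through registry results by following the Link header until it is exhausted. A hedged sketch (the registry name and credentials are hypothetical, and the Link header is assumed to use the standard '<url>; rel="next"' form):

results = []
path = '/v2/_catalog'
while path:
    page, next_link = request_data_from_registry(
        'get', 'myregistry.azurecr.io', path, 'username', 'password',
        result_index='repositories')
    results.extend(page or [])
    # e.g. next_link == '</v2/_catalog?last=repo42&n=100>; rel="next"'
    path = next_link.split(';')[0].strip('<> ') if next_link else None
print(results)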
Example #30
def validate_move_cost(move_cost):
    """Validate move cost argument"""

    if move_cost not in [None, 'Zero', 'Low', 'Medium', 'High']:
        raise CLIError('Invalid move cost specified')
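Usage of the validator is straightforward: it accepts None (flag omitted) and the four documented values, and raises CLIError for anything else (assuming knack's CLIError is in scope, as in the function above):

validate_move_cost(None)      # OK: flag omitted
validate_move_cost('Medium')  # OK
try:
    validate_move_cost('max')
except CLIError as e:
    print(e)                  # Invalid move cost specified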