def update_protection_for_vm(cmd,
                             client,
                             resource_group_name,
                             vault_name,
                             container_name,
                             item_name,
                             diskslist=None,
                             disk_list_setting=None,
                             exclude_all_data_disks=None):
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, "AzureIaasVM", "VM")
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")
    return custom.update_protection_for_vm(cmd, client, resource_group_name,
                                           vault_name, item, diskslist,
                                           disk_list_setting,
                                           exclude_all_data_disks)
def get_action(self, values, option_string):  # pylint: disable=no-self-use
    try:
        properties = defaultdict(list)
        for (k, v) in (x.split('=', 1) for x in values):
            properties[k].append(v)
        properties = dict(properties)
    except ValueError:
        raise ValidationError('{} [KEY=VALUE ...]'.format(option_string))
    d = {}
    for k in properties:
        kl = k.lower()
        v = properties[k]
        if kl == 'streams':
            d['streams'] = v
        elif kl == 'facility-names':
            d['facility_names'] = v
        elif kl == 'log-levels':
            d['log_levels'] = v
        elif kl == 'name':
            d['name'] = v[0]
    return d
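# A minimal, self-contained sketch (hypothetical; not part of the original source)
# of what get_action above produces: repeated KEY=VALUE tokens are grouped per key,
# and only 'name' collapses to a single value. Since the method never touches
# `self` (note the no-self-use pylint hint), it can be exercised directly here.
from collections import defaultdict  # needed by get_action; imported at module level in the original

sample_values = ['streams=stdout', 'streams=stderr', 'facility-names=auth', 'name=mySetting']
print(get_action(None, sample_values, '--settings'))
# -> {'streams': ['stdout', 'stderr'], 'facility_names': ['auth'], 'name': 'mySetting'}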
Example #3
def validate_service_state(linker_parameters):
    '''Validate whether user provided params are applicable to service state
    '''
    target_type = None
    target_id = linker_parameters.get('target_service', dict()).get('id')
    for target, resource_id in TARGET_RESOURCES.items():
        matched = re.match(get_resource_regex(resource_id), target_id, re.IGNORECASE)
        if matched:
            target_type = target

    if target_type == RESOURCE.AppConfig and linker_parameters.get('auth_info', dict()).get('auth_type') == 'secret':
        segments = parse_resource_id(target_id)
        rg = segments.get('resource_group')
        name = segments.get('name')
        if not rg or not name:
            return

        output = run_cli_cmd('az appconfig show -g {} -n {}'.format(rg, name))
        if output and output.get('disableLocalAuth') is True:
            raise ValidationError('Secret as the auth type is not allowed when local auth is disabled for the '
                                  'specified App Configuration store; you may use a service principal or managed identity instead.')
Example #4
def rewrite_blob(cmd, client, source_url, encryption_scope=None, **kwargs):
    src_properties = client.from_blob_url(source_url).get_blob_properties()
    BlobType = cmd.get_models('_models#BlobType',
                              resource_type=ResourceType.DATA_STORAGE_BLOB)
    if src_properties.blob_type != BlobType.BlockBlob:
        from azure.cli.core.azclierror import ValidationError
        raise ValidationError(
            "Currently only block blobs are supported. The source blob type is {}.".format(
                src_properties.blob_type))
    src_content_length = src_properties.size
    # the source fits within the single-call copy limit, so copy it in one request
    if src_content_length <= 5000 * 1024 * 1024:
        return client.upload_blob_from_url(source_url=source_url,
                                           overwrite=True,
                                           encryption_scope=encryption_scope,
                                           destination_lease=kwargs.pop(
                                               'lease', None),
                                           **kwargs)

    block_length = 4000 * 1024 * 1024  # using max block size
    block_ids = get_block_ids(src_content_length, block_length)

    copyoffset = 0
    for block_id in block_ids:
        block_size = block_length
        if copyoffset + block_size > src_content_length:
            block_size = src_content_length - copyoffset
        client.stage_block_from_url(block_id=block_id,
                                    source_url=source_url,
                                    source_offset=copyoffset,
                                    source_length=block_size,
                                    encryption_scope=encryption_scope)
        copyoffset += block_size
    response = client.commit_block_list(
        block_list=block_ids,
        content_settings=src_properties.content_settings,
        metadata=src_properties.metadata,
        encryption_scope=encryption_scope,
        **kwargs)
    return transform_response_with_bytearray(response)
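# Hedged sketch (an assumption, not the actual Azure CLI helper): get_block_ids used
# above has to yield one ID per staged chunk, and the Blob service requires block IDs
# to be base64-encoded strings of equal length within a single blob.
import base64

def get_block_ids_sketch(content_length, block_length):
    # ceiling division without floating point
    block_count = (content_length + block_length - 1) // block_length
    return [base64.b64encode('block-{:032d}'.format(i).encode()).decode()
            for i in range(block_count)]

# e.g. a 9000 MiB source copied in 4000 MiB blocks is staged as 3 blocks:
# len(get_block_ids_sketch(9000 * 1024 * 1024, 4000 * 1024 * 1024)) == 3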
Example #5
def _ensure_location_allowed(cmd, location, resource_provider, resource_type):
    providers_client = None
    try:
        providers_client = providers_client_factory(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx))

        if providers_client is not None:
            resource_types = getattr(providers_client.get(resource_provider), 'resource_types', [])
            res_locations = []
            for res in resource_types:
                if res and getattr(res, 'resource_type', "") == resource_type:
                    res_locations = getattr(res, 'locations', [])

            res_locations = [res_loc.lower().replace(" ", "").replace("(", "").replace(")", "")
                             for res_loc in res_locations if res_loc.strip()]

            location_formatted = location.lower().replace(" ", "")
            if location_formatted not in res_locations:
                raise ValidationError("Location '{}' is not currently supported. To get list of supported locations, run `az provider show -n {} --query \"resourceTypes[?resourceType=='{}'].locations\"`".format(
                    location, resource_provider, resource_type))
    except ValidationError as ex:
        raise ex
    except Exception:  # pylint: disable=broad-except
        pass
Example #6
def set_mediaservice_trusted_storage(client, resource_group_name, account_name, storage_auth, storage_account_id=None,
                                     system_assigned=False, user_assigned=None):
    ams: MediaService = client.get(resource_group_name, account_name)\
        if resource_group_name else client.get_by_subscription(account_name)
    if storage_auth == 'ManagedIdentity' and storage_account_id is None:
        error_msg = 'Please specify a storage account id for the storage account whose identity you would like to set'
        raise ValidationError(error_msg)

    for storage_account in ams.storage_accounts:
        if storage_auth == 'ManagedIdentity':
            if storage_account.id.lower() == storage_account_id.lower():
                storage_account.identity = ResourceIdentity(use_system_assigned_identity=system_assigned,
                                                            user_assigned_identity=user_assigned)
        else:
            storage_account.identity = None

    media_service = MediaService(name=ams.name, location=ams.location, key_delivery=ams.key_delivery,
                                 identity=ams.identity, encryption=ams.encryption,
                                 storage_accounts=ams.storage_accounts, storage_authentication=storage_auth,
                                 public_network_access=ams.public_network_access)

    return client.create_or_update(resource_group_name, account_name, media_service)
Example #7
def _set_up_defaults(
    cmd,
    name,
    resource_group_name,
    logs_customer_id,
    location,
    resource_group: "ResourceGroup",
    env: "ContainerAppEnvironment",
    app: "ContainerApp",
):
    # If no RG passed in and a singular app exists with the same name, get its env and rg
    _get_app_env_and_group(cmd, name, resource_group, env, location)

    # If no env passed in (and not creating a new RG), then try getting an env by location / log analytics ID
    _get_env_and_group_from_log_analytics(cmd, resource_group_name, env,
                                          resource_group, logs_customer_id,
                                          location)

    # try to set RG name by env name
    if env.name and not resource_group.name:
        if not location:
            env_list = [
                e for e in list_managed_environments(cmd=cmd)
                if e["name"] == env.name
            ]
        else:
            env_list = [
                e for e in list_managed_environments(cmd=cmd)
                if e["name"] == env.name and e["location"] == location
            ]
        if len(env_list) == 1:
            resource_group.name = parse_resource_id(
                env_list[0]["id"])["resource_group"]
        if len(env_list) > 1:
            raise ValidationError(
                f"There are multiple environments with name {env.name} on the subscription. "
                "Please specify which resource group your Containerapp environment is in."
            )

    # get ACR details from --image, if possible
    _get_acr_from_image(cmd, app)
Example #8
def update_policy_for_item(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           container_name,
                           item_name,
                           policy_name,
                           workload_type=None,
                           backup_management_type=None):

    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    policy = show_policy(protection_policies_cf(cmd.cli_ctx),
                         resource_group_name, vault_name, policy_name)
    custom_help.validate_policy(policy)

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.update_policy_for_item(cmd, client, resource_group_name,
                                             vault_name, item, policy)

    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.update_policy_for_item(cmd, client,
                                                 resource_group_name,
                                                 vault_name, item, policy)

    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.update_policy_for_item(cmd, client,
                                                resource_group_name,
                                                vault_name, item, policy)
    return None
Example #9
def backup_now(cmd,
               client,
               resource_group_name,
               vault_name,
               item_name,
               retain_until=None,
               container_name=None,
               backup_management_type=None,
               workload_type=None,
               backup_type=None,
               enable_compression=False):

    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    if retain_until is None:
        retain_until = datetime.now(timezone.utc) + timedelta(days=30)

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.backup_now(cmd, client, resource_group_name, vault_name,
                                 item, retain_until)

    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.backup_now(cmd, client, resource_group_name,
                                     vault_name, item, retain_until)

    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.backup_now(cmd, client, resource_group_name,
                                    vault_name, item, retain_until,
                                    backup_type, enable_compression)
    return None
Example #10
def add_mediaservice_secondary_storage(client, resource_group_name, account_name, storage_account,
                                       system_assigned=False, user_assigned=None):
    ams = client.get(resource_group_name, account_name)
    if (system_assigned is False and user_assigned is None and ams.storage_authentication == 'ManagedIdentity'):
        error_msg = 'Please specify either a system-assigned identity or a user-assigned identity'
        raise ValidationError(error_msg)

    storage_accounts_filtered = list(filter(lambda s: storage_account in s.id, ams.storage_accounts))

    storage_account_secondary = StorageAccount(type='Secondary', id=storage_account)
    if ams.storage_authentication == 'ManagedIdentity':
        storage_account_secondary.identity = ResourceIdentity(use_system_assigned_identity=system_assigned,
                                                              user_assigned_identity=user_assigned)
    if not storage_accounts_filtered:
        ams.storage_accounts.append(storage_account_secondary)

    media_service = MediaService(name=ams.name, location=ams.location, key_delivery=ams.key_delivery,
                                 identity=ams.identity, encryption=ams.encryption,
                                 storage_accounts=ams.storage_accounts,
                                 storage_authentication=ams.storage_authentication,
                                 public_network_access=ams.public_network_access)

    return client.create_or_update(resource_group_name, account_name, media_service)
Example #11
def create_ase_inbound_services(cmd, resource_group_name, name, subnet, vnet_name=None, skip_dns=False):
    ase_client = _get_ase_client_factory(cmd.cli_ctx)
    ase = ase_client.get(resource_group_name, name)
    if not ase:
        raise ResourceNotFoundError("App Service Environment '{}' not found.".format(name))

    if ase.internal_load_balancing_mode == 'None':
        raise ValidationError('Private DNS Zone is not relevant for External ASE.')

    if ase.kind.lower() == 'asev3':
        # pending SDK update (ase_client.get_ase_v3_networking_configuration(resource_group_name, name))
        raise CommandNotFoundError('create-inbound-services is currently not supported for ASEv3.')

    ase_vip_info = ase_client.get_vip_info(resource_group_name, name)
    inbound_ip_address = ase_vip_info.internal_ip_address
    inbound_subnet_id = _validate_subnet_id(cmd.cli_ctx, subnet, vnet_name, resource_group_name)
    inbound_vnet_id = _get_vnet_id_from_subnet(cmd.cli_ctx, inbound_subnet_id)

    if not skip_dns:
        _ensure_ase_private_dns_zone(cmd.cli_ctx, resource_group_name=resource_group_name, name=name,
                                     inbound_vnet_id=inbound_vnet_id, inbound_ip_address=inbound_ip_address)
    else:
        logger.warning('Parameter --skip-dns is deprecated.')
Example #12
def show_kustomization(
    cmd,
    client,
    resource_group_name,
    cluster_type,
    cluster_name,
    name,
    kustomization_name,
):

    validate_cc_registration(cmd)

    current_config = show_config(cmd, client, resource_group_name,
                                 cluster_type, cluster_name, name)
    if kustomization_name not in current_config.kustomizations:
        raise ValidationError(
            consts.SHOW_KUSTOMIZATION_NO_EXIST_ERROR.format(
                kustomization_name),
            consts.SHOW_KUSTOMIZATION_NO_EXIST_HELP,
        )
    return {
        kustomization_name: current_config.kustomizations[kustomization_name]
    }
Example #13
def restore_azurefileshare(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           rp_name,
                           container_name,
                           item_name,
                           restore_mode,
                           resolve_conflict,
                           target_storage_account=None,
                           target_file_share=None,
                           target_folder=None):
    backup_management_type = "AzureStorage"
    workload_type = "AzureFileShare"
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    return custom_afs.restore_AzureFileShare(
        cmd,
        client,
        resource_group_name,
        vault_name,
        rp_name,
        item,
        restore_mode,
        resolve_conflict,
        "FullShareRestore",
        target_storage_account_name=target_storage_account,
        target_file_share_name=target_file_share,
        target_folder=target_folder)
Example #14
def list_recovery_points(cmd,
                         client,
                         resource_group_name,
                         vault_name,
                         container_name,
                         item_name,
                         backup_management_type=None,
                         workload_type=None,
                         start_date=None,
                         end_date=None):

    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.list_recovery_points(client, resource_group_name,
                                           vault_name, item, start_date,
                                           end_date)

    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.list_recovery_points(client, resource_group_name,
                                               vault_name, item, start_date,
                                               end_date)
    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.list_wl_recovery_points(cmd, client,
                                                 resource_group_name,
                                                 vault_name, item, start_date,
                                                 end_date)

    return None
Example #15
def get_pool_manager(url):
    proxies = urllib.request.getproxies()
    bypass_proxy = urllib.request.proxy_bypass(
        urllib.parse.urlparse(url).hostname)

    if 'https' in proxies and not bypass_proxy:
        proxy = urllib.parse.urlparse(proxies['https'])

        if proxy.username and proxy.password:
            proxy_headers = urllib3.util.make_headers(
                proxy_basic_auth='{0}:{1}'.format(proxy.username,
                                                  proxy.password))
            logger.debug('Setting proxy-authorization header for basic auth')
        else:
            proxy_headers = None

        logger.info('Using proxy for app service tunnel connection')
        http = urllib3.ProxyManager(proxy.geturl(),
                                    proxy_headers=proxy_headers)
    else:
        http = urllib3.PoolManager()

    if should_disable_connection_verify():
        http.connection_pool_kw['cert_reqs'] = 'CERT_NONE'
    else:
        http.connection_pool_kw['cert_reqs'] = 'CERT_REQUIRED'
        if REQUESTS_CA_BUNDLE in os.environ:
            ca_bundle_file = os.environ[REQUESTS_CA_BUNDLE]
            logger.debug("Using CA bundle file at '%s'.", ca_bundle_file)
            if not os.path.isfile(ca_bundle_file):
                raise ValidationError(
                    'REQUESTS_CA_BUNDLE environment variable is specified with an invalid file path'
                )
        else:
            ca_bundle_file = certifi.where()
        http.connection_pool_kw['ca_certs'] = ca_bundle_file
    return http
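# Hypothetical usage (the URL is illustrative only): the returned urllib3 pool manager
# honors the proxy, certificate-verification, and CA-bundle settings configured above.
http = get_pool_manager('https://contoso.scm.azurewebsites.net')
response = http.request('GET', 'https://contoso.scm.azurewebsites.net')
logger.debug('Tunnel probe returned HTTP %s', response.status)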
Example #16
def resolve_deleted_store_metadata(cmd,
                                   config_store_name,
                                   resource_group_name=None,
                                   location=None):
    resource_group = None
    metadata_location = None
    try:
        client = cf_configstore(cmd.cli_ctx)
        deleted_stores = client.list_deleted()
        for deleted_store in deleted_stores:
            # configuration_store_id has a fixed structure:
            # /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.AppConfiguration/configurationStores/<configuration_store_name>
            metadata_resource_group = deleted_store.configuration_store_id.split('/')[4]
            # match the name and additionally match resource group and location if available.
            if deleted_store.name.lower() == config_store_name.lower() and (
                    resource_group_name is None or resource_group_name.lower()
                    == metadata_resource_group.lower()
            ) and (location is None or location == deleted_store.location):
                if metadata_location is None:
                    resource_group = metadata_resource_group
                    metadata_location = deleted_store.location
                else:
                    # It should reach here only when the user has provided only name. If they provide either location or resource group, we should be able to uniquely find the store.
                    raise ValidationError(
                        'Multiple configuration stores found with name {}.'.
                        format(config_store_name))
    except HttpResponseError as ex:
        raise AzureResponseError(
            "Failed to get the list of deleted App Configuration stores for the current user. Make sure that the account that logged in has sufficient permissions to access the App Configuration store.\n{}"
            .format(str(ex)))

    if resource_group is not None and metadata_location is not None:
        return resource_group, metadata_location

    raise ResourceNotFoundError(
        "Failed to find the deleted App Configuration store '{}'. If you think that the store name is correct, please validate all your input parameters again."
        .format(config_store_name))
Example #17
def list_protectable_items(cmd,
                           client,
                           resource_group_name,
                           vault_name,
                           workload_type,
                           container_name=None):
    container_uri = None
    if container_name:
        if custom_help.is_native_name(container_name):
            container_uri = container_name
        else:
            container_client = backup_protection_containers_cf(cmd.cli_ctx)
            container = show_container(cmd, container_client, container_name,
                                       resource_group_name, vault_name,
                                       "AzureWorkload")
            custom_help.validate_container(container)
            if isinstance(container, list):
                raise ValidationError("""
                Multiple containers with same Friendly Name found. Please give native names instead.
                """)
            container_uri = container.name
    return custom_wl.list_protectable_items(client, resource_group_name,
                                            vault_name, workload_type,
                                            container_uri)
Example #18
def show_deployed_object(
    cmd,
    client,
    resource_group_name,
    cluster_type,
    cluster_name,
    name,
    object_name,
    object_namespace,
    object_kind,
):
    validate_cc_registration(cmd)
    current_config = show_config(cmd, client, resource_group_name,
                                 cluster_type, cluster_name, name)

    for status in current_config.statuses:
        if (status.name == object_name and status.namespace == object_namespace
                and status.kind == object_kind):
            return status
    raise ValidationError(
        consts.SHOW_DEPLOYED_OBJECT_NO_EXIST_ERROR.format(
            object_name, object_namespace, object_kind, name),
        consts.SHOW_DEPLOYED_OBJECT_NO_EXIST_HELP,
    )
Example #19
def application_configuration_service_git_add(cmd, client, service, resource_group,
                                              name, patterns, uri, label,
                                              search_paths=None,
                                              username=None,
                                              password=None,
                                              host_key=None,
                                              host_key_algorithm=None,
                                              private_key=None,
                                              strict_host_key_checking=None,
                                              no_wait=False):
    repo = models.ConfigurationServiceGitRepository(name=name, patterns=patterns, uri=uri, label=label)
    repo = _replace_repo_with_input(repo, patterns, uri, label, search_paths, username, password,
                                    host_key, host_key_algorithm, private_key, strict_host_key_checking)

    acs_resource = _get_or_default_acs_resource(client, resource_group, service)
    repos = acs_resource.properties.settings.git_property.repositories
    if next((r for r in repos if r.name == name), None) is not None:
        raise ValidationError("Repo '{}' already exists.".format(name))
    repos.append(repo)
    acs_resource.properties.settings.git_property.repositories = repos

    _validate_acs_settings(client, resource_group, service, acs_resource.properties.settings)

    logger.warning("[2/2] Adding item to Application Configuration Service settings, (this operation can take a while to complete)")
    return sdk_no_wait(no_wait, client.configuration_services.begin_create_or_update, resource_group, service, DEFAULT_NAME, acs_resource)
Example #20
def _create_subnet_delegation(cmd, nw_client, resource_client,
                              delegation_service_name, resource_group,
                              vnet_name, subnet_name, location, server_name,
                              subnet_address_pref, yes):
    Delegation, Subnet = cmd.get_models(
        'Delegation', 'Subnet', resource_type=ResourceType.MGMT_NETWORK)
    delegation = Delegation(name=delegation_service_name,
                            service_name=delegation_service_name)

    # subnet not exist
    if not check_existence(resource_client,
                           subnet_name,
                           resource_group,
                           'Microsoft.Network',
                           'subnets',
                           parent_name=vnet_name,
                           parent_type='virtualNetworks'):
        subnet_result = Subnet(name=subnet_name,
                               location=location,
                               address_prefix=subnet_address_pref,
                               delegations=[delegation])

        vnet = nw_client.virtual_networks.get(resource_group, vnet_name)
        vnet_subnet_prefixes = [
            subnet.address_prefix for subnet in vnet.subnets
        ]
        if subnet_address_pref in vnet_subnet_prefixes:
            raise ValidationError(
                "The Subnet (default) prefix {} is already taken by another Subnet in the Vnet. "
                "Please provide a different prefix for the --subnet-prefix parameter."
                .format(subnet_address_pref))

        user_confirmation(
            "Do you want to create a new Subnet {0} in resource group {1}?".format(
                subnet_name, resource_group),
            yes=yes)
        logger.warning('Creating new Subnet "%s" in resource group "%s"',
                       subnet_name, resource_group)
        subnet = nw_client.subnets.begin_create_or_update(
            resource_group, vnet_name, subnet_name, subnet_result).result()
    # subnet exist
    else:
        subnet = nw_client.subnets.get(resource_group, vnet_name, subnet_name)
        logger.warning('Using existing Subnet "%s" in resource group "%s"',
                       subnet_name, resource_group)
        if subnet_address_pref not in (DEFAULT_SUBNET_ADDRESS_PREFIX,
                                       subnet.address_prefix):
            logger.warning(
                "The prefix of the subnet you provided does not match the --subnet-prefix value %s. Using current prefix %s",
                subnet_address_pref, subnet.address_prefix)

        # Add Delegation if not delegated already
        if not subnet.delegations:
            logger.warning('Adding "%s" delegation to the existing subnet %s.',
                           delegation_service_name, subnet_name)
            subnet.delegations = [delegation]
            subnet = nw_client.subnets.begin_create_or_update(
                resource_group, vnet_name, subnet_name, subnet).result()
        else:
            for delgtn in subnet.delegations:
                if delgtn.service_name != delegation_service_name:
                    raise CLIError(
                        "Cannot use a subnet with existing delegations other than {}"
                        .format(delegation_service_name))

    return subnet
Example #21
def prepare_private_network(cmd, resource_group_name, server_name, vnet,
                            subnet, location, delegation_service_name,
                            vnet_address_pref, subnet_address_pref, yes):

    nw_client = network_client_factory(cmd.cli_ctx)
    resource_client = resource_client_factory(cmd.cli_ctx)

    # Handle vnet and subnet prefix
    if (vnet_address_pref is not None and subnet_address_pref is None) or \
       (vnet_address_pref is None and subnet_address_pref is not None):
        raise ValidationError(
            "You need to provide both Vnet address prefix and Subnet address prefix."
        )
    if vnet_address_pref is None:
        vnet_address_pref = DEFAULT_VNET_ADDRESS_PREFIX
    if subnet_address_pref is None:
        subnet_address_pref = DEFAULT_SUBNET_ADDRESS_PREFIX

    # pylint: disable=too-many-nested-blocks
    if subnet is not None and vnet is None:
        if not is_valid_resource_id(subnet):
            raise ValidationError(
                "Incorrectly formed Subnet ID. If you are providing only --subnet (not --vnet), the Subnet parameter should be in resource ID format."
            )
        if 'child_name_1' not in parse_resource_id(subnet):
            raise ValidationError(
                "Incorrectly formed Subnet ID. Check if the Subnet ID is in the right format."
            )
        logger.warning(
            "You have supplied a Subnet ID. Verifying its existence...")
        subnet_result = process_private_network_with_id_input(
            cmd, subnet, nw_client, resource_client, server_name, location,
            delegation_service_name, vnet_address_pref, subnet_address_pref,
            yes)
    elif subnet is None and vnet is not None:
        if is_valid_resource_id(vnet):
            logger.warning(
                "You have supplied a Vnet ID. Verifying its existence...")
            subnet_result = process_private_network_with_id_input(
                cmd, vnet, nw_client, resource_client, server_name, location,
                delegation_service_name, vnet_address_pref,
                subnet_address_pref, yes)
        elif _is_resource_name(vnet) and is_valid_resource_name(vnet):
            logger.warning(
                "You have supplied a Vnet name. Verifying its existence...")
            subnet_result = _create_vnet_subnet_delegation(
                cmd, nw_client, resource_client, delegation_service_name,
                resource_group_name, vnet, 'Subnet' + server_name, location,
                server_name, vnet_address_pref, subnet_address_pref, yes)
        else:
            raise ValidationError("Incorrectly formed Vnet ID or Vnet name")
    elif subnet is not None and vnet is not None:
        if _is_resource_name(vnet) and _is_resource_name(subnet):
            logger.warning(
                "You have supplied a Vnet and Subnet name. Verifying their existence..."
            )

            subnet_result = _create_vnet_subnet_delegation(
                cmd, nw_client, resource_client, delegation_service_name,
                resource_group_name, vnet, subnet, location, server_name,
                vnet_address_pref, subnet_address_pref, yes)

        else:
            raise ValidationError(
                "If you pass both --vnet and --subnet, consider passing names instead of IDs. "
                "If you want to use an existing subnet, please provide only the subnet ID (not the vnet ID)."
            )
    else:
        return None

    return subnet_result.id
Example #22
def validate_mysql_ha_enabled(server):
    if server.storage_profile.storage_autogrow == "Disabled":
        raise ValidationError(
            "You need to enable auto grow first to enable high availability.")
Example #23
def validate_vnet_location(vnet, location):
    if vnet.location != location:
        raise ValidationError(
            "The location of the Vnet should be the same as the location of the server."
        )
Example #24
def get_github_access_token(cmd, scope_list=None):  # pylint: disable=unused-argument
    if scope_list:
        for scope in scope_list:
            if scope not in GITHUB_OAUTH_SCOPES:
                raise ValidationError(
                    "Requested GitHub OAuth scope is invalid")
        scope_list = ' '.join(scope_list)

    authorize_url = 'https://github.com/login/device/code'
    authorize_url_data = {
        'scope': scope_list,
        'client_id': GITHUB_OAUTH_CLIENT_ID
    }

    import requests
    import time
    from urllib.parse import parse_qs

    try:
        response = requests.post(authorize_url, data=authorize_url_data)
        parsed_response = parse_qs(response.content.decode('ascii'))

        device_code = parsed_response['device_code'][0]
        user_code = parsed_response['user_code'][0]
        verification_uri = parsed_response['verification_uri'][0]
        interval = int(parsed_response['interval'][0])
        expires_in_seconds = int(parsed_response['expires_in'][0])
        logger.warning(
            'Please navigate to %s and enter the user code %s to activate and '
            'retrieve your GitHub personal access token', verification_uri,
            user_code)

        timeout = time.time() + expires_in_seconds
        logger.warning("Waiting up to '%s' minutes for activation",
                       str(expires_in_seconds // 60))

        confirmation_url = 'https://github.com/login/oauth/access_token'
        confirmation_url_data = {
            'client_id': GITHUB_OAUTH_CLIENT_ID,
            'device_code': device_code,
            'grant_type': 'urn:ietf:params:oauth:grant-type:device_code'
        }

        pending = True
        while pending:
            time.sleep(interval)

            if time.time() > timeout:
                raise UnclassifiedUserFault(
                    'Activation did not happen in time. Please try again')

            confirmation_response = requests.post(confirmation_url,
                                                  data=confirmation_url_data)
            parsed_confirmation_response = parse_qs(
                confirmation_response.content.decode('ascii'))

            if 'error' in parsed_confirmation_response and parsed_confirmation_response[
                    'error'][0]:
                if parsed_confirmation_response['error'][0] == 'slow_down':
                    interval += 5  # if a slow_down error is received, add 5 seconds to the minimum polling interval
                elif parsed_confirmation_response['error'][
                        0] != 'authorization_pending':
                    pending = False

            if 'access_token' in parsed_confirmation_response and parsed_confirmation_response[
                    'access_token'][0]:
                return parsed_confirmation_response['access_token'][0]
    except Exception as e:
        raise CLIInternalError(
            'Error: {}. Please try again, or retrieve a personal access token from the GitHub website.'
            .format(e))

    raise UnclassifiedUserFault(
        'Activation did not happen in time. Please try again')
Example #25
def validation_error(self, message):
    az_error = ValidationError(message)
    az_error.print_error()
    az_error.send_telemetry()
    self.exit(2)
Example #26
def validate_public_cloud(cmd):
    from azure.cli.core.cloud import AZURE_PUBLIC_CLOUD
    if cmd.cli_ctx.cloud.name != AZURE_PUBLIC_CLOUD.name:
        raise ValidationError(
            'This command is not yet supported on sovereign clouds.')
Example #27
def validate_georestore_network(source_server_object, public_access, vnet, subnet):
    if source_server_object.network.public_network_access == 'Disabled' and not any((public_access, vnet, subnet)):
        raise ValidationError("Please specify network parameters if you are geo-restoring a private access server. "
                              "Run 'az mysql flexible-server goe-restore --help' command to see examples")
Example #28
def validate_and_format_restore_point_in_time(restore_time):
    try:
        return parser.parse(restore_time)
    except Exception:
        raise ValidationError("The restore point in time value has an incorrect date format. "
                              "Please use ISO format, e.g. 2021-10-22T00:08:23+00:00.")
Example #29
def list_recovery_points(cmd,
                         client,
                         resource_group_name,
                         vault_name,
                         container_name,
                         item_name,
                         backup_management_type=None,
                         workload_type=None,
                         start_date=None,
                         end_date=None,
                         use_secondary_region=None,
                         is_ready_for_move=None,
                         target_tier=None,
                         tier=None,
                         recommended_for_archive=None):

    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name,
                     container_name, item_name, backup_management_type,
                     workload_type, use_secondary_region)
    custom_help.validate_item(item)

    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    if (use_secondary_region
            and (is_ready_for_move is not None or target_tier is not None
                 or recommended_for_archive is not None)):
        raise MutuallyExclusiveArgumentError(
            "Archive based filtering is not supported in secondary region.")

    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.list_recovery_points(cmd, client, resource_group_name,
                                           vault_name, item, start_date,
                                           end_date, use_secondary_region,
                                           is_ready_for_move, target_tier,
                                           tier, recommended_for_archive)

    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.list_recovery_points(cmd, client,
                                               resource_group_name, vault_name,
                                               item, start_date, end_date,
                                               use_secondary_region,
                                               is_ready_for_move, target_tier,
                                               tier, recommended_for_archive)

    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.list_wl_recovery_points(
            cmd,
            client,
            resource_group_name,
            vault_name,
            item,
            start_date,
            end_date,
            is_ready_for_move=is_ready_for_move,
            target_tier=target_tier,
            use_secondary_region=use_secondary_region,
            tier=tier,
            recommended_for_archive=recommended_for_archive)

    return None
Example #30
def prepare_private_dns_zone(cmd, database_engine, resource_group, server_name,
                             private_dns_zone, subnet_id, location):
    from azure.mgmt.privatedns.models import SubResource
    dns_suffix_client = cf_postgres_flexible_private_dns_zone_suffix_operations(
        cmd.cli_ctx, '_')

    private_dns_zone_suffix = dns_suffix_client.execute(database_engine)
    vnet_sub, vnet_rg, vnet_name, _ = get_id_components(subnet_id)
    private_dns_client = private_dns_client_factory(cmd.cli_ctx)
    private_dns_link_client = private_dns_link_client_factory(cmd.cli_ctx)
    resource_client = resource_client_factory(cmd.cli_ctx)

    vnet_id = resource_id(subscription=vnet_sub,
                          resource_group=vnet_rg,
                          namespace='Microsoft.Network',
                          type='virtualNetworks',
                          name=vnet_name)
    nw_client = network_client_factory(cmd.cli_ctx, subscription_id=vnet_sub)
    vnet = nw_client.virtual_networks.get(vnet_rg, vnet_name)
    from azure.mgmt.privatedns.models import VirtualNetworkLink

    if private_dns_zone is None:
        private_dns_zone = server_name + '.' + private_dns_zone_suffix

    elif not _check_if_resource_name(private_dns_zone) and is_valid_resource_id(private_dns_zone):
        subscription, resource_group, private_dns_zone, _ = get_id_components(private_dns_zone)
        if private_dns_zone[-len(private_dns_zone_suffix):] != private_dns_zone_suffix:
            raise ValidationError(
                'The suffix for the private DNS zone should be "{}"'.format(private_dns_zone_suffix))

        if subscription != get_subscription_id(cmd.cli_ctx):
            logger.warning(
                'The provided private DNS zone ID is in a different subscription from the server.'
            )
            resource_client = resource_client_factory(
                cmd.cli_ctx, subscription_id=subscription)
            private_dns_client = private_dns_client_factory(
                cmd.cli_ctx, subscription_id=subscription)
            private_dns_link_client = private_dns_link_client_factory(
                cmd.cli_ctx, subscription_id=subscription)
        _resource_group_verify_and_create(resource_client, resource_group,
                                          location)

    elif _check_if_resource_name(private_dns_zone) and not is_valid_resource_name(private_dns_zone) \
            or not _check_if_resource_name(private_dns_zone) and not is_valid_resource_id(private_dns_zone):
        raise ValidationError(
            "Check if the private DNS zone name or ID is in the correct format.")

    elif _check_if_resource_name(private_dns_zone) and \
            private_dns_zone[-len(private_dns_zone_suffix):] != private_dns_zone_suffix:
        raise ValidationError(
            'The suffix for the private DNS zone should be "{}"'.format(private_dns_zone_suffix))

    link = VirtualNetworkLink(location='global',
                              virtual_network=SubResource(id=vnet.id))
    link.registration_enabled = True

    if not check_existence(resource_client, private_dns_zone, resource_group,
                           'Microsoft.Network', 'privateDnsZones'):
        logger.warning('Creating a private DNS zone %s...', private_dns_zone)
        from azure.mgmt.privatedns.models import PrivateZone
        private_zone = private_dns_client.create_or_update(
            resource_group_name=resource_group,
            private_zone_name=private_dns_zone,
            parameters=PrivateZone(location='global'),
            if_none_match='*').result()

        private_dns_link_client.create_or_update(
            resource_group_name=resource_group,
            private_zone_name=private_dns_zone,
            virtual_network_link_name=vnet_name + '-link',
            parameters=link,
            if_none_match='*').result()
    else:
        logger.warning('Using the existing private DNS zone %s',
                       private_dns_zone)
        private_zone = private_dns_client.get(
            resource_group_name=resource_group,
            private_zone_name=private_dns_zone)

        # list the existing virtual network links on the private DNS zone
        virtual_links = private_dns_link_client.list(
            resource_group_name=resource_group,
            private_zone_name=private_dns_zone)

        link_exist_flag = False
        for virtual_link in virtual_links:
            if virtual_link.virtual_network.id == vnet_id:
                link_exist_flag = True
                break

        if not link_exist_flag:
            private_dns_link_client.create_or_update(
                resource_group_name=resource_group,
                private_zone_name=private_dns_zone,
                virtual_network_link_name=vnet_name + '-link',
                parameters=link,
                if_none_match='*').result()

    return private_zone.id