Example 1
def validate_duration(arg_name: str, duration: str):
    if duration and not re.match(consts.VALID_DURATION_REGEX, duration):
        raise InvalidArgumentValueError(
            consts.INVALID_DURATION_ERROR.format(arg_name),
            consts.INVALID_DURATION_HELP)
def validate_crg_id(namespace):
    if namespace.crg_id:
        from msrestazure.tools import is_valid_resource_id
        if not is_valid_resource_id(namespace.crg_id):
            raise InvalidArgumentValueError("--crg-id is not a valid Azure resource ID.")
def validate_only_equals_operator(args):
    if len(args) < 2:
        raise RequiredArgumentMissingError('Filter Argument Error: at least 2 values are required')
    if args[0].lower() not in ['equals', 'notequals']:
        raise InvalidArgumentValueError('Filter Argument Error: operator must be one of the following: Equals, NotEquals')
def validate_nat_gateway_managed_outbound_ip_count(namespace):
    """validate NAT gateway profile managed outbound IP count"""
    if namespace.nat_gateway_managed_outbound_ip_count is not None:
        if namespace.nat_gateway_managed_outbound_ip_count < 1 or namespace.nat_gateway_managed_outbound_ip_count > 16:
            raise InvalidArgumentValueError("--nat-gateway-managed-outbound-ip-count must be in the range [1,16]")
def validate_nodepool_id(namespace):
    from msrestazure.tools import is_valid_resource_id
    if not is_valid_resource_id(namespace.nodepool_id):
        raise InvalidArgumentValueError("--nodepool-id is not a valid Azure resource ID.")
Example 6
def validate_sdn(namespace):
    if namespace.software_defined_network is not None:
        if namespace.software_defined_network not in ['OVNKubernetes', 'OpenshiftSDN']:
            raise InvalidArgumentValueError(
                f"Invalid --software-defined-network '{namespace.software_defined_network}'.")
Example 7
def validate_worker_count(namespace):
    if namespace.worker_count:
        if namespace.worker_count < 3:
            raise InvalidArgumentValueError('--worker-count must be greater than or equal to 3.')
def _ensure_default_log_analytics_workspace_for_monitoring(
        cmd, subscription_id, cluster_resource_group_name, cluster_name):
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    # pylint: disable=too-many-locals,too-many-statements

    azurecloud_location_to_oms_region_code_map = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2"
    }
    azurecloud_region_to_oms_region_map = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "southcentralus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "eastus",
        "northeurope": "northeurope",
        "southafricanorth": "westeurope",
        "southafricawest": "westeurope",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2"
    }

    # mapping for azure china cloud
    # currently Log Analytics is supported only in the China East 2 region
    azurechina_location_to_oms_region_code_map = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    azurechina_region_to_oms_region_map = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }

    # mapping for azure us government cloud
    azurefairfax_location_to_oms_region_code_map = {"usgovvirginia": "USGV"}
    azurefairfax_region_to_oms_region_map = {"usgovvirginia": "usgovvirginia"}

    cluster_location = ''
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    cluster_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Kubernetes' \
        '/connectedClusters/{2}'.format(subscription_id, cluster_resource_group_name, cluster_name)
    try:
        resource = resources.get_by_id(cluster_resource_id,
                                       '2020-01-01-preview')
        cluster_location = resource.location.lower()
    except CloudError as ex:
        raise ex

    cloud_name = cmd.cli_ctx.cloud.name.lower()
    workspace_region = "eastus"
    workspace_region_code = "EUS"

    # sanity check that locations and clouds match.
    if ((cloud_name == 'azurecloud'
         and azurechina_region_to_oms_region_map.get(cluster_location, False))
            or
        (cloud_name == 'azurecloud' and
         azurefairfax_region_to_oms_region_map.get(cluster_location, False))):
        raise InvalidArgumentValueError(
            'Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
            .format(cluster_location))

    if ((cloud_name == 'azurechinacloud'
         and azurecloud_region_to_oms_region_map.get(cluster_location, False))
            or
        (cloud_name == 'azurechinacloud' and
         azurefairfax_region_to_oms_region_map.get(cluster_location, False))):
        raise InvalidArgumentValueError(
            'Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
            .format(cluster_location))

    if ((cloud_name == 'azureusgovernment'
         and azurecloud_region_to_oms_region_map.get(cluster_location, False))
            or
        (cloud_name == 'azureusgovernment' and
         azurechina_region_to_oms_region_map.get(cluster_location, False))):
        raise InvalidArgumentValueError(
            'Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
            .format(cluster_location))

    if cloud_name == 'azurecloud':
        workspace_region = azurecloud_region_to_oms_region_map.get(
            cluster_location, "eastus")
        workspace_region_code = azurecloud_location_to_oms_region_code_map.get(
            workspace_region, "EUS")
    elif cloud_name == 'azurechinacloud':
        workspace_region = azurechina_region_to_oms_region_map.get(
            cluster_location, "chinaeast2")
        workspace_region_code = azurechina_location_to_oms_region_code_map.get(
            workspace_region, "EAST2")
    elif cloud_name == 'azureusgovernment':
        workspace_region = azurefairfax_region_to_oms_region_map.get(
            cluster_location, "usgovvirginia")
        workspace_region_code = azurefairfax_location_to_oms_region_code_map.get(
            workspace_region, "USGV")
    else:
        logger.error("AKS Monitoring addon not supported in cloud : %s",
                     cloud_name)

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
        subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)

    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id,
                                           '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group,
                                         {'location': workspace_region})

    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(
        default_workspace_resource_id, '2015-11-01-preview',
        default_workspace_params)

    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id
def _get_container_insights_settings(cmd, cluster_resource_group_name,
                                     cluster_name, configuration_settings,
                                     configuration_protected_settings,
                                     is_ci_extension_type):

    subscription_id = get_subscription_id(cmd.cli_ctx)
    workspace_resource_id = ''

    if configuration_settings is not None:
        if 'loganalyticsworkspaceresourceid' in configuration_settings:
            configuration_settings['logAnalyticsWorkspaceResourceID'] = \
                configuration_settings.pop('loganalyticsworkspaceresourceid')

        if 'logAnalyticsWorkspaceResourceID' in configuration_settings:
            workspace_resource_id = configuration_settings[
                'logAnalyticsWorkspaceResourceID']

    workspace_resource_id = workspace_resource_id.strip()

    if configuration_protected_settings is not None:
        if 'proxyEndpoint' in configuration_protected_settings:
            # the currently supported proxy endpoint format is http(s)://<user>:<pwd>@<proxyhost>:<port>
            # do some basic validation since the ci agent does the complete validation
            proxy = configuration_protected_settings['proxyEndpoint'].strip().lower()
            proxy_parts = proxy.split('://')
            if (not proxy) or (not proxy.startswith('http://') and not proxy.startswith('https://')) or \
                    (len(proxy_parts) != 2):
                raise InvalidArgumentValueError(
                    'proxyEndpoint url should be in this format: http(s)://<user>:<pwd>@<proxyhost>:<port>'
                )
            logger.info(
                "successfully validated proxyEndpoint url; passing proxy endpoint to extension"
            )
            configuration_protected_settings[
                'omsagent.proxy'] = configuration_protected_settings[
                    'proxyEndpoint']

    if not workspace_resource_id:
        workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
            cmd, subscription_id, cluster_resource_group_name, cluster_name)
    else:
        if not is_valid_resource_id(workspace_resource_id):
            raise InvalidArgumentValueError(
                '{} is not a valid Azure resource ID.'.format(
                    workspace_resource_id))

    if is_ci_extension_type:
        _ensure_container_insights_for_monitoring(
            cmd, workspace_resource_id).result()

    # extract subscription ID, resource group, and workspace name from workspace_resource_id
    parsed = parse_resource_id(workspace_resource_id)
    workspace_sub_id, workspace_rg_name, workspace_name = \
        parsed["subscription"], parsed["resource_group"], parsed["name"]

    log_analytics_client = cf_log_analytics(cmd.cli_ctx, workspace_sub_id)
    log_analytics_workspace = log_analytics_client.workspaces.get(
        workspace_rg_name, workspace_name)
    if not log_analytics_workspace:
        raise InvalidArgumentValueError(
            'Failed to retrieve workspace {}'.format(workspace_name))

    shared_keys = log_analytics_client.shared_keys.get_shared_keys(
        workspace_rg_name, workspace_name)
    if not shared_keys:
        raise InvalidArgumentValueError(
            'Failed to retrieve shared keys for workspace {}'.format(
                workspace_name))
    configuration_protected_settings[
        'omsagent.secret.wsid'] = log_analytics_workspace.customer_id
    configuration_settings[
        'logAnalyticsWorkspaceResourceID'] = workspace_resource_id
    configuration_protected_settings[
        'omsagent.secret.key'] = shared_keys.primary_shared_key
    # set the domain for the ci agent for non azure public clouds
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        configuration_settings['omsagent.domain'] = 'opinsights.azure.cn'
    elif cloud_name.lower() == 'azureusgovernment':
        configuration_settings['omsagent.domain'] = 'opinsights.azure.us'
    elif cloud_name.lower() == 'usnat':
        configuration_settings[
            'omsagent.domain'] = 'opinsights.azure.eaglex.ic.gov'
    elif cloud_name.lower() == 'ussec':
        configuration_settings[
            'omsagent.domain'] = 'opinsights.azure.microsoft.scloud'
def _deploy_path_mutual_exclusive(args):
    valued_args = [x for x in args if x]
    if len(valued_args) > 1:
        raise InvalidArgumentValueError(
            'At most one of --artifact-path, --source-path, --container-image must be provided.'
        )
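Presumably the call site passes the raw values of the three mutually exclusive options; a quick sketch of the contract (the argument values are made up):

_deploy_path_mutual_exclusive(['app.jar', None, None])     # one path set: passes
_deploy_path_mutual_exclusive(['app.jar', './src', None])  # two paths set: raises InvalidArgumentValueError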
def _ensure_deployment_exist(client, resource_group, service, app, deployment):
    try:
        return client.deployments.get(resource_group, service, app, deployment)
    except CloudError:
        raise InvalidArgumentValueError(
            'Deployment {} not found under app {}'.format(deployment, app))
def _get_active_deployment(client, resource_group, service, name):
    try:
        deployments = client.deployments.list(resource_group, service, name)
        return next(iter(x for x in deployments if x.properties.active), None)
    except ResourceNotFoundError:
        raise InvalidArgumentValueError('App {} not found'.format(name))
Example 13
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None, use_secondary_region=None):

    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)
    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                    vault_name, container_uri, item_uri, recovery_point_id,
                                                    workload_type, "AzureWorkload", use_secondary_region)

        if recovery_point is None:
            raise InvalidArgumentValueError("""
            Specified recovery point not found. Please check the recovery config file
            or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        if recovery_point.tier_type == 'VaultArchive':
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
                parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)

            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)

            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group, target_vault_name,
                                                 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id', target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name, container_uri,
                                                      item_uri, recovery_point_id, aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  recovery_point_id, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Example 14
    def _get_persistent_disk_size(self, enable_persistent_storage, **_):
        if enable_persistent_storage:
            raise InvalidArgumentValueError(
                'Enterprise tier Spring-Cloud instance does not support --enable-persistent-storage'
            )
Example 15
def _validate_resource_group_name(name, message_name):
    if not name:
        return
    matchObj = match(r'^[-\w\._\(\)]+$', name)
    if matchObj is None:
        raise InvalidArgumentValueError('--{0} must conform to the following pattern: \'^[-\\w\\._\\(\\)]+$\'.'.format(message_name))
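For reference, a small sketch of what the pattern above accepts and rejects (the sample names are hypothetical):

from re import match

for candidate in ('my-rg_1.(test)', 'bad name!'):
    print(candidate, bool(match(r'^[-\w\._\(\)]+$', candidate)))
# my-rg_1.(test) True
# bad name! False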
Example 16
def _validate_route_config_exist(client, resource_group, service, name):
    route_configs = client.gateway_route_configs.list(resource_group, service,
                                                      DEFAULT_NAME)
    if name in (route_config.name for route_config in list(route_configs)):
        raise InvalidArgumentValueError("Route config " + name +
                                        " already exists")
Example 17
def validate_instance_count(namespace):
    if namespace.instance_count is not None:
        if namespace.instance_count < 1:
            raise InvalidArgumentValueError("--instance-count must be greater than 0")
    def Create(self, cmd, client, resource_group_name, cluster_name, name,
               cluster_type, extension_type, scope, auto_upgrade_minor_version,
               release_train, version, target_namespace, release_namespace,
               configuration_settings, configuration_protected_settings,
               configuration_settings_file,
               configuration_protected_settings_file):
        if scope == 'namespace':
            raise InvalidArgumentValueError(
                "Invalid scope '{}'.  This extension can be installed "
                "only at 'cluster' scope.".format(scope))
        if not release_namespace:
            release_namespace = self.DEFAULT_RELEASE_NAMESPACE
        scope_cluster = ScopeCluster(release_namespace=release_namespace)
        ext_scope = Scope(cluster=scope_cluster, namespace=None)

        # validate the config
        self.__validate_config(configuration_settings,
                               configuration_protected_settings)

        # get the arc's location
        subscription_id = get_subscription_id(cmd.cli_ctx)
        cluster_rp, parent_api_version = _get_cluster_rp_api_version(
            cluster_type)
        cluster_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/{2}' \
            '/{3}/{4}'.format(subscription_id, resource_group_name, cluster_rp, cluster_type, cluster_name)
        cluster_location = ''
        resources = cf_resources(cmd.cli_ctx, subscription_id)
        try:
            resource = resources.get_by_id(cluster_resource_id,
                                           parent_api_version)
            cluster_location = resource.location.lower()
        except CloudError as ex:
            raise ex

        # generate values for the extension if none is set.
        configuration_settings['cluster_name'] = configuration_settings.get(
            'cluster_name', cluster_resource_id)
        configuration_settings['domain'] = configuration_settings.get(
            'domain', '{}.cloudapp.azure.com'.format(cluster_location))
        configuration_settings['location'] = configuration_settings.get(
            'location', cluster_location)
        configuration_settings[
            self.JOB_SCHEDULER_LOCATION_KEY] = configuration_settings.get(
                self.JOB_SCHEDULER_LOCATION_KEY, cluster_location)
        configuration_settings[
            self.CLUSTER_NAME_FRIENDLY_KEY] = configuration_settings.get(
                self.CLUSTER_NAME_FRIENDLY_KEY, cluster_name)

        # create Azure resources needed by the extension based on the config.
        self.__create_required_resource(cmd, configuration_settings,
                                        configuration_protected_settings,
                                        subscription_id, resource_group_name,
                                        cluster_name, cluster_location)

        # dereference
        configuration_settings = _dereference(self.reference_mapping,
                                              configuration_settings)
        configuration_protected_settings = _dereference(
            self.reference_mapping, configuration_protected_settings)

        # If release-train is not input, set it to 'stable'
        if release_train is None:
            release_train = 'stable'

        create_identity = True
        extension_instance = ExtensionInstance(
            extension_type=extension_type,
            auto_upgrade_minor_version=auto_upgrade_minor_version,
            release_train=release_train,
            version=version,
            scope=ext_scope,
            configuration_settings=configuration_settings,
            configuration_protected_settings=configuration_protected_settings,
            identity=None,
            location="")
        return extension_instance, name, create_identity
Example 19
def _validate_visibility(namespace):
    visibility = getattr(namespace, key)
    if visibility is not None:
        visibility = visibility.capitalize()
        if visibility not in ['Private', 'Public']:
            raise InvalidArgumentValueError(f"Invalid --{key.replace('_', '-')} '{visibility}'.")
Example 20
def connection_create_kafka(
        cmd,
        client,  # pylint: disable=too-many-locals
        bootstrap_server,
        kafka_key,
        kafka_secret,
        schema_registry,
        schema_key,
        schema_secret,
        key_vault_id=None,
        connection_name=None,
        client_type=None,
        source_resource_group=None,
        source_id=None,
        site=None,  # Resource.WebApp
        deployment=None,
        spring=None,
        app=None):  # Resource.SpringCloud

    from ._transformers import transform_linker_properties
    # validation
    if 'azure.confluent.cloud' not in bootstrap_server.lower():
        raise InvalidArgumentValueError(
            'Kafka bootstrap server url is invalid: {}'.format(
                bootstrap_server))
    if 'azure.confluent.cloud' not in schema_registry.lower():
        raise InvalidArgumentValueError(
            'Schema registry url is invalid: {}'.format(schema_registry))

    if key_vault_id:
        client = set_user_token_header(client, cmd.cli_ctx)
        from ._utils import create_key_vault_reference_connection_if_not_exist
        create_key_vault_reference_connection_if_not_exist(
            cmd, client, source_id, key_vault_id)

    # create bootstrap-server
    parameters = {
        'target_id': bootstrap_server,
        'auth_info': {
            'name': kafka_key,
            'secret': kafka_secret,
            'auth_type': 'secret'
        },
        'secret_store': {
            'key_vault_id': key_vault_id,
        },
        'client_type': client_type,
    }
    logger.warning('Start creating a connection for bootstrap server ...')
    server_linker = client.begin_create_or_update(resource_uri=source_id,
                                                  linker_name=connection_name,
                                                  parameters=parameters)
    # block to poll the connection
    server_linker = server_linker.result()
    logger.warning('Created')

    # create schema registry
    parameters = {
        'target_id': schema_registry,
        'auth_info': {
            'name': schema_key,
            'secret': schema_secret,
            'auth_type': 'secret'
        },
        'secret_store': {
            'key_vault_id': key_vault_id,
        },
        'client_type': client_type,
    }
    logger.warning('Start creating a connection for schema registry ...')
    registry_linker = client.begin_create_or_update(
        resource_uri=source_id,
        linker_name='{}_schema'.format(connection_name),
        parameters=parameters)
    # block to poll the connection
    registry_linker = registry_linker.result()
    logger.warning('Created')

    return [
        transform_linker_properties(server_linker),
        transform_linker_properties(registry_linker)
    ]
Example 21
def validate_worker_vm_disk_size_gb(namespace):
    if namespace.worker_vm_disk_size_gb:
        if namespace.worker_vm_disk_size_gb < 128:
            raise InvalidArgumentValueError('--worker-vm-disk-size-gb must be greater than or equal to 128.')
Example 22
def validate_instance_not_existed(client, resource_group, name, location):
    availability_parameters = models.NameAvailabilityParameters(type="Microsoft.AppPlatform/Spring", name=name)
    name_availability = client.services.check_name_availability(location, availability_parameters)
    if not name_availability.name_available and name_availability.reason == "AlreadyExists":
        raise InvalidArgumentValueError("Service instance '{}' under resource group '{}' is already existed in region '{}', cannot be created again.".format(name, resource_group, location))
def validate_nat_gateway_idle_timeout(namespace):
    """validate NAT gateway profile idle timeout"""
    if namespace.nat_gateway_idle_timeout is not None:
        if namespace.nat_gateway_idle_timeout < 4 or namespace.nat_gateway_idle_timeout > 120:
            raise InvalidArgumentValueError("--nat-gateway-idle-timeout must be in the range [4,120]")
Example 24
def validate_name(namespace):
    namespace.name = namespace.name.lower()
    matchObj = match(r'^[a-z][a-z0-9-]{2,30}[a-z0-9]$', namespace.name)
    if matchObj is None:
        raise InvalidArgumentValueError(
            '--name must start with a lowercase letter and contain only lowercase letters, numbers and hyphens, with length in [4,32]')
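A short sketch of how the pattern behaves on sample names (the values are hypothetical; note that the function lowercases the name before matching):

from re import match

for name in ('my-service-01', '1bad', 'ends-with-dash-'):
    print(name, bool(match(r'^[a-z][a-z0-9-]{2,30}[a-z0-9]$', name)))
# my-service-01 True
# 1bad False
# ends-with-dash- False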
def validate_host_group_id(namespace):
    if namespace.host_group_id:
        from msrestazure.tools import is_valid_resource_id
        if not is_valid_resource_id(namespace.host_group_id):
            raise InvalidArgumentValueError("--host-group-id is not a valid Azure resource ID.")
Example 26
def validate_resource_id(namespace):
    if not is_valid_resource_id(namespace.resource_id):
        raise InvalidArgumentValueError("Invalid resource id {}".format(namespace.resource_id))
Example 27
    def deserialize_object(self, type_name, type_properties):
        from azure.mgmt.monitor.models import EmailReceiver, SmsReceiver, WebhookReceiver, \
            ArmRoleReceiver, AzureAppPushReceiver, ItsmReceiver, AutomationRunbookReceiver, \
            VoiceReceiver, LogicAppReceiver, AzureFunctionReceiver, EventHubReceiver
        syntax = {
            'email':
            'NAME EMAIL_ADDRESS [usecommonalertschema]',
            'sms':
            'NAME COUNTRY_CODE PHONE_NUMBER',
            'webhook':
            'NAME URI [useaadauth OBJECT_ID IDENTIFIER URI] [usecommonalertschema]',
            'armrole':
            'NAME ROLE_ID [usecommonalertschema]',
            'azureapppush':
            'NAME EMAIL_ADDRESS',
            'itsm':
            'NAME WORKSPACE_ID CONNECTION_ID TICKET_CONFIG REGION',
            'automationrunbook':
            'NAME AUTOMATION_ACCOUNT_ID RUNBOOK_NAME WEBHOOK_RESOURCE_ID '
            'SERVICE_URI [isglobalrunbook] [usecommonalertschema]',
            'voice':
            'NAME COUNTRY_CODE PHONE_NUMBER',
            'logicapp':
            'NAME RESOURCE_ID CALLBACK_URL [usecommonalertschema]',
            'azurefunction':
            'NAME FUNCTION_APP_RESOURCE_ID '
            'FUNCTION_NAME HTTP_TRIGGER_URL [usecommonalertschema]',
            'eventhub':
            'NAME SUBSCRIPTION_ID EVENT_HUB_NAME_SPACE EVENT_HUB_NAME [usecommonalertschema] '
        }

        receiver = None
        useCommonAlertSchema = 'usecommonalertschema' in (
            property.lower() for property in type_properties)
        try:
            if type_name == 'email':
                receiver = EmailReceiver(
                    name=type_properties[0],
                    email_address=type_properties[1],
                    use_common_alert_schema=useCommonAlertSchema)
            elif type_name == 'sms':
                receiver = SmsReceiver(name=type_properties[0],
                                       country_code=type_properties[1],
                                       phone_number=type_properties[2])
            elif type_name == 'webhook':
                useAadAuth = len(type_properties) >= 3 and type_properties[2] == 'useaadauth'
                object_id = type_properties[3] if useAadAuth else None
                identifier_uri = type_properties[4] if useAadAuth else None
                receiver = WebhookReceiver(
                    name=type_properties[0],
                    service_uri=type_properties[1],
                    use_common_alert_schema=useCommonAlertSchema,
                    use_aad_auth=useAadAuth,
                    object_id=object_id,
                    identifier_uri=identifier_uri)
            elif type_name == 'armrole':
                receiver = ArmRoleReceiver(
                    name=type_properties[0],
                    role_id=type_properties[1],
                    use_common_alert_schema=useCommonAlertSchema)
            elif type_name == 'azureapppush':
                receiver = AzureAppPushReceiver(
                    name=type_properties[0], email_address=type_properties[1])
            elif type_name == 'itsm':
                receiver = ItsmReceiver(
                    name=type_properties[0],
                    workspace_id=type_properties[1],
                    connection_id=type_properties[2],
                    ticket_configuration=type_properties[3],
                    region=type_properties[4])
            elif type_name == 'automationrunbook':
                isGlobalRunbook = 'isglobalrunbook' in (
                    property.lower() for property in type_properties)
                receiver = AutomationRunbookReceiver(
                    name=type_properties[0],
                    automation_account_id=type_properties[1],
                    runbook_name=type_properties[2],
                    webhook_resource_id=type_properties[3],
                    service_uri=type_properties[4],
                    is_global_runbook=isGlobalRunbook,
                    use_common_alert_schema=useCommonAlertSchema)
            elif type_name == 'voice':
                receiver = VoiceReceiver(name=type_properties[0],
                                         country_code=type_properties[1],
                                         phone_number=type_properties[2])
            elif type_name == 'logicapp':
                receiver = LogicAppReceiver(
                    name=type_properties[0],
                    resource_id=type_properties[1],
                    callback_url=type_properties[2],
                    use_common_alert_schema=useCommonAlertSchema)
            elif type_name == 'azurefunction':
                receiver = AzureFunctionReceiver(
                    name=type_properties[0],
                    function_app_resource_id=type_properties[1],
                    function_name=type_properties[2],
                    http_trigger_url=type_properties[3],
                    use_common_alert_schema=useCommonAlertSchema)
            elif type_name == 'eventhub':
                receiver = EventHubReceiver(
                    name=type_properties[0],
                    subscription_id=type_properties[1],
                    event_hub_name_space=type_properties[2],
                    event_hub_name=type_properties[3],
                    use_common_alert_schema=useCommonAlertSchema)
            else:
                raise InvalidArgumentValueError(
                    'The type "{}" is not recognizable.'.format(type_name))

        except IndexError:
            raise InvalidArgumentValueError('--action {}'.format(
                syntax[type_name]))
        return receiver
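A rough sketch of how the deserializer consumes the space-separated --action values; the instance name parser and the argument values below are assumptions for illustration:

# hypothetical CLI input: --action email admins admin@contoso.com usecommonalertschema
receiver = parser.deserialize_object(
    'email', ['admins', 'admin@contoso.com', 'usecommonalertschema'])
print(receiver.name, receiver.use_common_alert_schema)  # admins True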
Example 28
def validate_ingress_timeout(namespace):
    if namespace.ingress_read_timeout is not None and (namespace.ingress_read_timeout < 1 or
                                                       namespace.ingress_read_timeout > 1800):
        raise InvalidArgumentValueError("Invalid value: Ingress read timeout must be in the range [1,1800].")
def validate_severity(namespace):
    if namespace.filter_severity:
        validate_only_equals_operator(namespace.filter_severity)
        for x in namespace.filter_severity[1:]:
            if x not in ['Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4']:
                raise InvalidArgumentValueError('Argument Error: filter-severity values must be one of [Equals, NotEquals, Sev0, Sev1, Sev2, Sev3, Sev4]')
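A minimal sketch of the happy path, with a SimpleNamespace standing in for the parsed arguments:

from types import SimpleNamespace

ns = SimpleNamespace(filter_severity=['Equals', 'Sev0', 'Sev3'])
validate_severity(ns)  # passes: valid operator and severity values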
Example 30
def validate_git_url(url: str):
    if not re.match(consts.VALID_URL_REGEX, url):
        raise InvalidArgumentValueError(consts.INVALID_URL_ERROR,
                                        consts.INVALID_URL_HELP)