コード例 #1
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def process_nw_test_connectivity_namespace(cmd, namespace):
    """Namespace validator for `network watcher test-connectivity`.

    Resolves the source VM to pin the command to that VM's location (so the
    correct regional Network Watcher is selected) and expands bare
    source/destination resource names into full ARM resource IDs.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id

    compute_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machines
    # NOTE(review): relies on parse_resource_id returning the raw string under
    # 'name' when --source-resource is a bare VM name rather than a full ID —
    # confirm against the msrestazure version in use.
    vm_name = parse_resource_id(namespace.source_resource)['name']
    rg = namespace.resource_group_name or parse_resource_id(namespace.source_resource).get('resource_group', None)
    if not rg:
        raise CLIError('usage error: --source-resource ID | --source-resource NAME --resource-group NAME')
    # The VM's location determines which Network Watcher handles the request,
    # so namespace.location must be set before the watcher lookup below.
    vm = compute_client.get(rg, vm_name)
    namespace.location = vm.location  # pylint: disable=no-member
    get_network_watcher_from_location(remove=True)(cmd, namespace)

    # Expand a bare source VM name into a full resource ID (uses the resource
    # group resolved above, which may have come from the parsed source ID).
    if namespace.source_resource and not is_valid_resource_id(namespace.source_resource):
        namespace.source_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=rg,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.source_resource)

    # Expand a bare destination VM name; unlike the source, this uses the
    # --resource-group value directly (no fallback to the parsed source group).
    if namespace.dest_resource and not is_valid_resource_id(namespace.dest_resource):
        namespace.dest_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.dest_resource)
コード例 #2
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def process_nw_packet_capture_create_namespace(cmd, namespace):
    """Validate and normalize arguments for `network watcher packet-capture create`."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    get_network_watcher_from_vm(cmd, namespace)

    usage = CLIError('usage error: --storage-account NAME_OR_ID [--storage-path '
                     'PATH] [--file-path PATH] | --file-path PATH')
    # At least one capture target is required, and --storage-path only makes
    # sense when a storage account is also given.
    no_target = not namespace.storage_account and not namespace.file_path
    orphan_storage_path = namespace.storage_path and not namespace.storage_account
    if no_target or orphan_storage_path:
        raise usage

    if not is_valid_resource_id(namespace.vm):
        # Expand a bare VM name into a full resource ID.
        namespace.vm = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.vm)

    account = namespace.storage_account
    if account and not is_valid_resource_id(account):
        # Expand a bare storage account name into a full resource ID.
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=account)

    if namespace.file_path:
        if not namespace.file_path.endswith('.cap'):
            raise CLIError("usage error: --file-path PATH must end with the '*.cap' extension")
        # Normalize separators to backslashes for the path on the capture VM.
        namespace.file_path = namespace.file_path.replace('/', '\\')
コード例 #3
0
ファイル: validators.py プロジェクト: derekbekoe/azure-cli
def validate_diagnostic_settings(cmd, namespace):
    """Validate diagnostic-settings arguments: resolve the target resource,
    expand bare storage-account / workspace names into full resource IDs, and
    require at least one sink (--storage-account, --event-hub-name, or
    --workspace)."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id
    from knack.util import CLIError
    resource_group_error = "--resource-group is required when name is provided for storage account or workspace or " \
                           "service bus namespace and rule. "

    get_target_resource_validator('resource_uri', required=True, preserve_resource_group_parameter=True)(cmd, namespace)

    def _expand(name, provider, res_type):
        # Bare names need --resource-group to build a full ID.
        if namespace.resource_group_name is None:
            raise CLIError(resource_group_error)
        return resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                           resource_group=namespace.resource_group_name,
                           namespace=provider,
                           type=res_type,
                           name=name)

    if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = _expand(namespace.storage_account, 'microsoft.Storage', 'storageAccounts')

    if namespace.workspace and not is_valid_resource_id(namespace.workspace):
        namespace.workspace = _expand(namespace.workspace, 'microsoft.OperationalInsights', 'workspaces')

    if not (namespace.storage_account or namespace.workspace or namespace.event_hub):
        raise CLIError(
            'One of the following parameters is expected: --storage-account, --event-hub-name, or --workspace.')

    # Drop the resource group so it is not forwarded to the service call.
    try:
        del namespace.resource_group_name
    except AttributeError:
        pass
コード例 #4
0
ファイル: validators.py プロジェクト: sptramer/azure-cli
def validate_diagnostic_settings(cmd, namespace):
    """Validate diagnostic-settings arguments: resolve the target resource,
    expand bare names into full resource IDs, and reconcile the
    --event-hub / --event-hub-rule pair."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    from knack.util import CLIError

    get_target_resource_validator('resource_uri', required=True, preserve_resource_group_parameter=True)(cmd, namespace)
    if not namespace.resource_group_name:
        # Fall back to the resource group embedded in the target resource URI.
        namespace.resource_group_name = parse_resource_id(namespace.resource_uri)['resource_group']

    def _expand(name, provider, res_type):
        # Build a full resource ID from a bare name in the current group.
        return resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                           resource_group=namespace.resource_group_name,
                           namespace=provider,
                           type=res_type,
                           name=name)

    if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = _expand(namespace.storage_account, 'microsoft.Storage', 'storageAccounts')

    if namespace.workspace and not is_valid_resource_id(namespace.workspace):
        namespace.workspace = _expand(namespace.workspace, 'microsoft.OperationalInsights', 'workspaces')

    # The service wants the event hub *name*; reduce an ID to its name.
    if namespace.event_hub and is_valid_resource_id(namespace.event_hub):
        namespace.event_hub = parse_resource_id(namespace.event_hub)['name']

    if namespace.event_hub_rule:
        if is_valid_resource_id(namespace.event_hub_rule):
            if not namespace.event_hub:
                # extract the event hub name from `--event-hub-rule` if provided as an ID
                namespace.event_hub = parse_resource_id(namespace.event_hub_rule)['name']
        else:
            if not namespace.event_hub:
                raise CLIError('usage error: --event-hub-rule ID | --event-hub-rule NAME --event-hub NAME')
            # use value from --event-hub if the rule is a name
            namespace.event_hub_rule = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.EventHub',
                type='namespaces',
                name=namespace.event_hub,
                child_type_1='AuthorizationRules',
                child_name_1=namespace.event_hub_rule)

    if not (namespace.storage_account or namespace.workspace or namespace.event_hub):
        raise CLIError(
            'usage error - expected one or more:  --storage-account NAME_OR_ID | --workspace NAME_OR_ID '
            '| --event-hub NAME_OR_ID | --event-hub-rule ID')

    # Drop the resource group so it is not forwarded to the service call.
    try:
        del namespace.resource_group_name
    except AttributeError:
        pass
コード例 #5
0
ファイル: custom.py プロジェクト: yugangw-msft/azure-cli
def _replica_create(cmd, client, resource_group_name, server_name, source_server, no_wait=False, **kwargs):
    """Create a read replica of *source_server*.

    Only MySQL servers are supported here. Previously, for any other client
    type `parameters` stayed None and was forwarded to the SDK, which failed
    with an obscure serialization error; now an explicit CLIError is raised.

    :raises CLIError: if the source server name/ID is malformed, the source
        server cannot be fetched, or the client is not a MySQL client.
    """
    provider = 'Microsoft.DBForMySQL' if isinstance(client, MySqlServersOperations) else 'Microsoft.DBforPostgreSQL'
    # set source server id: expand a bare name into a full resource ID
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise CLIError('The provided source-server {} is invalid.'.format(source_server))

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    if provider != 'Microsoft.DBForMySQL':
        # Fail fast instead of passing parameters=None to the SDK.
        raise CLIError('Read replica creation is only supported for MySQL servers.')

    from azure.mgmt.rdbms import mysql
    # Replica inherits the source server's SKU and location.
    parameters = mysql.models.ServerForCreate(
        sku=mysql.models.Sku(name=source_server_object.sku.name),
        properties=mysql.models.ServerPropertiesForReplica(source_server_id=source_server),
        location=source_server_object.location)

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
コード例 #6
0
def create(cmd, client, resource_group_name, activity_log_alert_name, scopes=None, condition=None,
           action_groups=frozenset(), tags=None, disable=False, description=None, webhook_properties=None):
    """Create a new activity log alert rule.

    Fails if a rule with the same name already exists in the resource group.
    Defaults: scope = the whole resource group; condition = any ServiceHealth
    event.
    """
    from msrestazure.tools import resource_id
    from azure.mgmt.monitor.models import (ActivityLogAlertResource, ActivityLogAlertAllOfCondition,
                                           ActivityLogAlertLeafCondition, ActivityLogAlertActionList)
    from azure.mgmt.monitor.models import ActivityLogAlertActionGroup as ActionGroup
    from azure.cli.core.commands.client_factory import get_subscription_id
    from knack.util import CLIError

    if not scopes:
        # Default scope: the entire resource group.
        rg_id = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name)
        scopes = [rg_id]

    # Refuse to silently overwrite an existing rule.
    existing = _get_alert_settings(client, resource_group_name, activity_log_alert_name, throw_if_missing=False)
    if existing:
        raise CLIError('The activity log alert {} already exists in resource group {}.'.format(activity_log_alert_name,
                                                                                               resource_group_name))

    if not condition:
        # Default condition: any ServiceHealth event.
        condition = ActivityLogAlertAllOfCondition(
            all_of=[ActivityLogAlertLeafCondition(field='category', equals='ServiceHealth')])

    # Resolve action group names/IDs to full IDs and attach webhook properties.
    group_ids = _normalize_names(cmd.cli_ctx, action_groups, resource_group_name, 'microsoft.insights',
                                 'actionGroups')
    actions = ActivityLogAlertActionList(
        action_groups=[ActionGroup(action_group_id=gid, webhook_properties=webhook_properties) for gid in group_ids])

    settings = ActivityLogAlertResource(location='global', scopes=scopes, condition=condition,
                                        actions=actions, enabled=not disable, description=description, tags=tags)

    return client.create_or_update(resource_group_name=resource_group_name,
                                   activity_log_alert_name=activity_log_alert_name, activity_log_alert=settings)
コード例 #7
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
    def simple_validator(cmd, namespace):
        """Validate the --subnet / --vnet-name pair and expand a subnet name
        into a full resource ID."""
        vnet = namespace.virtual_network_name
        subnet = namespace.subnet
        if vnet is None and subnet is None:
            return
        if subnet == '':
            # Explicitly cleared subnet — nothing to validate.
            return
        usage_error = ValueError('incorrect usage: ( --subnet ID | --subnet NAME --vnet-name NAME)')
        if vnet and not subnet:
            # vnet-name without a subnet is meaningless
            raise usage_error

        subnet_is_id = is_valid_resource_id(subnet)

        # Exactly one of (full subnet ID) / (subnet name + vnet name) is valid:
        # an ID with a vnet name, or a name without one, are both errors.
        if subnet_is_id == bool(vnet):
            raise usage_error

        if not subnet_is_id:
            namespace.subnet = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.Network',
                type='virtualNetworks',
                name=vnet,
                child_type_1='subnets',
                child_name_1=subnet)
コード例 #8
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
 def _validate_name_or_id(public_ip):
     """Return *public_ip* unchanged if it is already a full resource ID;
     otherwise build a publicIPAddresses ID in the namespace's group."""
     if is_valid_resource_id(public_ip):
         return public_ip
     return resource_id(
         subscription=get_subscription_id(cmd.cli_ctx),
         resource_group=namespace.resource_group_name,
         namespace='Microsoft.Network',
         type='publicIPAddresses',
         name=public_ip)
コード例 #9
0
def validate_virtual_hub(cmd, namespace):
    """Expand a bare --virtual-hub name into a full virtualHubs resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    hub = namespace.virtual_hub
    if hub and not is_valid_resource_id(hub):
        namespace.virtual_hub = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='virtualHubs',
            name=hub)
コード例 #10
0
def validate_express_route_port(cmd, namespace):
    """Expand a bare --express-route-port name into a full resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    port = namespace.express_route_port
    if port and not is_valid_resource_id(port):
        namespace.express_route_port = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='expressRoutePorts',
            name=port)
コード例 #11
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def _generate_ag_subproperty_id(cli_ctx, namespace, child_type, child_name, subscription=None):
    """Build the resource ID of a child resource of the namespace's
    application gateway (e.g. a listener or rule)."""
    from msrestazure.tools import resource_id
    # Only query the current subscription when one was not supplied.
    sub = subscription if subscription else get_subscription_id(cli_ctx)
    return resource_id(
        subscription=sub,
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network',
        type='applicationGateways',
        name=namespace.application_gateway_name,
        child_type_1=child_type,
        child_name_1=child_name)
コード例 #12
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def _generate_lb_subproperty_id(cli_ctx, namespace, child_type, child_name, subscription=None):
    """Build the resource ID of a child resource of the namespace's
    load balancer (e.g. a frontend IP configuration or probe)."""
    from msrestazure.tools import resource_id
    id_kwargs = {
        # Only query the current subscription when one was not supplied.
        'subscription': subscription if subscription else get_subscription_id(cli_ctx),
        'resource_group': namespace.resource_group_name,
        'namespace': 'Microsoft.Network',
        'type': 'loadBalancers',
        'name': namespace.load_balancer_name,
        'child_type_1': child_type,
        'child_name_1': child_name,
    }
    return resource_id(**id_kwargs)
コード例 #13
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
 def _validate_name_or_id(value, resource_type):
     """Return *value* if it is already a full resource ID; otherwise build a
     Microsoft.Network/<resource_type> ID in the namespace's group."""
     if is_valid_resource_id(value):
         return value
     # Prefer an explicit subscription on the namespace over the default.
     subscription = getattr(namespace, 'subscription', get_subscription_id(cmd.cli_ctx))
     return resource_id(
         subscription=subscription,
         resource_group=namespace.resource_group_name,
         namespace='Microsoft.Network',
         type=resource_type,
         name=value)
コード例 #14
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def validate_route_filter(cmd, namespace):
    """Expand a bare --route-filter name into a full routeFilters resource ID."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    rfilter = namespace.route_filter
    if rfilter and not is_valid_resource_id(rfilter):
        namespace.route_filter = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='routeFilters',
            name=rfilter)
コード例 #15
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def validate_ddos_name_or_id(cmd, namespace):
    """Expand a bare --ddos-protection-plan name into a full resource ID."""

    plan = namespace.ddos_protection_plan
    if plan:
        from msrestazure.tools import is_valid_resource_id, resource_id
        if not is_valid_resource_id(plan):
            namespace.ddos_protection_plan = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.Network',
                type='ddosProtectionPlans',
                name=plan)
コード例 #16
0
ファイル: _validator.py プロジェクト: derekbekoe/azure-cli
def validate_storageaccount(cmd, namespace):
    """Expand a bare storage account name into a full resource ID."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id
    account = namespace.storage_account_resource_id
    if account and not is_valid_resource_id(account):
        namespace.storage_account_resource_id = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=account)
コード例 #17
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def process_nw_flow_log_set_namespace(cmd, namespace):
    """Normalize `network watcher flow-log configure` arguments: expand a bare
    storage account name, then run the shared show-namespace validation."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    account = namespace.storage_account
    if account and not is_valid_resource_id(account):
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=account)

    # Delegate the remaining validation shared with the `show` command.
    process_nw_flow_log_show_namespace(cmd, namespace)
コード例 #18
0
    def validator(cmd, namespace):
        """Resolve *property_name* on the namespace to an existing resource ID,
        an explicit None, or a to-be-created resource, recording which case
        applied in the companion '<property_name>_type' attribute
        ('none' / 'existingId' / 'new').
        """
        from msrestazure.tools import resource_id
        type_field_name = '{}_type'.format(property_name)
        property_val = getattr(namespace, property_name, None)
        parent_val = getattr(namespace, parent_name, None) if parent_name else None

        # Check for the different scenarios (order matters)
        # 1) provided value indicates None (pair of empty quotes)
        if property_val in ('', '""', "''") or (property_val is None and default_none):
            if not allow_none:
                raise CLIError('{} cannot be None.'.format(property_option))
            setattr(namespace, type_field_name, 'none')
            setattr(namespace, property_name, None)
            # A parent value makes no sense when the property itself is None.
            if parent_name and parent_val:
                logger.warning('Ignoring: %s %s', parent_option, parent_val)
                setattr(namespace, parent_name, None)
            return  # SUCCESS

        # Create a resource ID we can check for existence.
        (resource_id_parts, value_was_id) = _validate_name_or_id(
            cmd.cli_ctx, namespace.resource_group_name, property_val, property_type, parent_val, parent_type)

        # 2) resource exists
        if resource_exists(cmd.cli_ctx, **resource_id_parts):
            setattr(namespace, type_field_name, 'existingId')
            setattr(namespace, property_name, resource_id(**resource_id_parts))
            if parent_val:
                # The parent is implied by the full ID; warn only when the user
                # explicitly supplied both an ID and a parent.
                if value_was_id:
                    logger.warning('Ignoring: %s %s', parent_option, parent_val)
                setattr(namespace, parent_name, None)
            return  # SUCCESS

        # if a parent name was required but not specified, raise a usage error
        if has_parent and not value_was_id and not parent_val and not allow_new:
            raise ValueError('incorrect usage: {0} ID | {0} NAME {1} NAME'.format(
                property_option, parent_option))

        # if non-existent ID was supplied, throw error depending on whether a new resource can
        # be created.
        if value_was_id:
            usage_message = '{} NAME'.format(property_option) if not has_parent \
                else '{} NAME [{} NAME]'.format(property_option, parent_option)
            action_message = 'Specify ( {} ) to create a new resource.'.format(usage_message) if \
                allow_new else 'Create the required resource and try again.'
            raise CLIError('{} {} does not exist. {}'.format(
                property_name, property_val, action_message))

        # 3) try to create new resource
        if allow_new:
            setattr(namespace, type_field_name, 'new')
        else:
            raise CLIError(
                '{} {} does not exist. Create the required resource and try again.'.format(
                    property_name, property_val))
コード例 #19
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
 def simple_validator(cmd, namespace):
     """Expand a bare network security group name into a full resource ID."""
     nsg = namespace.network_security_group
     if nsg and not is_valid_resource_id(nsg):
         namespace.network_security_group = resource_id(
             subscription=get_subscription_id(cmd.cli_ctx),
             resource_group=namespace.resource_group_name,
             namespace='Microsoft.Network',
             type='networkSecurityGroups',
             name=nsg)
コード例 #20
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def validate_target_listener(cmd, namespace):
    """Expand a bare --target-listener name into the full httpListeners child
    resource ID under the namespace's application gateway."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    listener = namespace.target_listener
    if listener and not is_valid_resource_id(listener):
        namespace.target_listener = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='applicationGateways',
            name=namespace.application_gateway_name,
            child_type_1='httpListeners',
            child_name_1=listener)
コード例 #21
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
 def simple_validator(cmd, namespace):
     """Expand a bare virtual network name into a full resource ID."""
     vnet = namespace.virtual_network
     if vnet and not is_valid_resource_id(vnet):
         namespace.virtual_network = resource_id(
             subscription=get_subscription_id(cmd.cli_ctx),
             resource_group=namespace.resource_group_name,
             namespace='Microsoft.Network',
             type='virtualNetworks',
             name=vnet)
コード例 #22
0
ファイル: custom.py プロジェクト: yugangw-msft/azure-cli
def _server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, no_wait=False):
    """Restore *source_server* to a new server at *restore_point_in_time*.

    Supports MySQL, PostgreSQL, and MariaDB clients; the provider is inferred
    from the client type. Fixes the original's redundant re-assignment of the
    restore properties after construction and collapses the three identical
    per-engine branches.

    :raises ValueError: if the source server name/ID is malformed or the
        source server cannot be fetched.
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    # Expand a bare server name into a full resource ID in the caller's group.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='servers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    # All three engines expose identically-shaped ServerForCreate /
    # ServerPropertiesForRestore models; pick the matching module.
    if provider == 'Microsoft.DBforMySQL':
        from azure.mgmt.rdbms import mysql as engine
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql as engine
    else:
        from azure.mgmt.rdbms import mariadb as engine

    parameters = engine.models.ServerForCreate(
        properties=engine.models.ServerPropertiesForRestore(
            source_server_id=source_server,
            restore_point_in_time=restore_point_in_time),
        location=None)

    # Here is a workaround that we don't support cross-region restore currently,
    # so the location must be set as the same as source server (not the resource group)
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:  # pylint: disable=broad-except
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
コード例 #23
0
ファイル: _validators.py プロジェクト: derekbekoe/azure-cli
def validate_partner_namespace(cmd, namespace):
    """Expand a bare Service Bus partner namespace name into a full resource ID."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id
    partner = namespace.partner_namespace
    if partner and not is_valid_resource_id(partner):
        namespace.partner_namespace = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.ServiceBus',
            type='namespaces',
            name=partner)
コード例 #24
0
ファイル: _validators.py プロジェクト: yugangw-msft/azure-cli
def validate_storage_msi(cmd, namespace):
    """Expand a bare user-assigned managed identity name into a full
    resource ID."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id

    msi = namespace.storage_account_managed_identity
    if msi is not None and not is_valid_resource_id(msi):
        namespace.storage_account_managed_identity = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.ManagedIdentity',
            type='userAssignedIdentities',
            name=msi)
コード例 #25
0
ファイル: _validators.py プロジェクト: yugangw-msft/azure-cli
def validate_load_balancer(cmd, namespace):
    """Accept a load balancer name or full resource ID.

    A bare name is assumed to refer to a load balancer in the same resource
    group and is expanded into a full resource ID.
    """
    lb = namespace.load_balancer_resource_id

    if not is_valid_resource_id(lb):
        namespace.load_balancer_resource_id = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='loadBalancers',
            name=lb)
コード例 #26
0
ファイル: _validators.py プロジェクト: yugangw-msft/azure-cli
def validate_public_ip_address(cmd, namespace):
    """Accept a public IP address name or full resource ID.

    A bare name is assumed to refer to a public IP in the same resource group
    and is expanded into a full resource ID.
    """
    public_ip = namespace.public_ip_address_resource_id

    if public_ip and not is_valid_resource_id(public_ip):
        namespace.public_ip_address_resource_id = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='publicIPAddresses',
            name=public_ip)
コード例 #27
0
ファイル: custom.py プロジェクト: yugangw-msft/azure-cli
def _get_virtual_network_id(cmd, resource_group_name, subnet, virtual_network):
    """Return the full resource ID of a subnet.

    *subnet* may already be a full ID (returned as-is); otherwise it is treated
    as a subnet name and *virtual_network* is required to build the ID.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id
    if is_valid_resource_id(subnet):
        return subnet
    if virtual_network is None:
        raise CLIError("usage error: --subnet ID | --subnet NAME --vnet-name NAME")
    return resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=virtual_network,
        child_type_1='subnets',
        child_name_1=subnet)
コード例 #28
0
ファイル: _validators.py プロジェクト: yugangw-msft/azure-cli
def validate_sqlvm_group(cmd, namespace):
    """Accept a SQL virtual machine group name or full resource ID.

    A bare name is assumed to refer to a group in the same resource group and
    is expanded into a full resource ID.
    """
    group = namespace.sql_virtual_machine_group_resource_id

    if group and not is_valid_resource_id(group):
        namespace.sql_virtual_machine_group_resource_id = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.SqlVirtualMachine',
            type='sqlVirtualMachineGroups',
            name=group)
コード例 #29
0
def _front_door_subresource_id(cmd, resource_group, front_door_name, child_type, child_name):
    """Build the resource ID of a child resource of a Front Door
    (e.g. a frontend endpoint or routing rule)."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import resource_id

    return resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=resource_group,
        namespace='Microsoft.Network',
        type='frontdoors',
        name=front_door_name,
        child_type_1=child_type,
        child_name_1=child_name)
コード例 #30
0
ファイル: _validators.py プロジェクト: jiayexie/azure-cli
def process_nw_troubleshooting_start_namespace(cmd, namespace):
    """Normalize `network watcher troubleshooting start` arguments: require a
    storage account when a storage path is given, expand a bare account name
    into a full resource ID, then run the shared show-namespace validation."""
    from msrestazure.tools import is_valid_resource_id, resource_id
    # --storage-path is only meaningful alongside --storage-account.
    if namespace.storage_path and not namespace.storage_account:
        raise CLIError('usage error: --storage-account NAME_OR_ID [--storage-path PATH]')

    account = namespace.storage_account
    if not is_valid_resource_id(account):
        namespace.storage_account = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=account)

    # Delegate the remaining validation shared with the `show` command.
    process_nw_troubleshooting_show_namespace(cmd, namespace)
コード例 #31
0
def flexible_server_restore(cmd, client,
                            resource_group_name, server_name,
                            source_server, restore_point_in_time=None, zone=None, no_wait=False,
                            subnet=None, subnet_address_prefix=None, vnet=None, vnet_address_prefix=None,
                            private_dns_zone_arguments=None, yes=False):
    """Restore a new PostgreSQL flexible server from an existing source server.

    `source_server` may be a bare server name (resolved in the caller's
    subscription and `resource_group_name`) or a full ARM resource id.
    When the source server has public network access disabled, its private
    network settings are carried over unless overridden via the
    subnet/vnet/private-DNS arguments.  Raises ValueError for a malformed
    source id and ResourceNotFoundError when the source cannot be read.
    """

    server_name = server_name.lower()
    db_context = DbContext(
        cmd=cmd, azure_sdk=postgresql_flexibleservers, cf_firewall=cf_postgres_flexible_firewall_rules, cf_db=cf_postgres_flexible_db,
        cf_availability=cf_postgres_check_resource_availability, cf_private_dns_zone_suffix=cf_postgres_flexible_private_dns_zone_suffix_operations, logging_name='PostgreSQL', command_group='postgres', server_client=client)
    # Fail early if the target server name is invalid/unavailable.
    validate_server_name(db_context, server_name, 'Microsoft.DBforPostgreSQL/flexibleServers')

    # Accept either a full resource id or a bare single-segment name.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=RESOURCE_PROVIDER,
                type='flexibleServers',
                name=source_server)
        else:
            raise ValueError('The provided source server {} is invalid.'.format(source_server))
    else:
        source_server_id = source_server

    try:
        id_parts = parse_resource_id(source_server_id)
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])

        # Normalize the location string (e.g. "East US" -> "eastus").
        location = ''.join(source_server_object.location.lower().split())
        parameters = postgresql_flexibleservers.models.Server(
            location=location,
            point_in_time_utc=restore_point_in_time,
            source_server_resource_id=source_server_id,  # this should be the source server name, not id
            create_mode="PointInTimeRestore",
            availability_zone=zone
        )

        # VNet-injected source: replicate (or override) its private network setup.
        if source_server_object.network.public_network_access == 'Disabled':
            network = postgresql_flexibleservers.models.Network()
            if subnet is not None or vnet is not None:
                subnet_id = prepare_private_network(cmd,
                                                    resource_group_name,
                                                    server_name,
                                                    vnet=vnet,
                                                    subnet=subnet,
                                                    location=location,
                                                    delegation_service_name=DELEGATION_SERVICE_NAME,
                                                    vnet_address_pref=vnet_address_prefix,
                                                    subnet_address_pref=subnet_address_prefix,
                                                    yes=yes)
            else:
                # No override given: reuse the source server's delegated subnet.
                subnet_id = source_server_object.network.delegated_subnet_resource_id

            if private_dns_zone_arguments is not None:
                private_dns_zone_id = prepare_private_dns_zone(db_context,
                                                               'PostgreSQL',
                                                               resource_group_name,
                                                               server_name,
                                                               private_dns_zone=private_dns_zone_arguments,
                                                               subnet_id=subnet_id,
                                                               location=location,
                                                               yes=yes)
            else:
                # Fall back to the source server's private DNS zone.
                private_dns_zone_id = source_server_object.network.private_dns_zone_arm_resource_id

            network.delegated_subnet_resource_id = subnet_id
            network.private_dns_zone_arm_resource_id = private_dns_zone_id
            parameters.network = network

    except Exception as e:
        # Any failure while reading/preparing the source surfaces as not-found.
        raise ResourceNotFoundError(e)

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# Code example #32
# 0
    def test_metric_alert_condition_create(self, resource_group, vm1, vm2):
        """Scenario test for `monitor metrics alert condition create`.

        Builds a static condition (with two dimensions) and a dynamic
        condition via the CLI helper commands, verifies their rendered
        text, then creates a metric alert on two VMs using the dynamic
        condition and checks the criteria persisted by the service.
        """
        from azure.mgmt.core.tools import resource_id
        self.kwargs.update({
            'alert':
            'alert1',
            'plan':
            'plan1',
            'app':
            self.create_random_name('app', 15),
            'ag1':
            'ag1',
            'ag2':
            'ag2',
            'webhooks':
            '{{test=banoodle}}',
            'sub':
            self.get_subscription_id(),
            'vm_id':
            resource_id(resource_group=resource_group,
                        subscription=self.get_subscription_id(),
                        name=vm1,
                        namespace='Microsoft.Compute',
                        type='virtualMachines'),
            'vm_id_2':
            resource_id(resource_group=resource_group,
                        subscription=self.get_subscription_id(),
                        name=vm2,
                        namespace='Microsoft.Compute',
                        type='virtualMachines')
        })
        self.cmd('monitor action-group create -g {rg} -n {ag1}')

        # Expected rendering of the static condition built from the two dimensions below.
        cond1 = "total \'transactions\' > 5.0 where ResponseType includes Success and ApiName includes GetBlob"
        dim1 = self.cmd(
            'monitor metrics alert dimension create -n ResponseType --op include -v Success'
        ).output.strip()
        dim2 = self.cmd(
            'monitor metrics alert dimension create -n ApiName -v GetBlob'
        ).output.strip()
        self.cmd(
            'monitor metrics alert condition create -t static --aggregation total --metric transactions --dimension "{}" "{}" --op GreaterThan --threshold 5'
            .format(dim1, dim2),
            checks=[self.check('@', cond1)])

        # Expected rendering of the dynamic-threshold condition.
        cond2 = "avg 'Percentage Cpu' >< dynamic medium 1 of 6 since 2020-11-02T12:11:11+00:00"
        condition = self.cmd(
            'monitor metrics alert condition create -t dynamic --aggregation Average --metric "Percentage Cpu" --op GreaterOrLessThan --num-periods 6 --num-violations 1 --since 2020-11-02T12:11:11Z --sensitivity medium',
            checks=[self.check('@', cond2)]).output.strip()

        # Create the alert with the dynamic condition and verify the stored criteria.
        self.cmd(
            'monitor metrics alert create -g {rg} -n {alert} --scopes {vm_id} {vm_id_2} --action {ag1} --region westus --description "High CPU" --condition '
            + condition,
            checks=[
                self.check('description', 'High CPU'),
                self.check('severity', 2),
                self.check('autoMitigate', None),
                self.check('windowSize', '0:05:00'),
                self.check('evaluationFrequency', '0:01:00'),
                self.check('length(scopes)', 2),
                self.check('criteria.allOf[0].alertSensitivity', 'Medium'),
                self.check('criteria.allOf[0].criterionType',
                           'DynamicThresholdCriterion'),
                self.check(
                    'criteria.allOf[0].failingPeriods.minFailingPeriodsToAlert',
                    1.0),
                self.check(
                    'criteria.allOf[0].failingPeriods.numberOfEvaluationPeriods',
                    6.0),
                self.check('criteria.allOf[0].operator', 'GreaterOrLessThan'),
                self.check('criteria.allOf[0].ignoreDataBefore',
                           '2020-11-02T12:11:11+00:00')
            ])
# Code example #33
# 0
def acr_create(cmd,
               client,
               registry_name,
               resource_group_name,
               sku,
               location=None,
               admin_enabled=False,
               default_action=None,
               workspace=None,
               identity=None,
               key_encryption_key=None,
               public_network_enabled=None,
               zone_redundancy=None,
               allow_trusted_services=None,
               allow_exports=None,
               tags=None):
    """Create a container registry and optionally wire up diagnostics.

    Validates the SKU and name, builds the Registry model, applies the
    optional network/CMK/export settings, then starts the create
    operation.  When *workspace* is given the call blocks until the
    registry exists and a diagnostic-settings link to the Log Analytics
    workspace is created; otherwise the LRO poller is returned directly.
    """
    # Network rules are a premium-SKU feature; classic SKUs are retired.
    if default_action and sku not in get_premium_sku(cmd):
        raise CLIError(NETWORK_RULE_NOT_SUPPORTED)
    if sku not in get_managed_sku(cmd):
        raise CLIError(
            "Classic SKU is no longer supported. Please select a managed SKU.")
    # Registry names must be lowercase; reject any uppercase letter.
    if re.match(r'\w*[A-Z]\w*', registry_name):
        raise InvalidArgumentValueError(
            "argument error: Connected registry name must use only lowercase.")

    registry_cls, sku_cls, network_rule_set_cls = cmd.get_models(
        'Registry', 'Sku', 'NetworkRuleSet')
    registry = registry_cls(location=location,
                            sku=sku_cls(name=sku),
                            admin_user_enabled=admin_enabled,
                            zone_redundancy=zone_redundancy,
                            tags=tags)
    if default_action:
        registry.network_rule_set = network_rule_set_cls(
            default_action=default_action)
    if public_network_enabled is not None:
        _configure_public_network_access(cmd, registry, public_network_enabled)
    if identity or key_encryption_key:
        _configure_cmk(cmd, registry, resource_group_name, identity,
                       key_encryption_key)
    _handle_network_bypass(cmd, registry, allow_trusted_services)
    _handle_export_policy(cmd, registry, allow_exports)

    poller = client.begin_create(resource_group_name, registry_name, registry)
    if not workspace:
        return poller

    # Diagnostic settings need the concrete registry, so wait for the LRO.
    from msrestazure.tools import is_valid_resource_id, resource_id
    from azure.cli.core.commands import LongRunningOperation
    from azure.cli.core.commands.client_factory import get_subscription_id
    acr = LongRunningOperation(cmd.cli_ctx)(poller)
    if not is_valid_resource_id(workspace):
        # Expand a bare workspace name into a full ARM resource id.
        workspace = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=resource_group_name,
            namespace='microsoft.OperationalInsights',
            type='workspaces',
            name=workspace)
    _create_diagnostic_settings(cmd.cli_ctx, acr, workspace)
    return acr
# Code example #34
# 0
def flexible_server_georestore(cmd,
                               client,
                               resource_group_name,
                               server_name,
                               source_server,
                               location,
                               zone=None,
                               no_wait=False,
                               subnet=None,
                               subnet_address_prefix=None,
                               vnet=None,
                               vnet_address_prefix=None,
                               private_dns_zone_arguments=None,
                               public_access=None,
                               yes=False):
    """Geo-restore a MySQL flexible server into *location* from a source server.

    `source_server` may be a bare server name (resolved in the caller's
    subscription and `resource_group_name`) or a full ARM resource id.
    Validates the target name, restore location and network compatibility,
    then provisions the network resources for the restored server.
    Raises ValueError for a malformed source id and ResourceNotFoundError
    when the source cannot be read or prepared.
    """
    provider = 'Microsoft.DBforMySQL'
    server_name = server_name.lower()

    # Accept either a full resource id or a bare single-segment name.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server_id = resource_id(subscription=get_subscription_id(
                cmd.cli_ctx),
                                           resource_group=resource_group_name,
                                           namespace=provider,
                                           type='flexibleServers',
                                           name=source_server)
        else:
            raise ValueError(
                'The provided source-server {} is invalid.'.format(
                    source_server))
    else:
        source_server_id = source_server

    try:
        id_parts = parse_resource_id(source_server_id)
        source_server_object = client.get(id_parts['resource_group'],
                                          id_parts['name'])

        db_context = DbContext(
            cmd=cmd,
            cf_firewall=cf_mysql_flexible_firewall_rules,
            cf_db=cf_mysql_flexible_db,
            cf_availability=cf_mysql_check_resource_availability,
            cf_private_dns_zone_suffix=
            cf_mysql_flexible_private_dns_zone_suffix_operations,
            logging_name='MySQL',
            command_group='mysql',
            server_client=client,
            location=source_server_object.location)

        # Fail early on an invalid/unavailable target name, a location the
        # source cannot be geo-restored into, or incompatible network args.
        validate_server_name(db_context, server_name,
                             provider + '/flexibleServers')
        validate_georestore_location(db_context, location)
        validate_georestore_network(source_server_object, public_access, vnet,
                                    subnet)

        parameters = mysql_flexibleservers.models.Server(
            location=location,
            source_server_resource_id=
            source_server_id,  # this should be the source server name, not id
            create_mode="GeoRestore",
            availability_zone=zone)

        # Provision network resources in the *target* location.
        db_context.location = location
        # Public source with no explicit network overrides keeps public access.
        if source_server_object.network.public_network_access == 'Enabled' and not any(
            (public_access, vnet, subnet)):
            public_access = 'Enabled'

        parameters.network, _, _ = flexible_server_provision_network_resource(
            cmd=cmd,
            resource_group_name=resource_group_name,
            server_name=server_name,
            location=location,
            db_context=db_context,
            private_dns_zone_arguments=private_dns_zone_arguments,
            public_access=public_access,
            vnet=vnet,
            subnet=subnet,
            vnet_address_prefix=vnet_address_prefix,
            subnet_address_prefix=subnet_address_prefix,
            yes=yes)

    except Exception as e:
        # Any failure while reading/preparing the source surfaces as not-found.
        raise ResourceNotFoundError(e)

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name,
                       server_name, parameters)
# Code example #35
# 0
    def test_databricks(self, resource_group):
        """Scenario test for the `az databricks workspace` command group.

        Creates a standard workspace and a second workspace with custom
        managed-resource-group/relay/storage parameters, then exercises
        update, show (by name and by id), list and delete.
        """

        self.kwargs.update({
            'workspace_name': 'my-test-workspace',
            'subscription': '00000000-0000-0000-0000-000000000000',
            'custom_workspace_name': 'my-custom-workspace',
            'managed_resource_group': 'custom-managed-rg'
        })

        self.cmd('az databricks workspace create '
                 '--resource-group {rg} '
                 '--name {workspace_name} '
                 '--location "westus" '
                 '--sku standard',
                 checks=[JMESPathCheck('name', self.kwargs.get('workspace_name', '')),
                         JMESPathCheck('sku.name', self.kwargs.get('sku.name', 'standard'))])

        # Workspace with an explicit managed resource group and custom relay/storage names.
        managed_resource_group_id = '/subscriptions/{}/resourceGroups/{}'.format(self.kwargs.get('subscription', ''), self.kwargs.get('managed_resource_group', ''))
        self.cmd('az databricks workspace create '
                 '--resource-group {rg} '
                 '--name {custom_workspace_name} '
                 '--location "westus" '
                 '--sku standard '
                 '--managed-resource-group {managed_resource_group} '
                 '--relay-namespace-name custom-relay-space '
                 '--storage-account-name customdbstorage '
                 '--storage-account-sku Standard_LRS',
                 checks=[JMESPathCheck('name', self.kwargs.get('custom_workspace_name', '')),
                         JMESPathCheck('parameters.relayNamespaceName.value', 'custom-relay-space'),
                         JMESPathCheck('parameters.storageAccountName.value', 'customdbstorage'),
                         JMESPathCheck('parameters.storageAccountSkuName.value', 'Standard_LRS'),
                         JMESPathCheck('managedResourceGroupId', managed_resource_group_id)])

        self.cmd('az databricks workspace update '
                 '--resource-group {rg} '
                 '--name {workspace_name} '
                 '--tags type=test',
                 checks=[JMESPathCheck('tags.type', 'test')])

        self.cmd('az databricks workspace show '
                 '--resource-group {rg} '
                 '--name {workspace_name}',
                 checks=[JMESPathCheck('name', self.kwargs.get('workspace_name', ''))])

        # Show by full ARM resource id via --ids.
        workspace_resource_id = resource_id(
            subscription=self.kwargs.get('subscription', ''),
            resource_group=resource_group,
            namespace='Microsoft.Databricks',
            type='workspaces',
            name=self.kwargs.get('workspace_name', ''))

        self.cmd('az databricks workspace show '
                 '--ids {}'.format(workspace_resource_id),
                 checks=[JMESPathCheck('name', self.kwargs.get('workspace_name', ''))])

        # todo service 502
        # self.cmd('az databricks workspace list',
        #          '--resource-group='
        #          checks=[])

        self.cmd('az databricks workspace list '
                 '--resource-group {rg} ',
                 checks=[])

        self.cmd('az databricks workspace delete '
                 '--resource-group {rg} '
                 '--name {workspace_name} '
                 '-y',
                 checks=[])

        self.cmd('az databricks workspace delete '
                 '--resource-group {rg} '
                 '--name {custom_workspace_name} '
                 '-y',
                 checks=[])
# Code example #36
# 0
def enable_addons(cmd,
                  client,
                  resource_group_name,
                  name,
                  addons,
                  check_enabled=True,
                  workspace_resource_id=None,
                  subnet_name=None,
                  appgw_name=None,
                  appgw_subnet_prefix=None,
                  appgw_subnet_cidr=None,
                  appgw_id=None,
                  appgw_subnet_id=None,
                  appgw_watch_namespace=None,
                  enable_sgxquotehelper=False,
                  enable_secret_rotation=False,
                  rotation_poll_interval=None,
                  no_wait=False,
                  enable_msi_auth_for_monitoring=False):
    """Enable one or more addons on an existing managed cluster.

    Updates the cluster's addon profiles, provisions Container Insights
    resources when the monitoring addon is enabled (MSI-auth DCR/DCRA or
    the legacy path), and — when monitoring, AGIC or virtual-node addons
    require it — waits for the update and performs the post-creation role
    assignments.  Raises ArgumentUsageError if MSI auth for monitoring is
    requested on a service-principal cluster.
    """
    instance = client.get(resource_group_name, name)
    # this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = True if instance.service_principal_profile.client_id == "msi" else False

    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                             check_enabled=check_enabled,
                             workspace_resource_id=workspace_resource_id,
                             enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
                             appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
                             appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
                             appgw_watch_namespace=appgw_watch_namespace,
                             enable_sgxquotehelper=enable_sgxquotehelper,
                             enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)

    # Monitoring addon enabled: choose the MSI-auth (DCR) or legacy setup path.
    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
       CONST_MONITORING_ADDON_NAME].enabled:
        if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[
                    CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            if not msi_auth:
                raise ArgumentUsageError(
                    "--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # create a Data Collection Rule (DCR) and associate it with the cluster
                ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                         subscription_id, resource_group_name, name, instance.location,
                                                         aad_route=True, create_dcr=True, create_dcra=True)
        else:
            # monitoring addon will use legacy path
            ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                                                     subscription_id, resource_group_name, name, instance.location,
                                                     aad_route=False)

    monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True

    need_post_creation_role_assignment = monitoring_addon_enabled or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.

    else:
        # No role assignment needed: fire the update and honor --no-wait.
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
 def test_monitor_diagnostic_settings_subscription(self, resource_group, storage_account):
     """Scenario test for subscription-level diagnostic settings.

     Deletes any pre-existing settings, creates one that sends all
     activity-log categories to a storage account, then exercises
     show/list/update (adding a Log Analytics workspace) and delete.
     """
     from msrestazure.tools import resource_id
     self.kwargs.update({
         'name': self.create_random_name('clitest', 20),
         'ws': self.create_random_name('cliws', 20),
         'storage': resource_id(
             resource_group=resource_group,
             subscription=self.get_subscription_id(),
             name=storage_account,
             namespace='Microsoft.Storage',
             type='storageAccounts')
     })
     # Enable every subscription activity-log category.
     self.kwargs['log_config'] = json.dumps([
         {
             "category": "Security",
             "enabled": True,
         },
         {
             "category": "Administrative",
             "enabled": True,
         },
         {
             "category": "ServiceHealth",
             "enabled": True,
         },
         {
             "category": "Alert",
             "enabled": True,
         },
         {
             "category": "Recommendation",
             "enabled": True,
         },
         {
             "category": "Policy",
             "enabled": True,
         },
         {
             "category": "Autoscale",
             "enabled": True,
         },
         {
             "category": "ResourceHealth",
             "enabled": True,
         }
     ])
     # Start from a clean slate so the later list-count check is deterministic.
     diagns = self.cmd("monitor diagnostic-settings subscription list").get_output_in_json()['value']
     for diagn in diagns:
         name = diagn['name']
         self.cmd("monitor diagnostic-settings subscription delete --name {} -y".format(name))
     self.cmd("monitor diagnostic-settings subscription create -l southcentralus --name {name} --storage-account {storage} "
              "--logs \'{log_config}\'",
              checks=[
                  self.check('storageAccountId', '{storage}'),
                  self.check('serviceBusRuleId', None),
              ])
     self.cmd("monitor diagnostic-settings subscription show --name {name}", checks=[
         self.check('storageAccountId', '{storage}'),
         self.check('serviceBusRuleId', None),
     ])
     self.cmd("monitor diagnostic-settings subscription list", checks=[
         self.check('length(@)', 1)
     ])
     # Update should add the workspace while keeping the storage destination.
     self.kwargs['ws_id'] = self.cmd('monitor log-analytics workspace create -n {ws} -g {rg} --query id -otsv').output.strip()
     self.cmd('monitor diagnostic-settings subscription update --name {name} --workspace {ws_id}', checks=[
         self.check('storageAccountId', '{storage}'),
         self.check('workspaceId', '{ws_id}')
     ])
     self.cmd("monitor diagnostic-settings subscription delete --name {name} -y")
# Code example #38
# 0
    def test_monitor_log_analytics_workspace_data_export(self, resource_group, account_1):
        """Scenario test for Log Analytics workspace data-export rules.

        Creates export rules targeting a storage account and an event hub,
        verifies show/list/update/delete, and checks the service-side
        validation errors for bad tables and destination limits.
        """
        from msrestazure.tools import resource_id
        self.kwargs.update({
            'workspace_name': self.create_random_name('clitest', 20),
            'data_export_name': 'clitest',
            'data_export_name_2': 'clitest2',
            'sa_1': account_1,
            'sa_id_1': resource_id(
                resource_group=resource_group,
                subscription=self.get_subscription_id(),
                name=account_1,
                namespace='Microsoft.Storage',
                type='storageAccounts'),
            'namespacename': self.create_random_name(prefix='eventhubs-nscli', length=20),
            'eventhubname': "hub_name",
            'rg': resource_group
        })

        self.cmd(
            "monitor log-analytics workspace create -g {rg} -n {workspace_name} --quota 1 --level 100 --sku CapacityReservation",
            checks=[
                self.check('provisioningState', 'Succeeded'),
                self.check('retentionInDays', 30),
                self.check('sku.name', 'capacityreservation'),
                self.check('sku.capacityReservationLevel', 100),
                self.check('workspaceCapping.dailyQuotaGb', 1.0)
            ])
        self.kwargs.update({
            'table_name': 'Syslog'
        })

        self.cmd('monitor log-analytics workspace data-export create -g {rg} --workspace-name {workspace_name} -n {data_export_name} '
                 '--destination {sa_id_1} --enable -t {table_name}',
                 checks=[
                 ])

        # Negative cases: invalid tables / exceeding the per-destination rule limit.
        # NOTE: assertRaisesRegexp was removed in Python 3.12; use assertRaisesRegex.
        from azure.core.exceptions import HttpResponseError
        with self.assertRaisesRegex(HttpResponseError, 'Table SecurityEvent Heartbeat does not exist in the workspace'):
            self.cmd('monitor log-analytics workspace data-export create -g {rg} --workspace-name {workspace_name} -n {data_export_name_2} '
                     '--destination {sa_id_1} --enable -t "SecurityEvent Heartbeat"',
                     checks=[
                     ])
        with self.assertRaisesRegex(HttpResponseError, 'you can create 10 export rules to 10 different destinations'):
            self.cmd('monitor log-analytics workspace data-export create -g {rg} --workspace-name {workspace_name} -n {data_export_name_2} '
                     '--destination {sa_id_1} --enable -t {table_name}',
                     checks=[
                     ])
        with self.assertRaisesRegex(HttpResponseError, 'Table ABC does not exist in the workspace'):
            self.cmd('monitor log-analytics workspace data-export create -g {rg} --workspace-name {workspace_name} -n {data_export_name_2} '
                     '--destination {sa_id_1} --enable -t ABC',
                     checks=[
                     ])
        with self.assertRaisesRegex(HttpResponseError, 'you can create 10 export rules to 10 different destinations'):
            self.cmd('monitor log-analytics workspace data-export create -g {rg} --workspace-name {workspace_name} -n {data_export_name_2} '
                     '--destination {sa_id_1} --enable -t AppPerformanceCounters',
                     checks=[
                     ])
        self.cmd('monitor log-analytics workspace data-export show -g {rg} --workspace-name {workspace_name} -n {data_export_name}', checks=[
        ])

        self.cmd('monitor log-analytics workspace data-export list -g {rg} --workspace-name {workspace_name}', checks=[
            self.check('length(@)', 1)
        ])

        # Retarget the rule at an event hub destination.
        result = self.cmd('eventhubs namespace create --resource-group {rg} --name {namespacename}').get_output_in_json()
        self.kwargs.update({
            'namespace_id': result['id']
        })
        result = self.cmd('eventhubs eventhub create --resource-group {rg} --namespace-name {namespacename} --name {eventhubname}').get_output_in_json()
        self.kwargs.update({
            'eventhub_id': result['id']
        })
        self.cmd(
            'monitor log-analytics workspace data-export update -g {rg} --workspace-name {workspace_name} -n {data_export_name} '
            '--destination {namespace_id} --enable true -t Usage Alert',
            checks=[
            ])

        self.cmd('eventhubs eventhub list -g {rg} --namespace-name {namespacename}')

        self.cmd('monitor log-analytics workspace data-export delete -g {rg} --workspace-name {workspace_name} -n {data_export_name} -y')

        self.cmd(
            'monitor log-analytics workspace data-export create -g {rg} --workspace-name {workspace_name} -n {data_export_name} '
            '--destination {eventhub_id} --enable false -t {table_name}',
            checks=[
            ])

        self.cmd('monitor log-analytics workspace data-export delete -g {rg} --workspace-name {workspace_name} -n {data_export_name} -y')
        # Exit code 3 == resource not found for `show` after deletion.
        with self.assertRaisesRegex(SystemExit, '3'):
            self.cmd('monitor log-analytics workspace data-export show -g {rg} --workspace-name {workspace_name} -n {data_export_name}')
# Code example #39
# 0
    def test_monitor_log_analytics_workspace_linked_storage(self, resource_group, account_1,
                                                            account_2, account_3, account_4):
        """Scenario test for Log Analytics linked storage accounts.

        Creates a CustomLogs link, adds/removes accounts (by name and by
        resource id), then creates an AzureWatson link and verifies
        list/delete behavior.
        """
        from msrestazure.tools import resource_id
        self.kwargs.update({
            'name': self.create_random_name('clitest', 20),
            'name_2': self.create_random_name('clitest', 20),
            'rg': resource_group,
            'sa_1': account_1,
            'sa_2': account_2,
            'sa_3': account_3,
            'sa_4': account_4,
            'sa_id_1': resource_id(
                resource_group=resource_group,
                subscription=self.get_subscription_id(),
                name=account_1,
                namespace='Microsoft.Storage',
                type='storageAccounts'),
            'sa_id_2': resource_id(
                resource_group=resource_group,
                subscription=self.get_subscription_id(),
                name=account_2,
                namespace='Microsoft.Storage',
                type='storageAccounts'),
            'sa_id_3': resource_id(
                resource_group=resource_group,
                subscription=self.get_subscription_id(),
                name=account_3,
                namespace='Microsoft.Storage',
                type='storageAccounts'),
            'sa_id_4': resource_id(
                resource_group=resource_group,
                subscription=self.get_subscription_id(),
                name=account_4,
                namespace='Microsoft.Storage',
                type='storageAccounts'),
        })

        self.cmd("monitor log-analytics workspace create -g {rg} -n {name} --tags clitest=myron", checks=[
            self.check('provisioningState', 'Succeeded'),
            self.check('retentionInDays', 30),
            self.check('sku.name', 'pergb2018')
        ])

        # Bare account names are expanded to full ids server-side.
        self.cmd('monitor log-analytics workspace linked-storage create '
                 '--type CustomLogs -g {rg} -n {name} --storage-accounts {sa_1}',
                 checks=[
                     self.check('storageAccountIds[0]', '{sa_id_1}'),
                     self.check('name', 'customlogs')
                 ])

        # `add` accepts a mix of bare names and full resource ids.
        self.cmd('monitor log-analytics workspace linked-storage add '
                 '--type CustomLogs -g {rg} -n {name} --storage-accounts {sa_2} {sa_id_3}',
                 checks=[
                     self.check('storageAccountIds[0]', '{sa_id_1}'),
                     self.check('storageAccountIds[1]', '{sa_id_2}'),
                     self.check('storageAccountIds[2]', '{sa_id_3}')
                 ])

        self.cmd('monitor log-analytics workspace linked-storage remove '
                 '--type CustomLogs -g {rg} -n {name} --storage-accounts {sa_1}',
                 checks=[
                     self.check('storageAccountIds[0]', '{sa_id_2}'),
                     self.check('storageAccountIds[1]', '{sa_id_3}')
                 ])

        self.cmd('monitor log-analytics workspace linked-storage show '
                 '--type CustomLogs -g {rg} -n {name}',
                 checks=[
                     self.check('storageAccountIds[0]', '{sa_id_2}'),
                     self.check('storageAccountIds[1]', '{sa_id_3}')
                 ])

        # NOTE(review): {name_2} is created here but never used below — the
        # AzureWatson link is created on {name}; presumably intentional or an
        # oversight in the original test. TODO confirm.
        self.cmd("monitor log-analytics workspace create -g {rg} -n {name_2} --tags clitest=myron", checks=[
            self.check('provisioningState', 'Succeeded'),
            self.check('retentionInDays', 30),
            self.check('sku.name', 'pergb2018')
        ])

        self.cmd('monitor log-analytics workspace linked-storage create '
                 '--type AzureWatson -g {rg} -n {name} --storage-accounts {sa_1}',
                 checks=[
                     self.check('storageAccountIds[0]', '{sa_id_1}'),
                     self.check('name', 'azurewatson')
                 ])

        self.cmd('monitor log-analytics workspace linked-storage list '
                 '-g {rg} -n {name}',
                 checks=[
                     self.check('length(@)', 2)
                 ])

        self.cmd('monitor log-analytics workspace linked-storage delete '
                 '--type AzureWatson -g {rg} -n {name} -y')

        self.cmd('monitor log-analytics workspace linked-storage list '
                 '-g {rg} -n {name}',
                 checks=[
                     self.check('length(@)', 1)
                 ])
# Code example #40
# 0
def aks_kollect_cmd(
        cmd,  # pylint: disable=too-many-statements,too-many-locals
        client,
        resource_group_name: str,
        name: str,
        storage_account: str,
        sas_token: str,
        container_logs: str,
        kube_objects: str,
        node_logs: str,
        node_logs_windows: str) -> None:
    """Deploy the aks-periscope daemon set to an AKS cluster and upload the
    collected logs and diagnostic info to a storage account.

    :param cmd: CLI command context (provides ``cli_ctx``).
    :param client: managed-clusters operations client; used for ``get`` and
        ``list_cluster_admin_credentials``.
    :param resource_group_name: resource group of the managed cluster.
    :param name: name of the managed cluster.
    :param storage_account: storage account name or full resource id; when
        None, the account is looked up from the cluster's diagnostic settings.
    :param sas_token: pre-generated SAS token; when None, a one-day
        read/write token (plus a separate read-only one for the display URL)
        is generated from the account keys.
    :param container_logs: forwarded into the periscope kustomize config.
    :param kube_objects: forwarded into the periscope kustomize config.
    :param node_logs: forwarded into the periscope kustomize config.
    :param node_logs_windows: forwarded into the periscope kustomize config.
    :raises CLIError: when kubectl is missing, no storage account can be
        resolved, the account id is invalid, or the kubectl deployment fails.
    """
    colorama.init()

    mc = client.get(resource_group_name, name)

    # kubectl is required both to clean up any previous periscope deployment
    # and to apply the new one.
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    # Resolve the storage account: explicit argument wins; otherwise fall
    # back to the account configured in the cluster's diagnostic settings.
    storage_account_id = None
    if storage_account is None:
        print(
            "No storage account specified. Try getting storage account from diagnostic settings"
        )
        storage_account_id = _get_storage_account_from_diag_settings(
            cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError(
                "A storage account must be specified, since there isn't one in the diagnostic settings."
            )

    from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
                                   resource_id)
    if storage_account_id is None:
        # A bare account name is expanded into a full ARM id within the
        # cluster's resource group; a full id is used as-is.
        if not is_valid_resource_id(storage_account):
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage',
                type='storageAccounts',
                name=storage_account)
        else:
            storage_account_id = storage_account

    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)

    storage_account_name = parsed_storage_account['name']

    # When no SAS token was supplied, mint two one-day tokens from the
    # account keys: a read/write one for periscope to upload with, and a
    # read-only one to embed in the URL shown to the user.
    readonly_sas_token = None
    if sas_token is None:
        storage_client = get_storage_client(
            cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(
            parsed_storage_account['resource_group'], storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = _cloud_storage_account_service_factory(
            cmd.cli_ctx, kwargs)

        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b', 'sco', 'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b', 'sco', 'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        # Drop the leading '?' so the token can be appended after our own '?'.
        readonly_sas_token = readonly_sas_token.strip('?')

    # Deployment touches the cluster and shares data; get explicit consent.
    print()
    print(
        'This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
        f'save them to the storage account '
        f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
        f'outlined in {_format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print(
        'If you share access to that storage account to Azure support, you consent to the terms outlined'
        f' in {_format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return

    # Fetch admin credentials into a throwaway kubeconfig so the user's own
    # kubeconfig is left untouched.
    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    credentialResults = client.list_cluster_admin_credentials(
        resource_group_name, name, None)
    kubeconfig = credentialResults.kubeconfigs[0].value.decode(
        encoding='UTF-8')
    print_or_merge_credentials(temp_kubeconfig_path, kubeconfig, False, None)

    print()
    print("Starts collecting diag info for cluster %s " % name)

    # Base the container name on the fqdn (or private fqdn) of the managed cluster
    container_name = _generate_container_name(mc.fqdn, mc.private_fqdn)
    sas_token = sas_token.strip('?')

    cluster_features = _get_cluster_features(cmd.cli_ctx, resource_group_name,
                                             name)

    run_id = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%SZ")
    kustomize_yaml = _get_kustomize_yaml(storage_account_name, sas_token,
                                         container_name, run_id,
                                         cluster_features, container_logs,
                                         kube_objects, node_logs,
                                         node_logs_windows)
    # Write the kustomization into a temp folder so `kubectl apply -k` can
    # consume it; both file and folder are removed in the finally block.
    kustomize_folder = tempfile.mkdtemp()
    kustomize_file_path = os.path.join(kustomize_folder, "kustomization.yaml")
    try:
        with os.fdopen(os.open(kustomize_file_path, os.O_RDWR | os.O_CREAT),
                       'w+t') as kustomize_file:
            kustomize_file.write(kustomize_yaml)

        try:
            # Remove any leftovers from a previous periscope run before
            # deploying; --ignore-not-found keeps these calls idempotent.
            print()
            print("Cleaning up aks-periscope resources if existing")

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "serviceaccount,configmap,daemonset,secret", "--all", "-n",
                CONST_PERISCOPE_NAMESPACE, "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "ClusterRoleBinding", "aks-periscope-role-binding",
                "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "ClusterRoleBinding", "aks-periscope-role-binding-view",
                "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "ClusterRole", "aks-periscope-role", "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            # NOTE(review): stderr is discarded here (unlike the calls
            # above) — presumably because the 'apd' CRD may not be installed
            # and kubectl would print a noisy error; confirm.
            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "--all", "apd", "-n", CONST_PERISCOPE_NAMESPACE,
                "--ignore-not-found"
            ],
                            stderr=subprocess.DEVNULL)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "CustomResourceDefinition",
                "diagnostics.aks-periscope.azure.github.com",
                "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            print()
            print("Deploying aks-periscope")

            subprocess.check_output([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-k",
                kustomize_folder, "-n", CONST_PERISCOPE_NAMESPACE
            ],
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            raise CLIError(err.output)
    finally:
        os.remove(kustomize_file_path)
        os.rmdir(kustomize_folder)

    print()

    # Prefer the read-only token in the user-facing URL when one was minted.
    token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
    log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
                              f"{container_name}?{token_in_storage_account_url}"

    print(
        f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {_format_bright(storage_account_name)}'
    )

    print()
    print(
        f'You can download Azure Storage Explorer here '
        f'{_format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
        f' to check the logs by adding the storage account using the following URL:'
    )
    print(f'{_format_hyperlink(log_storage_account_url)}')

    print()
    if not prompt_y_n('Do you want to see analysis results now?', default="n"):
        print(
            f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
            f"anytime to check the analysis results.")
    else:
        _display_diagnostics_report(temp_kubeconfig_path)
コード例 #41
0
def validate_diagnostic_settings(cmd, namespace):
    """Normalize diagnostic-settings arguments on *namespace*.

    Expands bare names (--storage-account, --workspace, --event-hub-rule)
    into full ARM resource ids within the target resource group, collapses a
    full --event-hub id down to its name, and enforces that at least one
    sink (storage account, workspace, or event hub) was supplied.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id

    get_target_resource_validator(
        'resource_uri', required=True,
        preserve_resource_group_parameter=True)(cmd, namespace)
    if not namespace.resource_group_name:
        namespace.resource_group_name = parse_resource_id(
            namespace.resource_uri)['resource_group']

    def _expand(name, provider, res_type):
        # Build a full ARM id for a bare name within the target resource group.
        return resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace=provider,
            type=res_type,
            name=name)

    if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = _expand(
            namespace.storage_account, 'microsoft.Storage', 'storageAccounts')

    if namespace.workspace and not is_valid_resource_id(namespace.workspace):
        namespace.workspace = _expand(
            namespace.workspace, 'microsoft.OperationalInsights', 'workspaces')

    if namespace.event_hub and is_valid_resource_id(namespace.event_hub):
        namespace.event_hub = parse_resource_id(namespace.event_hub)['name']

    if namespace.event_hub_rule:
        if is_valid_resource_id(namespace.event_hub_rule):
            # Extract the event hub name from the rule id when not given.
            if not namespace.event_hub:
                namespace.event_hub = parse_resource_id(
                    namespace.event_hub_rule)['name']
        else:
            # A bare rule name needs --event-hub to anchor the full id.
            if not namespace.event_hub:
                raise CLIError(
                    'usage error: --event-hub-rule ID | --event-hub-rule NAME --event-hub NAME'
                )
            namespace.event_hub_rule = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.EventHub',
                type='namespaces',
                name=namespace.event_hub,
                child_type_1='AuthorizationRules',
                child_name_1=namespace.event_hub_rule)

    if not (namespace.storage_account or namespace.workspace or namespace.event_hub):
        raise CLIError(
            'usage error - expected one or more:  --storage-account NAME_OR_ID | --workspace NAME_OR_ID '
            '| --event-hub NAME_OR_ID | --event-hub-rule ID')

    # resource_group_name was only needed for id expansion; drop it so it is
    # not forwarded to the command handler.
    try:
        del namespace.resource_group_name
    except AttributeError:
        pass
コード例 #42
0
def _replica_create(cmd,
                    client,
                    resource_group_name,
                    server_name,
                    source_server,
                    no_wait=False,
                    location=None,
                    sku_name=None,
                    **kwargs):
    """Create a read replica of an existing MySQL/PostgreSQL/MariaDB server.

    *source_server* may be a bare server name (expanded within the target
    resource group) or a full ARM id. Location and SKU default to those of
    the source server.
    """
    # The provider namespace is inferred from the operations client type.
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    else:
        provider = 'Microsoft.DBforPostgreSQL'

    # Expand a bare name into a full id; anything with '/' that is not a
    # valid resource id is rejected.
    if not is_valid_resource_id(source_server):
        if '/' in source_server:
            raise CLIError('The provided source-server {} is invalid.'.format(
                source_server))
        source_server = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=resource_group_name,
            namespace=provider,
            type='servers',
            name=source_server)

    id_parts = parse_resource_id(source_server)
    try:
        master = client.get(id_parts['resource_group'], id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    # Inherit location and SKU from the source server unless overridden.
    if location is None:
        location = master.location
    if sku_name is None:
        sku_name = master.sku.name

    sdk_by_provider = {
        'Microsoft.DBforMySQL': mysql,
        'Microsoft.DBforPostgreSQL': postgresql,
        'Microsoft.DBforMariaDB': mariadb,
    }
    sdk = sdk_by_provider[provider]
    parameters = sdk.models.ServerForCreate(
        sku=sdk.models.Sku(name=sku_name),
        properties=sdk.models.ServerPropertiesForReplica(
            source_server_id=source_server),
        location=location)

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name,
                       server_name, parameters)
コード例 #43
0
ファイル: azure_rm_resource.py プロジェクト: zship/ansible
    def exec_module(self, **kwargs):
        """Resolve the target resource URL (unless given) and issue the REST
        request, honoring idempotency when requested.

        Returns the results dict with 'response' (parsed JSON when possible)
        and 'changed'.
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(
            GenericRestClient,
            base_url=self._cloud_environment.endpoints.resource_manager)

        # state=absent maps to HTTP DELETE; 204 (No Content) is also success.
        if self.state == 'absent':
            self.method = 'DELETE'
            self.status_code.append(204)

        if self.url is None:
            # Assemble the resource id from its parts. 'orphan' holds a
            # trailing type segment that has no accompanying name (e.g. when
            # listing a collection) and is appended to the URL at the end.
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # BUGFIX: this previously tested startswith('.microsoft'), which
            # is never true for a fully-qualified namespace such as
            # 'Microsoft.Network', so 'Microsoft.' was prepended even when
            # already present ('Microsoft.Microsoft.Network'). Only prepend
            # when the provider is not already fully qualified.
            if not (self.provider is None
                    or self.provider.lower().startswith('microsoft.')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)

            if orphan is not None:
                self.url += '/' + orphan
        query_parameters = {}
        query_parameters['api-version'] = self.api_version

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        needs_update = True
        response = None

        # Idempotency: GET the current state first and skip the write when
        # merging the requested body into it would change nothing.
        if self.idempotency:
            original = self.mgmt_client.query(self.url, "GET",
                                              query_parameters, None, None,
                                              [200, 404], 0, 0)

            if original.status_code == 404:
                if self.state == 'absent':
                    needs_update = False
            else:
                try:
                    response = json.loads(original.text)
                    needs_update = (dict_merge(response, self.body) !=
                                    response)
                except Exception:
                    # Non-JSON GET response: fall back to performing the write.
                    pass

        if needs_update:
            response = self.mgmt_client.query(self.url, self.method,
                                              query_parameters,
                                              header_parameters, self.body,
                                              self.status_code,
                                              self.polling_timeout,
                                              self.polling_interval)
            if self.state == 'present':
                try:
                    response = json.loads(response.text)
                except Exception:
                    response = response.text
            else:
                response = None

        self.results['response'] = response
        self.results['changed'] = needs_update

        return self.results
コード例 #44
0
    def test_aks_byo_appgw_with_ingress_appgw_addon(self, resource_group,
                                                    resource_group_location):
        """Scenario test: create an AKS cluster wired to a pre-existing
        (bring-your-own) application gateway via the ingress-appgw addon.

        Provisions a vnet with separate aks/appgw subnets, a public IP, and
        a Standard_v2 application gateway, then verifies the addon is
        enabled and points at the supplied gateway id.
        """
        aks_name = self.create_random_name('cliakstest', 16)
        vnet_name = self.create_random_name('cliakstest', 16)
        self.kwargs.update({
            'resource_group': resource_group,
            'aks_name': aks_name,
            'vnet_name': vnet_name
        })

        # create virtual network
        create_vnet = 'network vnet create --resource-group={resource_group} --name={vnet_name} ' \
                      '--address-prefix 11.0.0.0/16 --subnet-name aks-subnet --subnet-prefix 11.0.0.0/24  -o json'
        vnet = self.cmd(
            create_vnet,
            checks=[self.check('newVNet.provisioningState',
                               'Succeeded')]).get_output_in_json()

        # dedicated subnet for the application gateway, disjoint from aks-subnet
        create_subnet = 'network vnet subnet create -n appgw-subnet --resource-group={resource_group} --vnet-name {vnet_name} ' \
                        '--address-prefixes 11.0.1.0/24  -o json'
        self.cmd(create_subnet,
                 checks=[self.check('provisioningState', 'Succeeded')])

        vnet_id = vnet['newVNet']["id"]
        assert vnet_id is not None
        self.kwargs.update({
            'vnet_id': vnet_id,
        })

        # create public ip for app gateway
        create_pip = 'network public-ip create -n appgw-ip -g {resource_group} ' \
                     '--allocation-method Static --sku Standard  -o json'
        self.cmd(
            create_pip,
            checks=[self.check('publicIp.provisioningState', 'Succeeded')])

        # create app gateway
        create_appgw = 'network application-gateway create -n appgw -g {resource_group} ' \
                       '--sku Standard_v2 --public-ip-address appgw-ip --subnet {vnet_id}/subnets/appgw-subnet'
        self.cmd(create_appgw)

        # construct group id
        from msrestazure.tools import parse_resource_id, resource_id
        parsed_vnet_id = parse_resource_id(vnet_id)
        group_id = resource_id(subscription=parsed_vnet_id["subscription"],
                               resource_group=parsed_vnet_id["resource_group"])
        # the gateway id is built by hand rather than read back from the
        # create call above
        appgw_id = group_id + "/providers/Microsoft.Network/applicationGateways/appgw"

        self.kwargs.update({'appgw_id': appgw_id, 'appgw_group_id': group_id})

        # create aks cluster
        create_cmd = 'aks create -n {aks_name} -g {resource_group} --enable-managed-identity --service-principal xxxx --client-secret yyyy --generate-ssh-keys ' \
                     '--vnet-subnet-id {vnet_id}/subnets/aks-subnet ' \
                     '-a ingress-appgw --appgw-id {appgw_id} -o json'
        aks_cluster = self.cmd(
            create_cmd,
            checks=[
                self.check('provisioningState', 'Succeeded'),
                self.check('addonProfiles.ingressapplicationgateway.enabled',
                           True),
                self.check(
                    'addonProfiles.ingressapplicationgateway.config.applicationgatewayid',
                    appgw_id)
            ]).get_output_in_json()

        # keep the addon's managed-identity client id around for later checks
        addon_client_id = aks_cluster["addonProfiles"][
            "ingressapplicationgateway"]["identity"]["clientId"]

        self.kwargs.update({
            'addon_client_id': addon_client_id,
        })
コード例 #45
0
def format_resource_id(val, subscription_id, namespace, types, resource_group):
    """Return *val* unchanged when it is already a full ARM resource id;
    otherwise expand the bare name into a full id from the given parts."""
    if is_valid_resource_id(val):
        return val
    return resource_id(
        subscription=subscription_id,
        resource_group=resource_group,
        namespace=namespace,
        type=types,
        name=val)
コード例 #46
0
    def test_metric_alert_v2_scenario(self, resource_group, storage_account):
        """Scenario test for `az monitor metrics alert` (metric alerts v2):
        create/update/list/show/delete against a storage account scope, then
        a second alert with a wildcard dimension against a web app.
        """
        from msrestazure.tools import resource_id
        self.kwargs.update({
            'alert':
            'alert1',
            'sa':
            storage_account,
            'plan':
            'plan1',
            'app':
            self.create_random_name('app', 15),
            'ag1':
            'ag1',
            'ag2':
            'ag2',
            'webhooks':
            '{{test=banoodle}}',
            'sub':
            self.get_subscription_id(),
            'sa_id':
            resource_id(resource_group=resource_group,
                        subscription=self.get_subscription_id(),
                        name=storage_account,
                        namespace='Microsoft.Storage',
                        type='storageAccounts')
        })
        # two action groups: ag1 is attached at create, swapped for ag2 on update
        self.cmd('monitor action-group create -g {rg} -n {ag1}')
        self.cmd('monitor action-group create -g {rg} -n {ag2}')
        # create with two conditions carrying dimension filters
        self.cmd(
            'monitor metrics alert create -g {rg} -n {alert} --scopes {sa_id} --action {ag1} --description "Test" --condition "total transactions > 5 where ResponseType includes Success and ApiName includes GetBlob" --condition "avg SuccessE2ELatency > 250 where ApiName includes GetBlob or PutBlob"',
            checks=[
                self.check('description', 'Test'),
                self.check('severity', 2),
                self.check('autoMitigate', None),
                self.check('windowSize', '0:05:00'),
                self.check('evaluationFrequency', '0:01:00'),
                self.check('length(criteria.allOf)', 2),
                self.check('length(criteria.allOf[0].dimensions)', 2),
                self.check('length(criteria.allOf[1].dimensions)', 1)
            ])
        # update exercises add/remove of actions and conditions plus
        # frequency/window/tags/auto-mitigate changes in a single call
        self.cmd(
            'monitor metrics alert update -g {rg} -n {alert} --severity 3 --description "alt desc" --add-action ag2 test=best --remove-action ag1 --remove-condition cond0 --add-condition "total transactions < 100" --evaluation-frequency 5m --window-size 15m --tags foo=boo --auto-mitigate',
            checks=[
                self.check('description', 'alt desc'),
                self.check('severity', 3),
                self.check('autoMitigate', True),
                self.check('windowSize', '0:15:00'),
                self.check('evaluationFrequency', '0:05:00'),
                self.check('length(criteria.allOf)', 2),
                self.check('length(criteria.allOf[0].dimensions)', 1),
                self.check('length(criteria.allOf[1].dimensions)', 0),
                self.check(
                    "contains(actions[0].actionGroupId, 'actionGroups/ag2')",
                    True),
                self.check('length(actions)', 1)
            ])
        self.cmd(
            'monitor metrics alert update -g {rg} -n {alert} --enabled false',
            checks=[self.check('enabled', False)])
        self.cmd('monitor metrics alert list -g {rg}',
                 checks=self.check('length(@)', 1))
        self.cmd('monitor metrics alert show -g {rg} -n {alert}')
        self.cmd('monitor metrics alert delete -g {rg} -n {alert}')
        self.cmd('monitor metrics alert list -g {rg}',
                 checks=self.check('length(@)', 0))

        # test appservice plan with dimensions *
        self.cmd('appservice plan create -g {rg} -n {plan}')
        self.kwargs['app_id'] = self.cmd(
            'webapp create -g {rg} -n {app} -p plan1').get_output_in_json(
            )['id']
        self.cmd(
            'monitor metrics alert create -g {rg} -n {alert}2 --scopes {app_id} --action {ag1} --description "Test *" --condition "total Http4xx > 10 where Instance includes *"',
            checks=[
                self.check('length(criteria.allOf)', 1),
                self.check('length(criteria.allOf[0].dimensions)', 1),
                self.check('criteria.allOf[0].dimensions[0].values[0]', '*')
            ])
コード例 #47
0
def sqlvm_create(client,
                 cmd,
                 sql_virtual_machine_name,
                 resource_group_name,
                 sql_server_license_type,
                 location=None,
                 sql_image_sku=None,
                 enable_auto_patching=None,
                 day_of_week=None,
                 maintenance_window_starting_hour=None,
                 maintenance_window_duration=None,
                 enable_auto_backup=None,
                 enable_encryption=False,
                 retention_period=None,
                 storage_account_url=None,
                 storage_access_key=None,
                 backup_password=None,
                 backup_system_dbs=False,
                 backup_schedule_type=None,
                 full_backup_frequency=None,
                 full_backup_start_time=None,
                 full_backup_window_hours=None,
                 log_backup_frequency=None,
                 enable_key_vault_credential=None,
                 credential_name=None,
                 azure_key_vault_url=None,
                 service_principal_name=None,
                 service_principal_secret=None,
                 connectivity_type=None,
                 port=None,
                 sql_auth_update_username=None,
                 sql_auth_update_password=None,
                 sql_workload_type=None,
                 enable_r_services=None,
                 tags=None):
    '''
    Creates a SQL virtual machine.

    The SQL VM resource is bound to the Azure VM of the same name in the
    given resource group. Supplying any setting belonging to a feature
    group (auto-patching, auto-backup, key-vault credential) implicitly
    enables that feature, and missing secrets are prompted for
    interactively. Blocks until the create operation finishes and returns
    the resulting SQL VM resource.
    '''
    from azure.cli.core.commands.client_factory import get_subscription_id

    subscription_id = get_subscription_id(cmd.cli_ctx)

    # The SQL VM wraps an existing compute VM with the same name.
    virtual_machine_resource_id = resource_id(
        subscription=subscription_id,
        resource_group=resource_group_name,
        namespace='Microsoft.Compute',
        type='virtualMachines',
        name=sql_virtual_machine_name)

    tags = tags or {}

    # If customer has provided any auto_patching settings, enabling plugin should be True
    if (day_of_week or maintenance_window_duration
            or maintenance_window_starting_hour):
        enable_auto_patching = True

    auto_patching_object = AutoPatchingSettings(
        enable=enable_auto_patching,
        day_of_week=day_of_week,
        maintenance_window_starting_hour=maintenance_window_starting_hour,
        maintenance_window_duration=maintenance_window_duration)

    # If customer has provided any auto_backup settings, enabling plugin should be True
    if (enable_encryption or retention_period or storage_account_url
            or storage_access_key or backup_password or backup_system_dbs
            or backup_schedule_type or full_backup_frequency
            or full_backup_start_time or full_backup_window_hours
            or log_backup_frequency):
        enable_auto_backup = True
        if not storage_access_key:
            storage_access_key = prompt_pass('Storage Key: ', confirm=True)
        if enable_encryption and not backup_password:
            backup_password = prompt_pass('Backup Password: ', confirm=True)

    # BUGFIX: this span was garbled in the source (a syntax error where the
    # backup-password prompt and the AutoBackupSettings construction should
    # be), leaving 'auto_backup_object' undefined below. Restored to match
    # the upstream implementation; NOTE(review): confirm AutoBackupSettings
    # is imported at module level alongside AutoPatchingSettings.
    auto_backup_object = AutoBackupSettings(
        enable=enable_auto_backup,
        enable_encryption=enable_encryption if enable_auto_backup else None,
        retention_period=retention_period,
        storage_account_url=storage_account_url,
        storage_access_key=storage_access_key,
        password=backup_password,
        backup_system_dbs=backup_system_dbs if enable_auto_backup else None,
        backup_schedule_type=backup_schedule_type,
        full_backup_frequency=full_backup_frequency,
        full_backup_start_time=full_backup_start_time,
        full_backup_window_hours=full_backup_window_hours,
        log_backup_frequency=log_backup_frequency)

    # If customer has provided any key vault credential settings, enabling plugin should be True
    if (credential_name or azure_key_vault_url or service_principal_name
            or service_principal_secret):
        enable_key_vault_credential = True
        if not service_principal_secret:
            service_principal_secret = prompt_pass(
                'Service Principal Secret: ', confirm=True)

    keyvault_object = KeyVaultCredentialSettings(
        enable=enable_key_vault_credential,
        credential_name=credential_name,
        azure_key_vault_url=azure_key_vault_url,
        service_principal_name=service_principal_name,
        service_principal_secret=service_principal_secret)

    connectivity_object = SqlConnectivityUpdateSettings(
        port=port,
        connectivity_type=connectivity_type,
        sql_auth_update_user_name=sql_auth_update_username,
        sql_auth_update_password=sql_auth_update_password)

    workload_type_object = SqlWorkloadTypeUpdateSettings(
        sql_workload_type=sql_workload_type)

    additional_features_object = AdditionalFeaturesServerConfigurations(
        is_rservices_enabled=enable_r_services)

    server_configuration_object = ServerConfigurationsManagementSettings(
        sql_connectivity_update_settings=connectivity_object,
        sql_workload_type_update_settings=workload_type_object,
        additional_features_server_configurations=additional_features_object)

    sqlvm_object = SqlVirtualMachine(
        location=location,
        virtual_machine_resource_id=virtual_machine_resource_id,
        sql_server_license_type=sql_server_license_type,
        sql_image_sku=sql_image_sku,
        auto_patching_settings=auto_patching_object,
        auto_backup_settings=auto_backup_object,
        key_vault_credential_settings=keyvault_object,
        server_configurations_management_settings=server_configuration_object,
        tags=tags)

    # Since it's a running operation, we will do the put and then the get to display the instance.
    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(False,
                                                  client.create_or_update,
                                                  resource_group_name,
                                                  sql_virtual_machine_name,
                                                  sqlvm_object))

    return client.get(resource_group_name, sql_virtual_machine_name)
コード例 #48
0
    def exec_module(self, **kwargs):
        """Resolve the target resource URL and api-version (unless given),
        then issue the REST request, honoring idempotency when requested.

        Returns the results dict with 'response' (parsed JSON when possible)
        and 'changed'.
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(
            GenericRestClient,
            base_url=self._cloud_environment.endpoints.resource_manager)

        # state=absent maps to HTTP DELETE; 204 (No Content) is also success.
        if self.state == 'absent':
            self.method = 'DELETE'
            self.status_code.append(204)

        if self.url is None:
            # Assemble the resource id from its parts. 'orphan' holds a
            # trailing type segment that has no accompanying name (e.g. when
            # listing a collection) and is appended to the URL at the end.
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # BUGFIX: this previously tested startswith('.microsoft'), which
            # is never true for a fully-qualified namespace such as
            # 'Microsoft.Network', so 'Microsoft.' was prepended even when
            # already present ('Microsoft.Microsoft.Network'). Only prepend
            # when the provider is not already fully qualified.
            if not (self.provider is None
                    or self.provider.lower().startswith('microsoft.')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)

            if orphan is not None:
                self.url += '/' + orphan

        # if api_version was not specified, get latest one
        if not self.api_version:
            # BUGFIX: keep these defined so the failure message below cannot
            # raise NameError when the URL has no '/providers/' segment.
            provider = resourceType = None
            try:
                # extract provider and resource type
                if "/providers/" in self.url:
                    provider = self.url.split("/providers/")[1].split("/")[0]
                    resourceType = self.url.split(provider +
                                                  "/")[1].split("/")[0]
                    url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
                    api_versions = json.loads(
                        self.mgmt_client.query(url, "GET", {
                            'api-version': '2015-01-01'
                        }, None, None, [200], 0, 0).text)
                    # Pick the newest api-version offered for this type.
                    for rt in api_versions['resourceTypes']:
                        if rt['resourceType'].lower() == resourceType.lower():
                            self.api_version = rt['apiVersions'][0]
                            break
                if not self.api_version:
                    self.fail("Couldn't find api version for {0}/{1}".format(
                        provider, resourceType))
            except Exception as exc:
                self.fail("Failed to obtain API version: {0}".format(str(exc)))

        query_parameters = {}
        query_parameters['api-version'] = self.api_version

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        needs_update = True
        response = None

        # Idempotency: GET the current state first and skip the write when
        # merging the requested body into it would change nothing.
        if self.idempotency:
            original = self.mgmt_client.query(self.url, "GET",
                                              query_parameters, None, None,
                                              [200, 404], 0, 0)

            if original.status_code == 404:
                if self.state == 'absent':
                    needs_update = False
            else:
                try:
                    response = json.loads(original.text)
                    needs_update = (dict_merge(response, self.body) !=
                                    response)
                except Exception:
                    # Non-JSON GET response: fall back to performing the write.
                    pass

        if needs_update:
            response = self.mgmt_client.query(self.url, self.method,
                                              query_parameters,
                                              header_parameters, self.body,
                                              self.status_code,
                                              self.polling_timeout,
                                              self.polling_interval)
            if self.state == 'present':
                try:
                    response = json.loads(response.text)
                except Exception:
                    response = response.text
            else:
                response = None

        self.results['response'] = response
        self.results['changed'] = needs_update

        return self.results
コード例 #49
0
def add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the AGIC (ingress application gateway) addon identity the role
    assignments it needs on the gateway's resource group, the subnet, and/or
    the node virtual network.

    The cluster's service principal is preferred when one is configured;
    otherwise the addon's managed identity is used.  Failed assignments are
    logged as warnings, never raised.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (hasattr(result, 'service_principal_profile')
            and hasattr(result.service_principal_profile, 'client_id')
            and result.service_principal_profile.client_id != 'msi'):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (hasattr(result, 'addon_profiles')
          and CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles
          and hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')
          and hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id')):
        service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is None:
        return

    def _assign(role, scope, kind, target):
        # Best effort: warn (do not fail) when the role assignment cannot be created.
        if not add_role_assignment(cmd.cli_ctx, role, service_principal_msi_id,
                                   is_service_principal, scope=scope):
            logger.warning(
                'Could not create a role assignment for ' + kind + ': %s '
                'specified in %s addon. '
                'Are you an Owner on this subscription?', target,
                CONST_INGRESS_APPGW_ADDON_NAME)

    config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
    from msrestazure.tools import parse_resource_id, resource_id

    if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
        appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
        appgw_parts = parse_resource_id(appgw_id)
        # Scope to the gateway's resource group so AGIC can manage the gateway.
        group_scope = resource_id(subscription=appgw_parts["subscription"],
                                  resource_group=appgw_parts["resource_group"])
        _assign('Contributor', group_scope, 'application gateway', appgw_id)

    if CONST_INGRESS_APPGW_SUBNET_ID in config:
        subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
        _assign('Network Contributor', subnet_id, 'subnet', subnet_id)

    if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
        # A CIDR means AGIC will create the subnet itself, so it needs rights
        # on the node vnet (only relevant when the agent pool uses a custom vnet).
        if result.agent_pool_profiles[0].vnet_subnet_id is not None:
            vnet_parts = parse_resource_id(
                result.agent_pool_profiles[0].vnet_subnet_id)
            vnet_scope = resource_id(subscription=vnet_parts["subscription"],
                                     resource_group=vnet_parts["resource_group"],
                                     namespace="Microsoft.Network",
                                     type="virtualNetworks",
                                     name=vnet_parts["name"])
            _assign('Contributor', vnet_scope, 'virtual network', vnet_scope)
コード例 #50
0
    def test_aro_list_credentials(self, resource_group):
        """Scenario test: create an ARO cluster on a fresh vnet and verify
        that `aro list-credentials` reports the kubeadmin user."""
        from msrestazure.tools import resource_id

        subscription = self.get_subscription_id()
        master_subnet = self.create_random_name('dev_master', 14)
        worker_subnet = self.create_random_name('dev_worker', 14)

        def _subnet_resource(subnet_name):
            # Full resource ID of a subnet inside the shared 'dev-vnet'.
            return resource_id(subscription=subscription,
                               resource_group=resource_group,
                               namespace='Microsoft.Network',
                               type='virtualNetworks',
                               name='dev-vnet',
                               child_type_1='subnets',
                               child_name_1=subnet_name)

        self.kwargs.update({
            'name': self.create_random_name('aro', 14),
            'resource_group': resource_group,
            'subscription': subscription,
            'master_subnet': master_subnet,
            'worker_subnet': worker_subnet,
            # Random /24s under 10.0.0.0/9 so repeated runs do not collide.
            'master_ip_range': '10.{}.{}.0/24'.format(randint(0, 127), randint(0, 255)),
            'worker_ip_range': '10.{}.{}.0/24'.format(randint(0, 127), randint(0, 255)),
            'master_subnet_resource': _subnet_resource(master_subnet),
            'worker_subnet_resource': _subnet_resource(worker_subnet),
        })

        self.cmd('network vnet create -g {rg} -n dev-vnet --address-prefixes 10.0.0.0/9')
        self.cmd('network vnet subnet create -g {rg} --vnet-name dev-vnet -n {master_subnet} --address-prefixes {master_ip_range} --service-endpoints Microsoft.ContainerRegistry')
        self.cmd('network vnet subnet create -g {rg} --vnet-name dev-vnet -n {worker_subnet} --address-prefixes {worker_ip_range} --service-endpoints Microsoft.ContainerRegistry')
        # The master subnet must allow private link service traffic.
        self.cmd('network vnet subnet update -g {rg} --vnet-name dev-vnet -n {master_subnet} --disable-private-link-service-network-policies true')

        # Pin UUID generation so the recorded RBAC requests stay reproducible.
        with mock.patch('azure.cli.command_modules.aro._rbac._gen_uuid',
                        side_effect=self.create_guid):
            self.cmd('aro create -g {rg} -n {name} --master-subnet {master_subnet_resource} --worker-subnet {worker_subnet_resource} --subscription {subscription} --tags test=list-cred')

        self.cmd('aro list-credentials -g {rg} -n {name} --subscription {subscription}',
                 checks=[self.check('kubeadminUsername', 'kubeadmin')])
コード例 #51
0
    def test_databricks(self, resource_group, key_vault):
        """End-to-end scenario test for `az databricks workspace`: create
        (default and custom managed resource group), update (tags,
        prepare-encryption, CMK enable/disable), show, list, delete."""
        subscription_id = self.get_subscription_id()
        self.kwargs.update({
            'kv': key_vault,
            'workspace_name': 'my-test-workspace',
            'custom_workspace_name': 'my-custom-workspace',
            'managed_resource_group': 'custom-managed-rg'
        })

        # Create a premium workspace with default settings.
        self.cmd(
            'az databricks workspace create '
            '--resource-group {rg} '
            '--name {workspace_name} '
            '--location "eastus2euap" '
            '--sku premium',
            checks=[
                self.check('name', '{workspace_name}'),
                self.check('sku.name', 'premium')
            ])

        # Create a second workspace with an explicit managed resource group.
        managed_resource_group_id = '/subscriptions/{}/resourceGroups/{}'.format(
            subscription_id, self.kwargs.get('managed_resource_group', ''))
        self.cmd(
            'az databricks workspace create '
            '--resource-group {rg} '
            '--name {custom_workspace_name} '
            '--location "westus" '
            '--sku standard '
            '--managed-resource-group {managed_resource_group}',
            checks=[
                self.check('name', '{custom_workspace_name}'),
                self.check('managedResourceGroupId', managed_resource_group_id)
            ])

        # --prepare-encryption provisions the storage account identity
        # required before customer-managed keys can be configured.
        workspace = self.cmd(
            'az databricks workspace update '
            '--resource-group {rg} '
            '--name {workspace_name} '
            '--tags type=test '
            '--prepare-encryption',
            checks=[
                self.check('tags.type', 'test'),
                self.exists('storageAccountIdentity.principalId')
            ]).get_output_in_json()
        principalId = workspace['storageAccountIdentity']['principalId']

        self.kwargs.update({'oid': principalId, 'key_name': 'testkey'})

        # Grant the workspace storage identity access to key operations.
        self.cmd('az keyvault set-policy -n {kv} --object-id {oid} -g {rg} '
                 '--key-permissions get wrapKey unwrapKey recover')

        # CMK requires soft delete and purge protection on the vault.
        self.cmd(
            'az keyvault update -n {kv} -g {rg} --set properties.enableSoftDelete=true'
        )

        keyvault = self.cmd(
            'az keyvault update -n {kv} -g {rg} --set properties.enablePurgeProtection=true'
        ).get_output_in_json()

        key = self.cmd(
            'az keyvault key create -n {key_name} -p software --vault-name {kv}'
        ).get_output_in_json()
        # The key version is the last path segment of the key identifier (kid).
        key_version = key['key']['kid'].rsplit('/', 1)[1]

        self.kwargs.update({
            'key_version': key_version,
            'key_vault': keyvault['properties']['vaultUri']
        })

        # Switch the workspace to customer-managed-key encryption.
        self.cmd(
            'az databricks workspace update '
            '--resource-group {rg} '
            '--name {workspace_name} '
            '--key-source Microsoft.KeyVault '
            '--key-name {key_name} '
            '--key-version {key_version} '
            '--key-vault {key_vault}',
            checks=[
                self.check('parameters.encryption.value.keySource',
                           'Microsoft.Keyvault'),
                self.check('parameters.encryption.value.keyName',
                           '{key_name}'),
                self.check('parameters.encryption.value.keyVersion',
                           '{key_version}'),
                self.check('parameters.encryption.value.keyVaultUri',
                           '{key_vault}')
            ])

        # Revert to Microsoft-managed keys; key settings should be cleared.
        self.cmd(
            'az databricks workspace update '
            '--resource-group {rg} '
            '--name {workspace_name} '
            '--key-source Default',
            checks=[
                self.check('parameters.encryption.value.keySource', 'Default'),
                self.not_exists('parameters.encryption.value.keyName')
            ])

        self.cmd(
            'az databricks workspace show '
            '--resource-group {rg} '
            '--name {workspace_name}',
            checks=[self.check('name', '{workspace_name}')])

        # `show --ids` accepts a full resource ID instead of name/group.
        workspace_resource_id = resource_id(subscription=subscription_id,
                                            resource_group=resource_group,
                                            namespace='Microsoft.Databricks',
                                            type='workspaces',
                                            name=self.kwargs.get(
                                                'workspace_name', ''))

        self.cmd('az databricks workspace show '
                 '--ids {}'.format(workspace_resource_id),
                 checks=[self.check('name', '{workspace_name}')])

        self.cmd('az databricks workspace list '
                 '--resource-group {rg} ',
                 checks=[])

        # Clean up both workspaces.
        self.cmd(
            'az databricks workspace delete '
            '--resource-group {rg} '
            '--name {workspace_name} '
            '-y',
            checks=[])

        self.cmd(
            'az databricks workspace delete '
            '--resource-group {rg} '
            '--name {custom_workspace_name} '
            '-y',
            checks=[])
コード例 #52
0
def create(cmd,
           client,
           resource_group_name,
           activity_log_alert_name,
           scopes=None,
           condition=None,
           action_groups=frozenset(),
           tags=None,
           disable=False,
           description=None,
           webhook_properties=None):
    """Create an activity log alert rule.

    Defaults: scope is the whole resource group, and the condition fires on
    ServiceHealth events.  Raises CLIError when an alert with the same name
    already exists in the resource group.
    """
    from msrestazure.tools import resource_id
    from azure.mgmt.monitor.models import (ActivityLogAlertResource,
                                           ActivityLogAlertAllOfCondition,
                                           ActivityLogAlertLeafCondition,
                                           ActivityLogAlertActionList)
    from azure.mgmt.monitor.models import ActivityLogAlertActionGroup as ActionGroup
    from azure.cli.core.commands.client_factory import get_subscription_id
    from knack.util import CLIError

    # Default scope: the containing resource group.
    if not scopes:
        default_scope = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=resource_group_name)
        scopes = [default_scope]

    # Creating over an existing alert would silently overwrite it; refuse.
    existing = _get_alert_settings(client,
                                   resource_group_name,
                                   activity_log_alert_name,
                                   throw_if_missing=False)
    if existing:
        raise CLIError(
            'The activity log alert {} already exists in resource group {}.'.
            format(activity_log_alert_name, resource_group_name))

    # Default condition: fire on ServiceHealth events.
    if not condition:
        condition = ActivityLogAlertAllOfCondition(all_of=[
            ActivityLogAlertLeafCondition(field='category',
                                          equals='ServiceHealth')
        ])

    # Resolve action group names/IDs to full resource IDs, then wrap each
    # with the optional webhook properties.
    group_ids = _normalize_names(cmd.cli_ctx, action_groups,
                                 resource_group_name, 'microsoft.insights',
                                 'actionGroups')
    alert_actions = ActivityLogAlertActionList(action_groups=[
        ActionGroup(action_group_id=gid, webhook_properties=webhook_properties)
        for gid in group_ids
    ])

    settings = ActivityLogAlertResource(location='global',
                                        scopes=scopes,
                                        condition=condition,
                                        actions=alert_actions,
                                        enabled=not disable,
                                        description=description,
                                        tags=tags)

    return client.create_or_update(
        resource_group_name=resource_group_name,
        activity_log_alert_name=activity_log_alert_name,
        activity_log_alert=settings)
コード例 #53
0
    def test_dynamic_metric_alert_multiple_scopes(self, resource_group, vm1,
                                                  vm2):
        """Scenario test: a dynamic-threshold metric alert spanning two VMs —
        create, update (swap action group and condition), list, show,
        delete."""
        from azure.mgmt.core.tools import resource_id
        self.kwargs.update({
            'alert':
            'alert1',
            'plan':
            'plan1',
            'app':
            self.create_random_name('app', 15),
            'ag1':
            'ag1',
            'ag2':
            'ag2',
            'webhooks':
            '{{test=banoodle}}',
            'sub':
            self.get_subscription_id(),
            'vm_id':
            resource_id(resource_group=resource_group,
                        subscription=self.get_subscription_id(),
                        name=vm1,
                        namespace='Microsoft.Compute',
                        type='virtualMachines'),
            'vm_id_2':
            resource_id(resource_group=resource_group,
                        subscription=self.get_subscription_id(),
                        name=vm2,
                        namespace='Microsoft.Compute',
                        type='virtualMachines')
        })
        self.cmd('monitor action-group create -g {rg} -n {ag1}')
        self.cmd('monitor action-group create -g {rg} -n {ag2}')
        # Create an alert over both VM scopes with a dynamic low-sensitivity
        # condition and verify the recorded defaults.
        self.cmd(
            'monitor metrics alert create -g {rg} -n {alert} --scopes {vm_id} {vm_id_2} --action {ag1} --region westus --condition "avg Percentage CPU > dynamic low 2 of 4 since 2020-11-01T16:00:00.000Z" --description "High CPU"',
            checks=[
                self.check('description', 'High CPU'),
                self.check('severity', 2),
                self.check('autoMitigate', None),
                self.check('windowSize', '0:05:00'),
                self.check('evaluationFrequency', '0:01:00'),
                self.check('length(scopes)', 2),
                self.check('criteria.allOf[0].alertSensitivity', 'Low'),
                self.check('criteria.allOf[0].criterionType',
                           'DynamicThresholdCriterion'),
                self.check(
                    'criteria.allOf[0].failingPeriods.minFailingPeriodsToAlert',
                    2.0),
                self.check(
                    'criteria.allOf[0].failingPeriods.numberOfEvaluationPeriods',
                    4.0),
                self.check('criteria.allOf[0].operator', 'GreaterThan'),
                self.check('criteria.allOf[0].ignoreDataBefore',
                           '2020-11-01T16:00:00+00:00')
            ])

        # Update: swap action group ag1 -> ag2, replace the condition with a
        # medium-sensitivity GreaterOrLessThan one, and change timings/tags.
        self.cmd(
            'monitor metrics alert update -g {rg} -n {alert} --severity 3 --description "High Or Low CPU" --add-action ag2 test=best --remove-action ag1 --remove-conditions cond0 --evaluation-frequency 5m --window-size 15m --tags foo=boo --auto-mitigate --add-condition "avg Percentage CPU >< dynamic medium 1 of 6 since 2020-10-01T10:23:00.000Z"',
            checks=[
                self.check('description', 'High Or Low CPU'),
                self.check('severity', 3),
                self.check('autoMitigate', True),
                self.check('windowSize', '0:15:00'),
                self.check('evaluationFrequency', '0:05:00'),
                self.check('length(criteria.allOf)', 1),
                self.check('length(scopes)', 2),
                self.check('criteria.allOf[0].alertSensitivity', 'Medium'),
                self.check('criteria.allOf[0].criterionType',
                           'DynamicThresholdCriterion'),
                self.check(
                    'criteria.allOf[0].failingPeriods.minFailingPeriodsToAlert',
                    1.0),
                self.check(
                    'criteria.allOf[0].failingPeriods.numberOfEvaluationPeriods',
                    6.0),
                self.check('criteria.allOf[0].operator', 'GreaterOrLessThan'),
                self.check('criteria.allOf[0].ignoreDataBefore',
                           '2020-10-01T10:23:00+00:00')
            ])

        # List/show/delete round trip: exactly one alert before the delete,
        # none after it.
        self.cmd('monitor metrics alert list -g {rg}',
                 checks=self.check('length(@)', 1))
        self.cmd('monitor metrics alert show -g {rg} -n {alert}')
        self.cmd('monitor metrics alert delete -g {rg} -n {alert}')
        self.cmd('monitor metrics alert list -g {rg}',
                 checks=self.check('length(@)', 0))
コード例 #54
0
def list_policy_states(cmd,
                       client,
                       all_results=False,
                       management_group_name=None,
                       resource_group_name=None,
                       resource=None,
                       namespace=None,
                       resource_type_parent=None,
                       resource_type=None,
                       policy_set_definition_name=None,
                       policy_definition_name=None,
                       policy_assignment_name=None,
                       from_value=None,
                       to_value=None,
                       order_by_clause=None,
                       select_clause=None,
                       top_value=None,
                       filter_clause=None,
                       apply_clause=None):
    """List policy compliance states at the narrowest scope implied by the
    supplied arguments.

    Scope precedence: policy assignment > policy definition > policy set
    definition > individual resource > resource group > management group >
    subscription.

    Returns:
        The list of policy state records (the ``value`` of the query result).
    """
    from azure.mgmt.policyinsights.models import QueryOptions

    query_options = QueryOptions(top=top_value,
                                 order_by=order_by_clause,
                                 select=select_clause,
                                 from_property=from_value,
                                 to=to_value,
                                 filter=filter_clause,
                                 apply=apply_clause)

    # 'latest' returns only the most recent record per resource;
    # 'default' returns the full history.
    policy_states_resource = 'default' if all_results is True else 'latest'

    subscription_id = get_subscription_id(cmd.cli_ctx)

    if policy_assignment_name:
        if resource_group_name:
            return client.list_query_results_for_resource_group_level_policy_assignment(
                policy_states_resource, subscription_id, resource_group_name,
                policy_assignment_name, query_options).value
        return client.list_query_results_for_subscription_level_policy_assignment(
            policy_states_resource, subscription_id, policy_assignment_name,
            query_options).value

    if policy_definition_name:
        return client.list_query_results_for_policy_definition(
            policy_states_resource, subscription_id, policy_definition_name,
            query_options).value

    if policy_set_definition_name:
        return client.list_query_results_for_policy_set_definition(
            policy_states_resource, subscription_id,
            policy_set_definition_name, query_options).value

    if resource:
        if not is_valid_resource_id(resource):
            # Expand a bare resource name into a full resource ID, folding a
            # parent type (e.g. 'servers') into the child type when given.
            if resource_type_parent:
                parent = _remove_leading_and_trailing_slash(
                    resource_type_parent)
                resource_type = "{}/{}".format(parent, resource_type)
            resource = resource_id(subscription=subscription_id,
                                   resource_group=resource_group_name,
                                   namespace=namespace,
                                   type=resource_type,
                                   name=resource)
        return client.list_query_results_for_resource(
            policy_states_resource, resource, query_options).value

    if resource_group_name:
        return client.list_query_results_for_resource_group(
            policy_states_resource, subscription_id, resource_group_name,
            query_options).value

    if management_group_name:
        return client.list_query_results_for_management_group(
            policy_states_resource, management_group_name,
            query_options).value

    return client.list_query_results_for_subscription(
        policy_states_resource, subscription_id, query_options).value
コード例 #55
0
    def exec_module(self, **kwargs):
        """Execute the module: build the resource URL (when not supplied)
        and GET it, populating ``self.results`` with ``url`` and a
        ``response`` list.

        Returns:
            dict: ``self.results``.
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(
            GenericRestClient,
            base_url=self._cloud_environment.endpoints.resource_manager)

        if self.url is None:
            # Assemble the resource ID from its individual parts.
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # BUGFIX: the prefix test used '.microsoft', which never matches a
            # provider namespace, so providers already qualified as
            # 'Microsoft.X' were prefixed again ('Microsoft.Microsoft.X').
            if not (self.provider is None
                    or self.provider.lower().startswith('microsoft.')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        # A sub-resource with a type but no name is appended
                        # verbatim as a trailing collection segment.
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)

            if orphan is not None:
                self.url += '/' + orphan

        self.results['url'] = self.url

        query_parameters = {}
        query_parameters['api-version'] = self.api_version

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        response = self.mgmt_client.query(self.url, "GET", query_parameters,
                                          header_parameters, None, [200, 404],
                                          0, 0)

        try:
            response = json.loads(response.text)
            # BUGFIX: 'response is list' compared the object against the list
            # *type* and was always False, so list payloads got wrapped in an
            # extra list. isinstance performs the intended type check.
            if isinstance(response, list):
                self.results['response'] = response
            else:
                self.results['response'] = [response]
        except Exception:
            # Non-JSON body (e.g. empty 404 response): report no results.
            self.results['response'] = []

        return self.results
コード例 #56
0
def _server_restore(cmd,
                    client,
                    resource_group_name,
                    server_name,
                    source_server,
                    restore_point_in_time,
                    no_wait=False):
    """Restore a MySQL/PostgreSQL/MariaDB single server from a source server
    at a given point in time.

    ``source_server`` may be a bare server name (expanded within
    ``resource_group_name``) or a full resource ID.  Raises ValueError for a
    malformed source server or when the source cannot be fetched.
    """
    # The client type we were handed determines the provider namespace;
    # PostgreSQL is the default.
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'
    else:
        provider = 'Microsoft.DBforPostgreSQL'

    if not is_valid_resource_id(source_server):
        # Only a bare name (no '/') can be expanded into a full ID.
        if len(source_server.split('/')) != 1:
            raise ValueError(
                'The provided source-server {} is invalid.'.format(
                    source_server))
        source_server = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=resource_group_name,
            namespace=provider,
            type='servers',
            name=source_server)

    # All three SDKs expose the same ServerForCreate/ServerPropertiesForRestore
    # shapes; pick the module matching the provider.
    sdk = {'Microsoft.DBforMySQL': mysql,
           'Microsoft.DBforPostgreSQL': postgresql,
           'Microsoft.DBforMariaDB': mariadb}[provider]
    parameters = sdk.models.ServerForCreate(
        properties=sdk.models.ServerPropertiesForRestore(
            source_server_id=source_server,
            restore_point_in_time=restore_point_in_time),
        location=None)

    # Here is a workaround that we don't support cross-region restore currently,
    # so the location must be set as the same as source server (not the resource group)
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'],
                                          id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name,
                       server_name, parameters)
コード例 #57
0
def process_nw_topology_namespace(cmd, namespace):
    """Validate and normalize arguments for the network watcher topology
    command.

    Exactly one target must be supplied: a resource group, a vnet (name or
    ID), or a subnet (name or ID).  Names are expanded to full resource IDs,
    the target fields are wrapped in SubResource objects, and the Network
    Watcher for the target's location is resolved into the namespace.

    Raises:
        CLIError: when the --resource-group/--vnet/--subnet combination is
            ambiguous or incomplete.
    """
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    SubResource = cmd.get_models('SubResource')
    subscription_id = get_subscription_id(cmd.cli_ctx)

    location = namespace.location
    rg = namespace.target_resource_group_name
    vnet = namespace.target_vnet
    subnet = namespace.target_subnet

    # Keep the ID form (or None) so full IDs can be told apart from names.
    vnet_id = vnet if is_valid_resource_id(vnet) else None
    subnet_id = subnet if is_valid_resource_id(subnet) else None

    if rg and not vnet and not subnet:
        # targeting resource group - OK
        pass
    elif subnet:
        subnet_usage = CLIError(
            'usage error: --subnet ID | --subnet NAME --resource-group NAME --vnet NAME'
        )
        # targeting subnet - OK
        # A subnet ID must stand alone; a subnet name requires both
        # --resource-group and --vnet (given as a name, not an ID).
        if subnet_id and (vnet or rg):
            raise subnet_usage
        elif not subnet_id and (not rg or not vnet or vnet_id):
            raise subnet_usage
        if subnet_id:
            # 'subnet' already holds the full resource ID in this branch.
            rg = parse_resource_id(subnet_id)['resource_group']
            namespace.target_subnet = SubResource(subnet)
        else:
            subnet_id = subnet_id or resource_id(subscription=subscription_id,
                                                 resource_group=rg,
                                                 namespace='Microsoft.Network',
                                                 type='virtualNetworks',
                                                 name=vnet,
                                                 child_type_1='subnets',
                                                 child_name_1=subnet)
            # Only the subnet targeting field may remain populated.
            namespace.target_resource_group_name = None
            namespace.target_vnet = None
            namespace.target_subnet = SubResource(subnet_id)
    elif vnet:
        # targeting vnet - OK
        vnet_usage = CLIError(
            'usage error: --vnet ID | --vnet NAME --resource-group NAME')
        # A vnet ID must stand alone; a vnet name requires --resource-group.
        if vnet_id and (subnet or rg):
            raise vnet_usage
        elif not vnet_id and not rg or subnet:
            raise vnet_usage
        if vnet_id:
            # 'vnet' already holds the full resource ID in this branch.
            rg = parse_resource_id(vnet_id)['resource_group']
            namespace.target_vnet = SubResource(vnet)
        else:
            vnet_id = vnet_id or resource_id(subscription=subscription_id,
                                             resource_group=rg,
                                             namespace='Microsoft.Network',
                                             type='virtualNetworks',
                                             name=vnet)
            namespace.target_resource_group_name = None
            namespace.target_vnet = SubResource(vnet_id)
    else:
        raise CLIError(
            'usage error: --resource-group NAME | --vnet NAME_OR_ID | --subnet NAME_OR_ID'
        )

    # retrieve location from resource group
    if not location:
        resource_client = \
            get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).resource_groups
        resource_group = resource_client.get(rg)
        namespace.location = resource_group.location  # pylint: disable=no-member

    get_network_watcher_from_location(remove=True,
                                      watcher_name='network_watcher_name',
                                      rg_name='resource_group_name')(cmd,
                                                                     namespace)
コード例 #58
0
    def exec_module(self, **kwargs):
        """Execute the info module: build the resource URL (when not
        supplied), resolve the latest API version if needed, then GET the
        URL, following 'nextLink' paging until all results are collected
        into ``self.results['response']``.

        Returns:
            dict: ``self.results``.
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.url is None:
            # Assemble the resource ID from its individual parts.
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # BUGFIX: the prefix test used '.microsoft', which never matches a
            # provider namespace, so providers already qualified as
            # 'Microsoft.X' were prefixed again ('Microsoft.Microsoft.X').
            if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        # A sub-resource with a type but no name is appended
                        # verbatim as a trailing collection segment.
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)

            if orphan is not None:
                self.url += '/' + orphan

        # if api_version was not specified, get latest one
        if not self.api_version:
            try:
                # extract provider and resource type
                if "/providers/" in self.url:
                    provider = self.url.split("/providers/")[1].split("/")[0]
                    resourceType = self.url.split(provider + "/")[1].split("/")[0]
                    url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
                    api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
                    for rt in api_versions['resourceTypes']:
                        if rt['resourceType'].lower() == resourceType.lower():
                            # Provider metadata lists versions newest-first.
                            self.api_version = rt['apiVersions'][0]
                            break
                if not self.api_version:
                    self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
            except Exception as exc:
                self.fail("Failed to obtain API version: {0}".format(str(exc)))

        self.results['url'] = self.url

        query_parameters = {}
        query_parameters['api-version'] = self.api_version

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        skiptoken = None

        # Page through the results: each page may carry a 'nextLink' that is
        # passed back as the skiptoken for the next request.
        while True:
            if skiptoken:
                query_parameters['skiptoken'] = skiptoken
            response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0)
            try:
                response = json.loads(response.text)
                if isinstance(response, dict):
                    if response.get('value'):
                        self.results['response'] = self.results['response'] + response['value']
                        skiptoken = response.get('nextLink')
                    else:
                        self.results['response'] = self.results['response'] + [response]
            except Exception as e:
                self.fail('Failed to parse response: ' + str(e))
            if not skiptoken:
                break
        return self.results
コード例 #59
0
def validate_vnet(cmd, namespace):
    """Validate the virtual-network arguments for an Azure Spring Cloud instance.

    Expands --vnet/--app-subnet/--service-runtime-subnet into full resource
    IDs, checks that both subnets belong to the same VNet and differ from each
    other, verifies the VNet location matches the instance location, validates
    every subnet of the VNet, and fills in a default reserved CIDR range when
    none was supplied.

    :raises CLIError: on any malformed or inconsistent vnet/subnet argument.
    """
    # No vnet-related option at all: nothing to validate.
    if not (namespace.vnet or namespace.app_subnet or
            namespace.service_runtime_subnet or namespace.reserved_cidr_range):
        return
    validate_vnet_required_parameters(namespace)

    if namespace.vnet:
        vnet_id = namespace.vnet
        if is_valid_resource_id(vnet_id):
            parts = parse_resource_id(vnet_id)
            looks_like_vnet = (parts['namespace'].lower() == 'microsoft.network' and
                               parts['type'].lower() == 'virtualnetworks')
            if not looks_like_vnet:
                raise CLIError(
                    '--vnet {0} is not a valid VirtualNetwork resource ID'.
                    format(vnet_id))
        else:
            # A bare VNet name must not contain any path separators.
            if '/' in vnet_id:
                raise CLIError(
                    '--vnet {0} is not a valid name or resource ID'.format(
                        vnet_id))
            vnet_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group,
                namespace='Microsoft.Network',
                type='virtualNetworks',
                name=vnet_id)
        # Expand the (possibly bare) subnet names into full resource IDs.
        namespace.app_subnet = _construct_subnet_id(vnet_id,
                                                    namespace.app_subnet)
        namespace.service_runtime_subnet = _construct_subnet_id(
            vnet_id, namespace.service_runtime_subnet)
    else:
        # Both subnets were given as full IDs; they must share a single VNet.
        app_vnet = _parse_vnet_id_from_subnet(namespace.app_subnet)
        runtime_vnet = _parse_vnet_id_from_subnet(
            namespace.service_runtime_subnet)
        if app_vnet.lower() != runtime_vnet.lower():
            raise CLIError(
                '--app-subnet and --service-runtime-subnet should be in the same Virtual Networks.'
            )
        vnet_id = app_vnet

    if namespace.app_subnet.lower() == namespace.service_runtime_subnet.lower():
        raise CLIError(
            '--app-subnet and --service-runtime-subnet should not be the same.'
        )

    vnet_obj = _get_vnet(cmd, vnet_id)

    instance_location = namespace.location
    if instance_location is None:
        instance_location = _get_rg_location(cmd.cli_ctx,
                                             namespace.resource_group)
    else:
        # Normalize a display name such as "East US" to "eastus" for comparison.
        instance_location = ''.join(instance_location.lower().split(' '))
    if vnet_obj.location.lower() != instance_location.lower():
        raise CLIError(
            '--vnet and Azure Spring Cloud instance should be in the same location.'
        )

    for subnet in vnet_obj.subnets:
        _validate_subnet(namespace, subnet)

    if namespace.reserved_cidr_range:
        _validate_cidr_range(namespace)
    elif vnet_obj and vnet_obj.address_space and vnet_obj.address_space.address_prefixes:
        namespace.reserved_cidr_range = _set_default_cidr_range(
            vnet_obj.address_space.address_prefixes)
    else:
        namespace.reserved_cidr_range = '10.234.0.0/16,10.244.0.0/16,172.17.0.1/16'
コード例 #60
0
def _server_georestore(cmd,
                       client,
                       resource_group_name,
                       server_name,
                       sku_name,
                       location,
                       source_server,
                       backup_retention=None,
                       geo_redundant_backup=None,
                       no_wait=False,
                       **kwargs):
    """Geo-restore a MySQL/PostgreSQL/MariaDB server from a source server.

    The RP provider is inferred from the operations *client* type.  When
    *source_server* is a bare name it is expanded into a full resource ID
    within *resource_group_name*; when *sku_name* is None it is inherited
    from the source server.

    :param client: servers operations client (MySQL/PostgreSQL/MariaDB).
    :param sku_name: target SKU, or None to copy the source server's SKU.
    :param source_server: source server name or full resource ID.
    :raises ValueError: if *source_server* is malformed or cannot be fetched.
    :return: LRO poller (or the raw result when no_wait is set).
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    if not is_valid_resource_id(source_server):
        # A bare name (no '/') is resolved within the target resource group.
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(
                cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError(
                'The provided source-server {} is invalid.'.format(
                    source_server))

    # The three RPs expose structurally identical models; pick the right
    # SDK module once instead of duplicating the construction per provider.
    sdk_module = {
        'Microsoft.DBforMySQL': mysql,
        'Microsoft.DBforPostgreSQL': postgresql,
        'Microsoft.DBforMariaDB': mariadb,
    }[provider]
    models = sdk_module.models
    parameters = models.ServerForCreate(
        sku=models.Sku(name=sku_name),
        properties=models.ServerPropertiesForGeoRestore(
            storage_profile=models.StorageProfile(
                backup_retention_days=backup_retention,
                geo_redundant_backup=geo_redundant_backup),
            source_server_id=source_server),
        location=location)

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(
            source_server_id_parts['resource_group'],
            source_server_id_parts['name'])
        # Inherit the source server's SKU when none was requested explicitly.
        if parameters.sku.name is None:
            parameters.sku.name = source_server_object.sku.name
    except Exception as e:
        # Chain the cause so the original SDK error is not lost (PEP 3134).
        raise ValueError('Unable to get source server: {}.'.format(str(e))) from e

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name,
                       server_name, parameters)