Example #1
def transform_effective_nsg(result):
    from collections import OrderedDict
    from msrestazure.tools import parse_resource_id
    transformed = []
    for item in result['value']:
        association = item['association']
        try:
            nic = parse_resource_id(association['networkInterface']['id'])['name']
        except TypeError:
            nic = '-'
        try:
            subnet = parse_resource_id(association['subnet']['id'])['name']
        except TypeError:
            subnet = '-'
        nsg = parse_resource_id(item['networkSecurityGroup']['id'])['name']
        print_names = True
        for rule in item['effectiveSecurityRules']:
            transformed.append(OrderedDict([
                ('NIC', nic if print_names else ' '),
                ('Subnet', subnet if print_names else ' '),
                ('NSG', nsg if print_names else ' '),
                ('Rule Name', rule['name']),
                ('Protocol', rule['protocol']),
                ('Direction', rule['direction']),
                ('Access', rule['access'])
            ]))
            print_names = False
    return transformed
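For context, a minimal sketch of what parse_resource_id returns for a NIC ID (the ID below is hypothetical). The 'name' and 'resource_group' keys are the ones the transform relies on; when the association's 'networkInterface' entry is None, subscripting it is what raises the TypeError handled above.

from msrestazure.tools import parse_resource_id

nic_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
          '/resourceGroups/demo-rg/providers/Microsoft.Network'
          '/networkInterfaces/demo-nic')  # hypothetical ID
parts = parse_resource_id(nic_id)
print(parts['name'])            # demo-nic
print(parts['resource_group'])  # demo-rg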
Example #2
def process_nw_test_connectivity_namespace(cmd, namespace):
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id

    compute_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machines
    vm_name = parse_resource_id(namespace.source_resource)['name']
    rg = namespace.resource_group_name or parse_resource_id(namespace.source_resource).get('resource_group', None)
    if not rg:
        raise CLIError('usage error: --source-resource ID | --source-resource NAME --resource-group NAME')
    vm = compute_client.get(rg, vm_name)
    namespace.location = vm.location  # pylint: disable=no-member
    get_network_watcher_from_location(remove=True)(cmd, namespace)

    if namespace.source_resource and not is_valid_resource_id(namespace.source_resource):
        namespace.source_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=rg,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.source_resource)

    if namespace.dest_resource and not is_valid_resource_id(namespace.dest_resource):
        namespace.dest_resource = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Compute',
            type='virtualMachines',
            name=namespace.dest_resource)
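As a sanity check, resource_id is the inverse of parse_resource_id: given keyword parts, it assembles a canonical ARM ID. A minimal sketch with hypothetical values:

from msrestazure.tools import resource_id

vm_id = resource_id(
    subscription='00000000-0000-0000-0000-000000000000',  # hypothetical
    resource_group='demo-rg',
    namespace='Microsoft.Compute',
    type='virtualMachines',
    name='demo-vm')
# /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/demo-rg
#     /providers/Microsoft.Compute/virtualMachines/demo-vm  (one line)
print(vm_id)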
Example #3
def validate_diagnostic_settings(cmd, namespace):
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    from knack.util import CLIError

    get_target_resource_validator('resource_uri', required=True, preserve_resource_group_parameter=True)(cmd, namespace)
    if not namespace.resource_group_name:
        namespace.resource_group_name = parse_resource_id(namespace.resource_uri)['resource_group']

    if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                                resource_group=namespace.resource_group_name,
                                                namespace='microsoft.Storage',
                                                type='storageAccounts',
                                                name=namespace.storage_account)

    if namespace.workspace and not is_valid_resource_id(namespace.workspace):
        namespace.workspace = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                          resource_group=namespace.resource_group_name,
                                          namespace='microsoft.OperationalInsights',
                                          type='workspaces',
                                          name=namespace.workspace)

    if namespace.event_hub and is_valid_resource_id(namespace.event_hub):
        namespace.event_hub = parse_resource_id(namespace.event_hub)['name']

    if namespace.event_hub_rule:
        if not is_valid_resource_id(namespace.event_hub_rule):
            if not namespace.event_hub:
                raise CLIError('usage error: --event-hub-rule ID | --event-hub-rule NAME --event-hub NAME')
            # use value from --event-hub if the rule is a name
            namespace.event_hub_rule = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.EventHub',
                type='namespaces',
                name=namespace.event_hub,
                child_type_1='AuthorizationRules',
                child_name_1=namespace.event_hub_rule)
        elif not namespace.event_hub:
            # extract the event hub name from `--event-hub-rule` if provided as an ID
            namespace.event_hub = parse_resource_id(namespace.event_hub_rule)['name']

    if not any([namespace.storage_account, namespace.workspace, namespace.event_hub]):
        raise CLIError(
            'usage error - expected one or more:  --storage-account NAME_OR_ID | --workspace NAME_OR_ID '
            '| --event-hub NAME_OR_ID | --event-hub-rule ID')

    try:
        del namespace.resource_group_name
    except AttributeError:
        pass
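The child_type_1/child_name_1 arguments used for --event-hub-rule append a nested segment to the assembled ID. A minimal sketch, again with hypothetical names:

from msrestazure.tools import resource_id

rule_id = resource_id(
    subscription='00000000-0000-0000-0000-000000000000',  # hypothetical
    resource_group='demo-rg',
    namespace='Microsoft.EventHub',
    type='namespaces',
    name='demo-ns',
    child_type_1='AuthorizationRules',
    child_name_1='demo-rule')
# .../providers/Microsoft.EventHub/namespaces/demo-ns/AuthorizationRules/demo-rule
print(rule_id)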
Example #4
def build_msi_role_assignment(vm_vmss_name, vm_vmss_resource_id, role_definition_id,
                              role_assignment_guid, identity_scope, is_vm=True):
    from msrestazure.tools import parse_resource_id
    result = parse_resource_id(identity_scope)
    if result.get('type'):  # is a resource id?
        name = '{}/Microsoft.Authorization/{}'.format(result['name'], role_assignment_guid)
        assignment_type = '{}/{}/providers/roleAssignments'.format(result['namespace'], result['type'])
    else:
        name = role_assignment_guid
        assignment_type = 'Microsoft.Authorization/roleAssignments'

    # pylint: disable=line-too-long
    msi_rp_api_version = '2015-08-31-PREVIEW'
    return {
        'name': name,
        'type': assignment_type,
        'apiVersion': '2015-07-01',  # the minimum api-version to create the assignment
        'dependsOn': [
            'Microsoft.Compute/{}/{}'.format('virtualMachines' if is_vm else 'virtualMachineScaleSets', vm_vmss_name)
        ],
        'properties': {
            'roleDefinitionId': role_definition_id,
            'principalId': "[reference('{}/providers/Microsoft.ManagedIdentity/Identities/default', '{}').principalId]".format(
                vm_vmss_resource_id, msi_rp_api_version),
            'scope': identity_scope
        }
    }
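The result.get('type') check above distinguishes a resource-scoped assignment from a broader scope: for a full resource ID parse_resource_id yields a 'type' key, while for a bare subscription or resource-group scope it does not. A hedged illustration with hypothetical IDs:

from msrestazure.tools import parse_resource_id

resource_scope = ('/subscriptions/00000000-0000-0000-0000-000000000000'
                  '/resourceGroups/demo-rg/providers/Microsoft.Storage'
                  '/storageAccounts/demosa')          # hypothetical
group_scope = ('/subscriptions/00000000-0000-0000-0000-000000000000'
               '/resourceGroups/demo-rg')

print(parse_resource_id(resource_scope).get('type'))  # storageAccounts
print(parse_resource_id(group_scope).get('type'))     # None - no provider segment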
Example #5
def parse_domain_name(domain):
    from msrestazure.tools import parse_resource_id, is_valid_resource_id
    domain_name = None
    if is_valid_resource_id(domain):
        parsed_domain_id = parse_resource_id(domain)
        domain_name = parsed_domain_id['resource_name']
    return domain_name
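Note the use of 'resource_name' rather than 'name' here: for a top-level resource the two keys match, but for a nested ID 'name' is the root resource while 'resource_name' is the deepest child. A small sketch with hypothetical IDs:

from msrestazure.tools import parse_resource_id

parent = parse_resource_id(
    '/subscriptions/s/resourceGroups/rg/providers/Microsoft.EventGrid/domains/d1')
print(parent['name'], parent['resource_name'])   # d1 d1

nested = parse_resource_id(
    '/subscriptions/s/resourceGroups/rg/providers/Microsoft.EventGrid'
    '/domains/d1/topics/t1')
print(nested['name'], nested['resource_name'])   # d1 t1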
Example #6
def transform_sqlvm_output(result):
    '''
    Transforms the result of SQL virtual machine group to eliminate unnecessary parameters.
    '''
    from collections import OrderedDict
    from msrestazure.tools import parse_resource_id
    try:
        resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
        # Create a dictionary with the relevant parameters
        output = OrderedDict([('id', result.id),
                              ('location', result.location),
                              ('name', result.name),
                              ('provisioningState', result.provisioning_state),
                              ('sqlImageOffer', result.sql_image_offer),
                              ('sqlImageSku', result.sql_image_sku),
                              ('resourceGroup', resource_group),
                              ('sqlServerLicenseType', result.sql_server_license_type),
                              ('virtualMachineResourceId', result.virtual_machine_resource_id),
                              ('tags', result.tags)])

        # Note, wsfcDomainCredentials will not display
        if result.sql_virtual_machine_group_resource_id is not None:
            output['sqlVirtualMachineGroupResourceId'] = result.sql_virtual_machine_group_resource_id
        if result.auto_patching_settings is not None:
            output['autoPatchingSettings'] = format_auto_patching_settings(result.auto_patching_settings)
        if result.auto_backup_settings is not None:
            output['autoBackupSettings'] = format_auto_backup_settings(result.auto_backup_settings)
        if result.server_configurations_management_settings is not None:
            output['serverConfigurationManagementSettings'] = format_server_configuration_management_settings(result.server_configurations_management_settings)

        return output
    except AttributeError:
        from msrest.pipeline import ClientRawResponse
        # Return the response object if the formatting fails
        return None if isinstance(result, ClientRawResponse) else result
Example #7
    def resolve_storage_source(self, source):
        blob_uri = None
        disk = None
        snapshot = None
        if source.lower().endswith('.vhd'):
            blob_uri = source
            return (blob_uri, disk, snapshot)

        from msrestazure.tools import parse_resource_id
        tokenize = parse_resource_id(source)
        if tokenize.get('type') == 'disks':
            disk = source
            return (blob_uri, disk, snapshot)

        if tokenize.get('type') == 'snapshots':
            snapshot = source
            return (blob_uri, disk, snapshot)

        # a resource ID of some other type: neither a disk nor a snapshot
        if 'type' in tokenize:
            return (blob_uri, disk, snapshot)

        # source can be name of snapshot or disk
        snapshot_instance = self.get_snapshot(source)
        if snapshot_instance:
            snapshot = snapshot_instance.id
            return (blob_uri, disk, snapshot)

        disk_instance = self.get_disk(source)
        if disk_instance:
            disk = disk_instance.id
        return (blob_uri, disk, snapshot)
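The dispatch above hinges on the 'type' key. A minimal sketch, using a hypothetical disk ID, of how the branches fire:

from msrestazure.tools import parse_resource_id

disk_id = ('/subscriptions/s/resourceGroups/rg/providers'
           '/Microsoft.Compute/disks/demo-disk')  # hypothetical
print(parse_resource_id(disk_id).get('type'))      # disks

# A bare name has no '/subscriptions/...' shape, so 'type' is absent and
# the method falls through to the snapshot/disk lookups by name.
print(parse_resource_id('demo-disk').get('type'))  # None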
Example #8
def _replica_create(cmd, client, resource_group_name, server_name, source_server, no_wait=False, **kwargs):
    provider = 'Microsoft.DBForMySQL' if isinstance(client, MySqlServersOperations) else 'Microsoft.DBforPostgreSQL'
    # set source server id
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise CLIError('The provided source-server {} is invalid.'.format(source_server))

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    parameters = None
    if provider == 'Microsoft.DBForMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=source_server_object.sku.name),
            properties=mysql.models.ServerPropertiesForReplica(source_server_id=source_server),
            location=source_server_object.location)

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
Example #9
def get_storage_account_endpoint(cmd, storage_account, is_wasb):
    from ._client_factory import cf_storage
    from msrestazure.tools import parse_resource_id, is_valid_resource_id
    host = None
    if is_valid_resource_id(storage_account):
        parsed_storage_account = parse_resource_id(storage_account)
        resource_group_name = parsed_storage_account['resource_group']
        storage_account_name = parsed_storage_account['resource_name']

        storage_client = cf_storage(cmd.cli_ctx)
        storage_account = storage_client.storage_accounts.get_properties(
            resource_group_name=resource_group_name,
            account_name=storage_account_name)

        def extract_endpoint(storage_account, is_wasb):
            if not storage_account:
                return None
            return storage_account.primary_endpoints.dfs if not is_wasb else storage_account.primary_endpoints.blob

        def extract_host(uri):
            import re
            return uri and re.search('//(.*)/', uri).groups()[0]

        host = extract_host(extract_endpoint(storage_account, is_wasb))
    return host
Example #10
def _validate_name_or_id(
        cli_ctx, resource_group_name, property_value, property_type, parent_value, parent_type):
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import parse_resource_id, is_valid_resource_id
    has_parent = parent_type is not None
    if is_valid_resource_id(property_value):
        resource_id_parts = parse_resource_id(property_value)
        value_supplied_was_id = True
    elif has_parent:
        resource_id_parts = dict(
            name=parent_value,
            resource_group=resource_group_name,
            namespace=parent_type.split('/')[0],
            type=parent_type.split('/')[1],
            subscription=get_subscription_id(cli_ctx),
            child_name_1=property_value,
            child_type_1=property_type)
        value_supplied_was_id = False
    else:
        resource_id_parts = dict(
            name=property_value,
            resource_group=resource_group_name,
            namespace=property_type.split('/')[0],
            type=property_type.split('/')[1],
            subscription=get_subscription_id(cli_ctx))
        value_supplied_was_id = False
    return (resource_id_parts, value_supplied_was_id)
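A hedged usage sketch of both branches, assuming a cli_ctx object is in scope; all names and IDs are hypothetical:

# Bare name, no parent: the parts dict is assembled from the CLI context.
parts, was_id = _validate_name_or_id(
    cli_ctx, 'demo-rg', 'demo-acct', 'Microsoft.Storage/storageAccounts',
    parent_value=None, parent_type=None)
# was_id == False

# Full ID: it is parsed as-is.
parts, was_id = _validate_name_or_id(
    cli_ctx, 'demo-rg',
    '/subscriptions/s/resourceGroups/demo-rg/providers'
    '/Microsoft.Storage/storageAccounts/demo-acct',
    'Microsoft.Storage/storageAccounts', None, None)
# was_id == True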
Example #11
            def __call__(self, parser, namespace, values, option_string=None):
                ''' The SplitAction will take the given ID parameter and spread the parsed
                parts of the id into the individual backing fields.

                Since the id value is expected to be of type `IterateValue`, all the backing
                (dest) fields will also be of type `IterateValue`
                '''
                from msrestazure.tools import parse_resource_id
                import json
                import os
                if isinstance(values, str):
                    values = [values]
                expanded_values = []
                for val in values:
                    try:
                        # support piping values from JSON. Does not require use of --query
                        json_vals = json.loads(val)
                        if not isinstance(json_vals, list):
                            json_vals = [json_vals]
                        for json_val in json_vals:
                            if 'id' in json_val:
                                expanded_values += [json_val['id']]
                    except ValueError:
                        # supports piping of --ids to the command when using TSV. Requires use of --query
                        expanded_values = expanded_values + val.split(os.linesep)
                try:
                    for value in expanded_values:
                        parts = parse_resource_id(value)
                        for arg in [arg for arg in arguments.values() if arg.type.settings.get('id_part')]:
                            self.set_argument_value(namespace, arg, parts)
                except Exception as ex:
                    raise ValueError(ex)
Example #12
def check_existence(cli_ctx, value, resource_group, provider_namespace, resource_type,
                    parent_name=None, parent_type=None):
    # check for name or ID and set the type flags
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from msrestazure.azure_exceptions import CloudError
    from msrestazure.tools import parse_resource_id
    from azure.cli.core.profiles import ResourceType
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).resources

    id_parts = parse_resource_id(value)

    rg = id_parts.get('resource_group', resource_group)
    ns = id_parts.get('namespace', provider_namespace)

    if parent_name and parent_type:
        parent_path = '{}/{}'.format(parent_type, parent_name)
        resource_name = id_parts.get('child_name_1', value)
        resource_type = id_parts.get('child_type_1', resource_type)
    else:
        parent_path = ''
        resource_name = id_parts['name']
        resource_type = id_parts.get('type', resource_type)
    api_version = _resolve_api_version(cli_ctx, provider_namespace, resource_type, parent_path)

    try:
        resource_client.get(rg, ns, parent_path, resource_type, resource_name, api_version)
        return True
    except CloudError:
        return False
Example #13
 def get_source_vm(self):
     vm_resource_id = format_resource_id(self.source,
                                         self.subscription_id,
                                         'Microsoft.Compute',
                                         'virtualMachines',
                                         self.resource_group)
     resource = parse_resource_id(vm_resource_id)
     return self.get_vm(resource['resource_group'], resource['name']) if resource['type'] == 'virtualMachines' else None
Example #14
 def get_resource_type(resource_id):
     parsed = parse_resource_id(resource_id)
     # parse_resource_id returns a dictionary with "child_type_#" keys that
     # represent the sequence of nested types; "type" stores the root type.
     child_type_keys = [k for k in parsed.keys() if k.find("child_type_") != -1]
     types = [parsed.get(k) for k in sorted(child_type_keys)]
     types.insert(0, parsed.get('type'))
     return '/'.join(types)
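For example, a child resource ID flattens back into a slash-joined type path (hypothetical ID):

ext_id = ('/subscriptions/s/resourceGroups/rg/providers/Microsoft.Compute'
          '/virtualMachines/demo-vm/extensions/demo-ext')
print(get_resource_type(ext_id))  # virtualMachines/extensions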
Example #15
def _query_account_rg(cli_ctx, account_name):
    """Query the storage account's resource group, which the mgmt sdk requires."""
    scf = get_mgmt_service_client(cli_ctx, CUSTOM_MGMT_STORAGE)
    acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)
    if acc:
        from msrestazure.tools import parse_resource_id
        return parse_resource_id(acc.id)['resource_group'], scf
    raise ValueError("Storage account '{}' not found.".format(account_name))
Example #16
def process_autoscale_create_namespace(cmd, namespace):
    from msrestazure.tools import parse_resource_id

    validate_tags(namespace)
    get_target_resource_validator('resource', required=True, preserve_resource_group_parameter=True)(cmd, namespace)
    if not namespace.resource_group_name:
        namespace.resource_group_name = parse_resource_id(namespace.resource).get('resource_group', None)
    get_default_location_from_resource_group(cmd, namespace)
Example #17
def get_network_watcher_from_vm(cmd, namespace):
    from msrestazure.tools import parse_resource_id

    compute_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machines
    vm_name = parse_resource_id(namespace.vm)['name']
    vm = compute_client.get(namespace.resource_group_name, vm_name)
    namespace.location = vm.location  # pylint: disable=no-member
    get_network_watcher_from_location()(cmd, namespace)
Example #18
def experiment_show_table_format(experiment):
    """Format the experiment as a table"""
    from msrestazure.tools import parse_resource_id
    row = OrderedDict()
    row['Name'] = experiment['name']
    row['Resource Group'] = experiment['resourceGroup']
    row['Workspace'] = parse_resource_id(experiment['id'])['name']
    row['State'] = experiment['provisioningState']
    return row
Example #19
def get_arm_resource_by_id(cli_ctx, arm_id, api_version=None):
    from msrestazure.tools import parse_resource_id, is_valid_resource_id

    if not is_valid_resource_id(arm_id):
        raise CLIError("'{}' is not a valid ID.".format(arm_id))

    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)

    if not api_version:

        parts = parse_resource_id(arm_id)

        # to retrieve the provider, we need to know the namespace
        namespaces = {k: v for k, v in parts.items() if 'namespace' in k}

        # every ARM ID has at least one namespace, so start with that
        namespace = namespaces.pop('namespace')
        namespaces.pop('resource_namespace')
        # find the most specific child namespace (if any) and use that value instead
        highest_child = 0
        for k, v in namespaces.items():
            child_number = int(k.split('_')[2])
            if child_number > highest_child:
                namespace = v
                highest_child = child_number

        # retrieve provider info for the namespace
        provider = client.providers.get(namespace)

        # assemble the resource type key used by the provider list operation.  type1/type2/type3/...
        resource_type_str = ''
        if not highest_child:
            resource_type_str = parts['resource_type']
        else:
            types = {int(k.split('_')[2]): v for k, v in parts.items() if k.startswith('child_type')}
            for k in sorted(types.keys()):
                if k < highest_child:
                    continue
                resource_type_str = '{}{}/'.format(resource_type_str, parts['child_type_{}'.format(k)])
            resource_type_str = resource_type_str.rstrip('/')

        api_version = None
        rt = next((t for t in provider.resource_types if t.resource_type.lower() == resource_type_str.lower()), None)
        if not rt:
            from azure.cli.core.parser import IncorrectUsageError
            raise IncorrectUsageError('Resource type {} not found.'.format(resource_type_str))
        try:
            # if the service specifies, use the default API version
            api_version = rt.default_api_version
        except AttributeError:
            # if the service doesn't specify, use the most recent non-preview API version unless there is only a
            # single API version. API versions are returned by the service in a sorted list
            api_version = next((x for x in rt.api_versions if not x.endswith('preview')), rt.api_versions[0])

    return client.resources.get_by_id(arm_id, api_version)
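The namespace bookkeeping above matters for extension resources, where a child segment introduces its own provider. A hedged sketch of the keys parse_resource_id produces for such an ID, assuming the msrestazure versions these examples target (names hypothetical):

from msrestazure.tools import parse_resource_id

lock_id = ('/subscriptions/s/resourceGroups/rg/providers/Microsoft.Compute'
           '/virtualMachines/demo-vm/providers/Microsoft.Authorization'
           '/locks/demo-lock')
parts = parse_resource_id(lock_id)
print(parts['namespace'])          # Microsoft.Compute
print(parts['child_namespace_1'])  # Microsoft.Authorization
print(parts['child_type_1'])       # locks
print(parts['child_name_1'])       # demo-lock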
Example #20
def _server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, no_wait=False):
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    parameters = None
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='servers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    if provider == 'Microsoft.DBforMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            properties=mysql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            properties=postgresql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforMariaDB':
        from azure.mgmt.rdbms import mariadb
        parameters = mariadb.models.ServerForCreate(
            properties=mariadb.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)

    parameters.properties.source_server_id = source_server
    parameters.properties.restore_point_in_time = restore_point_in_time

    # Workaround: cross-region restore is not currently supported, so the
    # location must match the source server's (not the resource group's).
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
Example #21
def _verify_keyvault_good_for_encryption(cli_ctx, disk_vault_id, kek_vault_id, vm_or_vmss, force):
    def _report_client_side_validation_error(msg):
        if force:
            logger.warning("WARNING: %s %s", msg, "Encryption might fail.")
        else:
            from knack.util import CLIError
            raise CLIError("ERROR: {}".format(msg))

    resource_type = "VMSS" if vm_or_vmss.type.lower().endswith("virtualmachinescalesets") else "VM"

    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.cli.core.profiles import ResourceType
    from msrestazure.tools import parse_resource_id

    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
    disk_vault_resource_info = parse_resource_id(disk_vault_id)
    key_vault = client.get(disk_vault_resource_info['resource_group'], disk_vault_resource_info['name'])

    # ensure vault has 'EnabledForDiskEncryption' permission
    if not key_vault.properties.enabled_for_disk_encryption:
        _report_client_side_validation_error("Keyvault '{}' is not enabled for disk encryption.".format(
            disk_vault_resource_info['resource_name']))

    if kek_vault_id:
        kek_vault_info = parse_resource_id(kek_vault_id)
        if disk_vault_resource_info['name'].lower() != kek_vault_info['name'].lower():
            client.get(kek_vault_info['resource_group'], kek_vault_info['name'])

    # verify subscription matches
    vm_vmss_resource_info = parse_resource_id(vm_or_vmss.id)
    if vm_vmss_resource_info['subscription'].lower() != disk_vault_resource_info['subscription'].lower():
        _report_client_side_validation_error("{} {}'s subscription does not match keyvault's subscription."
                                             .format(resource_type, vm_vmss_resource_info['name']))

    # verify region matches
    if key_vault.location.replace(' ', '').lower() != vm_or_vmss.location.replace(' ', '').lower():
        _report_client_side_validation_error(
            "{} {}'s region does not match keyvault's region.".format(resource_type, vm_vmss_resource_info['name']))
Example #22
def _get_resource_group_from_account_name(client, account_name):
    """
    Fetch resource group from Data Lake Store account name
    :param str account_name: name of the Data Lake Store account
    :return: resource group name or None
    :rtype: str
    """
    for acct in client.list():
        id_comps = parse_resource_id(acct.id)
        if id_comps['name'] == account_name:
            return id_comps['resource_group']
    raise CLIError(
        "The Resource 'Microsoft.DataLakeStore/accounts/{}'".format(account_name) +
        " not found within subscription: {}".format(client.config.subscription_id))
Example #23
    def _validator(cmd, namespace):
        from msrestazure.tools import parse_resource_id

        location = namespace.location
        network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK).network_watchers
        watcher = next((x for x in network_client.list_all() if x.location.lower() == location.lower()), None)
        if not watcher:
            raise CLIError("network watcher is not enabled for region '{}'.".format(location))
        id_parts = parse_resource_id(watcher.id)
        setattr(namespace, rg_name, id_parts['resource_group'])
        setattr(namespace, watcher_name, id_parts['name'])

        if remove:
            del namespace.location
Example #24
def _verify_keyvault_good_for_encryption(cli_ctx, disk_vault_id, kek_vault_id, vmss, force):
    def _report_client_side_validation_error(msg):
        if force:
            logger.warning(msg)
        else:
            from knack.util import CLIError
            raise CLIError(msg)

    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.keyvault import KeyVaultManagementClient
    from msrestazure.tools import parse_resource_id

    client = get_mgmt_service_client(cli_ctx, KeyVaultManagementClient).vaults
    disk_vault_resource_info = parse_resource_id(disk_vault_id)
    key_vault = client.get(disk_vault_resource_info['resource_group'], disk_vault_resource_info['name'])

    # ensure vault has 'EnabledForDiskEncryption' permission
    if not key_vault.properties.enabled_for_disk_encryption:
        _report_client_side_validation_error("keyvault '{}' is not enabled for disk encryption. ".format(
            disk_vault_resource_info['resource_name']))

    if kek_vault_id:
        kek_vault_info = parse_resource_id(kek_vault_id)
        if disk_vault_resource_info['name'].lower() != kek_vault_info['name'].lower():
            client.get(kek_vault_info['resource_group'], kek_vault_info['name'])

    # verify subscription matches
    vmss_resource_info = parse_resource_id(vmss.id)
    if vmss_resource_info['subscription'].lower() != disk_vault_resource_info['subscription'].lower():
        _report_client_side_validation_error(
            "VM scale set's subscription doesn't match keyvault's subscription. Encryption might fail")

    # verify region matches
    if key_vault.location.replace(' ', '').lower() != vmss.location.replace(' ', '').lower():
        _report_client_side_validation_error(
            "VM scale set's region doesn't match keyvault's region. Encryption might fail")
Example #25
def _get_resource_group_from_vault_name(cli_ctx, vault_name):
    """
    Fetch resource group from vault name
    :param str vault_name: name of the key vault
    :return: resource group name or None
    :rtype: str
    """
    from azure.mgmt.keyvault import KeyVaultManagementClient
    from msrestazure.tools import parse_resource_id

    client = get_mgmt_service_client(cli_ctx, KeyVaultManagementClient).vaults
    for vault in client.list():
        id_comps = parse_resource_id(vault.id)
        if id_comps['name'] == vault_name:
            return id_comps['resource_group']
    return None
Example #26
def _server_georestore(cmd, client, resource_group_name, server_name, sku_name, location, source_server,
                       backup_retention=None, geo_redundant_backup=None, no_wait=False, **kwargs):
    provider = 'Microsoft.DBForMySQL' if isinstance(client, ServersOperations) else 'Microsoft.DBforPostgreSQL'
    parameters = None

    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    if provider == 'Microsoft.DBForMySQL':
        from azure.mgmt.rdbms import mysql
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForGeoRestore(
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        from azure.mgmt.rdbms import postgresql
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForGeoRestore(
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)

    parameters.properties.source_server_id = source_server

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
        if parameters.sku.name is None:
            parameters.sku.name = source_server_object.sku.name
    except Exception as e:
        raise ValueError('Unable to get source server: {}.'.format(str(e)))

    return sdk_no_wait(no_wait, client.create, resource_group_name, server_name, parameters)
Example #27
def get_key_for_storage_account(cmd, storage_account):  # pylint: disable=unused-argument
    from ._client_factory import cf_storage
    from msrestazure.tools import parse_resource_id, is_valid_resource_id
    from knack.util import CLIError

    storage_account_key = None
    if is_valid_resource_id(storage_account):
        parsed_storage_account = parse_resource_id(storage_account)
        resource_group_name = parsed_storage_account['resource_group']
        storage_account_name = parsed_storage_account['resource_name']

        storage_client = cf_storage(cmd.cli_ctx)
        keys = storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
        storage_account_key = keys.keys[0].value  # pylint: disable=no-member
    elif storage_account:
        raise CLIError('Failed to get access key for storage account: {}'.format(storage_account))
    return storage_account_key
Example #28
def _get_diagnostics_from_workspace(cli_ctx, log_analytics_workspace):
    log_analytics_client = cf_log_analytics(cli_ctx)

    for workspace in log_analytics_client.list():
        if log_analytics_workspace == workspace.name:
            keys = log_analytics_client.get_shared_keys(
                parse_resource_id(workspace.id)['resource_group'], workspace.name)

            log_analytics = LogAnalytics(
                workspace_id=workspace.customer_id, workspace_key=keys.primary_shared_key)

            diagnostics = ContainerGroupDiagnostics(
                log_analytics=log_analytics)

            return (diagnostics, {'oms-resource-link': workspace.id})

    return None, {}
Example #29
    def exec_module(self, **kwargs):
        """Main module execution method"""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        result = None
        changed = False

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        disk_instance = self.get_managed_disk()
        result = disk_instance

        # need create or update
        if self.state == 'present':
            parameter = self.generate_managed_disk_property()
            if not disk_instance or self.is_different(disk_instance, parameter):
                changed = True
                if not self.check_mode:
                    result = self.create_or_update_managed_disk(parameter)
                else:
                    result = True

        # unmount from the old virtual machine and mount to the new virtual machine
        vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None
        if self.managed_by != vm_name:
            changed = True
            if not self.check_mode:
                if vm_name:
                    self.detach(vm_name, result)
                if self.managed_by:
                    self.attach(self.managed_by, result)
                result = self.get_managed_disk()

        if self.state == 'absent' and disk_instance:
            changed = True
            if not self.check_mode:
                self.delete_managed_disk()
            result = True

        self.results['changed'] = changed
        self.results['state'] = result
        return self.results
Example #30
def process_nw_flow_log_show_namespace(cmd, namespace):
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id

    if not is_valid_resource_id(namespace.nsg):
        namespace.nsg = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Network',
            type='networkSecurityGroups',
            name=namespace.nsg)

    network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK).network_security_groups
    id_parts = parse_resource_id(namespace.nsg)
    nsg_name = id_parts['name']
    rg = id_parts['resource_group']
    nsg = network_client.get(rg, nsg_name)
    namespace.location = nsg.location  # pylint: disable=no-member
    get_network_watcher_from_location(remove=True)(cmd, namespace)
Example #31
def encrypt_vmss(
        cmd,
        resource_group_name,
        vmss_name,  # pylint: disable=too-many-locals, too-many-statements
        disk_encryption_keyvault,
        key_encryption_keyvault=None,
        key_encryption_key=None,
        key_encryption_algorithm='RSA-OAEP',
        volume_type=None,
        force=False):
    from msrestazure.tools import parse_resource_id

    # pylint: disable=no-member
    UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'UpgradeMode', 'VirtualMachineScaleSetExtension',
        'VirtualMachineScaleSetExtensionProfile')

    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. First validate arguments
    volume_type = _handles_default_volume_type_for_vmss_encryption(
        is_linux, volume_type, force)

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault

    #  to avoid bad server errors, ensure the vault has the right configurations
    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault,
                                         key_encryption_keyvault, vmss, force)

    # if key name and not key url, get url.
    if key_encryption_key and '://' not in key_encryption_key:
        key_encryption_key = _get_keyvault_key_url(
            cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'],
            key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    public_config = {
        'KeyVaultURL':
        disk_encryption_keyvault_url,
        'KeyEncryptionKeyURL':
        key_encryption_key or '',
        "KeyVaultResourceId":
        disk_encryption_keyvault,
        "KekVaultResourceId":
        key_encryption_keyvault if key_encryption_key else '',
        'KeyEncryptionAlgorithm':
        key_encryption_algorithm if key_encryption_key else '',
        'VolumeType':
        volume_type,
        'EncryptionOperation':
        'EnableEncryption'
    }

    ext = VirtualMachineScaleSetExtension(
        name=extension['name'],
        publisher=extension['publisher'],
        type=extension['name'],
        type_handler_version=extension['version'],
        settings=public_config,
        auto_upgrade_minor_version=True,
        force_update_tag=uuid.uuid4())
    exts = [ext]

    # remove any old ade extensions set by this command and add the new one.
    vmss_ext_profile = vmss.virtual_machine_profile.extension_profile
    if vmss_ext_profile and vmss_ext_profile.extensions:
        exts.extend(old_ext for old_ext in
                    vmss.virtual_machine_profile.extension_profile.extensions
                    if old_ext.type != ext.type or old_ext.name != ext.name)
    vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(
        extensions=exts)

    poller = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    _show_post_action_message(resource_group_name, vmss.name,
                              vmss.upgrade_policy.mode == UpgradeMode.manual,
                              True)
Example #32
            lock_client = get_client_from_cli_profile(ManagementLockClient,
                                                      subscription_id=subscription.subscription_id)

            rg_list = None
            while not rg_list:
                # If Azure API gives error (like API limit), wait 10 seconds and try again.
                try:
                    rg_list = resource_client.resource_groups.list()
                except CloudError as e:
                    print('EXCEPTION {}'.format(e))
                    sleep(10)

            sub_lock_list = None
            while not sub_lock_list:
                # If Azure API gives error (like API limit), wait 10 seconds and try again.
                try:
                    sub_lock_list = lock_client.management_locks.list_at_subscription_level()
                except CloudError as e:
                    print('EXCEPTION {}'.format(e))
                    sleep(10)
            lock_dictionary = dict()
            for lock in sub_lock_list:
                lock_id_parse = parse_resource_id(lock.id)
                lock_dictionary[lock_id_parse.get('name').lower()] = lock.as_dict()

            for rg in rg_list:
                # Dictionary allows for .get() methods which return None if not found.
                rg_dict = rg.as_dict()
                name = rg.name
                csvwriter.writerow([rg.name, subscription.display_name, lock_dictionary.get(rg.name.lower() + '-lock', {}).get('name')])
Example #33
def encrypt_vm(cmd, resource_group_name, vm_name,  # pylint: disable=too-many-locals, too-many-statements
               aad_client_id,
               disk_encryption_keyvault,
               aad_client_secret=None, aad_client_cert_thumbprint=None,
               key_encryption_keyvault=None,
               key_encryption_key=None,
               key_encryption_algorithm='RSA-OAEP',
               volume_type=None,
               encrypt_format_all=False):
    # pylint: disable=no-member
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    os_type = vm.storage_profile.os_disk.os_type.value
    is_linux = _is_linux_vm(os_type)
    extension = vm_extension_info[os_type]
    backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
    vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False

    # 1. First validate arguments

    if not aad_client_cert_thumbprint and not aad_client_secret:
        raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret')

    if volume_type is None:
        if vm.storage_profile.data_disks:
            raise CLIError('VM has data disks, please supply --volume-type')
        else:
            volume_type = 'OS'

    # encryption is not supported on all linux distros, but service never tells you
    # so let us verify at the client side
    if is_linux:
        image_reference = getattr(vm.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(image_reference, volume_type)
            if not result:
                logger.warning(message)

    # sequence_version should be unique
    sequence_version = uuid.uuid4()

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears a key name
            key_encryption_key = _get_keyvault_key_url(
                cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'AADClientID': aad_client_id,
        'AADClientCertThumbprint': aad_client_cert_thumbprint,
        'KeyVaultURL': disk_encryption_keyvault_url,
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll',
        'KeyEncryptionKeyURL': key_encryption_key,
        'KeyEncryptionAlgorithm': key_encryption_algorithm,
        'SequenceVersion': sequence_version,
    }
    private_config = {
        'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
    }

    VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
        cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
                       'KeyVaultKeyReference', 'SubResource')

    ext = VirtualMachineExtension(vm.location,  # pylint: disable=no-member
                                  publisher=extension['publisher'],
                                  virtual_machine_extension_type=extension['name'],
                                  protected_settings=private_config,
                                  type_handler_version=extension['version'],
                                  settings=public_config,
                                  auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # verify the extension was ok
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError('Extension needed for disk encryption was not provisioned correctly')
    if not (extension_result.instance_view.statuses and
            extension_result.instance_view.statuses[0].message):
        raise CLIError('Could not find the URL pointing to the secret for disk encryption')

    # 3. update VM's storage profile with the secrets
    status_url = extension_result.instance_view.statuses[0].message

    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    secret_ref = KeyVaultSecretReference(secret_url=status_url,
                                         source_vault=SubResource(disk_encryption_keyvault))

    key_encryption_key_obj = None
    if key_encryption_key:
        key_encryption_key_obj = KeyVaultKeyReference(key_encryption_key,
                                                      SubResource(key_encryption_keyvault))

    disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref,
                                                      key_encryption_key=key_encryption_key_obj,
                                                      enabled=True)
    if vm_encrypted:
        # stop the vm before update if the vm is already encrypted
        logger.warning("Deallocating the VM before updating encryption settings...")
        compute_client.virtual_machines.deallocate(resource_group_name, vm_name).result()
        vm = compute_client.virtual_machines.get(resource_group_name, vm_name)

    vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
    set_vm(cmd, vm)

    if vm_encrypted:
        # and start after the update
        logger.warning("Restarting the VM after the update...")
        compute_client.virtual_machines.start(resource_group_name, vm_name).result()

    if is_linux and volume_type != _DATA_VOLUME_TYPE:
        # TODO: expose a 'wait' command to do the monitor and handle the reboot
        logger.warning("The encryption request was accepted. Please use 'show' command to monitor "
                       "the progress. If you see 'VMRestartPending', please restart the VM, and "
                       "the encryption will finish shortly")
Example #34
def _ensure_container_insights_for_monitoring(cmd, workspace_resource_id):
    # extract subscription ID and resource group from workspace_resource_id URL
    parsed = parse_resource_id(workspace_resource_id)
    subscription_id, resource_group = parsed["subscription"], parsed[
        "resource_group"]

    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id,
                                       '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex

    unix_time_in_millis = int(
        (datetime.datetime.utcnow() -
         datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)

    solution_deployment_name = 'ContainerInsights-{}'.format(
        unix_time_in_millis)

    # pylint: disable=line-too-long
    template = {
        "$schema":
        "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion":
        "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description":
                    "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [{
            "type": "Microsoft.Resources/deployments",
            "name": "[parameters('solutionDeploymentName')]",
            "apiVersion": "2017-05-10",
            "subscriptionId":
            "[split(parameters('workspaceResourceId'),'/')[2]]",
            "resourceGroup":
            "[split(parameters('workspaceResourceId'),'/')[4]]",
            "properties": {
                "mode": "Incremental",
                "template": {
                    "$schema":
                    "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                    "contentVersion":
                    "1.0.0.0",
                    "parameters": {},
                    "variables": {},
                    "resources": [{
                        "apiVersion":
                        "2015-11-01-preview",
                        "type":
                        "Microsoft.OperationsManagement/solutions",
                        "location":
                        "[parameters('workspaceRegion')]",
                        "name":
                        "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')"
                        "[8], ')')]",
                        "properties": {
                            "workspaceResourceId":
                            "[parameters('workspaceResourceId')]"
                        },
                        "plan": {
                            "name":
                            "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),"
                            "'/')[8], ')')]",
                            "product":
                            "[Concat('OMSGallery/', 'ContainerInsights')]",
                            "promotionCode":
                            "",
                            "publisher":
                            "Microsoft"
                        }
                    }]
                },
                "parameters": {}
            }
        }]
    }

    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }

    deployment_name = 'arc-k8s-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd,
                              resource_group,
                              deployment_name,
                              template,
                              params,
                              validate=False,
                              no_wait=False,
                              subscription_id=subscription_id)
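The split(...)[2], [4], and [8] expressions in the template mirror the positional layout of an ARM ID. A quick Python check of which segment each index lands on (hypothetical workspace ID):

workspace_id = ('/subscriptions/sub-guid/resourceGroups/demo-rg/providers'
                '/Microsoft.OperationalInsights/workspaces/demo-ws')
segments = workspace_id.split('/')
print(segments[2])  # sub-guid -> template's subscriptionId
print(segments[4])  # demo-rg  -> template's resourceGroup
print(segments[8])  # demo-ws  -> workspace name in the solution name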
Example #35
def get_cache_from_resource_id(client, cache_resource_id):
    from msrestazure.tools import parse_resource_id
    id_comps = parse_resource_id(cache_resource_id)
    return client.get(id_comps['resource_group'], id_comps['name'])
Example #36
 def get_namespace(resource_id):
     parsed = parse_resource_id(resource_id)
     return parsed.get('namespace')
Example #37
 def get_storage_container(self, resource_id, client):
     parsed = parse_resource_id(resource_id)
     return client.blob_containers.get(
         parsed.get('resource_group'),
         parsed.get('name'),  # Account name
         parsed.get('resource_name'))  # Container name
Example #38
 def get_resource_group(resource_id):
     result = parse_resource_id(resource_id).get("resource_group")
     # parse_resource_id fails to parse resource id for resource groups
     if result is None:
         return resource_id.split('/')[4]
     return result
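The fallback indexing assumes the canonical segment order of a resource-group ID; splitting makes the arithmetic visible (hypothetical ID):

rg_id = '/subscriptions/sub-guid/resourceGroups/demo-rg'
print(rg_id.split('/'))
# ['', 'subscriptions', 'sub-guid', 'resourceGroups', 'demo-rg']
print(rg_id.split('/')[4])  # demo-rg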
Example #39
 def get_namespace(resource_id):
     return parse_resource_id(resource_id).get('namespace')
Example #40
    def test_aks_byo_appgw_with_ingress_appgw_addon(self, resource_group,
                                                    resource_group_location):
        aks_name = self.create_random_name('cliakstest', 16)
        vnet_name = self.create_random_name('cliakstest', 16)
        self.kwargs.update({
            'resource_group': resource_group,
            'aks_name': aks_name,
            'vnet_name': vnet_name
        })

        # create virtual network
        create_vnet = 'network vnet create --resource-group={resource_group} --name={vnet_name} ' \
                      '--address-prefix 11.0.0.0/16 --subnet-name aks-subnet --subnet-prefix 11.0.0.0/24  -o json'
        vnet = self.cmd(
            create_vnet,
            checks=[self.check('newVNet.provisioningState',
                               'Succeeded')]).get_output_in_json()

        create_subnet = 'network vnet subnet create -n appgw-subnet --resource-group={resource_group} --vnet-name {vnet_name} ' \
                        '--address-prefixes 11.0.1.0/24  -o json'
        self.cmd(create_subnet,
                 checks=[self.check('provisioningState', 'Succeeded')])

        vnet_id = vnet['newVNet']["id"]
        assert vnet_id is not None
        self.kwargs.update({
            'vnet_id': vnet_id,
        })

        # create public ip for app gateway
        create_pip = 'network public-ip create -n appgw-ip -g {resource_group} ' \
                     '--allocation-method Static --sku Standard  -o json'
        self.cmd(
            create_pip,
            checks=[self.check('publicIp.provisioningState', 'Succeeded')])

        # create app gateway
        create_appgw = 'network application-gateway create -n appgw -g {resource_group} ' \
                       '--sku Standard_v2 --public-ip-address appgw-ip --subnet {vnet_id}/subnets/appgw-subnet'
        self.cmd(create_appgw)

        # construct group id
        from msrestazure.tools import parse_resource_id, resource_id
        parsed_vnet_id = parse_resource_id(vnet_id)
        group_id = resource_id(subscription=parsed_vnet_id["subscription"],
                               resource_group=parsed_vnet_id["resource_group"])
        appgw_id = group_id + "/providers/Microsoft.Network/applicationGateways/appgw"

        self.kwargs.update({'appgw_id': appgw_id, 'appgw_group_id': group_id})

        # create aks cluster
        create_cmd = 'aks create -n {aks_name} -g {resource_group} --enable-managed-identity --service-principal xxxx --client-secret yyyy --generate-ssh-keys ' \
                     '--vnet-subnet-id {vnet_id}/subnets/aks-subnet ' \
                     '-a ingress-appgw --appgw-id {appgw_id} -o json'
        aks_cluster = self.cmd(
            create_cmd,
            checks=[
                self.check('provisioningState', 'Succeeded'),
                self.check('addonProfiles.ingressapplicationgateway.enabled',
                           True),
                self.check(
                    'addonProfiles.ingressapplicationgateway.config.applicationgatewayid',
                    appgw_id)
            ]).get_output_in_json()

        addon_client_id = aks_cluster["addonProfiles"][
            "ingressapplicationgateway"]["identity"]["clientId"]

        self.kwargs.update({
            'addon_client_id': addon_client_id,
        })
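
For reference, resource_id is the inverse of parse_resource_id: called with only subscription and resource_group it yields a group-scope ID, which is how group_id is assembled in the test above. A sketch with placeholder values:

from msrestazure.tools import resource_id

group_id = resource_id(subscription='00000000-0000-0000-0000-000000000000',
                       resource_group='myRG')
# -> /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG
appgw_id = group_id + '/providers/Microsoft.Network/applicationGateways/appgw'
print(appgw_id)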
Example No. 41
from msrestazure.tools import parse_resource_id

def get_resource_name(resource_id):
    return parse_resource_id(resource_id).get('resource_name')
Example No. 42
def validate_vnet(cmd, namespace):
    if not namespace.vnet and not namespace.app_subnet and \
       not namespace.service_runtime_subnet and not namespace.reserved_cidr_range:
        return
    validate_vnet_required_parameters(namespace)

    vnet_id = ''
    if namespace.vnet:
        vnet_id = namespace.vnet
        # format the app_subnet and service_runtime_subnet
        if not is_valid_resource_id(vnet_id):
            if vnet_id.count('/') > 0:
                raise CLIError(
                    '--vnet {0} is not a valid name or resource ID'.format(
                        vnet_id))
            vnet_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group,
                namespace='Microsoft.Network',
                type='virtualNetworks',
                name=vnet_id)
        else:
            vnet = parse_resource_id(vnet_id)
            if vnet['namespace'].lower() != 'microsoft.network' or \
                    vnet['type'].lower() != 'virtualnetworks':
                raise CLIError(
                    '--vnet {0} is not a valid VirtualNetwork resource ID'.format(vnet_id))
        namespace.app_subnet = _construct_subnet_id(vnet_id,
                                                    namespace.app_subnet)
        namespace.service_runtime_subnet = _construct_subnet_id(
            vnet_id, namespace.service_runtime_subnet)
    else:
        app_vnet_id = _parse_vnet_id_from_subnet(namespace.app_subnet)
        service_runtime_vnet_id = _parse_vnet_id_from_subnet(
            namespace.service_runtime_subnet)
        if app_vnet_id.lower() != service_runtime_vnet_id.lower():
            raise CLIError(
                '--app-subnet and --service-runtime-subnet should be in the same virtual network.')
        vnet_id = app_vnet_id
    if namespace.app_subnet.lower() == namespace.service_runtime_subnet.lower():
        raise CLIError(
            '--app-subnet and --service-runtime-subnet should not be the same.')

    vnet_obj = _get_vnet(cmd, vnet_id)
    instance_location = namespace.location
    if instance_location is None:
        instance_location = _get_rg_location(cmd.cli_ctx, namespace.resource_group)
    else:
        instance_location = instance_location.replace(" ", "").lower()
    if vnet_obj.location.lower() != instance_location.lower():
        raise CLIError(
            '--vnet and Azure Spring Cloud instance should be in the same location.'
        )
    for subnet in vnet_obj.subnets:
        _validate_subnet(namespace, subnet)

    if namespace.reserved_cidr_range:
        _validate_cidr_range(namespace)
    else:
        namespace.reserved_cidr_range = _set_default_cidr_range(vnet_obj.address_space.address_prefixes) if \
            vnet_obj and vnet_obj.address_space and vnet_obj.address_space.address_prefixes \
            else '10.234.0.0/16,10.244.0.0/16,172.17.0.1/16'
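
The helper _construct_subnet_id is not shown in this listing; a rough sketch of one plausible implementation using the same msrestazure helpers (the function name and exact behavior are assumptions):

from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id

def construct_subnet_id(vnet_id, subnet):
    # If the caller already passed a full subnet ID, keep it as-is.
    if is_valid_resource_id(subnet):
        return subnet
    parts = parse_resource_id(vnet_id)
    return resource_id(subscription=parts['subscription'],
                       resource_group=parts['resource_group'],
                       namespace='Microsoft.Network',
                       type='virtualNetworks',
                       name=parts['name'],
                       child_type_1='subnets',
                       child_name_1=subnet)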
Example No. 43
    def exec_module(self, **kwargs):

        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        results = None
        changed = False
        nic = None
        nsg = None

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        # parse the virtual network resource group and name
        virtual_network_dict = parse_resource_id(self.virtual_network_name)
        virtual_network_name = virtual_network_dict.get('name')
        virtual_network_resource_group = virtual_network_dict.get('resource_group', self.resource_group)

        if self.state == 'present' and not self.ip_configurations:
            # construct the ip_configurations list for backward compatibility
            self.deprecate('Setting flattened ip_configuration parameters is deprecated and will be removed.'
                           ' Use the ip_configurations list to define the ip configuration', version='2.9')
            self.ip_configurations = [
                dict(
                    private_ip_address=self.private_ip_address,
                    private_ip_allocation_method=self.private_ip_allocation_method,
                    public_ip_address_name=self.public_ip_address_name if self.public_ip else None,
                    public_ip_allocation_method=self.public_ip_allocation_method,
                    name='default'
                )
            ]

        try:
            self.log('Fetching network interface {0}'.format(self.name))
            nic = self.network_client.network_interfaces.get(self.resource_group, self.name)

            self.log('Network interface {0} exists'.format(self.name))
            self.check_provisioning_state(nic, self.state)
            results = nic_to_dict(nic)
            self.log(results, pretty_print=True)

            nsg = None
            if self.state == 'present':
                # check for update
                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True

                if self.security_group_name:
                    nsg = self.get_security_group(self.security_group_name)
                    if nsg and results['network_security_group'].get('id') != nsg.id:
                        self.log("CHANGED: network interface {0} network security group".format(self.name))
                        changed = True

                if results['ip_configurations'][0]['subnet']['virtual_network_name'] != virtual_network_name:
                    self.log("CHANGED: network interface {0} virtual network name".format(self.name))
                    changed = True

                if results['ip_configurations'][0]['subnet']['resource_group'] != virtual_network_resource_group:
                    self.log("CHANGED: network interface {0} virtual network resource group".format(self.name))
                    changed = True

                if results['ip_configurations'][0]['subnet']['name'] != self.subnet_name:
                    self.log("CHANGED: network interface {0} subnet name".format(self.name))
                    changed = True

                # check whether the ip_configuration has changed
                # construct two sets with the same structure and then compare them
                # each entry contains:
                # name, private_ip_address, public_ip_address_name, private_ip_allocation_method, subnet_name
                ip_configuration_result = construct_ip_configuration_set(results['ip_configurations'])
                ip_configuration_request = construct_ip_configuration_set(self.ip_configurations)
                if ip_configuration_result != ip_configuration_request:
                    self.log("CHANGED: network interface {0} ip configurations".format(self.name))
                    changed = True

            elif self.state == 'absent':
                self.log("CHANGED: network interface {0} exists but requested state is 'absent'".format(self.name))
                changed = True
        except CloudError:
            self.log('Network interface {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: network interface {0} does not exist but requested state is 'present'".format(self.name))
                changed = True

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                subnet = self.get_subnet(virtual_network_resource_group, virtual_network_name, self.subnet_name)
                if not subnet:
                    self.fail('subnet {0} does not exist'.format(self.subnet_name))
                nic_ip_configurations = [
                    self.network_models.NetworkInterfaceIPConfiguration(
                        private_ip_allocation_method=ip_config.get('private_ip_allocation_method'),
                        private_ip_address=ip_config.get('private_ip_address'),
                        name=ip_config.get('name'),
                        subnet=subnet,
                        public_ip_address=self.get_or_create_public_ip_address(ip_config),
                        primary=ip_config.get('primary')
                    ) for ip_config in self.ip_configurations
                ]

                nsg = nsg or self.create_default_securitygroup(self.resource_group, self.location, self.name, self.os_type, self.open_ports)
                self.log('Creating or updating network interface {0}'.format(self.name))
                nic = self.network_models.NetworkInterface(
                    id=results['id'] if results else None,
                    location=self.location,
                    tags=self.tags,
                    ip_configurations=nic_ip_configurations,
                    network_security_group=nsg
                )
                self.results['state'] = self.create_or_update_nic(nic)
            elif self.state == 'absent':
                self.log('Deleting network interface {0}'.format(self.name))
                self.delete_nic()
                # Delete doesn't return anything. If we get this far, assume success
                self.results['state']['status'] = 'Deleted'

        return self.results
Example No. 44
def encrypt_vm(
        cmd,
        resource_group_name,
        vm_name,  # pylint: disable=too-many-locals, too-many-statements
        disk_encryption_keyvault,
        aad_client_id=None,
        aad_client_secret=None,
        aad_client_cert_thumbprint=None,
        key_encryption_keyvault=None,
        key_encryption_key=None,
        key_encryption_algorithm='RSA-OAEP',
        volume_type=None,
        encrypt_format_all=False,
        force=False):
    from msrestazure.tools import parse_resource_id
    from knack.util import CLIError

    # pylint: disable=no-member
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    is_linux = _is_linux_os(vm)
    backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
    vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
    _, has_old_ade = _detect_ade_status(vm)
    use_new_ade = not aad_client_id and not has_old_ade
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    if not use_new_ade and not aad_client_id:
        raise CLIError('Please provide --aad-client-id')

    # 1. First validate arguments
    if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
        raise CLIError(
            'Please provide either --aad-client-cert-thumbprint or --aad-client-secret'
        )

    if volume_type is None:
        if not is_linux:
            volume_type = _ALL_VOLUME_TYPE
        elif vm.storage_profile.data_disks:
            raise CLIError('VM has data disks, please supply --volume-type')
        else:
            volume_type = 'OS'

    # sequence_version should be unique
    sequence_version = uuid.uuid4()

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault

    #  to avoid bad server errors, ensure the vault has the right configurations
    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault,
                                         key_encryption_keyvault, vm, force)

    # if a key name was given rather than a key url, resolve the url
    if key_encryption_key and '://' not in key_encryption_key:
        key_encryption_key = _get_keyvault_key_url(
            cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'],
            key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryptionFormatAll' if encrypt_format_all else 'EnableEncryption',
        'KeyEncryptionKeyURL': key_encryption_key,
        'KeyEncryptionAlgorithm': key_encryption_algorithm,
        'SequenceVersion': sequence_version,
    }
    if use_new_ade:
        public_config.update({
            "KeyVaultResourceId": disk_encryption_keyvault,
            "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
        })
    else:
        public_config.update({
            'AADClientID': aad_client_id,
            'AADClientCertThumbprint': aad_client_cert_thumbprint,
        })

    ade_legacy_private_config = {
        'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
    }

    VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
        cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
                       'KeyVaultKeyReference', 'SubResource')

    ext = VirtualMachineExtension(
        location=vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        protected_settings=None if use_new_ade else ade_legacy_private_config,
        type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    LongRunningOperation(cmd.cli_ctx)(poller)
    poller.result()

    # verify the extension was ok
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError(
            'Extension needed for disk encryption was not provisioned correctly'
        )

    if not use_new_ade:
        if not (extension_result.instance_view.statuses
                and extension_result.instance_view.statuses[0].message):
            raise CLIError(
                'Could not find url pointing to the secret for disk encryption'
            )

        # 3. update VM's storage profile with the secrets
        status_url = extension_result.instance_view.statuses[0].message

        vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
        secret_ref = KeyVaultSecretReference(
            secret_url=status_url,
            source_vault=SubResource(id=disk_encryption_keyvault))

        key_encryption_key_obj = None
        if key_encryption_key:
            key_encryption_key_obj = KeyVaultKeyReference(
                key_url=key_encryption_key,
                source_vault=SubResource(id=key_encryption_keyvault))

        disk_encryption_settings = DiskEncryptionSettings(
            disk_encryption_key=secret_ref,
            key_encryption_key=key_encryption_key_obj,
            enabled=True)
        if vm_encrypted:
            # stop the vm before update if the vm is already encrypted
            logger.warning(
                "Deallocating the VM before updating encryption settings...")
            compute_client.virtual_machines.deallocate(resource_group_name,
                                                       vm_name).result()
            vm = compute_client.virtual_machines.get(resource_group_name,
                                                     vm_name)

        vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
        set_vm(cmd, vm)

        if vm_encrypted:
            # and start after the update
            logger.warning("Restarting the VM after the update...")
            compute_client.virtual_machines.start(resource_group_name,
                                                  vm_name).result()

    if is_linux and volume_type != _DATA_VOLUME_TYPE:
        old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly"
        logger.warning(
            "The encryption request was accepted. Please use 'show' command to monitor "
            "the progress. %s", "" if use_new_ade else old_ade_msg)
Example No. 45
import os
from msrestazure.tools import parse_resource_id

def get_base_resource_group() -> str:
    return parse_resource_id(os.environ["ONEFUZZ_RESOURCE_GROUP"])["resource_group"]
Example No. 46
    def hostvars(self):
        if self._hostvars != {}:
            return self._hostvars

        system = "unknown"
        if 'osProfile' in self._vm_model['properties']:
            os_profile = self._vm_model['properties']['osProfile']
            if 'linuxConfiguration' in os_profile:
                system = 'linux'
            if 'windowsConfiguration' in os_profile:
                system = 'windows'

        new_hostvars = dict(
            public_ipv4_addresses=[],
            public_dns_hostnames=[],
            private_ipv4_addresses=[],
            id=self._vm_model['id'],
            location=self._vm_model['location'],
            name=self._vm_model['name'],
            powerstate=self._powerstate,
            provisioning_state=self._vm_model['properties']['provisioningState'].lower(),
            tags=self._vm_model.get('tags', {}),
            resource_type=self._vm_model.get('type', "unknown"),
            vmid=self._vm_model['properties']['vmId'],
            os_profile=dict(system=system),
            vmss=dict(
                id=self._vmss['id'],
                name=self._vmss['name'],
            ) if self._vmss else {},
            virtual_machine_size=self._vm_model['properties']['hardwareProfile']['vmSize']
            if self._vm_model['properties'].get('hardwareProfile') else None,
            plan=self._vm_model['properties']['plan']['name']
            if self._vm_model['properties'].get('plan') else None,
            resource_group=parse_resource_id(self._vm_model['id']).get('resource_group').lower())

        # set nic-related values from the primary NIC first
        for nic in sorted(self.nics, key=lambda n: n.is_primary, reverse=True):
            # and from the primary IP config per NIC first
            for ipc in sorted(nic._nic_model['properties']['ipConfigurations'],
                              key=lambda i: i['properties']['primary'],
                              reverse=True):
                private_ip = ipc['properties'].get('privateIPAddress')
                if private_ip:
                    new_hostvars['private_ipv4_addresses'].append(private_ip)
                pip_id = ipc['properties'].get('publicIPAddress', {}).get('id')
                if pip_id:
                    new_hostvars['public_ip_id'] = pip_id

                    pip = nic.public_ips[pip_id]
                    new_hostvars['public_ip_name'] = pip._pip_model['name']
                    new_hostvars['public_ipv4_addresses'].append(
                        pip._pip_model['properties'].get('ipAddress', None))
                    pip_fqdn = pip._pip_model['properties'].get(
                        'dnsSettings', {}).get('fqdn')
                    if pip_fqdn:
                        new_hostvars['public_dns_hostnames'].append(pip_fqdn)

            new_hostvars['mac_address'] = nic._nic_model['properties'].get(
                'macAddress')
            new_hostvars['network_interface'] = nic._nic_model['name']
            new_hostvars['network_interface_id'] = nic._nic_model['id']
            new_hostvars['security_group_id'] = nic._nic_model['properties']['networkSecurityGroup']['id'] \
                if nic._nic_model['properties'].get('networkSecurityGroup') else None
            new_hostvars['security_group'] = parse_resource_id(new_hostvars['security_group_id'])['resource_name'] \
                if nic._nic_model['properties'].get('networkSecurityGroup') else None

        # set image and os_disk
        new_hostvars['image'] = {}
        new_hostvars['os_disk'] = {}
        storageProfile = self._vm_model['properties'].get('storageProfile')
        if storageProfile:
            imageReference = storageProfile.get('imageReference')
            if imageReference:
                if imageReference.get('publisher'):
                    new_hostvars['image'] = dict(
                        sku=imageReference.get('sku'),
                        publisher=imageReference.get('publisher'),
                        version=imageReference.get('version'),
                        offer=imageReference.get('offer'))
                elif imageReference.get('id'):
                    new_hostvars['image'] = dict(id=imageReference.get('id'))

            osDisk = storageProfile.get('osDisk')
            new_hostvars['os_disk'] = dict(
                name=osDisk.get('name'),
                operating_system_type=osDisk.get('osType').lower()
                if osDisk.get('osType') else None)

        self._hostvars = new_hostvars

        return self._hostvars
Example No. 47
from msrestazure.tools import parse_resource_id

def get_subscription_id(resource_id):
    return parse_resource_id(resource_id).get('subscription')
Example No. 48
def run(cmd,
        vm_name,
        resource_group_name,
        run_id=None,
        repair_vm_id=None,
        custom_script_file=None,
        parameters=None,
        run_on_repair=False):

    # Init command helper object
    command = command_helper(logger, cmd, 'vm repair run')
    LINUX_RUN_SCRIPT_NAME = 'linux-run-driver.sh'
    WINDOWS_RUN_SCRIPT_NAME = 'win-run-driver.ps1'

    try:
        # Fetch VM data
        source_vm = get_vm(cmd, resource_group_name, vm_name)
        is_linux = _is_linux_os(source_vm)

        if is_linux:
            script_name = LINUX_RUN_SCRIPT_NAME
        else:
            script_name = WINDOWS_RUN_SCRIPT_NAME

        # If run_on_repair is False, then repair_vm is the source_vm (scripts run directly on source vm)
        repair_vm_id = parse_resource_id(repair_vm_id)
        repair_vm_name = repair_vm_id['name']
        repair_resource_group = repair_vm_id['resource_group']

        run_command_params = []
        additional_scripts = []

        # Normal scenario with run id
        if not custom_script_file:
            # Fetch run path from GitHub
            repair_script_path = _fetch_run_script_path(run_id)
            run_command_params.append(
                'script_path="./{}"'.format(repair_script_path))
        # Custom script scenario for script testers
        else:
            run_command_params.append('script_path=no-op')
            additional_scripts.append(custom_script_file)

        # Append Parameters
        if parameters:
            if is_linux:
                param_string = _process_bash_parameters(parameters)
            else:
                param_string = _process_ps_parameters(parameters)
            run_command_params.append('params="{}"'.format(param_string))
        if run_on_repair:
            vm_string = 'repair VM'
        else:
            vm_string = 'VM'
        logger.info('Running script on %s: %s', vm_string, repair_vm_name)

        # Run script and measure script run-time
        script_start_time = timeit.default_timer()
        stdout, stderr = _invoke_run_command(script_name, repair_vm_name,
                                             repair_resource_group, is_linux,
                                             run_command_params,
                                             additional_scripts)
        command.script.run_time = timeit.default_timer() - script_start_time
        logger.debug("stderr: %s", stderr)

        # Parse through stdout to populate log properties: 'level', 'message'
        run_script_succeeded = _check_script_succeeded(stdout)
        logs = _parse_run_script_raw_logs(stdout)

        # Process log-start and log-end
        # Log is cut off at the start if over 4k bytes
        log_cutoff = True
        log_fullpath = ''
        for log in logs:
            if log['level'] == 'Log-Start':
                log_cutoff = False
            if log['level'] == 'Log-End':
                split_log = log['message'].split(']')
                if len(split_log) == 2:
                    log_fullpath = split_log[1]
        if log_cutoff:
            logger.warning(
                'Log file is too large and has been cut off at the start. Please locate the log file on the %s '
                'using logFullpath to check the full logs.', vm_string)

        # Output 'output' or 'error' level logs depending on status
        if run_script_succeeded:
            command.script.set_status_success()
            command.message = 'Script completed successfully.'
            command.script.output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'output'
            ])
            logger.info('\nScript returned with output:\n%s\n',
                        command.script.output)
        else:
            command.script.set_status_error()
            command.message = 'Script completed with errors.'
            command.script.output = '\n'.join([
                log['message'] for log in logs
                if log['level'].lower() == 'error'
            ])
            logger.error('\nScript returned with error:\n%s\n',
                         command.script.output)

        command.set_status_success()
    except KeyboardInterrupt:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = "Command interrupted by user input."
        command.message = "Repair run failed. Command interrupted by user input."
    except AzCommandError as azCommandError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(azCommandError)
        command.message = "Repair run failed."
    except requests.exceptions.RequestException as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = "Failed to fetch run script data from GitHub. Please check this repository is reachable: https://github.com/Azure/repair-script-library"
    except RunScriptNotFoundForIdError as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = "Repair run failed. Run ID not found."
    except Exception as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        if command.error_stack_trace:
            logger.debug(command.error_stack_trace)

    if not command.is_status_success():
        command.set_status_error()
        command.script.output = 'Repair run failed.'
        return_dict = command.init_return_dict()
    else:
        # Build return Dict
        return_dict = command.init_return_dict()
        return_dict['script_status'] = command.script.status
        return_dict['logs'] = stdout
        return_dict['log_full_path'] = log_fullpath
        return_dict['output'] = command.script.output
        return_dict['vm_name'] = repair_vm_name
        return_dict['resource_group'] = repair_resource_group

    return return_dict
Example No. 49
def process_nw_topology_namespace(cmd, namespace):
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id
    SubResource = cmd.get_models('SubResource')
    subscription_id = get_subscription_id(cmd.cli_ctx)

    location = namespace.location
    rg = namespace.target_resource_group_name
    vnet = namespace.target_vnet
    subnet = namespace.target_subnet

    vnet_id = vnet if is_valid_resource_id(vnet) else None
    subnet_id = subnet if is_valid_resource_id(subnet) else None

    if rg and not vnet and not subnet:
        # targeting resource group - OK
        pass
    elif subnet:
        subnet_usage = CLIError(
            'usage error: --subnet ID | --subnet NAME --resource-group NAME --vnet NAME'
        )
        # targeting subnet - OK
        if subnet_id and (vnet or rg):
            raise subnet_usage
        elif not subnet_id and (not rg or not vnet or vnet_id):
            raise subnet_usage
        if subnet_id:
            rg = parse_resource_id(subnet_id)['resource_group']
            namespace.target_subnet = SubResource(subnet)
        else:
            subnet_id = subnet_id or resource_id(subscription=subscription_id,
                                                 resource_group=rg,
                                                 namespace='Microsoft.Network',
                                                 type='virtualNetworks',
                                                 name=vnet,
                                                 child_type_1='subnets',
                                                 child_name_1=subnet)
            namespace.target_resource_group_name = None
            namespace.target_vnet = None
            namespace.target_subnet = SubResource(subnet_id)
    elif vnet:
        # targeting vnet - OK
        vnet_usage = CLIError(
            'usage error: --vnet ID | --vnet NAME --resource-group NAME')
        if vnet_id and (subnet or rg):
            raise vnet_usage
        elif not vnet_id and not rg or subnet:
            raise vnet_usage
        if vnet_id:
            rg = parse_resource_id(vnet_id)['resource_group']
            namespace.target_vnet = SubResource(vnet)
        else:
            vnet_id = vnet_id or resource_id(subscription=subscription_id,
                                             resource_group=rg,
                                             namespace='Microsoft.Network',
                                             type='virtualNetworks',
                                             name=vnet)
            namespace.target_resource_group_name = None
            namespace.target_vnet = SubResource(vnet_id)
    else:
        raise CLIError(
            'usage error: --resource-group NAME | --vnet NAME_OR_ID | --subnet NAME_OR_ID'
        )

    # retrieve location from resource group
    if not location:
        resource_client = \
            get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).resource_groups
        resource_group = resource_client.get(rg)
        namespace.location = resource_group.location  # pylint: disable=no-member

    get_network_watcher_from_location(remove=True,
                                      watcher_name='network_watcher_name',
                                      rg_name='resource_group_name')(cmd,
                                                                     namespace)
Example No. 50
def restore(cmd,
            vm_name,
            resource_group_name,
            disk_name=None,
            repair_vm_id=None,
            yes=False):

    # Init command helper object
    command = command_helper(logger, cmd, 'vm repair restore')

    try:
        # Fetch source and repair VM data
        source_vm = get_vm(cmd, resource_group_name, vm_name)
        is_managed = _uses_managed_disk(source_vm)
        repair_vm_id = parse_resource_id(repair_vm_id)
        repair_vm_name = repair_vm_id['name']
        repair_resource_group = repair_vm_id['resource_group']
        source_disk = None

        # MANAGED DISK
        if is_managed:
            source_disk = source_vm.storage_profile.os_disk.name
            # Detach repaired data disk command
            detach_disk_command = 'az vm disk detach -g {g} --vm-name {repair} --name {disk}' \
                                  .format(g=repair_resource_group, repair=repair_vm_name, disk=disk_name)
            # Update OS disk with repaired data disk
            attach_fixed_command = 'az vm update -g {g} -n {n} --os-disk {disk}' \
                                   .format(g=resource_group_name, n=vm_name, disk=disk_name)

            # Maybe run attach and delete concurrently
            logger.info('Detaching repaired data disk from repair VM...')
            _call_az_command(detach_disk_command)
            logger.info(
                'Attaching repaired data disk to source VM as an OS disk...')
            _call_az_command(attach_fixed_command)
        # UNMANAGED DISK
        else:
            source_disk = source_vm.storage_profile.os_disk.vhd.uri
            # Get disk uri from disk name
            repair_vm = get_vm(cmd, repair_vm_id['resource_group'],
                               repair_vm_id['name'])
            data_disks = repair_vm.storage_profile.data_disks
            # The params went through validator so no need for existence checks
            disk_uri = [
                disk.vhd.uri for disk in data_disks if disk.name == disk_name
            ][0]

            detach_unmanaged_command = 'az vm unmanaged-disk detach -g {g} --vm-name {repair} --name {disk}' \
                                       .format(g=repair_resource_group, repair=repair_vm_name, disk=disk_name)
            # Update OS disk with disk
            # storageProfile.osDisk.name="{disk}"
            attach_unmanaged_command = 'az vm update -g {g} -n {n} --set storageProfile.osDisk.vhd.uri="{uri}"' \
                                       .format(g=resource_group_name, n=vm_name, uri=disk_uri)
            logger.info('Detaching repaired data disk from repair VM...')
            _call_az_command(detach_unmanaged_command)
            logger.info(
                'Attaching repaired data disk to source VM as an OS disk...')
            _call_az_command(attach_unmanaged_command)
        # Clean
        _clean_up_resources(repair_resource_group, confirm=not yes)
        command.set_status_success()
    except KeyboardInterrupt:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = "Command interrupted by user input."
        command.message = "Command interrupted by user input. If the restore command fails at retry, please rerun the repair process from \'az vm repair create\'."
    except AzCommandError as azCommandError:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(azCommandError)
        command.message = "Repair restore failed. If the restore command fails at retry, please rerun the repair process from \'az vm repair create\'."
    except Exception as exception:
        command.error_stack_trace = traceback.format_exc()
        command.error_message = str(exception)
        command.message = 'An unexpected error occurred. Try running again with the --debug flag to debug.'
    finally:
        if command.error_stack_trace:
            logger.debug(command.error_stack_trace)

    if not command.is_status_success():
        command.set_status_error()
        return_dict = command.init_return_dict()
    else:
        # Construct return dict
        command.message = '\'{disk}\' successfully attached to \'{n}\' as an OS disk. Please test your repairs and once confirmed, ' \
                          'you may choose to delete the source OS disk \'{src_disk}\' within resource group \'{rg}\' manually if you no longer need it, to avoid any undesired costs.' \
                          .format(disk=disk_name, n=vm_name, src_disk=source_disk, rg=resource_group_name)
        return_dict = command.init_return_dict()
        logger.info('\n%s\n', return_dict['message'])

    return return_dict
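Example No. 51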
def _get_container_insights_settings(cmd, cluster_resource_group_name,
                                     cluster_name, configuration_settings,
                                     configuration_protected_settings,
                                     is_ci_extension_type):

    subscription_id = get_subscription_id(cmd.cli_ctx)
    workspace_resource_id = ''

    if configuration_settings is not None:
        if 'loganalyticsworkspaceresourceid' in configuration_settings:
            configuration_settings['logAnalyticsWorkspaceResourceID'] = \
                configuration_settings.pop('loganalyticsworkspaceresourceid')

        if 'logAnalyticsWorkspaceResourceID' in configuration_settings:
            workspace_resource_id = configuration_settings[
                'logAnalyticsWorkspaceResourceID']

    workspace_resource_id = workspace_resource_id.strip()

    if configuration_protected_settings is not None:
        if 'proxyEndpoint' in configuration_protected_settings:
            # the currently supported proxy endpoint format is http(s)://<user>:<pwd>@<proxyhost>:<port>
            # do some basic validation since the ci agent does the complete validation
            proxy = configuration_protected_settings['proxyEndpoint'].strip().lower()
            proxy_parts = proxy.split('://')
            if (not proxy) or (not proxy.startswith('http://') and not proxy.startswith('https://')) or \
                    (len(proxy_parts) != 2):
                raise InvalidArgumentValueError(
                    'proxyEndpoint url should be in the format http(s)://<user>:<pwd>@<proxyhost>:<port>')
            logger.info(
                "successfully validated proxyEndpoint url; passing the proxy endpoint to the extension")
            configuration_protected_settings['omsagent.proxy'] = \
                configuration_protected_settings['proxyEndpoint']

    if not workspace_resource_id:
        workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
            cmd, subscription_id, cluster_resource_group_name, cluster_name)
    else:
        if not is_valid_resource_id(workspace_resource_id):
            raise InvalidArgumentValueError(
                '{} is not a valid Azure resource ID.'.format(
                    workspace_resource_id))

    if is_ci_extension_type:
        _ensure_container_insights_for_monitoring(
            cmd, workspace_resource_id).result()

    # extract subscription ID and resource group from workspace_resource_id URL
    parsed = parse_resource_id(workspace_resource_id)
    workspace_sub_id, workspace_rg_name, workspace_name = \
        parsed["subscription"], parsed["resource_group"], parsed["name"]

    log_analytics_client = cf_log_analytics(cmd.cli_ctx, workspace_sub_id)
    log_analytics_workspace = log_analytics_client.workspaces.get(
        workspace_rg_name, workspace_name)
    if not log_analytics_workspace:
        raise InvalidArgumentValueError(
            'Failed to retrieve workspace {}'.format(workspace_name))

    shared_keys = log_analytics_client.shared_keys.get_shared_keys(
        workspace_rg_name, workspace_name)
    if not shared_keys:
        raise InvalidArgumentValueError(
            'Failed to retrieve shared keys for workspace {}'.format(log_analytics_workspace))
    configuration_protected_settings['omsagent.secret.wsid'] = log_analytics_workspace.customer_id
    configuration_settings['logAnalyticsWorkspaceResourceID'] = workspace_resource_id
    configuration_protected_settings['omsagent.secret.key'] = shared_keys.primary_shared_key
    # set the domain for the ci agent for non azure public clouds
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        configuration_settings['omsagent.domain'] = 'opinsights.azure.cn'
    elif cloud_name.lower() == 'azureusgovernment':
        configuration_settings['omsagent.domain'] = 'opinsights.azure.us'
    elif cloud_name.lower() == 'usnat':
        configuration_settings['omsagent.domain'] = 'opinsights.azure.eaglex.ic.gov'
    elif cloud_name.lower() == 'ussec':
        configuration_settings['omsagent.domain'] = 'opinsights.azure.microsoft.scloud'
Example No. 52
def get_auth_if_no_valid_key_vault_connection(logger, source_name, source_id,
                                              key_vault_connections):
    auth_type = 'systemAssignedIdentity'
    client_id = None
    subscription_id = None

    if key_vault_connections:
        from ._resource_config import RESOURCE
        from msrestazure.tools import (is_valid_resource_id)

        # https://docs.microsoft.com/azure/app-service/app-service-key-vault-references
        if source_name == RESOURCE.WebApp:
            try:
                webapp = run_cli_cmd(
                    'az rest -u {}?api-version=2020-09-01 -o json'.format(
                        source_id))
                reference_identity = webapp.get('properties').get(
                    'keyVaultReferenceIdentity')
            except Exception as e:
                raise ValidationError(
                    '{}. Unable to get "properties.keyVaultReferenceIdentity" from {}. '
                    'Please check that your source id is correct.'.format(e, source_id))

            if is_valid_resource_id(reference_identity):  # User Identity
                auth_type = 'userAssignedIdentity'
                segments = parse_resource_id(reference_identity)
                subscription_id = segments.get('subscription')
                try:
                    identity = webapp.get('identity').get(
                        'userAssignedIdentities').get(reference_identity)
                    client_id = identity.get('clientId')
                except Exception:  # pylint: disable=broad-except
                    try:
                        identity = run_cli_cmd(
                            'az identity show --ids {} -o json'.format(
                                reference_identity))
                        client_id = identity.get('clientId')
                    except Exception:  # pylint: disable=broad-except
                        pass
                if not subscription_id or not client_id:
                    raise ValidationError(
                        'Unable to get subscriptionId or clientId '
                        'of the keyVaultReferenceIdentity {}'.format(reference_identity))
                for connection in key_vault_connections:
                    auth_info = connection.get('authInfo')
                    if (auth_info.get('clientId') == client_id and
                            auth_info.get('subscriptionId') == subscription_id):
                        logger.warning('key vault reference connection: %s',
                                       connection.get('id'))
                        return
            else:  # System Identity
                for connection in key_vault_connections:
                    if connection.get('authInfo').get('authType') == auth_type:
                        logger.warning('key vault reference connection: %s',
                                       connection.get('id'))
                        return

        # any connection with csi enabled is a valid connection
        elif source_name == RESOURCE.KubernetesCluster:
            for connection in key_vault_connections:
                if connection.get('target_service', dict()).get(
                        'resource_properties',
                        dict()).get('connect_as_kubernetes_csi_driver'):
                    return
            return {'authType': 'userAssignedIdentity'}

        else:
            logger.warning('key vault reference connection: %s',
                           key_vault_connections[0].get('id'))
            return

    auth_info = {'authType': auth_type}
    if client_id and subscription_id:
        auth_info['clientId'] = client_id
        auth_info['subscriptionId'] = subscription_id
    return auth_info
Example No. 53
def get_storage_account_properties(cli_ctx, account_id):
    scf = storage_client_factory(cli_ctx)
    from msrestazure.tools import parse_resource_id
    result = parse_resource_id(account_id)
    return scf.storage_accounts.get_properties(result['resource_group'], result['name'])
Example No. 54
    def _get_disk_info(self):
        from msrestazure.tools import parse_resource_id  # pylint: disable=import-error
        disks_info = {}
        disks_info['managed_disk'] = bool(
            getattr(self._vm.storage_profile.os_disk, 'managed_disk', None))
        if disks_info['managed_disk']:
            res_info = parse_resource_id(
                self._vm.storage_profile.os_disk.managed_disk.id)
            disk = self._vm_client.disks.get(res_info['resource_group'],
                                             res_info['name'])
            disks_info['os_disk'] = {
                'name': disk.name,
                'size': disk.disk_size_gb,
                'is_premium': disk.sku.tier.lower() == 'premium',
                'caching': self._vm.storage_profile.os_disk.caching.value,
            }
            disks_info['data_disks'] = []
            for data_disk in self._vm.storage_profile.data_disks:
                res_info = parse_resource_id(data_disk.managed_disk.id)
                disk = self._vm_client.disks.get(res_info['resource_group'],
                                                 res_info['name'])
                disks_info['data_disks'].append({
                    'name': disk.name,
                    'size': disk.disk_size_gb,
                    'is_premium': disk.sku.tier.lower() == 'premium',
                    'caching': data_disk.caching.value,
                    'lun': data_disk.lun
                })
        else:
            storage_accounts = list(
                self._storage_client.storage_accounts.list())
            blob_uri = self._vm.storage_profile.os_disk.vhd.uri
            parts = list(filter(None, blob_uri.split('/')))
            storage_account_name = parts[1].split('.')[0]
            disk_name, container_name = parts[-1], parts[-2]
            storage_account = next(
                x for x in storage_accounts
                if x.name.lower() == storage_account_name.lower())
            rg = parse_resource_id(storage_account.id)['resource_group']
            key = self._storage_client.storage_accounts.list_keys(
                rg, storage_account.name).keys[0].value
            disks_info['os_disk'] = {
                'name': disk_name,
                'account_name': storage_account_name,
                'table_endpoint': storage_account.primary_endpoints.table,
                'is_premium': storage_account.sku.tier.value.lower() == 'premium',
                'caching': self._vm.storage_profile.os_disk.caching.value,
                'key': key
            }
            if disks_info['os_disk']['is_premium']:
                disks_info['os_disk']['size'] = self._get_blob_size(
                    storage_account.name, container_name, disk_name, key)

            disks_info['data_disks'] = []
            for data_disk in self._vm.storage_profile.data_disks:
                blob_uri = data_disk.vhd.uri
                parts = list(filter(None, blob_uri.split('/')))
                storage_account_name = parts[1].split('.')[0]
                disk_name, container_name = parts[-1], parts[-2]
                storage_account = next(
                    x for x in storage_accounts
                    if x.name.lower() == storage_account_name.lower())
                rg = parse_resource_id(storage_account.id)['resource_group']
                key = self._storage_client.storage_accounts.list_keys(
                    rg, storage_account.name).keys[0].value
                is_premium = storage_account.sku.tier.value.lower() == 'premium'
                disks_info['data_disks'].append({
                    'name': disk_name,
                    'account_name': storage_account_name,
                    'table_endpoint': storage_account.primary_endpoints.table,
                    'is_premium': is_premium,
                    'caching': data_disk.caching.value,  # per-disk caching, matching the managed branch
                    'key': key,
                    'lun': data_disk.lun
                })
                if is_premium:
                    disks_info['data_disks'][-1]['size'] = self._get_blob_size(
                        storage_account.name, container_name, disk_name, key)

        return disks_info
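
The unmanaged branch above recovers account, container, and blob names purely from the VHD URI's path positions; a quick sketch with an illustrative URI:

blob_uri = 'https://mystorage.blob.core.windows.net/vhds/myvm-osdisk.vhd'
parts = list(filter(None, blob_uri.split('/')))
# parts == ['https:', 'mystorage.blob.core.windows.net', 'vhds', 'myvm-osdisk.vhd']
storage_account_name = parts[1].split('.')[0]      # mystorage
disk_name, container_name = parts[-1], parts[-2]   # myvm-osdisk.vhd, vhds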
Example No. 55
def cluster_application_update(cli_ctx,
                               oc,
                               client_id,
                               client_secret,
                               refresh_cluster_credentials):
    # QUESTION: is there possible unification with the create path?

    rp_client_sp = None
    client_sp = None
    random_id = generate_random_id()

    # if any of these are set, we expect the user to have access to fix RBAC, so we fail
    # common to both flows 1 and 2
    fail = client_id or client_secret or refresh_cluster_credentials

    aad = AADManager(cli_ctx)

    # check whether the RP service principal exists
    try:
        rp_client_sp = aad.get_service_principal(resolve_rp_client_id())
        if not rp_client_sp:
            raise ResourceNotFoundError("RP service principal not found.")
    except GraphErrorException as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)

    # refresh_cluster_credentials refreshes the cluster SP application.
    # First it tries to re-use the existing application and generate a new password.
    # If the application does not exist, it creates a new one.
    if refresh_cluster_credentials:
        try:
            app = aad.get_application_by_client_id(client_id or oc.service_principal_profile.client_id)
            if not app:
                # we were not able to find an application, create a new one
                parts = parse_resource_id(oc.cluster_profile.resource_group_id)
                cluster_resource_group = parts['resource_group']

                app, client_secret = aad.create_application(cluster_resource_group or 'aro-' + random_id)
                client_id = app.app_id
            else:
                client_secret = aad.refresh_application_credentials(app.object_id)
        except GraphErrorException as e:
            logger.error(e.message)
            raise

    # attempt to get/create SP if one was not found.
    try:
        client_sp = aad.get_service_principal(client_id or oc.service_principal_profile.client_id)
    except GraphErrorException as e:
        if fail:
            logger.error(e.message)
            raise
        logger.info(e.message)

    if fail and not client_sp:
        client_sp = aad.create_service_principal(client_id or oc.service_principal_profile.client_id)

    sp_obj_ids = [sp.object_id for sp in [rp_client_sp, client_sp] if sp]
    ensure_resource_permissions(cli_ctx, oc, fail, sp_obj_ids)

    return client_id, client_secret
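Example No. 56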
    def serialize_vm(self, vm):
        '''
        Convert a VirtualMachine object to dict.

        :param vm: VirtualMachine object
        :return: dict
        '''

        result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
        resource_group = parse_resource_id(result['id']).get('resource_group')
        instance = None
        power_state = None

        try:
            instance = self.compute_client.virtual_machines.instance_view(resource_group, vm.name)
            instance = self.serialize_obj(instance, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
        except Exception as exc:
            self.fail("Error getting virtual machine {0} instance view - {1}".format(vm.name, str(exc)))

        for index in range(len(instance['statuses'])):
            code = instance['statuses'][index]['code'].split('/')
            if code[0] == 'PowerState':
                power_state = code[1]
            elif code[0] == 'OSState' and code[1] == 'generalized':
                power_state = 'generalized'
                break

        new_result = {}
        new_result['power_state'] = power_state
        new_result['id'] = vm.id
        new_result['resource_group'] = resource_group
        new_result['name'] = vm.name
        new_result['state'] = 'present'
        new_result['location'] = vm.location
        new_result['vm_size'] = result['properties']['hardwareProfile']['vmSize']
        os_profile = result['properties'].get('osProfile')
        if os_profile is not None:
            new_result['admin_username'] = os_profile.get('adminUsername')
        image = result['properties']['storageProfile'].get('imageReference')
        if image is not None:
            if image.get('publisher', None) is not None:
                new_result['image'] = {
                    'publisher': image['publisher'],
                    'sku': image['sku'],
                    'offer': image['offer'],
                    'version': image['version']
                }
            else:
                new_result['image'] = {
                    'id': image.get('id', None)
                }

        new_result['boot_diagnostics'] = {
            'enabled': 'diagnosticsProfile' in result['properties'] and
                       'bootDiagnostics' in result['properties']['diagnosticsProfile'] and
                       result['properties']['diagnosticsProfile']['bootDiagnostics']['enabled'] or False,
            'storage_uri': 'diagnosticsProfile' in result['properties'] and
                           'bootDiagnostics' in result['properties']['diagnosticsProfile'] and
                           result['properties']['diagnosticsProfile']['bootDiagnostics']['storageUri'] or None
        }
        if new_result['boot_diagnostics']['enabled']:
            new_result['boot_diagnostics']['console_screenshot_uri'] = result['properties']['instanceView']['bootDiagnostics']['consoleScreenshotBlobUri']
            new_result['boot_diagnostics']['serial_console_log_uri'] = result['properties']['instanceView']['bootDiagnostics']['serialConsoleLogBlobUri']

        vhd = result['properties']['storageProfile']['osDisk'].get('vhd')
        if vhd is not None:
            # Unmanaged OS disk: recover the storage account, container and
            # blob names from the VHD URI.
            url = urlparse(vhd['uri'])
            new_result['storage_account_name'] = url.netloc.split('.')[0]
            new_result['storage_container_name'] = url.path.split('/')[1]
            new_result['storage_blob_name'] = url.path.split('/')[-1]

        new_result['os_disk_caching'] = result['properties']['storageProfile']['osDisk']['caching']
        new_result['os_type'] = result['properties']['storageProfile']['osDisk']['osType']
        new_result['data_disks'] = [{
            'lun': disk.get('lun'),
            'disk_size_gb': disk.get('diskSizeGB'),
            'managed_disk_type': disk.get('managedDisk', {}).get('storageAccountType'),
            'caching': disk.get('caching')
        } for disk in result['properties']['storageProfile']['dataDisks']]

        new_result['network_interface_names'] = [
            re.sub('.*networkInterfaces/', '', nic['id'])
            for nic in result['properties']['networkProfile']['networkInterfaces']
        ]

        new_result['tags'] = vm.tags
        return new_result
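
A quick way to sanity-check the power-state logic above is to run it on a hand-written statuses list. A minimal sketch; the status codes below are illustrative, not output from a live VM:

def extract_power_state(statuses):
    # Codes look like 'PowerState/running' or 'OSState/generalized';
    # a generalized OS state takes precedence over any power state.
    power_state = None
    for status in statuses:
        code = status['code'].split('/')
        if code[0] == 'PowerState':
            power_state = code[1]
        elif code[0] == 'OSState' and code[1] == 'generalized':
            return 'generalized'
    return power_state

print(extract_power_state([
    {'code': 'ProvisioningState/succeeded'},
    {'code': 'PowerState/deallocated'},
]))  # prints: deallocated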
Example No. 57
def aks_kollect_cmd(
        cmd,  # pylint: disable=too-many-statements,too-many-locals
        client,
        resource_group_name: str,
        name: str,
        storage_account: str,
        sas_token: str,
        container_logs: str,
        kube_objects: str,
        node_logs: str,
        node_logs_windows: str) -> None:
    colorama.init()

    mc = client.get(resource_group_name, name)

    if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')

    storage_account_id = None
    if storage_account is None:
        print(
            "No storage account specified. Trying to get a storage account from the diagnostic settings."
        )
        storage_account_id = _get_storage_account_from_diag_settings(
            cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError(
                "A storage account must be specified, since there isn't one in the diagnostic settings."
            )

    from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
                                   resource_id)
    if storage_account_id is None:
        if not is_valid_resource_id(storage_account):
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage',
                type='storageAccounts',
                name=storage_account)
        else:
            storage_account_id = storage_account

    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)

    storage_account_name = parsed_storage_account['name']

    readonly_sas_token = None
    if sas_token is None:
        storage_client = get_storage_client(
            cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(
            parsed_storage_account['resource_group'], storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = _cloud_storage_account_service_factory(
            cmd.cli_ctx, kwargs)

        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b', 'sco', 'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b', 'sco', 'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))

        readonly_sas_token = readonly_sas_token.strip('?')

    print()
    print(
        'This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
        f'save them to the storage account '
        f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
        f'outlined in {_format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print(
        'If you share access to that storage account with Azure support, you consent to the terms outlined'
        f' in {_format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return

    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    credential_results = client.list_cluster_admin_credentials(
        resource_group_name, name, None)
    kubeconfig = credential_results.kubeconfigs[0].value.decode(
        encoding='UTF-8')
    print_or_merge_credentials(temp_kubeconfig_path, kubeconfig, False, None)

    print()
    print("Starts collecting diag info for cluster %s " % name)

    # Base the container name on the fqdn (or private fqdn) of the managed cluster
    container_name = _generate_container_name(mc.fqdn, mc.private_fqdn)
    sas_token = sas_token.strip('?')

    cluster_features = _get_cluster_features(cmd.cli_ctx, resource_group_name,
                                             name)

    run_id = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%SZ")
    kustomize_yaml = _get_kustomize_yaml(storage_account_name, sas_token,
                                         container_name, run_id,
                                         cluster_features, container_logs,
                                         kube_objects, node_logs,
                                         node_logs_windows)
    kustomize_folder = tempfile.mkdtemp()
    kustomize_file_path = os.path.join(kustomize_folder, "kustomization.yaml")
    try:
        with os.fdopen(os.open(kustomize_file_path, os.O_RDWR | os.O_CREAT),
                       'w+t') as kustomize_file:
            kustomize_file.write(kustomize_yaml)

        try:
            print()
            print("Cleaning up aks-periscope resources if existing")

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "serviceaccount,configmap,daemonset,secret", "--all", "-n",
                CONST_PERISCOPE_NAMESPACE, "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "ClusterRoleBinding", "aks-periscope-role-binding",
                "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "ClusterRoleBinding", "aks-periscope-role-binding-view",
                "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "ClusterRole", "aks-periscope-role", "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "--all", "apd", "-n", CONST_PERISCOPE_NAMESPACE,
                "--ignore-not-found"
            ],
                            stderr=subprocess.DEVNULL)

            subprocess.call([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                "CustomResourceDefinition",
                "diagnostics.aks-periscope.azure.github.com",
                "--ignore-not-found"
            ],
                            stderr=subprocess.STDOUT)

            print()
            print("Deploying aks-periscope")

            subprocess.check_output([
                "kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-k",
                kustomize_folder, "-n", CONST_PERISCOPE_NAMESPACE
            ],
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            raise CLIError(err.output)
    finally:
        os.remove(kustomize_file_path)
        os.rmdir(kustomize_folder)

    print()

    token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
    log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
                              f"{container_name}?{token_in_storage_account_url}"

    print(
        f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {_format_bright(storage_account_name)}'
    )

    print()
    print(
        f'You can download Azure Storage Explorer here '
        f'{_format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
        f' to check the logs by adding the storage account using the following URL:'
    )
    print(f'{_format_hyperlink(log_storage_account_url)}')

    print()
    if not prompt_y_n('Do you want to see analysis results now?', default="n"):
        print(
            f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
            f"anytime to check the analysis results.")
    else:
        _display_diagnostics_report(temp_kubeconfig_path)
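
The URL printed above embeds a SAS token, and both tokens are stripped of any leading '?' so the query string stays well-formed. A minimal sketch of that assembly, with made-up account, container, and token values:

storage_account_name = 'mystorage'        # made-up value
container_name = 'periscope-logs'         # made-up value
sas_token = '?sv=2020-08-04&sig=abc'      # made-up; a real token comes from the storage SDK
sas_token = sas_token.strip('?')          # tolerate tokens with or without a leading '?'
print(f"https://{storage_account_name}.blob.core.windows.net/{container_name}?{sas_token}")
# https://mystorage.blob.core.windows.net/periscope-logs?sv=2020-08-04&sig=abc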
Example No. 58
def get_subscription() -> Any:  # should be str
    return parse_resource_id(os.environ["ONEFUZZ_DATA_STORAGE"])["subscription"]
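
For reference, parse_resource_id splits a full ARM ID into named parts, which is all get_subscription relies on. A minimal sketch with a made-up storage account ID:

from msrestazure.tools import parse_resource_id

parts = parse_resource_id(
    '/subscriptions/00000000-0000-0000-0000-000000000000'
    '/resourceGroups/my-rg/providers/Microsoft.Storage'
    '/storageAccounts/mystorage')
print(parts['subscription'])    # 00000000-0000-0000-0000-000000000000
print(parts['resource_group'])  # my-rg
print(parts['name'])            # mystorage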
Example No. 59
def _get_vnet(cmd, vnet_id):
    vnet = parse_resource_id(vnet_id)
    network_client = _get_network_client(cmd.cli_ctx,
                                         subscription_id=vnet['subscription'])
    return network_client.virtual_networks.get(vnet['resource_group'],
                                               vnet['resource_name'])
Example No. 60
def encrypt_vmss(cmd, resource_group_name, vmss_name,  # pylint: disable=too-many-locals, too-many-statements
                 disk_encryption_keyvault,
                 key_encryption_keyvault=None,
                 key_encryption_key=None,
                 key_encryption_algorithm='RSA-OAEP',
                 volume_type=None,
                 force=False):
    # pylint: disable=no-member
    UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')

    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    os_type = 'Linux' if vmss.virtual_machine_profile.os_profile.linux_configuration else 'Windows'
    is_linux = _is_linux_vm(os_type)
    extension = vmss_extension_info[os_type]

    # 1. First validate arguments
    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)

    # encryption is not supported on all linux distros, but service never tells you
    # so let us verify at the client side
    if is_linux:
        image_reference = getattr(vmss.virtual_machine_profile.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(image_reference, volume_type)
            if not result:
                logger.warning(message)

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
                                                          (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears a key name
            key_encryption_key = _get_keyvault_key_url(
                cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)

    # to avoid bad server errors, ensure the vault has the right configurations
    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)

    # 2. we are ready to provision/update the disk encryption extensions
    public_config = {
        'KeyVaultURL': disk_encryption_keyvault_url,
        'KeyEncryptionKeyURL': key_encryption_key or '',
        "KeyVaultResourceId": disk_encryption_keyvault,
        "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
        'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption'
    }

    ext = VirtualMachineScaleSetExtension(name=extension['name'],
                                          publisher=extension['publisher'],
                                          type=extension['name'],
                                          type_handler_version=extension['version'],
                                          settings=public_config,
                                          auto_upgrade_minor_version=True,
                                          force_update_tag=uuid.uuid4())
    if not vmss.virtual_machine_profile.extension_profile:
        # msrest models take keyword arguments only, so pass extensions= explicitly.
        vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
    vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
    poller = compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
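
The '://' test above is a simple heuristic for telling a bare key name from a full Key Vault key URL. A minimal standalone sketch; resolve_key_url is a hypothetical stand-in for the _get_keyvault_key_url helper used above:

def normalize_kek(key, vault_name, resolve_key_url):
    # No scheme separator: treat the value as a bare key name and resolve it;
    # otherwise assume it is already a full key URL.
    if '://' not in key:
        return resolve_key_url(vault_name, key)
    return key

# Made-up vault and key names, for illustration only.
def resolve_key_url(vault, key):
    return f'https://{vault}.vault.azure.net/keys/{key}'

print(normalize_kek('mykek', 'myvault', resolve_key_url))
# https://myvault.vault.azure.net/keys/mykek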