Example no. 1
def restore_disks(cmd, client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account,
                  target_resource_group=None, restore_to_staging_storage_account=None, restore_only_osdisk=None,
                  diskslist=None, restore_as_unmanaged_disks=None):
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name, container_name,
                     item_name, "AzureIaasVM", "VM")
    _validate_item(item)
    recovery_point = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name, vault_name,
                                         container_name, item_name, rp_name, "AzureIaasVM", "VM")
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    vault_location = vault.location

    # Get container and item URIs
    container_uri = _get_protection_container_uri_from_id(item.id)
    item_uri = _get_protected_item_uri_from_id(item.id)

    # Original Storage Account Restore Logic
    use_original_storage_account = _should_use_original_storage_account(recovery_point,
                                                                        restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning(
            """
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to the
            given storage account.
            """)

    # Construct trigger restore request object
    sa_name, sa_rg = _get_resource_name_and_rg(resource_group_name, storage_account)
    _storage_account_id = _get_storage_account_id(cmd.cli_ctx, sa_name, sa_rg)
    _source_resource_id = item.properties.source_resource_id
    target_rg_id = None

    if restore_as_unmanaged_disks and target_resource_group is not None:
        raise CLIError(
            """
            Both restore_as_unmanaged_disks and target_resource_group can't be specified.
            Please give only one parameter and retry.
            """)

    if recovery_point.properties.is_managed_virtual_machine:
        if target_resource_group is not None:
            target_rg_id = '/'.join(_source_resource_id.split('/')[:4]) + "/" + target_resource_group
        if not restore_as_unmanaged_disks and target_resource_group is None:
            logger.warning(
                """
                The disks of the managed VM will be restored as unmanaged since targetRG parameter is not provided.
                This will NOT leverage the instant restore functionality.
                Hence it can be significantly slow based on given storage account.
                To leverage instant restore, provide the target RG parameter.
                Otherwise, provide the intent next time by passing the --restore-as-unmanaged-disks parameter
                """)

    _validate_restore_disk_parameters(restore_only_osdisk, diskslist)
    restore_disk_lun_list = None
    if restore_only_osdisk:
        restore_disk_lun_list = []

    if diskslist:
        restore_disk_lun_list = diskslist

    trigger_restore_properties = IaasVMRestoreRequest(create_new_cloud_service=True,
                                                      recovery_point_id=rp_name,
                                                      recovery_type='RestoreDisks',
                                                      region=vault_location,
                                                      storage_account_id=_storage_account_id,
                                                      source_resource_id=_source_resource_id,
                                                      target_resource_group_id=target_rg_id,
                                                      original_storage_account_option=use_original_storage_account,
                                                      restore_disk_lun_list=restore_disk_lun_list)
    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Trigger restore
    result = client.trigger(vault_name, resource_group_name, fabric_name,
                            container_uri, item_uri, rp_name,
                            trigger_restore_request, raw=True)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
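The target_rg_id construction above keeps the first four segments of the source resource ID (the empty leading segment, 'subscriptions', the subscription GUID, and 'resourceGroups') and appends the target group name. A minimal standalone sketch with a hypothetical source ID:

source_resource_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
                      '/resourceGroups/sourceRG/providers/Microsoft.Compute'
                      '/virtualMachines/myVM')  # hypothetical ID for illustration
target_resource_group = 'targetRG'
target_rg_id = '/'.join(source_resource_id.split('/')[:4]) + '/' + target_resource_group
print(target_rg_id)
# /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/targetRG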
def prepare_vnet(cmd, server_name, vnet, subnet, resource_group_name, loc,
                 delegation_service_name, vnet_address_pref,
                 subnet_address_pref):
    Delegation, Subnet, VirtualNetwork, AddressSpace, ServiceEndpoint = cmd.get_models(
        'Delegation',
        'Subnet',
        'VirtualNetwork',
        'AddressSpace',
        'ServiceEndpointPropertiesFormat',
        resource_type=ResourceType.MGMT_NETWORK)
    delegation = Delegation(name=delegation_service_name,
                            service_name=delegation_service_name)
    service_endpoint = ServiceEndpoint(service='Microsoft.Storage')
    nw_client = network_client_factory(cmd.cli_ctx)

    # pylint: disable=too-many-nested-blocks
    if subnet is not None and vnet is None:
        if is_valid_resource_id(subnet):
            logger.warning(
                "You have supplied a Subnet Id. Verifying its existence...")
            parsed_subnet_id = parse_resource_id(subnet)
            subnet_name = parsed_subnet_id['resource_name']
            vnet_name = parsed_subnet_id['name']
            resource_group = parsed_subnet_id['resource_group']
            subscription = parsed_subnet_id['subscription']
            resource_client = resource_client_factory(cmd.cli_ctx)
            rg = resource_client.resource_groups.get(resource_group)
            location = rg.location
            validate_rg_loc_sub(subscription, location,
                                get_subscription_id(cmd.cli_ctx), loc)
            subnet_result = check_resource_existence(cmd, subnet_name,
                                                     vnet_name, resource_group)
            if subnet_result:
                logger.info(
                    'Using existing subnet "%s" in resource group "%s"',
                    subnet_result.name, resource_group)

                if not subnet_result.delegations:
                    logger.info(
                        'Adding "%s" delegation to the existing subnet.', )
                    subnet_result.delegations = [delegation]
                    subnet_result.service_endpoints = [service_endpoint]
                    subnet_result = nw_client.subnets.begin_create_or_update(
                        resource_group, vnet_name, subnet_name,
                        subnet_result).result()
                else:
                    for delgtn in subnet_result.delegations:
                        if delgtn.service_name != delegation_service_name:
                            raise CLIError(
                                "Can not use subnet with existing delegations other than {}"
                                .format(delegation_service_name))
            else:
                logger.warning("The supplied subnet id does not exist.")
                subnet_result = _create_vnet_subnet_delegation(
                    nw_client, resource_group_name, vnet_name,
                    'Subnet' + server_name[6:], location, server_name,
                    delegation, service_endpoint, VirtualNetwork, Subnet,
                    AddressSpace, DEFAULT_VNET_ADDRESS_PREFIX,
                    DEFAULT_SUBNET_PREFIX)
        else:
            raise CLIError("Incorrectly formed Subnet id.")
    elif subnet is None and vnet is not None:
        if is_valid_resource_id(vnet):
            logger.warning(
                "You have supplied a Vnet Id. Verifying its existence...")
            parsed_vnet_id = parse_resource_id(vnet)
            vnet_name = parsed_vnet_id['resource_name']
            resource_group = parsed_vnet_id['resource_group']
            subscription = parsed_vnet_id['subscription']
            resource_client = resource_client_factory(cmd.cli_ctx)
            rg = resource_client.resource_groups.get(resource_group)
            location = rg.location
            validate_rg_loc_sub(subscription, location,
                                get_subscription_id(cmd.cli_ctx), loc)
            subnet_result = _create_vnet_subnet_delegation(
                nw_client, resource_group, vnet_name,
                'Subnet' + server_name[6:], location, server_name, delegation,
                service_endpoint, VirtualNetwork, Subnet, AddressSpace,
                DEFAULT_VNET_ADDRESS_PREFIX, DEFAULT_SUBNET_PREFIX)
        elif len(vnet.split('\\')) == 1:
            logger.warning(
                "You have supplied a Vnet Name. Verifying its existence...")
            subnet_result = _create_vnet_subnet_delegation(
                nw_client, resource_group_name, vnet,
                'Subnet' + server_name[6:], loc, server_name, delegation,
                service_endpoint, VirtualNetwork, Subnet, AddressSpace,
                DEFAULT_VNET_ADDRESS_PREFIX, DEFAULT_SUBNET_PREFIX)

            # If the supplied Vnet name was an existing one, ensure that it is in the right subscription and location.
            parsed_subnet_id = parse_resource_id(subnet_result.id)
            resource_group = parsed_subnet_id['resource_group']
            subscription = parsed_subnet_id['subscription']
            resource_client = resource_client_factory(cmd.cli_ctx)
            rg = resource_client.resource_groups.get(resource_group)
            location = rg.location
            validate_rg_loc_sub(subscription, location,
                                get_subscription_id(cmd.cli_ctx), loc)
        else:
            raise CLIError("Incorrectly formed Vnet id or Vnet name")
    elif subnet is not None and vnet is not None and vnet_address_pref is not None and subnet_address_pref is not None:
        if (len(vnet.split('\\')) == 1) and (len(subnet.split('\\')) == 1):
            subnet_result = _create_with_resource_names(
                cmd, vnet, subnet, resource_group_name, delegation,
                service_endpoint, nw_client, delegation_service_name,
                server_name, VirtualNetwork, Subnet, AddressSpace, loc,
                vnet_address_pref, subnet_address_pref)
        else:
            raise CLIError(
                "If you pass an address prefix, please consider passing a name (instead of Id) for a subnet or vnet."
            )
    elif subnet is not None and vnet is not None:
        if (len(vnet.split('\\')) == 1) and (len(subnet.split('\\')) == 1):
            subnet_result = _create_with_resource_names(
                cmd, vnet, subnet, resource_group_name, delegation,
                service_endpoint, nw_client, delegation_service_name,
                server_name, VirtualNetwork, Subnet, AddressSpace, loc,
                DEFAULT_VNET_ADDRESS_PREFIX, DEFAULT_SUBNET_PREFIX)
        else:
            raise CLIError(
                "If you pass both --vnet and --subnet, consider passing names instead of ids."
            )
    else:
        return None
    return subnet_result.id
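A note on the parse_resource_id fields used above: for a child resource ID, msrestazure returns the parent resource under 'name' and the deepest child under 'resource_name'. A quick illustration with a hypothetical subnet ID:

from msrestazure.tools import parse_resource_id

subnet_id = ('/subscriptions/00000000-0000-0000-0000-000000000000'
             '/resourceGroups/myRG/providers/Microsoft.Network'
             '/virtualNetworks/myVnet/subnets/mySubnet')  # hypothetical
parts = parse_resource_id(subnet_id)
print(parts['name'])           # myVnet -- the parent virtual network
print(parts['resource_name'])  # mySubnet -- the deepest child resource
print(parts['resource_group'], parts['subscription'])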
Example no. 3
def _validate_managed_disk_sku(sku):

    allowed_skus = ['Premium_LRS', 'Standard_LRS']
    if sku and sku.lower() not in [x.lower() for x in allowed_skus]:
        raise CLIError("invalid storage SKU '{}': allowed values: '{}'".format(sku, allowed_skus))
def validate_load_balancer_idle_timeout(namespace):
    """validate load balancer profile idle timeout"""
    if namespace.load_balancer_idle_timeout is not None:
        if namespace.load_balancer_idle_timeout < 4 or namespace.load_balancer_idle_timeout > 100:
            raise CLIError("--load-balancer-idle-timeout must be in the range [4,100]")
def validate_create_parameters(namespace):
    if not namespace.name:
        raise CLIError('--name has no value')
    if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix:
        raise CLIError('--dns-prefix has no value')
def validate_load_balancer_outbound_ip_prefixes(namespace):
    """validate load balancer profile outbound IP prefix ids"""
    if namespace.load_balancer_outbound_ip_prefixes is not None:
        ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
        if not all(ip_prefix_id_list):
            raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace")
def validate_user(namespace):
    if namespace.user.lower() != "clusteruser" and \
            namespace.user.lower() != "clustermonitoringuser":
        raise CLIError("--user can only be clusterUser or clusterMonitoringUser")
Example no. 8
def _documentdb_deprecate(_, args):
    if args[0] == 'documentdb':
        from azure.cli.core.util import CLIError
        raise CLIError('All documentdb commands have been renamed to cosmosdb')
Example no. 9
def decrypt_vm(resource_group_name, vm_name, volume_type=None, force=False):
    '''
    Disable disk encryption on OS disk, Data disks, or both
    '''
    compute_client = _compute_client_factory()
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    # pylint: disable=no-member
    os_type = vm.storage_profile.os_disk.os_type.value

    # 1. be nice, figure out the default volume type and also verify VM will not be busted
    is_linux = _is_linux_vm(os_type)
    if is_linux:
        if volume_type:
            if not force:
                if volume_type == _DATA_VOLUME_TYPE:
                    status = show_vm_encryption_status(resource_group_name,
                                                       vm_name)
                    if status['osDisk'] == _STATUS_ENCRYPTED:
                        raise CLIError(
                            "Linux VM's OS disk is encrypted. Disabling encryption on data "
                            "disk can render the VM unbootable. Use '--force' "
                            "to ingore the warning")
                else:
                    raise CLIError(
                        "Only Data disks can have encryption disabled in a Linux VM. "
                        "Use '--force' to ingore the warning")
        else:
            volume_type = _DATA_VOLUME_TYPE
    elif volume_type is None:
        if vm.storage_profile.data_disks:
            raise CLIError("VM has data disks, please specify --volume-type")

    extension = vm_extension_info[os_type]
    # use a fresh sequence_version: the extension requires a new value for each encryption operation
    sequence_version = uuid.uuid4()

    # 2. update the disk encryption extension
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
        'SequenceVersion': sequence_version,
    }

    from azure.mgmt.compute.models import VirtualMachineExtension, DiskEncryptionSettings

    ext = VirtualMachineExtension(
        vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        type_handler_version=extension['version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # 3. Remove the secret from VM's storage profile
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError("Extension updating didn't succeed")

    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    disk_encryption_settings = DiskEncryptionSettings(enabled=False)
    vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
    set_vm(vm)
Example no. 10
def import_certificate(client, vault_base_url, certificate_name, certificate_data,
                       disabled=False, password=None, certificate_policy=None, tags=None):
    import binascii

    x509 = None
    content_type = None
    try:
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
        # if we get here, we know it was a PEM file
        content_type = 'application/x-pem-file'
        try:
            # for PEM files (including automatic endline conversion for Windows)
            certificate_data = certificate_data.decode('utf-8').replace('\r\n', '\n')
        except UnicodeDecodeError:
            certificate_data = binascii.b2a_base64(certificate_data).decode('utf-8')
    except (ValueError, crypto.Error):
        pass

    if not x509:
        try:
            if password:
                x509 = crypto.load_pkcs12(certificate_data, password).get_certificate()
            else:
                x509 = crypto.load_pkcs12(certificate_data).get_certificate()
            content_type = 'application/x-pkcs12'
            certificate_data = binascii.b2a_base64(certificate_data).decode('utf-8')
        except crypto.Error:
            raise CLIError(
                'We could not parse the provided certificate as .pem or .pfx. Please verify the certificate with OpenSSL.')  # pylint: disable=line-too-long

    not_before, not_after = None, None

    if x509.get_notBefore():
        not_before = _asn1_to_iso8601(x509.get_notBefore())

    if x509.get_notAfter():
        not_after = _asn1_to_iso8601(x509.get_notAfter())

    cert_attrs = CertificateAttributes(
        enabled=not disabled,
        not_before=not_before,
        expires=not_after)

    if certificate_policy:
        secret_props = certificate_policy.get('secret_properties')
        if secret_props:
            secret_props['content_type'] = content_type
        else:
            certificate_policy['secret_properties'] = SecretProperties(content_type=content_type)
    else:
        certificate_policy = CertificatePolicy(
            secret_properties=SecretProperties(content_type=content_type))

    logger.info("Starting 'keyvault certificate import'")
    result = client.import_certificate(vault_base_url=vault_base_url,
                                       certificate_name=certificate_name,
                                       base64_encoded_certificate=certificate_data,
                                       certificate_attributes=cert_attrs,
                                       certificate_policy=certificate_policy,
                                       tags=tags,
                                       password=password)
    logger.info("Finished 'keyvault certificate import'")
    return result
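The nested try/except above is effectively a content-type probe: try PEM first, then fall back to PKCS#12. A condensed sketch of just that probe (it assumes an older pyOpenSSL, as the code above does; load_pkcs12 was removed from recent pyOpenSSL releases):

from OpenSSL import crypto

def detect_content_type(certificate_data, password=None):
    # Return the Key Vault content type for raw certificate bytes.
    try:
        crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
        return 'application/x-pem-file'
    except (ValueError, crypto.Error):
        pass
    # Raises crypto.Error if the data is not PKCS#12 either.
    crypto.load_pkcs12(certificate_data, password)
    return 'application/x-pkcs12'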
Example no. 11
def db_show_conn_str(
        cmd,
        client_provider,
        database_name='<databasename>',
        server_name='<servername>',
        auth_type=ClientAuthenticationType.sql_password.value):

    server_suffix = _get_server_dns_suffx(cmd.cli_ctx)

    conn_str_props = {
        'server': server_name,
        'server_fqdn': '{}{}'.format(server_name, server_suffix),
        'server_suffix': server_suffix,
        'db': database_name
    }

    formats = {
        ClientType.ado_net.value: {
            ClientAuthenticationType.sql_password.value:
                'Server=tcp:{server_fqdn},1433;Database={db};User ID=<username>;'
                'Password=<password>;Encrypt=true;Connection Timeout=30;',
            ClientAuthenticationType.active_directory_password.value:
                'Server=tcp:{server_fqdn},1433;Database={db};User ID=<username>;'
                'Password=<password>;Encrypt=true;Connection Timeout=30;'
                'Authentication="Active Directory Password"',
            ClientAuthenticationType.active_directory_integrated.value:
                'Server=tcp:{server_fqdn},1433;Database={db};Encrypt=true;'
                'Connection Timeout=30;Authentication="Active Directory Integrated"'
        },
        ClientType.sqlcmd.value: {
            ClientAuthenticationType.sql_password.value:
                'sqlcmd -S tcp:{server_fqdn},1433 -d {db} -U <username> -P <password> -N -l 30',
            ClientAuthenticationType.active_directory_password.value:
                'sqlcmd -S tcp:{server_fqdn},1433 -d {db} -U <username> -P <password> -G -N -l 30',
            ClientAuthenticationType.active_directory_integrated.value:
                'sqlcmd -S tcp:{server_fqdn},1433 -d {db} -G -N -l 30',
        },
        ClientType.jdbc.value: {
            ClientAuthenticationType.sql_password.value:
                'jdbc:sqlserver://{server_fqdn}:1433;database={db};user=<username>@{server};'
                'password=<password>;encrypt=true;trustServerCertificate=false;'
                'hostNameInCertificate=*{server_suffix};loginTimeout=30',
            ClientAuthenticationType.active_directory_password.value:
                'jdbc:sqlserver://{server_fqdn}:1433;database={db};user=<username>;'
                'password=<password>;encrypt=true;trustServerCertificate=false;'
                'hostNameInCertificate=*{server_suffix};loginTimeout=30;'
                'authentication=ActiveDirectoryPassword',
            ClientAuthenticationType.active_directory_integrated.value:
                'jdbc:sqlserver://{server_fqdn}:1433;database={db};'
                'encrypt=true;trustServerCertificate=false;'
                'hostNameInCertificate=*{server_suffix};loginTimeout=30;'
                'authentication=ActiveDirectoryIntegrated',
        },
        ClientType.php_pdo.value: {
            # pylint: disable=line-too-long
            ClientAuthenticationType.sql_password.value:
                '$conn = new PDO("sqlsrv:server = tcp:{server_fqdn},1433; Database = {db}; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");',
            ClientAuthenticationType.active_directory_password.value:
                CLIError('PHP Data Object (PDO) driver only supports SQL Password authentication.'),
            ClientAuthenticationType.active_directory_integrated.value:
                CLIError('PHP Data Object (PDO) driver only supports SQL Password authentication.'),
        },
        ClientType.php.value: {
            # pylint: disable=line-too-long
            ClientAuthenticationType.sql_password.value:
                '$connectionOptions = array("UID"=>"<username>@{server}", "PWD"=>"<password>", "Database"=>{db}, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:{server_fqdn},1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);',
            ClientAuthenticationType.active_directory_password.value:
                CLIError('PHP sqlsrv driver only supports SQL Password authentication.'),
            ClientAuthenticationType.active_directory_integrated.value:
                CLIError('PHP sqlsrv driver only supports SQL Password authentication.'),
        },
        ClientType.odbc.value: {
            ClientAuthenticationType.sql_password.value:
                'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{server_fqdn},1433;'
                'Database={db};Uid=<username>@{server};Pwd=<password>;Encrypt=yes;'
                'TrustServerCertificate=no;',
            ClientAuthenticationType.active_directory_password.value:
                'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{server_fqdn},1433;'
                'Database={db};Uid=<username>@{server};Pwd=<password>;Encrypt=yes;'
                'TrustServerCertificate=no;Authentication=ActiveDirectoryPassword',
            ClientAuthenticationType.active_directory_integrated.value:
                'Driver={{ODBC Driver 13 for SQL Server}};Server=tcp:{server_fqdn},1433;'
                'Database={db};Encrypt=yes;TrustServerCertificate=no;'
                'Authentication=ActiveDirectoryIntegrated',
        }
    }

    f = formats[client_provider][auth_type]

    if isinstance(f, Exception):
        # Error
        raise f

    # Success
    return f.format(**conn_str_props)
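Worth noting in the ODBC and PHP templates above: literal braces must be doubled ({{ and }}) so that str.format emits them verbatim while still substituting the named fields. A tiny demonstration with hypothetical values:

template = ('Driver={{ODBC Driver 13 for SQL Server}};'
            'Server=tcp:{server_fqdn},1433;Database={db};')
print(template.format(server_fqdn='myserver.database.windows.net', db='mydb'))
# Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;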
Example no. 12
def import_key(client, vault_base_url, key_name, destination=None, key_ops=None, disabled=False, expires=None,
               not_before=None, tags=None, pem_file=None, pem_password=None, byok_file=None):
    """ Import a private key. Supports importing base64 encoded private keys from PEM files.
        Supports importing BYOK keys into HSM for premium KeyVaults. """
    from azure.keyvault.models import KeyAttributes, JsonWebKey

    def _to_bytes(hex_string):
        # zero pads and decodes a hex string
        if len(hex_string) % 2:
            hex_string = '0{}'.format(hex_string)
        return codecs.decode(hex_string, 'hex_codec')

    def _set_rsa_parameters(dest, src):
        # map OpenSSL parameter names to JsonWebKey property names
        conversion_dict = {
            'modulus': 'n',
            'publicExponent': 'e',
            'privateExponent': 'd',
            'prime1': 'p',
            'prime2': 'q',
            'exponent1': 'dp',
            'exponent2': 'dq',
            'coefficient': 'qi'
        }
        # regex: looks for matches that fit the following patterns:
        #   integerPattern: 65537 (0x10001)
        #   hexPattern:
        #      00:a0:91:4d:00:23:4a:c6:83:b2:1b:4c:15:d5:be:
        #      d8:87:bd:c9:59:c2:e5:7a:f5:4a:e7:34:e8:f0:07:
        # The desired match should always be the first component of the match
        regex = re.compile(r'([^:\s]*(:[^\:)]+\))|([^:\s]*(:\s*[0-9A-Fa-f]{2})+))')
        # regex2: extracts the hex string from a format like: 65537 (0x10001)
        regex2 = re.compile(r'(?<=\(0x{1})([0-9A-Fa-f]*)(?=\))')

        key_params = crypto.dump_privatekey(crypto.FILETYPE_TEXT, src).decode('utf-8')
        for match in regex.findall(key_params):
            comps = match[0].split(':', 1)
            name = conversion_dict.get(comps[0], None)
            if name:
                value = comps[1].replace(' ', '').replace('\n', '').replace(':', '')
                try:
                    value = _to_bytes(value)
                except Exception:  # pylint:disable=broad-except
                    # if decoding fails it is because of an integer pattern. Extract the hex
                    # string and retry
                    value = _to_bytes(regex2.findall(value)[0])
                setattr(dest, name, value)

    key_attrs = KeyAttributes(not disabled, not_before, expires)
    key_obj = JsonWebKey(key_ops=key_ops)
    if pem_file:
        key_obj.kty = 'RSA'
        logger.info('Reading %s', pem_file)
        with open(pem_file, 'r') as f:
            pem_data = f.read()
        # load private key and prompt for password if encrypted
        try:
            pem_password = str(pem_password).encode() if pem_password else None
            # despite documentation saying password should be a string, it needs to actually
            # be UTF-8 encoded bytes
            pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, pem_data, pem_password)
        except crypto.Error:
            raise CLIError(
                'Import failed: Unable to decrypt private key. --pem-password may be incorrect.')
        except TypeError:
            raise CLIError('Invalid --pem-password.')
        logger.info('setting RSA parameters from PEM data')
        _set_rsa_parameters(key_obj, pkey)
    elif byok_file:
        with open(byok_file, 'rb') as f:
            byok_data = f.read()
        key_obj.kty = 'RSA-HSM'
        key_obj.t = byok_data

    return client.import_key(
        vault_base_url, key_name, key_obj, destination == 'hsm', key_attrs, tags)
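The _to_bytes helper above zero-pads odd-length hex strings before decoding; for example, the common RSA public exponent 65537 prints as 0x10001, which has five hex digits. A quick standalone check:

import codecs

def to_bytes(hex_string):
    # Zero-pad to an even number of digits, then decode to raw bytes.
    if len(hex_string) % 2:
        hex_string = '0' + hex_string
    return codecs.decode(hex_string, 'hex_codec')

assert to_bytes('10001') == b'\x01\x00\x01'  # 65537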
Example no. 13
def create_keyvault(client,  # pylint: disable=too-many-locals
                    resource_group_name, vault_name, location=None, sku=SkuName.standard.value,
                    enabled_for_deployment=None,
                    enabled_for_disk_encryption=None,
                    enabled_for_template_deployment=None,
                    enable_soft_delete=None,
                    no_self_perms=None,
                    tags=None):
    from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters
    from azure.cli.core._profile import Profile, CLOUD
    from azure.graphrbac.models import GraphErrorException
    profile = Profile()
    cred, _, tenant_id = profile.get_login_credentials(
        resource=CLOUD.endpoints.active_directory_graph_resource_id)

    graph_client = GraphRbacManagementClient(
        cred,
        tenant_id,
        base_url=CLOUD.endpoints.active_directory_graph_resource_id)
    subscription = profile.get_subscription()
    if no_self_perms:
        access_policies = []
    else:
        permissions = Permissions(keys=[KeyPermissions.get,
                                        KeyPermissions.create,
                                        KeyPermissions.delete,
                                        KeyPermissions.list,
                                        KeyPermissions.update,
                                        KeyPermissions.import_enum,
                                        KeyPermissions.backup,
                                        KeyPermissions.restore,
                                        KeyPermissions.recover],
                                  secrets=[
                                      SecretPermissions.get,
                                      SecretPermissions.list,
                                      SecretPermissions.set,
                                      SecretPermissions.delete,
                                      SecretPermissions.backup,
                                      SecretPermissions.restore,
                                      SecretPermissions.recover],
                                  certificates=[
                                      CertificatePermissions.get,
                                      CertificatePermissions.list,
                                      CertificatePermissions.delete,
                                      CertificatePermissions.create,
                                      CertificatePermissions.import_enum,
                                      CertificatePermissions.update,
                                      CertificatePermissions.managecontacts,
                                      CertificatePermissions.getissuers,
                                      CertificatePermissions.listissuers,
                                      CertificatePermissions.setissuers,
                                      CertificatePermissions.deleteissuers,
                                      CertificatePermissions.manageissuers,
                                      CertificatePermissions.recover],
                                  storage=[
                                      StoragePermissions.get,
                                      StoragePermissions.list,
                                      StoragePermissions.delete,
                                      StoragePermissions.set,
                                      StoragePermissions.update,
                                      StoragePermissions.regeneratekey,
                                      StoragePermissions.setsas,
                                      StoragePermissions.listsas,
                                      StoragePermissions.getsas,
                                      StoragePermissions.deletesas])
        try:
            object_id = _get_current_user_object_id(graph_client)
        except GraphErrorException:
            object_id = _get_object_id(graph_client, subscription=subscription)
        if not object_id:
            raise CLIError('Cannot create vault.\nUnable to query active directory for information '
                           'about the current user.\nYou may try the --no-self-perms flag to '
                           'create a vault without permissions.')
        access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
                                             object_id=object_id,
                                             permissions=permissions)]
    properties = VaultProperties(tenant_id=tenant_id,
                                 sku=Sku(name=sku),
                                 access_policies=access_policies,
                                 vault_uri=None,
                                 enabled_for_deployment=enabled_for_deployment,
                                 enabled_for_disk_encryption=enabled_for_disk_encryption,
                                 enabled_for_template_deployment=enabled_for_template_deployment,
                                 enable_soft_delete=enable_soft_delete)
    parameters = VaultCreateOrUpdateParameters(location=location,
                                               tags=tags,
                                               properties=properties)
    return client.create_or_update(resource_group_name=resource_group_name,
                                   vault_name=vault_name,
                                   parameters=parameters)
Example no. 14
def resume_protection(cmd, client, resource_group_name, vault_name, item, policy):
    if item.properties.protection_state != "ProtectionStopped":
        raise CLIError("Azure Virtual Machine is already protected")
    return update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy)
Example no. 15
def validate_managed_instance_storage_size(namespace):
    # Storage size, if provided, must be a multiple of 32 GB
    if namespace.storage_size_in_gb and namespace.storage_size_in_gb % 32 != 0:
        raise CLIError('incorrect usage: --storage must be specified in increments of 32 GB')
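A quick sanity check with a stand-in namespace, in the same style as the validator sketch earlier:

from types import SimpleNamespace

validate_managed_instance_storage_size(SimpleNamespace(storage_size_in_gb=64))    # passes
validate_managed_instance_storage_size(SimpleNamespace(storage_size_in_gb=None))  # passes
# storage_size_in_gb=48 would raise CLIError: ... increments of 32 GB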
Example no. 16
def encrypt_vm(
        resource_group_name,
        vm_name,  # pylint: disable=too-many-locals, too-many-statements
        aad_client_id,
        disk_encryption_keyvault,
        aad_client_secret=None,
        aad_client_cert_thumbprint=None,
        key_encryption_keyvault=None,
        key_encryption_key=None,
        key_encryption_algorithm='RSA-OAEP',
        volume_type=None,
        encrypt_format_all=False):
    '''
    Enable disk encryption on OS disk, Data disks, or both
    :param str aad_client_id: Client ID of AAD app with permissions to write secrets to KeyVault
    :param str aad_client_secret: Client Secret of AAD app with permissions to
    write secrets to KeyVault
    :param str aad_client_cert_thumbprint: Thumbprint of AAD app certificate with permissions
    to write secrets to KeyVault
    :param str disk_encryption_keyvault: the KeyVault where the generated encryption key will be placed
    :param str key_encryption_key: KeyVault key name or URL used to encrypt the disk encryption key
    :param str key_encryption_keyvault: the KeyVault containing the key encryption key
    used to encrypt the disk encryption key. If missing, CLI will use --disk-encryption-keyvault
    '''
    # pylint: disable=no-member
    compute_client = _compute_client_factory()
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    os_type = vm.storage_profile.os_disk.os_type.value
    is_linux = _is_linux_vm(os_type)
    extension = vm_extension_info[os_type]
    backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
    vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False

    # 1. First validate arguments

    if not aad_client_cert_thumbprint and not aad_client_secret:
        raise CLIError(
            'Please provide either --aad-client-secret or --aad-client-cert-thumbprint'
        )

    if volume_type is None:
        if vm.storage_profile.data_disks:
            raise CLIError('VM has data disks, please supply --volume-type')
        else:
            volume_type = 'OS'

    # encryption is not supported on all linux distros, but service never tells you
    # so let us verify at the client side
    if is_linux:
        image_reference = getattr(vm.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(
                image_reference, volume_type)
            if not result:
                logger.warning(message)

    # sequence_version should be unique
    sequence_version = uuid.uuid4()

    # retrieve keyvault details
    disk_encryption_keyvault_url = get_key_vault_base_url(
        (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears to be a key name
            key_encryption_key = _get_keyvault_key_url(
                (parse_resource_id(key_encryption_keyvault))['name'],
                key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'AADClientID': aad_client_id,
        'AADClientCertThumbprint': aad_client_cert_thumbprint,
        'KeyVaultURL': disk_encryption_keyvault_url,
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption'
        if not encrypt_format_all else 'EnableEncryptionFormatAll',
        'KeyEncryptionKeyURL': key_encryption_key,
        'KeyEncryptionAlgorithm': key_encryption_algorithm,
        'SequenceVersion': sequence_version,
    }
    private_config = {
        'AADClientSecret':
        aad_client_secret if is_linux else (aad_client_secret or '')
    }

    from azure.mgmt.compute.models import (VirtualMachineExtension,
                                           DiskEncryptionSettings,
                                           KeyVaultSecretReference,
                                           KeyVaultKeyReference, SubResource)

    ext = VirtualMachineExtension(
        vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        protected_settings=private_config,
        type_handler_version=extension['version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # verify the extension was ok
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError(
            'Extension needed for disk encryption was not provisioned correctly'
        )
    if not (extension_result.instance_view.statuses
            and extension_result.instance_view.statuses[0].message):
        raise CLIError(
            'Could not find the URL pointing to the secret for disk encryption')

    # 3. update VM's storage profile with the secrets
    status_url = extension_result.instance_view.statuses[0].message

    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    secret_ref = KeyVaultSecretReference(
        secret_url=status_url,
        source_vault=SubResource(disk_encryption_keyvault))

    key_encryption_key_obj = None
    if key_encryption_key:
        key_encryption_key_obj = KeyVaultKeyReference(
            key_encryption_key, SubResource(key_encryption_keyvault))

    disk_encryption_settings = DiskEncryptionSettings(
        disk_encryption_key=secret_ref,
        key_encryption_key=key_encryption_key_obj,
        enabled=True)
    if vm_encrypted:
        # stop the vm before update if the vm is already encrypted
        logger.warning(
            "Deallocating the VM before updating encryption settings...")
        compute_client.virtual_machines.deallocate(resource_group_name,
                                                   vm_name).result()
        vm = compute_client.virtual_machines.get(resource_group_name, vm_name)

    vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
    set_vm(vm)

    if vm_encrypted:
        # and start after the update
        logger.warning("Restarting the VM after the update...")
        compute_client.virtual_machines.start(resource_group_name,
                                              vm_name).result()

    if is_linux and volume_type != _DATA_VOLUME_TYPE:
        # TODO: expose a 'wait' command to do the monitor and handle the reboot
        logger.warning(
            "The encryption request was accepted. Please use 'show' command to monitor "
            "the progress. If you see 'VMRestartPending', please restart the VM, and "
            "the encryption will finish shortly")
Example no. 17
def _add_whl_ext(source, ext_sha256=None, pip_extra_index_urls=None, pip_proxy=None):  # pylint: disable=too-many-statements
    if not source.endswith('.whl'):
        raise ValueError('Unknown extension type. Only Python wheels are supported.')
    url_parse_result = urlparse(source)
    is_url = (url_parse_result.scheme == 'http' or url_parse_result.scheme == 'https')
    logger.debug('Extension source is url? %s', is_url)
    whl_filename = os.path.basename(url_parse_result.path) if is_url else os.path.basename(source)
    parsed_filename = WHEEL_INFO_RE(whl_filename)
    # Extension names can have - but .whl format changes it to _ (PEP 0427). Undo this.
    extension_name = parsed_filename.groupdict().get('name').replace('_', '-') if parsed_filename else None
    if not extension_name:
        raise CLIError('Unable to determine extension name from {}. Is the file name correct?'.format(source))
    if extension_exists(extension_name):
        raise CLIError('The extension {} already exists.'.format(extension_name))
    ext_file = None
    if is_url:
        # Download from URL
        tmp_dir = tempfile.mkdtemp()
        ext_file = os.path.join(tmp_dir, whl_filename)
        logger.debug('Downloading %s to %s', source, ext_file)
        try:
            _whl_download_from_url(url_parse_result, ext_file)
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as err:
            raise CLIError('Please ensure you have network connection. Error detail: {}'.format(str(err)))
        logger.debug('Downloaded to %s', ext_file)
    else:
        # Get file path
        ext_file = os.path.realpath(os.path.expanduser(source))
        if not os.path.isfile(ext_file):
            raise CLIError("File {} not found.".format(source))
    # Validate the extension
    logger.debug('Validating the extension %s', ext_file)
    if ext_sha256:
        valid_checksum, computed_checksum = is_valid_sha256sum(ext_file, ext_sha256)
        if valid_checksum:
            logger.debug("Checksum of %s is OK", ext_file)
        else:
            logger.debug("Invalid checksum for %s. Expected '%s', computed '%s'.",
                         ext_file, ext_sha256, computed_checksum)
            raise CLIError("The checksum of the extension does not match the expected value. "
                           "Use --debug for more information.")
    try:
        _validate_whl_extension(ext_file)
    except AssertionError:
        logger.debug(traceback.format_exc())
        raise CLIError('The extension is invalid. Use --debug for more information.')
    except CLIError:
        raise
    logger.debug('Validation successful on %s', ext_file)
    # Install with pip
    extension_path = get_extension_path(extension_name)
    pip_args = ['install', '--target', extension_path, ext_file]

    if pip_proxy:
        pip_args = pip_args + ['--proxy', pip_proxy]
    if pip_extra_index_urls:
        for extra_index_url in pip_extra_index_urls:
            pip_args = pip_args + ['--extra-index-url', extra_index_url]

    logger.debug('Executing pip with args: %s', pip_args)
    with HomebrewPipPatch():
        pip_status_code = _run_pip(pip_args)
    if pip_status_code > 0:
        logger.debug('Pip failed so deleting anything we might have installed at %s', extension_path)
        shutil.rmtree(extension_path, ignore_errors=True)
        raise CLIError('An error occurred. Pip failed with status code {}. '
                       'Use --debug for more information.'.format(pip_status_code))
    # Save the whl we used to install the extension in the extension dir.
    dst = os.path.join(extension_path, whl_filename)
    shutil.copyfile(ext_file, dst)
    logger.debug('Saved the whl to %s', dst)
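The extension name above is recovered from the wheel filename, undoing PEP 427's dash-to-underscore escaping. A simplified stand-in for WHEEL_INFO_RE (the real pattern is fuller; this reduced regex is hypothetical):

import re

WHEEL_RE = re.compile(r'^(?P<name>.+?)-(?P<version>\d[^-]*)-.+\.whl$')

m = WHEEL_RE.match('azure_cli_iot_ext-0.3.2-py2.py3-none-any.whl')
print(m.group('name').replace('_', '-'))  # azure-cli-iot-ext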
Example no. 18
def _report_client_side_validation_error(msg):
    # 'force' comes from the enclosing command's scope; in the original module
    # this is a nested helper.
    if force:
        logger.warning(msg)
    else:
        raise CLIError(msg)
def validate_acr(namespace):
    if namespace.attach_acr and namespace.detach_acr:
        raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
Example no. 20
def acs_create(resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,  # pylint: disable=too-many-locals
               admin_username="******", agent_count=3,
               agent_vm_size="Standard_D2_v2", location=None, master_count=1,
               orchestrator_type="dcos", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
     is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
     The concatenation of the domain name and the regionalized DNS zone
     make up the fully qualified domain name associated with the public
     IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
     public key string.  Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
     in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param agent_count: The number of agents for the cluster.  Note, for
     DC/OS clusters you will also get 1 or 2 public agents in addition to
     these selected masters.
    :type agent_count: int
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param location: Location for VM resources.
    :type location: str
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param orchestrator_type: The type of orchestrator used to manage the
     applications on the cluster. Possible values include: 'dcos', 'swarm'
    :type orchestrator_type: str or :class:`orchestratorType
     <Default.models.orchestratorType>`
    :param service_principal: The service principal used for cluster authentication
     to Azure APIs. If not specified, it is created for you and stored in the
     ${HOME}/.azure directory.
    :type service_principal: str
    :param client_secret: The secret associated with the service principal. If
     --service-principal is specified, then secret should also be specified. If
     --service-principal is not specified, the secret is auto-generated for you
     and stored in ${HOME}/.azure/ directory.
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows containers.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`DeploymentExtended
     <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    if ssh_key_value is not None and not _is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))

    subscription_id = _get_subscription_id()
    if not dns_name_prefix:
        # Use subscription id to provide uniqueness and prevent DNS name clashes
        dns_name_prefix = '{}-{}-{}'.format(name, resource_group_name, subscription_id[0:6])

    register_providers()
    groups = _resource_client_factory().resource_groups
    # Fetch the group to fail fast if it doesn't exist; its location is also used as a fallback below.
    rg = groups.get(resource_group_name)

    if orchestrator_type == 'Kubernetes' or orchestrator_type == 'kubernetes':
        # TODO: This really needs to be broken out and unit tested.
        client = _graph_client_factory()
        if not service_principal:
            # --service-principal not specified, try to load it from local disk
            principalObj = load_acs_service_principal(subscription_id)
            if principalObj:
                service_principal = principalObj.get('service_principal')
                client_secret = principalObj.get('client_secret')
                _validate_service_principal(client, service_principal)
            else:
                # Nothing to load, make one.
                if not client_secret:
                    client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
                salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
                url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)

                service_principal = _build_service_principal(client, name, url, client_secret)
                logger.info('Created a service principal: %s', service_principal)
                store_acs_service_principal(subscription_id, client_secret, service_principal)
            # Either way, update the role assignment, this fixes things if we fail part-way through
            if not _add_role_assignment('Contributor', service_principal):
                raise CLIError(
                    'Could not create a service principal with the right permissions. Are you an Owner on this project?')
        else:
            # --service-principal specified, validate --client-secret was too
            if not client_secret:
                raise CLIError('--client-secret is required if --service-principal is specified')
            _validate_service_principal(client, service_principal)

        return _create_kubernetes(resource_group_name, deployment_name, dns_name_prefix, name,
                                  ssh_key_value, admin_username=admin_username,
                                  agent_count=agent_count, agent_vm_size=agent_vm_size,
                                  location=location, service_principal=service_principal,
                                  client_secret=client_secret, master_count=master_count,
                                  windows=windows, admin_password=admin_password,
                                  validate=validate, no_wait=no_wait, tags=tags)

    if windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    if location is None:
        location = rg.location  # pylint:disable=no-member
    return _create_non_kubernetes(resource_group_name, deployment_name, dns_name_prefix, name,
                                  ssh_key_value, admin_username, agent_count, agent_vm_size, location,
                                  orchestrator_type, master_count, tags, validate, no_wait)
def _validate_subnet_id(subnet_id, name):
    if subnet_id is None or subnet_id == '':
        return
    from msrestazure.tools import is_valid_resource_id
    if not is_valid_resource_id(subnet_id):
        raise CLIError(name + " is not a valid Azure resource ID.")
Example no. 22
def _create_kubernetes(resource_group_name, deployment_name, dns_name_prefix, name, ssh_key_value,
                       admin_username="******", agent_count=3, agent_vm_size="Standard_D2_v2",
                       location=None, service_principal=None, client_secret=None, master_count=1,
                       windows=False, admin_password='', validate=False, no_wait=False, tags=None):
    if not location:
        location = '[resourceGroup().location]'
    windows_profile = None
    os_type = 'Linux'
    if windows:
        if len(admin_password) == 0:
            raise CLIError('--admin-password is required.')
        if len(admin_password) < 6:
            raise CLIError('--admin-password must be at least 6 characters')
        windows_profile = {
            "adminUsername": admin_username,
            "adminPassword": admin_password,
        }
        os_type = 'Windows'

    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        },
        "resources": [
            {
                "apiVersion": "2017-01-31",
                "location": location,
                "type": "Microsoft.ContainerService/containerServices",
                "name": name,
                "tags": tags,
                "properties": {
                    "orchestratorProfile": {
                        "orchestratorType": "kubernetes"
                    },
                    "masterProfile": {
                        "count": master_count,
                        "dnsPrefix": dns_name_prefix
                    },
                    "agentPoolProfiles": [
                        {
                            "name": "agentpools",
                            "count": agent_count,
                            "vmSize": agent_vm_size,
                            "dnsPrefix": dns_name_prefix + '-k8s-agents',
                            "osType": os_type,
                        }
                    ],
                    "linuxProfile": {
                        "ssh": {
                            "publicKeys": [
                                {
                                    "keyData": ssh_key_value
                                }
                            ]
                        },
                        "adminUsername": admin_username
                    },
                    "windowsProfile": windows_profile,
                    "servicePrincipalProfile": {
                        "ClientId": service_principal,
                        "Secret": "[parameters('clientSecret')]"
                    }
                }
            }
        ]
    }
    params = {
        "clientSecret": {
            "value": client_secret
        }
    }

    return _invoke_deployment(resource_group_name, deployment_name, template, params, validate, no_wait)
def _validate_cluster_autoscaler_key(key):
    if not key:
        raise CLIError('Empty key specified for cluster-autoscaler-profile')
    valid_keys = [k.replace("_", "-") for k in ManagedClusterPropertiesAutoScalerProfile._attribute_map]  # pylint: disable=protected-access
    if key not in valid_keys:
        raise CLIError('Invalid key specified for cluster-autoscaler-profile: %s' % key)
Example no. 24
def enable_for_AzureFileShare(cmd, client, resource_group_name, vault_name,
                              afs_name, storage_account_name, policy_name):

    # get registered storage accounts
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    registered_containers = common.list_containers(containers_client,
                                                   resource_group_name,
                                                   vault_name, "AzureStorage")
    storage_account = _get_storage_account_from_list(registered_containers,
                                                     storage_account_name)

    # get unregistered storage accounts
    if storage_account is None:
        unregistered_containers = list_protectable_containers(
            cmd.cli_ctx, resource_group_name, vault_name)
        storage_account = _get_storage_account_from_list(
            unregistered_containers, storage_account_name)

        if storage_account is None:
            # refresh containers in the vault
            protection_containers_client = protection_containers_cf(
                cmd.cli_ctx)
            filter_string = helper.get_filter_string(
                {'backupManagementType': "AzureStorage"})

            refresh_result = protection_containers_client.refresh(
                vault_name,
                resource_group_name,
                fabric_name,
                filter=filter_string,
                raw=True)
            helper.track_refresh_operation(cmd.cli_ctx, refresh_result,
                                           vault_name, resource_group_name)

            # refetch the protectable containers after refresh
            unregistered_containers = list_protectable_containers(
                cmd.cli_ctx, resource_group_name, vault_name)
            storage_account = _get_storage_account_from_list(
                unregistered_containers, storage_account_name)

            if storage_account is None:
                raise CLIError("Storage account not found or not supported.")

        # register storage account
        protection_containers_client = protection_containers_cf(cmd.cli_ctx)
        properties = AzureStorageContainer(
            backup_management_type="AzureStorage",
            source_resource_id=storage_account.properties.container_id,
            workload_type="AzureFileShare")
        param = ProtectionContainerResource(properties=properties)
        result = protection_containers_client.register(vault_name,
                                                       resource_group_name,
                                                       fabric_name,
                                                       storage_account.name,
                                                       param,
                                                       raw=True)
        helper.track_register_operation(cmd.cli_ctx, result, vault_name,
                                        resource_group_name,
                                        storage_account.name)

    policy = common.show_policy(protection_policies_cf(cmd.cli_ctx),
                                resource_group_name, vault_name, policy_name)
    helper.validate_policy(policy)

    protectable_item = _get_protectable_item_for_afs(cmd.cli_ctx, vault_name,
                                                     resource_group_name,
                                                     afs_name, storage_account)
    helper.validate_azurefileshare_item(protectable_item)

    container_uri = helper.get_protection_container_uri_from_id(
        protectable_item.id)
    item_uri = helper.get_protectable_item_uri_from_id(protectable_item.id)
    item_properties = AzureFileshareProtectedItem()

    item_properties.policy_id = policy.id
    item_properties.source_resource_id = protectable_item.properties.parent_container_fabric_id
    item = ProtectedItemResource(properties=item_properties)

    result = client.create_or_update(vault_name,
                                     resource_group_name,
                                     fabric_name,
                                     container_uri,
                                     item_uri,
                                     item,
                                     raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name,
                                   resource_group_name)
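
_get_storage_account_from_list is not shown above; here is a hedged sketch of what such a helper might do, matching a container by its friendly name (the attribute name is an assumption):

from types import SimpleNamespace

def _get_storage_account_from_list_sketch(containers, storage_account_name):
    # Return the first container whose friendly name matches, else None.
    for container in containers:
        if container.properties.friendly_name.lower() == storage_account_name.lower():
            return container
    return None

# Illustrative stub standing in for a registered container.
containers = [SimpleNamespace(properties=SimpleNamespace(friendly_name="mysa"))]
assert _get_storage_account_from_list_sketch(containers, "MySA") is containers[0]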
Example #25
def validate_rg_loc_sub(s_subscription, s_location, subscription, location):
    if not ((s_location == location) and (s_subscription == subscription)):
        raise CLIError(
            "Incorrect usage: The server, VNet and subnet must be in the same location and subscription."
        )
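
A quick usage sketch of the check above; the values are illustrative:

validate_rg_loc_sub("sub-a", "eastus", "sub-a", "eastus")    # passes silently
# validate_rg_loc_sub("sub-a", "eastus", "sub-b", "eastus")  # raises CLIError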
Example #26
def validate_update_policy_request(existing_policy, new_policy):
    existing_backup_management_type = existing_policy.properties.backup_management_type
    new_backup_management_type = new_policy.properties.backup_management_type
    if existing_backup_management_type != new_backup_management_type:
        raise CLIError(
            "BackupManagementType cannot be different than the existing type.")
Example #27
def _parse_image_argument(namespace):
    """ Systematically determines what type is supplied for the --image parameter. Updates the
        namespace and returns the type for subsequent processing. """
    # 1 - easy check for URI
    if namespace.image.lower().endswith('.vhd'):
        return 'uri'

    # 2 - attempt to match a URN alias (most likely)
    from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc
    images = load_images_from_aliases_doc()
    matched = next((x for x in images if x['urnAlias'].lower() == namespace.image.lower()), None)
    if matched:
        namespace.os_publisher = matched['publisher']
        namespace.os_offer = matched['offer']
        namespace.os_sku = matched['sku']
        namespace.os_version = matched['version']
        return 'urn'

    # 3 - attempt to match a URN pattern
    urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image)
    if urn_match:
        namespace.os_publisher = urn_match.group(1)
        namespace.os_offer = urn_match.group(2)
        namespace.os_sku = urn_match.group(3)
        namespace.os_version = urn_match.group(4)
        compute_client = _compute_client_factory()
        if namespace.os_version.lower() == 'latest':
            top_one = compute_client.virtual_machine_images.list(namespace.location,
                                                                 namespace.os_publisher,
                                                                 namespace.os_offer,
                                                                 namespace.os_sku,
                                                                 top=1,
                                                                 orderby='name desc')
            if not top_one:
                raise CLIError("Can't resolve the vesion of '{}'".format(namespace.image))

            image_version = top_one[0].name
        else:
            image_version = namespace.os_version

        image = compute_client.virtual_machine_images.get(namespace.location,
                                                          namespace.os_publisher,
                                                          namespace.os_offer,
                                                          namespace.os_sku,
                                                          image_version)

        # pylint: disable=no-member
        if image.plan:
            namespace.plan_name = image.plan.name
            namespace.plan_product = image.plan.product
            namespace.plan_publisher = image.plan.publisher
        return 'urn'

    # 4 - check if a fully-qualified ID (assumes it is an image ID)
    if is_valid_resource_id(namespace.image):
        return 'image_id'

    # 5 - check if an existing managed disk image resource
    compute_client = _compute_client_factory()
    try:
        compute_client.images.get(namespace.resource_group_name, namespace.image)
        namespace.image = _get_resource_id(namespace.image, namespace.resource_group_name,
                                           'images', 'Microsoft.Compute')
        return 'image_id'
    except CloudError:
        err = 'Invalid image "{}". Use a custom image name, id, or pick one from {}'
        raise CLIError(err.format(namespace.image, [x['urnAlias'] for x in images]))
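
A self-contained demo of the URN pattern from step 3 (publisher:offer:sku:version, colon-separated; the example URN is illustrative):

import re

urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', 'Canonical:UbuntuServer:18.04-LTS:latest')
publisher, offer, sku, version = urn_match.groups()
assert (publisher, offer, sku, version) == ('Canonical', 'UbuntuServer', '18.04-LTS', 'latest')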
Example #28
def is_range_valid(start_date, end_date):
    if start_date > end_date:
        raise CLIError("""Start date must be earlier than end date.""")
Example #29
def _validate_vm_create_storage_profile(namespace, for_scale_set=False):

    # use minimal parameters to resolve the expected storage profile
    if getattr(namespace, 'attach_os_disk', None) and not namespace.image:
        if namespace.use_unmanaged_disk:
            # STORAGE PROFILE #3
            namespace.storage_profile = StorageProfile.SASpecializedOSDisk
        else:
            # STORAGE PROFILE #6
            namespace.storage_profile = StorageProfile.ManagedSpecializedOSDisk
    elif namespace.image and not getattr(namespace, 'attach_os_disk', None):
        image_type = _parse_image_argument(namespace)
        if image_type == 'uri':
            # STORAGE PROFILE #2
            namespace.storage_profile = StorageProfile.SACustomImage
        elif image_type == 'image_id':
            # STORAGE PROFILE #5
            namespace.storage_profile = StorageProfile.ManagedCustomImage
        elif image_type == 'urn':
            if namespace.use_unmanaged_disk:
                # STORAGE PROFILE #1
                namespace.storage_profile = StorageProfile.SAPirImage
            else:
                # STORAGE PROFILE #4
                namespace.storage_profile = StorageProfile.ManagedPirImage
        else:
            raise CLIError('Unrecognized image type: {}'.format(image_type))
    else:
        # exactly one of --image or --attach-os-disk must be specified
        raise CLIError('incorrect usage: --image IMAGE | --attach-os-disk DISK')

    # perform parameter validation for the specific storage profile
    # start with the required/forbidden parameters for VM
    if namespace.storage_profile == StorageProfile.ManagedPirImage:
        required = ['image']
        forbidden = ['os_type', 'attach_os_disk', 'storage_account',
                     'storage_container_name', 'use_unmanaged_disk']
        if for_scale_set:
            forbidden.append('os_disk_name')
        _validate_managed_disk_sku(namespace.storage_sku)

    elif namespace.storage_profile == StorageProfile.ManagedCustomImage:
        required = ['image']
        forbidden = ['os_type', 'attach_os_disk', 'storage_account',
                     'storage_container_name', 'use_unmanaged_disk']
        if for_scale_set:
            forbidden.append('os_disk_name')
        _validate_managed_disk_sku(namespace.storage_sku)

    elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
        required = ['os_type', 'attach_os_disk']
        forbidden = ['os_disk_name', 'os_caching', 'storage_account',
                     'storage_container_name', 'use_unmanaged_disk', 'storage_sku']
        _validate_managed_disk_sku(namespace.storage_sku)

    elif namespace.storage_profile == StorageProfile.SAPirImage:
        required = ['image', 'use_unmanaged_disk']
        forbidden = ['os_type', 'data_caching', 'attach_os_disk', 'data_disk_sizes_gb']

    elif namespace.storage_profile == StorageProfile.SACustomImage:
        required = ['image', 'os_type', 'use_unmanaged_disk']
        forbidden = ['attach_os_disk', 'data_caching', 'data_disk_sizes_gb']

    elif namespace.storage_profile == StorageProfile.SASpecializedOSDisk:
        required = ['os_type', 'attach_os_disk', 'use_unmanaged_disk']
        forbidden = ['os_disk_name', 'os_caching', 'data_caching', 'image', 'storage_account',
                     'storage_container_name', 'data_disk_sizes_gb', 'storage_sku']

    else:
        raise CLIError('Unrecognized storage profile: {}'.format(namespace.storage_profile))

    if for_scale_set:
        # VMSS lacks some parameters, so scrub these out
        props_to_remove = ['attach_os_disk', 'storage_account']
        for prop in props_to_remove:
            if prop in required:
                required.remove(prop)
            if prop in forbidden:
                forbidden.remove(prop)

    # set default storage SKU if not provided and using an image based OS
    if not namespace.storage_sku and namespace.storage_profile not in [StorageProfile.ManagedSpecializedOSDisk, StorageProfile.SASpecializedOSDisk]:  # pylint: disable=line-too-long
        namespace.storage_sku = 'Standard_LRS' if for_scale_set else 'Premium_LRS'

    # Now verify the status of the required and forbidden parameters
    _validate_storage_required_forbidden_parameters(namespace, required, forbidden)

    if namespace.storage_profile == StorageProfile.ManagedCustomImage:
        # extract additional information from a managed custom image
        res = parse_resource_id(namespace.image)
        compute_client = _compute_client_factory()
        image_info = compute_client.images.get(res['resource_group'], res['name'])
        # pylint: disable=no-member
        namespace.os_type = image_info.storage_profile.os_disk.os_type.value
        namespace.image_data_disks = image_info.storage_profile.data_disks

    elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
        # accept disk name or ID
        namespace.attach_os_disk = _get_resource_id(
            namespace.attach_os_disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute')

    if not namespace.os_type:
        namespace.os_type = 'windows' if 'windows' in namespace.os_offer.lower() else 'linux'
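
_validate_storage_required_forbidden_parameters is not reproduced above; here is a hedged sketch of what such a check plausibly does, treating unset (falsy) namespace attributes as absent:

from types import SimpleNamespace

def _required_forbidden_sketch(namespace, required, forbidden):
    # Every required attribute must be set; no forbidden attribute may be set.
    missing = [p for p in required if not getattr(namespace, p, None)]
    present = [p for p in forbidden if getattr(namespace, p, None)]
    if missing:
        raise ValueError('required parameter(s) missing: {}'.format(', '.join(missing)))
    if present:
        raise ValueError('parameter(s) not applicable: {}'.format(', '.join(present)))

ns = SimpleNamespace(image='UbuntuLTS', attach_os_disk=None)
_required_forbidden_sketch(ns, required=['image'], forbidden=['attach_os_disk'])  # passes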
Example #30
def enable_protection_for_vm(cmd, client, resource_group_name, vault_name, vm, policy_name, diskslist=None,
                             disk_list_setting=None, exclude_all_data_disks=None):
    vm_name, vm_rg = _get_resource_name_and_rg(resource_group_name, vm)
    vm = virtual_machines_cf(cmd.cli_ctx).get(vm_rg, vm_name)
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)

    if vm.location.lower() != vault.location.lower():
        raise CLIError(
            """
            The VM should be in the same location as that of the Recovery Services vault to enable protection.
            """)

    if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
        raise CLIError(
            """
            The policy type should match the workload being protected.
            Use the relevant get-default policy command and use it to protect the workload.
            """)

    # Get protectable item.
    protectable_item = _get_protectable_item_for_vm(cmd.cli_ctx, vault_name, resource_group_name, vm_name, vm_rg)
    if protectable_item is None:
        raise CLIError(
            """
            The specified Azure virtual machine was not found. Possible causes are:
               1. The VM does not exist.
               2. The VM name or the service name is case sensitive and must match exactly.
               3. The VM is already protected with the same or another vault.
                  Please unprotect the VM first and then try to protect it again.

            Please contact Microsoft for further assistance.
            """)

    # Construct enable protection request object
    container_uri = _get_protection_container_uri_from_id(protectable_item.id)
    item_uri = _get_protectable_item_uri_from_id(protectable_item.id)
    vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
    vm_item_properties.policy_id = policy.id
    vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id

    if disk_list_setting is not None:
        if diskslist is None:
            raise CLIError("Please provide LUNs of disks that will be included or excluded.")
        is_inclusion_list = False
        if disk_list_setting == "include":
            is_inclusion_list = True
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=diskslist,
                                                            is_inclusion_list=is_inclusion_list)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties
    elif exclude_all_data_disks:
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=[],
                                                            is_inclusion_list=True)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties

    vm_item = ProtectedItemResource(properties=vm_item_properties)

    # Trigger enable protection and wait for completion
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, vm_item, raw=True)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
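
The disk include/exclude wiring above reduces to a small decision table: an explicit setting produces an inclusion or exclusion LUN list, and exclude-all-data-disks is modeled as an empty inclusion list (OS disk only). A decoupled sketch of that logic, with a plain dict standing in for DiskExclusionProperties:

def build_disk_exclusion_sketch(diskslist, disk_list_setting, exclude_all_data_disks):
    # Returns a dict mirroring DiskExclusionProperties, or None when unused.
    if disk_list_setting is not None:
        if diskslist is None:
            raise ValueError("Please provide LUNs of disks that will be included or excluded.")
        return {"disk_lun_list": diskslist, "is_inclusion_list": disk_list_setting == "include"}
    if exclude_all_data_disks:
        return {"disk_lun_list": [], "is_inclusion_list": True}
    return None

assert build_disk_exclusion_sketch([0, 1], "include", None) == {"disk_lun_list": [0, 1], "is_inclusion_list": True}
assert build_disk_exclusion_sketch(None, None, True) == {"disk_lun_list": [], "is_inclusion_list": True}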