Example #1
def validate_both_start_end_dates(namespace):
    """Validates the existence of both start and end dates in the parameter or neither"""
    if (namespace.start_date and not namespace.end_date) or (namespace.end_date and not namespace.start_date):
        raise CLIError("usage error: Both --start-date and --end-date need to be supplied or neither.")
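
A minimal usage sketch of this validator follows; it assumes knack's CLIError (which azure-cli raises) and uses a SimpleNamespace as an illustrative stand-in for the parsed-argument namespace:

from types import SimpleNamespace
from knack.util import CLIError

ns = SimpleNamespace(start_date='2021-01-01', end_date=None)
try:
    validate_both_start_end_dates(ns)
except CLIError as err:
    print(err)  # usage error: Both --start-date and --end-date need to be supplied or neither.

# Supplying both dates, or neither, passes silently.
validate_both_start_end_dates(SimpleNamespace(start_date=None, end_date=None))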
Example #2
def enable_protection_for_vm(cmd, client, resource_group_name, vault_name, vm, policy_name, diskslist=None,
                             disk_list_setting=None, exclude_all_data_disks=None):
    vm_name, vm_rg = _get_resource_name_and_rg(resource_group_name, vm)
    vm = virtual_machines_cf(cmd.cli_ctx).get(vm_rg, vm_name)
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)

    if vm.location.lower() != vault.location.lower():
        raise CLIError(
            """
            The VM should be in the same location as that of the Recovery Services vault to enable protection.
            """)

    if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
        raise CLIError(
            """
            The policy type should match the workload being protected.
            Use the relevant get-default policy command to fetch a matching policy and use it to protect the workload.
            """)

    # Get protectable item.
    protectable_item = _get_protectable_item_for_vm(cmd.cli_ctx, vault_name, resource_group_name, vm_name, vm_rg)
    if protectable_item is None:
        raise CLIError(
            """
            The specified Azure Virtual Machine was not found. Possible causes are
               1. The VM does not exist
               2. The VM name or the Service name is case-sensitive; check the casing
               3. The VM is already protected with the same or another vault.
                  Please unprotect the VM first and then try to protect it again.

            Please contact Microsoft for further assistance.
            """)

    # Construct enable protection request object
    container_uri = _get_protection_container_uri_from_id(protectable_item.id)
    item_uri = _get_protectable_item_uri_from_id(protectable_item.id)
    vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
    vm_item_properties.policy_id = policy.id
    vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id

    if disk_list_setting is not None:
        if diskslist is None:
            raise CLIError("Please provide LUNs of disks that will be included or excluded.")
        is_inclusion_list = False
        if disk_list_setting == "include":
            is_inclusion_list = True
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=diskslist,
                                                            is_inclusion_list=is_inclusion_list)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties
    elif exclude_all_data_disks:
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=[],
                                                            is_inclusion_list=True)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties

    vm_item = ProtectedItemResource(properties=vm_item_properties)

    # Trigger enable protection and wait for completion
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, vm_item, raw=True)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
Example #3
def resume_protection(cmd, client, resource_group_name, vault_name, item, policy):
    if item.properties.protection_state != "ProtectionStopped":
        raise CLIError("Azure Virtual Machine is already protected")
    return update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy)
Example #4
def flexible_server_update_custom_func(cmd,
                                       instance,
                                       sku_name=None,
                                       tier=None,
                                       storage_mb=None,
                                       backup_retention=None,
                                       administrator_login_password=None,
                                       ssl_enforcement=None,
                                       subnet_arm_resource_id=None,
                                       tags=None,
                                       auto_grow=None,
                                       assign_identity=False,
                                       ha_enabled=None,
                                       replication_role=None,
                                       maintenance_window=None,
                                       iops=None):
    # validator
    location = ''.join(instance.location.lower().split())
    sku_info, iops_info = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier,
                              sku_name,
                              storage_mb,
                              backup_retention,
                              sku_info,
                              instance=instance)

    from importlib import import_module

    server_module_path = instance.__module__
    module = import_module(
        server_module_path
    )  # replacement not needed for update in flex servers
    ServerForUpdate = getattr(module, 'ServerForUpdate')

    if storage_mb:
        instance.storage_profile.storage_mb = storage_mb * 1024

    sku_rank = {
        'Standard_B1s': 1,
        'Standard_B1ms': 2,
        'Standard_B2s': 3,
        'Standard_D2ds_v4': 4,
        'Standard_D4ds_v4': 5,
        'Standard_D8ds_v4': 6,
        'Standard_D16ds_v4': 7,
        'Standard_D32ds_v4': 8,
        'Standard_D48ds_v4': 9,
        'Standard_D64ds_v4': 10,
        'Standard_E2ds_v4': 11,
        'Standard_E4ds_v4': 12,
        'Standard_E8ds_v4': 13,
        'Standard_E16ds_v4': 14,
        'Standard_E32ds_v4': 15,
        'Standard_E48ds_v4': 16,
        'Standard_E64ds_v4': 17
    }
    if location == 'eastus2euap':
        sku_rank.update({
            'Standard_D2s_v3': 4,
            'Standard_D4s_v3': 5,
            'Standard_D8s_v3': 6,
            'Standard_D16s_v3': 7,
            'Standard_D32s_v3': 8,
            'Standard_D48s_v3': 9,
            'Standard_D64s_v3': 10,
            'Standard_E2s_v3': 11,
            'Standard_E4s_v3': 12,
            'Standard_E8s_v3': 13,
            'Standard_E16s_v3': 14,
            'Standard_E32s_v3': 15,
            'Standard_E48s_v3': 16,
            'Standard_E64s_v3': 17
        })

    if iops:
        if (tier is None) != (sku_name is None):
            raise CLIError(
                'Argument Error. If you pass --tier, --sku-name is a mandatory parameter and vice versa.'
            )

        if tier is None and sku_name is None:
            iops = _determine_iops(instance.storage_profile.storage_mb // 1024,
                                   iops_info, iops, instance.sku.tier,
                                   instance.sku.name)

        else:
            new_sku_rank = sku_rank[sku_name]
            old_sku_rank = sku_rank[instance.sku.name]
            supplied_iops = iops
            max_allowed_iops_new_sku = iops_info[tier][sku_name]
            default_iops = 100
            free_iops = (instance.storage_profile.storage_mb // 1024) * 3

            # Downgrading SKU
            if new_sku_rank < old_sku_rank:
                if supplied_iops > max_allowed_iops_new_sku:
                    iops = max_allowed_iops_new_sku
                    logger.warning(
                        'The max IOPS for your sku is %s. Provisioning the server with %s...',
                        iops, iops)
                elif supplied_iops < default_iops:
                    if free_iops < default_iops:
                        iops = default_iops
                        logger.warning(
                            'The min IOPS is %s. Provisioning the server with %s...',
                            default_iops, default_iops)
                    else:
                        iops = min(max_allowed_iops_new_sku, free_iops)
                        logger.warning(
                            'Updating the server with %s free IOPS...', iops)
            else:  # Upgrading SKU
                if supplied_iops > max_allowed_iops_new_sku:
                    iops = max_allowed_iops_new_sku
                    logger.warning(
                        'The max IOPS for your sku is %s. Provisioning the server with %s...',
                        iops, iops)
                elif supplied_iops < default_iops:
                    # handle the below-minimum case before the general top-up
                    # branch below; otherwise this branch is unreachable
                    if free_iops < default_iops:
                        iops = default_iops
                        logger.warning(
                            'The min IOPS is %s. Updating the server with %s...',
                            default_iops, default_iops)
                    else:
                        iops = min(max_allowed_iops_new_sku, free_iops)
                        logger.warning(
                            'Updating the server with %s free IOPS...', iops)
                else:  # default_iops <= supplied_iops <= max_allowed_iops_new_sku
                    iops = max(supplied_iops,
                               min(free_iops, max_allowed_iops_new_sku))
                    if iops != supplied_iops:
                        logger.warning(
                            'Updating the server with %s free IOPS...', iops)
            instance.sku.name = sku_name
            instance.sku.tier = tier
        instance.storage_profile.storage_iops = iops

    # pylint: disable=too-many-boolean-expressions
    if (iops is None and tier is None
            and sku_name) or (iops is None and sku_name is None and tier):
        raise CLIError(
            'Argument Error. If you pass --tier, --sku-name is a mandatory parameter and vice versa.'
        )

    if iops is None and sku_name and tier:
        new_sku_rank = sku_rank[sku_name]
        old_sku_rank = sku_rank[instance.sku.name]
        instance.sku.name = sku_name
        instance.sku.tier = tier
        max_allowed_iops_new_sku = iops_info[tier][sku_name]
        iops = instance.storage_profile.storage_iops

        if new_sku_rank < old_sku_rank:  # Downgrading
            if instance.storage_profile.storage_iops > max_allowed_iops_new_sku:
                iops = max_allowed_iops_new_sku
                logger.warning('Updating the server with max %s IOPS...', iops)
        else:  # Upgrading
            if instance.storage_profile.storage_iops < (
                    instance.storage_profile.storage_mb // 1024) * 3:
                iops = min(max_allowed_iops_new_sku,
                           (instance.storage_profile.storage_mb // 1024) * 3)
                logger.warning('Updating the server with free %s IOPS...',
                               iops)

        instance.storage_profile.storage_iops = iops

    if backup_retention:
        instance.storage_profile.backup_retention_days = backup_retention

    if auto_grow:
        instance.storage_profile.storage_autogrow = auto_grow

    if subnet_arm_resource_id:
        instance.delegated_subnet_arguments.subnet_arm_resource_id = subnet_arm_resource_id

    if maintenance_window:
        logger.warning(
            'If you are updating the maintenance window together with other parameters, the '
            'maintenance window will be updated first. Please update the other parameters later.'
        )
        # if "Disabled" is passed in, reset to default values
        if maintenance_window.lower() == "disabled":
            day_of_week = start_hour = start_minute = 0
            custom_window = "Disabled"
        else:
            day_of_week, start_hour, start_minute = parse_maintenance_window(
                maintenance_window)
            custom_window = "Enabled"

        # set values - if maintenance_window was None at creation, create a new object
        if instance.maintenance_window is None:
            instance.maintenance_window = mysql_flexibleservers.models.MaintenanceWindow(
                day_of_week=day_of_week,
                start_hour=start_hour,
                start_minute=start_minute,
                custom_window=custom_window)
        else:
            instance.maintenance_window.day_of_week = day_of_week
            instance.maintenance_window.start_hour = start_hour
            instance.maintenance_window.start_minute = start_minute
            instance.maintenance_window.custom_window = custom_window

        return ServerForUpdate(maintenance_window=instance.maintenance_window)

    params = ServerForUpdate(
        sku=instance.sku,
        storage_profile=instance.storage_profile,
        administrator_login_password=administrator_login_password,
        ssl_enforcement=ssl_enforcement,
        delegated_subnet_arguments=instance.delegated_subnet_arguments,
        tags=tags,
        ha_enabled=ha_enabled,
        replication_role=replication_role)

    if assign_identity:
        # str.find() returns 0 (falsy) for a match at index 0 and -1 (truthy)
        # for no match, so use a membership test instead
        if 'mysql' in server_module_path:
            if instance.identity is None:
                instance.identity = mysql_flexibleservers.models.Identity()
            params.identity = instance.identity

    return params
Example #5
def acs_create(resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,  # pylint: disable=too-many-locals
               admin_username="******", agent_count="3",
               agent_vm_size="Standard_D2_v2", location=None, master_count="1",
               orchestrator_type="dcos", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
     is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
     The concatenation of the domain name and the regionalized DNS zone
     make up the fully qualified domain name associated with the public
     IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
     public key string. Your key should include three parts, for example
     'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
     in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param agent_count: The number of agents for the cluster.  Note, for
     DC/OS clusters you will also get 1 or 2 public agents in addition to
     these selected masters.
    :type agent_count: str
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param location: Location for VM resources.
    :type location: str
    :param master_count: The number of masters for the cluster.
    :type master_count: str
    :param orchestrator_type: The type of orchestrator used to manage the
     applications on the cluster. Possible values include: 'dcos', 'swarm'
    :type orchestrator_type: str or :class:`orchestratorType
     <Default.models.orchestratorType>`
    :param service_principal: The service principal used for cluster authentication
     to Azure APIs. If not specified, it is created for you and stored in the
     ${HOME}/.azure directory.
    :type service_principal: str
    :param client_secret: The secret associated with the service principal. If
     --service-principal is specified, then secret should also be specified. If
     --service-principal is not specified, the secret is auto-generated for you
     and stored in ${HOME}/.azure/ directory.
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`DeploymentExtended
     <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    if ssh_key_value is not None and not _is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))

    subscription_id = _get_subscription_id()
    if not dns_name_prefix:
        # Use subscription id to provide uniqueness and prevent DNS name clashes
        dns_name_prefix = '{}-{}-{}'.format(name, resource_group_name, subscription_id[0:6])

    register_providers()
    groups = _resource_client_factory().resource_groups
    # Just do the get, we don't need the result, it will error out if the group doesn't exist.
    rg = groups.get(resource_group_name)

    if orchestrator_type in ('Kubernetes', 'kubernetes'):
        # TODO: This really needs to be broken out and unit tested.
        client = _graph_client_factory()
        if not service_principal:
            # --service-principal not specified, try to load it from local disk
            principalObj = load_acs_service_principal(subscription_id)
            if principalObj:
                service_principal = principalObj.get('service_principal')
                client_secret = principalObj.get('client_secret')
                _validate_service_principal(client, service_principal)
            else:
                # Nothing to load, make one.
                if not client_secret:
                    client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
                salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
                url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)

                service_principal = _build_service_principal(client, name, url, client_secret)
                logger.info('Created a service principal: %s', service_principal)
                store_acs_service_principal(subscription_id, client_secret, service_principal)
            # Either way, update the role assignment, this fixes things if we fail part-way through
            if not _add_role_assignment('Contributor', service_principal):
                raise CLIError(
                    'Could not create a service principal with the right permissions. Are you an Owner on this project?')
        else:
            # --service-principal specified, validate --client-secret was too
            if not client_secret:
                raise CLIError('--client-secret is required if --service-principal is specified')
            _validate_service_principal(client, service_principal)

        return _create_kubernetes(resource_group_name, deployment_name, dns_name_prefix, name,
                                  ssh_key_value, admin_username=admin_username,
                                  agent_count=agent_count, agent_vm_size=agent_vm_size,
                                  location=location, service_principal=service_principal,
                                  client_secret=client_secret, master_count=master_count,
                                  windows=windows, admin_password=admin_password,
                                  validate=validate, no_wait=no_wait, tags=tags)

    if windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    if location is None:
        location = rg.location  # pylint:disable=no-member
    return _create_non_kubernetes(resource_group_name, deployment_name, dns_name_prefix, name,
                                  ssh_key_value, admin_username, agent_count, agent_vm_size, location,
                                  orchestrator_type, master_count, tags, validate, no_wait)
Example #6
def validate_acr(namespace):
    if namespace.attach_acr and namespace.detach_acr:
        raise CLIError(
            'Cannot specify "--attach-acr" and "--detach-acr" at the same time.'
        )
Example #7
def validate_load_balancer_idle_timeout(namespace):
    """validate load balancer profile idle timeout"""
    if namespace.load_balancer_idle_timeout is not None:
        if namespace.load_balancer_idle_timeout < 4 or namespace.load_balancer_idle_timeout > 120:
            raise CLIError(
                "--load-balancer-idle-timeout must be in the range [4,120]")
Example #8
def validate_vnet(cmd, namespace):
    if not namespace.vnet and not namespace.app_subnet and \
       not namespace.service_runtime_subnet and not namespace.reserved_cidr_range:
        return
    validate_vnet_required_parameters(namespace)

    vnet_id = ''
    if namespace.vnet:
        vnet_id = namespace.vnet
        # format the app_subnet and service_runtime_subnet
        if not is_valid_resource_id(vnet_id):
            if vnet_id.count('/') > 0:
                raise CLIError(
                    '--vnet {0} is not a valid name or resource ID'.format(
                        vnet_id))
            vnet_id = resource_id(subscription=get_subscription_id(
                cmd.cli_ctx),
                                  resource_group=namespace.resource_group,
                                  namespace='Microsoft.Network',
                                  type='virtualNetworks',
                                  name=vnet_id)
        else:
            vnet = parse_resource_id(vnet_id)
            if vnet['namespace'].lower() != 'microsoft.network' or \
                    vnet['type'].lower() != 'virtualnetworks':
                raise CLIError(
                    '--vnet {0} is not a valid VirtualNetwork resource ID'.format(vnet_id))
        namespace.app_subnet = _construct_subnet_id(vnet_id,
                                                    namespace.app_subnet)
        namespace.service_runtime_subnet = _construct_subnet_id(
            vnet_id, namespace.service_runtime_subnet)
    else:
        app_vnet_id = _parse_vnet_id_from_subnet(namespace.app_subnet)
        service_runtime_vnet_id = _parse_vnet_id_from_subnet(
            namespace.service_runtime_subnet)
        if app_vnet_id.lower() != service_runtime_vnet_id.lower():
            raise CLIError(
                '--app-subnet and --service-runtime-subnet should be in the same virtual network.'
            )
        vnet_id = app_vnet_id
    if namespace.app_subnet.lower() == namespace.service_runtime_subnet.lower():
        raise CLIError(
            '--app-subnet and --service-runtime-subnet should not be the same.'
        )

    vnet_obj = _get_vnet(cmd, vnet_id)
    instance_location = namespace.location
    if instance_location is None:
        instance_location = _get_rg_location(cmd.cli_ctx,
                                             namespace.resource_group)
    else:
        instance_location_slice = instance_location.split(" ")
        instance_location = "".join(
            [piece.lower() for piece in instance_location_slice])
    if vnet_obj.location.lower() != instance_location.lower():
        raise CLIError(
            '--vnet and Azure Spring Cloud instance should be in the same location.'
        )
    for subnet in vnet_obj.subnets:
        _validate_subnet(namespace, subnet)

    if namespace.reserved_cidr_range:
        _validate_cidr_range(namespace)
    else:
        namespace.reserved_cidr_range = _set_default_cidr_range(vnet_obj.address_space.address_prefixes) if \
            vnet_obj and vnet_obj.address_space and vnet_obj.address_space.address_prefixes \
            else '10.234.0.0/16,10.244.0.0/16,172.17.0.1/16'
Example #9
def validate_sku(namespace):
    if namespace.sku is not None:
        namespace.sku = namespace.sku.upper()
        if namespace.sku not in ['BASIC', 'STANDARD']:
            raise CLIError(
                "The pricing tier only accepts value [Basic, Standard]")
Example #10
def add_extension(
        cmd=None,
        source=None,
        extension_name=None,
        index_url=None,
        yes=None,  # pylint: disable=unused-argument, too-many-statements
        pip_extra_index_urls=None,
        pip_proxy=None,
        system=None,
        version=None,
        cli_ctx=None,
        upgrade=None):
    ext_sha256 = None

    version = None if version == 'latest' else version
    cmd_cli_ctx = cli_ctx or cmd.cli_ctx
    if extension_name:
        cmd_cli_ctx.get_progress_controller().add(message='Searching')
        ext = None
        try:
            ext = get_extension(extension_name)
        except ExtensionNotInstalledException:
            pass
        if ext:
            if isinstance(ext, WheelExtension):
                if not upgrade:
                    logger.warning("Extension '%s' is already installed.",
                                   extension_name)
                    return
                logger.warning("Extension '%s' %s is already installed.",
                               extension_name, ext.get_version())
                if version and version == ext.get_version():
                    return
                logger.warning(
                    "It will be overridden with version {}.".format(version)
                    if version else "It will be updated if available.")
                update_extension(cmd=cmd,
                                 extension_name=extension_name,
                                 index_url=index_url,
                                 pip_extra_index_urls=pip_extra_index_urls,
                                 pip_proxy=pip_proxy,
                                 cli_ctx=cli_ctx,
                                 version=version)
                return
            logger.warning(
                "Overriding development version of '%s' with production version.",
                extension_name)
        try:
            source, ext_sha256 = resolve_from_index(extension_name,
                                                    index_url=index_url,
                                                    target_version=version,
                                                    cli_ctx=cmd_cli_ctx)
        except NoExtensionCandidatesError as err:
            logger.debug(err)

            if version:
                err = "No matching extensions for '{} ({})'. Use --debug for more information.".format(
                    extension_name, version)
            else:
                err = "No matching extensions for '{}'. Use --debug for more information.".format(
                    extension_name)
            raise CLIError(err)

    extension_name = _add_whl_ext(cli_ctx=cmd_cli_ctx,
                                  source=source,
                                  ext_sha256=ext_sha256,
                                  pip_extra_index_urls=pip_extra_index_urls,
                                  pip_proxy=pip_proxy,
                                  system=system)
    try:
        ext = get_extension(extension_name)
        _augment_telemetry_with_ext_info(extension_name, ext)
        if extension_name and ext.experimental:
            logger.warning(
                "The installed extension '%s' is experimental and not covered by customer support. "
                "Please use with discretion.", extension_name)
        elif extension_name and ext.preview:
            logger.warning("The installed extension '%s' is in preview.",
                           extension_name)
        CommandIndex().invalidate()
    except ExtensionNotInstalledException:
        pass
Example #11
def _add_whl_ext(cli_ctx,
                 source,
                 ext_sha256=None,
                 pip_extra_index_urls=None,
                 pip_proxy=None,
                 system=None):  # pylint: disable=too-many-statements
    cli_ctx.get_progress_controller().add(message='Analyzing')
    if not source.endswith('.whl'):
        raise ValueError(
            'Unknown extension type. Only Python wheels are supported.')
    url_parse_result = urlparse(source)
    is_url = (url_parse_result.scheme == 'http'
              or url_parse_result.scheme == 'https')
    logger.debug('Extension source is url? %s', is_url)
    whl_filename = os.path.basename(
        url_parse_result.path) if is_url else os.path.basename(source)
    parsed_filename = WHEEL_INFO_RE(whl_filename)
    # Extension names can have - but .whl format changes it to _ (PEP 0427). Undo this.
    extension_name = parsed_filename.groupdict().get('name').replace(
        '_', '-') if parsed_filename else None
    if not extension_name:
        raise CLIError(
            'Unable to determine extension name from {}. Is the file name correct?'
            .format(source))
    if extension_exists(extension_name, ext_type=WheelExtension):
        raise CLIError(
            'The extension {} already exists.'.format(extension_name))
    if extension_name == 'rdbms-connect':
        _install_deps_for_psycopg2()
    ext_file = None
    if is_url:
        # Download from URL
        tmp_dir = tempfile.mkdtemp()
        ext_file = os.path.join(tmp_dir, whl_filename)
        logger.debug('Downloading %s to %s', source, ext_file)
        import requests
        try:
            cli_ctx.get_progress_controller().add(message='Downloading')
            _whl_download_from_url(url_parse_result, ext_file)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError) as err:
            raise CLIError(
                'Please ensure you have a network connection. Error detail: {}'.format(
                    str(err)))
        logger.debug('Downloaded to %s', ext_file)
    else:
        # Get file path
        ext_file = os.path.realpath(os.path.expanduser(source))
        if not os.path.isfile(ext_file):
            raise CLIError("File {} not found.".format(source))
    # Validate the extension
    logger.debug('Validating the extension %s', ext_file)
    if ext_sha256:
        valid_checksum, computed_checksum = is_valid_sha256sum(
            ext_file, ext_sha256)
        if valid_checksum:
            logger.debug("Checksum of %s is OK", ext_file)
        else:
            logger.debug(
                "Invalid checksum for %s. Expected '%s', computed '%s'.",
                ext_file, ext_sha256, computed_checksum)
            raise CLIError(
                "The checksum of the extension does not match the expected value. "
                "Use --debug for more information.")
    try:
        cli_ctx.get_progress_controller().add(message='Validating')
        _validate_whl_extension(ext_file)
    except AssertionError:
        logger.debug(traceback.format_exc())
        raise CLIError(
            'The extension is invalid. Use --debug for more information.')
    except CLIError as e:
        raise e
    logger.debug('Validation successful on %s', ext_file)
    # Check for distro consistency
    check_distro_consistency()
    cli_ctx.get_progress_controller().add(message='Installing')
    # Install with pip
    extension_path = build_extension_path(extension_name, system)
    pip_args = ['install', '--target', extension_path, ext_file]

    if pip_proxy:
        pip_args = pip_args + ['--proxy', pip_proxy]
    if pip_extra_index_urls:
        for extra_index_url in pip_extra_index_urls:
            pip_args = pip_args + ['--extra-index-url', extra_index_url]

    logger.debug('Executing pip with args: %s', pip_args)
    with HomebrewPipPatch():
        pip_status_code = _run_pip(pip_args, extension_path)
    if pip_status_code > 0:
        logger.debug(
            'Pip failed so deleting anything we might have installed at %s',
            extension_path)
        shutil.rmtree(extension_path, ignore_errors=True)
        raise CLIError(
            'An error occurred. Pip failed with status code {}. '
            'Use --debug for more information.'.format(pip_status_code))
    # Save the whl we used to install the extension in the extension dir.
    dst = os.path.join(extension_path, whl_filename)
    shutil.copyfile(ext_file, dst)
    logger.debug('Saved the whl to %s', dst)

    return extension_name
Example #12
def _install_deps_for_psycopg2():  # pylint: disable=too-many-statements
    # Below system dependencies are required to install the psycopg2 dependency for Linux and macOS
    import platform
    import subprocess
    from azure.cli.core.util import get_linux_distro
    from azure.cli.core._environment import _ENV_AZ_INSTALLER
    installer = os.getenv(_ENV_AZ_INSTALLER)
    system = platform.system()
    if system == 'Darwin':
        subprocess.call(['xcode-select', '--install'],
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL)
        if installer != 'HOMEBREW':
            from shutil import which
            if which('brew') is None:
                logger.warning(
                    'You may need to install postgresql with homebrew first before you install this extension.'
                )
                return
        exit_code = subprocess.call(['brew', 'list', 'postgresql'],
                                    stdout=subprocess.DEVNULL,
                                    stderr=subprocess.DEVNULL)
        if exit_code != 0:
            update_cmd = ['brew', 'install', 'postgresql']
            logger.warning(
                'This extension depends on postgresql and it will be installed first.'
            )
            logger.debug("Install dependencies with '%s'",
                         " ".join(update_cmd))
            subprocess.call(update_cmd)
        # Fix the issue of -lssl not found during building psycopg2
        if os.environ.get('LIBRARY_PATH') is None:
            os.environ['LIBRARY_PATH'] = '/usr/local/opt/openssl/lib/'
        else:
            os.environ['LIBRARY_PATH'] = os.pathsep.join([
                os.environ.get('LIBRARY_PATH'), '/usr/local/opt/openssl/lib/'
            ])
    elif system == 'Linux':
        distname, _ = get_linux_distro()
        distname = distname.lower().strip()
        if installer == 'DEB' or any(x in distname
                                     for x in ['ubuntu', 'debian']):
            from azure.cli.core.util import in_cloud_console
            if in_cloud_console():
                raise CLIError(
                    "This extension is not supported in Cloud Shell as you do not have permission to install extra dependencies."
                )
            exit_code = subprocess.call(
                ['dpkg', '-s', 'gcc', 'libpq-dev', 'python3-dev'],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL)
            if exit_code != 0:
                logger.warning(
                    'This extension depends on gcc, libpq-dev, python3-dev and they will be installed first.'
                )
                apt_update_cmd = 'apt-get update'.split()
                apt_install_cmd = 'apt-get install -y gcc libpq-dev python3-dev'.split()
                if os.geteuid() != 0:  # pylint: disable=no-member
                    apt_update_cmd.insert(0, 'sudo')
                    apt_install_cmd.insert(0, 'sudo')
                exit_code = subprocess.call(apt_update_cmd)
                if exit_code == 0:
                    logger.debug("Install dependencies with '%s'",
                                 " ".join(apt_install_cmd))
                    subprocess.call(apt_install_cmd)
        elif installer == 'RPM' or any(x in distname for x in [
                'centos', 'rhel', 'red hat', 'fedora', 'opensuse', 'suse',
                'sles'
        ]):
            if any(x in distname
                   for x in ['centos', 'rhel', 'red hat', 'fedora']):
                yum_install_cmd = 'yum install -y gcc postgresql-devel python3-devel'.split()
                if os.geteuid() != 0:  # pylint: disable=no-member
                    yum_install_cmd.insert(0, 'sudo')
                logger.debug("Install dependencies with '%s'",
                             " ".join(yum_install_cmd))
                logger.warning(
                    'This extension depends on gcc, postgresql-devel, python3-devel and they will be installed first if not present.'
                )
                subprocess.call(yum_install_cmd)
            elif any(x in distname for x in ['opensuse', 'suse', 'sles']):
                zypper_refresh_cmd = ['zypper', 'refresh']
                zypper_install_cmd = 'zypper install -y gcc postgresql-devel python3-devel'.split()
                logger.warning(
                    'This extension depends on gcc, postgresql-devel, python3-devel and they will be installed first if not present.'
                )
                if os.geteuid() != 0:  # pylint: disable=no-member
                    zypper_refresh_cmd.insert(0, 'sudo')
                    zypper_install_cmd.insert(0, 'sudo')
                exit_code = subprocess.call(zypper_refresh_cmd)
                if exit_code == 0:
                    logger.debug("Install dependencies with '%s'",
                                 " ".join(zypper_install_cmd))
                    subprocess.call(zypper_install_cmd)
Example #13
def validate_peering_type(namespace):
    if namespace.peering_type and namespace.peering_type == 'MicrosoftPeering':

        if not namespace.advertised_public_prefixes:
            raise CLIError(
                'missing required MicrosoftPeering parameter --advertised-public-prefixes')
Example #14
def remove_extension(extension_name):
    try:
        get_extension(extension_name)
        shutil.rmtree(get_extension_path(extension_name))
    except ExtensionNotInstalledException as e:
        raise CLIError(e)
Example #15
def _report_client_side_validation_error(msg):
    if force:
        logger.warning(msg)
    else:
        raise CLIError(msg)
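
This helper reads force from an enclosing scope, so it is only meaningful as a nested function. A sketch of that closure pattern, with a hypothetical enclosing validator (logger and CLIError as in the surrounding examples):

def validate_payload(payload, force=False):
    def _report_client_side_validation_error(msg):
        if force:
            logger.warning(msg)  # --force downgrades the error to a warning
        else:
            raise CLIError(msg)

    if not payload:
        _report_client_side_validation_error('payload must not be empty')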
Example #16
def validate_instance_count(namespace):
    if namespace.instance_count is not None:
        if namespace.instance_count < 1:
            raise CLIError("--instance-count must be greater than 0")
Example #17
def validate_load_balancer_outbound_ip_prefixes(namespace):
    """validate load balancer profile outbound IP prefix ids"""
    if namespace.load_balancer_outbound_ip_prefixes is not None:
        ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
        if not all(ip_prefix_id_list):
            raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace")
Example #18
def validate_resource_id(namespace):
    if not is_valid_resource_id(namespace.resource_id):
        raise CLIError("Invalid resource id {}".format(namespace.resource_id))
Example #19
def validate_user(namespace):
    if namespace.user.lower() != "clusteruser" and \
            namespace.user.lower() != "clustermonitoringuser":
        raise CLIError(
            "--user can only be clusterUser or clusterMonitoringUser")
Example #20
def set_active_subscription(subscription):
    """Set the current subscription"""
    if not subscription:
        raise CLIError('Please provide subscription id or unique name.')
    profile = Profile()
    profile.set_active_subscription(subscription)
Example #21
def validate_create_parameters(namespace):
    if not namespace.name:
        raise CLIError('--name has no value')
    if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix:
        raise CLIError('--dns-prefix has no value')
Example #22
def _check_name_availability(iot_hub_resource, hub_name):
    name_availability = iot_hub_resource.check_name_availability(hub_name)
    if name_availability is not None and not name_availability.name_available:
        raise CLIError(name_availability.message)
Example #23
def flexible_server_create(cmd,
                           client,
                           resource_group_name=None,
                           server_name=None,
                           sku_name=None,
                           tier=None,
                           location=None,
                           storage_mb=None,
                           administrator_login=None,
                           administrator_login_password=None,
                           version=None,
                           backup_retention=None,
                           tags=None,
                           public_access=None,
                           database_name=None,
                           subnet_arm_resource_id=None,
                           high_availability=None,
                           zone=None,
                           assign_identity=False,
                           vnet_resource_id=None,
                           vnet_address_prefix=None,
                           subnet_address_prefix=None,
                           iops=None):
    # validator
    if location is None:
        location = DEFAULT_LOCATION_MySQL
    sku_info, iops_info = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier,
                              sku_name,
                              storage_mb,
                              backup_retention,
                              sku_info,
                              version=version)

    db_context = DbContext(azure_sdk=mysql_flexibleservers,
                           cf_firewall=cf_mysql_flexible_firewall_rules,
                           cf_db=cf_mysql_flexible_db,
                           logging_name='MySQL',
                           command_group='mysql',
                           server_client=client)

    # Raise error when user passes values for both parameters
    if subnet_arm_resource_id is not None and public_access is not None:
        raise CLIError(
            "Incorrect usage: A combination of the parameters --subnet "
            "and --public-access is invalid. Use either one of them.")

    # When address space parameters are passed, the only valid combination is: --vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix
    if (vnet_address_prefix is not None) or (subnet_address_prefix is not None):
        # all four parameters must be supplied together
        if None in (vnet_address_prefix, subnet_address_prefix,
                    vnet_resource_id, subnet_arm_resource_id):
            raise CLIError(
                "Incorrect usage: "
                "--vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix must be supplied together."
            )

    server_result = firewall_id = subnet_id = None

    # Populate desired parameters
    location, resource_group_name, server_name = generate_missing_parameters(
        cmd, location, resource_group_name, server_name, 'mysql')
    server_name = server_name.lower()

    # Handle Vnet scenario
    if (subnet_arm_resource_id is not None) or (vnet_resource_id is not None):
        subnet_id = prepare_vnet(cmd, server_name, vnet_resource_id,
                                 subnet_arm_resource_id, resource_group_name,
                                 location, DELEGATION_SERVICE_NAME,
                                 vnet_address_prefix, subnet_address_prefix)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    elif public_access is None and subnet_arm_resource_id is None and vnet_resource_id is None:
        subnet_id = create_vnet(cmd, server_name, location,
                                resource_group_name, DELEGATION_SERVICE_NAME)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    else:
        delegated_subnet_arguments = None

    # calculate IOPS
    iops = _determine_iops(storage_mb, iops_info, iops, tier, sku_name)

    storage_mb *= 1024  # storage input comes in GiB value
    administrator_login_password = generate_password(
        administrator_login_password)
    if server_result is None:
        # Create mysql server
        # Note : passing public_access has no effect as the accepted values are 'Enabled' and 'Disabled'. So the value ends up being ignored.
        server_result = _create_server(
            db_context, cmd, resource_group_name, server_name, location,
            backup_retention, sku_name, tier, storage_mb, administrator_login,
            administrator_login_password, version, tags,
            delegated_subnet_arguments, assign_identity, public_access,
            high_availability, zone, iops)

        # Adding firewall rule
        if public_access is not None and str(public_access).lower() != 'none':
            if str(public_access).lower() == 'all':
                start_ip, end_ip = '0.0.0.0', '255.255.255.255'
            else:
                start_ip, end_ip = parse_public_access_input(public_access)
            firewall_id = create_firewall_rule(db_context, cmd,
                                               resource_group_name,
                                               server_name, start_ip, end_ip)

        # Create mysql database if it does not exist
        if database_name is None:
            database_name = DEFAULT_DB_NAME
        _create_database(db_context, cmd, resource_group_name, server_name,
                         database_name)

    user = server_result.administrator_login
    server_id = server_result.id
    loc = server_result.location
    version = server_result.version
    sku = server_result.sku.name
    host = server_result.fully_qualified_domain_name

    logger.warning(
        'Make a note of your password. If you forget, you would have to reset your password with '
        '\'az mysql flexible-server update -n %s -g %s -p <new-password>\'.',
        server_name, resource_group_name)

    _update_local_contexts(cmd, server_name, resource_group_name, location,
                           user)

    return _form_response(
        user, sku, loc, server_id, host, version, administrator_login_password
        if administrator_login_password is not None else '*****',
        _create_mysql_connection_string(host, database_name, user,
                                        administrator_login_password),
        database_name, firewall_id, subnet_id)
Example #24
def set_cloud(cloud_name):
    try:
        switch_active_cloud(cloud_name)
    except CloudNotRegisteredException as e:
        raise CLIError(e)
Example #25
def _create_kubernetes(resource_group_name, deployment_name, dns_name_prefix, name, ssh_key_value,
                       admin_username="******", agent_count="3", agent_vm_size="Standard_D2_v2",
                       location=None, service_principal=None, client_secret=None, master_count="1",
                       windows=False, admin_password='', validate=False, no_wait=False, tags=None):
    if not location:
        location = '[resourceGroup().location]'
    windows_profile = None
    os_type = 'Linux'
    if windows:
        if len(admin_password) == 0:
            raise CLIError('--admin-password is required.')
        if len(admin_password) < 6:
            raise CLIError('--admin-password must be at least 6 characters')
        windows_profile = {
            "adminUsername": admin_username,
            "adminPassword": admin_password,
        }
        os_type = 'Windows'

    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        },
        "resources": [
            {
                "apiVersion": "2017-01-31",
                "location": location,
                "type": "Microsoft.ContainerService/containerServices",
                "name": name,
                "tags": tags,
                "properties": {
                    "orchestratorProfile": {
                        "orchestratorType": "kubernetes"
                    },
                    "masterProfile": {
                        "count": master_count,
                        "dnsPrefix": dns_name_prefix
                    },
                    "agentPoolProfiles": [
                        {
                            "name": "agentpools",
                            "count": agent_count,
                            "vmSize": agent_vm_size,
                            "dnsPrefix": dns_name_prefix + '-k8s-agents',
                            "osType": os_type,
                        }
                    ],
                    "linuxProfile": {
                        "ssh": {
                            "publicKeys": [
                                {
                                    "keyData": ssh_key_value
                                }
                            ]
                        },
                        "adminUsername": admin_username
                    },
                    "windowsProfile": windows_profile,
                    "servicePrincipalProfile": {
                        "ClientId": service_principal,
                        "Secret": "[parameters('clientSecret')]"
                    }
                }
            }
        ]
    }
    params = {
        "clientSecret": {
            "value": client_secret
        }
    }

    return _invoke_deployment(resource_group_name, deployment_name, template, params, validate, no_wait)
Example #26
def decrypt_vm(resource_group_name, vm_name, volume_type=None, force=False):
    '''
    Disable disk encryption on OS disk, Data disks, or both
    '''
    compute_client = _compute_client_factory()
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    # pylint: disable=no-member
    os_type = vm.storage_profile.os_disk.os_type.value

    # 1. be nice, figure out the default volume type and also verify VM will not be busted
    is_linux = _is_linux_vm(os_type)
    if is_linux:
        if volume_type:
            if not force:
                if volume_type == _DATA_VOLUME_TYPE:
                    status = show_vm_encryption_status(resource_group_name,
                                                       vm_name)
                    if status['osDisk'] == _STATUS_ENCRYPTED:
                        raise CLIError(
                            "Linux VM's OS disk is encrypted. Disabling encryption on data "
                            "disk can render the VM unbootable. Use '--force' "
                            "to ingore the warning")
                else:
                    raise CLIError(
                        "Only Data disks can have encryption disabled in a Linux VM. "
                        "Use '--force' to ingore the warning")
        else:
            volume_type = _DATA_VOLUME_TYPE
    elif volume_type is None:
        if vm.storage_profile.data_disks:
            raise CLIError("VM has data disks, please specify --volume-type")

    # sequence_version should be incremented since encryptions occurred before
    extension = vm_extension_info[os_type]
    sequence_version = uuid.uuid4()

    # 2. update the disk encryption extension
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
        'SequenceVersion': sequence_version,
    }

    from azure.mgmt.compute.models import VirtualMachineExtension, DiskEncryptionSettings

    ext = VirtualMachineExtension(
        vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        type_handler_version=extension['version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # 3. Remove the secret from VM's storage profile
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError("Extension updating didn't succeed")

    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    disk_encryption_settings = DiskEncryptionSettings(enabled=False)
    vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
    set_vm(vm)
Example #27
def restore_disks(cmd, client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account,
                  target_resource_group=None, restore_to_staging_storage_account=None, restore_only_osdisk=None,
                  diskslist=None, restore_as_unmanaged_disks=None):
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name, container_name,
                     item_name, "AzureIaasVM", "VM")
    _validate_item(item)
    recovery_point = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name, vault_name,
                                         container_name, item_name, rp_name, "AzureIaasVM", "VM")
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    vault_location = vault.location

    # Get container and item URIs
    container_uri = _get_protection_container_uri_from_id(item.id)
    item_uri = _get_protected_item_uri_from_id(item.id)

    # Original Storage Account Restore Logic
    use_original_storage_account = _should_use_original_storage_account(recovery_point,
                                                                        restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning(
            """
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to the
            given storage account.
            """)

    # Construct trigger restore request object
    sa_name, sa_rg = _get_resource_name_and_rg(resource_group_name, storage_account)
    _storage_account_id = _get_storage_account_id(cmd.cli_ctx, sa_name, sa_rg)
    _source_resource_id = item.properties.source_resource_id
    target_rg_id = None

    if restore_as_unmanaged_disks and target_resource_group is not None:
        raise CLIError(
            """
            Both restore_as_unmanaged_disks and target_resource_group can't be specified.
            Please give only one parameter and retry.
            """)

    if recovery_point.properties.is_managed_virtual_machine:
        if target_resource_group is not None:
            target_rg_id = '/'.join(_source_resource_id.split('/')[:4]) + "/" + target_resource_group
        if not restore_as_unmanaged_disks and target_resource_group is None:
            logger.warning(
                """
                The disks of the managed VM will be restored as unmanaged since the targetRG parameter was not provided.
                This will NOT leverage the instant restore functionality.
                Hence it can be significantly slower, depending on the given storage account.
                To leverage instant restore, provide the target RG parameter.
                Otherwise, state the intent next time by passing the --restore-as-unmanaged-disks parameter.
                """)

    _validate_restore_disk_parameters(restore_only_osdisk, diskslist)
    restore_disk_lun_list = None
    if restore_only_osdisk:
        restore_disk_lun_list = []

    if diskslist:
        restore_disk_lun_list = diskslist

    trigger_restore_properties = IaasVMRestoreRequest(create_new_cloud_service=True,
                                                      recovery_point_id=rp_name,
                                                      recovery_type='RestoreDisks',
                                                      region=vault_location,
                                                      storage_account_id=_storage_account_id,
                                                      source_resource_id=_source_resource_id,
                                                      target_resource_group_id=target_rg_id,
                                                      original_storage_account_option=use_original_storage_account,
                                                      restore_disk_lun_list=restore_disk_lun_list)
    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Trigger restore
    result = client.trigger(vault_name, resource_group_name, fabric_name,
                            container_uri, item_uri, rp_name,
                            trigger_restore_request, raw=True)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
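The restore_disk_lun_list value built above is effectively tri-state. A minimal standalone sketch of that mapping, assuming _validate_restore_disk_parameters rejects combining restore_only_osdisk with diskslist:

def _resolve_restore_lun_list(restore_only_osdisk, diskslist):
    # None  -> restore all disks
    # []    -> restore only the OS disk
    # [...] -> restore the OS disk plus the listed data-disk LUNs
    if restore_only_osdisk:
        return []
    if diskslist:
        return list(diskslist)
    return None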
Example #28
def encrypt_vm(
        resource_group_name,
        vm_name,  # pylint: disable=too-many-locals, too-many-statements
        aad_client_id,
        disk_encryption_keyvault,
        aad_client_secret=None,
        aad_client_cert_thumbprint=None,
        key_encryption_keyvault=None,
        key_encryption_key=None,
        key_encryption_algorithm='RSA-OAEP',
        volume_type=None):
    '''
    Enable disk encryption on the OS disk, data disks, or both
    :param str aad_client_id: Client ID of an AAD app with permissions to write secrets to the KeyVault
    :param str aad_client_secret: Client secret of the AAD app with permissions to
    write secrets to the KeyVault
    :param str aad_client_cert_thumbprint: Thumbprint of the AAD app certificate with permissions
    to write secrets to the KeyVault
    :param str disk_encryption_keyvault: the KeyVault where the generated encryption key will be placed
    :param str key_encryption_key: KeyVault key name or URL used to encrypt the disk encryption key
    :param str key_encryption_keyvault: the KeyVault containing the key encryption key
    used to encrypt the disk encryption key. If missing, the CLI will use --disk-encryption-keyvault
    '''
    # pylint: disable=no-member
    compute_client = _compute_client_factory()
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    os_type = vm.storage_profile.os_disk.os_type.value
    is_linux = _is_linux_vm(os_type)
    extension = vm_extension_info[os_type]

    # 1. First validate arguments

    if not aad_client_cert_thumbprint and not aad_client_secret:
        raise CLIError(
            'Please provide either --aad-client-secret or --aad-client-cert-thumbprint'
        )

    if volume_type is None:
        if vm.storage_profile.data_disks:
            raise CLIError('VM has data disks, please supply --volume-type')
        else:
            volume_type = 'OS'

    # encryption is not supported on all Linux distros, but the service never tells you,
    # so let us verify on the client side
    if is_linux:
        image_reference = getattr(vm.storage_profile, 'image_reference', None)
        if image_reference:
            result, message = _check_encrypt_is_supported(
                image_reference, volume_type)
            if not result:
                logger.warning(message)

    # sequence_version should be unique
    sequence_version = uuid.uuid4()

    # retrieve keyvault details
    disk_encryption_keyvault_url = _get_key_vault_base_url(
        (parse_resource_id(disk_encryption_keyvault))['name'])

    # disk encryption key itself can be further protected, so let us verify
    if key_encryption_key:
        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
        if '://' not in key_encryption_key:  # appears to be a key name
            key_encryption_key = _get_keyvault_key_url(
                (parse_resource_id(key_encryption_keyvault))['name'],
                key_encryption_key)

    # 2. we are ready to provision/update the disk encryption extensions
    # The following logic was mostly ported from xplat-cli
    public_config = {
        'AADClientID': aad_client_id,
        'AADClientCertThumbprint': aad_client_cert_thumbprint,
        'KeyVaultURL': disk_encryption_keyvault_url,
        'VolumeType': volume_type,
        'EncryptionOperation': 'EnableEncryption',
        'KeyEncryptionKeyURL': key_encryption_key,
        'KeyEncryptionAlgorithm': key_encryption_algorithm,
        'SequenceVersion': sequence_version,
    }
    private_config = {
        'AADClientSecret':
        aad_client_secret if is_linux else (aad_client_secret or '')
    }

    from azure.mgmt.compute.models import (VirtualMachineExtension,
                                           DiskEncryptionSettings,
                                           KeyVaultSecretReference,
                                           KeyVaultKeyReference, SubResource)

    ext = VirtualMachineExtension(
        vm.location,  # pylint: disable=no-member
        publisher=extension['publisher'],
        virtual_machine_extension_type=extension['name'],
        protected_settings=private_config,
        type_handler_version=extension['version'],
        settings=public_config,
        auto_upgrade_minor_version=True)

    poller = compute_client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, extension['name'], ext)
    poller.result()

    # verify the extension was ok
    extension_result = compute_client.virtual_machine_extensions.get(
        resource_group_name, vm_name, extension['name'], 'instanceView')
    if extension_result.provisioning_state != 'Succeeded':
        raise CLIError(
            'Extension needed for disk encryption was not provisioned correctly'
        )
    if not (extension_result.instance_view.statuses
            and extension_result.instance_view.statuses[0].message):
        raise CLIError(
            'Could not find the URL pointing to the secret for disk encryption')

    # 3. update VM's storage profile with the secrets
    status_url = extension_result.instance_view.statuses[0].message

    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    secret_ref = KeyVaultSecretReference(
        secret_url=status_url,
        source_vault=SubResource(disk_encryption_keyvault))

    key_encryption_key_obj = None
    if key_encryption_key:
        key_encryption_key_obj = KeyVaultKeyReference(
            key_encryption_key, SubResource(key_encryption_keyvault))

    disk_encryption_settings = DiskEncryptionSettings(
        disk_encryption_key=secret_ref,
        key_encryption_key=key_encryption_key_obj,
        enabled=True)

    vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
    set_vm(vm)
    if is_linux and volume_type != _DATA_VOLUME_TYPE:
        # TODO: expose a 'wait' command to do the monitor and handle the reboot
        logger.warning(
            "The encryption request was accepted. Please use the 'show' command to monitor "
            "the progress. If you see 'VMRestartPending', restart the VM, and "
            "the encryption will finish shortly")
Example #29
def create_keyvault(client,  # pylint: disable=too-many-locals
                    resource_group_name, vault_name, location=None, sku=SkuName.standard.value,
                    enabled_for_deployment=None,
                    enabled_for_disk_encryption=None,
                    enabled_for_template_deployment=None,
                    enable_soft_delete=None,
                    no_self_perms=None,
                    tags=None):
    from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters
    from azure.cli.core._profile import Profile, CLOUD
    from azure.graphrbac.models import GraphErrorException
    profile = Profile()
    cred, _, tenant_id = profile.get_login_credentials(
        resource=CLOUD.endpoints.active_directory_graph_resource_id)

    graph_client = GraphRbacManagementClient(
        cred,
        tenant_id,
        base_url=CLOUD.endpoints.active_directory_graph_resource_id)
    subscription = profile.get_subscription()
    if no_self_perms:
        access_policies = []
    else:
        permissions = Permissions(keys=[KeyPermissions.get,
                                        KeyPermissions.create,
                                        KeyPermissions.delete,
                                        KeyPermissions.list,
                                        KeyPermissions.update,
                                        KeyPermissions.import_enum,
                                        KeyPermissions.backup,
                                        KeyPermissions.restore,
                                        KeyPermissions.recover],
                                  secrets=[
                                      SecretPermissions.get,
                                      SecretPermissions.list,
                                      SecretPermissions.set,
                                      SecretPermissions.delete,
                                      SecretPermissions.backup,
                                      SecretPermissions.restore,
                                      SecretPermissions.recover],
                                  certificates=[
                                      CertificatePermissions.get,
                                      CertificatePermissions.list,
                                      CertificatePermissions.delete,
                                      CertificatePermissions.create,
                                      CertificatePermissions.import_enum,
                                      CertificatePermissions.update,
                                      CertificatePermissions.managecontacts,
                                      CertificatePermissions.getissuers,
                                      CertificatePermissions.listissuers,
                                      CertificatePermissions.setissuers,
                                      CertificatePermissions.deleteissuers,
                                      CertificatePermissions.manageissuers,
                                      CertificatePermissions.recover],
                                  storage=[
                                      StoragePermissions.get,
                                      StoragePermissions.list,
                                      StoragePermissions.delete,
                                      StoragePermissions.set,
                                      StoragePermissions.update,
                                      StoragePermissions.regeneratekey,
                                      StoragePermissions.setsas,
                                      StoragePermissions.listsas,
                                      StoragePermissions.getsas,
                                      StoragePermissions.deletesas])
        try:
            object_id = _get_current_user_object_id(graph_client)
        except GraphErrorException:
            object_id = _get_object_id(graph_client, subscription=subscription)
        if not object_id:
            raise CLIError('Cannot create vault.\nUnable to query active directory for information '
                           'about the current user.\nYou may try the --no-self-perms flag to '
                           'create a vault without permissions.')
        access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
                                             object_id=object_id,
                                             permissions=permissions)]
    properties = VaultProperties(tenant_id=tenant_id,
                                 sku=Sku(name=sku),
                                 access_policies=access_policies,
                                 vault_uri=None,
                                 enabled_for_deployment=enabled_for_deployment,
                                 enabled_for_disk_encryption=enabled_for_disk_encryption,
                                 enabled_for_template_deployment=enabled_for_template_deployment,
                                 enable_soft_delete=enable_soft_delete)
    parameters = VaultCreateOrUpdateParameters(location=location,
                                               tags=tags,
                                               properties=properties)
    return client.create_or_update(resource_group_name=resource_group_name,
                                   vault_name=vault_name,
                                   parameters=parameters)
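The default access policy above grants the calling user a broad set of key, secret, certificate, and storage permissions. For comparison, a minimal hedged sketch of a narrower policy built from the same model classes, granting one principal read-only secret access (tenant_id and object_id are placeholders):

readonly = Permissions(secrets=[SecretPermissions.get, SecretPermissions.list])
access_policies = [AccessPolicyEntry(tenant_id=tenant_id,    # placeholder tenant GUID
                                     object_id=object_id,    # placeholder principal GUID
                                     permissions=readonly)]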
Example #30
def update_streaming_endpoint(instance,
                              tags=None,
                              cross_domain_policy=None,
                              client_access_policy=None,
                              description=None,
                              max_cache_age=None,
                              ips=None,
                              disable_cdn=None,
                              cdn_provider=None,
                              cdn_profile=None,
                              custom_host_names=None):

    if not instance:
        raise CLIError('The streaming endpoint resource was not found.')

    if ips is not None:
        is_ips_argument_empty = len(ips) == 1 and ips[0] == ""
        if is_ips_argument_empty:
            if instance.access_control is not None and instance.access_control.ip is not None:
                instance.access_control.ip = None
        else:
            if instance.access_control is None:
                instance.access_control = StreamingEndpointAccessControl()
            if instance.access_control.ip is None:
                instance.access_control.ip = IPAccessControl(allow=[])
            for ip in ips:
                instance.access_control.ip.allow.append(
                    create_ip_range(instance.name, ip))

    if instance.cross_site_access_policies is None:
        instance.cross_site_access_policies = CrossSiteAccessPolicies()

    if client_access_policy is not None:
        if not client_access_policy:
            instance.cross_site_access_policies.client_access_policy = None
        else:
            instance.cross_site_access_policies.client_access_policy = client_access_policy

    if cross_domain_policy is not None:
        if not cross_domain_policy:
            instance.cross_site_access_policies.cross_domain_policy = None
        else:
            instance.cross_site_access_policies.cross_domain_policy = cross_domain_policy

    if max_cache_age is not None:
        instance.max_cache_age = max_cache_age
    if tags is not None:
        instance.tags = tags
    if description is not None:
        instance.description = description
    if custom_host_names is not None:
        is_custom_host_names_argument_empty = len(
            custom_host_names) == 1 and custom_host_names[0] == ""
        if is_custom_host_names_argument_empty:
            instance.custom_host_names = []
        else:
            instance.custom_host_names = custom_host_names
    if cdn_provider is not None:
        instance.cdn_provider = cdn_provider
    if cdn_profile is not None:
        instance.cdn_profile = cdn_profile
    if cdn_provider is not None or cdn_profile is not None:
        if ips is None and instance.access_control is not None:
            instance.access_control = None
        instance.cdn_enabled = True

    if disable_cdn is not None:
        instance.cdn_enabled = not disable_cdn

    return instance
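update_streaming_endpoint relies on a sentinel convention for list arguments: None leaves the collection untouched, while a list containing a single empty string (e.g. --ips "") clears it. A minimal sketch of that convention in isolation:

def _is_clear_sentinel(values):
    # [""] means "explicitly clear"; None means "leave unchanged"
    return values is not None and len(values) == 1 and values[0] == ""

assert _is_clear_sentinel([""])
assert not _is_clear_sentinel(None)
assert not _is_clear_sentinel(["1.2.3.4"])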