def acr_token_create(cmd,
                     client,
                     registry_name,
                     token_name,
                     scope_map_name=None,
                     repository_actions_list=None,
                     status=None,
                     resource_group_name=None,
                     no_passwords=None,
                     expiration=None,
                     expiration_in_days=None):
    """Create an ACR token bound to a scope map, optionally generating default passwords.

    Exactly one of repository_actions_list (--repository) or scope_map_name
    (--scope-map) must be supplied. Password expiration options conflict with
    --no-passwords, and the two expiration forms conflict with each other.
    Returns the poller when no_passwords is set, otherwise the finished token
    with credentials attached.
    """
    from knack.log import get_logger
    from ._utils import get_resource_id_by_registry_name

    # Exactly one source for the scope map must be given.
    if bool(repository_actions_list) == bool(scope_map_name):
        raise CLIError("usage error: --repository | --scope-map")
    wants_expiration = expiration_in_days is not None or expiration is not None
    if no_passwords and wants_expiration:
        raise CLIError(
            "usage error: --no-passwords and expiration arguments are mutually exclusive."
        )
    if expiration_in_days is not None and expiration is not None:
        raise CLIError(
            "usage error: --expiration and --expiration-in-days are mutually exclusive."
        )

    resource_group_name = get_resource_group_name_by_registry_name(
        cmd.cli_ctx, registry_name, resource_group_name)
    logger = get_logger(__name__)

    # Resolve the scope map: either synthesize a default one from the given
    # repository actions, or point at an existing named scope map on the registry.
    if repository_actions_list:
        scope_map_id = _create_default_scope_map(cmd, resource_group_name, registry_name,
                                                 token_name, repository_actions_list, logger)
    else:
        registry_arm_id = get_resource_id_by_registry_name(cmd.cli_ctx, registry_name)
        scope_map_id = '{}/{}/{}'.format(registry_arm_id, SCOPE_MAPS, scope_map_name)

    Token = cmd.get_models('Token')
    poller = client.create(resource_group_name, registry_name, token_name,
                           Token(scope_map_id=scope_map_id, status=status))
    if no_passwords:
        # Caller opted out of credential generation: return the raw poller.
        return poller

    token = LongRunningOperation(cmd.cli_ctx)(poller)
    _create_default_passwords(cmd, resource_group_name, registry_name, token, logger,
                              expiration_in_days, expiration)
    return token
def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
    """Disable Azure Disk Encryption (ADE) on a VM scale set by re-issuing the
    ADE extension with EncryptionOperation=DisableEncryption.

    :param volume_type: disk volume type to decrypt; defaulted per-OS below.
    :param force: forwarded to the volume-type defaulting helper.
    :raises CLIError: when the scale set has no ADE extension installed.
    """
    UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models(
        'UpgradeMode', 'VirtualMachineScaleSetExtension')
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(
        resource_group_name, vmss_name)
    is_linux = _is_linux_os(vmss.virtual_machine_profile)
    # vm_extension_info: module-level table of ADE extension metadata keyed by OS family.
    extension = vm_extension_info['Linux' if is_linux else 'Windows']

    # 1. be nice, figure out the default volume type
    volume_type = _handles_default_volume_type_for_vmss_encryption(
        is_linux, volume_type, force)

    # 2. update the disk encryption extension
    public_config = {
        'VolumeType': volume_type,
        'EncryptionOperation': 'DisableEncryption',
    }
    ext = VirtualMachineScaleSetExtension(
        name=extension['name'],
        publisher=extension['publisher'],
        type=extension['name'],
        type_handler_version=extension['version'],
        settings=public_config,
        auto_upgrade_minor_version=True,
        # fresh tag forces the platform to re-run the extension handler
        force_update_tag=uuid.uuid4())
    if (not vmss.virtual_machine_profile.extension_profile or
            not vmss.virtual_machine_profile.extension_profile.extensions):
        extensions = []
    else:
        extensions = vmss.virtual_machine_profile.extension_profile.extensions

    # Locate the previously installed ADE extension; without one there is
    # nothing to decrypt.
    ade_extension = [x for x in extensions if
                     x.type.lower() == extension['name'].lower() and x.publisher.lower() == extension['publisher'].lower()]  # pylint: disable=line-too-long
    if not ade_extension:
        from knack.util import CLIError
        raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))

    # Replace the existing ADE extension entry in place with the disable config.
    index = vmss.virtual_machine_profile.extension_profile.extensions.index(
        ade_extension[0])
    vmss.virtual_machine_profile.extension_profile.extensions[index] = ext

    poller = compute_client.virtual_machine_scale_sets.create_or_update(
        resource_group_name, vmss_name, vmss)
    LongRunningOperation(cmd.cli_ctx)(poller)
    # Manual-upgrade scale sets require the user to roll instances themselves.
    _show_post_action_message(resource_group_name, vmss.name,
                              vmss.upgrade_policy.mode == UpgradeMode.manual, False)
def _execute_command(kwargs):
    # Dispatch a Key Vault CLI operation, translating SDK results and errors.
    # NOTE(review): `operation`, `transform_result` and `name` are free
    # variables here -- this appears to be a nested helper closing over an
    # enclosing command-execution scope; confirm against the surrounding code.
    from msrest.paging import Paged
    from msrest.exceptions import ValidationError, ClientRequestError
    from msrestazure.azure_operation import AzureOperationPoller
    from azure.cli.core._profile import Profile
    from azure.cli.command_modules.keyvault.keyvaultclient import \
        (KeyVaultClient, KeyVaultAuthentication)
    from azure.cli.command_modules.keyvault.keyvaultclient.generated import \
        (KeyVaultClient as BaseKeyVaultClient)
    from azure.cli.command_modules.keyvault.keyvaultclient.generated.models import \
        (KeyVaultErrorException)
    try:
        def get_token(server, resource, scope):  # pylint: disable=unused-argument
            # Challenge-auth callback: hand back the CLI's cached access token.
            return Profile().get_login_credentials(resource)[0]._token_retriever()  # pylint: disable=protected-access

        op = get_op_handler(operation)
        # since the convenience client can be inconvenient, we have to check and create the
        # correct client version
        if 'generated' in op.__module__:
            client = BaseKeyVaultClient(KeyVaultAuthentication(get_token))
        else:
            client = KeyVaultClient(KeyVaultAuthentication(get_token))  # pylint: disable=redefined-variable-type

        result = op(client, **kwargs)

        # apply results transform if specified
        if transform_result:
            return _encode_hex(transform_result(result))

        # otherwise handle based on return type of results
        if isinstance(result, AzureOperationPoller):
            # wait for the long-running operation to finish before encoding
            return _encode_hex(LongRunningOperation('Starting {}'.format(name))(result))
        elif isinstance(result, Paged):
            try:
                return _encode_hex(list(result))
            except TypeError:
                # TODO: Workaround for an issue in either KeyVault server-side or msrest
                # See https://github.com/Azure/autorest/issues/1309
                return []
        else:
            return _encode_hex(result)
    except (ValidationError, KeyVaultErrorException) as ex:
        # Prefer the server-provided message when the exception carries one.
        try:
            raise CLIError(ex.inner_exception.error.message)
        except AttributeError:
            raise CLIError(ex)
    except ClientRequestError as ex:
        if 'Failed to establish a new connection' in str(ex.inner_exception):
            raise CLIError('Max retries exceeded attempting to connect to vault. '
                           'Try flushing your DNS cache or try again later.')
        raise CLIError(ex)
def acr_create(cmd,
               client,
               registry_name,
               resource_group_name,
               sku,
               location=None,
               admin_enabled=False,
               default_action=None,
               tags=None,
               workspace=None,
               identity=None,
               key_encryption_key=None):
    """Create a managed container registry, optionally configuring CMK
    encryption and wiring diagnostic settings to a Log Analytics workspace.

    Returns the LRO poller, unless a workspace is requested — in that case
    the operation is awaited and the finished registry is returned.
    """
    # Network rules require a premium SKU; classic SKUs are retired entirely.
    if default_action and sku not in get_premium_sku(cmd):
        raise CLIError(NETWORK_RULE_NOT_SUPPORTED)
    if sku not in get_managed_sku(cmd):
        raise CLIError("Classic SKU is no longer supported. Please select a managed SKU.")

    Registry, Sku, NetworkRuleSet = cmd.get_models('Registry', 'Sku', 'NetworkRuleSet')
    registry = Registry(location=location, sku=Sku(name=sku),
                        admin_user_enabled=admin_enabled, tags=tags)
    if default_action:
        registry.network_rule_set = NetworkRuleSet(default_action=default_action)
    if identity or key_encryption_key:
        _configure_cmk(cmd, registry, resource_group_name, identity, key_encryption_key)

    lro_poller = client.create(resource_group_name, registry_name, registry)
    if not workspace:
        return lro_poller

    # Diagnostic settings need the finished registry, so wait on the LRO here.
    from msrestazure.tools import is_valid_resource_id, resource_id
    from azure.cli.core.commands import LongRunningOperation
    from azure.cli.core.commands.client_factory import get_subscription_id
    acr = LongRunningOperation(cmd.cli_ctx)(lro_poller)
    if not is_valid_resource_id(workspace):
        # Treat a bare name as a workspace in the registry's resource group.
        workspace = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                resource_group=resource_group_name,
                                namespace='microsoft.OperationalInsights',
                                type='workspaces',
                                name=workspace)
    _create_diagnostic_settings(cmd.cli_ctx, acr, workspace)
    return acr
def capture_vm(resource_group_name, vm_name, vhd_name_prefix,
               storage_container='vhds', overwrite=True):
    '''Captures the VM by copying its virtual hard disks, and prints a template
    that can be used to create similar VMs.
    :param str vhd_name_prefix: the VHD name prefix specify for the VM disks
    :param str storage_container: the storage account container name to save the disks
    :param str overwrite: overwrite the existing disk file
    '''
    compute = _compute_client_factory()
    capture_params = VirtualMachineCaptureParameters(vhd_name_prefix,
                                                     storage_container,
                                                     overwrite)
    capture_poller = compute.virtual_machines.capture(resource_group_name,
                                                      vm_name,
                                                      capture_params)
    capture_result = LongRunningOperation()(capture_poller)
    # Emit the generated template to stdout for the user to save.
    print(json.dumps(capture_result.output, indent=2))  # pylint: disable=no-member
def _vm_set(instance, lro_operation=None):
    '''Persist changes to the given Virtual Machine instance via a PUT.'''
    # Workaround for https://github.com/Azure/autorest/issues/934 -- the child
    # `resources` collection must not be round-tripped on update.
    instance.resources = None
    client = _compute_client_factory()
    parsed = _parse_rg_name(instance.id)
    poller = client.virtual_machines.create_or_update(resource_group_name=parsed[0],
                                                      vm_name=parsed[1],
                                                      parameters=instance)
    # Use the caller-provided waiter when given, otherwise the default one.
    waiter = lro_operation if lro_operation else LongRunningOperation()
    return waiter(poller)
def iot_dps_linked_hub_create(cmd, client, dps_name, resource_group_name, connection_string,
                              location, apply_allocation_policy=None, allocation_weight=None,
                              no_wait=False):
    """Link an IoT hub to a Device Provisioning Service and return the
    refreshed list of linked hubs (or the raw poller with no_wait)."""
    # Start from the current set of linked hubs and append the new link.
    hubs = list(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
    hubs.append(IotHubDefinitionDescription(connection_string, location,
                                            apply_allocation_policy, allocation_weight))

    dps = iot_dps_get(client, dps_name, resource_group_name)
    dps_property = IotDpsPropertiesDescription(None, None, hubs,
                                               dps.properties.allocation_policy,
                                               dps.properties.authorization_policies)
    dps_description = ProvisioningServiceDescription(dps.location, dps_property, dps.sku)

    create_or_update = client.iot_dps_resource.create_or_update
    if no_wait:
        return create_or_update(resource_group_name, dps_name, dps_description)
    LongRunningOperation(cmd.cli_ctx)(create_or_update(resource_group_name, dps_name,
                                                       dps_description))
    return iot_dps_linked_hub_list(client, dps_name, resource_group_name)
def delete_grafana(cmd, grafana_name, resource_group_name=None):
    """Delete a managed Grafana instance, then remove the role assignments
    that were granted to its managed identity.

    :param grafana_name: name of the Grafana resource to delete.
    :param resource_group_name: resource group containing the instance.
    """
    client = cf_amg(cmd.cli_ctx)
    # Fetch first so the managed-identity principal is still known after deletion.
    grafana = client.grafana.get(resource_group_name, grafana_name)

    # delete first
    poller = client.grafana.begin_delete(resource_group_name, grafana_name)
    LongRunningOperation(cmd.cli_ctx)(poller)

    # delete role assignment
    # Fix: the warning message was garbled ("was delete. ... for associated with").
    logger.warning(
        "Grafana instance of '%s' was deleted. Now removing role assignments "
        "associated with its managed identity", grafana_name)
    # Fix: guard against instances created without a system-assigned identity,
    # where `grafana.identity` is None and principal_id access would crash.
    if grafana.identity:
        _delete_role_assignment(cmd.cli_ctx, grafana.identity.principal_id)
def remove_vpn_server_config_ipsec_policy(cmd, resource_group_name,
                                          vpn_server_configuration_name, index, no_wait=False):
    """Remove the IPsec policy at `index` from a VPN server configuration and
    return the updated policy list (or the poller with no_wait)."""
    client = network_client_factory(cmd.cli_ctx).vpn_server_configurations
    config = client.get(resource_group_name, vpn_server_configuration_name)
    policies = config.vpn_client_ipsec_policies
    try:
        policies.pop(index)
    except IndexError:
        # List is unchanged on failure, so len() still reflects the valid range.
        raise CLIError('invalid index: {}. Index can range from 0 to {}'.format(
            index, len(policies) - 1))

    poller = sdk_no_wait(no_wait, client.create_or_update,
                         resource_group_name, vpn_server_configuration_name, config)
    if no_wait:
        return poller
    from azure.cli.core.commands import LongRunningOperation
    return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_ipsec_policies
def create_grafana(cmd, resource_group_name, grafana_name,
                   location=None, skip_system_assigned_identity=False,
                   skip_role_assignments=False, tags=None):
    """Create an Azure Managed Grafana instance and, unless skipped, grant the
    default role assignments: Grafana Admin for the current CLI user on the
    instance, and Monitoring Reader for the instance's managed identity on the
    subscription.
    """
    from azure.cli.core.commands.arm import resolve_role_id
    from azure.cli.core.commands import LongRunningOperation

    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    # Raw generic-resource payload -- the Grafana RP is addressed through the
    # generic resources API with a pinned preview API version.
    resource = {
        "sku": {
            "name": "standard"
        },
        "location": location,
        "identity": None if skip_system_assigned_identity else {
            "type": "SystemAssigned"
        },
        "tags": tags
    }
    poller = client.resources.begin_create_or_update(resource_group_name, "Microsoft.Dashboard",
                                                     "", "grafana", grafana_name,
                                                     "2021-09-01-preview", resource)
    if skip_role_assignments:
        return poller
    # Role assignments need the finished resource, so wait on the LRO.
    resource = LongRunningOperation(cmd.cli_ctx)(poller)

    logger.warning(
        "Grafana instance of '%s' was created. Now creating default role assignments for its "
        "managed identity and current CLI user", grafana_name)

    subscription_scope = '/subscriptions/' + client._config.subscription_id  # pylint: disable=protected-access

    # Current CLI user becomes an admin of the new instance.
    user_principal_id = _get_login_account_principal_id(cmd.cli_ctx)
    grafana_admin_role_id = resolve_role_id(cmd.cli_ctx, "Grafana Admin", subscription_scope)
    _create_role_assignment(cmd.cli_ctx, user_principal_id, grafana_admin_role_id, resource.id)

    if resource.identity:
        # Managed identity gets read access to monitoring data subscription-wide.
        monitoring_reader_role_id = resolve_role_id(cmd.cli_ctx, "Monitoring Reader",
                                                    subscription_scope)
        _create_role_assignment(cmd.cli_ctx, resource.identity.principal_id,
                                monitoring_reader_role_id, subscription_scope)
    return resource
def keyvault_command_handler(command_args):
    # Execute a Key Vault command: resolve the operation, inject the client,
    # invoke it, then normalize the result (transform, LRO wait, paging,
    # hex-encoding) and route errors through the keyvault exception handler.
    # NOTE(review): `merged_kwargs`, `operations_tmpl`, `method_name`, `kwargs`,
    # `name` and `self` are free variables -- this looks like a nested handler
    # defined inside a command-loader method; confirm against the enclosing code.
    from azure.cli.core.util import get_arg_list
    from azure.cli.core.commands.client_factory import resolve_client_arg_name
    from msrest.paging import Paged
    from azure.cli.core.util import poller_classes

    op = get_op_handler()
    op_args = get_arg_list(op)
    command_type = merged_kwargs.get('command_type', None)
    client_factory = command_type.settings.get('client_factory', None) if command_type \
        else merged_kwargs.get('client_factory', None)

    # Inject the client only when the operation actually declares the argument.
    client_arg_name = resolve_client_arg_name(
        operations_tmpl.format(method_name), kwargs)
    if client_arg_name in op_args:
        client = client_factory(self.command_loader.cli_ctx, command_args)
        command_args[client_arg_name] = client
    if 'cmd' not in op_args:
        command_args.pop('cmd')

    try:
        result = op(**command_args)

        # apply results transform if specified
        transform_result = merged_kwargs.get('transform', None)
        if transform_result:
            return _encode_hex(transform_result(result, **command_args))

        # otherwise handle based on return type of results
        if isinstance(result, poller_classes()):
            # wait for the long-running operation to complete before encoding
            return _encode_hex(
                LongRunningOperation(
                    self.command_loader.cli_ctx, 'Starting {}'.format(name))(result))
        if isinstance(result, Paged):
            try:
                return _encode_hex(list(result))
            except TypeError:
                # TODO: Workaround for an issue in either KeyVault server-side or msrest
                # See https://github.com/Azure/autorest/issues/1309
                return []
        return _encode_hex(result)
    except Exception as ex:  # pylint: disable=broad-except
        if name == 'show':
            # show_exception_handler needs to be called before the keyvault_exception_handler
            from azure.cli.core.commands.arm import show_exception_handler
            try:
                show_exception_handler(ex)
            except Exception:  # pylint: disable=broad-except
                pass
        return keyvault_exception_handler(self.command_loader, ex)
def create_ase_inbound_services(cmd, resource_group_name, name, subnet, vnet_name=None,
                                skip_dns=False):
    """Create inbound networking for an App Service Environment: a private
    endpoint for ASEv3 or a VIP lookup for internal ASEv2, plus (optionally)
    the private DNS zone records pointing at the inbound IP.

    :raises ResourceNotFoundError: when the ASE does not exist.
    :raises ValidationError: for an external ASEv2, where a private DNS zone
        is not relevant.
    """
    ase_client = _get_ase_client_factory(cmd.cli_ctx)
    ase = ase_client.get(resource_group_name, name)
    if not ase:
        raise ResourceNotFoundError("App Service Environment '{}' not found.".format(name))

    inbound_subnet_id = _validate_subnet_id(cmd.cli_ctx, subnet, vnet_name, resource_group_name)
    inbound_vnet_id = _get_vnet_id_from_subnet(cmd.cli_ctx, inbound_subnet_id)

    if ase.kind.lower() == 'asev3':
        # ASEv3: inbound access goes through a private endpoint in the subnet,
        # which requires private-endpoint network policies to be disabled.
        _ensure_subnet_private_endpoint_network_policy(cmd.cli_ctx, inbound_subnet_id, False)
        network_client = _get_network_client_factory(cmd.cli_ctx)
        pls_connection = PrivateLinkServiceConnection(private_link_service_id=ase.id,
                                                      group_ids=['hostingEnvironments'],
                                                      request_message='Link from CLI',
                                                      name='{}-private-connection'.format(name))
        private_endpoint = PrivateEndpoint(location=ase.location, tags=None,
                                           subnet=Subnet(id=inbound_subnet_id))
        private_endpoint.private_link_service_connections = [pls_connection]
        poller = network_client.private_endpoints.begin_create_or_update(
            resource_group_name, '{}-private-endpoint'.format(name), private_endpoint)
        # Fix: use the LRO result directly instead of waiting on the poller and
        # then calling poller.result() a second time.
        ase_pe = LongRunningOperation(cmd.cli_ctx)(poller)
        # The endpoint's NIC holds the private IP the DNS records must target.
        nic_name = parse_resource_id(ase_pe.network_interfaces[0].id)['name']
        nic = network_client.network_interfaces.get(resource_group_name, nic_name)
        inbound_ip_address = nic.ip_configurations[0].private_ip_address
    elif ase.kind.lower() == 'asev2':
        if ase.internal_load_balancing_mode == 0:
            raise ValidationError('Private DNS Zone is not relevant for External ASEv2.')
        ase_vip_info = ase_client.get_vip_info(resource_group_name, name)
        inbound_ip_address = ase_vip_info.internal_ip_address
    # NOTE(review): any other `kind` value would leave inbound_ip_address
    # unset and fail below -- presumably only asev2/asev3 occur; confirm.

    if not skip_dns:
        _ensure_ase_private_dns_zone(cmd.cli_ctx, resource_group_name=resource_group_name,
                                     name=name, inbound_vnet_id=inbound_vnet_id,
                                     inbound_ip_address=inbound_ip_address)
def standby(cmd, client, resource_group_name, account_name, live_event_name, no_wait=False):
    """Allocate (stand by) a live event, then return its refreshed state.
    With no_wait, return the poller immediately."""
    if no_wait:
        return sdk_no_wait(no_wait, client.begin_allocate,
                           resource_group_name, account_name, live_event_name)
    allocate_poller = client.begin_allocate(resource_group_name, account_name, live_event_name)
    LongRunningOperation(cmd.cli_ctx)(allocate_poller)
    return client.get(resource_group_name, account_name, live_event_name)
def _deploy_arm_template_core(cmd, resource_group_name, template, parameters,
                              deployment_name=None, mode='incremental',
                              validate_only=False, no_wait=False):
    """Validate or deploy an ARM template into a resource group, waiting on
    the operation unless no_wait is set."""
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    Deployment = cmd.get_models(
        'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=DeploymentProperties(
        template=template, template_link=None, parameters=parameters, mode=mode))
    client = resource_client_factory(cmd.cli_ctx)

    if validate_only:
        # Newer API versions expose validation as a long-running operation.
        if cmd.supported_api_version(min_api='2019-10-01',
                                     resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validate_poll = sdk_no_wait(no_wait, client.deployments.begin_validate,
                                        resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validate_poll)
        return sdk_no_wait(no_wait, client.deployments.validate,
                           resource_group_name, deployment_name, deployment)

    deploy_poll = sdk_no_wait(no_wait, client.deployments.begin_create_or_update,
                              resource_group_name, deployment_name, deployment)
    return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
def acr_create(registry_name,  #pylint: disable=too-many-arguments
               resource_group_name,
               location,
               storage_account_name=None,
               admin_enabled='false'):
    '''Creates a container registry.
    :param str registry_name: The name of container registry
    :param str resource_group_name: The name of resource group
    :param str location: The name of location
    :param str storage_account_name: The name of storage account
    :param str admin_enabled: Indicates whether the admin user is enabled
        (string 'true'/'false', not a bool)
    '''
    client = get_acr_service_client().registries
    # admin_enabled arrives as a string; anything other than 'true' is False.
    admin_user_enabled = admin_enabled == 'true'
    if storage_account_name is None:
        # No storage account supplied: deploy one (plus the registry) via an
        # ARM template, then read back the created registry.
        storage_account_name = random_storage_account_name(registry_name)
        LongRunningOperation()(
            arm_deploy_template(resource_group_name,
                                registry_name,
                                location,
                                storage_account_name,
                                admin_user_enabled)
        )
        registry = client.get_properties(resource_group_name, registry_name)
    else:
        # Reuse the caller's storage account directly.
        storage_account_key = get_access_key_by_storage_account_name(storage_account_name)
        registry = client.create_or_update(
            resource_group_name, registry_name,
            Registry(
                location=location,
                storage_account=StorageAccountProperties(
                    storage_account_name,
                    storage_account_key
                ),
                admin_user_enabled=admin_user_enabled
            )
        )
    # Print follow-up commands for granting access to the new registry.
    logger.warning('\nCreate a new service principal and assign access:')
    logger.warning(
        ' az ad sp create-for-rbac --scopes %s --role Owner --password <password>',
        registry.id)  #pylint: disable=no-member
    logger.warning('\nUse an existing service principal and assign access:')
    logger.warning(
        ' az role assignment create --scope %s --role Owner --assignee <app-id>',
        registry.id)  #pylint: disable=no-member
    return registry
def acr_create(registry_name,  #pylint: disable=too-many-arguments
               resource_group_name,
               location,
               storage_account_name=None,
               enable_admin=False):
    '''Create a container registry.
    :param str registry_name: The name of container registry
    :param str resource_group_name: The name of resource group
    :param str location: The name of location
    :param str storage_account_name: The name of storage account
    :param bool enable_admin: Enable admin user
    '''
    client = get_acr_service_client().registries
    if storage_account_name is None:
        # No storage account supplied: generate a name and provision the
        # account together with the registry via an ARM template deployment.
        storage_account_name = str(uuid.uuid4()).replace('-', '')[:24]
        LongRunningOperation()(
            arm_deploy_template(resource_group_name,
                                registry_name,
                                location,
                                storage_account_name,
                                enable_admin)
        )
    else:
        # Reuse the caller's storage account directly.
        storage_account_key = get_access_key_by_storage_account_name(storage_account_name)
        registry = client.create_or_update(
            resource_group_name, registry_name,
            Registry(
                location=location,
                storage_account=StorageAccountProperties(
                    storage_account_name,
                    storage_account_key
                ),
                admin_user_enabled=enable_admin
            )
        )
    # NOTE(review): this unconditional re-read makes the create_or_update
    # return value above dead; both branches end up returning the GET result.
    registry = client.get_properties(resource_group_name, registry_name)
    # Print follow-up commands for granting access to the new registry.
    logger.warning('\nCreate a new service principal and assign access:')
    logger.warning(
        ' az ad sp create-for-rbac --scopes %s --role Owner --password <password>',
        registry.id)  #pylint: disable=no-member
    logger.warning('\nUse an existing service principal and assign access:')
    logger.warning(
        ' az role assignment create --scope %s --role Owner --assignee <app-id>',
        registry.id)  #pylint: disable=no-member
    return registry
def deploy_arm_template(
        cmd, resource_group_name,  # pylint: disable=too-many-arguments
        template_file=None, deployment_name=None, parameters=None, mode=None):
    """Deploy an ARM template file into a resource group and wait for it.

    :param template_file: path to the ARM template JSON on disk.
    :param deployment_name: name to give the deployment resource.
    :param parameters: deployment parameters, normalized via __process_parameters.
    :param mode: ARM deployment mode (e.g. 'Incremental').
    :return: the completed deployment (LRO awaited).
    """
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES,
    )
    # get_file_json() can return None or raise for a bad file; a None template
    # fails fast on the subscript below.
    template = get_file_json(template_file, preserve_order=True)
    # Ensure the template always carries a 'resources' section.
    template['resources'] = template.get('resources', [])

    parameters = BotTemplateDeployer.__process_parameters(parameters) or {}

    # Round-trip through JSON to normalize everything to plain dict/list values.
    template = json.loads(json.dumps(template))
    parameters = json.loads(json.dumps(parameters))

    properties = DeploymentProperties(template=template, template_link=None,
                                      parameters=parameters, mode=mode)

    resource_mgmt_client = get_mgmt_service_client(
        cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
    if cmd.supported_api_version(
            min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        # Newer API versions wrap the properties in a Deployment model.
        Deployment = cmd.get_models(
            'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        deployment_poller = resource_mgmt_client.create_or_update(
            resource_group_name, deployment_name, deployment)
    else:
        deployment_poller = resource_mgmt_client.create_or_update(
            resource_group_name, deployment_name, properties)

    # Fix: typo in the user-facing progress message ('Tempalte' -> 'Template').
    return LongRunningOperation(
        cmd.cli_ctx, 'Deploying ARM Template')(deployment_poller)
def stop(cmd, client, resource_group_name, account_name, streaming_endpoint_name, no_wait=False):
    """Stop a streaming endpoint, then return its refreshed state.
    With no_wait, return the poller immediately."""
    if no_wait:
        return sdk_no_wait(no_wait, client.begin_stop,
                           resource_group_name, account_name, streaming_endpoint_name)
    stop_poller = client.begin_stop(resource_group_name, account_name, streaming_endpoint_name)
    LongRunningOperation(cmd.cli_ctx)(stop_poller)
    return client.get(resource_group_name, account_name, streaming_endpoint_name)
def _create_default_passwords(cmd, resource_group_name, registry_name, token, logger,
                              expiration_in_days, expiration):
    """Generate both passwords for a freshly created token, attach the
    resulting credentials to it, and warn the user to store them safely."""
    from ._client_factory import cf_acr_token_credentials, cf_acr_registries

    cred_client = cf_acr_token_credentials(cmd.cli_ctx)
    poller = acr_token_credential_generate(cmd, cred_client, registry_name, token.name,
                                           password1=True, password2=True,
                                           resource_group_name=resource_group_name,
                                           expiration_in_days=expiration_in_days,
                                           expiration=expiration)
    credentials = LongRunningOperation(cmd.cli_ctx)(poller)
    # Copy the generated credentials onto the token object for the caller.
    token.credentials.username = credentials.username
    token.credentials.passwords = credentials.passwords

    registry_client = cf_acr_registries(cmd.cli_ctx)
    login_server = registry_client.get(resource_group_name, registry_name).login_server
    logger.warning('Please store your generated credentials safely. Meanwhile you can use it through'
                   ' "docker login %s -u %s -p %s".',
                   login_server, token.credentials.username, token.credentials.passwords[0].value)
def sqlvm_remove_from_group(client, cmd, sql_virtual_machine_name, resource_group_name):
    '''
    Removes a SQL virtual machine from a group.
    '''
    sqlvm = client.get(resource_group_name, sql_virtual_machine_name)
    # Detach from the group by clearing both group-linkage fields.
    sqlvm.sql_virtual_machine_group_resource_id = None
    sqlvm.wsfc_domain_credentials = None

    # PUT the change, wait for it, then GET so the caller sees the updated instance.
    update_poller = sdk_no_wait(False, client.create_or_update,
                                resource_group_name, sql_virtual_machine_name, sqlvm)
    LongRunningOperation(cmd.cli_ctx)(update_poller)
    return client.get(resource_group_name, sql_virtual_machine_name)
def _create_nsg_rule(cli_ctx, resource_group_name, network_security_group_name,
                     security_rule_name, priority, description=None, protocol=None,
                     access=None, direction=None, source_port_range='*',
                     source_address_prefix='*', destination_port_range=80,
                     destination_address_prefix='*'):
    """Create (or overwrite) a single security rule on an NSG and wait for it."""
    rule = SecurityRule(protocol=protocol,
                        source_address_prefix=source_address_prefix,
                        destination_address_prefix=destination_address_prefix,
                        access=access,
                        direction=direction,
                        description=description,
                        source_port_range=source_port_range,
                        destination_port_range=destination_port_range,
                        priority=priority,
                        name=security_rule_name)
    network_client = _get_network_client_factory(cli_ctx)
    rule_poller = network_client.security_rules.begin_create_or_update(
        resource_group_name, network_security_group_name, security_rule_name, rule)
    LongRunningOperation(cli_ctx)(rule_poller)
def iot_dps_linked_hub_delete(cmd, client, dps_name, resource_group_name, linked_hub,
                              no_wait=False):
    """Unlink an IoT hub from a Device Provisioning Service and return the
    refreshed list of linked hubs (or the raw poller with no_wait).

    :raises CLIError: when `linked_hub` is not currently linked.
    """
    dps_linked_hubs = []
    dps_linked_hubs.extend(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
    if not _is_linked_hub_existed(dps_linked_hubs, linked_hub):
        # Fix: grammar of the user-facing error message ("doesn't existed").
        raise CLIError("Linked hub {0} doesn't exist.".format(linked_hub))

    # Rebuild the DPS description with the hub filtered out (case-insensitive).
    updated_hub = [p for p in dps_linked_hubs if p.name.lower() != linked_hub.lower()]
    dps = iot_dps_get(client, dps_name, resource_group_name)
    dps_property = IotDpsPropertiesDescription(None, None, updated_hub,
                                               dps.properties.allocation_policy,
                                               dps.properties.authorization_policies)
    dps_description = ProvisioningServiceDescription(dps.location, dps_property, dps.sku)

    if no_wait:
        return client.iot_dps_resource.create_or_update(resource_group_name, dps_name,
                                                        dps_description)
    LongRunningOperation(cmd.cli_ctx)(
        client.iot_dps_resource.create_or_update(resource_group_name, dps_name, dps_description))
    return iot_dps_linked_hub_list(client, dps_name, resource_group_name)
def delete_service_correlation(cmd, client, resource_group_name, cluster_name,
                               application_name, service_name, correlated_service_name):
    """Remove a correlation-scheme entry from a service and return the
    updated service once the operation completes."""
    service = client.services.get(resource_group_name, cluster_name,
                                  application_name, service_name)
    # Drop the matching entry from the service's correlation-scheme collection.
    delete_from_collection(service.properties, 'correlation_scheme',
                           'service_name', correlated_service_name)
    # Push the modified service back and wait for the update to finish.
    update_poller = client.services.begin_create_or_update(
        resource_group_name, cluster_name, application_name, service_name, service)
    return LongRunningOperation(cmd.cli_ctx)(update_poller)
def iot_dps_access_policy_delete(cmd, client, dps_name, resource_group_name,
                                 access_policy_name, no_wait=False):
    """Delete a shared-access policy from a Device Provisioning Service and
    return the refreshed policy list (or the raw poller with no_wait).

    :raises CLIError: when `access_policy_name` does not exist on the DPS.
    """
    dps_access_policies = []
    dps_access_policies.extend(iot_dps_access_policy_list(client, dps_name, resource_group_name))
    if not _is_policy_existed(dps_access_policies, access_policy_name):
        # Fix: grammar of the user-facing error message ("doesn't existed").
        raise CLIError("Access policy {0} doesn't exist.".format(access_policy_name))

    # Rebuild the DPS description with the policy filtered out (case-insensitive).
    updated_policies = [p for p in dps_access_policies
                        if p.key_name.lower() != access_policy_name.lower()]
    dps = iot_dps_get(client, dps_name, resource_group_name)
    dps_property = IotDpsPropertiesDescription(None, None, dps.properties.iot_hubs,
                                               dps.properties.allocation_policy,
                                               updated_policies)
    dps_description = ProvisioningServiceDescription(dps.location, dps_property, dps.sku)

    if no_wait:
        return client.iot_dps_resource.create_or_update(resource_group_name, dps_name,
                                                        dps_description)
    LongRunningOperation(cmd.cli_ctx)(
        client.iot_dps_resource.create_or_update(resource_group_name, dps_name, dps_description))
    return iot_dps_access_policy_list(client, dps_name, resource_group_name)
def sqlvm_group_create(client, cmd, sql_virtual_machine_group_name, resource_group_name,
                       sql_image_offer, sql_image_sku, domain_fqdn, cluster_operator_account,
                       sql_service_account, storage_account_url, storage_account_key=None,
                       location=None, cluster_bootstrap_account=None,
                       file_share_witness_path=None, ou_path=None, tags=None):
    '''
    Creates a SQL virtual machine group.
    '''
    tags = tags or {}
    # Prompt interactively when the storage key was not given on the command line.
    if not storage_account_key:
        storage_account_key = prompt_pass('Storage Key: ', confirm=True)

    # Windows Server Failover Cluster domain profile for the group.
    wsfc_profile = WsfcDomainProfile(
        domain_fqdn=domain_fqdn,
        ou_path=ou_path,
        cluster_bootstrap_account=cluster_bootstrap_account,
        cluster_operator_account=cluster_operator_account,
        sql_service_account=sql_service_account,
        file_share_witness_path=file_share_witness_path,
        storage_account_url=storage_account_url,
        storage_account_primary_key=storage_account_key)

    group = SqlVirtualMachineGroup(
        sql_image_offer=sql_image_offer,
        sql_image_sku=sql_image_sku,
        wsfc_domain_profile=wsfc_profile,
        location=location,
        tags=tags)

    # PUT the group, wait for it, then GET so the caller sees the created instance.
    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(
        False, client.create_or_update, resource_group_name,
        sql_virtual_machine_group_name, group))
    return client.get(resource_group_name, sql_virtual_machine_group_name)
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters,
                       validate, no_wait, subscription_id=None):
    """Validate or kick off an incremental ARM deployment in the given
    resource group; validation waits, deployment honors no_wait."""
    DeploymentProperties = cmd.get_models(
        "DeploymentProperties", resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters,
                                      mode="incremental")
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments

    Deployment = cmd.get_models("Deployment",
                                resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)

    if validate:
        # Dump the template at info level so --verbose/--debug users can inspect it.
        logger.info("==== BEGIN TEMPLATE ====")
        logger.info(json.dumps(template, indent=2))
        logger.info("==== END TEMPLATE ====")
        # Newer API versions expose validation as a long-running operation.
        if cmd.supported_api_version(min_api="2019-10-01",
                                     resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = smc.begin_validate(resource_group_name, deployment_name,
                                                   deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return smc.validate(resource_group_name, deployment_name, deployment)

    return sdk_no_wait(no_wait, smc.begin_create_or_update,
                       resource_group_name, deployment_name, deployment)
def create_new_acr(cmd, registry_name, resource_group_name, location=None, sku="Basic"):
    """Create a new Azure Container Registry (admin user enabled) and wait for
    it to finish provisioning."""
    from azure.cli.command_modules.acr._client_factory import cf_acr_registries
    from azure.cli.core.profiles import ResourceType
    from azure.cli.core.commands import LongRunningOperation

    acr_client = cf_acr_registries(cmd.cli_ctx)
    Registry, Sku = cmd.get_models('Registry', 'Sku',
                                   resource_type=ResourceType.MGMT_CONTAINERREGISTRY,
                                   operation_group="registries")
    registry = Registry(location=location,
                        sku=Sku(name=sku),
                        admin_user_enabled=True,
                        zone_redundancy=None,
                        tags=None)
    create_poller = acr_client.begin_create(resource_group_name, registry_name, registry)
    return LongRunningOperation(cmd.cli_ctx)(create_poller)
def _deploy_arm_template_core(cli_ctx, resource_group_name, deployment_name, template,
                              parameters):
    """Deploy an ARM template in incremental mode and block until it completes."""
    from azure.mgmt.resource.resources.models import DeploymentProperties
    from azure.cli.core.commands import LongRunningOperation

    props = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    deploy_poll = resource_client_factory(cli_ctx).deployments.create_or_update(
        resource_group_name, deployment_name, props, raw=False)
    return LongRunningOperation(cli_ctx)(deploy_poll)
def reset(cmd, client, resource_group_name, account_name, live_event_name, no_wait=False):
    """Reset a live event, then return its refreshed state.
    With no_wait, return the operation handle immediately."""
    if no_wait:
        return sdk_no_wait(no_wait, client.reset,
                           resource_group_name, account_name, live_event_name)
    reset_poller = client.reset(resource_group_name, account_name, live_event_name)
    LongRunningOperation(cmd.cli_ctx)(reset_poller)
    return client.get(resource_group_name, account_name, live_event_name)
def sqlvm_aglistener_create(client, cmd, availability_group_listener_name,
                            sql_virtual_machine_group_name, resource_group_name,
                            availability_group_name, ip_address, subnet_resource_id,
                            load_balancer_resource_id, probe_port,
                            sql_virtual_machine_instances, port=1433,
                            public_ip_address_resource_id=None, vnet_name=None):
    '''
    Creates an availability group listener
    '''
    # vnet_name is accepted purely for argument validation; it is not used here.
    vnet_name = vnet_name

    # Private IP for the listener; attach the subnet only when it is a real resource id.
    private_ip = PrivateIPAddress(
        ip_address=ip_address,
        subnet_resource_id=subnet_resource_id if is_valid_resource_id(subnet_resource_id) else None)

    # Load-balancer wiring for the listener.
    lb_config = LoadBalancerConfiguration(
        private_ip_address=private_ip,
        public_ip_address_resource_id=public_ip_address_resource_id,
        load_balancer_resource_id=load_balancer_resource_id,
        probe_port=probe_port,
        sql_virtual_machine_instances=sql_virtual_machine_instances)

    listener = AvailabilityGroupListener(
        availability_group_name=availability_group_name,
        load_balancer_configurations=[lb_config],
        port=port)

    # PUT the listener, wait for it, then GET so the caller sees the created resource.
    LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(
        False, client.create_or_update, resource_group_name,
        sql_virtual_machine_group_name, availability_group_listener_name, listener))
    return client.get(resource_group_name, sql_virtual_machine_group_name,
                      availability_group_listener_name)