def build_export_policy_rules(export_policy=None):
    """Deserialize a JSON export-policy string into ExportPolicyRule objects.

    Args:
        export_policy (str): JSON-encoded list of rule dicts; each dict may
            contain rule_index, unix_read_only, unix_read_write, cifs, nfsv3,
            nfsv4 and allowed_clients. Missing keys default to None.
            None/empty input yields an empty list.

    Returns:
        list: ExportPolicyRule objects, one per entry in the policy.
    """
    rules = []
    if export_policy:
        # dict.get() returns None for absent keys, matching the previous
        # `x if k in ep else None` behavior. The loop variable no longer
        # shadows the export_policy parameter.
        for ep in json.loads(export_policy):
            rules.append(ExportPolicyRule(
                rule_index=ep.get('rule_index'),
                unix_read_only=ep.get('unix_read_only'),
                unix_read_write=ep.get('unix_read_write'),
                cifs=ep.get('cifs'),
                nfsv3=ep.get('nfsv3'),
                nfsv4=ep.get('nfsv4'),
                allowed_clients=ep.get('allowed_clients')))
    return rules
def create_volume(cmd, client, account_name, pool_name, volume_name, resource_group_name, location, file_path, usage_threshold, vnet, subnet='default', service_level=None, protocol_types=None, volume_type=None, endpoint_type=None, replication_schedule=None, remote_volume_resource_id=None, tags=None, snapshot_id=None):
    """Create an Azure NetApp Files volume.

    The vnet and subnet arguments accept either a bare name or a full ARM
    resource id. When NFSv4.1 is among the requested protocol types, a
    default export-policy rule is attached (the RP would otherwise only
    create an NFSv3 default; export policy is not directly settable at
    creation in the CLI, only via the add export policy subcommand).
    DataProtection volumes get a replication component built from the
    endpoint/schedule/remote-volume arguments.
    """
    subs_id = get_subscription_id(cmd.cli_ctx)

    # vnet may be supplied as a name or as an ARM resource id.
    if is_valid_resource_id(vnet):
        vnet = parse_resource_id(vnet)['resource_name']

    # Subnet defaults to the volume's resource group unless given by id.
    subnet_rg = resource_group_name
    if is_valid_resource_id(subnet):
        subnet_parts = parse_resource_id(subnet)
        subnet = subnet_parts['resource_name']
        subnet_rg = subnet_parts['resource_group']

    # Build an NFSv4.1 export policy only when that protocol was requested.
    volume_export_policy = None
    if protocol_types is not None and "NFSv4.1" in protocol_types:
        nfsv41_rule = ExportPolicyRule(rule_index=1,
                                       unix_read_only=False,
                                       unix_read_write=True,
                                       cifs=False,
                                       nfsv3=False,
                                       nfsv41=True,
                                       allowed_clients="0.0.0.0/0")
        volume_export_policy = VolumePropertiesExportPolicy(rules=[nfsv41_rule])

    # Data-protection volumes carry a replication object.
    data_protection = None
    if volume_type == "DataProtection":
        data_protection = VolumePropertiesDataProtection(
            replication=ReplicationObject(
                endpoint_type=endpoint_type,
                replication_schedule=replication_schedule,
                remote_volume_resource_id=remote_volume_resource_id))

    subnet_id = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s" % (subs_id, subnet_rg, vnet, subnet)
    body = Volume(
        usage_threshold=int(usage_threshold) * gib_scale,
        creation_token=file_path,
        service_level=service_level,
        location=location,
        subnet_id=subnet_id,
        protocol_types=protocol_types,
        export_policy=volume_export_policy,
        volume_type=volume_type,
        data_protection=data_protection,
        tags=tags,
        snapshot_id=snapshot_id)
    return client.create_or_update(body, resource_group_name, account_name, pool_name, volume_name)
def add_export_policy_rule(cmd, instance, allowed_clients, rule_index, unix_read_only, unix_read_write, cifs, nfsv3, nfsv41):
    """Prepend a new export-policy rule to the volume's existing rules.

    Builds an ExportPolicyRule from the supplied flags, places it ahead of
    the rules already on the volume, and returns a VolumePatch carrying the
    combined policy plus the volume's current service level and quota.
    """
    new_rule = ExportPolicyRule(
        rule_index=rule_index,
        unix_read_only=unix_read_only,
        unix_read_write=unix_read_write,
        cifs=cifs,
        nfsv3=nfsv3,
        nfsv41=nfsv41,
        allowed_clients=allowed_clients)

    # New rule first, then every rule already present on the volume.
    combined_rules = [new_rule]
    combined_rules.extend(instance.export_policy.rules)

    params = VolumePatch(
        export_policy=VolumePropertiesExportPolicy(rules=combined_rules),
        service_level=instance.service_level,
        usage_threshold=instance.usage_threshold)
    _update_mapper(instance, params, ['export_policy'])
    return params
def add_export_policy_rule(instance, allowed_clients, rule_index, unix_read_only, unix_read_write, cifs, nfsv3, nfsv41, kerberos5_r=None, kerberos5_rw=None, kerberos5i_r=None, kerberos5i_rw=None, kerberos5p_r=None, kerberos5p_rw=None, has_root_access=None, chown_mode=None):
    """Prepend a new export-policy rule (with Kerberos options) to a volume.

    Builds an ExportPolicyRule from the supplied access flags — including the
    kerberos5/5i/5p read/write options, root-access flag and chown mode —
    places it ahead of the rules already on the volume, and returns a
    VolumePatch carrying the combined policy plus the volume's current
    service level and quota.
    """
    new_rule = ExportPolicyRule(
        rule_index=rule_index,
        unix_read_only=unix_read_only,
        unix_read_write=unix_read_write,
        cifs=cifs,
        nfsv3=nfsv3,
        nfsv41=nfsv41,
        allowed_clients=allowed_clients,
        kerberos5_read_only=kerberos5_r,
        kerberos5_read_write=kerberos5_rw,
        kerberos5_i_read_only=kerberos5i_r,
        kerberos5_i_read_write=kerberos5i_rw,
        kerberos5_p_read_only=kerberos5p_r,
        kerberos5_p_read_write=kerberos5p_rw,
        has_root_access=has_root_access,
        chown_mode=chown_mode)

    # New rule first, followed by the volume's existing rules.
    combined_rules = [new_rule]
    combined_rules.extend(instance.export_policy.rules)

    params = VolumePatch(
        export_policy=VolumePropertiesExportPolicy(rules=combined_rules),
        service_level=instance.service_level,
        usage_threshold=instance.usage_threshold)
    _update_mapper(instance, params, ['export_policy'])
    return params
def get_export_policy_rules(self):
    """Build an export policy covering the requested protocol types.

    A policy (a single rule, index 1, open to 0.0.0.0/0 with unix
    read/write) is generated only when 'nfsv4.1' appears among
    self.parameters['protocol_types'] (case-insensitive); the cifs/nfsv3/
    nfsv41 flags on the rule are set from membership in that list.

    Returns:
        VolumePropertiesExportPolicy with one ExportPolicyRule, or None
        when protocol_types is absent or does not include NFSv4.1.
    """
    ptypes = self.parameters.get('protocol_types')
    if ptypes is None:
        return None
    ptypes = [x.lower() for x in ptypes]
    # Only create a policy when NFSv4.1 is used (for now).
    if 'nfsv4.1' not in ptypes:
        return None
    # ExportPolicyRule uses the 'nfsv41' spelling for the protocol flag.
    ptypes.append('nfsv41')
    options = dict(rule_index=1, allowed_clients='0.0.0.0/0',
                   unix_read_write=True)
    for protocol in ('cifs', 'nfsv3', 'nfsv41'):
        options[protocol] = protocol in ptypes
    # Note: the original ended with `if options: ... return None`, but
    # options is always non-empty here, so that fallback was dead code.
    return VolumePropertiesExportPolicy(rules=[ExportPolicyRule(**options)])
def create_volume(client, resource_group_name, anf_account_name, capacitypool_name, volume_name, volume_usage_quota, service_level, subnet_id, location, tags=None):
    """Create an NFSv4.1 volume within a capacity pool.

    The volume's service level must match the capacity pool's. A default
    export policy is attached: one rule (index 1) allowing NFSv4.1
    read/write access from 0.0.0.0/0, with CIFS and NFSv3 disabled.

    Args:
        client (NetAppManagementClient): Azure Resource Provider client for
            ANF resources.
        resource_group_name (string): Resource group where the volume will
            be created; must be the same as the account's.
        anf_account_name (string): Azure NetApp Files account holding the
            capacity pool.
        capacitypool_name (string): Capacity pool the volume is created in.
        volume_name (string): Volume name (also used as the creation token).
        volume_usage_quota (long): Volume size in bytes; minimum
            107374182400 (100GiB), maximum 109951162777600 (100TiB).
        service_level (string): "Ultra", "Premium" or "Standard"; must match
            the capacity pool.
        subnet_id (string): Resource id of the subnet delegated to ANF
            volumes.
        location (string): Azure region short name; must match the account.
        tags (object): Optional key-value pairs to tag the resource,
            e.g. {'cc':'1234','dept':'IT'}. Defaults to None.

    Returns:
        Volume: The newly created volume resource.
    """
    default_rule = ExportPolicyRule(allowed_clients="0.0.0.0/0",
                                    cifs=False,
                                    nfsv3=False,
                                    nfsv41=True,
                                    rule_index=1,
                                    unix_read_only=False,
                                    unix_read_write=True)
    export_policies = VolumePropertiesExportPolicy(rules=[default_rule])

    volume_body = Volume(
        usage_threshold=volume_usage_quota,
        creation_token=volume_name,
        location=location,
        service_level=service_level,
        subnet_id=subnet_id,
        protocol_types=["NFSv4.1"],
        export_policy=export_policies,
        tags=tags)

    operation = client.volumes.begin_create_or_update(
        resource_group_name, anf_account_name, capacitypool_name,
        volume_name, volume_body)
    return operation.result()
def run_example():
    """Azure NetApp Files SDK management example.

    Walks the full CRUD lifecycle of ANF resources: account -> capacity
    pool -> volume -> snapshot -> volume-from-snapshot, then performs
    updates (pool resize, volume resize + export-policy patch), retrieval
    operations (list/get at every level) and, when SHOULD_CLEANUP is set,
    deletes everything in reverse hierarchical order.
    """
    print("Azure NetAppFiles Python SDK Sample")
    print("Sample project that performs CRUD management operations with Azure NetApp Files SDK with Python")
    print("-----------------------------------------------------------------------------------------------")

    # Creating the Azure NetApp Files Client with an Application
    # (service principal) token provider
    credentials, subscription_id = sample_utils.get_credentials()
    anf_client = AzureNetAppFilesManagementClient(
        credentials, subscription_id)

    # Creating an Azure NetApp Account
    console_output('Creating Azure NetApp Files account ...')
    account = None
    try:
        account = create_account(
            anf_client, RESOURCE_GROUP_NAME, ANF_ACCOUNT_NAME, LOCATION)
        console_output(
            '\tAccount successfully created, resource id: {}'.format(
                account.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Creating a Capacity Pool
    console_output('Creating Capacity Pool ...')
    capacity_pool = None
    try:
        capacity_pool = create_capacitypool_async(
            anf_client, RESOURCE_GROUP_NAME, account.name, CAPACITYPOOL_NAME,
            CAPACITYPOOL_SERVICE_LEVEL, CAPACITYPOOL_SIZE, LOCATION)
        console_output('\tCapacity Pool successfully created, resource id: {}'
                       .format(capacity_pool.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Creating a Volume
    #
    # Note: With exception of Accounts, all resources with Name property
    # returns a relative path up to the name and to use this property in
    # other methods, like Get for example, the argument needs to be
    # sanitized and just the actual name needs to be used (the hierarchy
    # needs to be cleaned up in the name).
    # Capacity Pool Name property example: "pmarques-anf01/pool01"
    # "pool01" is the actual name that needs to be used instead. Below you
    # will see a sample function that parses the name from its
    # resource id: resource_uri_utils.get_anf_capacity_pool()
    console_output('Creating a Volume ...')
    subnet_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'\
        .format(subscription_id, VNET_RESOURCE_GROUP_NAME, VNET_NAME,
                SUBNET_NAME)
    volume = None
    try:
        pool_name = resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)
        volume = create_volume(anf_client, RESOURCE_GROUP_NAME, account.name,
                               pool_name, VOLUME_NAME, VOLUME_USAGE_QUOTA,
                               CAPACITYPOOL_SERVICE_LEVEL, subnet_id, LOCATION)
        console_output('\tVolume successfully created, resource id: {}'
                       .format(volume.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Creating a snapshot
    console_output('Creating a Snapshot ...')
    snapshot = None
    try:
        # NOTE(review): volume_name is assigned but never used below — the
        # VOLUME_NAME constant is passed to create_snapshot instead.
        volume_name = resource_uri_utils.get_anf_volume(volume.id)
        snapshot = create_snapshot(anf_client, RESOURCE_GROUP_NAME,
                                   account.name, pool_name, VOLUME_NAME,
                                   SNAPSHOT_NAME, LOCATION)
        sample_utils.wait_for_anf_resource(anf_client, snapshot.id)
        console_output(
            '\tSnapshot successfully created, resource id: {}'
            .format(snapshot.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Creating a new volume from snapshot
    #
    # Note: SnapshotId is not the actual resource Id of the snapshot, this
    # value is the unique identifier (guid) of the snapshot, represented
    # by the SnapshotId instead.
    console_output('Creating New Volume from Snapshot ...')
    volume_from_snapshot = None
    try:
        new_volume_name = "Vol-{}".format(
            resource_uri_utils.get_anf_snapshot(snapshot.id))
        volume_from_snapshot = create_volume_from_snapshot(
            anf_client, RESOURCE_GROUP_NAME, account.name, pool_name, volume,
            snapshot.snapshot_id, new_volume_name)
        console_output('\tNew volume from snapshot successfully created, resource id: {}'.format(
            volume_from_snapshot.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Updating a Capacity Pool
    console_output('Performing updates on Capacity Pool and Volume...')
    new_capacity_pool_size_tib = 10
    console_output('\tChanging Capacity Pools size from {}TiB to {}TiB'.format(
        sample_utils.get_bytes_in_tib(capacity_pool.size),
        new_capacity_pool_size_tib))
    try:
        capacity_pool_patch = CapacityPoolPatch(
            location=capacity_pool.location,
            service_level=capacity_pool.service_level,
            size=sample_utils.get_tib_in_bytes(new_capacity_pool_size_tib))
        capacity_pool = anf_client.pools.update(
            capacity_pool_patch, RESOURCE_GROUP_NAME, account.name,
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id))
        console_output('\t\tCapacity Pool successfully updated, new size {}TiB, resource id: {}'.format(
            sample_utils.get_bytes_in_tib(capacity_pool.size),
            capacity_pool.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Volume updates: resize and adding a new export policy
    new_volume_size_tib = 1
    console_output('\tChanging volume size from {}TiB to {}TiB'.format(
        sample_utils.get_bytes_in_tib(volume.usage_threshold),
        new_volume_size_tib))
    # Getting list of export policies and adding a new one at the end
    # (sorted descending so rule_list[0] holds the highest rule_index).
    rule_list = sorted(volume.export_policy.rules,
                       key=lambda r: r.rule_index,
                       reverse=True)
    # Currently, ANF's volume export policy supports up to 5 rules
    export_policies_patch = None
    if len(rule_list) <= 4:
        rule_list.append(ExportPolicyRule(
            allowed_clients="10.0.0.4/32",
            cifs=False,
            nfsv3=True,
            nfsv41=False,
            rule_index=rule_list[0].rule_index + 1,
            unix_read_only=False,
            unix_read_write=True))
        export_policies_patch = VolumePatchPropertiesExportPolicy(
            rules=rule_list)

    # Only include the export policy in the patch when a new rule was added.
    if export_policies_patch is None:
        volume_patch = VolumePatch(
            location=volume.location,
            service_level=volume.service_level,
            usage_threshold=sample_utils.get_tib_in_bytes(new_volume_size_tib))
    else:
        volume_patch = VolumePatch(
            location=volume.location,
            service_level=volume.service_level,
            usage_threshold=sample_utils.get_tib_in_bytes(new_volume_size_tib),
            export_policy=export_policies_patch)

    try:
        updated_volume = anf_client.volumes.update(
            volume_patch, RESOURCE_GROUP_NAME, account.name,
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id),
            resource_uri_utils.get_anf_volume(volume.id))
        console_output('\t\tVolume successfully updated, new size: {}TiB, export policy count: {}, resource id: {}'
                       .format(sample_utils.get_bytes_in_tib(updated_volume.usage_threshold),
                               len(updated_volume.export_policy.rules),
                               updated_volume.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Retrieving resources
    console_output('Performing retrieval operations ...')

    # Accounts
    # Getting a list of ANF Accounts
    console_output('\tListing accounts...')
    account_list = None
    try:
        account_list = list(anf_client.accounts.list(RESOURCE_GROUP_NAME))
        for i, retrieved_account in enumerate(account_list):
            console_output('\t\t{} - Account Name: {}, Id: {}'
                           .format(i, retrieved_account.name,
                                   retrieved_account.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single ANF Account
    console_output('\tGetting a single account...')
    try:
        retrieved_account = anf_client.accounts.get(
            RESOURCE_GROUP_NAME, account.name)
        console_output('\t\tAccount Name: {}, Id: {}'.format(
            retrieved_account.name, retrieved_account.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Capacity Pools
    # Getting a list of capacity pools from an account
    console_output('\tListing capacity pools from account {}...'
                   .format(account.name))
    capacitypool_list = None
    try:
        capacitypool_list = list(anf_client.pools.list(
            RESOURCE_GROUP_NAME,
            resource_uri_utils.get_anf_account(account.id)))
        for i, retrieved_pool in enumerate(capacitypool_list):
            console_output('\t\t{} - Capacity Pool Name: {}, Id: {}'
                           .format(i, retrieved_pool.name, retrieved_pool.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single capacity pool
    console_output('\tGetting a single capacity pool...')
    try:
        retrieved_pool = anf_client.pools.get(
            RESOURCE_GROUP_NAME,
            resource_uri_utils.get_anf_account(account.id),
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id))
        console_output('\t\tCapacity Pool Name: {}, Id: {}'.format(
            retrieved_pool.name, retrieved_pool.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Volumes
    # Getting a list of volumes from a capacity pool
    console_output('\tListing volumes from capacity pool {}...'.format(
        capacity_pool.name))
    volume_list = None
    try:
        volume_list = list(anf_client.volumes.list(
            RESOURCE_GROUP_NAME,
            resource_uri_utils.get_anf_account(account.id),
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)))
        for i, retrieved_volume in enumerate(volume_list):
            console_output('\t\t{} - Volume Name: {}, Id: {}'
                           .format(i, retrieved_volume.name,
                                   retrieved_volume.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single volume
    console_output('\tGetting a single volume...')
    try:
        retrieved_volume = anf_client.volumes.get(
            RESOURCE_GROUP_NAME,
            resource_uri_utils.get_anf_account(account.id),
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id),
            resource_uri_utils.get_anf_volume(volume.id))
        console_output('\t\tVolume Name: {}, Id: {}'.format(
            retrieved_volume.name, retrieved_volume.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Snapshots
    # Getting a list of snapshots from volume
    console_output(
        '\tListing snapshots from from volume {}...'.format(volume.name))
    snapshot_list = None
    try:
        snapshot_list = list(anf_client.snapshots.list(
            RESOURCE_GROUP_NAME,
            resource_uri_utils.get_anf_account(account.id),
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id),
            resource_uri_utils.get_anf_volume(volume.id)))
        for i, retrieved_snapshot in enumerate(snapshot_list):
            console_output('\t\t{} - Snapshot Name: {}, Id: {}'
                           .format(i, retrieved_snapshot.name,
                                   retrieved_snapshot.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Getting a single snapshot
    console_output('\tGetting a single snapshot...')
    try:
        retrieved_snapshot = anf_client.snapshots.get(
            RESOURCE_GROUP_NAME,
            resource_uri_utils.get_anf_account(account.id),
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id),
            resource_uri_utils.get_anf_volume(volume.id),
            resource_uri_utils.get_anf_snapshot(snapshot.id))
        console_output('\t\tSnapshot Name: {}, Id: {}'.format(
            retrieved_snapshot.name, retrieved_snapshot.id))
    except CloudError as ex:
        console_output(
            'An error ocurred. Error details: {}'.format(ex.message))
        raise

    # Cleaning up. This process needs to start the cleanup from the innermost
    # resources down in the hierarchy chain in our case
    # Snapshots->Volumes->Capacity Pools->Accounts
    if SHOULD_CLEANUP:
        console_output('Cleaning up...')

        # Cleaning up snapshot
        console_output(
            "\tWaiting for 1 minute to let the snapshot used to create a new \
volume to complete the split operation therefore not being locked...")
        time.sleep(60)
        console_output("\tDeleting Snapshot {}...".format(
            resource_uri_utils.get_anf_snapshot(snapshot.id)))
        try:
            anf_client.snapshots.delete(
                RESOURCE_GROUP_NAME, account.name,
                resource_uri_utils.get_anf_capacity_pool(capacity_pool.id),
                resource_uri_utils.get_anf_volume(volume.id),
                resource_uri_utils.get_anf_snapshot(snapshot.id)).wait()
            # ARM Workaround to wait the deletion complete/propagate
            sample_utils.wait_for_no_anf_resource(anf_client, snapshot.id)
            console_output('\t\tDeleted Snapshot: {}'.format(snapshot.id))
        except CloudError as ex:
            console_output(
                'An error ocurred. Error details: {}'.format(ex.message))
            raise

        # Cleaning up volumes
        # Note: Volume deletion operations at the RP level are executed
        # serially
        console_output("\tDeleting Volumes...")
        try:
            volume_ids = [volume.id, volume_from_snapshot.id]
            for volume_id in volume_ids:
                console_output("\t\tDeleting {}".format(volume_id))
                anf_client.volumes.delete(
                    RESOURCE_GROUP_NAME, account.name,
                    resource_uri_utils.get_anf_capacity_pool(capacity_pool.id),
                    resource_uri_utils.get_anf_volume(volume_id)).wait()
                sample_utils.wait_for_no_anf_resource(anf_client, volume_id)
                console_output('\t\tDeleted Volume: {}'.format(volume_id))
        except CloudError as ex:
            console_output(
                'An error ocurred. Error details: {}'.format(ex.message))
            raise

        # Cleaning up Capacity Pool
        console_output("\tDeleting Capacity Pool {} ...".format(
            resource_uri_utils.get_anf_capacity_pool(capacity_pool.id)))
        try:
            anf_client.pools.delete(
                RESOURCE_GROUP_NAME, account.name,
                resource_uri_utils.get_anf_capacity_pool(
                    capacity_pool.id)).wait()
            sample_utils.wait_for_no_anf_resource(anf_client, capacity_pool.id)
            console_output(
                '\t\tDeleted Capacity Pool: {}'.format(capacity_pool.id))
        except CloudError as ex:
            console_output(
                'An error ocurred. Error details: {}'.format(ex.message))
            raise

        # Cleaning up Account
        console_output("\tDeleting Account {} ...".format(account.name))
        try:
            anf_client.accounts.delete(RESOURCE_GROUP_NAME, account.name)
            sample_utils.wait_for_no_anf_resource(anf_client, account.id)
            console_output('\t\tDeleted Account: {}'.format(account.id))
        except CloudError as ex:
            console_output(
                'An error ocurred. Error details: {}'.format(ex.message))
            raise
def create_volume(cmd, client, account_name, pool_name, volume_name,
                  resource_group_name, location, file_path, usage_threshold,
                  vnet, subnet='default', service_level=None,
                  protocol_types=None, volume_type=None, endpoint_type=None,
                  replication_schedule=None, remote_volume_resource_id=None,
                  tags=None, snapshot_id=None, snapshot_policy_id=None,
                  backup_policy_id=None, backup_enabled=None, backup_id=None,
                  policy_enforced=None, vault_id=None, kerberos_enabled=None,
                  security_style=None, throughput_mibps=None,
                  kerberos5_r=None, kerberos5_rw=None, kerberos5i_r=None,
                  kerberos5i_rw=None, kerberos5p_r=None, kerberos5p_rw=None,
                  has_root_access=None, snapshot_dir_visible=None,
                  smb_encryption=None, smb_continuously_avl=None,
                  encryption_key_source=None, rule_index=None,
                  unix_read_only=None, unix_read_write=None, cifs=None,
                  allowed_clients=None, ldap_enabled=None):
    """Create an Azure NetApp Files volume (CLI command implementation).

    vnet/subnet may be bare names or full ARM resource ids. When NFSv4.1
    is among protocol_types, an export-policy rule is built from the
    rule/kerberos arguments (allowed_clients and rule_index become
    mandatory and raise CLIError when missing). Replication, snapshot
    and backup settings are folded into a data-protection object when
    the corresponding arguments are supplied.

    Returns the long-running-operation poller from
    client.begin_create_or_update.
    """
    subs_id = get_subscription_id(cmd.cli_ctx)
    # default the resource group of the subnet to the volume's rg unless the subnet is specified by id
    subnet_rg = resource_group_name

    # determine vnet - supplied value can be name or ARM resource Id
    if is_valid_resource_id(vnet):
        resource_parts = parse_resource_id(vnet)
        vnet = resource_parts['resource_name']
        # a vnet given by id also pins the subnet's resource group
        subnet_rg = resource_parts['resource_group']

    # determine subnet - supplied value can be name or ARM reource Id
    if is_valid_resource_id(subnet):
        resource_parts = parse_resource_id(subnet)
        subnet = resource_parts['resource_name']
        subnet_rg = resource_parts['resource_group']

    # if NFSv4 is specified then the export policy must reflect this
    # the RP ordinarily only creates a default setting NFSv3.
    if (protocol_types is not None) and ("NFSv4.1" in protocol_types):
        rules = []
        if allowed_clients is None:
            raise CLIError(
                "Parameter allowed-clients needs to be set when protocol-type is NFSv4.1"
            )
        if rule_index is None:
            raise CLIError(
                "Parameter rule-index needs to be set when protocol-type is NFSv4.1"
            )
        export_policy = ExportPolicyRule(
            rule_index=rule_index,
            unix_read_only=unix_read_only,
            unix_read_write=unix_read_write,
            cifs=cifs,
            nfsv3=False,
            nfsv41=True,
            allowed_clients=allowed_clients,
            kerberos5_read_only=kerberos5_r,
            kerberos5_read_write=kerberos5_rw,
            kerberos5i_read_only=kerberos5i_r,
            kerberos5i_read_write=kerberos5i_rw,
            kerberos5p_read_only=kerberos5p_r,
            kerberos5p_read_write=kerberos5p_rw,
            has_root_access=has_root_access)
        rules.append(export_policy)
        volume_export_policy = VolumePropertiesExportPolicy(rules=rules)
    else:
        volume_export_policy = None

    data_protection = None
    replication = None
    snapshot = None
    backup = None
    # Make sure volume_type is set correctly if replication parameters are set
    if endpoint_type is not None and replication_schedule is not None and remote_volume_resource_id is not None:
        volume_type = "DataProtection"
    if volume_type == "DataProtection":
        replication = ReplicationObject(
            endpoint_type=endpoint_type,
            replication_schedule=replication_schedule,
            remote_volume_resource_id=remote_volume_resource_id)
    if snapshot_policy_id is not None:
        snapshot = VolumeSnapshotProperties(
            snapshot_policy_id=snapshot_policy_id)
    if backup_policy_id is not None:
        backup = VolumeBackupProperties(backup_policy_id=backup_policy_id,
                                        policy_enforced=policy_enforced,
                                        vault_id=vault_id,
                                        backup_enabled=backup_enabled)
    # only build a data-protection object when at least one component exists
    if replication is not None or snapshot is not None or backup is not None:
        data_protection = VolumePropertiesDataProtection(
            replication=replication, snapshot=snapshot, backup=backup)

    subnet_id = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s" % (
        subs_id, subnet_rg, vnet, subnet)
    # usage_threshold arrives in GiB and is converted to bytes for the RP
    body = Volume(usage_threshold=int(usage_threshold) * gib_scale,
                  creation_token=file_path,
                  service_level=service_level,
                  location=location,
                  subnet_id=subnet_id,
                  protocol_types=protocol_types,
                  export_policy=volume_export_policy,
                  volume_type=volume_type,
                  data_protection=data_protection,
                  backup_id=backup_id,
                  kerberos_enabled=kerberos_enabled,
                  throughput_mibps=throughput_mibps,
                  snapshot_directory_visible=snapshot_dir_visible,
                  security_style=security_style,
                  tags=tags,
                  snapshot_id=snapshot_id,
                  smb_encryption=smb_encryption,
                  smb_continuously_available=smb_continuously_avl,
                  encryption_key_source=encryption_key_source,
                  ldap_enabled=ldap_enabled)
    return client.begin_create_or_update(resource_group_name, account_name,
                                         pool_name, volume_name, body)