def enable_protection_for_vm(cmd, client, resource_group_name, vault_name, vm, policy_name):
    """Enable Azure IaaS VM backup protection with the given backup policy.

    :param cmd: CLI command context; its ``cli_ctx`` is used for client factories.
    :param client: protected-items client used to create/update the backup item.
    :param resource_group_name: resource group of the Recovery Services vault.
    :param vault_name: name of the Recovery Services vault.
    :param vm: VM name or VM resource ID (resolved via _get_resource_name_and_rg).
    :param policy_name: name of the backup policy to associate with the VM.
    :return: result of _track_backup_job for the triggered enable-protection job.
    :raises CLIError: if VM and vault are in different locations, the policy is
        not an AzureIaasVM policy, or no protectable item is found for the VM.
    """
    # Resolve the VM (name may be a full resource ID), the vault, and the policy.
    vm_name, vm_rg = _get_resource_name_and_rg(resource_group_name, vm)
    vm = virtual_machines_cf(cmd.cli_ctx).get(vm_rg, vm_name)
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)

    # Backup requires the VM to be co-located with the vault.
    if vm.location.lower() != vault.location.lower():
        raise CLIError(
            """
            The VM should be in the same location as that of the Recovery Services vault to enable protection.
            """)

    # Only an Azure IaaS VM policy can protect a VM workload.
    if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
        raise CLIError(
            """
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to protect the workload.
            """)

    # Get protectable item.
    protectable_item = _get_protectable_item_for_vm(cmd.cli_ctx, vault_name, resource_group_name, vm_name, vm_rg)
    if protectable_item is None:
        raise CLIError(
            """
            The specified Azure Virtual Machine Not Found. Possible causes are
            1. VM does not exist
            2. The VM name or the Service name needs to be case sensitive
            3. VM is already Protected with same or other Vault.
            Please Unprotect VM first and then try to protect it again.
            Please contact Microsoft for further assistance.
            """)

    # Construct enable protection request object
    container_uri = _get_protection_container_uri_from_id(protectable_item.id)
    item_uri = _get_protectable_item_uri_from_id(protectable_item.id)
    vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
    vm_item_properties.policy_id = policy.id
    vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id
    vm_item = ProtectedItemResource(properties=vm_item_properties)

    # Trigger enable protection and wait for completion
    result = sdk_no_wait(True, client.create_or_update,
                         vault_name, resource_group_name, fabric_name,
                         container_uri, item_uri, vm_item)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def restore_disks(cmd, client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account,
                  restore_to_staging_storage_account=None):
    """Trigger a disk restore from a VM recovery point into a storage account.

    :param cmd: CLI command context; ``cli_ctx`` feeds the client factories.
    :param client: restores client whose ``trigger`` operation starts the job.
    :param resource_group_name: resource group of the Recovery Services vault.
    :param vault_name: name of the Recovery Services vault.
    :param container_name: backup container holding the protected VM.
    :param item_name: name of the protected VM item.
    :param rp_name: recovery point to restore from.
    :param storage_account: staging storage account name or resource ID.
    :param restore_to_staging_storage_account: when falsy, the original storage
        accounts may be used (decided by _should_use_original_storage_account).
    :return: result of _track_backup_job for the triggered restore job.
    """
    # Resolve the protected item and validate it before doing anything else.
    backup_item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_name, item_name, "AzureIaasVM", "VM")
    _validate_item(backup_item)

    selected_rp = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name, vault_name,
                                      container_name, item_name, rp_name, "AzureIaasVM", "VM")

    # The restore request is regional — pin it to the vault's location.
    target_region = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name).location

    # Derive the container/item URIs embedded in the item's ARM ID.
    container_uri = _get_protection_container_uri_from_id(backup_item.id)
    item_uri = _get_protected_item_uri_from_id(backup_item.id)

    # Original Storage Account restore: warn the user about where data lands.
    use_original_storage_account = _should_use_original_storage_account(selected_rp,
                                                                        restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning(
            """
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to given storage account.
            """)

    # Resolve the staging storage account and build the restore request payload.
    account_name, account_rg = _get_resource_name_and_rg(resource_group_name, storage_account)
    staging_account_id = _get_storage_account_id(cmd.cli_ctx, account_name, account_rg)
    source_vm_id = backup_item.properties.source_resource_id

    restore_properties = IaasVMRestoreRequest(create_new_cloud_service=True,
                                              recovery_point_id=rp_name,
                                              recovery_type='RestoreDisks',
                                              region=target_region,
                                              storage_account_id=staging_account_id,
                                              source_resource_id=source_vm_id,
                                              original_storage_account_option=use_original_storage_account)
    restore_request = RestoreRequestResource(properties=restore_properties)

    # Kick off the restore without waiting, then track the resulting job.
    result = sdk_no_wait(True, client.trigger,
                         vault_name, resource_group_name, fabric_name,
                         container_uri, item_uri, rp_name, restore_request)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def _force_delete_vault(cmd, vault_name, resource_group_name):
    """Force-delete a Recovery Services vault.

    Disables protection (deleting backup data) for every protected item in
    every registered AzureIaasVM container, then deletes the vault itself.

    :param cmd: CLI command context; ``cli_ctx`` feeds the client factories.
    :param vault_name: name of the vault to delete.
    :param resource_group_name: resource group containing the vault.
    """
    # Fix: the log message previously misspelled "Attempting" as "Attemping".
    logger.warning('Attempting to force delete vault: %s', vault_name)
    container_client = backup_protection_containers_cf(cmd.cli_ctx)
    backup_item_client = backup_protected_items_cf(cmd.cli_ctx)
    item_client = protected_items_cf(cmd.cli_ctx)
    vault_client = vaults_cf(cmd.cli_ctx)
    containers = _get_containers(
        container_client, 'AzureIaasVM', 'Registered',
        resource_group_name, vault_name)
    for container in containers:
        # Container names are of the form "<prefix>;<friendly-name>".
        container_name = container.name.rsplit(';', 1)[1]
        items = list_items(
            cmd, backup_item_client, resource_group_name, vault_name, container_name)
        for item in items:
            item_name = item.name.rsplit(';', 1)[1]
            logger.warning("Deleting backup item '%s' in container '%s'",
                           item_name, container_name)
            # delete_backup_data=True removes the backup data, not just protection.
            disable_protection(cmd, item_client, resource_group_name, vault_name,
                               container_name, item_name, delete_backup_data=True)

    # now delete the vault
    vault_client.delete(resource_group_name, vault_name)
def restore_disks(client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account):
    """Trigger a disk restore from a VM recovery point (legacy client-side API).

    :param client: restores client whose ``trigger`` operation starts the job.
    :param resource_group_name: resource group of the Recovery Services vault.
    :param vault_name: name of the Recovery Services vault.
    :param container_name: backup container holding the protected VM.
    :param item_name: name of the protected VM item.
    :param rp_name: recovery point to restore from.
    :param storage_account: staging storage account name or resource ID.
    :return: result of _track_backup_job for the triggered restore job.
    """
    # Look up the protected item and the vault; the vault fixes the region.
    backup_item = show_item(backup_protected_items_cf(None), resource_group_name, vault_name,
                            container_name, item_name, "AzureIaasVM", "VM")
    target_region = vaults_cf(None).get(resource_group_name, vault_name,
                                        custom_headers=_get_custom_headers()).location

    # Derive the container/item URIs embedded in the item's ARM ID.
    container_uri = _get_protection_container_uri_from_id(backup_item.id)
    item_uri = _get_protected_item_uri_from_id(backup_item.id)

    # Resolve the staging storage account and assemble the request payload.
    account_name, account_rg = _get_resource_name_and_rg(resource_group_name, storage_account)
    staging_account_id = _get_storage_account_id(account_name, account_rg)
    source_vm_id = backup_item.properties.source_resource_id
    restore_request = RestoreRequestResource(
        properties=IaasVMRestoreRequest(
            create_new_cloud_service=True,
            recovery_point_id=rp_name,
            recovery_type='RestoreDisks',
            region=target_region,
            storage_account_id=staging_account_id,
            source_resource_id=source_vm_id))

    # Trigger the restore (raw response) and track the resulting backup job.
    result = client.trigger(vault_name, resource_group_name, fabric_name,
                            container_uri, item_uri, rp_name,
                            restore_request, raw=True,
                            custom_headers=_get_custom_headers())
    return _track_backup_job(result, vault_name, resource_group_name)
def _force_delete_vault(cmd, vault_name, resource_group_name):
    """Force-delete a Recovery Services vault.

    Disables protection (deleting backup data) for every protected item in
    every registered AzureIaasVM container, then deletes the vault, wrapping
    any deletion failure in a CLIError.

    :param cmd: CLI command context; ``cli_ctx`` feeds the client factories.
    :param vault_name: name of the vault to delete.
    :param resource_group_name: resource group containing the vault.
    :raises CLIError: if the vault still cannot be deleted.
    """
    # Fix: the log message previously misspelled "Attempting" as "Attemping".
    logger.warning('Attempting to force delete vault: %s', vault_name)
    container_client = backup_protection_containers_cf(cmd.cli_ctx)
    backup_item_client = backup_protected_items_cf(cmd.cli_ctx)
    item_client = protected_items_cf(cmd.cli_ctx)
    vault_client = vaults_cf(cmd.cli_ctx)
    containers = _get_containers(
        container_client, 'AzureIaasVM', 'Registered',
        resource_group_name, vault_name)
    for container in containers:
        # Container names are of the form "<prefix>;<friendly-name>".
        container_name = container.name.rsplit(';', 1)[1]
        items = list_items(
            cmd, backup_item_client, resource_group_name, vault_name, container_name)
        for item in items:
            item_name = item.name.rsplit(';', 1)[1]
            logger.warning("Deleting backup item '%s' in container '%s'",
                           item_name, container_name)
            # NOTE(review): this overload passes the item object itself (not
            # container/item names) — confirm against this file's
            # disable_protection signature.
            disable_protection(cmd, item_client, resource_group_name, vault_name, item, True)

    # now delete the vault
    try:
        vault_client.delete(resource_group_name, vault_name)
    except Exception as err:  # pylint: disable=broad-except
        # Fix: chain the original exception so the root cause is not lost.
        raise CLIError("Vault cannot be deleted as there are existing resources within the vault") from err
def restore_disks(cmd, client, resource_group_name, vault_name, container_name, item_name, rp_name, storage_account,
                  target_resource_group=None, restore_to_staging_storage_account=None, restore_only_osdisk=None,
                  diskslist=None, restore_as_unmanaged_disks=None):
    """Trigger a disk restore from a VM recovery point, with managed-disk support.

    :param cmd: CLI command context; ``cli_ctx`` feeds the client factories.
    :param client: restores client whose ``trigger`` operation starts the job.
    :param resource_group_name: resource group of the Recovery Services vault.
    :param vault_name: name of the Recovery Services vault.
    :param container_name: backup container holding the protected VM.
    :param item_name: name of the protected VM item.
    :param rp_name: recovery point to restore from.
    :param storage_account: staging storage account name or resource ID.
    :param target_resource_group: RG in which to restore managed disks.
    :param restore_to_staging_storage_account: force staging-account restore.
    :param restore_only_osdisk: restore only the OS disk when truthy.
    :param diskslist: LUNs of the data disks to restore.
    :param restore_as_unmanaged_disks: restore managed disks as unmanaged.
    :return: result of _track_backup_job for the triggered restore job.
    :raises CLIError: if both restore_as_unmanaged_disks and
        target_resource_group are supplied.
    """
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                     container_name, item_name, "AzureIaasVM", "VM")
    _validate_item(item)
    recovery_point = show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name, vault_name,
                                         container_name, item_name, rp_name, "AzureIaasVM", "VM")
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    vault_location = vault.location

    # Get container and item URIs
    container_uri = _get_protection_container_uri_from_id(item.id)
    item_uri = _get_protected_item_uri_from_id(item.id)

    # Original Storage Account Restore Logic
    use_original_storage_account = _should_use_original_storage_account(
        recovery_point, restore_to_staging_storage_account)
    if use_original_storage_account:
        logger.warning("""
            The disks will be restored to their original storage accounts. The VM config file will be uploaded to given storage account.
            """)

    # Construct trigger restore request object
    sa_name, sa_rg = _get_resource_name_and_rg(resource_group_name, storage_account)
    _storage_account_id = _get_storage_account_id(cmd.cli_ctx, sa_name, sa_rg)
    _source_resource_id = item.properties.source_resource_id
    target_rg_id = None

    # The two managed-disk intents are mutually exclusive.
    if restore_as_unmanaged_disks and target_resource_group is not None:
        # Fix: error message previously misspelled "specified" as "spceified".
        raise CLIError(
            """
            Both restore_as_unmanaged_disks and target_resource_group can't be specified.
            Please give Only one parameter and retry.
            """)

    if recovery_point.properties.is_managed_virtual_machine:
        if target_resource_group is not None:
            # Build the target RG's ARM ID from the source VM ID's subscription prefix.
            target_rg_id = '/'.join(_source_resource_id.split('/')[:4]) + "/" + target_resource_group
        elif not restore_as_unmanaged_disks:
            # Fix: warn only when no target RG was given — the message explains
            # the consequence of omitting it, so it must not fire otherwise.
            logger.warning("""
                The disks of the managed VM will be restored as unmanaged since targetRG parameter is not provided.
                This will NOT leverage the instant restore functionality.
                Hence it can be significantly slow based on given storage account.
                To leverage instant restore, provide the target RG parameter.
                Otherwise, provide the intent next time by passing the --restore-as-unmanaged-disks parameter
                """)

    _validate_restore_disk_parameters(restore_only_osdisk, diskslist)
    # An empty LUN list means "OS disk only"; a populated list selects data disks.
    restore_disk_lun_list = None
    if restore_only_osdisk:
        restore_disk_lun_list = []
    if diskslist:
        restore_disk_lun_list = diskslist

    trigger_restore_properties = IaasVMRestoreRequest(
        create_new_cloud_service=True,
        recovery_point_id=rp_name,
        recovery_type='RestoreDisks',
        region=vault_location,
        storage_account_id=_storage_account_id,
        source_resource_id=_source_resource_id,
        target_resource_group_id=target_rg_id,
        original_storage_account_option=use_original_storage_account,
        restore_disk_lun_list=restore_disk_lun_list)
    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Trigger restore
    result = sdk_no_wait(True, client.trigger,
                         vault_name, resource_group_name, fabric_name,
                         container_uri, item_uri, rp_name, trigger_restore_request)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None, use_secondary_region=None):
    """Trigger a restore for an Azure Workload (SQL/SAP HANA) backup item.

    Reads a recovery-config JSON (produced by the matching show-recovery-config
    command), builds the appropriate restore request — point-in-time or
    recovery-point based, optionally archive-rehydrated — and triggers it
    either against the vault's region or cross-region (secondary).

    :param cmd: CLI command context; ``cli_ctx`` feeds the client factories.
    :param client: restores client used for the primary-region trigger.
    :param resource_group_name: resource group of the Recovery Services vault.
    :param vault_name: name of the Recovery Services vault.
    :param recovery_config: path to (or inline) recovery-config JSON.
    :param rehydration_duration: days to keep an archive RP rehydrated (P<n>D).
    :param rehydration_priority: archive rehydration priority; required when
        the chosen recovery point is in the VaultArchive tier.
    :param use_secondary_region: restore in the paired secondary region.
    :return: tracked backup (or CRR) job result.
    :raises InvalidArgumentValueError: recovery point missing, or archive RP
        chosen without a rehydration priority.
    :raises MutuallyExclusiveArgumentError: archive restore combined with
        secondary-region restore.
    """
    # Unpack the recovery-config document; keys are produced by the
    # corresponding show-recovery-config command in this module.
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    # Validate the protected item against the requested restore mode.
    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)

    # Recovery-point based restore (no log point-in-time given).
    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                    vault_name, container_uri, item_uri, recovery_point_id,
                                                    workload_type, "AzureWorkload", use_secondary_region)
        if recovery_point is None:
            raise InvalidArgumentValueError(
                """
                Specified recovery point not found.
                Please check the recovery config file or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        # Archive-tier recovery points must be rehydrated before restore.
        if (recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive'):
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
                parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore: rebuild the request with the
            # rehydration-aware request type and attach rehydration info.
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)
            # ISO-8601 duration, e.g. "P15D".
            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)
            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id (alternate-location restores target another container).
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group, target_vault_name,
                                                 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Full database restore to an alternate location.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info',
                    TargetRestoreInfo(overwrite_option='Overwrite', database_name=database_name,
                                      container_id=container_id))
            if 'sql' in item_type.lower():
                # SQL needs an explicit mapping for each data/log directory:
                # each entry is (mapping_type, source_path, source_logical_name, target_path).
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # Restore-as-files: dump database files to a directory on the target.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    # Log point-in-time restore: validate the timestamp against the item's
    # available log backup time ranges before attaching it.
    if log_point_in_time is not None:
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Cross-region restore path: obtain a CRR access token for the recovery
    # point in the paired region, then trigger through the CRR client.
    if use_secondary_region:
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name, container_uri,
                                                      item_uri, recovery_point_id, aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  recovery_point_id, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def enable_protection_for_vm(cmd, client, resource_group_name, vault_name, vm, policy_name, diskslist=None,
                             disk_list_setting=None, exclude_all_data_disks=None):
    """Enable Azure IaaS VM backup protection, optionally with disk exclusion.

    :param cmd: CLI command context; its ``cli_ctx`` is used for client factories.
    :param client: protected-items client used to create/update the backup item.
    :param resource_group_name: resource group of the Recovery Services vault.
    :param vault_name: name of the Recovery Services vault.
    :param vm: VM name or VM resource ID (resolved via _get_resource_name_and_rg).
    :param policy_name: name of the backup policy to associate with the VM.
    :param diskslist: LUNs of data disks to include/exclude from backup.
    :param disk_list_setting: "include" to treat diskslist as an inclusion list;
        any other non-None value treats it as an exclusion list.
    :param exclude_all_data_disks: back up only the OS disk when truthy
        (ignored if disk_list_setting is given).
    :return: result of _track_backup_job for the triggered enable-protection job.
    :raises CLIError: policy over the VM limit, location mismatch, wrong policy
        type, missing protectable item, or disk_list_setting without diskslist.
    """
    # Resolve the VM (name may be a full resource ID), the vault, and the policy.
    vm_name, vm_rg = _get_resource_name_and_rg(resource_group_name, vm)
    vm = virtual_machines_cf(cmd.cli_ctx).get(vm_rg, vm_name)
    vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)

    # throw error if policy has more than 1000 protected VMs.
    if policy.properties.protected_items_count >= 1000:
        raise CLIError("Cannot configure backup for more than 1000 VMs per policy")

    # Backup requires the VM to be co-located with the vault.
    if vm.location.lower() != vault.location.lower():
        raise CLIError(
            """
            The VM should be in the same location as that of the Recovery Services vault to enable protection.
            """)

    # Only an Azure IaaS VM policy can protect a VM workload.
    if policy.properties.backup_management_type != BackupManagementType.azure_iaas_vm.value:
        raise CLIError(
            """
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to protect the workload.
            """)

    # Get protectable item.
    protectable_item = _get_protectable_item_for_vm(cmd.cli_ctx, vault_name, resource_group_name, vm_name, vm_rg)
    if protectable_item is None:
        raise CLIError(
            """
            The specified Azure Virtual Machine Not Found. Possible causes are
            1. VM does not exist
            2. The VM name or the Service name needs to be case sensitive
            3. VM is already Protected with same or other Vault.
            Please Unprotect VM first and then try to protect it again.
            Please contact Microsoft for further assistance.
            """)

    # Construct enable protection request object
    container_uri = _get_protection_container_uri_from_id(protectable_item.id)
    item_uri = _get_protectable_item_uri_from_id(protectable_item.id)
    vm_item_properties = _get_vm_item_properties_from_vm_type(vm.type)
    vm_item_properties.policy_id = policy.id
    vm_item_properties.source_resource_id = protectable_item.properties.virtual_machine_id

    # Disk exclusion: an explicit LUN list takes precedence over the
    # exclude-all-data-disks flag.
    if disk_list_setting is not None:
        if diskslist is None:
            raise CLIError("Please provide LUNs of disks that will be included or excluded.")
        is_inclusion_list = False
        if disk_list_setting == "include":
            is_inclusion_list = True
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=diskslist,
                                                            is_inclusion_list=is_inclusion_list)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties
    elif exclude_all_data_disks:
        # Empty inclusion list == back up the OS disk only.
        disk_exclusion_properties = DiskExclusionProperties(disk_lun_list=[], is_inclusion_list=True)
        extended_properties = ExtendedProperties(disk_exclusion_properties=disk_exclusion_properties)
        vm_item_properties.extended_properties = extended_properties

    vm_item = ProtectedItemResource(properties=vm_item_properties)

    # Trigger enable protection and wait for completion
    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, vm_item, raw=True)
    return _track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)