def re_register_wl_container(cmd, client, vault_name, resource_group_name, workload_type,
                             container_name, container_type):
    """Trigger a re-register of an already registered workload container and wait for it."""
    workload_type = workload_type_map[workload_type]

    # Re-register only works with native container names.
    if not cust_help.is_native_name(container_name):
        raise CLIError(
            """
            Container name passed cannot be a friendly name.
            Please pass a native container name.
            """)

    # Look up the registered container to recover its source resource id.
    backup_cf = backup_protection_containers_cf(cmd.cli_ctx)
    registered = common.list_containers(backup_cf, resource_group_name, vault_name, container_type)
    source_resource_id = next(
        (c.properties.source_resource_id for c in registered if c.name == container_name),
        None)

    if not source_resource_id:
        raise CLIError(
            """
            No such registered container exists.
            """)

    properties = AzureVMAppContainerProtectionContainer(backup_management_type=container_type,
                                                        workload_type=workload_type,
                                                        operation_type='Reregister',
                                                        source_resource_id=source_resource_id)
    param = ProtectionContainerResource(properties=properties)

    # Trigger register and wait for completion
    result = sdk_no_wait(True, client.register, vault_name, resource_group_name, fabric_name,
                         container_name, param)
    return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name,
                                              resource_group_name, container_name)
def _force_delete_vault(cmd, vault_name, resource_group_name):
    """Disable protection (deleting backup data) for every item in the vault's
    registered IaaS VM containers, then delete the vault itself.

    :param cmd: CLI command context used to build service clients.
    :param vault_name: name of the Recovery Services vault to delete.
    :param resource_group_name: resource group containing the vault.
    """
    # Fix: log message previously read "Attemping".
    logger.warning('Attempting to force delete vault: %s', vault_name)
    container_client = backup_protection_containers_cf(cmd.cli_ctx)
    backup_item_client = backup_protected_items_cf(cmd.cli_ctx)
    item_client = protected_items_cf(cmd.cli_ctx)
    vault_client = vaults_cf(cmd.cli_ctx)

    containers = _get_containers(container_client, 'AzureIaasVM', 'Registered',
                                 resource_group_name, vault_name)
    for container in containers:
        # Container/item names have the form "<prefix>;<name>" - keep the trailing part.
        container_name = container.name.rsplit(';', 1)[1]
        items = list_items(cmd, backup_item_client, resource_group_name, vault_name,
                           container_name)
        for item in items:
            item_name = item.name.rsplit(';', 1)[1]
            logger.warning("Deleting backup item '%s' in container '%s'",
                           item_name, container_name)
            disable_protection(cmd, item_client, resource_group_name, vault_name,
                               container_name, item_name, delete_backup_data=True)

    # now delete the vault
    vault_client.delete(resource_group_name, vault_name)
def list_items(cmd, client, resource_group_name, vault_name, container_name=None,
               container_type="AzureIaasVM", item_type="VM"):
    """List protected items in the vault, optionally filtered to one container."""
    filter_string = _get_filter_string({
        'backupManagementType': container_type,
        'itemType': item_type})

    paged_items = _get_list_from_paged_response(
        client.list(vault_name, resource_group_name, filter_string))

    if not container_name:
        return paged_items

    # Resolve a friendly container name to its native (URI) form when needed.
    if _is_native_name(container_name):
        container_uri = container_name
    else:
        container = show_container(backup_protection_containers_cf(cmd.cli_ctx),
                                   container_name, resource_group_name, vault_name,
                                   container_type)
        _validate_container(container)
        container_uri = container.name

    target = container_uri.lower()
    return [item for item in paged_items
            if _get_protection_container_uri_from_id(item.id).lower() == target]
def list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type,
                           backup_management_type="AzureWorkload", container_name=None,
                           protectable_item_type=None, server_name=None):
    """List protectable items of the given workload type, optionally scoped to a container."""
    if backup_management_type != "AzureWorkload":
        raise ValidationError("""
        Only supported value of backup-management-type is 'AzureWorkload' for this command.
        """)

    container_uri = None
    if container_name:
        # Native names pass straight through; friendly names are resolved via the service.
        if custom_help.is_native_name(container_name):
            container_uri = container_name
        else:
            container_client = backup_protection_containers_cf(cmd.cli_ctx)
            resolved = show_container(cmd, container_client, container_name, resource_group_name,
                                      vault_name, backup_management_type)
            custom_help.validate_container(resolved)
            # show_container returns a list when a friendly name is ambiguous.
            if isinstance(resolved, list):
                raise ValidationError("""
                Multiple containers with same Friendly Name found. Please give native names instead.
                """)
            container_uri = resolved.name

    return custom_wl.list_protectable_items(client, resource_group_name, vault_name,
                                            workload_type, backup_management_type,
                                            container_uri, protectable_item_type, server_name)
def unregister_container(cmd, client, vault_name, resource_group_name, container_name,
                         backup_management_type=None):
    """Unregister a container from the vault, dispatching on its backup management type."""
    container_type = custom_help.validate_and_extract_container_type(container_name,
                                                                     backup_management_type)

    # A friendly name must first be resolved to the native container name.
    if not custom_help.is_native_name(container_name):
        containers_client = backup_protection_containers_cf(cmd.cli_ctx)
        resolved = show_container(cmd, containers_client, container_name, resource_group_name,
                                  vault_name, backup_management_type)
        container_name = resolved.name

    kind = container_type.lower()
    if kind == "azurestorage":
        return custom_afs.unregister_afs_container(cmd, client, vault_name,
                                                   resource_group_name, container_name)
    if kind == "azureworkload":
        return custom_wl.unregister_wl_container(cmd, client, vault_name,
                                                 resource_group_name, container_name)
    # Unsupported container type: nothing to do.
    return None
def unregister_container(cmd, client, vault_name, resource_group_name, container_name,
                         backup_management_type=None):
    """Unregister a container, handling AzureStorage, AzureWorkload and MAB agents."""
    container_type = custom_help.validate_and_extract_container_type(container_name,
                                                                     backup_management_type)

    # Resolve the container so both its native name and friendly name are available.
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    resolved = show_container(cmd, containers_client, container_name, resource_group_name,
                              vault_name, backup_management_type)
    container_name = resolved.name
    container_friendly_name = resolved.properties.friendly_name

    kind = container_type.lower()
    if kind == "azurestorage":
        return custom_afs.unregister_afs_container(cmd, client, vault_name,
                                                   resource_group_name, container_name)
    if kind == "azureworkload":
        return custom_wl.unregister_wl_container(cmd, client, vault_name,
                                                 resource_group_name, container_name)
    if kind == "mab":
        # MAB agents are removed through the registered-identities API by friendly name.
        mab_client = registered_identities_cf(cmd.cli_ctx)
        mab_client.delete(resource_group_name, vault_name, container_friendly_name)
    return None
def list_items(cmd, client, resource_group_name, vault_name, container_name=None,
               container_type="AzureIaasVM", item_type="VM"):
    """List protected items, optionally restricted to a single container."""
    filter_string = _get_filter_string({
        'backupManagementType': container_type,
        'itemType': item_type})

    paged_items = _get_list_from_paged_response(
        client.list(vault_name, resource_group_name, filter_string))

    if container_name is None:
        return paged_items

    container = show_container(backup_protection_containers_cf(cmd.cli_ctx), container_name,
                               resource_group_name, vault_name, container_type)
    _validate_container(container)

    # NOTE(review): items are matched by substring containment in the container's full
    # name (not strict equality) - preserved from the original implementation.
    full_name = container.name.lower()
    return [item for item in paged_items
            if item.properties.container_name.lower() in full_name]
def _force_delete_vault(cmd, vault_name, resource_group_name):
    """Disable protection for every item in the vault's registered IaaS VM containers,
    then delete the vault.

    :param cmd: CLI command context used to build service clients.
    :param vault_name: name of the Recovery Services vault to delete.
    :param resource_group_name: resource group containing the vault.
    :raises CLIError: if the vault still cannot be deleted (resources remain in it).
    """
    # Fix: log message previously read "Attemping".
    logger.warning('Attempting to force delete vault: %s', vault_name)
    container_client = backup_protection_containers_cf(cmd.cli_ctx)
    backup_item_client = backup_protected_items_cf(cmd.cli_ctx)
    item_client = protected_items_cf(cmd.cli_ctx)
    vault_client = vaults_cf(cmd.cli_ctx)

    containers = _get_containers(container_client, 'AzureIaasVM', 'Registered',
                                 resource_group_name, vault_name)
    for container in containers:
        # Container/item names have the form "<prefix>;<name>" - keep the trailing part.
        container_name = container.name.rsplit(';', 1)[1]
        items = list_items(cmd, backup_item_client, resource_group_name, vault_name,
                           container_name)
        for item in items:
            item_name = item.name.rsplit(';', 1)[1]
            logger.warning("Deleting backup item '%s' in container '%s'",
                           item_name, container_name)
            disable_protection(cmd, item_client, resource_group_name, vault_name, item, True)

    # now delete the vault
    try:
        vault_client.delete(resource_group_name, vault_name)
    except Exception as ex:  # pylint: disable=broad-except
        # Fix: chain the original exception so the service error is not discarded.
        raise CLIError(
            "Vault cannot be deleted as there are existing resources within the vault"
        ) from ex
def enable_for_AzureFileShare(cmd, client, resource_group_name, vault_name, afs_name,
                              storage_account_name, policy_name):
    """Enable backup for an Azure file share.

    Registers the file share's storage account with the vault if it is not already
    registered, then creates the protected item using the given policy.
    """
    # get registered storage accounts
    storage_account = None
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    registered_containers = common.list_containers(containers_client, resource_group_name,
                                                   vault_name, "AzureStorage")
    storage_account = _get_storage_account_from_list(registered_containers, storage_account_name)

    # get unregistered storage accounts
    if storage_account is None:
        unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name,
                                                              vault_name)
        storage_account = _get_storage_account_from_list(unregistered_containers,
                                                         storage_account_name)

        if storage_account is None:
            # refresh containers in the vault so newly protectable accounts show up
            protection_containers_client = protection_containers_cf(cmd.cli_ctx)
            filter_string = helper.get_filter_string({'backupManagementType': "AzureStorage"})
            refresh_result = protection_containers_client.refresh(vault_name, resource_group_name,
                                                                  fabric_name,
                                                                  filter=filter_string, raw=True)
            helper.track_refresh_operation(cmd.cli_ctx, refresh_result, vault_name,
                                           resource_group_name)

            # refetch the protectable containers after refresh
            unregistered_containers = list_protectable_containers(cmd.cli_ctx,
                                                                  resource_group_name, vault_name)
            storage_account = _get_storage_account_from_list(unregistered_containers,
                                                             storage_account_name)

            if storage_account is None:
                raise CLIError("Storage account not found or not supported.")

        # register storage account (only needed when it was found among the
        # unregistered/protectable containers)
        protection_containers_client = protection_containers_cf(cmd.cli_ctx)
        properties = AzureStorageContainer(backup_management_type="AzureStorage",
                                           source_resource_id=storage_account.properties.container_id,
                                           workload_type="AzureFileShare")
        param = ProtectionContainerResource(properties=properties)
        result = protection_containers_client.register(vault_name, resource_group_name, fabric_name,
                                                       storage_account.name, param, raw=True)
        helper.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name,
                                        storage_account.name)

    policy = common.show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name,
                                vault_name, policy_name)
    helper.validate_policy(policy)

    # Resolve the file share to a protectable item and build the protected-item request.
    protectable_item = _get_protectable_item_for_afs(cmd.cli_ctx, vault_name, resource_group_name,
                                                     afs_name, storage_account)
    helper.validate_azurefileshare_item(protectable_item)

    container_uri = helper.get_protection_container_uri_from_id(protectable_item.id)
    item_uri = helper.get_protectable_item_uri_from_id(protectable_item.id)

    item_properties = AzureFileshareProtectedItem()
    item_properties.policy_id = policy.id
    item_properties.source_resource_id = protectable_item.properties.parent_container_fabric_id
    item = ProtectedItemResource(properties=item_properties)

    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, item, raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type,
                           container_name=None):
    """List AzureWorkload protectable items, optionally scoped to one container."""
    container_uri = None
    if container_name:
        # Native names pass straight through; friendly names are resolved via the service.
        if custom_help.is_native_name(container_name):
            container_uri = container_name
        else:
            container_client = backup_protection_containers_cf(cmd.cli_ctx)
            resolved = show_container(cmd, container_client, container_name, resource_group_name,
                                      vault_name, "AzureWorkload")
            custom_help.validate_container(resolved)
            container_uri = resolved.name

    return custom_wl.list_protectable_items(client, resource_group_name, vault_name,
                                            workload_type, container_uri)
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name,
                         item_name, rp_name=None, target_item_name=None, log_point_in_time=None,
                         target_server_type=None, target_server_name=None, workload_type=None,
                         backup_management_type="AzureWorkload", from_full_rp_name=None,
                         filepath=None, target_container_name=None, target_resource_group=None,
                         target_vault_name=None):
    """Build the recovery configuration for a workload restore.

    Target resource group / vault / container default to the source ones when not
    supplied, which supports cross-resource-group and cross-vault restores.
    """
    target_resource_group = resource_group_name if target_resource_group is None else target_resource_group
    target_vault_name = vault_name if target_vault_name is None else target_vault_name
    target_container_name = container_name if target_container_name is None else target_container_name

    target_item = None
    if target_item_name is not None:
        # Resolve the target protectable instance (used for alternate-location restores).
        protectable_items_client = backup_protectable_items_cf(cmd.cli_ctx)
        target_item = show_protectable_instance(
            cmd, protectable_items_client, target_resource_group, target_vault_name,
            target_server_name, target_server_type, workload_type, target_container_name)

    target_container = None
    if target_container_name is not None:
        container_client = backup_protection_containers_cf(cmd.cli_ctx)
        target_container = common.show_container(cmd, container_client, target_container_name,
                                                 target_resource_group, target_vault_name,
                                                 backup_management_type)
        # show_container returns a list when a friendly name matches multiple containers.
        if isinstance(target_container, list):
            raise ValidationError("""
            Multiple containers with same Friendly Name found. Please give native names instead.
            """)

    return custom_wl.show_recovery_config(cmd, client, resource_group_name, vault_name,
                                          restore_mode, container_name, item_name, rp_name,
                                          target_item, target_item_name, log_point_in_time,
                                          from_full_rp_name, filepath, target_container,
                                          target_resource_group, target_vault_name)
def unregister_container(cmd, client, vault_name, resource_group_name, container_name,
                         backup_management_type=None):
    """Unregister an AzureStorage container; any other container type is a no-op."""
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    container = show_container(cmd, containers_client, container_name, resource_group_name,
                               vault_name, backup_management_type)
    if container.properties.backup_management_type.lower() == "azurestorage":
        custom_afs.unregister_afs_container(cmd, client, vault_name, resource_group_name,
                                            container.name)
def unregister_container(cmd, client, vault_name, resource_group_name, container_name,
                         backup_management_type=None):
    """Unregister a container from the vault.

    Supports AzureStorage and AzureWorkload containers. MAB agent unregistration is
    intentionally not wired up yet (the disabled implementation used
    registered_identities_cf with the container's friendly name and tracked the
    operation); unsupported types return None.
    """
    # Fix: removed dead commented-out MAB code and the unused placeholder locals
    # (container = None, container_friendly_name) that only served it.
    container_type = custom_help.validate_and_extract_container_type(container_name,
                                                                     backup_management_type)

    # Resolve a friendly name to the native container name.
    if not custom_help.is_native_name(container_name):
        containers_client = backup_protection_containers_cf(cmd.cli_ctx)
        container = show_container(cmd, containers_client, container_name, resource_group_name,
                                   vault_name, backup_management_type)
        container_name = container.name

    if container_type.lower() == "azurestorage":
        return custom_afs.unregister_afs_container(cmd, client, vault_name,
                                                   resource_group_name, container_name)
    if container_type.lower() == "azureworkload":
        return custom_wl.unregister_wl_container(cmd, client, vault_name,
                                                 resource_group_name, container_name)
    return None
def list_items(client, resource_group_name, vault_name, container_name,
               container_type="AzureIaasVM", item_type="VM"):
    """Return the protected items that belong to the given container."""
    filter_string = _get_filter_string({
        'backupManagementType': container_type,
        'itemType': item_type})

    response = client.list(vault_name, resource_group_name, filter_string,
                           custom_headers=_get_custom_headers())
    all_items = _get_list_from_paged_response(response)

    container = show_container(backup_protection_containers_cf(None), container_name,
                               resource_group_name, vault_name, container_type)

    # NOTE(review): matching is by substring containment in the container's full name,
    # preserved from the original implementation.
    target = container.name.lower()
    return [item for item in all_items
            if item.properties.container_name.lower() in target]
def list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type,
                           container_name=None):
    """List AzureWorkload protectable items, optionally scoped to a single container."""
    container_uri = None
    if container_name:
        if custom_help.is_native_name(container_name):
            container_uri = container_name
        else:
            # Friendly name: resolve it to the native container name via the service.
            container_client = backup_protection_containers_cf(cmd.cli_ctx)
            resolved = show_container(cmd, container_client, container_name, resource_group_name,
                                      vault_name, "AzureWorkload")
            custom_help.validate_container(resolved)
            # A list result means the friendly name is ambiguous.
            if isinstance(resolved, list):
                raise ValidationError("""
                Multiple containers with same Friendly Name found. Please give native names instead.
                """)
            container_uri = resolved.name

    return custom_wl.list_protectable_items(client, resource_group_name, vault_name,
                                            workload_type, container_uri)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config,
                     rehydration_duration=15, rehydration_priority=None):
    """Trigger a restore for an Azure Workload item from a recovery-config file.

    Archive-tier recovery points additionally require rehydration duration/priority;
    log restores use a point-in-time instead of a recovery point id.
    """
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx),
                                                resource_group_name, vault_name, container_uri,
                                                item_uri, recovery_point_id, item_type,
                                                backup_management_type="AzureWorkload")
    rp_list = [recovery_point]
    common.fetch_tier(rp_list)

    # Archive-tier recovery points cannot be restored without rehydration parameters.
    if (rp_list[0].properties.recovery_point_tier_details is not None and
            rp_list[0].tier_type == 'VaultArchive' and rehydration_priority is None):
        raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
        parameters of rehydration duration and rehydration priority.""")

    if rp_list[0].properties.recovery_point_tier_details is not None and rp_list[0].tier_type == 'VaultArchive':
        # Construct trigger restore request object with rehydration info.
        trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                   rehydration_priority)
        # ISO-8601 duration string, e.g. "P15D".
        rehyd_duration = 'P' + str(rehydration_duration) + 'D'
        rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                        rehydration_priority=rehydration_priority)
        trigger_restore_properties.recovery_point_rehydration_info = rehydration_info
    else:
        trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)
    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group,
                                                 target_vault_name, 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Database restore to an alternate server/instance.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info',
                    TargetRestoreInfo(overwrite_option='Overwrite', database_name=database_name,
                                      container_id=container_id))
            if 'sql' in item_type.lower():
                # Map each data directory: (mapping type, source path, logical name, target path).
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0],
                                                                 source_path=i[1],
                                                                 source_logical_name=i[2],
                                                                 target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # File recovery: restore database files to a directory instead.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        setattr(trigger_restore_properties, 'point_in_time', datetime_type(log_point_in_time))

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Trigger restore and wait for completion
    result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri,
                            item_uri, recovery_point_id, trigger_restore_request, raw=True,
                            polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config,
                     rehydration_duration=15, rehydration_priority=None,
                     use_secondary_region=None):
    """Trigger a restore for an Azure Workload item from a recovery-config file.

    Handles archive-tier rehydration, log point-in-time restores, and cross-region
    restore (CRR) to the vault's paired secondary region.
    """
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                            vault_name, container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)
    if log_point_in_time is None:
        # Point-in-time restores don't use a recovery point; only validate one otherwise.
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx),
                                                    resource_group_name, vault_name, container_uri,
                                                    item_uri, recovery_point_id, workload_type,
                                                    "AzureWorkload", use_secondary_region)

        if recovery_point is None:
            raise InvalidArgumentValueError("""
            Specified recovery point not found. Please check the recovery config file
            or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        if (recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive'):
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
                parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)
            # ISO-8601 duration string, e.g. "P15D".
            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)
            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group,
                                                 target_vault_name, 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Database restore to an alternate server/instance.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info',
                    TargetRestoreInfo(overwrite_option='Overwrite', database_name=database_name,
                                      container_id=container_id))
            if 'sql' in item_type.lower():
                # Map each data directory: (mapping type, source path, logical name, target path).
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0],
                                                                 source_path=i[1],
                                                                 source_logical_name=i[2],
                                                                 target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # File recovery: restore database files to a directory instead.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        # Validate the requested point-in-time falls inside an available log range.
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item,
                                              use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        # Cross-region restore: requires a CRR access token from the passive region.
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name,
                                                      container_uri, item_uri, recovery_point_id,
                                                      aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request,
                                          cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri,
                                  item_uri, recovery_point_id, trigger_restore_request,
                                  cls=cust_help.get_pipeline_response, polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)