def show_container(cmd, client, name, resource_group_name, vault_name, backup_management_type=None,
                   status="Registered"):
    """Show details of a backup container registered to a Recovery Services vault.

    Thin wrapper: all work is delegated to the shared ``common.show_container`` helper.
    ``status`` defaults to "Registered" so only registered containers are matched.
    """
    container = common.show_container(cmd, client, name, resource_group_name, vault_name,
                                      backup_management_type, status)
    return container
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name,
                         item_name, rp_name=None, target_item_name=None, log_point_in_time=None,
                         target_server_type=None, target_server_name=None, workload_type=None,
                         backup_management_type="AzureWorkload", from_full_rp_name=None, filepath=None,
                         target_container_name=None, target_resource_group=None, target_vault_name=None):
    """Construct the recovery configuration used to drive an AzureWorkload restore.

    Target resource group / vault / container fall back to the source values when the
    caller does not supply alternates, then the target protectable item and container
    are resolved before delegating to ``custom_wl.show_recovery_config``.
    """
    # Default the target location to the source location for original-location restores.
    if target_resource_group is None:
        target_resource_group = resource_group_name
    if target_vault_name is None:
        target_vault_name = vault_name
    if target_container_name is None:
        target_container_name = container_name

    # Resolve the target protectable instance only when an alternate item was requested.
    target_item = None
    if target_item_name is not None:
        protectable_items_client = backup_protectable_items_cf(cmd.cli_ctx)
        target_item = show_protectable_instance(
            cmd, protectable_items_client, target_resource_group, target_vault_name,
            target_server_name, target_server_type, workload_type, target_container_name)

    target_container = None
    if target_container_name is not None:
        container_client = backup_protection_containers_cf(cmd.cli_ctx)
        target_container = common.show_container(
            cmd, container_client, target_container_name, target_resource_group,
            target_vault_name, backup_management_type)

        # A friendly name may match several containers; a list result is ambiguous.
        if isinstance(target_container, list):
            raise ValidationError("""
            Multiple containers with same Friendly Name found. Please give native names instead.
            """)

    return custom_wl.show_recovery_config(
        cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name,
        rp_name, target_item, target_item_name, log_point_in_time, from_full_rp_name, filepath,
        target_container, target_resource_group, target_vault_name)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None, use_secondary_region=None):
    """Trigger a restore for an AzureWorkload (SQL/SAP HANA) backup item.

    ``recovery_config`` is a path-or-JSON produced by ``show_recovery_config``; it is
    parsed and drives the whole request. Supports archive-tier rehydration (via
    ``rehydration_duration``/``rehydration_priority``) and cross-region restore
    (``use_secondary_region``). Returns the tracked restore job.
    """
    # recovery_config may be a file path or an inline JSON string.
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    # Default (non-rehydration) request; replaced below when the RP is in archive tier.
    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)

    # A distinct recovery point only exists for point-in-time == None (full/diff restore);
    # log restores are driven by 'point_in_time' instead.
    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                    vault_name, container_uri, item_uri, recovery_point_id,
                                                    workload_type, "AzureWorkload", use_secondary_region)

        if recovery_point is None:
            raise InvalidArgumentValueError("""
            Specified recovery point not found.
            Please check the recovery config file or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        if (recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive'):
            # Archive-tier RPs must be rehydrated before restore, which requires a priority.
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
                parameters of rehydration duration and rehydration priority.""")

            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)

            # Rehydration retention is an ISO-8601 duration, e.g. 'P15D'.
            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)

            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group,
                                                 target_vault_name, 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Restore-as-database to an alternate server.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                # SQL needs an explicit data/log file relocation map:
                # each entry is (mapping_type, source_path, source_logical_name, target_path).
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # Restore-as-files: dump backup files into 'filepath' on the target container.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        log_point_in_time = datetime_type(log_point_in_time)
        # Validate the requested PIT lies within an available log-backup time range.
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        # Cross-region restore path: archive rehydration is not supported here.
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        # CRR requires a short-lived access token minted against the passive region.
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name, container_uri,
                                                      item_uri, recovery_point_id, aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()

        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  recovery_point_id, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
# NOTE(review): this is a SECOND definition of restore_azure_wl in this file; at import
# time it shadows the definition above (older SDK surface: client.trigger(..., raw=True)
# vs begin_trigger, and no --use-secondary-region support). Looks like a merge/version
# artifact — confirm which definition is intended and remove the other.
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None):
    """Trigger a restore for an AzureWorkload backup item (legacy variant).

    ``recovery_config`` is a path-or-JSON produced by ``show_recovery_config``.
    Supports archive-tier rehydration; no cross-region restore support.
    Returns the tracked restore job.
    """
    # recovery_config may be a file path or an inline JSON string.
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                vault_name, container_uri, item_uri, recovery_point_id,
                                                item_type, backup_management_type="AzureWorkload")

    # fetch_tier mutates the list entries in place with tier information.
    rp_list = [recovery_point]
    common.fetch_tier(rp_list)

    # Archive-tier RPs must be rehydrated before restore, which requires a priority.
    if (rp_list[0].properties.recovery_point_tier_details is not None and rp_list[0].tier_type == 'VaultArchive' and
            rehydration_priority is None):
        raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional
        parameters of rehydration duration and rehydration priority.""")

    if rp_list[0].properties.recovery_point_tier_details is not None and rp_list[0].tier_type == 'VaultArchive':
        # Construct trigger restore request object
        trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                   rehydration_priority)

        # Rehydration retention is an ISO-8601 duration, e.g. 'P15D'.
        rehyd_duration = 'P' + str(rehydration_duration) + 'D'
        rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                        rehydration_priority=rehydration_priority)

        trigger_restore_properties.recovery_point_rehydration_info = rehydration_info
    else:
        trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group,
                                                 target_vault_name, 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Restore-as-database to an alternate server.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                # SQL needs an explicit data/log file relocation map:
                # each entry is (mapping_type, source_path, source_logical_name, target_path).
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # Restore-as-files: dump backup files into 'filepath' on the target container.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        # NOTE(review): unlike the variant above, the PIT is not validated against the
        # available log time ranges here — confirm whether that check is needed.
        setattr(trigger_restore_properties, 'point_in_time', datetime_type(log_point_in_time))

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    # Trigger restore and wait for completion
    result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                            recovery_point_id, trigger_restore_request, raw=True, polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)