def list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                            extended_info=None):
    """Return the recovery points of a protected Azure Workload item.

    When invoked via the ``show-log-chain`` command, or when extended info is
    requested, the service is queried for 'Log' restore points instead of the
    default restore-point set.
    """
    # The protected item's ARM id encodes both the container and item URIs.
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = cust_help.get_query_dates(end_date, start_date)
    if query_end_date and query_start_date:
        # Validate the window only when both endpoints were supplied.
        cust_help.is_range_valid(query_start_date, query_end_date)

    filter_string = cust_help.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})
    if cmd.name.split()[2] == 'show-log-chain' or extended_info is not None:
        filter_string = cust_help.get_filter_string({
            'restorePointQueryType': 'Log',
            'startDate': query_start_date,
            'endDate': query_end_date,
            'extendedInfo': extended_info})

    # Fetch and flatten the paged response.
    response = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, filter_string)
    return cust_help.get_list_from_paged_response(response)
def list_protectable_items(cmd, client, resource_group_name, vault_name, workload_type,
                           backup_management_type="AzureWorkload", container_uri=None,
                           protectable_item_type=None, server_name=None):
    """List protectable items of a workload type in a vault, with optional
    client-side filters for item type, server name and parent container.

    The surviving items are also enriched in-place with node lists and
    auto-protection policy information before being returned.
    """
    workload_type = _check_map(workload_type, workload_type_map)
    if protectable_item_type is not None:
        protectable_item_type = _check_map(protectable_item_type, protectable_item_type_map)
    filter_string = cust_help.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': workload_type})

    paged_items = cust_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, filter_string))

    if protectable_item_type is not None:
        # Keep only items whose protectable type matches, case-insensitively;
        # items without a type are dropped.
        paged_items = [it for it in paged_items
                       if it.properties.protectable_item_type is not None and
                       it.properties.protectable_item_type.lower() == protectable_item_type.lower()]
    if server_name is not None:
        # Server-name filter: only some item kinds expose server_name.
        paged_items = [it for it in paged_items
                       if hasattr(it.properties, 'server_name') and
                       it.properties.server_name.lower() == server_name.lower()]
    if container_uri:
        # Parent-container filter, matched case-insensitively on the URI.
        paged_items = [it for it in paged_items
                       if cust_help.get_protection_container_uri_from_id(it.id).lower() == container_uri.lower()]

    _fetch_nodes_list_and_auto_protection_policy(cmd, paged_items, resource_group_name, vault_name)
    return paged_items
def register_wl_container(cmd, client, vault_name, resource_group_name, workload_type, resource_id, container_type):
    """Register a workload container (e.g. an Azure VM running SQL) with the
    Recovery Services vault and wait for the operation to finish.

    :raises CLIError: when resource_id is not a valid ARM id.
    :raises ResourceNotFoundError: when no matching protectable container is
        found even after refreshing the vault's container list.
    """
    if not cust_help.is_id(resource_id):
        raise CLIError(""" Resource ID is not a valid one. """)

    workload_type = _check_map(workload_type, workload_type_map)

    container_name = _get_protectable_container_name(cmd, resource_group_name, vault_name, resource_id)

    # A non-native (or missing) name means the container is not yet visible.
    if container_name is None or not cust_help.is_native_name(container_name):
        filter_string = cust_help.get_filter_string({'backupManagementType': "AzureWorkload"})
        # refresh containers and try to get the protectable container object again
        refresh_result = client.refresh(vault_name, resource_group_name, fabric_name, filter=filter_string,
                                        cls=cust_help.get_pipeline_response)
        cust_help.track_refresh_operation(cmd.cli_ctx, refresh_result, vault_name, resource_group_name)
        container_name = _get_protectable_container_name(cmd, resource_group_name, vault_name, resource_id)

    if container_name is None or not cust_help.is_native_name(container_name):
        raise ResourceNotFoundError(""" Container unavailable or already registered. """)

    properties = AzureVMAppContainerProtectionContainer(backup_management_type=container_type,
                                                        source_resource_id=resource_id,
                                                        workload_type=workload_type)
    param = ProtectionContainerResource(properties=properties)

    # Trigger register and wait for completion
    result = client.register(vault_name, resource_group_name, fabric_name, container_name, param,
                             cls=cust_help.get_pipeline_response)
    return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, container_name)
def list_recovery_points(client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                         use_secondary_region=None):
    """List recovery points for an AzureStorage protected item within an
    optional date window; secondary-region queries are rejected."""
    if use_secondary_region:
        raise InvalidArgumentValueError(""" --use-secondary-region flag is not supported for --backup-management-type AzureStorage. Please either remove the flag or query for any other backup-management-type. """)

    # Container/item URIs are embedded in the protected item's ARM id.
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = helper.get_query_dates(end_date, start_date)
    window_filter = helper.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})

    response = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, window_filter)
    return helper.get_list_from_paged_response(response)
def list_items(cmd, client, resource_group_name, vault_name, workload_type=None, container_name=None,
               container_type=None):
    """List protected items in a vault, optionally narrowed to one container."""
    backend_filter = custom_help.get_filter_string({
        'backupManagementType': container_type,
        'itemType': workload_type})
    all_items = custom_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, backend_filter))

    if not container_name:
        return all_items

    # A native (service-side) container name is matched exactly; a friendly
    # name is matched against the last ';'-separated segment of the item's
    # container name.
    if custom_help.is_native_name(container_name):
        return [item for item in all_items if _is_container_name_match(item, container_name)]
    return [item for item in all_items
            if item.properties.container_name.lower().split(';')[-1] == container_name.lower()]
def _get_containers(client, backup_management_type, status, resource_group_name, vault_name, container_name=None):
    """Return registered containers of a backup-management type and status,
    optionally filtered by container name.

    Friendly names are filtered server-side; native names are matched
    client-side against the returned list.
    """
    filter_dict = {
        'backupManagementType': backup_management_type,
        'status': status
    }
    if container_name and not custom_help.is_native_name(container_name):
        filter_dict['friendlyName'] = container_name
    filter_string = custom_help.get_filter_string(filter_dict)

    paged_containers = client.list(vault_name, resource_group_name, filter_string)
    containers = custom_help.get_list_from_paged_response(paged_containers)

    if container_name and custom_help.is_native_name(container_name):
        # Fix: compare native names case-insensitively — Azure resource names
        # are case-insensitive, and the sibling implementation of this helper
        # already lower-cases both sides.
        return [
            container for container in containers
            if container.name.lower() == container_name.lower()
        ]
    return containers
def disable_auto_for_azure_wl(cmd, client, resource_group_name, vault_name, protectable_item):
    """Remove the auto-protection intent registered for a SQL instance or SQL
    availability group. Returns ``{'status': bool}`` indicating success."""
    props = protectable_item.properties
    item_id = protectable_item.id
    protectable_item_type = props.protectable_item_type
    protectable_item_name = props.friendly_name
    container_name = cust_help.get_protection_container_uri_from_id(item_id)

    if protectable_item_type.lower() not in ['sqlinstance', 'sqlavailabilitygroupcontainer']:
        raise CLIError(""" Protectable Item can only be of type SQLInstance or SQLAG. """)

    intent_filter = cust_help.get_filter_string({
        'backupManagementType': "AzureWorkload",
        'itemType': protectable_item_type,
        'itemName': protectable_item_name,
        'parentName': container_name})
    intents = backup_protection_intent_cf(cmd.cli_ctx).list(vault_name, resource_group_name, intent_filter)
    matched_intents = cust_help.get_list_from_paged_response(intents)
    if len(matched_intents) != 1:
        raise InvalidArgumentValueError("A unique intent not found. Please check if the values provided are correct.")

    # Best-effort delete: report failure through the status flag, not a raise.
    try:
        client.delete(vault_name, resource_group_name, fabric_name, matched_intents[0].name)
        return {'status': True}
    except Exception:
        return {'status': False}
def list_protectable_containers(cli_ctx, resource_group_name, vault_name):
    """Return all AzureStorage containers in the vault that can be protected."""
    storage_filter = helper.get_filter_string({'backupManagementType': "AzureStorage"})
    containers_client = protectable_containers_cf(cli_ctx)
    response = containers_client.list(vault_name, resource_group_name, fabric_name, storage_filter)
    return helper.get_list_from_paged_response(response)
def _get_protectable_item_for_afs(cli_ctx, vault_name, resource_group_name, afs_name, storage_account):
    """Locate the protectable item for an Azure file share.

    First tries the already-discovered protectable items; if that fails,
    triggers an inquiry on the storage account container (a service-side
    discovery) and retries once. Returns None when still not found.
    """
    storage_account_name = storage_account.name
    protection_containers_client = protection_containers_cf(cli_ctx)
    protectable_item = _try_get_protectable_item_for_afs(
        cli_ctx, vault_name, resource_group_name, afs_name, storage_account_name)
    if protectable_item is None:
        # Not discovered yet — ask the service to (re)enumerate file shares
        # in this storage account, wait for the inquiry, then retry the lookup.
        filter_string = helper.get_filter_string(
            {'workloadType': "AzureFileShare"})
        result = protection_containers_client.inquire(vault_name, resource_group_name, fabric_name,
                                                      storage_account.name, filter=filter_string, raw=True)
        helper.track_inquiry_operation(cli_ctx, result, vault_name, resource_group_name, storage_account.name)
        protectable_item = _try_get_protectable_item_for_afs(
            cli_ctx, vault_name, resource_group_name, afs_name, storage_account_name)
    return protectable_item
def _get_containers(client, backup_management_type, status, resource_group_name, vault_name, container_name=None,
                    use_secondary_region=None):
    """Return registered containers, optionally filtered by (native or
    friendly) container name, rejecting CRR-unsupported types for the
    secondary region."""
    filter_dict = {
        'backupManagementType': backup_management_type,
        'status': status}
    # Friendly names are filtered server-side; native names client-side below.
    if container_name and not custom_help.is_native_name(container_name):
        filter_dict['friendlyName'] = container_name
    filter_string = custom_help.get_filter_string(filter_dict)

    if use_secondary_region and backup_management_type.lower() in crr_not_supported_bmt:
        raise InvalidArgumentValueError(""" --use-secondary-region flag is not supported for the --backup-management-type provided. Please either remove the flag or query for any other backup-management-type. """)

    containers = custom_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, filter_string))

    if container_name and custom_help.is_native_name(container_name):
        wanted = container_name.lower()
        return [container for container in containers if container.name.lower() == wanted]
    return containers
def list_recovery_points(client, resource_group_name, vault_name, item, start_date=None, end_date=None):
    """List recovery points for a protected item within an optional window."""
    # Resolve container/item URIs from the protected item's ARM id.
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = helper.get_query_dates(end_date, start_date)
    window_filter = helper.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})

    response = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, window_filter)
    return helper.get_list_from_paged_response(response)
def list_policies(client, resource_group_name, vault_name, workload_type=None, backup_management_type=None,
                  policy_sub_type=None):
    """List backup policies, optionally filtered by workload type, backup
    management type, and policy sub-type ('Enhanced' selects V2 policies)."""
    workload_type = _check_map(workload_type, workload_type_map)
    policy_filter = custom_help.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': workload_type})
    paged_policies = custom_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, policy_filter))

    if not policy_sub_type:
        return paged_policies

    if policy_sub_type == 'Enhanced':
        # Enhanced policies are reported by the service as policy_type 'V2'.
        return [policy for policy in paged_policies
                if (hasattr(policy.properties, 'policy_type') and
                    policy.properties.policy_type == 'V2')]
    # Standard policies either predate policy_type or carry V1/None.
    return [policy for policy in paged_policies
            if (not hasattr(policy.properties, 'policy_type') or
                policy.properties.policy_type is None or
                policy.properties.policy_type == 'V1')]
def _try_get_protectable_item_for_afs(cli_ctx, vault_name, resource_group_name, afs_name, storage_account_name):
    """Return the unique protectable item matching the given file share and
    storage account, or None when nothing matches.

    Both names may be native (service-side) or friendly; friendly names are
    matched against the item's friendly-name properties.

    :raises CLIError: when the names match more than one resource.
    """
    backup_protectable_items_client = backup_protectable_items_cf(cli_ctx)
    # NOTE(review): backup_management_type / workload_type are free names here,
    # presumably module-level constants in this module — confirm against file top.
    filter_string = helper.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': workload_type})
    protectable_items_paged = backup_protectable_items_client.list(vault_name, resource_group_name, filter_string)
    protectable_items = helper.get_list_from_paged_response(protectable_items_paged)

    result = protectable_items
    if helper.is_native_name(storage_account_name):
        # Fix: normalise the ARM-id segment before comparing with the already
        # lower-cased account name — ARM ids are case-insensitive, so the
        # previous case-sensitive comparison could miss valid matches.
        result = [protectable_item for protectable_item in result
                  if protectable_item.id.split('/')[12].lower() == storage_account_name.lower()]
    else:
        result = [protectable_item for protectable_item in result
                  if protectable_item.properties.parent_container_friendly_name.lower() ==
                  storage_account_name.lower()]
    if helper.is_native_name(afs_name):
        result = [protectable_item for protectable_item in result
                  if protectable_item.name.lower() == afs_name.lower()]
    else:
        result = [protectable_item for protectable_item in result
                  if protectable_item.properties.friendly_name.lower() == afs_name.lower()]
    if len(result) > 1:
        raise CLIError("Could not find a unique resource, Please pass native names instead")
    if len(result) == 1:
        return result[0]
    return None
def list_protectable_items(client, resource_group_name, vault_name, workload_type, container_uri=None,
                           protectable_item_type=None):
    """List AzureWorkload protectable items, optionally filtered by
    protectable item type and/or parent container URI (both matched
    case-insensitively)."""
    workload_type = _check_map(workload_type, workload_type_map)
    if protectable_item_type is not None:
        protectable_item_type = _check_map(protectable_item_type, protectable_item_type_map)
    filter_string = cust_help.get_filter_string({
        'backupManagementType': "AzureWorkload",
        'workloadType': workload_type
    })

    # Items list
    items = client.list(vault_name, resource_group_name, filter_string)
    paged_items = cust_help.get_list_from_paged_response(items)

    if protectable_item_type is not None:
        # Protectable Item Type filter.
        # Fix: skip items whose protectable_item_type is None — previously
        # .lower() raised AttributeError on such items (the sibling overload of
        # this function already guards against this).
        paged_items = [
            item for item in paged_items
            if item.properties.protectable_item_type is not None and
            item.properties.protectable_item_type.lower() == protectable_item_type.lower()
        ]
    if container_uri:
        # Container URI filter.
        return [
            item for item in paged_items
            if cust_help.get_protection_container_uri_from_id(item.id).lower() == container_uri.lower()
        ]
    return paged_items
def _fetch_nodes_list_and_auto_protection_policy(cmd, paged_items, resource_group_name, vault_name):
    """Enrich SQL protectable items in-place with extra display information.

    For SQL instances and availability groups, attaches the auto-protection
    policy id (if an intent exists); for availability groups, also attaches
    the cluster node list from the container's extended info. Mutates the
    items in ``paged_items``; returns nothing.
    """
    protection_intent_client = backup_protection_intent_cf(cmd.cli_ctx)
    protection_containers_client = protection_containers_cf(cmd.cli_ctx)
    for item in paged_items:
        item_id = item.id
        protectable_item_type = item.properties.protectable_item_type
        protectable_item_name = item.properties.friendly_name
        container_name = cust_help.get_protection_container_uri_from_id(item_id)

        # fetch AutoProtectionPolicy for SQLInstance and SQLAG
        if protectable_item_type and protectable_item_type.lower() in ['sqlinstance',
                                                                       'sqlavailabilitygroupcontainer']:
            # Default to None so the attribute is always present on output.
            setattr(item.properties, "auto_protection_policy", None)
            filter_string = cust_help.get_filter_string({
                'backupManagementType': "AzureWorkload",
                'itemType': protectable_item_type,
                'itemName': protectable_item_name,
                'parentName': container_name})
            protection_intents = protection_intent_client.list(vault_name, resource_group_name, filter_string)
            paged_protection_intents = cust_help.get_list_from_paged_response(protection_intents)
            if paged_protection_intents:
                # Use the first matching intent's policy id.
                item.properties.auto_protection_policy = paged_protection_intents[0].properties.policy_id

        # fetch NodesList for SQLAG
        if protectable_item_type and protectable_item_type.lower() == 'sqlavailabilitygroupcontainer':
            setattr(item.properties, "nodes_list", None)
            container = protection_containers_client.get(vault_name, resource_group_name, fabric_name,
                                                         container_name)
            if container.properties.extended_info:
                item.properties.nodes_list = container.properties.extended_info.nodes_list
def initialize_protectable_items(client, resource_group_name, vault_name, container_name, workload_type):
    """Trigger discovery (inquiry) of protectable items of the given workload
    type inside a registered container."""
    mapped_workload_type = workload_type_map[workload_type]
    inquiry_filter = cust_help.get_filter_string({
        'backupManagementType': 'AzureWorkload',
        'workloadType': mapped_workload_type})
    return client.inquire(vault_name, resource_group_name, fabric_name, container_name, inquiry_filter)
def enable_for_AzureFileShare(cmd, client, resource_group_name, vault_name, afs_name, storage_account_name,
                              policy_name):
    """Enable backup for an Azure file share.

    Finds (registering it first if necessary) the storage account container,
    validates the policy, resolves the file share's protectable item, and
    creates the protected item. Returns the tracked backup job.

    :raises CLIError: when the storage account cannot be found or supported.
    """
    # get registered storage accounts
    storage_account = None
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    registered_containers = common.list_containers(containers_client, resource_group_name, vault_name,
                                                   "AzureStorage")
    storage_account = _get_storage_account_from_list(registered_containers, storage_account_name)

    # get unregistered storage accounts
    if storage_account is None:
        unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name, vault_name)
        storage_account = _get_storage_account_from_list(unregistered_containers, storage_account_name)

        if storage_account is None:
            # refresh containers in the vault
            protection_containers_client = protection_containers_cf(cmd.cli_ctx)
            filter_string = helper.get_filter_string({'backupManagementType': "AzureStorage"})
            refresh_result = protection_containers_client.refresh(vault_name, resource_group_name, fabric_name,
                                                                  filter=filter_string, raw=True)
            helper.track_refresh_operation(cmd.cli_ctx, refresh_result, vault_name, resource_group_name)

            # refetch the protectable containers after refresh
            unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name, vault_name)
            storage_account = _get_storage_account_from_list(unregistered_containers, storage_account_name)

            if storage_account is None:
                raise CLIError("Storage account not found or not supported.")

        # register storage account (only needed for unregistered accounts)
        protection_containers_client = protection_containers_cf(cmd.cli_ctx)
        properties = AzureStorageContainer(backup_management_type="AzureStorage",
                                           source_resource_id=storage_account.properties.container_id,
                                           workload_type="AzureFileShare")
        param = ProtectionContainerResource(properties=properties)
        result = protection_containers_client.register(vault_name, resource_group_name, fabric_name,
                                                       storage_account.name, param, raw=True)
        helper.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, storage_account.name)

    policy = common.show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)
    helper.validate_policy(policy)

    protectable_item = _get_protectable_item_for_afs(cmd.cli_ctx, vault_name, resource_group_name, afs_name,
                                                     storage_account)
    helper.validate_azurefileshare_item(protectable_item)

    container_uri = helper.get_protection_container_uri_from_id(protectable_item.id)
    item_uri = helper.get_protectable_item_uri_from_id(protectable_item.id)
    item_properties = AzureFileshareProtectedItem()
    item_properties.policy_id = policy.id
    item_properties.source_resource_id = protectable_item.properties.parent_container_fabric_id
    item = ProtectedItemResource(properties=item_properties)

    result = client.create_or_update(vault_name, resource_group_name, fabric_name, container_uri, item_uri, item,
                                     raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                            extended_info=None, is_ready_for_move=None, target_tier=None, use_secondary_region=None,
                            tier=None, recommended_for_archive=None):
    """List recovery points for an Azure Workload protected item, with
    optional date-range, tier and move-readiness filters and secondary-region
    (CRR) support.

    :raises ArgumentUsageError: --recommended-for-archive is only implemented
        for the AzureIaasVM backup management type.
    """
    if recommended_for_archive is not None:
        raise ArgumentUsageError("""--recommended-for-archive is supported by AzureIaasVM backup management type only.""")

    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = cust_help.get_query_dates(end_date, start_date)
    if query_end_date and query_start_date:
        # Validate the window only when both endpoints were supplied.
        cust_help.is_range_valid(query_start_date, query_end_date)

    filter_string = cust_help.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})

    # The show-log-chain command (or an explicit extended-info request) asks
    # the service for 'Log' restore points instead of the default set.
    if cmd.name.split()[2] == 'show-log-chain' or extended_info is not None:
        filter_string = cust_help.get_filter_string({
            'restorePointQueryType': 'Log',
            'startDate': query_start_date,
            'endDate': query_end_date,
            'extendedInfo': extended_info})

    if use_secondary_region:
        # Swap in the cross-region-restore client for secondary-region queries.
        client = recovery_points_crr_cf(cmd.cli_ctx)

    # Get recovery points
    recovery_points = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  filter_string)
    paged_recovery_points = cust_help.get_list_from_paged_response(recovery_points)
    common.fetch_tier(paged_recovery_points)

    if use_secondary_region:
        # Archived recovery points cannot be restored cross-region; drop them.
        paged_recovery_points = [item for item in paged_recovery_points
                                 if item.properties.recovery_point_tier_details is None or
                                 (item.properties.recovery_point_tier_details is not None and
                                  item.tier_type != 'VaultArchive')]

    recovery_point_list = common.check_rp_move_readiness(paged_recovery_points, target_tier, is_ready_for_move)
    recovery_point_list = common.filter_rp_based_on_tier(recovery_point_list, tier)
    return recovery_point_list
def list_workload_items(cmd, vault_name, resource_group_name, container_name,
                        container_type="AzureWorkload", workload_type="SQLInstance"):
    """List workload items (e.g. SQL instances) inside a registered container."""
    item_filter = cust_help.get_filter_string({
        'backupManagementType': container_type,
        'workloadItemType': workload_type})
    response = backup_workload_items_cf(cmd.cli_ctx).list(vault_name, resource_group_name, fabric_name,
                                                          container_name, item_filter)
    return cust_help.get_list_from_paged_response(response)
def list_protectable_containers(cmd, resource_group_name, vault_name, container_type="AzureWorkload"):
    """Return vault containers of the given type that can still be registered."""
    type_filter = cust_help.get_filter_string({'backupManagementType': container_type})
    containers_client = protectable_containers_cf(cmd.cli_ctx)
    response = containers_client.list(vault_name, resource_group_name, fabric_name, type_filter)
    return cust_help.get_list_from_paged_response(response)
def list_associated_items_for_policy(client, resource_group_name, vault_name, name, backup_management_type):
    """List protected items associated with the named backup policy."""
    policy_filter = custom_help.get_filter_string({
        'policyName': name,
        'backupManagementType': backup_management_type})
    response = client.list(vault_name, resource_group_name, policy_filter)
    return custom_help.get_list_from_paged_response(response)
def list_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                         use_secondary_region=None, is_ready_for_move=None, target_tier=None, tier=None,
                         recommended_for_archive=None):
    """List recovery points for an AzureStorage protected item.

    AzureStorage does not support secondary-region queries, archive-tier
    flags, or log chains — those options are rejected up front.
    """
    if use_secondary_region:
        raise ArgumentUsageError(""" --use-secondary-region flag is not supported for --backup-management-type AzureStorage. Please either remove the flag or query for any other backup-management-type. """)
    if is_ready_for_move is not None or target_tier is not None or tier is not None:
        raise ArgumentUsageError("""Invalid argument has been passed. --is-ready-for-move true, --target-tier and --tier flags are not supported for --backup-management-type AzureStorage.""")
    if recommended_for_archive is not None:
        raise ArgumentUsageError("""--recommended-for-archive is supported by AzureIaasVM backup management type only.""")
    if cmd.name.split()[2] == 'show-log-chain':
        raise ArgumentUsageError("show-log-chain is supported by AzureWorkload backup management type only.")

    # Resolve container/item URIs from the protected item's ARM id.
    container_uri = helper.get_protection_container_uri_from_id(item.id)
    item_uri = helper.get_protected_item_uri_from_id(item.id)

    query_end_date, query_start_date = helper.get_query_dates(end_date, start_date)
    window_filter = helper.get_filter_string({
        'startDate': query_start_date,
        'endDate': query_end_date})

    response = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, window_filter)
    return helper.get_list_from_paged_response(response)
def list_policies(client, resource_group_name, vault_name, workload_type=None, backup_management_type=None):
    """List backup policies, optionally filtered by workload type and backup
    management type."""
    policy_filter = custom_help.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': workload_type})
    response = client.list(vault_name, resource_group_name, policy_filter)
    return custom_help.get_list_from_paged_response(response)
def list_protectable_items(client, resource_group_name, vault_name, workload_type, container_uri=None):
    """List AzureWorkload protectable items, optionally restricted to one
    container (matched case-insensitively on its URI)."""
    mapped_type = workload_type_map[workload_type]
    item_filter = cust_help.get_filter_string({
        'backupManagementType': "AzureWorkload",
        'workloadType': mapped_type})

    paged_items = cust_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, item_filter))

    if not container_uri:
        return paged_items
    wanted = container_uri.lower()
    return [item for item in paged_items
            if cust_help.get_protection_container_uri_from_id(item.id).lower() == wanted]
def _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region):
    """Return the log time ranges of the item's single 'Log' recovery point,
    using the CRR endpoint when the secondary region is requested."""
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)
    log_filter = cust_help.get_filter_string({'restorePointQueryType': 'Log'})

    rp_client = recovery_points_cf(cmd.cli_ctx)
    if use_secondary_region:
        rp_client = recovery_points_crr_cf(cmd.cli_ctx)

    response = rp_client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, log_filter)
    log_rp = cust_help.get_none_one_or_many(cust_help.get_list_from_paged_response(response))
    # Raises when zero or multiple 'Log' recovery points are returned.
    _check_none_and_many(log_rp, "Log time range")
    return log_rp.properties.time_ranges
def list_items(cmd, client, resource_group_name, vault_name, workload_type=None, container_name=None,
               container_type=None, use_secondary_region=None):
    """List protected items, optionally from the secondary (CRR) region and/or
    narrowed to a single container."""
    workload_type = _check_map(workload_type, workload_type_map)
    backend_filter = custom_help.get_filter_string({
        'backupManagementType': container_type,
        'itemType': workload_type})

    if use_secondary_region:
        # CRR listing requires an explicit, CRR-capable backup management type.
        if container_type is None:
            raise RequiredArgumentMissingError(""" Provide --backup-management-type to list protected items in secondary region """)
        if container_type and container_type.lower() in crr_not_supported_bmt:
            raise InvalidArgumentValueError(""" --use-secondary-region flag is not supported for the --backup-management-type provided. Please either remove the flag or query for any other backup-management-type. """)
        client = backup_protected_items_crr_cf(cmd.cli_ctx)

    paged_items = custom_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, backend_filter))

    if not container_name:
        return paged_items
    # Native container names match exactly; friendly names are compared with
    # the last ';'-separated segment of the item's container name.
    if custom_help.is_native_name(container_name):
        return [item for item in paged_items if _is_container_name_match(item, container_name)]
    return [item for item in paged_items
            if item.properties.container_name.lower().split(';')[-1] == container_name.lower()]
def list_wl_policies(client, resource_group_name, vault_name, workload_type, backup_management_type):
    """List Azure Workload backup policies for the given workload and backup
    management type (both required).

    :raises RequiredArgumentMissingError: when either argument is missing.
    """
    if workload_type is None:
        raise RequiredArgumentMissingError(""" Workload type is required for Azure Workload. Use --workload-type. """)

    if backup_management_type is None:
        # Fix: raise the specific missing-argument error, consistent with the
        # workload_type check above. RequiredArgumentMissingError derives from
        # CLIError, so existing callers catching CLIError still work.
        raise RequiredArgumentMissingError(""" Backup Management Type needs to be specified for Azure Workload. """)

    workload_type = _check_map(workload_type, workload_type_map)
    filter_string = cust_help.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': workload_type
    })

    policies = client.list(vault_name, resource_group_name, filter_string)
    return cust_help.get_list_from_paged_response(policies)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None, use_secondary_region=None):
    """Trigger a restore for an Azure Workload item from a recovery-config
    file (as produced by the recoveryconfig command) and track the job.

    Handles point-in-time (log) restores, archive-tier rehydration, alternate-
    location restores, and cross-region restores to the secondary region.

    :raises InvalidArgumentValueError: when the recovery point is missing or
        is archived without rehydration parameters.
    :raises MutuallyExclusiveArgumentError: archive rehydration combined with
        secondary region.
    """
    # The recovery config can be an inline JSON string or a file path.
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)

    # A concrete recovery point is only looked up for non-log restores.
    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                    vault_name, container_uri, item_uri, recovery_point_id,
                                                    workload_type, "AzureWorkload", use_secondary_region)

        if recovery_point is None:
            raise InvalidArgumentValueError(""" Specified recovery point not found. Please check the recovery config file or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        # Archived recovery points must be rehydrated before restore.
        if (recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive'):
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)
            # ISO-8601 duration, e.g. 'P15D'.
            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)
            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group, target_vault_name,
                                                 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Full database restore to an alternate location.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info',
                    TargetRestoreInfo(overwrite_option='Overwrite', database_name=database_name,
                                      container_id=container_id))
            if 'sql' in item_type.lower():
                # Map each (type, source path, logical name, target path) tuple.
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # Restore as files to a directory instead of a live database.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        # Validate the requested point in time against the available log chain.
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        # Cross-region restore: resolve the paired region, obtain a CRR access
        # token for the recovery point, and trigger through the CRR client.
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name, container_uri,
                                                      item_uri, recovery_point_id, aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  recovery_point_id, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)