def backup_now(cmd, client, resource_group_name, vault_name, item_name, retain_until=None,
               container_name=None, backup_management_type=None, workload_type=None,
               backup_type=None, enable_compression=False):
    """Trigger an on-demand backup of a protected item.

    Resolves the protected item and dispatches to the IaaS VM, Azure Files
    or Azure workload implementation based on the item's backup management
    type. Returns the dispatched call's result, or None for unhandled types.
    """
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                     vault_name, container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    mgmt_type = item.properties.backup_management_type.lower()
    if mgmt_type == "azureiaasvm":
        return custom.backup_now(cmd, client, resource_group_name, vault_name, item,
                                 retain_until)
    if mgmt_type == "azurestorage":
        return custom_afs.backup_now(cmd, client, resource_group_name, vault_name, item,
                                     retain_until)
    if mgmt_type == "azureworkload":
        return custom_wl.backup_now(cmd, client, resource_group_name, vault_name, item,
                                    retain_until, backup_type, enable_compression)
    return None
def undelete_protection(cmd, client, resource_group_name, vault_name, container_name,
                        item_name, backup_management_type, workload_type=None):
    """Move a soft-deleted protected item back to the protected state.

    Only AzureIaasVM and AzureWorkload items are handled; any other backup
    management type falls through and returns None.
    """
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                     vault_name, container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    mgmt_type = item.properties.backup_management_type.lower()
    if mgmt_type == "azureiaasvm":
        return custom.undelete_protection(cmd, client, resource_group_name, vault_name, item)
    if mgmt_type == "azureworkload":
        return custom_wl.undelete_protection(cmd, client, resource_group_name, vault_name, item)
    return None
def resume_protection(cmd, client, resource_group_name, vault_name, container_name, item_name,
                      policy_name, workload_type=None, backup_management_type=None):
    """Resume backup protection for a paused item with the given policy.

    Resolves and validates both the protected item and the policy, then
    dispatches to the AzureStorage or AzureWorkload implementation.
    Returns None for unhandled backup management types.
    """
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, backup_management_type, workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (an azclierror subclass of CLIError, so backward
        # compatible) for consistency with the sibling dispatchers.
        raise ValidationError(
            "Multiple items found. Please give native names instead.")
    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name,
                         vault_name, policy_name)
    custom_help.validate_policy(policy)
    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.resume_protection(cmd, client, resource_group_name, vault_name,
                                            item, policy)
    if item.properties.backup_management_type.lower() == "azureworkload":
        return custom_wl.resume_protection(cmd, client, resource_group_name, vault_name,
                                           item, policy)
    return None
def move_recovery_points(cmd, resource_group_name, vault_name, container_name, item_name,
                         rp_name, source_tier, destination_tier,
                         backup_management_type=None, workload_type=None):
    """Move a recovery point between storage tiers.

    Supported for AzureIaasVM and AzureWorkload items only; AzureStorage
    (and any other type) raises ArgumentUsageError.
    """
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                     vault_name, container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    mgmt_type = item.properties.backup_management_type.lower()
    if mgmt_type == "azureiaasvm":
        return custom.move_recovery_points(cmd, resource_group_name, vault_name, item,
                                           rp_name, source_tier, destination_tier)
    if mgmt_type == "azureworkload":
        return custom_wl.move_wl_recovery_points(cmd, resource_group_name, vault_name,
                                                 item, rp_name, source_tier,
                                                 destination_tier)
    raise ArgumentUsageError(
        'This command is not supported for --backup-management-type AzureStorage.')
def list_recovery_points(cmd, client, resource_group_name, vault_name, container_name,
                         item_name, backup_management_type=None, workload_type=None,
                         start_date=None, end_date=None):
    """List recovery points for a protected item within an optional date range.

    Dispatches to the IaaS VM or Azure Files implementation; other backup
    management types return None.
    """
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, backup_management_type, workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (CLIError subclass) for consistency with the other
        # dispatchers in this module.
        raise ValidationError(
            "Multiple items found. Please give native names instead.")
    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.list_recovery_points(client, resource_group_name, vault_name, item,
                                           start_date, end_date)
    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.list_recovery_points(client, resource_group_name, vault_name,
                                               item, start_date, end_date)
    return None
def disable_protection(cmd, client, resource_group_name, vault_name, item_name,
                       container_name, backup_management_type=None, workload_type=None,
                       delete_backup_data=False, **kwargs):
    """Stop backup protection for an item, optionally deleting backup data.

    Extra keyword arguments are forwarded unchanged to the type-specific
    implementation. Returns None for unhandled backup management types.
    """
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, backup_management_type, workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (CLIError subclass) for consistency with the other
        # dispatchers in this module.
        raise ValidationError(
            "Multiple items found. Please give native names instead.")
    if item.properties.backup_management_type.lower() == "azureiaasvm":
        return custom.disable_protection(cmd, client, resource_group_name, vault_name,
                                         item, delete_backup_data, **kwargs)
    if item.properties.backup_management_type.lower() == "azurestorage":
        return custom_afs.disable_protection(cmd, client, resource_group_name, vault_name,
                                             item, delete_backup_data, **kwargs)
    return None
def show_recovery_point(cmd, client, resource_group_name, vault_name, container_name,
                        item_name, name, workload_type=None, backup_management_type=None):
    """Fetch a single recovery point by name for a protected item.

    Resolves the item, derives the container/item URIs from its ARM id, and
    queries the recovery-point client directly.
    """
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, backup_management_type, workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (CLIError subclass) for consistency with the other
        # dispatchers in this module.
        raise ValidationError(
            "Multiple items found. Please give native names instead.")
    # Get container and item URIs
    container_uri = custom_help.get_protection_container_uri_from_id(item.id)
    item_uri = custom_help.get_protected_item_uri_from_id(item.id)
    return client.get(vault_name, resource_group_name, fabric_name, container_uri,
                      item_uri, name)
def show_recovery_point(cmd, client, resource_group_name, vault_name, container_name,
                        item_name, name, workload_type=None, backup_management_type=None,
                        use_secondary_region=None):
    """Fetch a recovery point by name, optionally from the secondary (CRR) region.

    In the secondary-region path the CRR recovery-point client is listed and
    filtered by name (case-insensitive), since a direct get is not available
    there; AzureStorage items are rejected because CRR does not support them.
    In the primary-region path the recovery point is fetched directly.
    """
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, backup_management_type, workload_type, use_secondary_region)
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (CLIError subclass) for consistency with the other
        # dispatchers in this module.
        raise ValidationError(
            "Multiple items found. Please give native names instead.")
    # Get container and item URIs
    container_uri = custom_help.get_protection_container_uri_from_id(item.id)
    item_uri = custom_help.get_protected_item_uri_from_id(item.id)
    container_type = custom_help.validate_and_extract_container_type(
        container_name, backup_management_type)
    if use_secondary_region:
        if container_type and container_type.lower() == "azurestorage":
            raise InvalidArgumentValueError(""" --use-secondary-region flag is not supported for --backup-management-type AzureStorage. Please either remove the flag or query for any other backup-management-type. """)
        client = recovery_points_crr_cf(cmd.cli_ctx)
        recovery_points = client.list(vault_name, resource_group_name, fabric_name,
                                      container_uri, item_uri, None)
        paged_rps = custom_help.get_list_from_paged_response(recovery_points)
        filtered_rps = [rp for rp in paged_rps if rp.name.lower() == name.lower()]
        recovery_point = custom_help.get_none_one_or_many(filtered_rps)
        if recovery_point is None:
            raise InvalidArgumentValueError(
                "The recovery point provided does not exist. Please provide valid RP.")
        return recovery_point
    try:
        response = client.get(vault_name, resource_group_name, fabric_name, container_uri,
                              item_uri, name)
    except Exception as ex:  # pylint: disable=broad-except
        # Chain the original exception so the root cause stays in the traceback.
        error_message = str(ex)
        raise InvalidArgumentValueError(
            "Specified recovery point can not be fetched - \n" + error_message) from ex
    return response
def update_protection_for_vm(cmd, client, resource_group_name, vault_name, container_name,
                             item_name, diskslist=None, disk_list_setting=None,
                             exclude_all_data_disks=None):
    """Update disk inclusion/exclusion settings for a protected IaaS VM.

    The item lookup is pinned to backup management type "AzureIaasVM" and
    workload type "VM" because disk-level settings only apply to VM backups.
    """
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, "AzureIaasVM", "VM")
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (CLIError subclass) for consistency with the other
        # dispatchers in this module.
        raise ValidationError("Multiple items found. Please give native names instead.")
    return custom.update_protection_for_vm(cmd, client, resource_group_name, vault_name,
                                           item, diskslist, disk_list_setting,
                                           exclude_all_data_disks)
def update_policy_for_item(cmd, client, resource_group_name, vault_name, container_name,
                           item_name, policy_name, workload_type=None,
                           backup_management_type=None, tenant_id=None):
    """Associate a different backup policy with an already-protected item.

    Validates both item and policy, checks whether Resource Guard marks
    "updateProtection" as a critical operation, then dispatches by backup
    management type. Returns None for unhandled types.
    """
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                     vault_name, container_name, item_name, backup_management_type,
                     workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    policy = show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name,
                         vault_name, policy_name)
    custom_help.validate_policy(policy)

    # Resource Guard may require extra (possibly cross-tenant) authorization.
    is_critical_operation = custom_help.has_resource_guard_mapping(
        cmd.cli_ctx, resource_group_name, vault_name, "updateProtection")

    dispatch = {
        "azureiaasvm": custom.update_policy_for_item,
        "azurestorage": custom_afs.update_policy_for_item,
        "azureworkload": custom_wl.update_policy_for_item,
    }
    handler = dispatch.get(item.properties.backup_management_type.lower())
    if handler is None:
        return None
    return handler(cmd, client, resource_group_name, vault_name, item, policy,
                   tenant_id, is_critical_operation)
def restore_azurefiles(cmd, client, resource_group_name, vault_name, rp_name, container_name,
                       item_name, restore_mode, resolve_conflict, target_storage_account=None,
                       target_file_share=None, target_folder=None, source_file_type=None,
                       source_file_path=None):
    """Restore an Azure file share (or individual files) from a recovery point.

    The item lookup is pinned to AzureStorage/AzureFileShare because this
    command only applies to file-share backups; the restore itself is an
    item-level restore delegated to the Azure Files implementation.
    """
    backup_management_type = "AzureStorage"
    workload_type = "AzureFileShare"
    items_client = backup_protected_items_cf(cmd.cli_ctx)
    item = show_item(cmd, items_client, resource_group_name, vault_name, container_name,
                     item_name, backup_management_type, workload_type)
    custom_help.validate_item(item)
    if isinstance(item, list):
        # ValidationError (CLIError subclass) for consistency with the other
        # dispatchers in this module.
        raise ValidationError("Multiple items found. Please give native names instead.")
    return custom_afs.restore_AzureFileShare(
        cmd, client, resource_group_name, vault_name, rp_name, item, restore_mode,
        resolve_conflict, "ItemLevelRestore",
        target_storage_account_name=target_storage_account,
        target_file_share_name=target_file_share,
        target_folder=target_folder,
        source_file_type=source_file_type,
        source_file_path=source_file_path)
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode,
                         container_name, item_name, rp_name, target_item, target_item_name,
                         log_point_in_time, from_full_rp_name, filepath, target_container):
    """Build the recovery-config dictionary consumed by the workload restore command.

    Validates the restore-mode-specific arguments, resolves the AzureWorkload
    protected item and recovery point, and (for SQL alternate-location
    restores) computes the per-file alternate directory path mappings.
    Returns a plain dict describing the restore request.
    """
    # Validate the point-in-time string early (datetime_type raises on bad input).
    if log_point_in_time is not None:
        datetime_type(log_point_in_time)
    # Alternate-location restores need a target item of a supported instance type.
    if restore_mode == 'AlternateWorkloadRestore':
        if target_item is None:
            raise CLIError(""" Target Item must be provided. """)
        protectable_item_type = target_item.properties.protectable_item_type
        if protectable_item_type.lower() not in ["sqlinstance", "saphanasystem"]:
            raise CLIError(""" Target Item must be either of type HANAInstance or SQLInstance. """)
    if restore_mode == 'RestoreAsFiles' and target_container is None:
        raise CLIError("Target Container must be provided.")
    # Either a named recovery point or a log point-in-time must identify the RP.
    if rp_name is None and log_point_in_time is None:
        raise CLIError(""" Log point in time or recovery point name must be provided. """)
    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                            vault_name, container_name, item_name, "AzureWorkload")
    cust_help.validate_item(item)
    item_type = item.properties.workload_type
    item_name = item.name
    if not cust_help.is_sql(item_type) and not cust_help.is_hana(item_type):
        raise CLIError(""" Item must be either of type SQLDataBase or SAPHanaDatabase. """)
    # Mapping of restore mode
    restore_mode_map = {'OriginalWorkloadRestore': 'OriginalLocation',
                        'AlternateWorkloadRestore': 'AlternateLocation',
                        'RestoreAsFiles': 'AlternateLocation'}
    # RestoreAsFiles may name the RP via the full-backup RP instead.
    if rp_name is None and restore_mode == "RestoreAsFiles" and from_full_rp_name is not None:
        rp_name = from_full_rp_name
    rp_name = rp_name if rp_name is not None else 'DefaultRangeRecoveryPoint'
    if rp_name == 'DefaultRangeRecoveryPoint':
        # NOTE(review): this lists recovery points and then filters for one
        # literally named 'DefaultRangeRecoveryPoint' — presumably the listing
        # (with the final True flag) yields a synthetic default-range point
        # with that name; confirm against list_wl_recovery_points.
        recovery_points = list_wl_recovery_points(cmd, client, resource_group_name,
                                                  vault_name, item, None, None, True)
        recovery_points = [rp for rp in recovery_points if rp.name == rp_name]
        if recovery_points == []:
            raise CLIError(""" Invalid input. 
""")
        recovery_point = recovery_points[0]
    else:
        recovery_point = common.show_recovery_point(
            cmd, client, resource_group_name, vault_name, container_name, item_name,
            rp_name, item_type, backup_management_type="AzureWorkload")
    # For SQL alternate-location restores, map each source data directory to a
    # target path on the destination instance.
    alternate_directory_paths = []
    if 'sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore':
        items = list_workload_items(cmd, vault_name, resource_group_name, container_name)
        for titem in items:
            if titem.properties.friendly_name == target_item.properties.friendly_name:
                if titem.properties.server_name == target_item.properties.server_name:
                    for path in recovery_point.properties.extended_info.data_directory_paths:
                        target_path = cust_help.get_target_path(
                            path.type, path.path, path.logical_name,
                            titem.properties.data_directory_paths)
                        alternate_directory_paths.append(
                            (path.type, path.path, path.logical_name, target_path))
    db_name = None
    if restore_mode == 'AlternateWorkloadRestore':
        friendly_name = target_item.properties.friendly_name
        db_name = friendly_name + '/' + target_item_name
    container_id = None
    if restore_mode == 'AlternateWorkloadRestore':
        # Target container id is the target item's ARM id with the last two
        # path segments stripped.
        container_id = '/'.join(target_item.id.split('/')[:-2])
    # Directory mappings only make sense for SQL alternate-location restores.
    if not ('sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore'):
        alternate_directory_paths = None
    recovery_mode = None
    if restore_mode == 'RestoreAsFiles':
        recovery_mode = 'FileRecovery'
        container_id = target_container.id
    return {
        'restore_mode': restore_mode_map[restore_mode],
        'container_uri': item.properties.container_name,
        'item_uri': item_name,
        'recovery_point_id': recovery_point.name,
        'log_point_in_time': log_point_in_time,
        'item_type': 'SQL' if 'sql' in item_type.lower() else 'SAPHana',
        'source_resource_id': item.properties.source_resource_id,
        'database_name': db_name,
        'container_id': container_id,
        'recovery_mode': recovery_mode,
        'filepath': filepath,
        'alternate_directory_paths': alternate_directory_paths
    }
def list_recovery_points(cmd, client, resource_group_name, vault_name, container_name,
                         item_name, backup_management_type=None, workload_type=None,
                         start_date=None, end_date=None, use_secondary_region=None,
                         is_ready_for_move=None, target_tier=None, tier=None,
                         recommended_for_archive=None):
    """List recovery points for a protected item, with optional archive-tier filters.

    Archive-based filters cannot be combined with the secondary-region flag.
    Dispatches by the item's backup management type; unhandled types return None.
    """
    item = show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                     vault_name, container_name, item_name, backup_management_type,
                     workload_type, use_secondary_region)
    custom_help.validate_item(item)
    if isinstance(item, list):
        raise ValidationError(
            "Multiple items found. Please give native names instead.")

    archive_filters_used = (is_ready_for_move is not None or target_tier is not None or
                            recommended_for_archive is not None)
    if use_secondary_region and archive_filters_used:
        raise MutuallyExclusiveArgumentError(
            "Archive based filtering is not supported in secondary region.")

    mgmt_type = item.properties.backup_management_type.lower()
    if mgmt_type == "azureiaasvm":
        return custom.list_recovery_points(cmd, client, resource_group_name, vault_name,
                                           item, start_date, end_date, use_secondary_region,
                                           is_ready_for_move, target_tier, tier,
                                           recommended_for_archive)
    if mgmt_type == "azurestorage":
        return custom_afs.list_recovery_points(cmd, client, resource_group_name, vault_name,
                                               item, start_date, end_date,
                                               use_secondary_region, is_ready_for_move,
                                               target_tier, tier, recommended_for_archive)
    if mgmt_type == "azureworkload":
        return custom_wl.list_wl_recovery_points(cmd, client, resource_group_name,
                                                 vault_name, item, start_date, end_date,
                                                 is_ready_for_move=is_ready_for_move,
                                                 target_tier=target_tier,
                                                 use_secondary_region=use_secondary_region,
                                                 tier=tier,
                                                 recommended_for_archive=recommended_for_archive)
    return None
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config,
                     rehydration_duration=15, rehydration_priority=None,
                     use_secondary_region=None):
    """Trigger an Azure workload (SQL/SAP HANA) restore from a recovery config.

    Reads the recovery-config file (or inline JSON) produced by
    show_recovery_config, builds the type-specific restore request —
    including archive rehydration and alternate-location/file-restore
    details — and triggers it, either cross-region (CRR) or in the
    primary region, returning the tracked job.
    """
    # The config may be a file path or already-parsed JSON.
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']
    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name,
                            vault_name, container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)
    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                               None)
    # A named recovery point is only used when no log point-in-time was given.
    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx),
                                                    resource_group_name, vault_name,
                                                    container_uri, item_uri,
                                                    recovery_point_id, workload_type,
                                                    "AzureWorkload", use_secondary_region)
        if recovery_point is None:
            raise InvalidArgumentValueError(""" Specified recovery point not found. 
Please check the recovery config file or try removing --use-secondary-region if provided""")
        common.fetch_tier_for_rp(recovery_point)
        # Archived recovery points must be rehydrated before restore.
        if (recovery_point.tier_type is not None and
                recovery_point.tier_type == 'VaultArchive'):
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide additional parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(
                item_type, log_point_in_time, rehydration_priority)
            # ISO-8601 duration, e.g. "P15D" for 15 days.
            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(
                rehydration_retention_duration=rehyd_duration,
                rehydration_priority=rehydration_priority)
            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info
    trigger_restore_properties.recovery_type = restore_mode
    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd,
                                                 backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group,
                                                 target_vault_name, 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)
    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Alternate-location database restore: overwrite at the target
            # instance, with SQL data-directory remapping when applicable.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info',
                    TargetRestoreInfo(overwrite_option='Overwrite',
                                      database_name=database_name,
                                      container_id=container_id))
            if 'sql' in item_type.lower():
                directory_map = []
                for i in alternate_directory_paths:
                    # Each entry is (mapping_type, source_path, source_logical_name,
                    # target_path) as produced by show_recovery_config.
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0],
                                                                 source_path=i[1],
                                                                 source_logical_name=i[2],
                                                                 target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths',
                        directory_map)
        else:
            # Restore-as-files: dump backup files into a directory on the
            # target container instead of restoring a live database.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite',
                                            container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode
    if log_point_in_time is not None:
        # Validate the requested time against the item's available log chain.
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item,
                                              use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)
    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)
    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)
    if use_secondary_region:
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError(
                "Archive restore isn't supported for secondary region.")
        # Cross-region restore: obtain an AAD-scoped CRR access token for the
        # recovery point, then trigger in the paired (secondary) region.
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name,
                                                      fabric_name, container_uri, item_uri,
                                                      recovery_point_id,
                                                      aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(
            cross_region_restore_access_details=crr_access_token,
            restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request,
                                          cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)
    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name,
                                  container_uri, item_uri, recovery_point_id,
                                  trigger_restore_request,
                                  cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)