def show_item(cmd, client, resource_group_name, vault_name, container_name, name, backup_management_type=None,
              workload_type=None):
    return common.show_item(cmd, client, resource_group_name, vault_name, container_name, name,
                            backup_management_type, workload_type)
def enable_for_AzureFileShare(cmd, client, resource_group_name, vault_name, afs_name,
                              storage_account_name, policy_name):
    # get registered storage accounts
    storage_account = None
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    registered_containers = common.list_containers(containers_client, resource_group_name, vault_name,
                                                   "AzureStorage")
    storage_account = _get_storage_account_from_list(registered_containers, storage_account_name)

    # get unregistered storage accounts
    if storage_account is None:
        unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name, vault_name)
        storage_account = _get_storage_account_from_list(unregistered_containers, storage_account_name)

        if storage_account is None:
            # refresh containers in the vault
            protection_containers_client = protection_containers_cf(cmd.cli_ctx)
            filter_string = helper.get_filter_string({'backupManagementType': "AzureStorage"})
            refresh_result = protection_containers_client.refresh(vault_name, resource_group_name, fabric_name,
                                                                  filter=filter_string, raw=True)
            helper.track_refresh_operation(cmd.cli_ctx, refresh_result, vault_name, resource_group_name)

            # refetch the protectable containers after refresh
            unregistered_containers = list_protectable_containers(cmd.cli_ctx, resource_group_name, vault_name)
            storage_account = _get_storage_account_from_list(unregistered_containers, storage_account_name)

            if storage_account is None:
                raise CLIError("Storage account not found or not supported.")

        # register storage account
        protection_containers_client = protection_containers_cf(cmd.cli_ctx)
        properties = AzureStorageContainer(backup_management_type="AzureStorage",
                                           source_resource_id=storage_account.properties.container_id,
                                           workload_type="AzureFileShare")
        param = ProtectionContainerResource(properties=properties)
        result = protection_containers_client.register(vault_name, resource_group_name, fabric_name,
                                                       storage_account.name, param, raw=True)
        helper.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, storage_account.name)

    protectable_item = _get_protectable_item_for_afs(cmd.cli_ctx, vault_name, resource_group_name, afs_name,
                                                     storage_account)
    if protectable_item is None:
        items_client = backup_protected_items_cf(cmd.cli_ctx)
        item = common.show_item(cmd, items_client, resource_group_name, vault_name, storage_account_name,
                                afs_name, "AzureStorage")
        if item is None:
            raise CLIError("Could not find a fileshare with name " + afs_name +
                           " to protect or a protected fileshare of name " + afs_name)
        return item

    policy = common.show_policy(protection_policies_cf(cmd.cli_ctx), resource_group_name, vault_name, policy_name)
    helper.validate_policy(policy)
    helper.validate_azurefileshare_item(protectable_item)

    container_uri = helper.get_protection_container_uri_from_id(protectable_item.id)
    item_uri = helper.get_protectable_item_uri_from_id(protectable_item.id)

    item_properties = AzureFileshareProtectedItem()
    item_properties.policy_id = policy.id
    item_properties.source_resource_id = protectable_item.properties.parent_container_fabric_id
    item = ProtectedItemResource(properties=item_properties)

    result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                     container_uri, item_uri, item, raw=True)
    return helper.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
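# Illustrative only: a minimal sketch of the matching that a helper like
# _get_storage_account_from_list is assumed to perform above -- scanning the container
# resources returned by the containers clients for one whose friendly name matches the
# requested storage account. The attribute names are assumptions rather than a verified
# SDK contract, and the sketch name is hypothetical so it does not shadow the real helper.
def _sketch_get_storage_account_from_list(containers, storage_account_name):
    for container in containers:
        # Registered and protectable containers are assumed to expose the storage
        # account's friendly name on their properties.
        if container.properties.friendly_name.lower() == storage_account_name.lower():
            return container
    return None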
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name,
                         rp_name, target_item, target_item_name, log_point_in_time, from_full_rp_name,
                         filepath, target_container):
    if log_point_in_time is not None:
        datetime_type(log_point_in_time)

    if restore_mode == 'AlternateWorkloadRestore':
        if target_item is None:
            raise CLIError("""
                Target Item must be provided.
                """)

        protectable_item_type = target_item.properties.protectable_item_type
        if protectable_item_type.lower() not in ["sqlinstance", "saphanasystem"]:
            raise CLIError("""
                Target Item must be either of type HANAInstance or SQLInstance.
                """)

    if restore_mode == 'RestoreAsFiles' and target_container is None:
        raise CLIError("Target Container must be provided.")

    if rp_name is None and log_point_in_time is None:
        raise CLIError("""
            Log point in time or recovery point name must be provided.
            """)

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_name, item_name, "AzureWorkload")
    cust_help.validate_item(item)
    item_type = item.properties.workload_type
    item_name = item.name
    if not cust_help.is_sql(item_type) and not cust_help.is_hana(item_type):
        raise CLIError("""
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)

    # Mapping of restore mode
    restore_mode_map = {'OriginalWorkloadRestore': 'OriginalLocation',
                        'AlternateWorkloadRestore': 'AlternateLocation',
                        'RestoreAsFiles': 'AlternateLocation'}

    if rp_name is None and restore_mode == "RestoreAsFiles" and from_full_rp_name is not None:
        rp_name = from_full_rp_name
    rp_name = rp_name if rp_name is not None else 'DefaultRangeRecoveryPoint'

    if rp_name == 'DefaultRangeRecoveryPoint':
        recovery_points = list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item,
                                                  None, None, True)
        recovery_points = [rp for rp in recovery_points if rp.name == rp_name]

        if recovery_points == []:
            raise CLIError("""
                Invalid input.
                """)

        recovery_point = recovery_points[0]
    else:
        recovery_point = common.show_recovery_point(cmd, client, resource_group_name, vault_name, container_name,
                                                    item_name, rp_name, item_type,
                                                    backup_management_type="AzureWorkload")

    alternate_directory_paths = []
    if 'sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore':
        items = list_workload_items(cmd, vault_name, resource_group_name, container_name)
        for titem in items:
            if titem.properties.friendly_name == target_item.properties.friendly_name:
                if titem.properties.server_name == target_item.properties.server_name:
                    for path in recovery_point.properties.extended_info.data_directory_paths:
                        target_path = cust_help.get_target_path(path.type, path.path, path.logical_name,
                                                                titem.properties.data_directory_paths)
                        alternate_directory_paths.append((path.type, path.path, path.logical_name, target_path))

    db_name = None
    if restore_mode == 'AlternateWorkloadRestore':
        friendly_name = target_item.properties.friendly_name
        db_name = friendly_name + '/' + target_item_name

    container_id = None
    if restore_mode == 'AlternateWorkloadRestore':
        container_id = '/'.join(target_item.id.split('/')[:-2])

    if not ('sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore'):
        alternate_directory_paths = None

    recovery_mode = None
    if restore_mode == 'RestoreAsFiles':
        recovery_mode = 'FileRecovery'
        container_id = target_container.id

    return {
        'restore_mode': restore_mode_map[restore_mode],
        'container_uri': item.properties.container_name,
        'item_uri': item_name,
        'recovery_point_id': recovery_point.name,
        'log_point_in_time': log_point_in_time,
        'item_type': 'SQL' if 'sql' in item_type.lower() else 'SAPHana',
        'workload_type': item_type,
        'source_resource_id': item.properties.source_resource_id,
        'database_name': db_name,
        'container_id': container_id,
        'recovery_mode': recovery_mode,
        'filepath': filepath,
        'alternate_directory_paths': alternate_directory_paths}
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config, rehydration_duration=15,
                     rehydration_priority=None, use_secondary_region=None):
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    workload_type = recovery_config_object['workload_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']

    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_uri, item_uri, "AzureWorkload")
    cust_help.validate_item(item)
    validate_wl_restore(item, item_type, restore_mode, recovery_mode)

    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time, None)

    if log_point_in_time is None:
        recovery_point = common.show_recovery_point(cmd, recovery_points_cf(cmd.cli_ctx), resource_group_name,
                                                    vault_name, container_uri, item_uri, recovery_point_id,
                                                    workload_type, "AzureWorkload", use_secondary_region)

        if recovery_point is None:
            raise InvalidArgumentValueError("""
                Specified recovery point not found. Please check the recovery config file
                or try removing --use-secondary-region if provided""")

        common.fetch_tier_for_rp(recovery_point)

        if recovery_point.tier_type is not None and recovery_point.tier_type == 'VaultArchive':
            if rehydration_priority is None:
                raise InvalidArgumentValueError("""The selected recovery point is in archive tier, provide
                additional parameters of rehydration duration and rehydration priority.""")
            # normal rehydrated restore
            trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time,
                                                                       rehydration_priority)

            rehyd_duration = 'P' + str(rehydration_duration) + 'D'
            rehydration_info = RecoveryPointRehydrationInfo(rehydration_retention_duration=rehyd_duration,
                                                            rehydration_priority=rehydration_priority)
            trigger_restore_properties.recovery_point_rehydration_info = rehydration_info

    trigger_restore_properties.recovery_type = restore_mode

    # Get target vm id
    if container_id is not None:
        target_container_name = cust_help.get_protection_container_uri_from_id(container_id)
        target_resource_group = cust_help.get_resource_group_from_id(container_id)
        target_vault_name = cust_help.get_vault_from_arm_id(container_id)
        target_container = common.show_container(cmd, backup_protection_containers_cf(cmd.cli_ctx),
                                                 target_container_name, target_resource_group, target_vault_name,
                                                 'AzureWorkload')
        setattr(trigger_restore_properties, 'target_virtual_machine_id',
                target_container.properties.source_resource_id)

    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0],
                                                                 source_path=i[1],
                                                                 source_logical_name=i[2],
                                                                 target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode

    if log_point_in_time is not None:
        log_point_in_time = datetime_type(log_point_in_time)
        time_range_list = _get_log_time_range(cmd, resource_group_name, vault_name, item, use_secondary_region)
        validate_log_point_in_time(log_point_in_time, time_range_list)
        setattr(trigger_restore_properties, 'point_in_time', log_point_in_time)

    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)

    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)

    if use_secondary_region:
        if rehydration_priority is not None:
            raise MutuallyExclusiveArgumentError("Archive restore isn't supported for secondary region.")
        vault = vaults_cf(cmd.cli_ctx).get(resource_group_name, vault_name)
        vault_location = vault.location
        azure_region = custom.secondary_region_map[vault_location]
        aad_client = aad_properties_cf(cmd.cli_ctx)
        filter_string = cust_help.get_filter_string({'backupManagementType': 'AzureWorkload'})
        aad_result = aad_client.get(azure_region, filter_string)
        rp_client = recovery_points_passive_cf(cmd.cli_ctx)
        crr_access_token = rp_client.get_access_token(vault_name, resource_group_name, fabric_name, container_uri,
                                                      item_uri, recovery_point_id, aad_result).properties
        crr_client = cross_region_restore_cf(cmd.cli_ctx)
        trigger_restore_properties.region = azure_region
        trigger_crr_request = CrossRegionRestoreRequest(cross_region_restore_access_details=crr_access_token,
                                                        restore_request=trigger_restore_properties)
        result = crr_client.begin_trigger(azure_region, trigger_crr_request, cls=cust_help.get_pipeline_response,
                                          polling=False).result()
        return cust_help.track_backup_crr_job(cmd.cli_ctx, result, azure_region, vault.id)

    # Trigger restore and wait for completion
    result = client.begin_trigger(vault_name, resource_group_name, fabric_name, container_uri, item_uri,
                                  recovery_point_id, trigger_restore_request, cls=cust_help.get_pipeline_response,
                                  polling=False).result()
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
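# Illustrative only: a minimal sketch of how the two steps above are typically chained,
# mirroring `az backup recoveryconfig show` followed by `az backup restore restore-azurewl`.
# The client objects, container/item names, and recovery point ID are placeholders; the
# real command layer wires these up from the CLI context and command arguments.
def _sketch_original_location_restore(cmd, rp_client, restore_client, resource_group_name, vault_name):
    import json

    config = show_recovery_config(cmd, rp_client, resource_group_name, vault_name,
                                  restore_mode='OriginalWorkloadRestore',
                                  container_name='VMAppContainer;compute;example-rg;example-vm',
                                  item_name='SQLDataBase;mssqlserver;exampledb',
                                  rp_name='123456789012345',
                                  target_item=None, target_item_name=None,
                                  log_point_in_time=None, from_full_rp_name=None,
                                  filepath=None, target_container=None)

    # Persist the config so restore_azure_wl can read it back from disk via --recovery-config.
    with open('recoveryconfig.json', 'w') as config_file:
        json.dump(config, config_file)

    return restore_azure_wl(cmd, restore_client, resource_group_name, vault_name,
                            recovery_config='recoveryconfig.json')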