def fix_restore_time(host_snapshot_id, restore_time):
    if isinstance(restore_time, str):
        fix_restore_time_str = restore_time
        cdp_time_point = xdatetime.string2datetime(restore_time)
    else:
        fix_restore_time_str = restore_time.strftime(xdatetime.FORMAT_WITH_MICROSECOND)
        cdp_time_point = restore_time

    idle_time_list = find_idle_time(get_cluster_cdp_file_list(host_snapshot_id, cdp_time_point))
    if not idle_time_list:
        _logger.info('fix_restore_time idle_time_list is empty.Failed.ignore.')
        return False, fix_restore_time_str

    for idle_time in reversed(idle_time_list):
        if idle_time['start'] <= cdp_time_point <= idle_time['end']:
            _logger.info('datetime {} in safe range {}'.format(cdp_time_point, idle_time))
            # no correction needed
            return False, fix_restore_time_str
        if idle_time['start'] <= cdp_time_point:
            _logger.info('fix_restore_time {} -> {} end'.format(cdp_time_point, idle_time))
            return True, idle_time['end'].strftime(xdatetime.FORMAT_WITH_MICROSECOND)
    else:
        _logger.error(r'fix_restore_time can NOT find valid {} {}'.format(cdp_time_point, idle_time_list[0]))
        return False, fix_restore_time_str

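# Usage sketch (hedged): the id and timestamp below are the sample values from the
# __main__ block and are illustrative only. fix_restore_time() accepts either an ISO
# string or a datetime and returns (need_fix, restore_time_str): no correction is made
# when the requested point already falls inside an idle window, otherwise the point is
# moved to the end of the nearest earlier idle window.
#
#   need_fix, fixed_str = fix_restore_time(639, '2019-06-18T01:50:00.214514')
#   if need_fix:
#       _logger.info('restore time corrected to {}'.format(fixed_str))
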
def _disk_snapshot(self, point_type, storage_ident, host_snapshot, snapshot_time, logic):
    storagenode = StorageNode.objects.filter(ident=storage_ident).first()
    folder_path = os.path.join(storagenode.path, 'takeover_kvm_user_data')
    if not boxService.box_service.isFolderExist(folder_path):
        boxService.box_service.makeDirs(folder_path)
    if point_type == xdata.SNAPSHOT_TYPE_NORMAL:
        snapshot_time = None
    else:
        snapshot_time = xdatetime.string2datetime(snapshot_time)
        snapshot_time = snapshot_time.timestamp()
    return self._get_disk_snapshot(host_snapshot, snapshot_time, folder_path, logic)

def _get_host_cdp_snapshot_current_valid_rang(agent_host_obj, begin_datetime, end_timestamp):
    host_item = {'host_ident': agent_host_obj.ident,
                 'host_id': agent_host_obj.id,
                 'host_display_name': agent_host_obj.display_name,
                 'range': list()}
    api_request = {'begin': begin_datetime.strftime(xdatetime.FORMAT_ONLY_DATE),
                   'end': end_timestamp.strftime(xdatetime.FORMAT_ONLY_DATE),
                   'use_serializer': False}
    api_response = HostSnapshotsWithCdpPerHost().get(None, ident=agent_host_obj.ident, api_request=api_request)
    if not status.is_success(api_response.status_code):
        e = get_response_error_string(api_response)
        debug = "HostSnapshotsWithCdpPerHost().get(begin:{} end:{} ident:{}) failed {}".format(
            begin_datetime, end_timestamp, agent_host_obj.ident, api_response.status_code)
        _logger.error('_get_host_cdp_snapshot_current_valid_rang Failed.e={},debug={}'.format(e, debug))
        return host_item
    for host_snapshot in api_response.data:
        host_item['range'].append({'begin': xdatetime.string2datetime(host_snapshot['begin']).timestamp(),
                                   'end': xdatetime.string2datetime(host_snapshot['end']).timestamp()})
    return host_item

def get_host_backup_point(host_obj):
    from apiv1.views import HostSnapshotsWithCdpPerHost, HostSnapshotsWithNormalPerHost
    api_request = {
        'begin': '2016-01-01',
        'end': (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'),
        'finish': True,
        'use_serializer': False
    }
    host_snapshot_list = list()
    try:
        api_response = HostSnapshotsWithCdpPerHost().get(None, ident=host_obj.ident, api_request=api_request)
        if status.is_success(api_response.status_code):
            for host_snapshot in api_response.data:
                data = {
                    "id": '{}|{}|{}|{}'.format(xdata.SNAPSHOT_TYPE_CDP, host_snapshot['id'],
                                               host_snapshot['begin'], host_snapshot['end']),
                    "time": host_snapshot['begin'],
                }
                host_snapshot_list.append(data)
        api_response = HostSnapshotsWithNormalPerHost().get(request=None, ident=host_obj.ident, api_request=api_request)
        if status.is_success(api_response.status_code):
            for host_snapshot in api_response.data:
                data = {
                    "id": '{}|{}|{}'.format(xdata.SNAPSHOT_TYPE_NORMAL, host_snapshot['id'],
                                            host_snapshot['start_datetime']),
                    "time": host_snapshot['start_datetime'],
                }
                host_snapshot_list.append(data)
    except Exception as e:
        _logger.error('get_host_backup_point>>:{}'.format(e), exc_info=True)
    host_snapshot_list.sort(key=lambda o: xdatetime.string2datetime(o['time']))
    return host_snapshot_list

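# Usage sketch (hedged): host_obj is assumed to be a host model instance exposing
# ``ident``, as used above. The returned list is sorted by time; each CDP entry has
# id '<SNAPSHOT_TYPE_CDP>|<snapshot_id>|<begin>|<end>' and each normal entry has
# id '<SNAPSHOT_TYPE_NORMAL>|<snapshot_id>|<start_datetime>'.
#
#   for point in get_host_backup_point(host_obj):
#       _logger.info('backup point id={} time={}'.format(point['id'], point['time']))
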
def query_latest_host_backup(host_ident, last_host_snapshot_id, last_datetime):
    last_host_snapshot_id = int(last_host_snapshot_id)
    if last_host_snapshot_id <= 0:
        # 1. Return the latest host snapshot and its disk snapshot chain
        latest_host_snapshot, latest_disk_snapshots = RemoteBackupHelperRemote.get_host_latest_snapshots(host_ident)
        if not latest_disk_snapshots:
            return None
        return RemoteBackupHelperRemote._convert_host_snapshot_2_new_host_backup_info_with_snapshots(
            host_ident, latest_host_snapshot, latest_disk_snapshots)
    else:
        # Check whether any newer snapshot has been produced
        host_snapshots = RemoteBackupHelperRemote.query_host_snapshot_order_by_time(host_ident)
        # Get the snapshot point currently available for this host
        cur_snapshot = HostSnapshot.objects.get(id=last_host_snapshot_id)
        cur_snapshot_time = cur_snapshot.start_datetime
        # 2. Return the latest host snapshot and disk snapshots
        next_host_snapshot = host_snapshots.filter(start_datetime__gt=cur_snapshot_time).last()
        if last_datetime == '':
            # The last synchronized point was a normal backup point
            if not next_host_snapshot:
                return None
            else:
                pass
        else:
            if not next_host_snapshot:
                # No newer host snapshot exists; compare CDP progress instead
                last_backup_datetime = xdatetime.string2datetime(last_datetime)
                if cur_snapshot.cdp_info.last_datetime > last_backup_datetime:
                    pass
                else:
                    return None
        latest_host_snapshot, latest_disk_snapshots = RemoteBackupHelperRemote.get_host_latest_snapshots(host_ident)
        if not latest_disk_snapshots:
            return None
        return RemoteBackupHelperRemote._convert_host_snapshot_2_new_host_backup_info_with_snapshots(
            host_ident, latest_host_snapshot, latest_disk_snapshots)

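# Usage sketch (hedged): the ident below is hypothetical. Passing
# last_host_snapshot_id <= 0 asks for the newest point unconditionally; otherwise a
# result is returned only when a newer host snapshot exists, or when the same point
# has newer CDP data than last_datetime. None means there is nothing new to sync.
#
#   backup_info = query_latest_host_backup('hypothetical-host-ident', -1, '')
#   if backup_info is None:
#       _logger.info('no newer backup point to synchronize')
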
def get_report(request):
    host_ident = request.GET.get('host_ident')
    start_date = request.GET.get('stime')
    end_date = request.GET.get('endtime')
    result = {'r': 0, 'e': '操作成功', 'report': list()}
    start_date = xdatetime.string2datetime(start_date)
    end_date = xdatetime.string2datetime(end_date) + timedelta(days=1)
    point_type = '整机备份'
    api_request = {
        'begin': start_date.strftime(xdatetime.FORMAT_ONLY_DATE),
        'end': end_date.strftime(xdatetime.FORMAT_ONLY_DATE),
        'use_serializer': False
    }
    api_response = HostSnapshotsWithNormalPerHost().get(request=request, ident=host_ident, api_request=api_request)
    if not status.is_success(api_response.status_code):
        e = get_response_error_string(api_response)
        debug = "HostSnapshotsWithNormalPerHost().get(begin:{} end:{} ident:{}) failed {}".format(
            start_date, end_date, host_ident, api_response.status_code)
        return HttpResponse(json.dumps({"r": 1, "e": e, "debug": debug}, ensure_ascii=False))
    for host_snapshot in api_response.data:
        point_id = '{}|{}|{}'.format(xdata.SNAPSHOT_TYPE_NORMAL, host_snapshot['id'],
                                     host_snapshot['start_datetime'])
        result['report'].append({
            "id": point_id,
            "content": '{} {}'.format(point_type, host_snapshot['start_datetime']),
            "start": host_snapshot['start_datetime'],
            "group": _get_group_type('normal', point_id),
            'type': 'point'
        })
    api_response = HostSnapshotsWithCdpPerHost().get(None, ident=host_ident, api_request=api_request)
    if not status.is_success(api_response.status_code):
        e = get_response_error_string(api_response)
        debug = "HostSnapshotsWithCdpPerHost().get(begin:{} end:{} ident:{}) failed {}".format(
            start_date, end_date, host_ident, api_response.status_code)
        return HttpResponse(json.dumps({"r": 1, "e": e, "debug": debug}, ensure_ascii=False))
    for host_snapshot in api_response.data:
        point_id_filter = '{}|{}|{}'.format(xdata.SNAPSHOT_TYPE_CDP, host_snapshot['id'], host_snapshot['begin'])
        point_id = '{}|{}'.format(point_id_filter, host_snapshot['end'])
        result['report'].append({
            "id": point_id,
            "content": 'CDP备份 {}'.format(host_snapshot['begin']),
            "start": host_snapshot['begin'],
            "end": host_snapshot['end'],
            "group": _get_group_type('cdp', point_id_filter)
        })
    # Collect failed backups that left no restore point ("no_point" entries)
    host_snapshots = HostSnapshot.objects.filter(
        host__ident=host_ident, successful=False, finish_datetime__isnull=False,
        finish_datetime__gte=api_request['begin'], finish_datetime__lt=api_request['end'])
    i = 0
    for host_snapshot in host_snapshots:
        result['report'].append({
            "id": 'no_point_{}'.format(i),
            "content": '无备份点 {}'.format(host_snapshot.finish_datetime.strftime(xdatetime.FORMAT_WITH_USER_SECOND)),
            "start": host_snapshot.finish_datetime.strftime(xdatetime.FORMAT_WITH_USER_SECOND),
            'type': 'point',
            "group": 'no_point'
        })
        i = i + 1
    return HttpResponse(json.dumps(result, ensure_ascii=False))

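# Response sketch (hedged): a successful call returns JSON shaped like
#   {"r": 0, "e": "...", "report": [
#       {"id": "<SNAPSHOT_TYPE_NORMAL>|<id>|<start>", "content": "...", "start": "...",
#        "group": ..., "type": "point"},
#       {"id": "<SNAPSHOT_TYPE_CDP>|<id>|<begin>|<end>", "content": "...", "start": "...",
#        "end": "...", "group": ...},
#       {"id": "no_point_0", "content": "...", "start": "...", "type": "point", "group": "no_point"}
#   ]}
# while an API failure returns {"r": 1, "e": <error text>, "debug": <detail>}.
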
def get_times_scores_by_host_snapshot(host_snapshot_id, cdp_slice_end_time, query_start_time, query_end_time):
    query_start_datetime = datetime.strptime(query_start_time, '%Y-%m-%d %H:%M:%S.%f')
    query_end_datetime = datetime.strptime(query_end_time, '%Y-%m-%d %H:%M:%S.%f')
    disks_idents = query_current_disks_ident_by_host_snapshot(host_snapshot_id)
    host_snapshot = HostSnapshot.objects.get(id=host_snapshot_id)
    cdp_end_timestamp = xdatetime.string2datetime(cdp_slice_end_time).timestamp()
    validator_list = [GetSnapshotList.is_disk_snapshot_object_exist,
                      GetSnapshotList.is_disk_snapshot_file_exist]
    # CDP images of all disks belonging to this CDP slice
    disks_cdps_image_4slice = list()
    _disks_cdps_image_4slice = list()
    for disk_ident in disks_idents:
        cur_ident, rstamp = GetDiskSnapshot.query_cdp_disk_snapshot_ident(host_snapshot, disk_ident, cdp_end_timestamp)
        if cur_ident and rstamp:
            disk_snapshot = DiskSnapshot.objects.get(ident=cur_ident)
            disk_all_image_until_slice = GetSnapshotList.query_snapshots_by_snapshot_object(
                disk_snapshot, validator_list, rstamp)
            is_exist_chain(disk_all_image_until_slice, cur_ident)
            disk_cdps_image_4slice = get_disk_cdps_image_for_slice(disk_all_image_until_slice)
            disks_cdps_image_4slice += disk_cdps_image_4slice
            _disks_cdps_image_4slice.append(disk_cdps_image_4slice)
    # Keep only the disk CDP images that intersect the query window
    disks_cdps_image_within_query = filter_disk_cdp_image_within_query_time(
        query_start_datetime, query_end_datetime, disks_cdps_image_4slice)
    disks_cdps_image_path = [disk_image_obj.path for disk_image_obj in disks_cdps_image_within_query]
    disks_cdps_image_info = get_cdp_images_infos(disks_cdps_image_path)
    times_iovals = images_infos_convert_to_struct_time_ioval(disks_cdps_image_info)
    times_iovals = filter_time_ioval_within_query_time(query_start_datetime, query_end_datetime, times_iovals)
    times_scores = convert_times_iovals_to_times_scores(times_iovals)
    times_scores = get_constant_elems(times_scores, 200)
    # If no time points are found inside the query window, search to the left of the
    # window; if there is still nothing, fall back to the host_snapshot time of this CDP slice.
    if len(times_scores) == 0:
        disks_cdps_image_before_query = get_disks_cdps_image_lt_query_start_datetime(
            _disks_cdps_image_4slice, query_start_datetime)
        disks_cdps_image_path = [disk_cdp_image.path for disk_cdp_image in disks_cdps_image_before_query]
        disks_cdps_image_info = get_cdp_images_infos(disks_cdps_image_path)
        times_iovals = images_infos_convert_to_struct_time_ioval(disks_cdps_image_info)
        time_ioval = get_closed_time_ioval(times_iovals, query_start_datetime)
        closed_time = host_snapshot.start_datetime if time_ioval is None else time_ioval['time']
        times_scores = [{'time': closed_time, 'score': -1}]
    return times_scores

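# Usage sketch (hedged): the host snapshot id and CDP slice end time are the sample
# values from the __main__ block; the query window strings are hypothetical and must
# use the '%Y-%m-%d %H:%M:%S.%f' format expected above. The result is a list of
# {'time': ..., 'score': ...} dicts (at most 200 entries), or a single fallback entry
# with score -1 when no CDP time points exist inside the window.
#
#   scores = get_times_scores_by_host_snapshot(
#       639, '2019-06-18T05:22:38.370910',
#       '2019-06-18 01:49:50.000000', '2019-06-18 01:50:10.000000')
#   _logger.info('times_scores={}'.format(scores))
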
if __name__ == '__main__':
    host_snapshot_id = 639
    cdp_slice_end_time = '2019-06-18T05:22:38.370910'
    centre_time = xdatetime.string2datetime('2019-06-18T01:50:00.214514')
    window_secs = 20
    _list = get_cluster_io_daychart(host_snapshot_id, cdp_slice_end_time, centre_time, window_secs)
    _logger.info('list={}'.format(_list))

    # bNeedFix, fix_restore_time = fix_restore_time(3, '2019-06-10T16:56:52.093974')
    # _logger.info('bNeedFix={},fix_restore_time={}'.format(bNeedFix, fix_restore_time))
    # pointid = 'cdp|3|2019-06-10T13:45:01.025442|2019-06-10T16:56:52.093974'
    # score_list = get_cluster_io_daychart(3, '2019-06-10T16:56:52.093974', 1560157012, 20)
    # _logger.info(score_list)
    # cdp_file_list = get_cluster_cdp_file_list(host_snapshot_id, centre_time)
    # _logger.info('cdp_file_list={}'.format(cdp_file_list))
    # get_cdp_times_scores_by_host_snapshot(pointid)