def r_resize(uuid, size):
    args_rules = [
        Rules.UUID.value,
        Rules.DISK_SIZE_STR.value
    ]

    try:
        ji.Check.previewing(args_rules, {'uuid': uuid, 'size': size})

        disk = Disk()
        disk.uuid = uuid
        disk.get_by('uuid')

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)

        if disk.size >= int(size):
            ret['state'] = ji.Common.exchange_state(41257)
            return ret

        config = Config()
        config.id = 1
        config.get()

        disk.size = int(size)
        disk.quota(config=config)

        # The disk row in the database is updated later, in the event response
        # layer (around models/event_processor.py:224).
        message = {
            '_object': 'disk',
            'action': 'resize',
            'uuid': disk.uuid,
            'guest_uuid': disk.guest_uuid,
            'storage_mode': config.storage_mode,
            'size': disk.size,
            'dfs_volume': config.dfs_volume,
            'node_id': disk.node_id,
            'image_path': disk.path,
            'disks': [disk.__dict__],
            'passback_parameters': {'size': disk.size}
        }

        if config.storage_mode in [StorageMode.shared_mount.value, StorageMode.ceph.value,
                                   StorageMode.glusterfs.value]:
            message['node_id'] = Host.get_lightest_host()['node_id']

        if disk.guest_uuid.__len__() == 36:
            message['device_node'] = dev_table[disk.sequence]

        Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))

        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
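
# Illustrative only: the asynchronous round trip behind r_resize() above. The
# handler just emits the instruction; the disk row is updated later by
# EventProcessor.response_processor() when the compute node answers with the
# same value echoed through passback_parameters. All concrete values below are
# invented for the example.
example_resize_instruction = {
    '_object': 'disk',
    'action': 'resize',
    'uuid': '00000000-0000-0000-0000-000000000000',   # invented disk UUID
    'size': 20,                                       # requested new size
    'passback_parameters': {'size': 20},
    # storage_mode, dfs_volume, node_id, image_path and disks omitted here
}

# The matching report consumed by the event processor (kind == EmitKind.response.value,
# type == ResponseState.success.value) carries the same passback_parameters, from which
# cls.disk.size is taken before quota() and update() are applied.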
def r_delete(ids):
    ret = dict()
    ret['state'] = ji.Common.exchange_state(20000)

    config = Config()
    config.id = 1
    config.get()

    # Fetch all hosts that are currently alive
    available_hosts = Host.get_available_hosts(nonrandom=None)

    if available_hosts.__len__() == 0:
        ret['state'] = ji.Common.exchange_state(50351)
        return ret

    chosen_host = available_hosts[0]
    node_id = chosen_host['node_id']

    os_template_image = OSTemplateImage()

    # TODO: add a check for whether the image is referenced by any Guest.
    # First pass: fetch each specified template image.
    for _id in ids.split(','):
        os_template_image.id = _id
        os_template_image.get()

    for _id in ids.split(','):
        os_template_image.id = _id
        os_template_image.get()

        # Deleting public images from a compute node is not supported for now,
        # so only the database record is removed.
        if os_template_image.kind == OSTemplateImageKind.public.value:
            os_template_image.delete()
            continue

        elif os_template_image.kind == OSTemplateImageKind.custom.value:
            os_template_image.progress = 254

            message = {
                '_object': 'os_template_image',
                'action': 'delete',
                'storage_mode': config.storage_mode,
                'dfs_volume': config.dfs_volume,
                'template_path': os_template_image.path,
                # uuid carries no real meaning here; it only satisfies the
                # JimV-C instruction format.
                'uuid': None,
                'node_id': node_id,
                'os_template_image_id': os_template_image.id,
                'passback_parameters': {'id': os_template_image.id}
            }

            Utils.emit_instruction(message=json.dumps(message))

            os_template_image.update()

    return ret
def r_delete(uuids):
    args_rules = [
        Rules.UUIDS.value
    ]

    try:
        ji.Check.previewing(args_rules, {'uuids': uuids})

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)

        disk = Disk()

        # Verify that every disk specified by the UUIDs exists
        for uuid in uuids.split(','):
            disk.uuid = uuid
            disk.get_by('uuid')

            # Make sure the disk is detached from any guest
            if disk.state not in [DiskState.idle.value, DiskState.dirty.value]:
                ret['state'] = ji.Common.exchange_state(41256)
                return ret

        config = Config()
        config.id = 1
        config.get()

        # Perform the deletion
        for uuid in uuids.split(','):
            disk.uuid = uuid
            disk.get_by('uuid')

            message = {
                '_object': 'disk',
                'action': 'delete',
                'uuid': disk.uuid,
                'storage_mode': config.storage_mode,
                'dfs_volume': config.dfs_volume,
                'node_id': disk.node_id,
                'image_path': disk.path
            }

            if config.storage_mode in [StorageMode.shared_mount.value, StorageMode.ceph.value,
                                       StorageMode.glusterfs.value]:
                message['node_id'] = Host.get_lightest_host()['node_id']

            Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))

        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_get():
    config = Config()

    try:
        config.id = 1
        config.get()

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        ret['data'] = config.__dict__
        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_join(node_id, _token):
    # If this node_id has already been joined, the user must first delete the
    # stale node on the compute node side before adding it again.
    ret = dict()
    ret['state'] = ji.Common.exchange_state(20000)
    ret['data'] = dict()

    args_rules = [
        Rules.NODE_ID.value,
        Rules.TOKEN.value
    ]

    try:
        ji.Check.previewing(args_rules, {'node_id': node_id, 'token': _token})

        token = Token()
        token.token = _token

        # Validate the token
        if not token.valid():
            ret['state'] = ji.Common.exchange_state(41208)
            return ret

        nodes_id = list()
        hosts = Host.get_all()

        for host in hosts:
            nodes_id.append(host['node_id'])

        # Check whether node_id already exists
        if node_id in nodes_id:
            ret['state'] = ji.Common.exchange_state(40901)

        else:
            config = Config()
            config.id = 1
            config.get()

            ret['data']['redis_host'] = request.host
            ret['data']['redis_port'] = app_config.get('redis_port', 6379)
            ret['data']['redis_password'] = app_config.get('redis_password', '')
            ret['data']['redis_dbid'] = app_config.get('redis_dbid', 0)
            ret['data']['vm_network'] = config.vm_network
            ret['data']['vm_manage_network'] = config.vm_manage_network

        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
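
# Illustrative compute-node side of r_join(): a node that has been accepted
# could connect to the returned Redis endpoint like this. This sketch is not
# part of JimV-C; it only assumes the standard redis-py client, and the values
# below are invented examples of ret['data'] from the handler above.
import redis

join_data = {
    'redis_host': '192.168.1.10',
    'redis_port': 6379,
    'redis_password': '',
    'redis_dbid': 0,
    'vm_network': 'br0',
    'vm_manage_network': 'br1',
}

conn = redis.StrictRedis(host=join_data['redis_host'], port=join_data['redis_port'],
                         password=join_data['redis_password'], db=join_data['redis_dbid'])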
def r_detail(uuid):
    disk = Disk()
    disk.uuid = uuid
    disk.get_by(field='uuid')
    disk.wrap_device(dev_table=dev_table)

    guest = None
    os_template_image = None

    config = Config()
    config.id = 1
    config.get()

    if disk.sequence != -1:
        guest = Guest()
        guest.uuid = disk.guest_uuid
        guest.get_by('uuid')

        os_template_image = OSTemplateImage()
        os_template_image.id = guest.os_template_image_id
        os_template_image.get()

        guest = guest.__dict__
        os_template_image = os_template_image.__dict__

    ret = dict()
    ret['state'] = ji.Common.exchange_state(20000)
    ret['data'] = {
        'guest': guest,
        'os_template_image': os_template_image,
        'disk': disk.__dict__,
        'config': config.__dict__
    }

    return ret
def r_before_request():
    try:
        g.ts = ji.Common.ts()

        if not is_not_need_to_auth(request.endpoint) and \
                request.blueprint is not None and request.method != 'OPTIONS':
            g.config = Config()
            g.config.id = 1
            g.config.get()

            token = session.get('token', '')
            g.token = Utils.verify_token(token)

            user = User()
            user.id = g.token['uid']

            try:
                user.get()
            except ji.PreviewingError as e:
                # If fetching the user fails, clear that user's session,
                # because the user may have been deleted.
                for key in session.keys():
                    session.pop(key=key)

                return json.loads(e.message)

    except ji.JITError as e:
        ret = json.loads(e.message)

        if ret['state']['code'] == '404':
            return redirect(location=url_for('v_config.create'), Response=Response)

        if ret['state']['sub']['code'] in ['41208']:
            return redirect(location=url_for('v_misc.login'), Response=Response)

        return ret
def r_show():
    args = list()

    page = request.args.get('page', 1)
    if page == '':
        page = 1
    page = int(page)

    page_size = int(request.args.get('page_size', 20))
    keyword = request.args.get('keyword', None)
    show_area = request.args.get('show_area', 'unmount')
    guest_uuid = request.args.get('guest_uuid', None)
    sequence = request.args.get('sequence', None)
    order_by = request.args.get('order_by', None)
    order = request.args.get('order', None)
    filters = list()

    if page is not None:
        args.append('page=' + page.__str__())

    if page_size is not None:
        args.append('page_size=' + page_size.__str__())

    if keyword is not None:
        args.append('keyword=' + keyword.__str__())

    if guest_uuid is not None:
        filters.append('guest_uuid:in:' + guest_uuid.__str__())
        show_area = 'all'

    if sequence is not None:
        filters.append('sequence:in:' + sequence.__str__())
        show_area = 'all'

    if show_area in ['unmount', 'data_disk', 'all']:
        if show_area == 'unmount':
            filters.append('sequence:eq:-1')
        elif show_area == 'data_disk':
            filters.append('sequence:gt:0')
        else:
            pass
    else:
        # Mirror the front-end behaviour: on first open, only unmounted disks
        # are shown by default.
        filters.append('sequence:eq:-1')

    if order_by is not None:
        args.append('order_by=' + order_by)

    if order is not None:
        args.append('order=' + order)

    if filters.__len__() > 0:
        args.append('filter=' + ','.join(filters))

    hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
    disks_url = url_for('api_disks.r_get_by_filter', _external=True)

    if keyword is not None:
        disks_url = url_for('api_disks.r_content_search', _external=True)
        # Keyword search does not support show-area filtering.
        show_area = 'all'

    hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
    hosts_ret = json.loads(hosts_ret.content)

    hosts_mapping_by_node_id = dict()
    for host in hosts_ret['data']:
        hosts_mapping_by_node_id[int(host['node_id'])] = host

    if args.__len__() > 0:
        disks_url = disks_url + '?' + '&'.join(args)

    disks_ret = requests.get(url=disks_url, cookies=request.cookies)
    disks_ret = json.loads(disks_ret.content)

    guests_uuid = list()
    disks_uuid = list()

    for disk in disks_ret['data']:
        disks_uuid.append(disk['uuid'])

        if disk['guest_uuid'].__len__() == 36:
            guests_uuid.append(disk['guest_uuid'])

    if guests_uuid.__len__() > 0:
        guests, _ = Guest.get_by_filter(filter_str='uuid:in:' + ','.join(guests_uuid))

        guests_uuid_mapping = dict()
        for guest in guests:
            guests_uuid_mapping[guest['uuid']] = guest

        for i, disk in enumerate(disks_ret['data']):
            if disk['guest_uuid'].__len__() == 36:
                disks_ret['data'][i]['guest'] = guests_uuid_mapping[disk['guest_uuid']]

    if disks_uuid.__len__() > 0:
        snapshots_id_mapping_by_disks_uuid_url = url_for('api_snapshots.r_get_snapshots_by_disks_uuid',
                                                         disks_uuid=','.join(disks_uuid), _external=True)
        snapshots_id_mapping_by_disks_uuid_ret = requests.get(url=snapshots_id_mapping_by_disks_uuid_url,
                                                              cookies=request.cookies)
        snapshots_id_mapping_by_disks_uuid_ret = json.loads(snapshots_id_mapping_by_disks_uuid_ret.content)

        snapshots_id_mapping_by_disk_uuid = dict()

        for snapshot_id_mapping_by_disk_uuid in snapshots_id_mapping_by_disks_uuid_ret['data']:
            disk_uuid = snapshot_id_mapping_by_disk_uuid['disk_uuid']
            snapshot_id = snapshot_id_mapping_by_disk_uuid['snapshot_id']

            if disk_uuid not in snapshots_id_mapping_by_disk_uuid:
                snapshots_id_mapping_by_disk_uuid[disk_uuid] = list()

            snapshots_id_mapping_by_disk_uuid[disk_uuid].append(snapshot_id)

        for i, disk in enumerate(disks_ret['data']):
            if disk['uuid'] in snapshots_id_mapping_by_disk_uuid:
                disks_ret['data'][i]['snapshot'] = snapshots_id_mapping_by_disk_uuid[disk['uuid']]

    config = Config()
    config.id = 1
    config.get()

    show_on_host = False
    if config.storage_mode == StorageMode.local.value:
        show_on_host = True

    last_page = int(ceil(disks_ret['paging']['total'] / float(page_size)))
    page_length = 5
    pages = list()

    if page < int(ceil(page_length / 2.0)):
        for i in range(1, page_length + 1):
            pages.append(i)
            if i == last_page or last_page == 0:
                break

    elif last_page - page < page_length / 2:
        for i in range(last_page - page_length + 1, last_page + 1):
            if i < 1:
                continue
            pages.append(i)

    else:
        for i in range(page - page_length / 2, page + int(ceil(page_length / 2.0))):
            pages.append(i)
            if i == last_page or last_page == 0:
                break

    ret = dict()
    ret['state'] = ji.Common.exchange_state(20000)
    ret['data'] = {
        'disks': disks_ret['data'],
        'hosts_mapping_by_node_id': hosts_mapping_by_node_id,
        'order_by': order_by,
        'order': order,
        'show_area': show_area,
        'config': config.__dict__,
        'show_on_host': show_on_host,
        'paging': disks_ret['paging'],
        'page': page,
        'page_size': page_size,
        'keyword': keyword,
        'pages': pages,
        'last_page': last_page
    }

    return ret
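
# Illustrative, self-contained sketch of the sliding page-window rule used in
# r_show() above (page_length = 5). The helper name build_page_window is
# hypothetical and not part of this repository; integer divisions mirror the
# Python 2 semantics of the original code.
from math import ceil


def build_page_window(page, last_page, page_length=5):
    pages = []

    if page < int(ceil(page_length / 2.0)):
        # Near the beginning: the window starts at page 1.
        for i in range(1, page_length + 1):
            pages.append(i)
            if i == last_page or last_page == 0:
                break

    elif last_page - page < page_length // 2:
        # Near the end: the window ends at the last page.
        for i in range(last_page - page_length + 1, last_page + 1):
            if i < 1:
                continue
            pages.append(i)

    else:
        # In the middle: centre the window on the current page.
        for i in range(page - page_length // 2, page + int(ceil(page_length / 2.0))):
            pages.append(i)
            if i == last_page or last_page == 0:
                break

    return pages


# Example: a 12-page listing viewed from page 6 yields [4, 5, 6, 7, 8].
assert build_page_window(page=6, last_page=12) == [4, 5, 6, 7, 8]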
def r_create():
    args_rules = [
        Rules.DISK_SIZE.value,
        Rules.REMARK.value,
        Rules.QUANTITY.value
    ]

    config = Config()
    config.id = 1
    config.get()

    # In non-shared storage modes, a node_id must be specified explicitly.
    if config.storage_mode not in [StorageMode.shared_mount.value, StorageMode.ceph.value,
                                   StorageMode.glusterfs.value]:
        args_rules.append(Rules.NODE_ID.value)

    try:
        ji.Check.previewing(args_rules, request.json)

        size = request.json['size']
        quantity = request.json['quantity']

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)

        # In shared storage mode, let the least-loaded compute node create the disks.
        if config.storage_mode in [StorageMode.shared_mount.value, StorageMode.ceph.value,
                                   StorageMode.glusterfs.value]:
            available_hosts = Host.get_available_hosts()

            if available_hosts.__len__() == 0:
                ret['state'] = ji.Common.exchange_state(50351)
                return ret

            # Spread the work across the available compute nodes.
            chosen_host = available_hosts[quantity % available_hosts.__len__()]
            request.json['node_id'] = chosen_host['node_id']

        node_id = request.json['node_id']

        if size < 1:
            ret['state'] = ji.Common.exchange_state(41255)
            return ret

        while quantity:
            quantity -= 1
            disk = Disk()
            disk.guest_uuid = ''
            disk.size = size
            disk.uuid = uuid4().__str__()
            disk.remark = request.json.get('remark', '')
            disk.node_id = int(node_id)
            disk.sequence = -1
            disk.format = 'qcow2'
            disk.path = config.storage_path + '/' + disk.uuid + '.' + disk.format
            disk.quota(config=config)

            message = {
                '_object': 'disk',
                'action': 'create',
                'uuid': disk.uuid,
                'storage_mode': config.storage_mode,
                'dfs_volume': config.dfs_volume,
                'node_id': disk.node_id,
                'image_path': disk.path,
                'size': disk.size
            }

            Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))

            disk.create()

        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_update():
    config = Config()

    args_rules = []

    if 'jimv_edition' in request.json:
        args_rules.append(Rules.JIMV_EDITION.value)

    if 'storage_mode' in request.json:
        args_rules.append(Rules.STORAGE_MODE.value)

    if 'dfs_volume' in request.json:
        args_rules.append(Rules.DFS_VOLUME.value)

    if 'storage_path' in request.json:
        args_rules.append(Rules.STORAGE_PATH.value)

    if 'vm_network' in request.json:
        args_rules.append(Rules.VM_NETWORK.value)

    if 'vm_manage_network' in request.json:
        args_rules.append(Rules.VM_MANAGE_NETWORK.value)

    if args_rules.__len__() < 1:
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        return ret

    try:
        config.id = 1
        ji.Check.previewing(args_rules, request.json)
        config.get()
        config.jimv_edition = int(request.json.get('jimv_edition', config.jimv_edition))
        config.storage_mode = int(request.json.get('storage_mode', config.storage_mode))
        config.dfs_volume = request.json.get('dfs_volume', config.dfs_volume)
        config.storage_path = request.json.get('storage_path', config.storage_path)
        config.vm_network = request.json.get('vm_network', config.vm_network)
        config.vm_manage_network = request.json.get('vm_manage_network', config.vm_manage_network)
        config.update()
        config.get()

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        ret['data'] = config.__dict__
        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_create():
    args_rules = [
        Rules.JIMV_EDITION.value,
        Rules.STORAGE_MODE.value,
        Rules.DFS_VOLUME.value,
        Rules.STORAGE_PATH.value,
        Rules.VM_NETWORK.value,
        Rules.VM_MANAGE_NETWORK.value,
        Rules.IOPS_BASE.value,
        Rules.IOPS_PRE_UNIT.value,
        Rules.IOPS_CAP.value,
        Rules.IOPS_MAX.value,
        Rules.IOPS_MAX_LENGTH.value,
        Rules.BPS_BASE.value,
        Rules.BPS_PRE_UNIT.value,
        Rules.BPS_CAP.value,
        Rules.BPS_MAX.value,
        Rules.BPS_MAX_LENGTH.value
    ]

    config = Config()
    config.id = 1
    config.jimv_edition = int(request.json.get('jimv_edition', 0))
    config.storage_mode = int(request.json.get('storage_mode', 0))
    config.dfs_volume = request.json.get('dfs_volume', '')
    config.storage_path = request.json.get('storage_path')
    config.vm_network = request.json.get('vm_network')
    config.vm_manage_network = request.json.get('vm_manage_network')
    config.iops_base = int(request.json.get('iops_base', 1000))
    config.iops_pre_unit = int(request.json.get('iops_pre_unit', 1))
    config.iops_cap = int(request.json.get('iops_cap', 2000))
    config.iops_max = int(request.json.get('iops_max', 3000))
    config.iops_max_length = int(request.json.get('iops_max_length', 20))
    # 200 MiB
    config.bps_base = int(request.json.get('bps_base', 1024 * 1024 * 200))
    # 0.3 MiB
    config.bps_pre_unit = int(request.json.get('bps_pre_unit', 1024 * 1024 * 0.3))
    # 500 MiB
    config.bps_cap = int(request.json.get('bps_cap', 1024 * 1024 * 500))
    # 1 GiB
    config.bps_max = int(request.json.get('bps_max', 1024 * 1024 * 1024))
    config.bps_max_length = int(request.json.get('bps_max_length', 10))

    try:
        ji.Check.previewing(args_rules, config.__dict__)

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)

        if config.exist():
            ret['state'] = ji.Common.exchange_state(40901)
            return ret

        config.create()

        config.id = 1
        config.get()
        ret['data'] = config.__dict__
        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_update_quota():
    config = Config()

    args_rules = []

    if 'iops_base' in request.json:
        args_rules.append(Rules.IOPS_BASE.value)

    if 'iops_pre_unit' in request.json:
        args_rules.append(Rules.IOPS_PRE_UNIT.value)

    if 'iops_cap' in request.json:
        args_rules.append(Rules.IOPS_CAP.value)

    if 'iops_max' in request.json:
        args_rules.append(Rules.IOPS_MAX.value)

    if 'iops_max_length' in request.json:
        args_rules.append(Rules.IOPS_MAX_LENGTH.value)

    if 'bps_base' in request.json:
        args_rules.append(Rules.BPS_BASE.value)

    if 'bps_pre_unit' in request.json:
        args_rules.append(Rules.BPS_PRE_UNIT.value)

    if 'bps_cap' in request.json:
        args_rules.append(Rules.BPS_CAP.value)

    if 'bps_max' in request.json:
        args_rules.append(Rules.BPS_MAX.value)

    if 'bps_max_length' in request.json:
        args_rules.append(Rules.BPS_MAX_LENGTH.value)

    if 'influence_current_guest' in request.json:
        args_rules.append(Rules.INFLUENCE_CURRENT_GUEST.value)

    if args_rules.__len__() < 1:
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        return ret

    try:
        config.id = 1
        ji.Check.previewing(args_rules, request.json)
        config.get()
        config.iops_base = int(request.json.get('iops_base', config.iops_base))
        config.iops_pre_unit = int(request.json.get('iops_pre_unit', config.iops_pre_unit))
        config.iops_cap = int(request.json.get('iops_cap', config.iops_cap))
        config.iops_max = int(request.json.get('iops_max', config.iops_max))
        config.iops_max_length = int(request.json.get('iops_max_length', config.iops_max_length))
        config.bps_base = int(request.json.get('bps_base', config.bps_base))
        config.bps_pre_unit = int(request.json.get('bps_pre_unit', config.bps_pre_unit))
        config.bps_cap = int(request.json.get('bps_cap', config.bps_cap))
        config.bps_max = int(request.json.get('bps_max', config.bps_max))
        config.bps_max_length = int(request.json.get('bps_max_length', config.bps_max_length))

        if request.json.get('influence_current_guest', False):
            disks, _ = Disk.get_all()
            disk = Disk()

            for disk_info in disks:
                disk.id = disk_info['id']
                disk.get()
                disk.quota(config=config)
                disk.update()

                if disk.sequence >= 0:
                    message = {
                        '_object': 'disk',
                        'action': 'quota',
                        'uuid': disk.uuid,
                        'guest_uuid': disk.guest_uuid,
                        'node_id': disk.node_id,
                        'disks': [disk.__dict__]
                    }

                    Utils.emit_instruction(message=json.dumps(message))

        config.update()
        config.get()

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        ret['data'] = config.__dict__
        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
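
# Illustrative request body for r_update_quota() above. The keys come straight
# from the handler; the concrete values mirror the defaults used in r_create(),
# and the per-field notes are assumptions inferred from the field names rather
# than from this file.
example_quota_update = {
    'iops_base': 1000,
    'iops_pre_unit': 1,
    'iops_cap': 2000,
    'iops_max': 3000,
    'iops_max_length': 20,
    'bps_base': 1024 * 1024 * 200,            # 200 MiB
    'bps_pre_unit': int(1024 * 1024 * 0.3),   # ~0.3 MiB
    'bps_cap': 1024 * 1024 * 500,             # 500 MiB
    'bps_max': 1024 * 1024 * 1024,            # 1 GiB
    'bps_max_length': 10,
    # When True, every existing disk is re-quota'd and, for mounted disks
    # (sequence >= 0), a 'quota' instruction is pushed to its compute node.
    'influence_current_guest': True,
}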
class EventProcessor(object):
    message = None
    log = Log()
    guest = Guest()
    guest_migrate_info = GuestMigrateInfo()
    disk = Disk()
    snapshot = Snapshot()
    snapshot_disk_mapping = SnapshotDiskMapping()
    os_template_image = OSTemplateImage()
    config = Config()
    config.id = 1
    guest_cpu_memory = GuestCPUMemory()
    guest_traffic = GuestTraffic()
    guest_disk_io = GuestDiskIO()
    host_cpu_memory = HostCPUMemory()
    host_traffic = HostTraffic()
    host_disk_usage_io = HostDiskUsageIO()

    @classmethod
    def log_processor(cls):
        cls.log.set(type=cls.message['type'], timestamp=cls.message['timestamp'], host=cls.message['host'],
                    message=cls.message['message'],
                    full_message='' if cls.message['message'].__len__() < 255 else cls.message['message'])
        cls.log.create()

    @classmethod
    def guest_event_processor(cls):
        cls.guest.uuid = cls.message['message']['uuid']
        cls.guest.get_by('uuid')
        cls.guest.node_id = cls.message['node_id']
        last_status = cls.guest.status
        cls.guest.status = cls.message['type']

        if cls.message['type'] == GuestState.update.value:
            # An update event does not change the Guest's status.
            cls.guest.status = last_status
            cls.guest.xml = cls.message['message']['xml']

        elif cls.guest.status == GuestState.migrating.value:
            try:
                cls.guest_migrate_info.uuid = cls.guest.uuid
                cls.guest_migrate_info.get_by('uuid')

                cls.guest_migrate_info.type = cls.message['message']['migrating_info']['type']
                cls.guest_migrate_info.time_elapsed = cls.message['message']['migrating_info']['time_elapsed']
                cls.guest_migrate_info.time_remaining = cls.message['message']['migrating_info']['time_remaining']
                cls.guest_migrate_info.data_total = cls.message['message']['migrating_info']['data_total']
                cls.guest_migrate_info.data_processed = cls.message['message']['migrating_info']['data_processed']
                cls.guest_migrate_info.data_remaining = cls.message['message']['migrating_info']['data_remaining']
                cls.guest_migrate_info.mem_total = cls.message['message']['migrating_info']['mem_total']
                cls.guest_migrate_info.mem_processed = cls.message['message']['migrating_info']['mem_processed']
                cls.guest_migrate_info.mem_remaining = cls.message['message']['migrating_info']['mem_remaining']
                cls.guest_migrate_info.file_total = cls.message['message']['migrating_info']['file_total']
                cls.guest_migrate_info.file_processed = cls.message['message']['migrating_info']['file_processed']
                cls.guest_migrate_info.file_remaining = cls.message['message']['migrating_info']['file_remaining']

                cls.guest_migrate_info.update()

            except ji.PreviewingError as e:
                ret = json.loads(e.message)

                if ret['state']['code'] == '404':
                    cls.guest_migrate_info.type = cls.message['message']['migrating_info']['type']
                    cls.guest_migrate_info.time_elapsed = cls.message['message']['migrating_info']['time_elapsed']
                    cls.guest_migrate_info.time_remaining = cls.message['message']['migrating_info']['time_remaining']
                    cls.guest_migrate_info.data_total = cls.message['message']['migrating_info']['data_total']
                    cls.guest_migrate_info.data_processed = cls.message['message']['migrating_info']['data_processed']
                    cls.guest_migrate_info.data_remaining = cls.message['message']['migrating_info']['data_remaining']
                    cls.guest_migrate_info.mem_total = cls.message['message']['migrating_info']['mem_total']
                    cls.guest_migrate_info.mem_processed = cls.message['message']['migrating_info']['mem_processed']
                    cls.guest_migrate_info.mem_remaining = cls.message['message']['migrating_info']['mem_remaining']
                    cls.guest_migrate_info.file_total = cls.message['message']['migrating_info']['file_total']
                    cls.guest_migrate_info.file_processed = cls.message['message']['migrating_info']['file_processed']
                    cls.guest_migrate_info.file_remaining = cls.message['message']['migrating_info']['file_remaining']

                    cls.guest_migrate_info.create()

        elif cls.guest.status == GuestState.creating.value:
            if cls.message['message']['progress'] <= cls.guest.progress:
                return

            cls.guest.progress = cls.message['message']['progress']

        elif cls.guest.status == GuestState.snapshot_converting.value:
            cls.os_template_image.id = cls.message['message']['os_template_image_id']
            cls.os_template_image.get()

            if cls.message['message']['progress'] <= cls.os_template_image.progress:
                return

            cls.os_template_image.progress = cls.message['message']['progress']
            cls.os_template_image.update()

            return

        cls.guest.update()

        # Only update the disks' owning Guest in these specific states, to avoid
        # frequent, pointless updates while migrating or creating.
        if cls.guest.status in [GuestState.running.value, GuestState.shutoff.value]:
            cls.disk.update_by_filter({'node_id': cls.guest.node_id},
                                      filter_str='guest_uuid:eq:' + cls.guest.uuid)

    @classmethod
    def host_event_processor(cls):
        key = cls.message['message']['node_id']
        value = {
            'hostname': cls.message['host'],
            'cpu': cls.message['message']['cpu'],
            'cpuinfo': cls.message['message'].get('cpuinfo'),
            'system_load': cls.message['message']['system_load'],
            'memory': cls.message['message']['memory'],
            'memory_available': cls.message['message']['memory_available'],
            'dmidecode': cls.message['message']['dmidecode'],
            'interfaces': cls.message['message']['interfaces'],
            'disks': cls.message['message']['disks'],
            'boot_time': cls.message['message']['boot_time'],
            'nonrandom': False,
            'threads_status': cls.message['message']['threads_status'],
            'version': cls.message['message']['version'],
            'timestamp': ji.Common.ts()
        }

        db.r.hset(app_config['hosts_info'], key=key, value=json.dumps(value, ensure_ascii=False))

    @classmethod
    def response_processor(cls):
        _object = cls.message['message']['_object']
        action = cls.message['message']['action']
        uuid = cls.message['message']['uuid']
        state = cls.message['type']
        data = cls.message['message']['data']
        node_id = cls.message['node_id']

        if _object == 'guest':
            if action == 'create':
                if state == ResponseState.success.value:
                    # The system disk shares its UUID with its Guest.
                    cls.disk.uuid = uuid
                    cls.disk.get_by('uuid')
                    cls.disk.guest_uuid = uuid
                    cls.disk.state = DiskState.mounted.value
                    # disk_info['virtual-size'] is in bytes; divide by 1024 ** 3 to convert to GB.
                    cls.disk.size = data['disk_info']['virtual-size'] / (1024 ** 3)
                    cls.disk.update()

                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.progress = 100
                    cls.guest.update()

                else:
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.status = GuestState.dirty.value
                    cls.guest.update()

            elif action == 'migrate':
                pass

            elif action == 'sync_data':
                if state == ResponseState.success.value:
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.autostart = bool(data['autostart'])
                    cls.guest.update()

            elif action == 'delete':
                if state == ResponseState.success.value:
                    cls.config.get()
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.delete()

                    # TODO: add a switch controlling whether the data disks in use
                    # are deleted as well; when True delete them, otherwise just
                    # release them from the guest.
                    cls.disk.uuid = uuid
                    cls.disk.get_by('uuid')
                    cls.disk.delete()

                    cls.disk.update_by_filter({'guest_uuid': '', 'sequence': -1, 'state': DiskState.idle.value},
                                              filter_str='guest_uuid:eq:' + cls.guest.uuid)

                    SSHKeyGuestMapping.delete_by_filter(filter_str=':'.join(['guest_uuid', 'eq', cls.guest.uuid]))

            elif action == 'reset_password':
                if state == ResponseState.success.value:
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.password = cls.message['message']['passback_parameters']['password']
                    cls.guest.update()

            elif action == 'autostart':
                if state == ResponseState.success.value:
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.autostart = cls.message['message']['passback_parameters']['autostart']
                    cls.guest.update()

            elif action == 'attach_disk':
                cls.disk.uuid = cls.message['message']['passback_parameters']['disk_uuid']
                cls.disk.get_by('uuid')

                if state == ResponseState.success.value:
                    cls.disk.guest_uuid = uuid
                    cls.disk.sequence = cls.message['message']['passback_parameters']['sequence']
                    cls.disk.state = DiskState.mounted.value
                    cls.disk.update()

            elif action == 'detach_disk':
                cls.disk.uuid = cls.message['message']['passback_parameters']['disk_uuid']
                cls.disk.get_by('uuid')

                if state == ResponseState.success.value:
                    cls.disk.guest_uuid = ''
                    cls.disk.sequence = -1
                    cls.disk.state = DiskState.idle.value
                    cls.disk.update()

            elif action == 'boot':
                if state == ResponseState.success.value:
                    pass

            elif action == 'allocate_bandwidth':
                if state == ResponseState.success.value:
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.bandwidth = cls.message['message']['passback_parameters']['bandwidth']
                    cls.guest.update()

            elif action == 'adjust_ability':
                if state == ResponseState.success.value:
                    cls.guest.uuid = uuid
                    cls.guest.get_by('uuid')
                    cls.guest.cpu = cls.message['message']['passback_parameters']['cpu']
                    cls.guest.memory = cls.message['message']['passback_parameters']['memory']
                    cls.guest.update()

        elif _object == 'disk':
            if action == 'create':
                cls.disk.uuid = uuid
                cls.disk.get_by('uuid')
                cls.disk.node_id = node_id

                if state == ResponseState.success.value:
                    cls.disk.state = DiskState.idle.value
                else:
                    cls.disk.state = DiskState.dirty.value

                cls.disk.update()

            elif action == 'resize':
                if state == ResponseState.success.value:
                    cls.config.get()
                    cls.disk.uuid = uuid
                    cls.disk.get_by('uuid')
                    cls.disk.size = cls.message['message']['passback_parameters']['size']
                    cls.disk.quota(config=cls.config)
                    cls.disk.update()

            elif action == 'delete':
                cls.disk.uuid = uuid
                cls.disk.get_by('uuid')
                cls.disk.delete()

        elif _object == 'snapshot':
            if action == 'create':
                cls.snapshot.id = cls.message['message']['passback_parameters']['id']
                cls.snapshot.get()

                if state == ResponseState.success.value:
                    cls.snapshot.snapshot_id = data['snapshot_id']
                    cls.snapshot.parent_id = data['parent_id']
                    cls.snapshot.xml = data['xml']
                    cls.snapshot.progress = 100
                    cls.snapshot.update()

                    disks, _ = Disk.get_by_filter(filter_str='guest_uuid:eq:' + cls.snapshot.guest_uuid)

                    for disk in disks:
                        cls.snapshot_disk_mapping.snapshot_id = cls.snapshot.snapshot_id
                        cls.snapshot_disk_mapping.disk_uuid = disk['uuid']
                        cls.snapshot_disk_mapping.create()

                else:
                    cls.snapshot.progress = 255
                    cls.snapshot.update()

            if action == 'delete':
                if state == ResponseState.success.value:
                    cls.snapshot.id = cls.message['message']['passback_parameters']['id']
                    cls.snapshot.get()

                    # Re-parent child snapshots to this snapshot's parent_id,
                    # because the current snapshot has been deleted.
                    Snapshot.update_by_filter({'parent_id': cls.snapshot.parent_id},
                                              filter_str='parent_id:eq:' + cls.snapshot.snapshot_id)

                    SnapshotDiskMapping.delete_by_filter(
                        filter_str=':'.join(['snapshot_id', 'eq', cls.snapshot.snapshot_id]))

                    cls.snapshot.delete()

                else:
                    pass

            if action == 'revert':
                # Whether or not the revert succeeded, return the snapshot to its normal state.
                cls.snapshot.id = cls.message['message']['passback_parameters']['id']
                cls.snapshot.get()
                cls.snapshot.progress = 100
                cls.snapshot.update()

            if action == 'convert':
                cls.snapshot.snapshot_id = cls.message['message']['passback_parameters']['id']
                cls.snapshot.get_by('snapshot_id')
                cls.snapshot.progress = 100
                cls.snapshot.update()

                cls.os_template_image.id = cls.message['message']['passback_parameters']['os_template_image_id']
                cls.os_template_image.get()

                if state == ResponseState.success.value:
                    cls.os_template_image.progress = 100
                else:
                    cls.os_template_image.progress = 255

                cls.os_template_image.update()

        elif _object == 'os_template_image':
            if action == 'delete':
                cls.os_template_image.id = cls.message['message']['passback_parameters']['id']
                cls.os_template_image.get()

                if state == ResponseState.success.value:
                    cls.os_template_image.delete()

                else:
                    pass

        else:
            pass

    @classmethod
    def guest_collection_performance_processor(cls):
        data_kind = cls.message['type']
        timestamp = ji.Common.ts()
        timestamp -= (timestamp % 60)
        data = cls.message['message']['data']

        if data_kind == GuestCollectionPerformanceDataKind.cpu_memory.value:
            for item in data:
                cls.guest_cpu_memory.guest_uuid = item['guest_uuid']
                cls.guest_cpu_memory.cpu_load = item['cpu_load']
                cls.guest_cpu_memory.memory_available = item['memory_available']
                cls.guest_cpu_memory.memory_rate = item['memory_rate']
                cls.guest_cpu_memory.timestamp = timestamp
                cls.guest_cpu_memory.create()

        if data_kind == GuestCollectionPerformanceDataKind.traffic.value:
            for item in data:
                cls.guest_traffic.guest_uuid = item['guest_uuid']
                cls.guest_traffic.name = item['name']
                cls.guest_traffic.rx_bytes = item['rx_bytes']
                cls.guest_traffic.rx_packets = item['rx_packets']
                cls.guest_traffic.rx_errs = item['rx_errs']
                cls.guest_traffic.rx_drop = item['rx_drop']
                cls.guest_traffic.tx_bytes = item['tx_bytes']
                cls.guest_traffic.tx_packets = item['tx_packets']
                cls.guest_traffic.tx_errs = item['tx_errs']
                cls.guest_traffic.tx_drop = item['tx_drop']
                cls.guest_traffic.timestamp = timestamp
                cls.guest_traffic.create()

        if data_kind == GuestCollectionPerformanceDataKind.disk_io.value:
            for item in data:
                cls.guest_disk_io.disk_uuid = item['disk_uuid']
                cls.guest_disk_io.rd_req = item['rd_req']
                cls.guest_disk_io.rd_bytes = item['rd_bytes']
                cls.guest_disk_io.wr_req = item['wr_req']
                cls.guest_disk_io.wr_bytes = item['wr_bytes']
                cls.guest_disk_io.timestamp = timestamp
                cls.guest_disk_io.create()

        else:
            pass

    @classmethod
    def host_collection_performance_processor(cls):
        data_kind = cls.message['type']
        timestamp = ji.Common.ts()
        timestamp -= (timestamp % 60)
        data = cls.message['message']['data']

        if data_kind == HostCollectionPerformanceDataKind.cpu_memory.value:
            cls.host_cpu_memory.node_id = data['node_id']
            cls.host_cpu_memory.cpu_load = data['cpu_load']
            cls.host_cpu_memory.memory_available = data['memory_available']
            cls.host_cpu_memory.timestamp = timestamp
            cls.host_cpu_memory.create()

        if data_kind == HostCollectionPerformanceDataKind.traffic.value:
            for item in data:
                cls.host_traffic.node_id = item['node_id']
                cls.host_traffic.name = item['name']
                cls.host_traffic.rx_bytes = item['rx_bytes']
                cls.host_traffic.rx_packets = item['rx_packets']
                cls.host_traffic.rx_errs = item['rx_errs']
                cls.host_traffic.rx_drop = item['rx_drop']
                cls.host_traffic.tx_bytes = item['tx_bytes']
                cls.host_traffic.tx_packets = item['tx_packets']
                cls.host_traffic.tx_errs = item['tx_errs']
                cls.host_traffic.tx_drop = item['tx_drop']
                cls.host_traffic.timestamp = timestamp
                cls.host_traffic.create()

        if data_kind == HostCollectionPerformanceDataKind.disk_usage_io.value:
            for item in data:
                cls.host_disk_usage_io.node_id = item['node_id']
                cls.host_disk_usage_io.mountpoint = item['mountpoint']
                cls.host_disk_usage_io.used = item['used']
                cls.host_disk_usage_io.rd_req = item['rd_req']
                cls.host_disk_usage_io.rd_bytes = item['rd_bytes']
                cls.host_disk_usage_io.wr_req = item['wr_req']
                cls.host_disk_usage_io.wr_bytes = item['wr_bytes']
                cls.host_disk_usage_io.timestamp = timestamp
                cls.host_disk_usage_io.create()

        else:
            pass

    @classmethod
    def launch(cls):
        logger.info(msg='Thread EventProcessor is launched.')

        while True:
            if Utils.exit_flag:
                msg = 'Thread EventProcessor say bye-bye'
                print msg
                logger.info(msg=msg)
                return

            try:
                report = db.r.lpop(app_config['upstream_queue'])

                if report is None:
                    time.sleep(1)
                    continue

                cls.message = json.loads(report)

                if cls.message['kind'] == EmitKind.log.value:
                    cls.log_processor()

                elif cls.message['kind'] == EmitKind.guest_event.value:
                    cls.guest_event_processor()

                elif cls.message['kind'] == EmitKind.host_event.value:
                    cls.host_event_processor()

                elif cls.message['kind'] == EmitKind.response.value:
                    cls.response_processor()

                elif cls.message['kind'] == EmitKind.guest_collection_performance.value:
                    cls.guest_collection_performance_processor()

                elif cls.message['kind'] == EmitKind.host_collection_performance.value:
                    cls.host_collection_performance_processor()

                else:
                    pass

            except AttributeError as e:
                logger.error(traceback.format_exc())
                time.sleep(1)

                if db.r is None:
                    db.init_conn_redis()

            except Exception as e:
                logger.error(traceback.format_exc())
                time.sleep(1)
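
# Illustrative shape of one report popped from app_config['upstream_queue'] by
# EventProcessor.launch() above. The field names follow the accesses made by the
# processors; every concrete value here is invented for the example.
example_report = {
    'kind': EmitKind.response.value,
    'type': ResponseState.success.value,
    'host': 'compute-node-01',
    'node_id': 1,
    'timestamp': 1500000000,        # consumed by log_processor() for log-kind reports
    'message': {
        '_object': 'disk',
        'action': 'create',
        'uuid': '00000000-0000-0000-0000-000000000000',
        'data': {},
        'passback_parameters': {},
    },
}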
def r_convert_to_os_template_image(snapshot_id, disk_uuid):
    args_rules = [
        Rules.SNAPSHOT_ID.value,
        Rules.DISK_UUID.value,
        Rules.LABEL.value
    ]

    try:
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)

        ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id, 'disk_uuid': disk_uuid,
                                         'label': request.json.get('label')})

        rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot_id]))

        disks_uuid = list()

        for row in rows:
            disks_uuid.append(row['disk_uuid'])

        if disk_uuid not in disks_uuid:
            ret['state'] = ji.Common.exchange_state(40401)
            ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], u': 未在快照: ', snapshot_id,
                                                    u' 中找到磁盘:', disk_uuid])
            return ret

        config = Config()
        config.id = 1
        config.get()

        snapshot = Snapshot()
        os_template_image = OSTemplateImage()
        guest = Guest()
        disk = Disk()

        snapshot.snapshot_id = snapshot_id
        snapshot.get_by('snapshot_id')
        snapshot.progress = 252

        guest.uuid = snapshot.guest_uuid
        guest.get_by('uuid')

        disk.uuid = disk_uuid
        disk.get_by('uuid')

        os_template_image.id = guest.os_template_image_id
        os_template_image.get()

        image_name = '_'.join([snapshot.snapshot_id, disk.uuid]) + '.' + disk.format

        os_template_image.id = 0
        os_template_image.label = request.json.get('label')
        os_template_image.path = '/'.join([os.path.dirname(os_template_image.path), image_name])
        os_template_image.kind = OSTemplateImageKind.custom.value
        os_template_image.progress = 0
        os_template_image.create_time = ji.Common.tus()

        if os_template_image.exist_by('path'):
            ret['state'] = ji.Common.exchange_state(40901)
            ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], ': ', os_template_image.path])
            return ret

        os_template_image.create()
        os_template_image.get_by('path')

        message = {
            '_object': 'snapshot',
            'action': 'convert',
            'uuid': disk.guest_uuid,
            'snapshot_id': snapshot.snapshot_id,
            'storage_mode': config.storage_mode,
            'dfs_volume': config.dfs_volume,
            'node_id': disk.node_id,
            'snapshot_path': disk.path,
            'template_path': os_template_image.path,
            'os_template_image_id': os_template_image.id,
            'passback_parameters': {'id': snapshot.snapshot_id, 'os_template_image_id': os_template_image.id}
        }

        Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))

        snapshot.update()

        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_update():
    config = Config()

    args_rules = []

    if 'jimv_edition' in request.json:
        args_rules.append(Rules.JIMV_EDITION.value)

    if 'storage_mode' in request.json:
        args_rules.append(Rules.STORAGE_MODE.value)

    if 'dfs_volume' in request.json:
        args_rules.append(Rules.DFS_VOLUME.value)

    if 'storage_path' in request.json:
        args_rules.append(Rules.STORAGE_PATH.value)

    if 'vm_network' in request.json:
        args_rules.append(Rules.VM_NETWORK.value)

    if 'vm_manage_network' in request.json:
        args_rules.append(Rules.VM_MANAGE_NETWORK.value)

    if 'start_ip' in request.json:
        args_rules.append(Rules.START_IP.value)

    if 'end_ip' in request.json:
        args_rules.append(Rules.END_IP.value)

    if 'start_vnc_port' in request.json:
        args_rules.append(Rules.START_VNC_PORT.value)

    if 'netmask' in request.json:
        args_rules.append(Rules.NETMASK.value)

    if 'gateway' in request.json:
        args_rules.append(Rules.GATEWAY.value)

    if 'dns1' in request.json:
        args_rules.append(Rules.DNS1.value)

    if 'dns2' in request.json:
        args_rules.append(Rules.DNS2.value)

    if args_rules.__len__() < 1:
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        return ret

    try:
        config.id = 1
        ji.Check.previewing(args_rules, request.json)
        config.get()
        config.jimv_edition = int(request.json.get('jimv_edition', config.jimv_edition))
        config.storage_mode = int(request.json.get('storage_mode', config.storage_mode))
        config.dfs_volume = request.json.get('dfs_volume', config.dfs_volume)
        config.storage_path = request.json.get('storage_path', config.storage_path)
        config.vm_network = request.json.get('vm_network', config.vm_network)
        config.vm_manage_network = request.json.get('vm_manage_network', config.vm_manage_network)
        config.start_ip = request.json.get('start_ip', config.start_ip)
        config.end_ip = request.json.get('end_ip', config.end_ip)
        config.start_vnc_port = int(request.json.get('start_vnc_port', config.start_vnc_port))
        config.netmask = request.json.get('netmask', config.netmask)
        config.gateway = request.json.get('gateway', config.gateway)
        config.dns1 = request.json.get('dns1', config.dns1)
        config.dns2 = request.json.get('dns2', config.dns2)
        config.check_ip()
        config.generate_available_ip2set()
        config.generate_available_vnc_port()
        config.update()
        config.update_global_config()
        config.get()

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        ret['data'] = config.__dict__
        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
def r_create():
    args_rules = [
        Rules.JIMV_EDITION.value,
        Rules.STORAGE_MODE.value,
        Rules.DFS_VOLUME.value,
        Rules.STORAGE_PATH.value,
        Rules.VM_NETWORK.value,
        Rules.VM_MANAGE_NETWORK.value,
        Rules.START_IP.value,
        Rules.END_IP.value,
        Rules.START_VNC_PORT.value,
        Rules.NETMASK.value,
        Rules.GATEWAY.value,
        Rules.DNS1.value,
        Rules.DNS2.value,
        Rules.IOPS_BASE.value,
        Rules.IOPS_PRE_UNIT.value,
        Rules.IOPS_CAP.value,
        Rules.IOPS_MAX.value,
        Rules.IOPS_MAX_LENGTH.value,
        Rules.BPS_BASE.value,
        Rules.BPS_PRE_UNIT.value,
        Rules.BPS_CAP.value,
        Rules.BPS_MAX.value,
        Rules.BPS_MAX_LENGTH.value
    ]

    config = Config()
    config.id = 1
    config.jimv_edition = int(request.json.get('jimv_edition', 0))
    config.storage_mode = int(request.json.get('storage_mode', 0))
    config.dfs_volume = request.json.get('dfs_volume', '')
    config.storage_path = request.json.get('storage_path')
    config.vm_network = request.json.get('vm_network')
    config.vm_manage_network = request.json.get('vm_manage_network')
    config.start_ip = request.json.get('start_ip')
    config.end_ip = request.json.get('end_ip')
    config.start_vnc_port = int(request.json.get('start_vnc_port', 15900))
    config.netmask = request.json.get('netmask')
    config.gateway = request.json.get('gateway')
    config.dns1 = request.json.get('dns1')
    config.dns2 = request.json.get('dns2')
    config.iops_base = int(request.json.get('iops_base', 1000))
    config.iops_pre_unit = int(request.json.get('iops_pre_unit', 1))
    config.iops_cap = int(request.json.get('iops_cap', 2000))
    config.iops_max = int(request.json.get('iops_max', 3000))
    config.iops_max_length = int(request.json.get('iops_max_length', 20))
    # 200 MiB
    config.bps_base = int(request.json.get('bps_base', 1024 * 1024 * 200))
    # 0.3 MiB
    config.bps_pre_unit = int(request.json.get('bps_pre_unit', 1024 * 1024 * 0.3))
    # 500 MiB
    config.bps_cap = int(request.json.get('bps_cap', 1024 * 1024 * 500))
    # 1 GiB
    config.bps_max = int(request.json.get('bps_max', 1024 * 1024 * 1024))
    config.bps_max_length = int(request.json.get('bps_max_length', 10))

    try:
        ji.Check.previewing(args_rules, config.__dict__)

        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)

        if config.exist():
            ret['state'] = ji.Common.exchange_state(40901)
            return ret

        config.check_ip()
        config.generate_available_ip2set()
        config.generate_available_vnc_port()
        config.create()
        config.update_global_config()

        config.id = 1
        config.get()
        ret['data'] = config.__dict__
        return ret

    except ji.PreviewingError as e:
        return json.loads(e.message)
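
# Illustrative initial-configuration payload for r_create() above. The keys
# match the handler; all concrete values (addresses, bridge names, paths,
# volume name) are invented examples, not recommended settings.
example_initial_config = {
    'jimv_edition': 0,
    'storage_mode': 0,              # a StorageMode enum value; 0 is the handler's default
    'dfs_volume': 'gv0',
    'storage_path': '/opt/Images',
    'vm_network': 'br0',
    'vm_manage_network': 'br1',
    'start_ip': '10.0.0.100',
    'end_ip': '10.0.0.200',
    'start_vnc_port': 15900,
    'netmask': '255.255.255.0',
    'gateway': '10.0.0.1',
    'dns1': '8.8.8.8',
    'dns2': '8.8.4.4',
    # IOPS / BPS quota fields are optional here; the handler falls back to the
    # defaults shown above (e.g. bps_base = 200 MiB).
}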