def notify_backup_sync_file(master_ip, backup_ip, paths, check_path=None):
    """
    Notify the backup controller node to sync the given files.

    :param master_ip: IP of the master controller node (the sync source)
    :param backup_ip: IP of the backup controller node (the sync target)
    :param paths: list of file paths that must be synced
    :param check_path: if provided, the node checks every file under this
        path and deletes those not listed in ``paths``
    :return: JSON reply of the compute service on the backup node
    """
    bind = SERVER_CONF.addresses.get_by_default('server_bind', '')
    if bind:
        # 'server_bind' looks like "host:port" -- keep only the port
        port = bind.split(':')[-1]
    else:
        port = constants.SERVER_DEFAULT_PORT
    # endpoint the backup node will pull the files from (master side)
    endpoint = "http://%s:%s" % (master_ip, port)
    command_data = {
        "command": "ha_sync_file",
        "handler": "NodeHandler",
        "data": {
            "paths": paths,
            "check_path": check_path,
            "endpoint": endpoint,
            "url": constants.HA_SYNC_URL,
        }
    }
    logger.info("start check sync at node %s , check_path: %s, paths: %s",
                backup_ip, check_path, paths)
    # file transfer can be slow, hence the long timeout
    rep_json = compute_post(backup_ip, command_data, timeout=600)
    logger.info(
        "finish check sync at node %s, rep_json: %s, check_path: %s, paths: %s",
        backup_ip, rep_json, check_path, paths)
    return rep_json
def check_concurrency(self, redis, host_ip, terminal_id, ram):
    """
    Register a terminal in the parallel queue and verify the target node
    can take one more instance.

    Enqueues ``terminal_id`` into the redis hash, counts how many queued
    entries target ``host_ip`` and sums their RAM, then asks the node's
    compute service whether that much RAM can still be allocated.

    :param redis: redis wrapper exposing ``rds``
    :param host_ip: IP of the node the terminal wants to start on
    :param terminal_id: identifier used as the hash field
    :param ram: RAM (units as used by the compute service) of the request
    :raises Exception: when the queue is full, the check request fails,
        or the node cannot allocate the memory; the queue entry is removed
        before re-raising.
    """
    try:
        info = {"node": host_ip, "ram": ram}
        redis.rds.hset(constants.PARALELL_QUEUE, terminal_id, value=json.dumps(info))
        count = 0
        ram_sum = 0
        # count pending requests (and their total RAM) targeting this node
        for value in redis.rds.hvals(constants.PARALELL_QUEUE):
            value = json.loads(value)
            if value['node'] == host_ip:
                count += 1
                ram_sum += value['ram']
        logger.info("terminal_id:%s, queue count:%s, ram_sum:%s",
                    terminal_id, count, ram_sum)
        if count > constants.PARALELL_NUM:
            logger.info("check concurrency raise exception")
            raise Exception("the parallel queue is reach maxed")
        command_data = {
            "command": "check_ram",
            "handler": "InstanceHandler",
            "data": {
                "allocated": ram_sum
            }
        }
        rep_json = compute_post(host_ip, command_data)
        if rep_json.get("code", -1) != 0:
            logger.error("check ram available in %s failed", host_ip)
            raise Exception("check ram available failed")
        if not rep_json.get('data', {}).get('result', True):
            logger.error("can not allocate memory")
            raise Exception("can not allocate memory")
    except Exception:
        # roll back the queue entry, then re-raise with the original
        # traceback (bare `raise` instead of `raise e`)
        redis.rds.hdel(constants.PARALELL_QUEUE, terminal_id)
        raise
def _notify_backup_sync(self, master_ip, backup_ip, paths,
                        voi_template_list=None, voi_ha_domain_info=None):
    """
    Notify the backup controller to sync SQL/license files and VOI data.

    :param master_ip: IP of the master controller (sync source endpoint)
    :param backup_ip: IP of the backup controller (sync target)
    :param paths: file paths to sync
    :param voi_template_list: optional VOI template descriptors
    :param voi_ha_domain_info: optional VOI HA domain info
    :raises EnableHaException: when the compute service reports a failure
    """
    bind = SERVER_CONF.addresses.get_by_default('server_bind', '')
    if bind:
        port = bind.split(':')[-1]
    else:
        port = constants.SERVER_DEFAULT_PORT
    endpoint = "http://%s:%s" % (master_ip, port)
    # sync the sql and license files
    command_data = {
        "command": "ha_sync_voi",
        "handler": "NodeHandler",
        "data": {
            "url": constants.HA_SYNC_URL,
            "endpoint": endpoint,
            "paths": paths,
            "voi_template_list": voi_template_list,
            "voi_ha_domain_info": voi_ha_domain_info
        }
    }
    logging.info("sync the file %s to %s", ','.join(paths), backup_ip)
    rep_json = compute_post(backup_ip, command_data, timeout=600)
    if rep_json.get("code", -1) != 0:
        # lazy formatting + .get so a reply without 'msg' cannot raise
        # a KeyError that would mask the real failure
        logging.error("_notify_backup_sync failed in compute_node: %s",
                      rep_json.get('msg'))
        raise EnableHaException("_notify_backup_sync")
def _notify_backup_start(self, backup_ip):
    """Ask the backup controller node to start its backup role."""
    request = {
        "command": "start_backup",
        "handler": "HaHandler",
        "data": {}
    }
    reply = compute_post(backup_ip, request)
    if reply.get("code", -1) == 0:
        return
    logging.error(
        "_notify_backup_start failed in compute_node: %s" % reply['msg'])
    raise EnableHaException("_notify_backup_start")
def reclaim_remote_storage(self, remote_storage_uuid):
    """
    Reclaim (de-allocate) a remote NFS storage from its resource pool.

    Refuses to reclaim when the storage is not allocated, is used as VM
    storage, or still contains images; otherwise umounts the NFS share on
    every node of the pool and clears the allocation in the database.

    :param remote_storage_uuid: uuid of the remote storage record
    :return: build_result response
    """
    if not remote_storage_uuid:
        return build_result("ParamError")
    remote_storage = db_api.get_remote_storage_by_key(
        'uuid', remote_storage_uuid)
    if not remote_storage:
        logger.error(
            "remote storage: {} not exist".format(remote_storage_uuid))
        return build_result("RemoteStorageNotExistError")
    if not remote_storage.allocated:
        logger.error(
            "the remote storage is not allocated, can not reclaim")
        return build_result("RemoteStorageNotAllocatedError")
    if remote_storage.role:
        logger.error(
            "the remote storage is used as vm storage, can not reclaim")
        return build_result("RemoteStorageUsedError")
    instance_dir = constants.NFS_MOUNT_POINT_PREFIX + remote_storage.name + '/instances'
    base_dir = constants.NFS_MOUNT_POINT_PREFIX + remote_storage.name + '/instances/_base'
    # instances/ may legitimately contain the _base subdir, hence "> 1"
    if os.path.exists(instance_dir) and len(os.listdir(instance_dir)) > 1:
        return build_result("RemoteStorageHasImage")
    if os.path.exists(base_dir) and len(os.listdir(base_dir)):
        return build_result("RemoteStorageHasImage")
    nodes = db_api.get_node_by_pool_uuid(remote_storage.allocated_to)
    for node in nodes:
        # umount the NFS share on every node of the pool
        _data = {
            "command": "umount_nfs",
            "handler": "NfsHandler",
            "data": {
                "name": remote_storage.name,
            }
        }
        rep_json = compute_post(node['ip'], _data)
        ret_code = rep_json.get("code", -1)
        if ret_code != 0:
            # fixed message: this is the umount path, not mount
            logger.error("umount nfs failed:%s", rep_json['msg'])
            return build_result("UmountNfsError", host=node['ip'])
    storages = db_api.get_node_storage_by_path(
        constants.NFS_MOUNT_POINT_PREFIX + remote_storage.name)
    for storage in storages:
        storage.soft_delete()
    db_api.update_remote_storage(remote_storage, {
        'allocated_to': None,
        'allocated': 0
    })
    return build_result("Success")
def _delete_network(self, ipaddr, network_uuid, vlan_id):
    """Ask the compute service on ``ipaddr`` to delete a network."""
    payload = {
        "network_id": network_uuid,
        # an empty vlan_id is forwarded as '' rather than converted
        "vlan_id": int(vlan_id) if vlan_id else ''
    }
    request = {
        "command": "delete",
        "handler": "NetworkHandler",
        "data": payload
    }
    logger.info("delete network %s in node %s", network_uuid, ipaddr)
    reply = compute_post(ipaddr, request)
    if reply.get("code", -1) == 0:
        return
    logger.error("delete network failed, node:%s, error:%s",
                 ipaddr, reply['data'])
    raise Exception("delete network failed")
def _notify_new_vip_host_check(self, new_vip_host_ip, vip):
    """Ask the node that will take over the VIP to verify it can host it."""
    request = {
        "command": "check_vip",
        "handler": "HaHandler",
        "data": {"vip": vip}
    }
    reply = compute_post(new_vip_host_ip, request)
    if reply.get("code", -1) == 0:
        return
    logging.error(
        "_notify_new_vip_host_check failed in compute_node: %s"
        % reply['msg'])
    raise SwitchHaMasterException("_notify_new_vip_host_check")
def notify_backup_sync_voi(master_ip, backup_ip, paths, voi_template_list=None):
    """
    Notify the backup controller to sync the disk files of VOI templates.

    Best-effort: any exception is logged and swallowed.

    :param master_ip: IP of the master controller node (sync source)
    :param backup_ip: IP of the backup controller node (sync target)
    :param paths: list of disk file paths to sync
    :param voi_template_list: optional VOI template descriptors
    :return: None
    """
    try:
        bind = SERVER_CONF.addresses.get_by_default('server_bind', '')
        if bind:
            port = bind.split(':')[-1]
        else:
            port = constants.SERVER_DEFAULT_PORT
        endpoint = "http://%s:%s" % (master_ip, port)
        command_data = {
            "command": "ha_sync_voi",
            "handler": "NodeHandler",
            "data": {
                "url": constants.HA_SYNC_URL,
                "endpoint": endpoint,
                "paths": paths,
                "voi_template_list": voi_template_list
            }
        }
        logger.info("start to sync the file %s to backup node %s",
                    ','.join(paths), backup_ip)
        rep_json = compute_post(backup_ip, command_data, timeout=600)
        if rep_json.get("code", -1) != 0:
            logger.error("sync the file %s to backup node %s failed: %s",
                         ','.join(paths), backup_ip, rep_json["msg"])
        else:
            # fixed: the original had three %s placeholders but only two
            # arguments, which made logging drop the record with an error
            logger.info("sync the file %s to backup node %s success",
                        ','.join(paths), backup_ip)
    except Exception as e:
        # logger.exception already records the traceback
        logger.exception("%s", str(e))
def _notify_backup_config(self, vip, netmask, sensitivity, quorum_ip,
                          master_ip, backup_ip, backup_nic):
    """Push the HA configuration to the backup controller node."""
    payload = {
        "vip": vip,
        "netmask": netmask,
        "sensitivity": sensitivity,
        "quorum_ip": quorum_ip,
        "master_ip": master_ip,
        "backup_ip": backup_ip,
        "backup_nic": backup_nic
    }
    request = {
        "command": "config_backup",
        "handler": "HaHandler",
        "data": payload
    }
    rep_json = compute_post(backup_ip, request)
    if rep_json.get("code", -1) == 0:
        return rep_json
    logging.error("_notify_backup_config failed in compute_node: %s"
                  % rep_json['msg'])
    raise EnableHaException("_notify_backup_config")
def _disable_backup(self, backup_ip, paths, voi_template_list=None,
                    voi_xlms=None):
    """Tell the backup controller node to tear down its backup role.

    A failure from the compute service is logged but deliberately not
    raised -- the disable flow continues regardless.
    """
    request = {
        "command": "disable_backup",
        "handler": "HaHandler",
        "data": {
            "paths": paths,
            "voi_template_list": voi_template_list,
            "voi_xlms": voi_xlms
        }
    }
    reply = compute_post(backup_ip, request)
    if reply.get("code", -1) != 0:
        # best-effort: log and fall through, no exception
        logging.error("disable_backup failed in compute_node: %s"
                      % reply['msg'])
    logging.info("disable_backup in compute_node %s success" % backup_ip)
def allocate_remote_storage(self, remote_storage_uuid, resource_pool_uuid):
    """
    Allocate a remote (NFS) storage to a resource pool.

    Mounts the NFS share on every node of the pool, records a per-node
    storage row, then marks the remote storage as allocated with the
    capacity figures reported by the last node.

    :param remote_storage_uuid: uuid of the remote storage record
    :param resource_pool_uuid: uuid of the target resource pool
    :return: build_result response
    """
    if not (remote_storage_uuid and resource_pool_uuid):
        return build_result("ParamError")
    remote_storage = db_api.get_remote_storage_by_key(
        'uuid', remote_storage_uuid)
    if not remote_storage:
        logger.error(
            "remote storage: {} not exist".format(remote_storage_uuid))
        return build_result("RemoteStorageNotExistError")
    if remote_storage.allocated:
        logger.error(
            "the remote storage is already allocated, can not allocated")
        return build_result("RemoteStorageAlreadyAllocatedError")
    nodes = db_api.get_node_by_pool_uuid(resource_pool_uuid)
    if not nodes:
        # guard: with no nodes the loop never runs and rep_json below
        # would be unbound (UnboundLocalError)
        logger.error("no node found in resource pool: %s", resource_pool_uuid)
        return build_result("ParamError")
    remote_storage_list = list()
    for node in nodes:
        # mount the NFS share on every node of the pool
        _data = {
            "command": "mount_nfs",
            "handler": "NfsHandler",
            "data": {
                "nfs_server": remote_storage.server,
                "name": remote_storage.name,
            }
        }
        rep_json = compute_post(node['ip'], _data)
        ret_code = rep_json.get("code", -1)
        if ret_code != 0:
            logger.error("mount nfs failed:%s", rep_json['msg'])
            return build_result("MountNfsError", host=node['ip'])
        if 'data' not in rep_json:
            logger.error("mount nfs failed: unexpected error")
            return build_result("MountNfsError", host=node['ip'])
        storage_uuid = create_uuid()
        # rep_json['data'] is indexed as [used, free, total] per the
        # mapping below -- TODO confirm against the NfsHandler contract
        info = {
            'uuid': storage_uuid,
            'node_uuid': node.uuid,
            'path': constants.NFS_MOUNT_POINT_PREFIX + remote_storage.name,
            'role': '',
            'type': 2,  # 1: local storage, 2: remote storage
            'total': rep_json['data'][2],
            'free': rep_json['data'][1],
            'used': rep_json['data'][0]
        }
        remote_storage_list.append(info)
    logger.info("allocate remote storage success")
    db_api.insert_with_many(models.YzyNodeStorages, remote_storage_list)
    # capacity figures come from the last node's reply
    remote_storage_info = {
        'allocated_to': resource_pool_uuid,
        'allocated': 1,
        'total': rep_json['data'][2],
        'free': rep_json['data'][1],
        'used': rep_json['data'][0]
    }
    db_api.update_remote_storage(remote_storage, remote_storage_info)
    return build_result("Success")
def init_network(self, data):
    """
    Initialize the default virtual switch, network and subnet.

    :param data: {
            "ip": "172.16.1.49",
            "network_name": "default",
            "switch_name": "default",
            "switch_type": "vlan",
            "vlan_id": 10,
            "subnet_info": {
                "name": "default",
                "start_ip": "172.16.1.10",
                "end_ip": "172.16.1.20",
                "netmask": "255.255.0.0",
                "gateway": "172.16.1.254",
                "dns1": "8.8.8.8",
                "dns2": ""
            },
            "uplink": {
                "node_uuid": "",
                "nic_uuid": "",
                "interface": "ens224"
            }
        }
    :return: build_result response
    """
    logger.info("check params")
    if not self._check_params(data):
        return build_result("ParamError")
    vs = db_api.get_virtual_switch_by_name(data['switch_name'])
    if vs:
        return build_result("VSwitchExistError", name=data['switch_name'])
    network = db_api.get_network_by_name(data['network_name'])
    if network:
        return build_result("NetworkNameRepeatError",
                            name=data['network_name'])
    if constants.VLAN_NETWORK_TYPE == data['switch_type']:
        vlan_id = data.get('vlan_id', '')
        if not check_vlan_id(str(vlan_id)):
            return build_result("VlanIDError", vid=vlan_id)
    else:
        vlan_id = None
    subnet = db_api.get_subnet_by_name(data['subnet_info']['name'])
    if subnet:
        return build_result("SubnetNameRepeatError",
                            name=data['subnet_info']['name'])
    try:
        self.check_subnet_params(data['subnet_info'])
    except Exception as e:
        # fixed: the payload has no top-level 'name' key -- referencing
        # data['name'] raised KeyError and masked the subnet error
        return build_result("SubnetInfoError", e.__str__(),
                            name=data['subnet_info']['name'])
    # add default switch
    vs_uuid = create_uuid()
    switch_value = {
        "uuid": vs_uuid,
        "name": data['switch_name'],
        "type": data['switch_type'],
        "default": 1
    }
    uplink_value = {
        "vs_uuid": vs_uuid,
        "node_uuid": data['uplink']['node_uuid'],
        "nic_uuid": data['uplink']['nic_uuid']
    }
    # add default network
    network_uuid = create_uuid()
    network_value = {
        "uuid": network_uuid,
        "name": data['network_name'],
        "switch_name": data['switch_name'],
        "switch_uuid": vs_uuid,
        "switch_type": data['switch_type'],
        "vlan_id": vlan_id,
        "default": 1
    }
    _data = {
        "command": "create",
        "handler": "NetworkHandler",
        "data": {
            "network_id": network_uuid,
            "network_type": data['switch_type'],
            "physical_interface": data['uplink']['interface'],
            "vlan_id": vlan_id
        }
    }
    rep_json = compute_post(data['ip'], _data)
    ret_code = rep_json.get("code", -1)
    if ret_code != 0:
        logger.error("create network failed:%s", rep_json['data'])
        return build_result("NetworkCreateFail")
    logger.info("create network success")
    # add subnet
    data['subnet_info']['network_uuid'] = network_uuid
    subnet_value = self._generate_subnet_info(data['subnet_info'])
    try:
        db_api.add_virtual_swtich(switch_value)
        db_api.add_virtual_switch_uplink(uplink_value)
        db_api.add_network(network_value)
        db_api.add_subnet(subnet_value)
        logger.info("init network success")
    except Exception as e:
        logger.error("init network failed:%s", e, exc_info=True)
        return build_result("NetworkInitFail")
    return build_result("Success")
def update_share_disk(self, data):
    """
    Update a shared data disk of a VOI terminal group.

    Expected payload::

        {
            "uuid": "xxxxxxxxxx",
            "enable": 0,
            "disk_size": 8,
            "restore": 1,
            "share_desktop": [
                {"uuid": "xxxxxxx", "name": "xxxxx", "choice": 0},
                {"uuid": "xxxxxxx", "name": "xxxxx", "choice": 0}
            ]
        }

    :param data: update payload, see above
    :return: build_result response
    """
    """
    {
        "command": "create_share",
        "handler": "VoiHandler",
        "data": {
            "disk_info": {
                'uuid': '2f110de8-78d8-11ea-ad5d-000c29e84b9c',
                'base_path': '/opt/slow/instances'
            }
            "version": 0
        }
    }
    """
    logger.info("terminal share disk update data: {}".format(data))
    try:
        version = 0
        disk_uuid = data["uuid"]
        sys_base, data_base = self._get_template_storage()
        # NOTE(review): if no record matches disk_uuid, share_disk is None
        # and the attribute access below raises -- caught as OtherError
        share_disk = db_api.get_item_with_first(
            models.YzyVoiTerminalShareDisk, {"uuid": disk_uuid})
        disk_info = dict()
        disk_info["uuid"] = share_disk.uuid
        disk_info["base_path"] = sys_base['path']
        disk_info["size"] = data["disk_size"]
        # if the size changed, the disk must be deleted and re-created
        if data["disk_size"] != share_disk.disk_size:
            node = db_api.get_controller_node()
            delete_command = {
                "command": "delete_share",
                "handler": "VoiHandler",
                "data": {
                    "disk_info": {
                        "uuid": share_disk.uuid,
                        "base_path": sys_base['path'],
                    },
                    "version": version
                }
            }
            ret = compute_post(node.ip, delete_command)
            if ret.get("code", -1) != 0:
                logger.error(
                    "terminal share disk update fail, delete old fail")
                return build_result("ShareDiskUpdateFail")
            # create the new data disk with the requested size
            command_data = {
                "command": "create_share",
                "handler": "VoiHandler",
                "data": {
                    "disk_info": disk_info,
                    "version": version
                }
            }
            ret_json = compute_post(node.ip, command_data)
            if ret_json.get("code", -1) != 0:
                logger.error(
                    "terminal share disk update fail, create new fail")
                return build_result("ShareDiskUpdateFail")
            share_disk.disk_size = data["disk_size"]
            # NOTE(review): version is bumped in the DB but the torrent
            # below is still generated with version=0 -- confirm intended
            share_disk.version += 1
        # maintain the desktop-group binding relations
        desktops = db_api.get_item_with_all(
            models.YzyVoiDesktop, {"group_uuid": share_disk.group_uuid})
        desktop_binds = db_api.get_item_with_all(
            models.YzyVoiShareToDesktops, {"disk_uuid": disk_uuid})
        share_desktops = data["share_desktop"]
        copy_share_desktops = share_desktops[:]
        # drop de-selected bindings; whatever remains in the copy is new
        for desktop in share_desktops:
            for bind in desktop_binds:
                if desktop["uuid"] == bind.desktop_uuid:
                    if not int(desktop.get("choice", 0)):
                        bind.soft_delete()
                    copy_share_desktops.remove(desktop)
        insert_binds = list()
        if copy_share_desktops:
            for desktop in copy_share_desktops:
                if desktop["choice"]:
                    for _d in desktops:
                        if desktop["uuid"] == _d.uuid:
                            insert_binds.append({
                                "uuid": create_uuid(),
                                "group_uuid": share_disk.group_uuid,
                                "disk_uuid": disk_uuid,
                                "desktop_uuid": desktop["uuid"],
                                "desktop_name": desktop["name"]
                            })
        # persist the new binding records
        if insert_binds:
            db_api.insert_with_many(models.YzyVoiShareToDesktops,
                                    insert_binds)
        # persist the disk record itself
        share_disk.restore = data["restore"]
        share_disk.enable = data["enable"]
        share_disk.soft_update()
        # generate the bt torrent file in the background
        task = Thread(target=self.create_share_disk_torrent,
                      args=(disk_info, version))
        task.start()
        logger.info(
            "update terminal voi share disk data: {} success".format(
                share_disk))
        return build_result("Success")
    except Exception as e:
        logger.error("", exc_info=True)
        return build_result("OtherError")
def sync_base(self, ipaddr, server_ip, image_id, image_path, host_uuid=None,
              md5_sum=None, version=0):
    """
    Sync a base image to a node and track the operation as a task.

    :param ipaddr: IP of the node that should receive the image
    :param server_ip: IP of the server the node downloads from
    :param image_id: id of the image being synced
    :param image_path: path of the image (used as source and destination)
    :param host_uuid: uuid of the target host, recorded on the task
    :param md5_sum: optional checksum for the node to verify the transfer
    :param version: image version recorded on the task
    :return: JSON reply of the compute service
    """
    task = Task(image_id=image_id, host_uuid=host_uuid, version=version)
    uuid = create_uuid()
    task_id = create_uuid()
    # record the task so its progress can be queried
    task_data = {
        "uuid": uuid,
        "task_uuid": task_id,
        "name": constants.NAME_TYPE_MAP[2],
        "status": constants.TASK_RUNNING,
        "type": 2
    }
    db_api.create_task_info(task_data)
    task.begin(task_id, "start sync the image to host:%s" % ipaddr)
    image = {
        "image_id": image_id,
        "disk_file": image_path,
        "backing_file": image_path,
        "dest_path": image_path,
        "md5_sum": md5_sum
    }
    bind = SERVER_CONF.addresses.get_by_default('server_bind', '')
    if bind:
        port = bind.split(':')[-1]
    else:
        port = constants.SERVER_DEFAULT_PORT
    endpoint = "http://%s:%s" % (server_ip, port)
    command_data = {
        "command": "sync",
        "handler": "TemplateHandler",
        "data": {
            "image_version": 0,
            "task_id": task_id,
            "endpoint": endpoint,
            "url": constants.IMAGE_SYNC_URL,
            "image": image
        }
    }
    rep_json = compute_post(ipaddr, command_data, timeout=600)
    task_obj = db_api.get_task_info_first({"uuid": uuid})
    if rep_json.get('code') != 0:
        # .get: a failure reply may not carry 'data'
        error_detail = rep_json.get('data')
        # fixed: failures were logged at info level
        logger.error("sync the image to host:%s failed:%s",
                     ipaddr, error_detail)
        task.error(
            task_id,
            "sync the image to host:%s failed:%s" % (ipaddr, error_detail))
        task_obj.update({"status": constants.TASK_ERROR})
        task_obj.soft_update()
    else:
        logger.info("sync the base to host:%s success", ipaddr)
        task.end(task_id, "sync the image to host:%s success" % ipaddr)
        task_obj.update({"status": constants.TASK_COMPLETE})
        task_obj.soft_update()
    return rep_json
def _group_templates_by_host(templates, rep_data):
    """Append {uuid, name} entries of *templates* to rep_data keyed by host ip."""
    for item in templates:
        host_ip = item.host.ip
        _d = {"uuid": item.uuid, "name": item.name}
        if host_ip not in rep_data:
            rep_data[host_ip] = list()
        rep_data[host_ip].append(_d)


def _refresh_template_status(templates, items):
    """Flip template.status between active/inactive per the reported state."""
    for template in templates:
        for item in items:
            if item["uuid"] == template.uuid:
                # only toggle templates currently in a steady state
                if template.status in [
                        constants.STATUS_ACTIVE, constants.STATUS_INACTIVE
                ]:
                    if 1 == item.get("state"):
                        status = constants.STATUS_ACTIVE
                    else:
                        status = constants.STATUS_INACTIVE
                    if template.status != status:
                        logger.info(
                            "the template %s status change from %s to %s",
                            template.name, template.status, status)
                        template.status = status
                        template.soft_update()
                break


def update_template_info():
    """
    Poll each node's compute service and refresh the status of all
    (normal and VOI) templates in the database.
    """
    templates = db_api.get_template_with_all({})
    voi_templates = db_api.get_item_with_all(models.YzyVoiTemplate, {})
    rep_data = dict()
    # the two template kinds share the same grouping/refresh logic
    _group_templates_by_host(templates, rep_data)
    _group_templates_by_host(voi_templates, rep_data)
    for k, v in rep_data.items():
        command_data = {
            "command": "get_status_many",
            "handler": "InstanceHandler",
            "data": {
                "instance": v
            }
        }
        logger.debug("get template state in node %s", k)
        rep_json = compute_post(k, command_data)
        logger.debug("from compute get template rep_json:{}".format(rep_json))
        if rep_json.get("code", -1) != 0:
            continue
        items = rep_json.get("data", [])
        _refresh_template_status(templates, items)
        _refresh_template_status(voi_templates, items)
def delete_images(self, data):
    """
    Delete base images, batch supported.

    1. Delete the image on every node of the pool first, then delete the
       master controller's copy.
    2. Batch semantics: a failed image does not stop the remaining ones.

    Payload::

        {
            "pool_uuid": "xxxx-xxxxx",
            "uuids": ["xxxxxxxxxxx", "111111111111"]
        }

    :param data: payload, see above
    :return: build_result response with a success/failure summary
    """
    pool_uuid = data.get('pool_uuid')
    pool = db_api.get_resource_pool_by_key("uuid", pool_uuid)
    if not pool:
        logger.error("resource pool: %s not exist", pool_uuid)
        return build_result("ResourcePoolNotExist")
    nodes = db_api.get_node_by_pool_uuid(pool_uuid)
    success_num = 0
    fail_num = 0
    image_uuids = data.get("uuids", [])
    for uuid in image_uuids:
        try:
            image = db_api.get_image_with_first({"uuid": uuid})
            if not image:
                logger.error("delete base image fail: %s not exist" % uuid)
                fail_num += 1
                continue
            # refuse to delete an image that is still referenced
            instances = db_api.get_devices_with_all({"image_id": uuid})
            if instances:
                logger.error("delete base image fail: %s is use" % uuid)
                fail_num += 1
                continue
            # delete the base image from every node of the pool
            command_data = {
                "command": "delete_base",
                "handler": "TemplateHandler",
                "data": {
                    "image": {
                        "disk_file": image['path']
                    }
                }
            }
            node_failed = False
            for node in nodes:
                node_ip = node.ip
                rep_json = compute_post(node_ip, command_data)
                if rep_json.get("code", -1) != 0:
                    fail_num += 1
                    logger.error(
                        "delete base image fail: node %s image %s delete error",
                        node.uuid, uuid)
                    node_failed = True
                    break
            if node_failed:
                # fixed: previously fell through and still deleted the
                # master copy and counted the image as a success
                continue
            # delete the master controller's copy of the base image
            image_path = image.path
            logger.info("delete main contraller base image: %s" % image_path)
            if os.path.exists(image_path):
                os.remove(image_path)
            image.soft_delete()
            success_num += 1
            logger.info("delete base image success: image %s", uuid)
        except Exception as e:
            logger.error("delete base image exception: image %s" % uuid,
                         exc_info=True)
            fail_num += 1
    if success_num > 0:
        ext_msg = " 成功: %d个, 失败: %d个" % (success_num, fail_num)
        return build_result("Success", ext_msg=ext_msg)
    # not a single image was deleted successfully
    return build_result("ResourceImageDelFail")
def update_instance_info():
    """
    Poll each node's compute service and refresh instance status,
    spice link state and terminal bindings in the database.
    """
    instances = db_api.get_instance_with_all({})
    rep_data = dict()
    instance_dict = dict()
    # group instances by the ip of the node hosting them
    for instance in instances:
        instance_dict[instance.uuid] = instance
        host_ip = instance.host.ip
        _d = {
            "uuid": instance.uuid,
            "name": instance.name
            # "spice_port": spice_port
        }
        if host_ip not in rep_data:
            rep_data[host_ip] = list()
        rep_data[host_ip].append(_d)
    # link_num = 0
    for k, v in rep_data.items():
        command_data = {
            "command": "get_status_many",
            "handler": "InstanceHandler",
            "data": {
                "instance": v
            }
        }
        logger.debug("get instance state in node %s", k)
        rep_json = compute_post(k, command_data)
        logger.debug("from compute get rep_json:{}".format(rep_json))
        if rep_json.get("code", -1) != 0:
            # if the node's compute service is unreachable (code 80000),
            # mark all of its desktops as powered off
            if rep_json.get("code", -1) == 80000:
                for _d in v:
                    if instance_dict[
                            _d["uuid"]].status != constants.STATUS_INACTIVE:
                        instance_dict[_d["uuid"]].update(
                            {"status": constants.STATUS_INACTIVE})
                        logger.info(
                            "compute service unavaiable at node: %s, update instance.status to inactive: %s",
                            k, _d["uuid"])
            continue
        for item in rep_json.get("data", []):
            for instance in instances:
                if item["uuid"] == instance.uuid:
                    if item.get("state") in [
                            constants.DOMAIN_STATE['running']
                    ]:
                        if constants.STATUS_INACTIVE == instance.status:
                            instance.status = constants.STATUS_ACTIVE
                            instance.soft_update()
                    elif item.get('state') in [
                            constants.DOMAIN_STATE['shutdown'],
                            constants.DOMAIN_STATE['shutoff']
                    ]:
                        if constants.STATUS_ACTIVE == instance.status:
                            instance.status = constants.STATUS_INACTIVE
                            # instance.spice_port = ''
                            # instance.spice_link = 0
                            # instance.allocated = 0
                            # instance.link_time = None
                            # notify terminal management that the desktop
                            # closed -- only for desktops bound to a terminal
                            if instance.terminal_mac:
                                # classify == 2 appears to mean a personal
                                # desktop -- TODO confirm against the model
                                if instance.classify == 2:
                                    desktop = db_api.get_personal_desktop_with_first(
                                        {'uuid': instance.desktop_uuid})
                                else:
                                    desktop = db_api.get_desktop_by_uuid(
                                        desktop_uuid=instance.desktop_uuid)
                                if desktop:
                                    data = {
                                        'desktop_name': desktop.name,
                                        'desktop_order': desktop.order_num,
                                        'desktop_uuid': desktop.uuid,
                                        'instance_uuid': instance.uuid,
                                        'instance_name': instance.name,
                                        'host_ip': instance.host.ip,
                                        'port': instance.spice_port,
                                        'token': instance.spice_token,
                                        'os_type': desktop.os_type,
                                        'terminal_mac': instance.terminal_mac
                                    }
                                    logger.info(
                                        'rtn: instance.classify: %s, data: %s'
                                        % (instance.classify, data))
                                    base_controller = BaseController()
                                    ret = base_controller.notice_terminal_instance_close(
                                        data)
                                    # after a successful notification, clear
                                    # the desktop-terminal binding
                                    if ret:
                                        try:
                                            instance.terminal_mac = None
                                        except Exception as e:
                                            logger.error(
                                                "update instance.terminal_mac to None: %s failed: %s",
                                                instance.uuid, e)
                                    logger.info(
                                        'rtn: %s, desktop.uuid: %s, instance.terminal_mac: %s'
                                        % (ret, desktop.uuid,
                                           instance.terminal_mac))
                            instance.soft_update()
                    else:
                        pass
                        # instance.soft_update()
                    logger.debug("the instance %s state %s", instance.uuid,
                                 item.get('state', 0))
                    break
        # NOTE(review): collects spice ports of ALL instances, not only the
        # ones on node k; filtering by host happens below -- confirm intended
        spice_ports = list()
        for instance in instances:
            if instance.spice_port:
                spice_ports.append(instance.spice_port)
        # query the monitor service for the ports' listen status
        ports = ",".join(list(set(spice_ports)))
        if ports:
            ports_status = monitor_post(k, "/api/v1/monitor/port_status",
                                        {"ports": ports})
        else:
            ports_status = {}
        logger.info("from node %s get port status:%s", k, ports_status)
        for instance in instances:
            if instance.host.ip == k and instance.spice_port:
                instance.spice_link = ports_status.get("data", {}).get(
                    instance.spice_port, False)
                if not instance.spice_link:
                    instance.allocated = 0
                instance.soft_update()
                logger.debug("the instance %s spice_link:%s", instance.uuid,
                             instance.spice_link)
def create_share_disk(self, data):
    """
    Create a shared data disk for a VOI terminal group.

    Expected payload::

        {
            "group_uuid": "xxxxxx",
            "disk_size": 5,   # size of the shared disk
            "enable": 0,      # whether the disk is enabled
            "restore": 0      # restore on reboot or not
        }

    :param data: creation payload, see above
    :return: build_result response (or the raw compute reply on failure)
    """
    """
    {
        "command": "create_share",
        "handler": "VoiHandler",
        "data": {
            "disk_info": {
                'uuid': '2f110de8-78d8-11ea-ad5d-000c29e84b9c',
                'base_path': '/opt/slow/instances'
            }
            "version": 0
        }
    }
    """
    logger.info("terminal share disk create data: {}".format(data))
    try:
        version = 0
        node = db_api.get_controller_node()
        sys_base, data_base = self._get_template_storage()
        disk_info = dict()
        disk_info["uuid"] = create_uuid()
        disk_info["base_path"] = sys_base['path']
        disk_info["size"] = data["disk_size"]
        command_data = {
            "command": "create_share",
            "handler": "VoiHandler",
            "data": {
                "disk_info": disk_info,
                "version": version
            }
        }
        logger.info("create share disk %s", disk_info)
        rep_json = compute_post(node.ip, command_data, timeout=600)
        if rep_json.get("code", -1) != 0:
            logger.error("create voi share disk:%s failed, error:%s",
                         disk_info, rep_json.get('data'))
            # message = rep_json['data'] if rep_json.get('data', None) else rep_json['msg']
            # NOTE(review): failure returns the raw compute reply via
            # jsonify while every other path uses build_result -- confirm
            # callers expect this shape
            return jsonify(rep_json)
        # persist the disk record
        share_disk = {
            "group_uuid": data["group_uuid"],
            "uuid": disk_info["uuid"],
            "disk_size": data["disk_size"],
            "enable": data["enable"],
            "restore": data["restore"]
        }
        db_api.create_voi_terminal_share(share_disk)
        # generate the bt torrent file in the background
        task = Thread(target=self.create_share_disk_torrent,
                      args=(disk_info, version))
        task.start()
        logger.info(
            "create terminal voi share disk data: {} success".format(
                share_disk))
        return build_result("Success", {"disk": share_disk})
    except Exception as e:
        logger.error("", exc_info=True)
        return build_result("OtherError")