def esx_del_tmp_folder(dest_host, vmware_vm):
    """Delete the temporary v2v working folder /app/tmp/<vmware_vm> on dest_host.

    First removes the VM's raw files and any *.xml inside the folder, then
    removes the (now empty) directory itself via ``rmdir``.

    :param dest_host: target kvm host the ansible shell command runs on
    :param vmware_vm: VM name; also names the temp directory and its files
    :return: (True, msg) on success, (False, msg) on any failure
    """
    # NOTE(review): these credentials are computed but never passed on;
    # presumably ansible_run_shell picks them up from module config -- confirm.
    r_user = ANSIABLE_REMOTE_USER
    r_pass = decrypt(KVMHOST_LOGIN_PASS)
    b_user = OPENSTACK_DEV_USER
    b_pass = decrypt(KVMHOST_SU_PASS)
    dest_dir = '/app/tmp/' + vmware_vm
    comand_rm = 'cd ' + dest_dir + ';rm -f ' + vmware_vm + '*;rm -f *.xml'
    folder_rm = ansible_run_shell(dest_host, comand_rm)
    if 'contacted' not in folder_rm:
        msg = "连接目标kvmhost失败"
        return False, msg
    elif folder_rm['contacted'] == {}:
        msg = "连接目标kvmhost失败"
        return False, msg
    elif 'failed' in folder_rm['contacted'][dest_host]:
        msg = "删除vm文件失败"
        return False, msg
    else:
        command_rmdir = 'rmdir ' + dest_dir
        dir_rm = ansible_run_shell(dest_host, command_rmdir)
        # Bug fix: the original compared against the *string* '{}' instead of
        # an empty dict, so an unreachable host on the rmdir step was reported
        # as success. Also guard the missing-'contacted' case like the checks
        # above do.
        if 'contacted' not in dir_rm or dir_rm['contacted'] == {}:
            msg = "连接目标kvmhost失败"
            return False, msg
        else:
            msg = "目标路径已删除"
            return True, msg
def get_vm_data_disk(dest_dir, kvmhost):
    """Count this VM's data-disk files inside dest_dir on kvmhost.

    Data disks are the directory entries matching '-sd' that are neither
    .img nor .xml files; the shell pipeline returns the count via ``wc -l``.

    :return: (True, count_string) on success, (False, message) on failure.
    """
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)
    count_cmd = ('cd ' + dest_dir +
                 ";ls |grep -e '-sd'|grep -v .img|grep -v .xml|wc -l")
    reply = ansible_run_shell(kvmhost, count_cmd)
    # Guard clauses: unreachable host, failed task, noisy stderr.
    if 'contacted' not in reply or reply['contacted'] == {}:
        message = '连接目标host失败'
        logging.info(message)
        return False, message
    host_reply = reply['contacted'][kvmhost]
    if 'failed' in host_reply or host_reply['stderr']:
        message = '获取vm数据盘清单失败'
        logging.info(message)
        return False, message
    message = "获取vm磁盘文件清单成功"
    logging.info(message)
    return True, host_reply['stdout']
def del_vm(dest_host, vmname):
    """Power off (virsh destroy) and then remove (virsh undefine) a VM.

    :param dest_host: kvm host the commands run on
    :param vmname: libvirt domain name
    :return: (True, msg) when the domain was undefined, (False, msg) otherwise
    """
    command1 = 'virsh destroy ' + vmname
    command = 'virsh undefine ' + vmname
    r_user = ANSIABLE_REMOTE_USER
    r_pass = decrypt(KVMHOST_LOGIN_PASS)
    b_user = OPENSTACK_DEV_USER
    b_pass = decrypt(KVMHOST_SU_PASS)
    res1 = ansible_run_shell(dest_host, command1)
    # Consistency fix: also guard against a result without the 'contacted'
    # key, as the undefine branch below already does; the original indexed
    # res1['contacted'] blindly and could raise KeyError.
    if 'contacted' not in res1 or res1['contacted'] == {}:
        msg = "连接目标kvmhost失败"
        return False, msg
    else:
        time.sleep(3)  # give libvirt a moment to finish the destroy
        delvm_res = ansible_run_shell(dest_host, command)
        if 'contacted' not in delvm_res:
            msg = "连接目标kvmhost失败"
            return False, msg
        elif delvm_res['contacted'] == {}:
            msg = "连接目标kvmhost失败"
            return False, msg
        elif 'failed' in delvm_res['contacted'][dest_host]:
            msg = "删除目标vm失败"
            return False, msg
        else:
            msg = "目标VM已删除"
            return True, msg
def esx_retry_del_vm_folder(dest_host, dest_dir, vm_name, vmware_vm):
    """Retry cleanup: delete the VM's files (vm_name*, *.xml, vmware_vm*) in dest_dir.

    Refuses to operate on the shared /app/image directory.

    :return: (True, msg) on success, (False, msg) on any failure.
    """
    if dest_dir == '/app/image/' or dest_dir == '/app/image':
        msg = "目标VM路径无效"
        return False, msg
    else:
        command_check = 'ls ' + dest_dir
        r_user = ANSIABLE_REMOTE_USER
        r_pass = decrypt(KVMHOST_LOGIN_PASS)
        b_user = OPENSTACK_DEV_USER
        b_pass = decrypt(KVMHOST_SU_PASS)
        folder_check = ansible_run_shell(dest_host, command_check)
        if 'contacted' not in folder_check:
            msg = "连接目标kvmhost失败"
            return False, msg
        elif folder_check['contacted'] == {}:
            msg = "连接目标kvmhost失败"
            return False, msg
        elif 'failed' in folder_check['contacted'][dest_host]:
            msg = "vm存储路径获取失败"
            return False, msg
        else:
            comand_rm = ('cd ' + dest_dir + ';rm -f ' + vm_name +
                         '*;rm -f *.xml;rm -f ' + vmware_vm + '*')
            folder_rm = ansible_run_shell(dest_host, comand_rm)
            # Consistency fix: also guard the missing-'contacted' case, as
            # the first check above does; the original indexed blindly here.
            if 'contacted' not in folder_rm or folder_rm['contacted'] == {}:
                msg = "连接目标kvmhost失败"
                return False, msg
            elif 'failed' in folder_rm['contacted'][dest_host]:
                msg = "删除vm文件失败"
                return False, msg
            else:
                msg = "目标路径已删除"
                return True, msg
def check_host_os_ver(host_ip):
    """Verify that host_ip runs the expected OS release (HOST_OS_VER).

    Greps /etc/redhat-release on the host; an empty stdout means the
    expected version string was not found.

    :return: (True, msg) on match, (False, msg) on failure or mismatch.
    """
    expected = HOST_OS_VER
    probe_cmd = 'cat /etc/redhat-release |grep %s' % expected
    r_user = ANSIABLE_REMOTE_USER
    r_pass = decrypt(ANSIABLE_REMOTE_PWD)
    b_user = ANSIABLE_REMOTE_SU_USER
    b_pass = decrypt(ANSIABLE_REMOTE_SU_PWD)
    host = host_ip
    reply = ansible_run_shell(host, probe_cmd)
    # Guard-clause style: bail out on each failure mode in turn.
    if 'contacted' not in reply:
        message = '连接目标HOST %s 失败' % host_ip
        logging.error(message)
        return False, message
    if reply['contacted'] == {}:
        message = '连接目标kvm host失败'
        logging.error(message)
        return False, message
    host_reply = reply['contacted'][host_ip]
    if 'failed' in host_reply:
        message = '获取HOST %s OS版本失败' % host_ip
        logging.error(message)
        return False, message
    if host_reply['stderr']:
        message = '获取HOST %s OS版本失败' % host_ip
        logging.error(message)
        return False, message
    if host_reply['stdout'] == '':
        message = 'HOST %s OS版本异常,非指定的 %s 版本,请检查' % (host_ip, expected)
        logging.info(message)
        return False, message
    message = 'HOST %s OS版本检查通过' % host_ip
    logging.info(message)
    return True, message
def check_bond_connection(host_ip, vlan_id):
    """Check whether bridge br_bond0.<vlan_id> exists on host_ip.

    :return: (ok, reachable, message) -- ok is True only when the bridge is
             present; `reachable` is False when the host itself could not be
             queried (so the bridge state is unknown).
    """
    probe_cmd = '/usr/sbin/ip a|grep br_bond0.' + vlan_id
    r_user = ANSIABLE_REMOTE_USER
    r_pass = decrypt(ANSIABLE_REMOTE_PWD)
    b_user = ANSIABLE_REMOTE_SU_USER
    b_pass = decrypt(ANSIABLE_REMOTE_SU_PWD)
    host = host_ip
    reply = ansible_run_shell(host, probe_cmd)
    # Guard-clause style: bail out on each failure mode in turn.
    if 'contacted' not in reply:
        message = '连接目标HOST %s 失败' % (host_ip)
        logging.error(message)
        return False, False, message
    if reply['contacted'] == {}:
        message = '连接目标kvm host失败'
        logging.error(message)
        return False, False, message
    host_reply = reply['contacted'][host_ip]
    if 'failed' in host_reply:
        message = '获取HOST %s 网桥信息出错' % (host_ip)
        logging.error(message)
        return False, False, message
    if host_reply['stderr']:
        message = '获取HOST %s 网桥信息出错' % (host_ip)
        logging.error(message)
        return False, False, message
    if host_reply['stdout'] == '':
        # Host reachable but the bridge has not been created yet.
        message = 'HOST %s 上主网所在网桥未创建' % (host_ip)
        logging.info(message)
        return False, True, message
    message = '获取HOST上主网网桥成功'
    logging.info(message)
    return True, True, message
def del_vm_folder_file(dest_dir, kvmhost, vmware_vm):
    """Delete the VM's data-disk files (<vm>-sd*) and <vm>.xml inside dest_dir.

    :return: (True, msg) on success, (False, msg) on failure.
    """
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)
    del_vm_com = ('cd ' + dest_dir + ';rm -f ' + vmware_vm + '-sd*;rm -f ' +
                  vmware_vm + '.xml')
    delvmfolderfile = ansible_run_shell(kvmhost, del_vm_com)
    if 'contacted' not in delvmfolderfile:
        message = '连接目标host失败'
        logging.info(message)
        return False, message
    elif delvmfolderfile['contacted'] == {}:
        message = '连接目标host失败'
        logging.info(message)
        return False, message
    elif 'failed' in delvmfolderfile['contacted'][kvmhost]:
        # Bug fix: the message here was the truncated '删除', and the two
        # messages below talked about renaming data disks -- copy-paste
        # leftovers from vm_data_disk_rename. This function deletes files.
        message = '删除vm文件失败'
        logging.info(message)
        return False, message
    elif delvmfolderfile['contacted'][kvmhost]['stderr']:
        message = '删除vm文件失败'
        logging.info(message)
        return False, message
    else:
        message = '删除vm文件成功'
        logging.info(message)
        return True, message
def vm_data_disk_rename(datadisk, dest_dir, tag, vm_name, kvmhost):
    """Copy data disk `datadisk` to the standard name <vm_name>_disk<tag>.

    Despite the function name, the shell command uses ``cp -f`` (the source
    file is kept) -- presumably the original is cleaned up elsewhere; confirm.

    :return: (0, new_disk_name) on success, (1, message) on failure.
    """
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)
    new_name = vm_name + '_disk' + str(tag)
    copy_cmd = 'cd ' + dest_dir + ';cp -f ' + datadisk + ' ' + new_name
    reply = ansible_run_shell(kvmhost, copy_cmd)
    # Guard clauses: unreachable host, failed task, noisy stderr.
    if 'contacted' not in reply or reply['contacted'] == {}:
        message = '连接目标host失败'
        logging.info(message)
        return 1, message
    host_reply = reply['contacted'][kvmhost]
    if 'failed' in host_reply or host_reply['stderr']:
        message = '重命名vm数据盘失败'
        logging.info(message)
        return 1, message
    message = '重命名vm数据盘成功'
    logging.info(message)
    return 0, new_name
def vm_start2(kvmhost, vmname, request_id, modulename, vm_uuid):
    """Start the VM via `virsh start` on kvmhost and record the outcome.

    Failure is recorded when the host is unreachable, the ansible task
    failed, or `virsh start` produced no stdout; success also advances the
    v2v step bookkeeping.
    """
    command = 'virsh start ' + vmname
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)

    def _record_failure():
        # Shared failure path; the original repeated this block four times.
        message = 'vm启动失败'
        # NOTE(review): a freshly created Lock gives no cross-thread mutual
        # exclusion; kept for behavioral parity -- a module-level lock is
        # probably what was intended. Confirm before changing.
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_START2,
                                       ActionStatus.FAILD, message)
        threadlock.release()

    vmstart = ansible_run_shell(kvmhost, command)
    if 'contacted' not in vmstart:
        _record_failure()
    elif vmstart['contacted'] == {}:
        _record_failure()
    elif 'failed' in vmstart['contacted'][kvmhost]:
        _record_failure()
    elif vmstart['contacted'][kvmhost]['stdout'] == '':
        _record_failure()
    else:
        message = 'vm启动成功'
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_START2,
                                       ActionStatus.SUCCSESS, message)
        v2v_op.updata_v2v_ontask(request_id, '0')
        v2v_op.update_v2v_step(request_id, esx_v2vActions.VM_START2)
        threadlock.release()
def createdir(kvmhost, request_id, modulename, dir):
    """Create directory `dir` on kvmhost (mkdir -p) and record the outcome.

    On success the created path is also stored as the request's dest dir.
    Note: the parameter name `dir` shadows the builtin but is kept unchanged
    for backward compatibility with keyword callers.
    """
    command = 'mkdir -p ' + dir
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)

    def _record_failure():
        # Shared failure path; the original repeated this block three times.
        message = '创建vm目录失败'
        # NOTE(review): a freshly created Lock gives no cross-thread mutual
        # exclusion; kept for behavioral parity -- confirm intent.
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.CREATE_DEST_DIR,
                                       ActionStatus.FAILD, message)
        threadlock.release()

    dir_result = ansible_run_shell(kvmhost, command)
    if 'contacted' not in dir_result:
        _record_failure()
    elif dir_result['contacted'] == {}:
        _record_failure()
    elif 'failed' in dir_result['contacted'][kvmhost]:
        _record_failure()
    else:
        data = dir
        message = '创建vm目录成功'
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        in_a_s.update_instance_actions(request_id, esx_v2vActions.CREATE_DEST_DIR,
                                       ActionStatus.SUCCSESS, message)
        v2v_op.update_v2v_step(request_id, esx_v2vActions.CREATE_DEST_DIR)
        v2v_op.updata_v2v_ontask(request_id, '0')
        v2v_op.update_v2v_destdir(request_id, data)
        threadlock.release()
def virt_v2v(dest_dir, kvmhost, request_id, vmware_vm):
    """Convert the exported VMware image to qcow2 via virt-v2v on kvmhost.

    Reads the libvirt XML from /app/tmp/<vmware_vm>/, writes qcow2 output
    into dest_dir, and records success/failure in the v2v bookkeeping.
    """
    virtv2v_command = ('cd ' + dest_dir + ';virt-v2v -i libvirtxml /app/tmp/' +
                       vmware_vm + '/' + vmware_vm + '.xml -o local -os ' +
                       dest_dir + ' -of qcow2 --network bond0')
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)

    def _record_failure(message):
        # Shared failure path; the original repeated this block three times.
        # NOTE(review): a freshly created Lock gives no cross-thread mutual
        # exclusion; kept for behavioral parity -- confirm intent.
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VIRT_V2V_FILES,
                                       ActionStatus.FAILD, message)
        threadlock.release()

    virtv2v = ansible_run_shell(kvmhost, virtv2v_command)
    if 'contacted' not in virtv2v:
        _record_failure('无法连接kvmhost')
    elif virtv2v['contacted'] == {}:
        _record_failure('无法连接kvmhost')
    elif 'error' in virtv2v['contacted'][kvmhost]['stderr']:
        # virt-v2v returns noisy stderr; only 'error' markers count as failure.
        _record_failure('转化vm文件失败')
    else:
        message = '转化vm文件成功'
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VIRT_V2V_FILES,
                                       ActionStatus.SUCCSESS, message)
        v2v_op.update_v2v_step(request_id, esx_v2vActions.VIRT_V2V_FILES)
        v2v_op.updata_v2v_ontask(request_id, '0')
        threadlock.release()
def ch_sys_disk_name(dest_dir, kvmhost, vmware_vm, vm_name, request_id):
    """Rename the converted system disk <vmware_vm>-sda to <vm_name>.img.

    Records success/failure in the v2v bookkeeping tables.
    """
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)
    change_sysdisk_name = ('cd ' + dest_dir + ';mv ' + vmware_vm + '-sda ' +
                           vm_name + '.img')

    def _record_failure(message):
        # Shared failure path; the original repeated this block three times.
        # NOTE(review): a freshly created Lock gives no cross-thread mutual
        # exclusion; kept for behavioral parity -- confirm intent.
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_SYS_DISK_STD,
                                       ActionStatus.FAILD, message)
        threadlock.release()

    chsysdiskname = ansible_run_shell(kvmhost, change_sysdisk_name)
    if 'contacted' not in chsysdiskname:
        _record_failure('无法连接kvmhost')
    elif chsysdiskname['contacted'] == {}:
        _record_failure('无法连接kvmhost')
    elif 'error' in chsysdiskname['contacted'][kvmhost]['stderr']:
        _record_failure('标准化vm系统盘文件失败')
    else:
        message = '标准化vm系统盘文件成功'
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_SYS_DISK_STD,
                                       ActionStatus.SUCCSESS, message)
        v2v_op.update_v2v_step(request_id, esx_v2vActions.VM_SYS_DISK_STD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        threadlock.release()
def rmrawfile(kvmhost, vmware_vm, request_id):
    """Delete the raw conversion temp files under /app/tmp/<vmware_vm> and
    the directory itself, recording the outcome in the v2v bookkeeping.
    """
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)
    rm_raw_file = ('cd /app/tmp/' + vmware_vm + '/;rm -f ' + vmware_vm +
                   '*;rmdir /app/tmp/' + vmware_vm)

    def _record_failure(message):
        # Shared failure path; the original repeated this block three times.
        # NOTE(review): a freshly created Lock gives no cross-thread mutual
        # exclusion; kept for behavioral parity -- confirm intent.
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.DELETE_TMP_FILE,
                                       ActionStatus.FAILD, message)
        threadlock.release()

    # Local renamed from `rmrawfile` -- the original shadowed the function name.
    rm_result = ansible_run_shell(kvmhost, rm_raw_file)
    if 'contacted' not in rm_result:
        _record_failure('无法连接kvmhost')
    elif rm_result['contacted'] == {}:
        _record_failure('无法连接kvmhost')
    elif 'error' in rm_result['contacted'][kvmhost]['stderr']:
        _record_failure('删除转化临时文件失败')
    else:
        message = '删除转化临时文件完成'
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        in_a_s.update_instance_actions(request_id, esx_v2vActions.DELETE_TMP_FILE,
                                       ActionStatus.SUCCSESS, message)
        v2v_op.update_v2v_step(request_id, esx_v2vActions.DELETE_TMP_FILE)
        v2v_op.updata_v2v_ontask(request_id, '0')
        threadlock.release()
def __init__(self, db_flag_name, **kwargs):
    '''
    Constructor: build a pooled MySQL connection from keyword configuration.

    :param db_flag_name: logical name of the DB config entry (accepted but
        not used when building the pool itself)
    :param kwargs: pool options -- user, passwd (stored encrypted), host,
        port, charset, database_name, setsession, maxconnections
    '''
    params_init_db_pool = {
        'creator': MySQLdb,  # DB driver PooledDB uses to open connections
        # 'mincached' : kwargs.get("mincached", 5),
        # 'maxcached' : kwargs.get("maxcached", 10),
        'maxconnections': kwargs.get("maxconnections", 50),
        'user': kwargs.get("user"),
        # Passwords are stored encrypted; decrypt just-in-time.
        'passwd': encrypt_helper.decrypt(kwargs.get("passwd")),
        'host': kwargs.get("host", '127.0.0.1'),
        'port': kwargs.get("port", 3306),
        'charset': kwargs.get("charset", 'utf8'),
        'cursorclass': DictCursor,  # rows come back as dicts, not tuples
        'db': kwargs.get("database_name"),
        'setsession': kwargs.get("setsession"),
        'reset': False,
        'connect_timeout': 60 * 2,
    }
    self.db_pool = PooledDB(**params_init_db_pool)
    # Batch size used by fetch-many style reads.
    self.fetch_many_size = 20000
def server_status(manage_ip, sn):
    '''
    Query a physical server's power status via IPMI.

    :param manage_ip: BMC management IP
    :param sn: suffix appended to the decrypted base IPMI password
               (site convention appears to be <base_pwd>+<sn> -- confirm)
    :return: the power-status string on success, 1 on IPMI session error,
             2 on timeout or any unrecognized reply
    '''
    command = "ipmitool -I lanplus -H %s -U %s -P %s power status" % \
        (manage_ip, default.SERVER_USER,
         encrypt_helper.decrypt(default.SERVER_PWD) + sn)
    # NOTE(review): shell=True with interpolated values; inputs come from our
    # own inventory, but an argv-list invocation would be safer.
    result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
    # Poll every 250ms, giving the command at most 1 second to finish.
    timeout = 1
    poll_seconds = .250
    deadline = time.time() + timeout
    # Idiom fix: compare to None with `is`, not `==` (PEP 8 E711).
    while time.time() < deadline and result.poll() is None:
        time.sleep(poll_seconds)
    if result.poll() is None:
        result.terminate()
        return 2
    ret = result.stdout.readline().split('\n')[0]
    if ret == 'Error: Unable to establish IPMI v2 / RMCP+ session':
        return 1
    elif ret == 'Chassis Power is on' or ret == 'Chassis Power is off':
        return ret
    else:
        return 2
def _pool_delete(remote_ip):
    """Destroy and undefine the libvirt storage pool named 'image' on remote_ip.

    :return: True when both commands reached the host, False otherwise.
    """
    command1 = 'virsh pool-destroy image'
    command2 = 'virsh pool-undefine image'
    r_user = ANSIABLE_REMOTE_USER
    r_pass = decrypt(ANSIABLE_REMOTE_PWD)
    b_user = ANSIABLE_REMOTE_SU_USER
    b_pass = decrypt(ANSIABLE_REMOTE_SU_PWD)
    host = remote_ip
    c1 = ansible_run_shell(host, command1)
    # Consistency fix: the other ansible_run_shell callers in this module
    # also guard against a result without the 'contacted' key; the original
    # indexed c1['contacted'] blindly.
    if 'contacted' not in c1 or c1['contacted'] == {}:
        return False
    c2 = ansible_run_shell(host, command2)
    if 'contacted' not in c2 or c2['contacted'] == {}:
        return False
    return True
def host_perform_multithreading(host_ip, host_ostype):
    """Fetch /tmp/<host_ip> performance data from the host via SFTP and,
    when the data is fresh, refresh the host table from it.

    Best-effort: on any failure only the host's last-seen timestamp is
    updated and the function returns.
    """
    user = GET_HOST_PERFORMANCE_USER
    passwd = decrypt(GET_HOST_PERFORMANCE_PWD)
    port = 22
    try:
        # Copy the remote performance file into the local cache directory.
        t = paramiko.Transport((host_ip, port))
        t.connect(username=user, password=passwd)
        sftp = paramiko.SFTPClient.from_transport(t)
        remotepath = '/tmp/' + host_ip
        localpath = '/app/hostinfo/' + host_ip
        sftp.get(remotepath, localpath)
        t.close()
        logging.info("host " + host_ip + " performance file get success")
    except Exception:
        logging.error("host " + host_ip + " connection error")
        update_host_table_time(host_ip)
        return
    try:
        # Ingest the collected data only when the file is recent.
        ftime = os.path.getmtime("/app/hostinfo/%s" % host_ip)
        current_time = time.time()
        if (current_time - ftime) > 300:
            # File older than 5 minutes is considered stale: skip DB update.
            pass
        else:
            with open("/app/hostinfo/%s" % host_ip, 'r') as f:
                data = json.load(f)
            if data['collect_time']:
                collect_timestamp = time.mktime(
                    time.strptime(data['collect_time'], '%Y-%m-%d %H:%M:%S'))
                if get_timestamp() - collect_timestamp > 300:
                    # Collected data itself is stale.
                    return
            else:
                return
            # Refresh the host table from the collected data.
            update_host_table(host_ip, data['ostype'])
    except Exception:
        # Bug fix: this was a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt and hid every parse error silently.
        # Keep the best-effort semantics but log what happened.
        logging.exception("host %s performance data ingest failed", host_ip)
        update_host_table_time(host_ip)
    return
def esx_del_vm_folder(dest_host, dest_dir, vm_ip, vmuuid, vmware_vm):
    """Delete a converted VM's storage directory and its libvirt pool.

    Removes the VM files (vm_ip*, *.xml, vmware_vm*) from dest_dir, removes
    the directory, then destroys/undefines the libvirt pool named after
    vmuuid. Refuses to touch the shared /app/image directory.

    :return: (True, msg) on success, (False, msg) on any failure.
    """
    if dest_dir == '/app/image/' or dest_dir == '/app/image':
        msg = "目标VM路径无效"
        return False, msg
    else:
        command_check = 'ls ' + dest_dir
        r_user = ANSIABLE_REMOTE_USER
        r_pass = decrypt(KVMHOST_LOGIN_PASS)
        b_user = OPENSTACK_DEV_USER
        b_pass = decrypt(KVMHOST_SU_PASS)
        folder_check = ansible_run_shell(dest_host, command_check)
        if 'contacted' not in folder_check:
            msg = "连接目标kvmhost失败"
            return False, msg
        elif folder_check['contacted'] == {}:
            msg = "连接目标kvmhost失败"
            return False, msg
        elif 'failed' in folder_check['contacted'][dest_host]:
            msg = "vm存储路径获取失败"
            return False, msg
        else:
            comand_rm = ('cd ' + dest_dir + ';rm -f ' + vm_ip +
                         '*;rm -f *.xml;rm -f ' + vmware_vm + '*')
            folder_rm = ansible_run_shell(dest_host, comand_rm)
            # Consistency fix: guard the missing-'contacted' case here and on
            # the rmdir result below; the original indexed both blindly.
            if 'contacted' not in folder_rm or folder_rm['contacted'] == {}:
                msg = "连接目标kvmhost失败"
                return False, msg
            elif 'failed' in folder_rm['contacted'][dest_host]:
                msg = "删除vm文件失败"
                return False, msg
            else:
                command_rmdir = 'rmdir ' + dest_dir
                dir_rm = ansible_run_shell(dest_host, command_rmdir)
                if 'contacted' not in dir_rm or dir_rm['contacted'] == {}:
                    msg = "连接目标kvmhost失败"
                    return False, msg
                elif 'failed' in dir_rm['contacted'][dest_host]:
                    msg = "删除vm路径失败"
                    return False, msg
                else:
                    # Best-effort: the pool removal result is deliberately
                    # ignored, matching the original behavior.
                    rm_pool = ('virsh pool-destroy ' + vmuuid +
                               ";virsh pool-undefine " + vmuuid)
                    ansible_run_shell(dest_host, rm_pool)
                    msg = "目标路径已删除"
                    return True, msg
def ansible_run_playbook(play, host_list, params):
    """Run an ansible playbook against host_list with extra_vars=params.

    Credentials come from the module-level ANSIABLE_* settings; privilege
    escalation is done via `su`. Uses the module-level stats/callback
    objects (ansible 1.x PlayBook API).

    :return: the playbook run result.
    """
    logging.info('now start to playbook')
    playbook_kwargs = dict(
        playbook=play,
        host_list=host_list,
        stats=stats,
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        check=False,
        extra_vars=params,
        remote_user=ANSIABLE_REMOTE_USER,
        remote_pass=decrypt(ANSIABLE_REMOTE_PWD),
        become=True,
        become_method='su',
        become_user=ANSIABLE_REMOTE_SU_USER,
        become_pass=decrypt(ANSIABLE_REMOTE_SU_PWD),
    )
    pb = ansible.playbook.PlayBook(**playbook_kwargs)
    result = pb.run()
    playbook_cb.on_stats(pb.stats)
    return result
def libvirt_get_connect(host, conn_type='create', vmname="", poolname=""):
    """Open a libvirt wrapper connection of the requested flavor.

    :param host: target hypervisor host
    :param conn_type: one of 'create', 'storage', 'storages', 'instance',
        'instances'; anything else falls back to 'create'
    :param vmname: VM name, used only by conn_type='instance'
    :param poolname: pool name, used only by conn_type='storage'
    :return: the wrapper connection object, or None on libvirtError
    """
    user = default.HOST_LIBVIRT_USER
    password = decrypt(default.HOST_LIBVIRT_PWD)
    # Idiom fix: the local was named `type`, shadowing the builtin.
    login_type = default.HOST_LIBVIRT_LOGIN_TYPE
    try:
        if conn_type == 'create':
            conn = vrtCreate.wvmCreate(host, user, password, login_type)
        elif conn_type == 'storage':
            conn = vrtStorage.wvmStorage(host, user, password, login_type,
                                         poolname)
        elif conn_type == 'storages':
            conn = vrtStorage.wvmStorages(host, user, password, login_type)
        elif conn_type == 'instance':
            conn = vrtInstance.wvmInstance(host, user, password, login_type,
                                           vmname)
        elif conn_type == 'instances':
            conn = vrtInstance.wvmInstances(host, user, password, login_type)
        else:
            conn = vrtCreate.wvmCreate(host, user, password, login_type)
    except libvirtError as err:
        logging.error(err)
        return None
    return conn
def server_stop(manage_ip, sn):
    '''
    Remote hard power-off via IPMI.

    :param manage_ip: BMC management IP
    :param sn: suffix appended to the decrypted base IPMI password
    :return: 3 host already in the desired (off) state; 2 IPMI timeout,
             re-issue the command; 1 IPMI unusable, contact an admin;
             0 success
    '''
    # Check current status first; an already-off host must not be operated on.
    host_status = server_status(manage_ip, sn)
    # Bug fix: the original compared with `is`. Identity comparison against
    # the string built by server_status() is effectively always False, so
    # the "already off -> return 3" short-circuit could never trigger; the
    # small-int `is 1`/`is 2` checks only worked by CPython accident.
    if host_status == 1 or host_status == 2:
        return 1
    if host_status == 'Chassis Power is off':
        return 3
    command = "ipmitool -I lanplus -H %s -U %s -P %s power off" % \
        (manage_ip, default.SERVER_USER,
         encrypt_helper.decrypt(default.SERVER_PWD) + sn)
    result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
    # Poll every 250ms, giving the command at most 1 second to finish.
    timeout = 1
    poll_seconds = .250
    deadline = time.time() + timeout
    while time.time() < deadline and result.poll() is None:
        time.sleep(poll_seconds)
    if result.poll() is None:
        result.terminate()
        return 2
    ret = result.stdout.readline().split('\n')[0]
    if ret == 'Error: Unable to establish IPMI v2 / RMCP+ session':
        return 1
    elif ret == 'Chassis Power Control: Down/Off':
        return 0
    else:
        return 2
def standard_host(manage_ip, hostpool_id):
    '''
    Standardize a physical host: create per-vlan bridges, verify network
    reachability, run the main host_std playbook, create the storage and
    clone pools, then run the post-standardization checklist.

    :param manage_ip: management IP of the host being standardized
    :param hostpool_id: host pool whose network area supplies the vlan list
    :return: (True, msg) on success, (False, msg/err_data) at the first
             failing step
    '''
    url = HOST_STANDARD_DIR + '/host_std.yaml'
    host_list = [manage_ip]
    # Fetch the vlan list of the network area this host pool belongs to.
    hostpool_info = hp_s.HostPoolService().get_hostpool_info(hostpool_id)
    net_area_id = hostpool_info['net_area_id']
    vlan_res, vlan_data = seg_s.SegmentService().get_area_segment_list(
        net_area_id)
    if not vlan_res:
        # On failure vlan_data carries the error payload.
        return False, vlan_data
    else:
        host_vlan_filter_dict = {}
        host_vlan_list = vlan_data
        # host_vlan_list_dupl = sorted(set(host_vlan_list), key=host_vlan_list.index)
        # Group the segments' vlans by their br_bond bridge name.
        for _host_vlan in host_vlan_list:
            if _host_vlan['host_bridge_name'] in host_vlan_filter_dict.keys():
                host_vlan_filter_dict[_host_vlan['host_bridge_name']].append(
                    _host_vlan['vlan'])
            else:
                host_vlan_filter_dict[_host_vlan['host_bridge_name']] = [
                    _host_vlan['vlan']
                ]
        # host_vlan_list_dupl = sorted(set(host_vlan_list), key=lambda i: i["host_bridge_name"])
        # Run the bridge-creation playbook once per bond, sleeping between runs.
        br_bond_create_shell_url = HOST_STANDARD_DIR + '/host_std_br_bond_create.yaml'
        for _bond_name, _vlan_list in host_vlan_filter_dict.items():
            host_dict = {
                "srcdir": HOST_STANDARD_DIR,
                "host_vlan_list": _vlan_list,
                # bridge names look like 'br_bond0' -> pass just 'bond0'
                "br_bond": _bond_name.split('_')[1]
            }
            run_result, run_message = run_standard_host(br_bond_create_shell_url,
                                                        manage_ip, host_dict)
            if not run_result:
                logging.info('物理机%s初始化新增内网vlan执行playbook失败,原因:%s' %
                             (manage_ip, run_message))
                return False, run_message
            time.sleep(2)
        time.sleep(2)
        # Build the input for the host bridge connectivity check.
        host_test_bridge_list = []
        for vlan_info in vlan_data:
            bridge_name = vlan_info['host_bridge_name'] + '.' \
                + vlan_info['vlan']
            gateway_ip = vlan_info['gateway']
            vlan = vlan_info['vlan']
            host_test_bridge = {
                "bridge": bridge_name,
                "gateway": gateway_ip,
                "vlan": vlan
            }
            host_test_bridge_list.append(host_test_bridge)
        # # 循环调用playbook测试HOST上对指定网桥的访问是否正常
        # bridge_test_shell_url = HOST_STANDARD_DIR + '/host_std_test_vlan.yaml'
        # host_dict = {
        #     "test_bridge_info": host_test_bridge_list
        # }
        res, message = check_vlan_connection(manage_ip, host_test_bridge_list)
        if not res:
            return False, message
        logging.info('start to do host std playbook')
        # Pick the yum server for the host's data-center type.
        datacenter_id = da_s.DataCenterService().get_dctype_by_net_area_id(
            net_area_id)
        if str(datacenter_id) in PRD_DC_TYPE:
            yum_server_addr = PRD_YUM_SERVER
        else:
            yum_server_addr = CS_YUM_SERVER
        # #构造vlan_list字符串传递给playbook作为入参
        # vlan_list_str = " ".join(host_vlan_list)
        # vlan_str = "\'" + vlan_list_str + "\'"
        # print vlan_str
        playbook_url = HOST_STANDARD_DIR + '/host_std.yaml'
        host_dict = {
            "srcdir": HOST_STANDARD_DIR,
            "agentdir": HOST_AGENT_PACKAGE_COPY_DIR,
            "agentshelldir": HOST_AGENT_PACKAGE_INSTALL_SHELL_DIR,
            "libvirt_user_pwd": decrypt(HOST_LIBVIRT_PWD),
            "root_pwd": decrypt(ROOT_PWD),
            "yum_server_ip": yum_server_addr
        }
        run_result, run_message = run_standard_host(playbook_url, manage_ip,
                                                    host_dict)
        if not run_result:
            return False, run_message
        logging.info('物理机%s初始化playbook执行成功' % manage_ip)
        # Create the image storage pool.
        pool_ret = _create_storage_pool(manage_ip)
        if not pool_ret:
            logging.info('host manage ip %s create pool fail when standard host',
                         manage_ip)
            msg = "创建存储池失败"
            return False, msg
        # Create the clone pool.
        pool_ret = _create_clone_pool(manage_ip)
        if not pool_ret:
            logging.info(
                'host manage ip %s create clone pool fail when standard host',
                manage_ip)
            msg = "创建clone存储池失败"
            return False, msg
        # Run the post-standardization checklist on the host.
        ret_checklist, msg_checklist = host_std_checklist(manage_ip)
        if not ret_checklist:
            msg = msg_checklist
            logging.info(msg)
            return False, msg
        msg = "标准化主机成功"
        return True, msg
def msg_to_sfslb(task_idapi, job_status, detail, request_ip):
    """Push a job-step status callback to the SFSLB service.

    On a 401 response, authenticates against MSG_TO_SFSLB_LOGIN_URL and
    retries the callback once with the session cookies.

    :return: the HTTP status code as a string, or str(ErrorCode.SYS_ERR)
             on any request failure.
    """
    msg = {'jobStepNodeId': task_idapi, 'status': job_status, 'detail': detail}
    data = json.dumps(msg)
    headers = {"Content-Type": "application/json;charset=UTF-8"}
    # Soft load-balancer no longer needs this, so it was left unchanged.
    url = MSG_TO_SFSLB_URL
    try:
        r = requests.post(url, data=data, headers=headers, verify=False)
        # NOTE(review): requests.post is synchronous, so status_code is
        # already set here -- this polling loop (and the two below) appears
        # to be a no-op; confirm before removing.
        timeout = 5
        poll_seconds = 1
        deadline = time.time() + timeout
        while time.time() < deadline and not r.status_code:
            time.sleep(poll_seconds)
        if not r.status_code:
            return str(ErrorCode.SYS_ERR)
        # logging.info("requests to /slb_instance/jobcallback data:{},status_code:{}".format(data, r.status_code))
        if r.status_code == 401:
            # Not authenticated: log in, then retry the callback once.
            auth_msg = {
                'userId': SLB_AUTH_USER,
                'passWord': decrypt(SLB_AUTH_PASSWD),
                'opruserRole': 4
            }
            try:
                r_auth = requests.post(MSG_TO_SFSLB_LOGIN_URL, params=auth_msg,
                                       headers=headers)
                # Same no-op polling pattern as above -- see NOTE(review).
                timeout = 5
                poll_seconds = 1
                deadline = time.time() + timeout
                while time.time() < deadline and not r_auth.status_code:
                    time.sleep(poll_seconds)
                if not r_auth.status_code:
                    return str(ErrorCode.SYS_ERR)
                # After authenticating, push the creation message again with
                # the session cookies.
                try:
                    r = requests.post(url, data=data, headers=headers,
                                      cookies=r_auth.cookies)
                    # Same no-op polling pattern as above -- see NOTE(review).
                    timeout = 5
                    poll_seconds = 1
                    deadline = time.time() + timeout
                    while time.time() < deadline and not r.status_code:
                        time.sleep(poll_seconds)
                    if not r.status_code:
                        return str(ErrorCode.SYS_ERR)
                    return str(r.status_code)
                except Exception as err:
                    logging.error('post to sfslb again error because:%s' % err)
                    return str(ErrorCode.SYS_ERR)
            except Exception as err:
                logging.error('post to vs error because:%s' % err)
                return str(ErrorCode.SYS_ERR)
        return str(r.status_code)
    except Exception as err:
        logging.error('post to sfslb error because:%s' % err)
        return str(ErrorCode.SYS_ERR)
def get_collect_request_multithreading(task_idapi, task_idkvm, vm_count,
                                       request_opr_user, request_api_origin):
    """Collect the creation status of a batch-VM ticket and report it back.

    Runs on a background thread.  It claims the ticket via the ``istraceing``
    flag, counts succeeded vs. create-timed-out instances for ``task_idkvm``,
    and once the batch is complete calls back the originating external system
    (``request_api_origin``: VISHNU / SFSLB / FWAF), then releases the ticket.

    :param task_idapi: job id known to the external API.
    :param task_idkvm: internal task id used as the DB key.
    :param vm_count: number of VMs requested in this batch.
    :param request_opr_user: operator name, echoed in the callback payload.
    :param request_api_origin: ApiOrigin value selecting the callback target.
    :return: None on every path; all results are persisted / called back.
    """
    # Skip if another thread is already tracking this ticket.
    try:
        params = {
            'WHERE_AND': {
                '=': {
                    'taskid_kvm': task_idkvm,
                    'taskid_api': task_idapi,
                    'istraceing': '1'
                },
            }
        }
        ret_traceing_nums, ret_traceing_data = request_r_s.RequestRecordService(
        ).request_db_query_data(**params)
        if ret_traceing_nums > 0:
            return
        # Mark the ticket as being tracked by this thread.
        _update_data = {
            'istraceing': '1',
        }
        _where_data = {
            'taskid_kvm': task_idkvm,
        }
        ret = request_r_s.RequestRecordService().update_request_status(
            _update_data, _where_data)
        if ret <= 0:
            logging.error('update request %s status to db failed' % task_idkvm)
        ret_request_re = request_r_s.RequestRecordService(
        ).get_request_record_info_by_taskid_kvm(task_idkvm)
        request_ip = ret_request_re["request_ip"]
        # HTTP codes treated as "callback delivered".
        # NOTE(review): '500' is counted as delivered — looks deliberate
        # (server received it), but confirm.
        succeed_http_code = ['200', '500']
        # Instances whose data injection finished successfully.
        instance_actions_succeed_params = {
            'WHERE_AND': {
                '=': {
                    'task_id': task_idkvm,
                    'action': 'instance_inject_data',
                    'status': 1
                },
            },
        }
        # instance_actions_failed_params = {
        #     'WHERE_AND': {
        #         '=': {
        #             'task_id': task_idkvm,
        #             'status': 2
        #         },
        #         '!=': {
        #             'action': 'image_sync_status'
        #         },
        #     },
        # }
        # Instances stuck in CREATE_ERROR (creation timed out / failed).
        instance_timeout_failed_params = {
            'WHERE_AND': {
                '=': {
                    'task_id': task_idkvm,
                    'status': VMStatus.CREATE_ERROR
                },
            },
        }
        vm_succeed_count_db_ret, vm_succeed_data = instance_a_s.InstanceActionsServices(
        ).query_data(**instance_actions_succeed_params)
        # vm_failed_count_db_ret, vm_failed_data =
        # instance_a_s.InstanceActionsServices().query_data(**instance_actions_failed_params)
        vm_createtimeout_count_db_ret, vm_createtimeout_failed_data = instance_s.InstanceService(
        ).query_data(**instance_timeout_failed_params)
        # Normalize falsy query results (None/0) to integer zero.
        if not vm_succeed_count_db_ret:
            vm_succeed_count_db_ret = 0
        if not vm_createtimeout_count_db_ret:
            vm_createtimeout_count_db_ret = 0
        instances_uuid = []
        if vm_succeed_count_db_ret > 0:
            for per_request_data in vm_succeed_data:
                instances_uuid.append(per_request_data['instance_uuid'])
            if vm_count == vm_succeed_count_db_ret:
                # Whole batch succeeded: look up hostname / IP / physical-host
                # serial number per VM uuid to build the callback payload.
                vm_datas = []
                for instance_uuid in instances_uuid:
                    ret_ins = instance_s.InstanceService(
                    ).get_instance_info_by_uuid(instance_uuid)
                    if ret_ins:
                        ret_ins_host = instance_s.get_host_of_instance(
                            ret_ins['id'])
                        ret_ins_ip = instance_s.get_net_segment_info_of_instance(
                            ret_ins['id'])
                        if ret_ins_host and ret_ins_ip:
                            vm_data = {
                                'instance_ids': ret_ins['id'],
                                'host_name': ret_ins['name'],
                                'ip': ret_ins_ip['ip_address'],
                                'ip_type': ret_ins_ip['segment_type'],
                                'sn': ret_ins_host['sn'],
                                'UUID': instance_uuid,
                                'net_name': ret_ins_ip['segment'],
                                'subnet_mask': ret_ins_ip['netmask'],
                                'gateway': ret_ins_ip['gateway_ip'],
                                'vlan_id': ret_ins_ip['vlan'],
                                'passWord': decrypt(ret_ins['password'])
                            }
                            vm_datas.append(vm_data)
                msg_detail = {'opUser': request_opr_user, 'vm': vm_datas}
                # Call back the originating external API with SUCCEED.
                response_to_api_status = '1'
                if request_api_origin == ApiOrigin.VISHNU:
                    ret_code = msg_to_vishnu(task_idapi, VsJobStatus.SUCCEED,
                                             msg_detail, request_ip)
                elif request_api_origin == ApiOrigin.SFSLB:
                    ret_code = msg_to_sfslb(task_idapi, VsJobStatus.SUCCEED,
                                            msg_detail, request_ip)
                elif request_api_origin == ApiOrigin.FWAF:
                    ret_code = msg_to_fwaf(task_idapi, VsJobStatus.SUCCEED,
                                           msg_detail, request_ip)
                else:
                    # Unknown origin defaults to the VISHNU callback.
                    ret_code = msg_to_vishnu(task_idapi, VsJobStatus.SUCCEED,
                                             msg_detail, request_ip)
                if ret_code not in succeed_http_code:
                    response_to_api_status = '0'
                # Persist final state: task_status '1' == success.
                update_db_time = get_datetime_str()
                _update_data = {
                    'task_status': '1',
                    'response_to_api': response_to_api_status,
                    'finish_time': update_db_time,
                    'request_status_collect_time': update_db_time,
                }
                _where_data = {
                    'taskid_kvm': task_idkvm,
                }
                ret = request_r_s.RequestRecordService().update_request_status(
                    _update_data, _where_data)
                if ret <= 0:
                    logging.error('update request %s status to db failed' %
                                  task_idkvm)
            elif (vm_createtimeout_count_db_ret + vm_succeed_count_db_ret) == vm_count \
                    and vm_createtimeout_count_db_ret > vm_count * 0.2:
                # Batch finished but failure rate exceeds 20%:
                # report the whole job as FAILED.
                response_to_api_status = '1'
                if request_api_origin == ApiOrigin.VISHNU:
                    ret_code = msg_to_vishnu(task_idapi, VsJobStatus.FAILED,
                                             'all kvm instance create failed',
                                             request_ip)
                elif request_api_origin == ApiOrigin.SFSLB:
                    ret_code = msg_to_sfslb(task_idapi, VsJobStatus.FAILED,
                                            'all kvm instance create failed',
                                            request_ip)
                elif request_api_origin == ApiOrigin.FWAF:
                    ret_code = msg_to_fwaf(task_idapi, VsJobStatus.FAILED,
                                           'all kvm instance create failed',
                                           request_ip)
                else:
                    ret_code = msg_to_vishnu(task_idapi, VsJobStatus.FAILED,
                                             'all kvm instance create failed',
                                             request_ip)
                if ret_code not in succeed_http_code:
                    response_to_api_status = '0'
                # task_status '2' == failed.
                update_db_time = get_datetime_str()
                _update_data = {
                    'task_status': '2',
                    'response_to_api': response_to_api_status,
                    'finish_time': update_db_time,
                    'request_status_collect_time': update_db_time,
                }
                _where_data = {
                    'taskid_kvm': task_idkvm,
                }
                ret = request_r_s.RequestRecordService().update_request_status(
                    _update_data, _where_data)
                if ret <= 0:
                    logging.error('update request %s status to db failed' %
                                  task_idkvm)
            else:
                if (vm_createtimeout_count_db_ret +
                        vm_succeed_count_db_ret) == vm_count:
                    # Batch finished with a tolerable (<20%) failure share.
                    # Return the successfully-created VMs to the external API
                    # (disabled; kept as an inert string literal):
                    """
                    vm_datas = []
                    for instance_uuid in instances_uuid:
                        ret_ins = instance_s.InstanceService().get_instance_info_by_uuid(instance_uuid)
                        if ret_ins:
                            ret_ins_host = instance_s.get_host_of_instance(ret_ins['id'])
                            ret_ins_ip = instance_s.get_net_segment_info_of_instance(ret_ins['id'])
                            if ret_ins_host and ret_ins_ip:
                                vm_data = {
                                    'host_name': ret_ins['name'],
                                    'ip': ret_ins_ip['ip_address'],
                                    'sn': ret_ins_host['sn'],
                                    'UUID': instance_uuid,
                                    'net_name': ret_ins_ip['segment'],
                                    'subnet_mask': ret_ins_ip['netmask'],
                                    'gateway': ret_ins_ip['gateway_ip'],
                                    'vlan_id': ret_ins_ip['vlan']
                                }
                                vm_datas.append(vm_data)
                    msg_detail = {'opUser': request_opr_user, 'vm': vm_datas}
                    """
                    # Call back FAILED with a "partial failure" message.
                    response_to_api_status = '1'
                    if request_api_origin == ApiOrigin.VISHNU:
                        ret_code = msg_to_vishnu(
                            task_idapi, VsJobStatus.FAILED,
                            'part of kvm instance create failed', request_ip)
                    elif request_api_origin == ApiOrigin.SFSLB:
                        ret_code = msg_to_sfslb(
                            task_idapi, VsJobStatus.FAILED,
                            'part of kvm instance create failed', request_ip)
                    elif request_api_origin == ApiOrigin.FWAF:
                        ret_code = msg_to_fwaf(
                            task_idapi, VsJobStatus.FAILED,
                            'part of kvm instance create failed', request_ip)
                    else:
                        ret_code = msg_to_vishnu(
                            task_idapi, VsJobStatus.FAILED,
                            'part of kvm instance create failed', request_ip)
                    if ret_code not in succeed_http_code:
                        response_to_api_status = '0'
                    update_db_time = get_datetime_str()
                    _update_data = {
                        'task_status': '2',
                        'response_to_api': response_to_api_status,
                        'finish_time': update_db_time,
                        'request_status_collect_time': update_db_time,
                    }
                    _where_data = {
                        'taskid_kvm': task_idkvm,
                    }
                    ret = request_r_s.RequestRecordService(
                    ).update_request_status(_update_data, _where_data)
                    if ret <= 0:
                        logging.error(
                            'update request %s status to db failed' %
                            task_idkvm)
                else:
                    # succeeded + failed does not yet equal vm_count:
                    # batch still in progress, keep waiting.
                    pass
        elif vm_createtimeout_count_db_ret > 0:
            # No successes recorded yet: all observed VMs failed to create.
            if (vm_createtimeout_count_db_ret + vm_succeed_count_db_ret) == vm_count \
                    and vm_createtimeout_count_db_ret > vm_count * 0.2:
                # Call back the originating external API with FAILED.
                response_to_api_status = '1'
                if request_api_origin == ApiOrigin.VISHNU:
                    ret_code = msg_to_vishnu(task_idapi, VsJobStatus.FAILED,
                                             'all kvm instance create failed',
                                             request_ip)
                elif request_api_origin == ApiOrigin.SFSLB:
                    ret_code = msg_to_sfslb(task_idapi, VsJobStatus.FAILED,
                                            'all kvm instance create failed',
                                            request_ip)
                elif request_api_origin == ApiOrigin.FWAF:
                    ret_code = msg_to_fwaf(task_idapi, VsJobStatus.FAILED,
                                           'all kvm instance create failed',
                                           request_ip)
                else:
                    ret_code = msg_to_vishnu(task_idapi, VsJobStatus.FAILED,
                                             'all kvm instance create failed',
                                             request_ip)
                if ret_code not in succeed_http_code:
                    response_to_api_status = '0'
                update_db_time = get_datetime_str()
                _update_data = {
                    'task_status': '2',
                    'response_to_api': response_to_api_status,
                    'finish_time': update_db_time,
                    'request_status_collect_time': update_db_time,
                }
                _where_data = {
                    'taskid_kvm': task_idkvm,
                }
                ret = request_r_s.RequestRecordService().update_request_status(
                    _update_data, _where_data)
                if ret <= 0:
                    logging.error('update request %s status to db failed' %
                                  task_idkvm)
        else:
            # Nothing succeeded or failed yet: just refresh the timestamp of
            # this collection pass.
            update_db_time = get_datetime_str()
            _update_data = {'request_status_collect_time': update_db_time}
            _where_data = {
                'taskid_kvm': task_idkvm,
            }
            ret = request_r_s.RequestRecordService().update_request_status(
                _update_data, _where_data)
            if ret <= 0:
                logging.error('update request %s status to db failed' %
                              task_idkvm)
        # Release the ticket: clear the tracking flag so a later pass (or
        # another thread) may pick it up again.
        _update_data = {
            'istraceing': '0',
        }
        _where_data = {
            'taskid_kvm': task_idkvm,
        }
        ret = request_r_s.RequestRecordService().update_request_status(
            _update_data, _where_data)
        if ret <= 0:
            logging.error(
                'update request %s status to db failed when mark istraceing 1'
                % task_idkvm)
        return
    except Exception as e:
        # Catch-all so a tracking thread never dies with an unlogged error.
        logging.error('request {} threading exception error: {}'.format(
            task_idkvm, e))
        return
def _check_task(i, row, user_id):
    """Validate one spreadsheet row describing a VM to convert (v2v).

    Runs the full pre-check chain (flavor, cluster, group, segment/IP,
    source-VM existence, OS version, disk size, power state, hostname) and
    puts a ``(bool, payload)`` tuple on the module-level queue ``q``:
    ``(False, error_message)`` on the first failed check, or
    ``(True, task_dict)`` with the assembled task parameters.

    :param i: zero-based row index (used only for the error-message line no).
    :param row: sequence of 14 string cells, see field comments below.
    :param user_id: requesting user's id, copied into the task dict.
    :return: None; results are delivered through ``q``.
    """
    # 1-based row number for user-facing error messages.
    cur_line = str(i + 1)
    # Openstack environment (SIT / DEV)
    cloud_area = row[0]
    cloud_area = cloud_area.strip()
    # VM name
    vm_name = row[1]
    vm_name = vm_name.strip()
    # VM IP
    vm_ip = row[2]
    vm_ip = vm_ip.strip()
    # Network segment the VM belongs to
    vm_segment = row[3]
    vm_segment = vm_segment.strip()
    # Application/system description
    vm_app_info = row[4]
    vm_app_info = vm_app_info.strip()
    # Application administrator
    vm_owner = row[5]
    vm_owner = vm_owner.strip()
    # VM environment name (DEV / SIT / STG)
    vm_env_name = row[6]
    vm_env_name = vm_env_name.strip()
    # Network area name
    netarea_name = row[7]
    netarea_name = netarea_name.strip()
    # Host-pool (cluster) name
    hostpool_name = row[8]
    hostpool_name = hostpool_name.strip()
    # Guest OS type/version label
    vm_ostype = row[9]
    vm_ostype = vm_ostype.strip()
    # Application group name
    group_name = row[10]
    group_name = group_name.strip()
    # CPU count (string)
    cpu_num = row[11]
    cpu_num = cpu_num.strip()
    # Memory size (string)
    mem_size = row[12]
    mem_size = mem_size.strip()
    # Memory size unit: 'G' or 'M'
    mem_size_unit = row[13]
    mem_size_unit = mem_size_unit.strip()
    # All 14 cells are mandatory.
    if not cloud_area or not vm_name or not vm_ip or not vm_segment or not vm_app_info or not vm_owner \
            or not vm_env_name or not netarea_name or not hostpool_name or not vm_ostype or not group_name \
            or not cpu_num or not mem_size or not mem_size_unit:
        result = False, "第" + cur_line + "行:参数不正确"
        q.put(result)
        return
    # 1. Resolve the flavor from vcpu + memory (normalized to MB).
    if mem_size_unit == 'G':
        mem_size_mb = int(mem_size) * 1024
    elif mem_size_unit == 'M':
        # NOTE(review): stays a string here (no int()) while the 'G' branch
        # yields an int — the lookup below apparently tolerates both; confirm.
        mem_size_mb = mem_size
    else:
        result = False, "第" + cur_line + "行:内存容量单位不正确"
        q.put(result)
        return
    flavor_info = flavor_s.get_flavor_by_vcpu_and_memory(cpu_num, mem_size_mb)
    if not flavor_info:
        result = False, "第" + cur_line + "行:实例规格数据有误"
        q.put(result)
        return
    # 2. Resolve the cluster from datacenter type, network area and pool name.
    #    Environment name -> internal env code.
    if vm_env_name == "DEV":
        vm_env = "3"
    elif vm_env_name == "SIT":
        vm_env = "1"
    elif vm_env_name == "STG":
        vm_env = "2"
    else:
        result = False, "第" + cur_line + "行:VM环境数据有误"
        q.put(result)
        return
    hostpool_data = hostpool_s.get_hostpool_by_vmenv_netarea_hostpool(
        vm_env, netarea_name, hostpool_name)
    if not hostpool_data:
        result = False, "第" + cur_line + "行:VM集群数据有误"
        q.put(result)
        return
    # 3. Resolve the application group.
    group_info = group_s.get_group_info_by_name_and_env(group_name, vm_env)
    if not group_info:
        result = False, "第" + cur_line + "行:应用组数据有误"
        q.put(result)
        return
    # 4. Validate the segment and make sure the IP is not already in use.
    vm_segment_info = segment_s.SegmentService().get_segment_info_bysegment(
        vm_segment)
    if vm_segment_info is None:
        result = False, "第" + cur_line + "行:网段信息有误"
        q.put(result)
        return
    else:
        ip_data = ip_s.IPService().get_ip_info_by_ipaddress(vm_ip)
        if ip_data:
            if str(ip_data['status']) != IPStatus.UNUSED:
                result = False, "第" + cur_line + "行:IP与现有环境冲突"
                q.put(result)
                return
    # 5. Management node + ssh credentials of the source OpenStack env.
    if cloud_area == "SIT":
        ctr_host = "10.202.83.12"
        ctr_pass = decrypt(OPENSTACK_SIT_PASS)
    elif cloud_area == "DEV":
        ctr_host = "10.202.123.4"
        ctr_pass = decrypt(OPENSTACK_DEV_PASS)
    else:
        result = False, "第" + cur_line + "行:openstack环境参数错误"
        q.put(result)
        return
    # 6. The source VM must be resolvable on that environment.
    vmexist = _vm_exist(vm_ip, ctr_host, ctr_pass)
    if not vmexist:
        result = False, "第" + cur_line + "行:获取vm信息失败"
        q.put(result)
        return
    # 7. Detect the guest OS version.
    osstat, verdata = _get_vm_version(ctr_host, ctr_pass, vm_ostype, vm_ip)
    if not osstat:
        result = False, "第" + cur_line + "行:获取vm OS版本失败"
        q.put(result)
        return
    # 8. Total disk size; everything beyond the 80G system disk is data disk.
    vdiskdata = _vm_disk_size(vm_ip, ctr_host, ctr_pass)
    if not vdiskdata:
        result = False, "第" + cur_line + "行:获取vm磁盘信息失败"
        q.put(result)
        return
    data_disk = vdiskdata - 80
    # 9. The source VM must be shut down before conversion.
    vmshutdown = _vm_state(vm_ip, ctr_host, ctr_pass)
    if not vmshutdown:
        result = False, "第" + cur_line + "行:待转化vm未关机"
        q.put(result)
        return
    # 10. Hostname must not contain Chinese characters.
    check_chinese, msg = _check_chinese(vm_name)
    if not check_chinese:
        err_msg = "第" + cur_line + "行:" + msg
        result = False, err_msg
        q.put(result)
        return
    # 11. Assemble the validated task parameters.
    _task = {
        'vm_name': vm_name,
        'vm_ip': vm_ip,
        'flavor_id': flavor_info['id'],
        'cloud_area': cloud_area,
        'vm_ostype': vm_ostype,
        'vm_app_info': vm_app_info,
        'vm_owner': vm_owner,
        'hostpool_id': hostpool_data['hostpool_id'],
        'group_id': group_info['id'],
        'user_id': user_id,
        'segment': vm_segment,
        'vm_disk': data_disk,
        'vm_osver': verdata,
    }
    result = True, _task
    q.put(result)
def task_check():
    """Pre-check a single VM for v2v migration (HTTP endpoint).

    Reads ``vm_ip`` / ``flavor_id`` / ``cloud_area`` / ``vm_ostype`` /
    ``segment`` from the request, verifies the source VM exists and is shut
    down and that segment/IP do not conflict with the inventory, and returns
    the detected OS version plus data-disk size in the JSON response.

    :return: json_helper.format_api_resp(...) with SUCCESS and
        ``{'vm_osver', 'vm_disk'}`` on success, PARAM_ERR/SYS_ERR otherwise.

    Fixes: replaced non-idiomatic ``== False`` / ``== None`` comparisons
    with truthiness tests / ``is None`` (matching the sibling _check_task,
    so a falsy-but-not-False helper result is now also treated as failure),
    and flattened the else-after-return nesting into guard clauses.
    """
    vmip = request.values.get('vm_ip')
    flavor_id = request.values.get('flavor_id')
    cloudarea = request.values.get('cloud_area')
    vm_ostype = request.values.get('vm_ostype')
    vm_segment = request.values.get('segment')
    # All five parameters are mandatory.
    if not vmip or not flavor_id or not cloudarea or not vm_ostype or not vm_segment:
        logging.info('params are invalid or missing')
        message = '入参缺失'
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR,
                                           msg=message)
    # The requested flavor must exist.
    flavor_info = flavor_service.FlavorService().get_flavor_info(flavor_id)
    if not flavor_info:
        logging.info('id: %s flavor info not in db when create instance',
                     flavor_id)
        message = '实例规格数据有误,无法进行v2v'
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR,
                                           msg=message)
    # Management node + ssh credentials of the source OpenStack environment.
    if cloudarea == "SIT":
        ctr_host = '10.202.83.12'
        ctr_pass = decrypt(OPENSTACK_SIT_PASS)
    elif cloudarea == "DEV":
        ctr_host = "10.202.123.4"
        ctr_pass = decrypt(OPENSTACK_DEV_PASS)
    else:
        message = 'openstack环境参数错误,无法进行v2v操作'
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                           msg=message)
    # The source VM must be resolvable on that environment.
    vmexist = vm_exist(vmip, ctr_host, ctr_pass)
    if not vmexist:
        message = '获取vm信息失败'
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                           msg=message)
    # Detect the guest OS version.
    osstat, verdata = get_vm_version(ctr_host, ctr_pass, vm_ostype, vmip)
    if not osstat:
        message = '获取vmOS版本失败'
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                           msg=message)
    ver_data = verdata
    # Total disk size; everything beyond the 80G system disk is data disk.
    vdiskdata = vm_disk_size(vmip, cloudarea, ctr_host, ctr_pass)
    if not vdiskdata:
        message = '获取vm磁盘信息失败'
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                           msg=message)
    data_disk = vdiskdata - 80
    # The source VM must be shut down before conversion.
    vmshutdown = vm_stat(vmip, ctr_host, ctr_pass)
    if not vmshutdown:
        message = '待转化vm未关机'
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                           msg=message)
    # Segment must exist; IP (if already registered) must be unused ('0').
    vm_segment = seg_s.SegmentService().get_segment_info_bysegment(vm_segment)
    if vm_segment is None:
        message = '网段信息有误,无法进行v2v'
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                           msg=message)
    ip_data = ip_s.IPService().get_ip_info_by_ipaddress(vmip)
    if ip_data:
        if ip_data['status'] != '0':
            message = "IP与现有环境冲突,无法进行v2v"
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                               msg=message)
    return_info = {'vm_osver': ver_data, 'vm_disk': str(data_disk)}
    message = "获取VM信息成功"
    return json_helper.format_api_resp(code=ErrorCode.SUCCESS,
                                       msg=message,
                                       data=return_info)
def v2v_openstack_intodb(hostpool_id):
    """Register an OpenStack -> KVM v2v migration task from the HTTP request.

    Validates the form parameters, picks a destination host in
    ``hostpool_id`` and pre-allocates its resources, books the target IP,
    writes the instance record plus one instance_action row per migration
    step, and finally queues the task in the v2v_task table.

    :param hostpool_id: destination host pool to schedule into.
    :return: a json_helper.format_api_resp(...) response.
        NOTE(review): when ``retry == '1'`` the function falls through and
        implicitly returns None — presumably the retry path is handled
        elsewhere; confirm.
    """
    # Is this a retry of an existing task?
    retry = request.values.get('retry')
    # Non-retry: run the full validation / registration flow.
    if retry != '1':
        # Read and trim the form parameters.
        vmname = request.values.get('vm_name').strip()
        vmip = request.values.get('vm_ip').strip()
        flavor_id = request.values.get('flavor_id').strip()
        cloudarea = request.values.get('cloud_area').strip()
        vm_ostype = request.values.get('vm_ostype').strip()
        vm_app_info = request.values.get('vm_app_info').strip()
        vm_owner = request.values.get('vm_owner').strip()
        vm_group_id = request.values.get('group_id').strip()
        user_id = request.values.get('user_id').strip()
        vm_segment = request.values.get('segment').strip()
        # All parameters are mandatory.
        if not vmname or not vmip or not flavor_id or not cloudarea or not vm_ostype \
                or not vm_app_info or not vm_owner or not vm_group_id or not user_id or not vm_segment:
            logging.info('params are invalid or missing')
            return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR,
                                               msg='入参缺失')
        else:
            # The requested flavor must exist; it fixes cpu/mem of the target.
            flavor_info = flavor_service.FlavorService().get_flavor_info(
                flavor_id)
            if not flavor_info:
                logging.info(
                    'id: %s flavor info not in db when create instance',
                    flavor_id)
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='实例规格数据有误,无法进行v2v')
            vmcpu = flavor_info['vcpu']
            vmmem = flavor_info['memory_mb']
            # Management node + ssh credentials of the source OpenStack env.
            if cloudarea == "SIT":
                ctr_host = '10.202.83.12'
                ctr_pass = decrypt(OPENSTACK_SIT_PASS)
            elif cloudarea == "DEV":
                ctr_host = "10.202.123.4"
                ctr_pass = decrypt(OPENSTACK_DEV_PASS)
            else:
                return json_helper.format_api_resp(
                    code=ErrorCode.SYS_ERR, msg='openstack环境参数错误,无法进行v2v操作')
            # The source VM must be resolvable on that environment.
            vmexist = vm_exist(vmip, ctr_host, ctr_pass)
            if vmexist == False:
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='获取vm信息失败')
            # Detect the guest OS version.
            osstat, verdata = get_vm_version(ctr_host, ctr_pass, vm_ostype,
                                             vmip)
            if osstat == False:
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='获取vmOS版本失败')
            else:
                ver_data = verdata
            # Total disk size; beyond the 80G system disk counts as data disk.
            vdiskdata = vm_disk_size(vmip, cloudarea, ctr_host, ctr_pass)
            if vdiskdata == False:
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='获取vm磁盘信息失败')
            vdisk = vdiskdata
            data_disk = vdisk - 80
            # The source VM must be shut down before conversion.
            vmshutdown = vm_stat(vmip, ctr_host, ctr_pass)
            if vmshutdown == False:
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='待转化vm未关机')
            # Pick a destination host with enough free cpu/mem/disk.
            host_code, host_data, host_msg = cal_host(hostpool_id, vmcpu,
                                                      vmmem, data_disk,
                                                      vm_group_id)
            if host_code < 0:
                return json_helper.format_api_resp(code=host_code,
                                                   msg=host_msg)
            else:
                host = host_data
            # Pre-allocate the host resources (50G disk reservation).
            vmhost = ho_s.HostService().get_host_info_by_hostip(host)
            ret_4 = ho_s.pre_allocate_host_resource(vmhost['id'], vmcpu,
                                                    vmmem, 50)
            if ret_4 != 1:
                logging.error('资源预分配失败')
                message = '资源预分频失败'
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg=message)
            # Book the IP: register it if unknown, else require status '0'
            # (unused) and flip it to '1' (in use).
            vm_segment = seg_s.SegmentService().get_segment_info_bysegment(
                vm_segment)
            if vm_segment == None:
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='网段信息有误,无法进行v2v')
            else:
                segment_id = vm_segment['id']
                vm_netmask = vm_segment['netmask']
                vm_gateway = vm_segment['gateway_ip']
                vm_dns1 = vm_segment['dns1']
                vm_dns2 = vm_segment['dns2']
                vmvlan = vm_segment['vlan']
                ip_data = ip_s.IPService().get_ip_info_by_ipaddress(vmip)
                if ip_data == None:
                    # Unknown IP: insert it directly as in-use ('1').
                    ip_data = {
                        'ip_address': vmip,
                        'segment_id': segment_id,
                        'netmask': vm_netmask,
                        'vlan': vmvlan,
                        'gateway_ip': vm_gateway,
                        'dns1': vm_dns1,
                        'dns2': vm_dns2,
                        'created_at': get_datetime_str(),
                        'status': '1'
                    }
                    ret = ip_s.IPService().add_ip_info(ip_data)
                    if ret.get('row_num') <= 0:
                        logging.info(
                            'add ip info error when create v2v task, insert_data: %s',
                            ip_data)
                        return json_helper.format_api_resp(
                            code=ErrorCode.SYS_ERR, msg="录入IP信息失败")
                    else:
                        ip_id = ret.get('last_id')
                else:
                    # Known IP: only usable when currently unused ('0').
                    ip_data_status = ip_data['status']
                    vmvlan = ip_data['vlan']
                    if ip_data_status != '0':
                        return json_helper.format_api_resp(
                            code=ErrorCode.SYS_ERR, msg="IP与现有环境冲突,无法进行v2v")
                    else:
                        ip_id = ip_data['id']
                        where_data = {'id': ip_id}
                        updata_data = {
                            'status': '1',
                            'updated_at': get_datetime_str()
                        }
                        ret1 = ip_s.IPService().update_ip_info(
                            updata_data, where_data)
                        if not ret1:
                            logging.info(
                                'update ip info error when create v2v task, update_data: %s',
                                updata_data)
                            return json_helper.format_api_resp(
                                code=ErrorCode.SYS_ERR, msg="更新IP状态失败")
            # Generate the task's request id.
            request_Id = v2v_op.generate_req_id()
            # Generate the VM's uuid and MAC address.
            vmuuid = randomUUID()
            vmmac = randomMAC()
            # Write the instance record (and related tables).
            instance_info = instance_db_info(vmuuid, vmname, vm_app_info,
                                             vm_owner, flavor_id, vm_group_id,
                                             host, vmmac, data_disk, ip_id,
                                             vm_ostype, request_Id, ver_data)
            if instance_info < 0:
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='信息入库失败')
            # Seed one instance_action row per migration step; the first step
            # starts as 'start', the rest as 'other' (pending).
            v2v_cd_d1 = {
                'action': v2vActions.CREATE_DEST_DIR,
                'request_id': request_Id,
                'message': 'start',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_cd_d1)
            # get-file step
            v2v_gf_d1 = {
                'action': v2vActions.GET_VM_FILE,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_gf_d1)
            # copy-disk step
            v2v_cpd_d1 = {
                'action': v2vActions.COPY_VM_DISK,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_cpd_d1)
            # copy-xml step
            v2v_cpx_d1 = {
                'action': v2vActions.COPY_VM_XML,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_cpx_d1)
            # create-storage-pool step
            v2v_csp_d1 = {
                'action': v2vActions.CREATE_STOR_POOL,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_csp_d1)
            # VM-standardization step
            v2v_vmd_d1 = {
                'action': v2vActions.VM_STANDARDLIZE,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_vmd_d1)
            # VM-define (libvirt registration) step
            v2v_vmdef_d1 = {
                'action': v2vActions.VM_DEFINE,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_vmdef_d1)
            # IP-injection step
            v2v_vmipj_d1 = {
                'action': v2vActions.IP_INJECT,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_vmipj_d1)
            # VM power-on step
            v2v_vmstar_d1 = {
                'action': v2vActions.VM_START,
                'request_id': request_Id,
                'message': 'other',
                'start_time': get_datetime_str()
            }
            in_a_s.InstanceActionsServices().add_instance_action_info(
                v2v_vmstar_d1)
            message = '信息已添加至任务队列,等待执行'
            # Queue the task in the v2v_task table.
            # (NOTE(review): 'destory' is the key spelling the schema uses.)
            v2v_data = {
                'request_id': request_Id,
                'destory': '0',
                'start_time': get_datetime_str(),
                'status': 0,
                'vm_ip': vmip,
                'vm_name': vmname,
                'vmvlan': vmvlan,
                'flavor_id': flavor_id,
                'cloud_area': cloudarea,
                'vm_ostype': vm_ostype,
                'vm_app_info': vm_app_info,
                'vm_owner': vm_owner,
                'vm_group_id': vm_group_id,
                'user_id': user_id,
                'vm_mac': vmmac,
                'vm_uuid': vmuuid,
                'cancel': '0',
                'dest_dir': '/app/image/' + vmuuid,
                'on_task': '0',
                'port': '10000',
                'source': VMCreateSource.OPENSTACK
            }
            v2v_insert = v2v_op.v2vTaskService().add_v2v_task_info(v2v_data)
            if v2v_insert.get('row_num') <= 0:
                logging.info('insert info to v2v_task failed! %s', v2v_data)
                return json_helper.format_api_resp(code=ErrorCode.SYS_ERR,
                                                   msg='信息入库失败')
            # Record the chosen destination kvmhost and mark the first step.
            v2v_op.update_v2v_desthost(request_Id, host)
            v2v_op.update_v2v_step(request_Id, v2vActions.BEGIN)
            return json_helper.format_api_resp(code=ErrorCode.SUCCESS,
                                               msg=message)
def virt_v2v_copy_to_local(dest_dir, kvmhost, esx_ip, esx_passwd, vmware_vm,
                           request_id):
    """Copy a VMware guest's files onto the KVM host via virt-v2v-copy-to-local.

    Steps, all executed on ``kvmhost`` through ansible shell:
      1. write the ESXi root password to /tmp/<esx_ip>/passwd,
      2. remove any leftover copy under /app/tmp/<vm>/,
      3. run virt-v2v-copy-to-local against esx://root@<esx_ip>,
      4. remove the password file again (best-effort).

    Progress/failure is persisted through v2v_op / in_a_s; returns None.

    :param dest_dir: unused here — presumably consumed by later steps of the
        pipeline that share this signature; confirm before removing.

    Fixes: every ansible result is now guarded against a missing
    'contacted' key (the original only guarded the first call and would
    raise KeyError on the others); the duplicated failure bookkeeping is
    factored into one helper; unused credential locals removed.
    """

    def _record(message, ok):
        """Persist the step outcome for request_id.

        NOTE(review): the Lock is created per call, so it serializes nothing
        across threads — kept only to preserve the original call sequence.
        """
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        if ok:
            in_a_s.update_instance_actions(request_id,
                                           esx_v2vActions.COPY_FILE_TO_LOCAL,
                                           ActionStatus.SUCCSESS, message)
            v2v_op.update_v2v_step(request_id,
                                   esx_v2vActions.COPY_FILE_TO_LOCAL)
            v2v_op.updata_v2v_ontask(request_id, '0')
        else:
            v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
            v2v_op.updata_v2v_ontask(request_id, '0')
            in_a_s.update_instance_actions(request_id,
                                           esx_v2vActions.COPY_FILE_TO_LOCAL,
                                           ActionStatus.FAILD, message)
        threadlock.release()

    def _unreachable(res):
        # An ansible result with no contacted hosts means ssh never connected.
        return 'contacted' not in res or res['contacted'] == {}

    # SECURITY NOTE(review): the ESXi password is interpolated into a shell
    # command line, so it is briefly visible in the host's process list;
    # consider an ansible 'copy' with content= instead.
    echopass_command = ('mkdir -p /tmp/' + esx_ip + ';echo ' + esx_passwd +
                        ' >> /tmp/' + esx_ip + '/passwd')
    echopass = ansible_run_shell(kvmhost, echopass_command)
    if _unreachable(echopass):
        _record('无法连接kvmhost', ok=False)
        return
    if "error" in echopass['contacted'][kvmhost]['stderr']:
        _record('记录esxi密码失败', ok=False)
        return

    # Drop any partial copy from a previous attempt.
    rmfilefirst = 'rm -f /app/tmp/' + vmware_vm + '/' + vmware_vm + '*'
    rmfile_first = ansible_run_shell(kvmhost, rmfilefirst)
    if (_unreachable(rmfile_first)
            or "error" in rmfile_first['contacted'][kvmhost]['stderr']):
        _record("清除临时文件失败", ok=False)
        return

    # Pull the VM files from the ESXi host into /app/tmp/<vm>/.
    copy_to_local_command = (
        'mkdir -p /app/tmp/' + vmware_vm + '/;export TMPDIR=/app/tmp;cd /app/tmp/'
        + vmware_vm + '/;virt-v2v-copy-to-local -ic esx://root@' + esx_ip +
        '?no_verify=1 ' + vmware_vm + ' --password-file ' + '/tmp/' + esx_ip +
        '/passwd')
    copy_local = ansible_run_shell(kvmhost, copy_to_local_command)
    if _unreachable(copy_local):
        _record('无法连接kvmhost', ok=False)
        return
    if 'error' in copy_local['contacted'][kvmhost]['stderr']:
        _record('拷贝vm文件到目标host失败', ok=False)
        return

    # Clean up the password file; failure here is non-fatal (still success).
    rm_esxpass_com = ("rm -f /tmp/" + esx_ip + '/passwd;rmdir /tmp/' + esx_ip)
    rmesxpass_com = ansible_run_shell(kvmhost, rm_esxpass_com)
    if (_unreachable(rmesxpass_com)
            or 'error' in rmesxpass_com['contacted'][kvmhost]['stderr']):
        _record('vm文件拷贝成功,删除esxi密码文件失败', ok=True)
    else:
        _record('vm文件拷贝成功,删除esxi密码文件成功', ok=True)
def v2v_esx_disk_attach_static(vm_name, kvmhost, request_id):
    """Create a 20G scratch qcow2 image on the kvmhost and hot-attach it.

    The disk is (re)created at /tmp/diskimg via qemu-img and attached to
    ``vm_name`` as device vdi.  Step results are written to the v2v /
    instance-action tables; returns None.
    """

    def _finish(message, succeeded):
        # Record the step outcome.  The Lock is created per call (as in the
        # surrounding functions) and therefore guards nothing across threads.
        lock = threading.Lock()
        lock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        if succeeded:
            in_a_s.update_instance_actions(request_id,
                                           esx_v2vActions.ATTACH_DISK,
                                           ActionStatus.SUCCSESS, message)
            v2v_op.updata_v2v_ontask(request_id, '0')
            v2v_op.update_v2v_step(request_id, esx_v2vActions.ATTACH_DISK)
        else:
            v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
            v2v_op.updata_v2v_ontask(request_id, '0')
            in_a_s.update_instance_actions(request_id,
                                           esx_v2vActions.ATTACH_DISK,
                                           ActionStatus.FAILD, message)
        lock.release()

    # Unused ansible credentials, kept (with their decrypt calls) for parity
    # with the original code.
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)

    create_disk = 'cd /tmp;rm -f diskimg;qemu-img create -f qcow2 diskimg 20G'
    createdisk = ansible_run_shell(kvmhost, create_disk)
    if 'contacted' not in createdisk or createdisk['contacted'] == {}:
        _finish('连接目标kvmhost失败', succeeded=False)
        return
    if 'failed' in createdisk['contacted'][kvmhost]:
        _finish('创建临时磁盘失败', succeeded=False)
        return

    # Long settle delay before touching libvirt (kept from original).
    time.sleep(200)
    xml = """
<disk type='file' device='disk'>
    <driver name='qemu' type='qcow2'/>
    <source file='/tmp/diskimg'/>
    <target dev='vdi' bus='virtio'/>
</disk>"""
    att_device = instanceManager.v2v_esx_attach_device(kvmhost, vm_name, xml)
    if not att_device:
        _finish('添加临时磁盘失败', succeeded=False)
        return
    time.sleep(15)
    _finish('添加临时磁盘成功', succeeded=True)
def vm_start1(kvmhost, vmname, request_id, modulename, vm_ostype):
    """Start the converted VM with `virsh start` and record the step outcome.

    On success the VM_START1 step is marked done; for Linux guests the
    instance row is additionally flipped to status '3' and the v2v actions
    counter advanced.  ``modulename`` is accepted for signature parity but
    not used here.  Returns None.
    """
    # Unused ansible credentials, kept (with their decrypt calls) for parity
    # with the original code.
    remote_user = ANSIABLE_REMOTE_USER
    remote_pass = decrypt(KVMHOST_LOGIN_PASS)
    become_user = OPENSTACK_DEV_USER
    become_pass = decrypt(KVMHOST_SU_PASS)

    vmstart = ansible_run_shell(kvmhost, 'virsh start ' + vmname)
    # Same four checks as before, short-circuited in the original order:
    # unreachable host, empty contacted set, ansible-level failure, or an
    # empty stdout from virsh all count as a failed start.
    start_failed = (
        'contacted' not in vmstart
        or vmstart['contacted'] == {}
        or 'failed' in vmstart['contacted'][kvmhost]
        or vmstart['contacted'][kvmhost]['stdout'] == '')

    if start_failed:
        message = 'vm启动失败'
        lock = threading.Lock()
        lock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_START1,
                                       ActionStatus.FAILD, message)
        lock.release()
        return

    message = 'vm启动成功'
    lock = threading.Lock()
    lock.acquire()
    v2v_op.updata_v2v_message(request_id, message)
    in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_START1,
                                   ActionStatus.SUCCSESS, message)
    v2v_op.updata_v2v_ontask(request_id, '0')
    v2v_op.update_v2v_step(request_id, esx_v2vActions.VM_START1)
    if vm_ostype == "Linux":
        # Linux guests are considered fully migrated once running.
        vm_uuid = v2v_op.v2vTaskService().get_v2v_task_by_requestid(
            request_id)['vm_uuid']
        where_data = {'uuid': vm_uuid}
        update_data = {'status': '3'}
        ins_s.InstanceService().update_instance_info(update_data, where_data)
        v2v_op.update_v2v_actions(request_id, 1)
    lock.release()