def create_data_disk(request_id, dst_host_ip, uuid, user_id, disk_name,
                     dev_name, disk_size_gb, disk_format, task_id):
    '''
    Create a data disk for an instance on the destination host.

    :param request_id: id of the overall request (for action bookkeeping)
    :param dst_host_ip: ip of the destination host where the disk is created
    :param uuid: instance uuid, also used as the storage pool name
    :param user_id: id of the operating user
    :param disk_name: image file name of the disk
    :param dev_name: guest device name (e.g. vda), used in the action label
    :param disk_size_gb: disk size in GB
    :param disk_format: disk format; defaults to 'qcow2' when falsy
    :param task_id: id of the task this action belongs to
    :return: (status, message, disk_dir_path); disk_dir_path is None when
             the libvirt connection could not be established
    '''
    if not disk_format:
        disk_format = 'qcow2'
    connect_storage = vmManager.libvirt_get_connect(dst_host_ip,
                                                    conn_type='storage',
                                                    poolname=uuid)
    if not connect_storage:
        status = ActionStatus.FAILD
        message = 'can not connect to libvirt'
        # bug fix: use the same action label as the normal path so action
        # records stay matchable (original wrote disk_name + "create" here)
        update_instance_actions(uuid, request_id, user_id,
                                "disk " + dev_name + " create", status,
                                message, task_id)
        return status, message, None
    disk_status, disk_xml = vmManager.libvirt_create_disk_hotmigrate(
        connect_storage, uuid, disk_name, disk_size_gb, disk_format)
    disk_dir_path = default.INSTANCE_DISK_PATH % (uuid, disk_name)
    if disk_status:
        status = ActionStatus.SUCCSESS
        message = "create disk %s success! %s " % (disk_name, disk_dir_path)
    else:
        status = ActionStatus.FAILD
        message = "create disk %s failed! %s " % (disk_name, disk_dir_path)
    update_instance_actions(uuid, request_id, user_id,
                            "disk " + dev_name + " create", status, message,
                            task_id)
    return status, message, disk_dir_path
def __change_instance_network(host_ip, ins_name, net_info):
    '''
    Reconfigure a vm NIC's ip/netmask and bring the NIC up.

    :param host_ip: ip of the host running the vm
    :param ins_name: vm name
    :param net_info: network settings to inject via the serial channel
    :return: (ok, message) where message is a user-facing string
    '''
    # locate the vm through libvirt first
    conn = vmManager.libvirt_get_connect(host_ip, conn_type='instance',
                                         vmname=ins_name)
    if not conn:
        return False, '无法使用libvirt进行虚拟机新增网卡配置,请联系管理员'
    # inject the ip configuration through the libvirt serial console
    injected, _result_msg = vmManager.libvirt_change_instance_ip(
        conn, net_info, net_card_new=True)
    if not injected:
        return False, '无法使用libvirt进行虚拟机ip配置,请联系管理员'
    return True, '网卡配置成功'
def _create_storage_pool(host_ip, uuid, request_id):
    '''
    Create the storage pool for a v2v task and record the outcome in the
    v2v bookkeeping tables.

    :param host_ip: destination host ip
    :param uuid: instance uuid used as the pool name
    :param request_id: v2v request id
    '''
    storages_conn = instanceManager.libvirt_get_connect(host_ip,
                                                        conn_type='storages')
    created, _pool_name = instanceManager.libvirt_create_storage_pool(
        storages_conn, uuid)
    # NOTE(review): this lock is created fresh on every call, so it never
    # actually excludes concurrent callers — kept as-is to preserve behavior.
    if created:
        logging.info('create storage pool successful')
        message = '创建存储池成功'
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id,
                                       esx_v2vActions.CREATE_STOR_POOL,
                                       ActionStatus.SUCCSESS, message)
        v2v_op.update_v2v_step(request_id, esx_v2vActions.CREATE_STOR_POOL)
        threadlock.release()
    else:
        msg = "创建存储池失败"
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, msg)
        v2v_op.updata_v2v_ontask(request_id, '0')
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        in_a_s.update_instance_actions(request_id,
                                       esx_v2vActions.CREATE_STOR_POOL,
                                       ActionStatus.FAILD, msg)
        threadlock.release()
def _create_storage_pool(request_id, host_ip, uuid, user_id):
    '''
    Create the storage pool for a clone job, skipping the work when this
    step was already completed for the request.

    :param request_id: clone request id
    :param host_ip: destination host ip
    :param uuid: instance uuid used as the pool name
    :param user_id: operating user id
    :return: (status, message)
    '''
    ret_check_status, job_status = _check_job_step_done(
        request_id, InstaceActions.INSTANCE_CLONE_CREATE_STORAGE_POOL)
    # bug fix: '==' instead of 'is' — identity comparison with an int
    # literal only worked by accident (CPython small-int caching)
    if ret_check_status and job_status == 1:
        message = "pass"
        return ActionStatus.SUCCSESS, message
    _add_instance_actions(uuid, request_id, user_id,
                          InstaceActions.INSTANCE_CLONE_CREATE_STORAGE_POOL,
                          'start')
    connect_storages = vmManager.libvirt_get_connect(host_ip,
                                                     conn_type='storages')
    pool_status, pool_name = vmManager.libvirt_create_storage_pool(
        connect_storages, uuid)
    if pool_status:
        status = ActionStatus.SUCCSESS
        message = "create storage pool %s success!" % uuid
    else:
        status = ActionStatus.FAILD
        message = "create storage pool %s failed!" % uuid
    _update_instance_actions(
        uuid, request_id, user_id,
        InstaceActions.INSTANCE_CLONE_CREATE_STORAGE_POOL, status, message)
    return status, message
def _instance_net_on(instance_name, instance_mac, host_ip, dev):
    # Bring up the NIC identified by mac/dev on the given vm via libvirt.
    # Returns libvirt_instance_net_on's (ok, message) pair, or a failure
    # pair when the libvirtd connection cannot be made.
    conn = vmManager.libvirt_get_connect(host_ip, conn_type='instance',
                                         vmname=instance_name)
    if conn:
        return vmManager.libvirt_instance_net_on(conn, instance_mac, dev)
    return False, 'can not connect to libvirtd'
def _create_storage_pool(host_ip, uuid):
    '''
    Create a storage pool named after the instance uuid.

    :param host_ip: host to create the pool on
    :param uuid: instance uuid used as the pool name
    :return: True when the pool was created, False otherwise
    '''
    conn = instanceManager.libvirt_get_connect(host_ip, conn_type='storages')
    created, _pool_name = instanceManager.libvirt_create_storage_pool(conn,
                                                                      uuid)
    return True if created else False
def image_console():
    """
    VNC console page for the image-edit template vm.

    Expects the vm name as the value of the single query-string parameter
    (e.g. ``?vm_name=<instance_name>``); the console always targets the
    IMAGE_EDIT_SERVER host.
    :return: rendered console page with a token cookie, or an error resp
    """
    console_server_host = request.host.split(':')[0]
    query = request.query_string.decode().strip()
    parts = query.split('=')
    # guard: the original indexed parts[1] unconditionally and raised
    # IndexError when the query string contained no '='
    if len(parts) < 2 or not parts[1]:
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR)
    instance_name = parts[1]
    if not console_server_host:
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR)
    kvm_host_ip = IMAGE_EDIT_SERVER
    # ask libvirtd for the vm's vnc console port
    connect_instance = vmManager.libvirt_get_connect(kvm_host_ip,
                                                     conn_type='instance',
                                                     vmname=instance_name)
    status, vnc_port = vmManager.libvirt_get_vnc_console(
        connect_instance, instance_name)
    if not status:
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR)
    # token format "<host>-<port>" is consumed by the websocket proxy
    token = str(kvm_host_ip) + '-' + str(vnc_port)
    resp = make_response(
        render_template('console-vnc.html',
                        vm_name=instance_name,
                        ws_host=console_server_host,
                        ws_port=6080))
    resp.set_cookie('token', token)
    return resp
def win_vm_std(request_id):
    '''
    Inject network/identity settings into a migrated Windows vm and
    record the v2v task outcome.

    :param request_id: v2v request id used to look up the task row
    '''
    v2v_task = v2v_op.v2vTaskService().get_v2v_task_by_requestid(request_id)
    vmip = v2v_task['vm_ip']
    ostype = 'Windows'
    ip_data = ip_s.IPService().get_ip_by_ip_address(vmip)
    # netmask is stored as a prefix length; convert to dotted-quad
    vmmask = exchange_maskint(int(ip_data['netmask']))
    vmgateway = ip_data['gateway_ip']
    vmname = v2v_task['vm_name']
    dns1 = ip_data['dns1']
    dns2 = ip_data['dns2']
    host_ip = v2v_task['dest_host']
    cloudarea = v2v_task['cloud_area']
    connect_instance = instanceManager.libvirt_get_connect(
        host_ip, conn_type='instance', vmname=vmname)
    inject_stauts, mesg = instanceManager.v2v_esx_win_inject(
        connect_instance, vmname, vmip, vmgateway, dns1, dns2, vmmask,
        ostype, cloudarea)
    if inject_stauts:
        message = "信息注入成功"
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        in_a_s.update_instance_actions(request_id, esx_v2vActions.WINDOWS_STD,
                                       ActionStatus.SUCCSESS, message)
        # bug fix: the original called update_v2v_step twice with identical
        # arguments; one call is enough to record the step
        v2v_op.update_v2v_step(request_id, esx_v2vActions.WINDOWS_STD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        vm_uuid = v2v_op.v2vTaskService().get_v2v_task_by_requestid(
            request_id)['vm_uuid']
        v2v_op.update_v2v_actions(request_id, 1)
        # mark the instance as running ('3') in the instance table
        where_data = {'uuid': vm_uuid}
        update_data = {'status': '3'}
        ins_s.InstanceService().update_instance_info(update_data, where_data)
        threadlock.release()
    else:
        message = "信息注入失败"
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.WINDOWS_STD,
                                       ActionStatus.FAILD, message)
        threadlock.release()
def cancel_move_to(host_ip, instance_name):
    '''
    Abort a running hot migration (invoked by the migration watchdog
    timer when the move runs too long).

    :param host_ip: source host ip
    :param instance_name: name of the migrating vm
    :return: -1 when the cancellation succeeded, False otherwise
             (callers distinguish -1 from a boolean failure)
    '''
    logging.error(
        'instance:%s hot migrate terminated because executing overtime(40min)',
        instance_name)
    conn = vmManager.libvirt_get_connect(host=host_ip, conn_type='instance',
                                         vmname=instance_name)
    if conn:
        cancelled, message = vmManager.instance_migrate_cancel(conn)
        if cancelled:
            # cancellation went through
            return -1
        logging.error(message)
    return False
def _create_clone_pool(host_ip):
    '''
    Ensure the shared 'clone' storage pool exists on the host.

    :param host_ip: host to create the pool on
    :return: (ok, message); an already-existing pool counts as success
    '''
    conn = vmManager.libvirt_get_connect(host_ip, conn_type='storages')
    if not conn:
        return False, "libvirt connect error"
    pool_status, pool_name = vmManager.libvirt_create_clone_pool(conn,
                                                                 'clone')
    if pool_status:
        return True, "pool clone create success"
    # on failure pool_name carries the error text; "already" means the
    # pool exists, which is fine for our purposes
    if "already" in pool_name:
        return True, "pool clone already exist"
    return False, "pool clone create failed"
def _vm_create_disk_snapshot(images, uuid, user_id, request_id, source_host,
                             src_instance_id):
    '''
    Take external disk snapshots of the clone-source vm.

    :param images: list of image file names of the source vm's disks
    :param uuid: uuid of the clone (target) instance, used for bookkeeping
    :param user_id: operating user id
    :param request_id: clone request id
    :param source_host: ip of the host running the source vm
    :param src_instance_id: db id of the source instance
    :return: (status, message)
    '''
    ret_check_status, job_status = _check_job_step_done(
        request_id, InstaceActions.INSTANCE_CLONE_DISK_SNAPSHOT)
    # bug fix: '==' instead of 'is' — identity comparison with an int
    # literal only worked by accident (CPython small-int caching)
    if ret_check_status and job_status == 1:
        message = "pass"
        return ActionStatus.SUCCSESS, message
    _add_instance_actions(uuid, request_id, user_id,
                          InstaceActions.INSTANCE_CLONE_DISK_SNAPSHOT,
                          'start')
    src_instance_data = ins_s.InstanceService().get_instance_info(
        src_instance_id)
    src_instance_name = src_instance_data['name']
    src_instance_uuid = src_instance_data['uuid']
    connect_instance = vmManager.libvirt_get_connect(
        source_host, conn_type='instance', vmname=src_instance_name)
    if not connect_instance:
        _status = ActionStatus.FAILD
        msg = "源vm%s创建磁盘快照失败,libvirt连接失败" % src_instance_name
    else:
        # build absolute paths of every disk to snapshot
        snapshot_disk_data = [
            '/app/image/' + src_instance_uuid + '/' + image
            for image in images
        ]
        ret_disk_snapshot = vmManager.ex_disk_snapshot(
            source_host, src_instance_name, snapshot_disk_data)
        if not ret_disk_snapshot:
            _status = ActionStatus.FAILD
            msg = "源vm %s 创建磁盘快照失败" % src_instance_name
        else:
            _status = ActionStatus.SUCCSESS
            msg = "源vm %s 创建磁盘快照成功" % src_instance_name
    _update_instance_actions(uuid, request_id, user_id,
                             InstaceActions.INSTANCE_CLONE_DISK_SNAPSHOT,
                             _status, msg)
    return _status, msg
def _img_tem_start(image_name):
    '''
    Start the image-template vm and wait (up to 10 minutes) until
    libvirt reports it running (status == 1).

    :param image_name: template vm name
    :return: (ok, message)
    '''
    host_ip = IMAGE_EDIT_SERVER
    started, msg = ansibleCmdV2.start_tem_vm(host_ip, image_name)
    if not started:
        return False, msg
    conn = vmManager.libvirt_get_connect(host_ip, conn_type='instance',
                                         vmname=image_name)
    deadline = time.time() + 600  # 10 minute timeout
    while time.time() < deadline and conn.get_status() != 1:
        time.sleep(10)  # poll every 10 seconds
    if conn.get_status() != 1:
        err_msg = 'template vm %s start up time out!' % image_name
        logging.error(err_msg)
        return False, err_msg
    msg = 'template vm %s start up success' % image_name
    logging.info(msg)
    return True, msg
def create_instance_first(request_id, host_ip, uuid, user_id, hostname,
                          memory_mb, vcpu, volumes_d, net_card_name, mac):
    '''
    Define the vm on the destination host (first step of a clone job).

    :param request_id: clone request id
    :param host_ip: destination host ip
    :param uuid: uuid of the new instance (also its storage pool name)
    :param user_id: operating user id
    :param hostname: vm hostname
    :param memory_mb: memory size in MB
    :param vcpu: vcpu count
    :param volumes_d: disk dict used to build the domain xml
    :param net_card_name: name of the nic
    :param mac: mac address for the nic
    :return: (status, message)
    '''
    ret_check_status, job_status = _check_job_step_done(
        request_id, InstaceActions.INSTANCE_CREATE)
    # bug fix: '==' instead of 'is' for the int comparison
    if ret_check_status and job_status == 1:
        status = ActionStatus.SUCCSESS
        message = "create instance %s %s success! cpu:%s mem:%s " % (
            hostname, uuid, vcpu, memory_mb)
        return status, message
    if not ret_check_status:
        _add_instance_actions(uuid, request_id, user_id,
                              InstaceActions.INSTANCE_CREATE, 'start')
    connect_create = vmManager.libvirt_get_connect(host_ip)
    if not connect_create:
        status = ActionStatus.FAILD
        message = 'can not connect to libvirt'
        _update_instance_actions(uuid, request_id, user_id,
                                 InstaceActions.INSTANCE_CREATE, status,
                                 message)
        return status, message
    # refresh the pool first, otherwise libvirt may not see the freshly
    # created disks and defining the vm fails
    connect_create.refresh_storage_pool_by_name(uuid)
    create_status, instance_hostname = vmManager.libvirt_create_instance_xml_no_nic(
        connect_create, hostname, memory_mb, vcpu, uuid, volumes_d,
        net_card_name, mac)
    # bug fix: the original had a trailing comma after both assignments
    # below, turning status into a 1-tuple instead of the status value
    if create_status:
        status = ActionStatus.SUCCSESS
        message = "create instance %s %s success! cpu:%s mem:%s " % (
            hostname, uuid, vcpu, memory_mb)
    else:
        status = ActionStatus.FAILD
        message = "create instance %s %s Failed!cpu:%s mem:%s" % (
            hostname, uuid, vcpu, memory_mb)
    _update_instance_actions(uuid, request_id, user_id,
                             InstaceActions.INSTANCE_CREATE, status, message)
    return status, message
def img_tem_rm_ip(image_name, os_type):
    '''
    Strip host-specific network config from the image-template vm.

    For linux templates the injected shell commands clear IPADDR/GATEWAY
    and remove udev persistent-net rules; for windows the vm is sysprep'd.
    Waits up to 5 minutes for the qemu guest agent before injecting.

    :param image_name: template vm name
    :param os_type: 'linux' or 'windows'
    :return: (ok, message)
    '''
    host_ip = IMAGE_EDIT_SERVER
    # get libvirt connection
    connect_instance = vmManager.libvirt_get_connect(host_ip,
                                                     conn_type='instance',
                                                     vmname=image_name)
    if not connect_instance:
        # bug fix: the original left the %s placeholder unfilled
        message = ('image tmp %s inject failed,because libvirt connection error'
                   % image_name)
        logging.info(message)
        return False, message
    if os_type == 'linux':
        # assemble inject command
        rm_udev_com = "rm -rf /etc/udev/rules.d/70-persistent-ipoib.rules;rm -rf /etc/udev/rules.d/70-persistent-net.rules"
        sed_eth0_com = "sed -i 's/IPADDR.*/IPADDR=/g' /etc/sysconfig/network-scripts/ifcfg-eth0"
        sed_gw_com = "sed -i 's/GATEWAY.*/GATEWAY=/g' /etc/sysconfig/network"
        inject_data = rm_udev_com + ';' + sed_eth0_com + ';' + sed_gw_com
    elif os_type == 'windows':
        inject_data = 'c:\\\windows\\\system32\\\sysprep \\/generalize \\/oobe \\/shutdown \\/unattend:Unattend.xml'
    else:
        err_msg = 'unknown image os type %s' % os_type
        return False, err_msg
    # wait for the qemu guest agent to come up (5 min, 10s poll)
    timeout = 300
    poll_seconds = 10
    deadline = time.time() + timeout
    while time.time() < deadline and not connect_instance.getqemuagentstuats():
        time.sleep(poll_seconds)
    ostype = IMAGE_OS_TYPE
    inject_stauts, mesg = vmManager.image_inject_data(connect_instance,
                                                      inject_data, host_ip,
                                                      image_name, ostype)
    if inject_stauts:
        message = "image tmp %s ip inject success!" % image_name
        return True, message
    message = "image tmp %s ip inject failed!" % image_name
    return False, message
def get_instance_disks_size(src_host_ip, instance_name, uuid, action=None):
    '''
    Collect name and size info for each disk of a vm.

    :param src_host_ip: host running the vm
    :param instance_name: vm name
    :param uuid: instance uuid / storage pool name
    :param action: unused, kept for interface compatibility
    :return: list of dicts like
        {'disk_size': ..., 'format': 'qcow2', 'image': 'hostname.img',
         'storage': <pool uuid>, 'dev': 'vda', 'path': ...};
        False when the storage pool connection fails, or None when a
        libvirt error occurs while reading volume sizes.
    '''
    disks = vmManager.libvirt_get_instance_device(src_host_ip,
                                                  instance_name)[1]
    # volume sizes come from the storage pool connection, keyed by image
    conn = vmManager.libvirt_get_connect(host=src_host_ip,
                                         conn_type="storage", poolname=uuid)
    if not conn:
        return False
    try:
        conn.refresh()
        for entry in disks:
            entry['disk_size'] = conn.get_volume_size(entry['image'])
    except libvirt.libvirtError as err:
        logging.error('instance %s get disk info error, because %s' %
                      (instance_name, err))
        return
    return disks
def _img_tem_shutdown(image_name):
    '''
    Shut down the image-template vm and wait (up to 10 minutes) until
    libvirt reports it shut off (status == 5).

    :param image_name: template vm name
    :return: (ok, message)
    '''
    host_ip = IMAGE_EDIT_SERVER
    # issue the shutdown through libvirt
    if not vmManager.libvirt_instance_shutdown(host_ip, image_name):
        message = 'image tmp %s shutdown failed' % image_name
        logging.info(message)
        return False, message
    message = 'image tmp %s exec shutdown successed' % image_name
    logging.info(message)
    conn = vmManager.libvirt_get_connect(host_ip, conn_type='instance',
                                         vmname=image_name)
    deadline = time.time() + 600  # 10 minute timeout
    while time.time() < deadline and conn.get_status() != 5:
        time.sleep(10)  # poll every 10 seconds
    if conn.get_status() != 5:
        msg = 'template exec shutdown time out!please check'
        logging.error(msg)
        return False, msg
    msg = 'template shutdown success'
    return True, msg
def console():
    """
    VNC console page for a vm.

    The instance uuid comes from the query string
    (e.g. ``?uuid=<instance_uuid>``).
    :return: rendered console page with a token cookie, or an error resp
    """
    console_server_host = request.host.split(':')[0]
    query = request.query_string.decode().strip()
    parts = query.split('=')
    # guard: the original indexed parts[1] unconditionally and raised
    # IndexError when the query string contained no '='
    instance_uuid = parts[1] if len(parts) > 1 else ''
    if not instance_uuid or not console_server_host:
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR)
    ins_info = InstanceService().get_instance_info_by_uuid(instance_uuid)
    if not ins_info:
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR)
    kvm_host_ip = get_hostip_of_instance(ins_info['id'])
    if not kvm_host_ip:
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR)
    # ask libvirtd for the vm's vnc console port
    connect_instance = vmManager.libvirt_get_connect(kvm_host_ip,
                                                     conn_type='instance',
                                                     vmname=ins_info['name'])
    status, vnc_port = vmManager.libvirt_get_vnc_console(
        connect_instance, ins_info['name'])
    if not status:
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR)
    # token format "<host>-<port>" is consumed by the websocket proxy
    token = str(kvm_host_ip) + '-' + str(vnc_port)
    resp = make_response(
        render_template('console-vnc.html',
                        vm_name=ins_info['name'],
                        ws_host=console_server_host,
                        ws_port=6080))
    resp.set_cookie('token', token)
    # Best-effort audit record; failures here must not break the console.
    try:
        user = user_service.get_user()
        extra_data = ("name:" + ins_info['name'] + "," +
                      "uuid:" + instance_uuid)
        add_operation_other(user["user_id"], OperationObject.VM,
                            OperationAction.CONSOLE, "SUCCESS", extra_data)
    except Exception:  # narrowed from a bare except; still best-effort
        pass
    return resp
def instance_disk_config(msg_data):
    '''
    Resize instance disks described in a queue message.

    For every disk in the message: resize the backing volume through
    libvirt, resolve the guest device name, verify the vm is running,
    then grow the filesystem inside the guest via the qemu agent and
    update the db record. Each step is tracked in instance_actions.

    :param msg_data: raw message; its 'data' dict carries user_id,
        request_id, instance_id, uuid and disk_msg (list of disks with
        'mount_point' and 'size_gb')
    :return: 1 when request_id/instance_id are missing, otherwise a
        summary string
    '''
    logging.info("--" * 25)
    logging.info(msg_data)
    msg = json_helper.read(msg_data)
    data = msg.get('data')
    user_id = data.get('user_id')
    request_id = data.get('request_id')
    instance_id = data.get('instance_id')
    vm_uuid = data.get('uuid')
    disks = data.get('disk_msg')
    if request_id is None or instance_id is None:
        return 1
    instance_info = InstanceService().get_instance_info(instance_id)
    for disk in disks:
        # locate the disk backing the mount point and read its current
        # size and vg/lv names
        host_ip = get_hostip_of_instance(instance_id)
        mount_point = disk['mount_point']
        if mount_point and host_ip and instance_info['name']:
            vm_name = instance_info['name']
            status, disk_path, disk_size, vm_vg_lv, _message = _check_volume_is_existed_and_volume_size(
                host_ip, vm_uuid, vm_name, mount_point, request_id, user_id)
            if status:
                if disk['size_gb'] <= disk_size:
                    # only growing is supported; shrink requests are ignored
                    logging.info('disk size user input less than now, failed')
                else:
                    add_instance_actions(vm_uuid, request_id, user_id,
                                         InstaceActions.INSTANCE_DISK_RESIZE,
                                         'start')
                    connect_disk_resize = vmManager.libvirt_get_connect(
                        host_ip, conn_type='instance', vmname=vm_name)
                    # grow the backing volume on the host
                    disk_resize_status, disk_resize_msg = vmManager.libvirt_config_disk_resize(
                        connect_disk_resize, disk_path, int(disk['size_gb']))
                    logging.info(disk_resize_msg)
                    if disk_resize_status:
                        instance_disk_resize_status = ActionStatus.SUCCSESS
                        _message = 'instance %s disk %s resize successful' % (
                            vm_name, disk_path)
                        update_instance_actions(
                            vm_uuid, request_id, user_id,
                            InstaceActions.INSTANCE_DISK_RESIZE,
                            instance_disk_resize_status, _message)
                        print 'start to get disk dev:'
                        add_instance_actions(
                            vm_uuid, request_id, user_id,
                            InstaceActions.INSTANCE_DISK_DEVICE, 'start')
                        connect_disk_device_get = vmManager.libvirt_get_connect(
                            host_ip, conn_type='instance', vmname=vm_name)
                        # map the volume path to the guest device (e.g. vdb)
                        disk_device_status, disk_dev = vmManager.libvirt_get_disk_device_by_path(
                            connect_disk_device_get, disk_path)
                        logging.info('disk device is: ' + disk_dev)
                        if disk_device_status:
                            instance_disk_device_status = ActionStatus.SUCCSESS
                            _message = 'instance %s disk device get successful, device name is %s' % (
                                vm_name, disk_dev)
                            update_instance_actions(
                                vm_uuid, request_id, user_id,
                                InstaceActions.INSTANCE_DISK_DEVICE,
                                instance_disk_device_status, _message)
                            # check that the vm is running before injecting
                            add_instance_actions(
                                vm_uuid, request_id, user_id,
                                InstaceActions.INSTANCE_STATUS_CHECK, 'start')
                            print 'start to check instance %s status' % instance_id
                            instance_info = InstanceService(
                            ).get_instance_info(instance_id)
                            # '3' means running in the instance table
                            if instance_info['status'] == '3':
                                instance_status = ActionStatus.SUCCSESS
                                _message = 'instance %s is running' % vm_name
                                update_instance_actions(
                                    vm_uuid, request_id, user_id,
                                    InstaceActions.INSTANCE_STATUS_CHECK,
                                    instance_status, _message)
                                add_instance_actions(
                                    vm_uuid, request_id, user_id,
                                    InstaceActions.INSTANCE_DISK_INJECT_TO_OS,
                                    'start')
                                # grow the fs inside the guest via qemu agent
                                connect_disk_inject = vmManager.libvirt_get_connect(
                                    host_ip, conn_type='instance',
                                    vmname=vm_name)
                                # NOTE(review): the extra '- 1' appears to be
                                # a safety margin for rounding — confirm
                                disk_add_size = str(
                                    int(disk['size_gb']) - int(disk_size) - 1)
                                inject_disk_stauts, result_msg = vmManager.libvirt_inject_resize_disk(
                                    connect_disk_inject, disk_dev, vm_vg_lv,
                                    disk_add_size)
                                logging.info(result_msg)
                                if inject_disk_stauts:
                                    instance_disk_inject_on_os_status = ActionStatus.SUCCSESS
                                    _message = 'instance %s disk resize on os successful' % vm_name
                                    update_instance_actions(
                                        vm_uuid, request_id, user_id,
                                        InstaceActions.INSTANCE_DISK_INJECT_TO_OS,
                                        instance_disk_inject_on_os_status,
                                        _message)
                                    # persist the new disk size in the db
                                    update_data = {
                                        'size_gb': disk['size_gb'],
                                        'updated_at': get_datetime_str()
                                    }
                                    where_data = {
                                        'instance_id': instance_id,
                                        'mount_point': mount_point
                                    }
                                    ret = ins_d_s.InstanceDiskService(
                                    ).update_instance_disk_info(
                                        update_data, where_data)
                                else:
                                    instance_disk_inject_on_os_status = ActionStatus.FAILD
                                    _message = 'instance %s disk resize on os failed because %s' % (
                                        vm_name, result_msg)
                                    update_instance_actions(
                                        vm_uuid, request_id, user_id,
                                        InstaceActions.INSTANCE_DISK_INJECT_TO_OS,
                                        instance_disk_inject_on_os_status,
                                        _message)
                            else:
                                instance_status = ActionStatus.FAILD
                                _message = 'failed: instance %s not running' % vm_name
                                update_instance_actions(
                                    vm_uuid, request_id, user_id,
                                    InstaceActions.INSTANCE_STATUS_CHECK,
                                    instance_status, _message)
                        else:
                            instance_disk_device_status = ActionStatus.FAILD
                            _message = 'instance %s disk device get failed' % vm_name
                            update_instance_actions(
                                vm_uuid, request_id, user_id,
                                InstaceActions.INSTANCE_DISK_DEVICE,
                                instance_disk_device_status, _message)
                    else:
                        instance_disk_resize_status = ActionStatus.FAILD
                        _message = 'instance %s disk %s resize faile because %s' % (
                            vm_name, disk_path, disk_resize_msg)
                        update_instance_actions(
                            vm_uuid, request_id, user_id,
                            InstaceActions.INSTANCE_DISK_RESIZE,
                            instance_disk_resize_status, _message)
            else:
                logging.info(_message)
    return 'all job done, check whether has error in database instance'
def vm_define(request_id, vm_ostype, kvmhost, hostname, memory_mb, vcpu,
              uuid, volumes_d, vlan, mac):
    '''
    Define (register) a migrated esx vm on the destination kvm host.

    Builds the domain xml via v2v_esx_xml, retrying up to 3 times on
    libvirt errors, and records progress/failure in the v2v tables.

    :param request_id: v2v request id
    :param vm_ostype: "Linux" selects the linux variant of the xml
    :param kvmhost: destination kvm host ip
    :param hostname: vm hostname
    :param memory_mb: memory size in MB
    :param vcpu: vcpu count
    :param uuid: instance uuid (also the storage pool name)
    :param volumes_d: dict describing the vm's disks
    :param vlan: vlan id appended to 'br_bond0.'
    :param mac: mac address
    :return: (True, instance_xml) on success, (False, err) when all three
        attempts fail; NOTE(review): returns None implicitly when the
        libvirt connection cannot be established — confirm callers handle it
    '''
    connect_create = instanceManager.libvirt_get_connect(kvmhost)
    if not connect_create:
        message = '连接kvmhost libvirt失败'
        # NOTE(review): this lock is local to the call and does not
        # synchronize anything across threads
        threadlock = threading.Lock()
        threadlock.acquire()
        v2v_op.updata_v2v_message(request_id, message)
        v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
        v2v_op.updata_v2v_ontask(request_id, '0')
        in_a_s.update_instance_actions(request_id, esx_v2vActions.VM_DEFINE1,
                                       ActionStatus.FAILD, message)
        threadlock.release()
    else:
        vm_vlan = 'br_bond0.' + vlan
        # refresh so libvirt can see the freshly copied disks
        connect_create.refresh_storage_pool_by_name(uuid)
        succeed_create_xml = False
        retry_create_xml = 0
        # retry the xml build up to 3 times; only libvirt errors count
        # as a retry, success exits via succeed_create_xml
        while retry_create_xml < 3 and not succeed_create_xml:
            try:
                if vm_ostype == "Linux":
                    instance_xml = connect_create.v2v_esx_xml(
                        hostname, memory_mb, vcpu, False, uuid, volumes_d,
                        'default', vm_vlan, True, mac)
                else:
                    instance_xml = connect_create.v2v_esx_xml(
                        hostname, memory_mb, vcpu, False, uuid, volumes_d,
                        'default', vm_vlan, False, mac)
                succeed_create_xml = True
            except libvirtError as err:
                logging.error(
                    "create host connect failed ,name: %s ;because %s" %
                    (hostname, err))
                retry_create_xml += 1
                time.sleep(5)
        if retry_create_xml == 3:
            # all three attempts raised; err is bound by the last except
            message = "vm注册失败"
            threadlock = threading.Lock()
            threadlock.acquire()
            v2v_op.updata_v2v_message(request_id, message)
            v2v_op.update_v2v_actions(request_id, ActionStatus.FAILD)
            v2v_op.updata_v2v_ontask(request_id, '0')
            in_a_s.update_instance_actions(request_id,
                                           esx_v2vActions.VM_DEFINE1,
                                           ActionStatus.FAILD, message)
            threadlock.release()
            return False, err
        else:
            message = 'vm注册成功'
            threadlock = threading.Lock()
            threadlock.acquire()
            v2v_op.updata_v2v_message(request_id, message)
            in_a_s.update_instance_actions(request_id,
                                           esx_v2vActions.VM_DEFINE1,
                                           ActionStatus.SUCCSESS, message)
            v2v_op.update_v2v_step(request_id, esx_v2vActions.VM_DEFINE1)
            v2v_op.updata_v2v_ontask(request_id, '0')
            threadlock.release()
            return True, instance_xml
def hot_migrate(msg_data):
    '''
    Hot-migrate a vm to another host.

    Workflow: read the message -> record disk info -> add the destination
    host to the source host's /etc/hosts -> create the image directory and
    storage pool on the destination -> pre-create equally-sized disks ->
    run the libvirt live migration (with a watchdog timer) -> clean up on
    the source and update db state. On any failure the instance status is
    reset to running, the new instance_host row is removed, and the
    action/migrate rows are marked failed.

    :param msg_data: raw queue message; its 'data' dict carries
        request_id, user_id, task_id, migrate_tab_id, ins_data_s and the
        source/destination host dicts
    :return: None (progress is reported through the db tables)
    '''
    msg = json_helper.read(msg_data)
    data = msg.get('data')
    request_id = data.get('request_id')
    user_id = data.get('user_id')
    task_id = data.get('task_id')
    migrate_tab_id = data.get('migrate_tab_id')
    ins_data_s = data.get('ins_data_s')
    uuid = data['ins_data_s'].get('uuid')
    instance_id = data['ins_data_s'].get('id')
    instance_name = data['ins_data_s'].get('name')
    # speed_limit = data.get('speed_limit')  # currently unused field
    dst_host_ip = data['host_data_d'].get('ipaddress')
    dst_host_name = data['host_data_d'].get('name')
    dst_host_id = data['host_data_d'].get('id')
    src_host_ip = data['host_data_s'].get('ipaddress')
    src_host_name = data['host_data_s'].get('name')
    src_host_id = data['host_data_s'].get('id')
    pool_status = False
    if dst_host_ip is None or request_id is None:
        # NOTE(review): this bail-out marks the migration SUCCESS even
        # though the input was invalid — confirm this is intended
        logging.error("empty input of host_ip or request_id")
        _update_instance_status(uuid, VMStatus.STARTUP)
        _change_migrate_host(dst_host_id, instance_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.SUCCESS)
        return
    # Collect the vm's disk names/sizes before starting.
    # On any error during migration the code below:
    #  1. resets instance.status to running
    #  2. deletes the new instance_dsthost row from instance_host
    #  3. marks the instance_actions row failed (2)
    #  4. marks instance_migrate.migrate_status failed (2)
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_DISK_INFO,
                         'start')
    list_instance_status = get_instance_disks_size(src_host_ip,
                                                   instance_name, uuid)
    if not list_instance_status:
        message = 'instance %s get migrate stats error' % instance_name
        logging.error(message)
        _update_instance_status(uuid, VMStatus.STARTUP)
        _change_migrate_host(dst_host_id, instance_id)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_DISK_INFO,
                                ActionStatus.FAILD, message, task_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.FAILED)
        return
    else:
        message = 'instance %s get migrate stats success' % instance_name
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_DISK_INFO,
                                ActionStatus.SUCCSESS, message, task_id)
    # add the destination host's (ip, name) record to the source host's
    # hosts file, retrying up to 8 times
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_HOSTS_ADD,
                         'start')
    logging.info('add dst_host record in src_host %s' % uuid)
    retry_directory_create = 0
    while retry_directory_create < 8:
        ret_d = ansible_remote_mkdir_and_dns(src_host_ip, dst_host_ip,
                                             dst_host_name)
        if ret_d:
            message = 'host %s add dst_host record successful %s' % (
                dst_host_ip, uuid)
            update_instance_actions(
                uuid, request_id, user_id,
                InstaceActions.INSTANCE_HOT_MIGRATE_HOSTS_ADD,
                ActionStatus.SUCCSESS, message, task_id)
            break
        retry_directory_create += 1
        time.sleep(5)
    else:
        # while/else: all 8 attempts failed
        message = "host %s add dst_host record error %s" % (src_host_ip, uuid)
        logging.error(message)
        _update_instance_status(uuid, VMStatus.STARTUP)
        _change_migrate_host(dst_host_id, instance_id)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_HOSTS_ADD,
                                ActionStatus.FAILD, message, task_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.FAILED)
        return
    # create the vm image directory on the destination host
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_MKDIR_DIR,
                         'start')
    logging.info('get instance disk info %s' % uuid)
    # up to 8 retries
    retry_directory_create = 0
    while retry_directory_create < 8:
        try:
            ret_d = ansible_remote_mkdir_instance_dir(dst_host_ip, uuid)
            if ret_d:
                message = 'host %s remote mkdir instance %s dir successful' % (
                    dst_host_ip, uuid)
                update_instance_actions(
                    uuid, request_id, user_id,
                    InstaceActions.INSTANCE_HOT_MIGRATE_MKDIR_DIR,
                    ActionStatus.SUCCSESS, message, task_id)
                break
        except libvirt.libvirtError as err:
            logging.error("vm %s create data disk in %s failed because %s" %
                          (instance_name, dst_host_ip, err))
            logging.error(err)
        retry_directory_create += 1
        time.sleep(5)
    else:
        message = 'host %s remote mkdir instance %s dir error' % (dst_host_ip,
                                                                  uuid)
        logging.error(message)
        _update_instance_status(uuid, VMStatus.STARTUP)
        _change_migrate_host(dst_host_id, instance_id)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_MKDIR_DIR,
                                ActionStatus.FAILD, message, task_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.FAILED)
        return
    # create the storage pool on the destination host
    # NOTE(review): 'message' (from the previous step) is passed where
    # every other call passes the literal 'start' — likely a bug; left
    # unchanged here
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_STORAGE_POOL,
                         message)
    logging.info('start create storage pool %s', uuid)
    ret_p = _create_storage_pool(dst_host_ip, uuid)
    if not ret_p:
        message = 'host %s create storage pool error' % dst_host_ip
        logging.error(message)
        _change_migrate_host(dst_host_id, instance_id)
        _update_instance_status(uuid, VMStatus.STARTUP)
        update_instance_actions(
            uuid, request_id, user_id,
            InstaceActions.INSTANCE_HOT_MIGRATE_STORAGE_POOL,
            ActionStatus.FAILD, message, task_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.FAILED)
        return
    else:
        message = 'host %s create storage pool successful' % dst_host_ip
        update_instance_actions(
            uuid, request_id, user_id,
            InstaceActions.INSTANCE_HOT_MIGRATE_STORAGE_POOL,
            ActionStatus.SUCCSESS, message, task_id)
    # pre-create disks of the same size/name on the destination host
    disks_dir_path = []
    for instance_status in list_instance_status:
        disk_name = instance_status['image']
        dev_name = instance_status['dev']
        disk_size = instance_status['disk_size']
        # track created disk paths for cleanup on failure
        add_instance_actions(uuid, request_id, user_id,
                             "disk " + dev_name + " create", 'start')
        logging.info('start create disk %s, %s', disk_name, uuid)
        # 1073741824 = 1 GiB: disk_size is in bytes
        create_data_disk_ret, create_data_disk_msg, disk_dir_path = create_data_disk(
            request_id, dst_host_ip, uuid, user_id, disk_name, dev_name,
            int(disk_size / 1073741824), 'qcow2', task_id)
        disks_dir_path.append(disk_dir_path)
        if create_data_disk_ret is ActionStatus.FAILD:
            logging.error("instance %s create date disk failed because %s" %
                          (uuid, create_data_disk_msg))
            _change_migrate_host(dst_host_id, instance_id)
            _update_instance_status(uuid, VMStatus.STARTUP)
            # NOTE(review): 'message' here still holds the storage-pool
            # text, not the disk error — confirm intended
            update_instance_actions(
                uuid, request_id, user_id,
                InstaceActions.INSTANCE_HOT_MIGRATE_CHECK_DISK,
                ActionStatus.FAILD, message, task_id)
            ins_m_s.InstanceMigrateService().change_migrate_status(
                migrate_tab_id, MigrateStatus.FAILED)
            return
        else:
            message = 'instance %s create disk successful' % (uuid)
            update_instance_actions(
                uuid, request_id, user_id,
                InstaceActions.INSTANCE_HOT_MIGRATE_CHECK_DISK,
                ActionStatus.SUCCSESS, message, task_id)
    # run the actual libvirt migration
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_START_MOVE,
                         'start')
    logging.info('start hot migrate %s to destination host', uuid)
    # watchdog: cancel_move_to aborts the migration if it overruns
    # (timer is 7200s although the log text says 40min)
    move_timer = threading.Timer(7200, cancel_move_to,
                                 args=(src_host_ip, instance_name))
    # non-daemon so the main thread waits for the timer thread
    # NOTE(review): assigning to .setDaemon replaces the method instead of
    # calling setDaemon(False) — the timer keeps its default daemon flag
    move_timer.setDaemon = False
    move_timer.start()
    # perform the migration
    move_to_host, move_to_msg = vmManager.instance_migrate_speed_limit(
        src_host_ip, dst_host_ip, instance_name)
    if move_to_host == True:
        # True means the migration succeeded
        message = 'instance %s moving to host %s successful' % (instance_name,
                                                                dst_host_ip)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_START_MOVE,
                                ActionStatus.SUCCSESS, message, task_id)
    elif move_to_host == -1:
        # -1: migration was cancelled by the watchdog timer
        # message = 'instance %s hot migrate canceled because overtime' % (instance_name)
        _change_migrate_host(dst_host_id, instance_id)
        _update_instance_status(uuid, VMStatus.STARTUP)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_START_MOVE,
                                ActionStatus.FAILD, move_to_msg, task_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.FAILED)
        # migration failed: remove the disks just created on the dst host
        connect_create = vmManager.libvirt_get_connect(dst_host_ip,
                                                       conn_type='create')
        try:
            for disk_dir_path in disks_dir_path:
                connect_create.delete_volume(disk_dir_path)
        except libvirt.libvirtError as err:
            logging.error('host %s delete instance %s disk error because %s' %
                          (dst_host_ip, instance_name, err))
        return
    else:
        # any other return value means the migration itself failed
        logging.error('instance %s moving to host %s error' % (instance_name,
                                                               dst_host_ip))
        message = 'instance %s hot migrate moving faild' % (instance_name)
        _change_migrate_host(dst_host_id, instance_id)
        _update_instance_status(uuid, VMStatus.STARTUP)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_START_MOVE,
                                ActionStatus.FAILD, message, task_id)
        ins_m_s.InstanceMigrateService().change_migrate_status(
            migrate_tab_id, MigrateStatus.FAILED)
        # migration failed: remove the disks just created on the dst host
        connect_create = vmManager.libvirt_get_connect(dst_host_ip,
                                                       conn_type='create')
        try:
            for disk_dir_path in disks_dir_path:
                connect_create.delete_volume(disk_dir_path)
        except libvirt.libvirtError as err:
            logging.error('host %s delete instance %s disk error, because %s'
                          % (dst_host_ip, instance_name, err))
        return
    # Post-migration cleanup; failures below are only logged, they do not
    # fail the migration.
    # undefine the vm on the source host
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_UNDEFINE_S,
                         'start')
    logging.info('source host %s undefined instance', src_host_ip)
    ret_u = vmManager.libvirt_instance_undefined(src_host_ip, ins_data_s)
    if not ret_u:
        message = 'source host %s undefined instance error' % src_host_ip
        logging.error(message)
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_UNDEFINE_S,
                                ActionStatus.FAILD, message, task_id)
        return
    else:
        message = 'source host %s undefined instance successful' % src_host_ip
        update_instance_actions(uuid, request_id, user_id,
                                InstaceActions.INSTANCE_HOT_MIGRATE_UNDEFINE_S,
                                ActionStatus.SUCCSESS, message, task_id)
    # rename the old vm image directory on the source host (backup)
    add_instance_actions(uuid, request_id, user_id,
                         InstaceActions.INSTANCE_HOT_MIGRATE_BACKUP_NAME,
                         'start')
    logging.info('start backup %s name in sourse host', uuid)
    ret_d = ansible_change_migrate_dir(src_host_ip, dst_host_ip, uuid)
    if not ret_d:
        message = 'source host %s backup dir after migrate error' % src_host_ip
        logging.error(message)
        update_instance_actions(
            uuid, request_id, user_id,
            InstaceActions.INSTANCE_HOT_MIGRATE_BACKUP_NAME,
            ActionStatus.FAILD, message, task_id)
        return
    else:
        message = 'source host %s backup dir after migrate successful' % src_host_ip
        update_instance_actions(
            uuid, request_id, user_id,
            InstaceActions.INSTANCE_HOT_MIGRATE_BACKUP_NAME,
            ActionStatus.SUCCSESS, message, task_id)
    # set the instance status back to running after the migration
    vm_hot_migrate_status = VMStatus.STARTUP
    ret_v = _update_instance_status(uuid, vm_hot_migrate_status)
    if ret_v != 1:
        logging.error(
            'update instance status error when after hot migrate instance')
    # remove the vm's source-host row from instance_host
    ret_i = _change_migrate_host(src_host_id, instance_id)
    if ret_i <= 0:
        logging.error(
            'delete instance_src_host error when after hot migrate instance'
        )
    # mark instance_migrate as finished (1)
    ret_u = ins_m_s.InstanceMigrateService().change_migrate_status(
        migrate_tab_id, MigrateStatus.SUCCESS)
    if ret_u != 1:
        logging.error(
            'update instance migrate info error when after hot migrate instance'
        )
def extend_disk(instance_id):
    """
    Report disk/mount-point information for an instance (the "extend disk"
    display API): for linux guests it queries the guest through the qemu
    guest agent, for windows guests it reads block sizes via libvirt.

    :param instance_id: DB id of the instance
    :return: json_helper API response whose data carries
             ``mount_point_list`` and ``qemu_ga_update``
    """
    c_system = ''
    c_version = None
    qemu_ga_update = False
    mount_point_list = []
    if not instance_id:
        logging.info('no instance id when get configure info')
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR, msg="参数错误")
    ins_data = ins_s.InstanceService().get_instance_info(instance_id)
    host_ip = ins_s.get_hostip_of_instance(instance_id)
    # Bug fix: validate ins_data BEFORE indexing it — the original read
    # ins_data['uuid'] first, so a missing instance raised instead of
    # returning SYS_ERR.
    if not ins_data or not host_ip:
        data_params = {'mount_point_list': mount_point_list, "qemu_ga_update": qemu_ga_update}
        logging.info('instance %s data is no exist in db when get configure info', instance_id)
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, data=data_params, msg='数据库查询错误')
    uuid = ins_data['uuid']
    user_id = get_user()['user_id']
    request_id = ins_s.generate_req_id()
    # Resolve guest OS type/version; create_source '0' = image-created,
    # otherwise the instance came from a v2v conversion.
    if ins_data['create_source'] == '0':
        ins_images_data = ins_s.get_images_of_instance(instance_id)
        if ins_images_data:
            c_system = ins_images_data[0]['system']
            c_version = ins_images_data[0]['version']
    else:
        ins_images_data = v2v_ins_s.V2VInstanceService().get_v2v_instance_info(instance_id)
        if ins_images_data:
            c_system = ins_images_data['os_type']
            if ins_images_data['os_version'] and '6.6' in ins_images_data['os_version']:
                c_version = '6.6'
            elif ins_images_data['os_version'] and '7.2' in ins_images_data['os_version']:
                c_version = '7.2'
    # Record that the disk-info display action started.
    ins_a_s.add_disk_display_action_to_database(uuid, request_id, user_id,
                                                InstaceActions.INSTANCE_DISK_INFO_DISPLAY,
                                                ActionStatus.START, 'start')
    if c_system == "linux" and c_version:
        # Map the version string onto the two supported CentOS majors.
        c_version = CentOS_Version.CentOS_7 if c_version >= '7.0' else CentOS_Version.CentOS_6
        ins_a_s.add_disk_display_action_to_database(uuid, request_id, user_id,
                                                    InstaceActions.INSTANCE_UPDATE_QEMU_AGENT,
                                                    ActionStatus.START, 'start')
        # Update the qemu-guest-agent inside the guest first.
        vmManager.update_qemu_ga_instance(c_version, host_ip, ins_data['name'])
        connect_instance = vmManager.libvirt_get_connect(host_ip, conn_type='instance', vmname=ins_data['name'])
        if not connect_instance:
            data_params = {'mount_point_list': mount_point_list, "qemu_ga_update": qemu_ga_update}
            msg = "libvirt连接建立失败,无法使用libvirt管理虚拟机"
            # Bug fix: original call dropped user_id and swapped the
            # action/status arguments relative to every other call site.
            ins_a_s.add_disk_display_action_to_database(uuid, request_id, user_id,
                                                        InstaceActions.INSTANCE_LIBVIRT_ERROR,
                                                        ActionStatus.FAILD, msg)
            return json_helper.format_api_resp(code=ErrorCode.ALL_FAIL, data=data_params, msg=msg)
        # Re-check the agent to confirm the update really took effect.
        flag, msg = connect_instance.test_update_command()
        qemu_ga_update = flag
        if flag:
            _message = "qemu_agent update successfully"
            ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_UPDATE_QEMU_AGENT,
                                                   ActionStatus.SUCCSESS, _message)
            # v2v guests can show stale LV names in pvs; rescan first.
            connect_instance.exec_qemu_command("pvscan")
            # Collect every mount point (column 7 of lsblk -r), excluding /boot.
            command = "lsblk -r | awk '{print $7}' | awk NF | grep '^/' | grep -v '^/boot'"
            flag, msg = connect_instance.exec_qemu_command(command)
            mount_point = msg.splitlines()
            # Collect name/size/type/mountpoint per block device.
            command = "lsblk -r | awk '{print $1" + "\\\" " + "\\\"" + "$4" + "\\\" " + "\\\"" + "$6" + "\\\" " + "\\\"" + "$7}'"
            flag, msg = connect_instance.exec_qemu_command(command)
            parts_info = msg.split('\n')
            parts_info.pop()
            mount_point_list = []
            mount_list = []
            for mount in mount_point:
                disk_info = {}
                mount_data = filter(lambda x: x.split(' ')[-1] == mount, parts_info)
                data = mount_data[0].split(' ')
                disk_info['mount_point'] = data[-1]
                # Skip mount points already reported.
                if disk_info['mount_point'] in mount_list:
                    continue
                else:
                    mount_list.append(disk_info['mount_point'])
                disk_info['mount_partition_name'] = data[0]
                # Size column ends in a unit suffix; anything not 'G' is
                # treated as MiB and converted to GiB.
                disk_info['mount_point_size'] = float(data[1][0:-1]) if data[1][-1].endswith('G') else float(
                    data[1][0:-1]) / 1024
                disk_info['mount_partition_type'] = data[2]
                command = "df -P '%s'| awk 'NR==2 {print $5}'" % mount
                flag, msg = connect_instance.exec_qemu_command(command)
                disk_info['mount_point_use'] = msg.strip()
                mount_point_list.append(disk_info)
            data_params = {'mount_point_list': mount_point_list, "qemu_ga_update": qemu_ga_update}
            _message = "linux os disk information display successfully"
            ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_DISK_INFO_DISPLAY,
                                                   ActionStatus.SUCCSESS, _message)
            return json_helper.format_api_resp(code=ErrorCode.SUCCESS, data=data_params)
        else:
            # qemu-guest-agent update failed; report and bail out.
            _message = "qemu_agent update failed"
            ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_UPDATE_QEMU_AGENT,
                                                   ActionStatus.FAILD, _message)
            data_params = {'mount_point_list': mount_point_list, "qemu_ga_update": qemu_ga_update}
            return json_helper.format_api_resp(code=ErrorCode.ALL_FAIL, data=data_params, msg='qemu update fail')
    elif c_system == "windows":
        # Windows path: read device list and sizes via libvirt, not the agent.
        connect_instance = vmManager.libvirt_get_connect(host_ip, conn_type='instance', vmname=ins_data['name'])
        disk_info = connect_instance.get_configure_disk_device(uuid)
        storage_instance = vmManager.libvirt_get_connect(host_ip, conn_type='storage', vmname=ins_data['name'],
                                                         poolname=uuid)
        mount_point_list = []
        for x in disk_info:
            d = {}
            block_info = storage_instance.get_disk_size(x['image'])
            d.setdefault('mount_point', x['dev'])
            # bytes -> GiB for size, allocation ratio -> percent for usage.
            d.setdefault('mount_point_size', '%.1f' % (float(block_info[0]) / 1073741824))
            d.setdefault('mount_point_use', '%.2f' % (float(block_info[1]) / block_info[0] * 100))
            d.setdefault('mount_partition_name', "")
            d.setdefault('mount_partition_type', "")
            mount_point_list.append(d)
        qemu_ga_update = True
        data_params = {'mount_point_list': mount_point_list, "qemu_ga_update": qemu_ga_update}
        _message = "disk information display successfully"
        ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_DISK_INFO_DISPLAY,
                                               ActionStatus.SUCCSESS, _message)
        return json_helper.format_api_resp(code=ErrorCode.SUCCESS, data=data_params, msg=_message)
    else:
        # Unknown / unsupported guest OS.
        data_params = {'mount_point_list': mount_point_list, "qemu_ga_update": qemu_ga_update}
        _message = "os type unknown, please call kvm administrators"
        ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_DISK_INFO_DISPLAY,
                                               ActionStatus.FAILD, _message)
        return json_helper.format_api_resp(code=ErrorCode.ALL_FAIL, data=data_params, msg=_message)
def instance_configure(instance_id):
    '''
    Modify a VM's configuration (flavor, group, network, disk extension).

    Rules (translated from the original docstring):
      - hot change (VM running):  cpu increase only; disk grow only
      - cold change (VM stopped): cpu/mem increase or decrease; disk grow only

    :param instance_id: DB id of the instance to reconfigure
    :return: json_helper API response describing success or the failure reason
    '''
    # New settings posted with the request.
    n_flavor_id = request.values.get('flavor_id')
    n_app_info = request.values.get('app_info')
    n_owner = request.values.get('owner')
    n_group_id = request.values.get('group_id')
    n_net_conf_list_req = request.values.get('net_status_list')
    # Disk-extension request parameters.
    n_extend_list_req = request.values.get('extend_list')
    n_qemu_ga_update_req = request.values.get('qemu_ga_update')
    c_system = ''
    c_version = None
    if not instance_id or not n_flavor_id or not n_group_id:
        logging.error('params is invalid when change instance configure')
        return json_helper.format_api_resp(code=ErrorCode.PARAM_ERR)
    ins_data = ins_s.InstanceService().get_instance_info(instance_id)
    # NOTE(review): ins_data is indexed here BEFORE the `if not ins_data`
    # guard below, so a missing instance raises instead of returning SYS_ERR
    # — confirm and reorder if that is not intended.
    uuid = ins_data['uuid']
    user_id = get_user()['user_id']
    request_id = ins_s.generate_req_id()
    if not ins_data:
        logging.error('the instance %s is not exist in db when change instance configure', instance_id)
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR)
    # Resolve guest OS type; create_source '0' = image-created instance,
    # anything else = instance imported through v2v conversion.
    if ins_data['create_source'] == '0':
        ins_images_data = ins_s.get_images_of_instance(instance_id)
        if ins_images_data:
            c_system = ins_images_data[0]['system']
    else:
        ins_images_data = v2v_ins_s.V2VInstanceService().get_v2v_instance_info(instance_id)
        if ins_images_data:
            c_system = ins_images_data['os_type']
    ins_status = ins_data['status']
    # Physical host and datacenter information for the VM.
    host_data = ins_s.get_host_of_instance(instance_id)
    ins_datacenter_info = ins_s.get_datacenter_of_instance(instance_id)
    if not host_data or not ins_datacenter_info:
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg="无法获取虚拟机所在物理机信息、机房信息")
    ins_data['dc_type'] = ins_datacenter_info['dc_type']
    # Target (new) flavor.
    n_flavor_data = fla_s.FlavorService().get_flavor_info(n_flavor_id)
    if not n_flavor_data:
        logging.error('flavor %s is invalid in db when change instance configure', n_flavor_id)
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg='新的配置数据有误,无法修改配置')
    # Current flavor of the instance.
    c_flavor_data = ins_s.get_flavor_of_instance(instance_id)
    if not c_flavor_data:
        logging.error('instance %s flavor is invalid in db when change instance configure', instance_id)
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR)
    c_group_data = ins_s.get_group_of_instance(instance_id)
    if c_group_data and int(c_group_data['group_id']) != int(n_group_id):
        # Moving to another group: check the new group's quota first.
        is_group_enough, req_msg = _check_change_group_quota(n_group_id, n_flavor_data, c_flavor_data)
        if not is_group_enough:
            logging.error('new group %s quota is not enough to change new flavor', n_group_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg=req_msg)
    params = {}
    if json_helper.loads(n_extend_list_req):
        # Disk extension requested: re-check group quota including the
        # requested extension sizes.
        is_group_enough, req_msg = _check_change_group_quota(n_group_id, n_flavor_data, c_flavor_data,
                                                            json_helper.loads(n_extend_list_req))
        if not is_group_enough:
            logging.error('new group %s quota is not enough to change new flavor', n_group_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg=req_msg)
        # Check the physical host has capacity for the extension.
        is_host_available, ret_msg = __check_host_capacity(host_data, n_flavor_data, c_flavor_data,
                                                           json_helper.loads(n_extend_list_req))
        if not is_host_available:
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg=ret_msg)
        vmname = ins_data['name']
        uuid = ''
        host_ip = ''
        n_extend_list_req = json_helper.loads(n_extend_list_req)
        try:
            uuid = ins_data['uuid']
            host_ip = ins_s.get_hostip_of_instance(instance_id)
        except:
            # Best effort: fall back to the empty defaults set above.
            pass
        connect_instance = vmManager.libvirt_get_connect(host_ip, conn_type='instance', vmname=ins_data['name'])
        if not connect_instance:
            # NOTE(review): a failed libvirt connection is silently ignored
            # here; exec_qemu_command below would then fail on None —
            # confirm whether an early error return was intended.
            pass
        # Record extension start and flip the VM into "configuring" state.
        ins_a_s.update_instance_status(VMStatus.CONFIGURE_ING, instance_id)
        ins_a_s.add_disk_extend_action_to_database(uuid, request_id, user_id,
                                                   InstaceActions.INSTANCE_DISK_EXTEND,
                                                   ActionStatus.START, 'start')
        if n_qemu_ga_update_req:
            # Guest agent usable: run the OS-specific extension path.
            if c_system.strip() == 'linux':
                # Root filesystem type selects the CentOS major used by the
                # extend helper: ext4 -> CentOS 6, otherwise CentOS 7.
                flag, msg = connect_instance.exec_qemu_command(
                    "cat /proc/self/mounts | grep -w / | grep -v rootfs | awk '{print $3}'")
                if not flag:
                    c_version = None
                c_version = CentOS_Version.CentOS_6 if msg.strip() == 'ext4' else CentOS_Version.CentOS_7
                flag, result = ins_a_s.extend_mount_size(n_extend_list_req, host_ip, vmname, uuid, c_version,
                                                         instance_id)
            elif c_system.strip() == 'windows':
                flag, result = ins_a_s.extend_dev_size(n_extend_list_req, host_ip, vmname, uuid, instance_id)
            else:
                flag = False
            if flag:
                msg = "扩容成功"
                ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_DISK_EXTEND,
                                                       ActionStatus.SUCCSESS, msg)
                ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            else:
                msg = "扩容失败,{}".format(result)
                ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_DISK_EXTEND,
                                                       ActionStatus.FAILD, msg)
                ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
                return json_helper.format_api_resp(code=ErrorCode.ALL_FAIL, msg=msg)
        else:
            # Non-linux system / powered off / qemu-guest-agent not updated.
            msg = "非linux系统,扩容失败"
            ins_a_s.update_disk_action_to_database(request_id, InstaceActions.INSTANCE_DISK_EXTEND,
                                                   ActionStatus.FAILD, msg)
            ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            return json_helper.format_api_resp(code=ErrorCode.ALL_FAIL, msg=msg)
    else:
        pass
    if c_flavor_data['vcpu'] != n_flavor_data['vcpu'] or c_flavor_data['memory_mb'] != n_flavor_data['memory_mb']:
        # Flavor change: validate current group quota and host capacity.
        is_group_enough, req_msg = _check_change_group_quota(n_group_id, n_flavor_data, c_flavor_data)
        if not is_group_enough:
            logging.error('new group %s quota is not enough to change new flavor', n_group_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg=req_msg)
        is_host_available, ret_msg = __check_host_capacity(host_data, n_flavor_data, c_flavor_data)
        if not is_host_available:
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg=ret_msg)
    if ins_status == VMStatus.SHUTDOWN:
        # Cold change: all remaining modifications are allowed.
        pass
    elif ins_status == VMStatus.STARTUP:
        # Hot change: vcpu may only grow.
        if c_flavor_data['vcpu'] > n_flavor_data['vcpu']:
            logging.error('vcpu only be increase in startup status, now vcpu %s > new vcpu %s',
                          c_flavor_data['vcpu'], n_flavor_data['vcpu'])
            ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg='开机状态下,CPU数量只能增加不能减少')
        # Hot change: memory may not be modified at all.
        if c_flavor_data['memory_mb'] != n_flavor_data['memory_mb']:
            logging.error('memory only no allow be change in startup status, now mem %s, new mem %s',
                          c_flavor_data['memory_mb'], n_flavor_data['memory_mb'])
            ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg='开机状态下,不能修改内存容量')
    else:
        # Reconfiguration is only defined for running or stopped VMs.
        logging.error('instance status %s is invalid when change instance configure', ins_status)
        ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
        return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg='只能在开机或关机状态下修改配置')
    if n_flavor_data['vcpu'] == c_flavor_data['vcpu']:
        pass
    else:
        params['new_vcpu'] = n_flavor_data['vcpu']
        params['old_vcpu'] = c_flavor_data['vcpu']
    new_mem = n_flavor_data['memory_mb']
    old_mem = c_flavor_data['memory_mb']
    if new_mem == old_mem:
        pass
    else:
        # Memory change: make sure physical memory is not over-allocated.
        if not _check_mem_allocation(instance_id, new_mem, old_mem):
            logging.error('instance %s mem has over allocation, new mem %s, old mem %s', instance_id, new_mem,
                          old_mem)
            ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR, msg='物理内存不能超分')
        params['new_mem'] = new_mem
        params['old_mem'] = old_mem
    # Network configuration changes, if any.
    n_net_conf_list = json_helper.loads(n_net_conf_list_req)
    if n_net_conf_list:
        params['net_status_list'] = n_net_conf_list
    if not params:
        # Nothing to change at the hypervisor level; still update DB metadata.
        logging.error('vcpu, mem, disk no one can change when change instance configure')
    else:
        host_ip = ins_s.get_hostip_of_instance(ins_data['id'])
        if not host_ip:
            logging.error('instance %s has no host ip when change instance configure', ins_data['id'])
            ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR)
        ret_flavor = ins_a_s.change_instance_configure(host_ip, ins_data, c_flavor_data['flavor_id'], n_flavor_id,
                                                       ins_status, **params)
        if not ret_flavor:
            ins_a_s.update_instance_status(VMStatus.STARTUP, instance_id)
            return json_helper.format_api_resp(code=ErrorCode.SYS_ERR)
    # Persist app/owner metadata on the instance row; result deliberately
    # not treated as fatal (the original error-return is commented out).
    update_data_i = {
        'app_info': n_app_info,
        'owner': n_owner,
        'updated_at': get_datetime_str()
    }
    where_data_i = {
        'id': instance_id
    }
    ret_i = ins_s.InstanceService().update_instance_info(update_data_i, where_data_i)
    # Persist the (possibly new) group membership; also best-effort.
    update_data_g = {
        'group_id': n_group_id,
        'updated_at': get_datetime_str()
    }
    where_data_g = {
        'instance_id': instance_id
    }
    ret_g = ins_g_s.InstanceGroupService().update_instance_group_info(update_data_g, where_data_g)
    return json_helper.format_api_resp(code=ErrorCode.SUCCESS, msg='修改配置成功')
def collect_instances_data(host_ip):
    '''
    Collect the state of every instance on the given host and sync it to
    the database. If libvirt is unreachable, instances are marked ERROR
    (except for protected states); otherwise their status is refreshed
    from the live libvirt state.

    :param host_ip: IP address of the physical host to poll
    :return: None — all results go to the database and the log
    '''
    print '*' * 40
    print 'start colletc host ' + host_ip + ' instance data at ' + get_datetime_str()
    libvirt_connect_instance = libvirt_get_connect(host=host_ip, conn_type='instances')
    # libvirt unreachable: every instance on this host is flipped to ERROR,
    # except those in protected states handled below.
    if not libvirt_connect_instance:
        logging.error('unable to connect host %s when collect data', host_ip)
        instances = ins_s.get_instances_by_host_ip(host_ip)
        for _ins in instances:
            _ins_data = ins_s.InstanceService().get_instance_info_by_uuid(_ins['uuid'])
            if not _ins_data:
                logging.warn('libvirt instance uuid %s , but not exist in db when collect data', _ins['uuid'])
                continue
            # Instance stuck in CREATING beyond the allowed window (see
            # _check_vm_creating_time) is marked as a create failure.
            if _ins_data['status'] == VMStatus.CREATING:
                check_vm_create_status = _check_vm_creating_time(_ins_data['created_at'])
                if check_vm_create_status:
                    # Clone-created VMs get the clone-specific error status.
                    if _ins_data['clone_source_host']:
                        _update_data_i = {
                            'status': VMStatus.CLONE_CREATE_ERROR,
                            'updated_at': get_datetime_str()
                        }
                        _where_data_i = {
                            'uuid': _ins['uuid']
                        }
                        ret_i_create = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
                        if ret_i_create != 1:
                            logging.error(
                                'update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                                _ins['uuid'], _update_data_i, _where_data_i)
                    else:
                        _update_data_i = {
                            'status': VMStatus.CREATE_ERROR,
                            'updated_at': get_datetime_str()
                        }
                        _where_data_i = {
                            'uuid': _ins['uuid']
                        }
                        ret_i_create = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
                        if ret_i_create != 1:
                            logging.error(
                                'update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                                _ins['uuid'], _update_data_i, _where_data_i)
                # Still (or no longer) creating: never overwrite with ERROR.
                continue
            # Status is in the do-not-touch list (creating < window etc.).
            if _ins_data['status'] in INSTANCE_STATUS_NOT_UPDATE:
                logging.info('not update instance %s status when in status %s', _ins['uuid'], _ins_data['status'])
                continue
            # v2v machines mid-conversion keep their status as well.
            if _ins_data['status'] == VMStatus.CONVERTING:
                logging.info('not update instance %s status when in status %s', _ins['uuid'], _ins_data['status'])
                continue
            _update_data_i = {
                'status': VMStatus.ERROR,
                'updated_at': get_datetime_str()
            }
            _where_data_i = {
                'uuid': _ins['uuid']
            }
            ret_i = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
            if ret_i != 1:
                logging.error('update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                              _ins['uuid'], _update_data_i, _where_data_i)
    else:
        # libvirt reachable: sync DB status from the live libvirt state.
        instances = libvirt_get_all_instances_by_host(libvirt_connect_instance, host_ip)
        for _ins in instances:
            _ins_data = ins_s.InstanceService().get_instance_info_by_uuid(_ins['uuid'])
            if not _ins_data:
                logging.warn('libvirt instance uuid %s , but not exist in db when collect data', _ins['uuid'])
                continue
            # Same CREATING-timeout handling as in the unreachable branch.
            if _ins_data['status'] == VMStatus.CREATING:
                check_vm_create_status = _check_vm_creating_time(_ins_data['created_at'])
                if check_vm_create_status:
                    # Clone-created VMs get the clone-specific error status.
                    if _ins_data['clone_source_host']:
                        _update_data_i = {
                            'status': VMStatus.CLONE_CREATE_ERROR,
                            'updated_at': get_datetime_str()
                        }
                        _where_data_i = {
                            'uuid': _ins['uuid']
                        }
                        ret_i_create = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
                        if ret_i_create != 1:
                            logging.error(
                                'update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                                _ins['uuid'], _update_data_i, _where_data_i)
                    else:
                        _update_data_i = {
                            'status': VMStatus.CREATE_ERROR,
                            'updated_at': get_datetime_str()
                        }
                        _where_data_i = {
                            'uuid': _ins['uuid']
                        }
                        ret_i_create = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
                        if ret_i_create != 1:
                            logging.error(
                                'update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                                _ins['uuid'], _update_data_i, _where_data_i)
                    continue
                # NOTE: redundant with the continue above but preserved from
                # the original — CREATING instances are always skipped here.
                continue
            # Status is in the do-not-touch list.
            if _ins_data['status'] in INSTANCE_STATUS_NOT_UPDATE:
                logging.info('not update instance %s status when in status %s', _ins['uuid'], _ins_data['status'])
                continue
            # DB says "shutting down" while libvirt still reports running:
            # defer the update until the shutdown actually completes.
            if _ins_data['status'] == VMStatus.SHUTDOWN_ING and _ins['status'] != VMLibvirtStatus.SHUTDOWN:
                continue
            _update_data_i = {
                'status': _libvirt_status_2_ins_status(_ins['status']),
                'updated_at': get_datetime_str()
            }
            _where_data_i = {
                'uuid': _ins['uuid']
            }
            ret_i = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
            if ret_i != 1:
                logging.error('update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                              _ins['uuid'], _update_data_i, _where_data_i)
        # Second pass over DB-known instances: catch CREATING timeouts for
        # instances libvirt did not report (they never got defined).
        instances = ins_s.get_instances_by_host_ip(host_ip)
        for _ins in instances:
            _ins_data = ins_s.InstanceService().get_instance_info_by_uuid(_ins['uuid'])
            if not _ins_data:
                logging.warn('libvirt instance uuid %s , but not exist in db when collect data', _ins['uuid'])
                continue
            if _ins_data['status'] == VMStatus.CREATING:
                check_vm_create_status = _check_vm_creating_time(_ins_data['created_at'])
                if check_vm_create_status:
                    # Clone-created VMs get the clone-specific error status.
                    if _ins_data['clone_source_host']:
                        _update_data_i = {
                            'status': VMStatus.CLONE_CREATE_ERROR,
                            'updated_at': get_datetime_str()
                        }
                        _where_data_i = {
                            'uuid': _ins['uuid']
                        }
                        ret_i_create = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
                        if ret_i_create != 1:
                            logging.error(
                                'update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                                _ins['uuid'], _update_data_i, _where_data_i)
                    else:
                        _update_data_i = {
                            'status': VMStatus.CREATE_ERROR,
                            'updated_at': get_datetime_str()
                        }
                        _where_data_i = {
                            'uuid': _ins['uuid']
                        }
                        ret_i_create = ins_s.InstanceService().update_instance_info(_update_data_i, _where_data_i)
                        if ret_i_create != 1:
                            logging.error(
                                'update instance %s data error when collect instance data, update_data:%s, where_data:%s',
                                _ins['uuid'], _update_data_i, _where_data_i)
        # Record when this host's instances were last successfully collected.
        _update_data_h = {
            'instances_collect_time': get_datetime_str()
        }
        _where_data_h = {
            'ipaddress': host_ip,
            'isdeleted': '0'
        }
        ret_h = host_s.HostService().update_host_info(_update_data_h, _where_data_h)
        if ret_h != 1:
            logging.error('update collect time error when collect instance data, update_data:%s, where_data:%s',
                          _update_data_h, _where_data_h)
    print 'end colletc host ' + host_ip + ' instance data at ' + get_datetime_str()
    print '*' * 40
def _wait_for_qemu_agent(connect_instance, timeout, poll_seconds=10):
    # Poll the qemu guest agent until it reports ready or `timeout` seconds
    # elapse; injection is attempted afterwards either way (best effort).
    deadline = time.time() + timeout
    while time.time() < deadline and not connect_instance.getqemuagentstuats():
        time.sleep(poll_seconds)


def _inject_when_agent_ready(connect_instance, inject_data, host_ip, image_name, timeout):
    # Wait for the guest agent, then push `inject_data` into the guest via
    # vmManager.image_inject_data. Returns (ok, message).
    _wait_for_qemu_agent(connect_instance, timeout)
    ostype = IMAGE_OS_TYPE
    inject_status, mesg = vmManager.image_inject_data(
        connect_instance, inject_data, host_ip, image_name, ostype)
    if inject_status:
        return True, "image tmp %s ip inject success!" % image_name
    return False, "image tmp %s ip inject failed!" % image_name


def _img_tem_inject(image_name, ipaddr, netmask, gateway, os_type, os_ver, dns1, dns2):
    """
    Inject network settings (ip/netmask/gateway, plus DNS on windows) into an
    image-template VM running on the image-edit server.

    :param image_name: libvirt domain name of the template VM
    :param ipaddr: IPv4 address to configure in the guest
    :param netmask: netmask to configure in the guest
    :param gateway: default gateway to configure in the guest
    :param os_type: 'linux' or 'windows'
    :param os_ver: windows version ('2008' or '2012'); ignored for linux
    :param dns1: primary DNS server (windows path only)
    :param dns2: secondary DNS server (windows path only)
    :return: (bool success, str message)
    """
    host_ip = IMAGE_EDIT_SERVER
    # Get a libvirt connection to the template VM on the image-edit server.
    connect_instance = vmManager.libvirt_get_connect(host_ip, conn_type='instance', vmname=image_name)
    if not connect_instance:
        # Bug fix: the original never applied % image_name, so the literal
        # '%s' leaked into the log and the returned message.
        message = 'image tmp %s inject failed,because libvirt connection error' % image_name
        logging.info(message)
        return False, message
    if os_type == 'linux':
        # Rewrite eth0 ip/netmask and the default gateway in the sysconfig
        # files, then restart networking inside the guest.
        sed_eth0_ip_com = "sed -i 's/IPADDR.*/IPADDR=" + ipaddr + "/g' /etc/sysconfig/network-scripts/ifcfg-eth0"
        sed_eth0_mask_com = "sed -i 's/NETMASK.*/NETMASK=" + netmask + "/g' /etc/sysconfig/network-scripts/ifcfg-eth0"
        sed_gw_com = "sed -i 's/GATEWAY.*/GATEWAY=" + gateway + "/g' /etc/sysconfig/network"
        restart_network = '/etc/init.d/network restart'
        inject_data = sed_eth0_ip_com + ';' + sed_eth0_mask_com + ';' + sed_gw_com + ';' + restart_network
        return _inject_when_agent_ready(connect_instance, inject_data, host_ip, image_name, 300)
    elif os_type == 'windows':
        if os_ver == '2008':
            # Windows 2008 names its adapter "Local Area Connection".
            set_ip_comm = 'netsh interface ip set address name=\\"Local Area Connection\\" source=static addr=%s mask=%s gateway=%s' % (
                ipaddr, netmask, gateway)
            set_dns1_comm = 'netsh interface IP set dns name=\\"Local Area Connection\\" source=static %s' % dns1
            set_dns2_comm = 'netsh interface ip add dns name=\\"Local Area Connection\\" %s index=2' % dns2
        elif os_ver == '2012':
            # Windows 2012 names its adapter "Ethernet".
            set_ip_comm = 'netsh interface ip set address name=Ethernet source=static addr=%s mask=%s gateway=%s' % (
                ipaddr, netmask, gateway)
            set_dns1_comm = 'netsh interface IP set dns Ethernet source=static %s' % dns1
            set_dns2_comm = 'netsh interface ip add dns Ethernet %s index=2' % dns2
        else:
            err_msg = 'the windows template os ver is %s which ip inject is not support!' % os_ver
            return False, err_msg
        inject_data = set_ip_comm + ' && ' + set_dns1_comm + ' && ' + set_dns2_comm
        # Windows guests take longer to bring the agent up; allow 600s.
        return _inject_when_agent_ready(connect_instance, inject_data, host_ip, image_name, 600)
    else:
        err_msg = 'unknown image os type %s!' % os_type
        return False, err_msg