def edit(args):
    """Edit VM parameters (虚拟机参数修改).

    Optional args keys: 'vcpu' and 'mem' (coerced to int; silently
    ignored when missing or unparseable, matching original behavior),
    'remarks'.  Required: 'uuid', 'req_user'.
    Returns a dict with 'res' and, on failure, 'err'.
    """
    def _to_int(value):
        # None signals "not provided / not a valid integer".
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    vcpu = _to_int(args['vcpu']) if 'vcpu' in args else None
    mem = _to_int(args['mem']) if 'mem' in args else None
    remarks = args.get('remarks')

    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    # Only users with operate permission on this VM may edit it.
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    res = api.edit_vm(args['uuid'], vcpu, mem, remarks)
    if res:
        return {'res': True}
    return {'res': False, 'err': ERR_VM_EDIT}
def create(args):
    """Create and start a VM (创建虚拟机).

    Required args: 'image_id', 'vcpu', 'mem', 'req_user', plus a network
    ('net_type_id' or 'vlan_id') and a placement ('group_id' or 'host_id').
    Returns {'res': True, 'uuid': ...} on success.
    """
    # A network must be specified either by net type or by concrete vlan.
    if 'net_type_id' not in args and 'vlan_id' not in args:
        return {'res': False, 'err': ERR_VM_CREATE_ARGS_VLAN}
    # A placement target must be specified: a group or a concrete host.
    if 'group_id' not in args and 'host_id' not in args:
        return {'res': False, 'err': ERR_VM_CREATE_ARGS_HOST}

    optional_args = ['group_id', 'host_id', 'net_type_id', 'vlan_id',
                     'diskname', 'remarks']
    kwargs = {field: args[field] for field in optional_args if field in args}

    api = VmAPI()
    vm = api.create_vm(args['image_id'], args['vcpu'], args['mem'], **kwargs)
    # create_vm returns False when the VM cannot be defined (was `== False`).
    if vm is False:
        return {'res': False, 'err': ERR_VM_DEFINE}
    vm.set_creator(args['req_user'].username)
    vm.start()
    return {'res': True, 'uuid': vm.uuid}
def get_list(args):
    """Return the VM list of a group (获取虚拟机列表)."""
    api = VmAPI()
    group_api = GroupAPI()
    group = group_api.get_group_by_id(args['group_id'])
    # Listing requires manage permission on the group.
    if not group.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    vm_list = api.get_vm_list_by_group_id(args['group_id'],
                                          order='-create_time')
    ret_list = []
    for vm in vm_list:
        created = vm.create_time.strftime('%Y-%m-%d %H:%M:%S') if vm.create_time else ''
        ret_list.append({
            'uuid': vm.uuid,
            'name': vm.name,
            'center_id': vm.center_id,
            'center_name': vm.center_name,
            'group_id': vm.group_id,
            'group_name': vm.group_name,
            'host_id': vm.host_id,
            'host_ipv4': vm.host_ipv4,
            'image_id': vm.image_id,
            'image': vm.image,
            'ipv4': vm.ipv4,
            'vcpu': vm.vcpu,
            'mem': vm.mem,
            'creator': vm.creator,
            'create_time': created,
            'remarks': vm.remarks
        })
    return {'res': True, 'list': ret_list}
def migrate(args):
    """Migrate a VM to another host (same-center only)."""
    api = VmAPI()
    host_api = HostAPI()

    # Validate the VM being migrated.
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    # Validate the destination host.
    host = host_api.get_host_by_id(args['host_id'])
    if not host.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    # VM and destination host must belong to the same center.
    if vm.center_id != host.center_id:
        return {'res': False, 'err': ERR_AUTH_PERM}
    # Destination must differ from the current host.
    if vm.host_id == host.id:
        return {'res': False, 'err': ERR_VM_MIGRATE}

    if api.migrate_vm(args['uuid'], args['host_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_VM_MIGRATE}
def create(args):
    """Create a VM (创建虚拟机); the new VM is deliberately left stopped."""
    # A network must be given either as a net type or a concrete vlan.
    if 'net_type_id' not in args and 'vlan_id' not in args:
        return {'res': False, 'err': ERR_VM_CREATE_ARGS_VLAN}
    # Placement must be given either as a group or a concrete host.
    if 'group_id' not in args and 'host_id' not in args:
        return {'res': False, 'err': ERR_VM_CREATE_ARGS_HOST}

    optional_args = [
        'group_id',
        'host_id',
        'net_type_id',
        'vlan_id',
        'diskname',
        'remarks',
        'ipv4'
    ]
    kwargs = {name: args[name] for name in optional_args if name in args}

    api = VmAPI()
    vm = api.create_vm(args['image_id'], args['vcpu'], args['mem'], **kwargs)
    if vm == False:
        return {'res': False, 'err': ERR_VM_DEFINE}
    vm.set_creator(args['req_user'].username)
    # NOTE: vm.start() intentionally not called — the caller starts it later.
    return {'res': True, 'uuid': vm.uuid}
def op(args):
    """Perform an operation on a VM (虚拟机操作).

    args['op'] selects the action: 'delete'/'reset' go through VmAPI;
    'start'/'reboot'/'shutdown'/'poweroff' are dispatched to the VM
    object's method of the same name.
    """
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    if args['op'] == 'delete':
        res = api.delete_vm(args['uuid'])
    elif args['op'] == 'reset':
        res = api.reset_vm(args['uuid'])
    else:
        # Map operation code -> VM method name.
        op_list = {
            'start': 'start',
            'reboot': 'reboot',
            'shutdown': 'shutdown',
            'poweroff': 'poweroff'
        }
        if args['op'] not in op_list:
            return {'res': False, 'err': ERR_VM_NO_OP}
        try:
            res = getattr(vm, op_list[args['op']])()
        except Error as e:
            return {'res': False, 'err': e.err}
        # Non-project exceptions propagate unchanged; the original
        # `except Exception: raise e` plus unreachable return was dead code.

    if res:
        return {'res': res}
    return {'res': res, 'err': ERR_VM_OP}
def migrate(args):
    """Migrate a VM to another host within the same center."""
    api = VmAPI()
    host_api = HostAPI()

    # Validate the VM being migrated.
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    # Validate the destination host.
    host = host_api.get_host_by_id(args['host_id'])
    if not host.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    # VM and destination host must belong to the same center.
    if vm.center_id != host.center_id:
        return {'res': False, 'err': ERR_AUTH_PERM}
    # Refuse migration onto the host the VM already runs on.
    if vm.host_id == host.id:
        return {'res': False, 'err': ERR_VM_MIGRATE_SAME_HOST}

    if api.migrate_vm(args['uuid'], args['host_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_VM_MIGRATE}
def __init__(self, manager=None, storage_api=None, vm_api=None,
             group_api=None, quota=None):
    """Wire up collaborators; each is injectable (e.g. for tests) and
    falls back to the real implementation when not supplied."""
    self.manager = manager if manager else CephManager()
    self.storage_api = storage_api if storage_api else StorageAPI()
    self.vm_api = vm_api if vm_api else VmAPI()
    self.group_api = group_api if group_api else GroupAPI()
    self.quota = quota if quota else CephQuota()
    super().__init__()
def op(args):
    """Perform an operation on a VM (虚拟机操作).

    'delete' and 'reset' are handled through VmAPI; the power-state
    operations are dispatched to the VM object itself.
    """
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    if args['op'] == 'delete':
        res = api.delete_vm(args['uuid'])
    elif args['op'] == 'reset':
        res = api.reset_vm(args['uuid'])
    else:
        # Operation code -> VM method name.
        op_list = {
            'start': 'start',
            'reboot': 'reboot',
            'shutdown': 'shutdown',
            'poweroff': 'poweroff'
        }
        if args['op'] not in op_list:
            return {'res': False, 'err': ERR_VM_NO_OP}
        try:
            res = getattr(vm, op_list[args['op']])()
        except Error as e:
            return {'res': False, 'err': e.err}
        # Other exceptions propagate; the original re-raise branch with an
        # unreachable return after it has been removed.

    if res:
        return {'res': res}
    return {'res': res, 'err': ERR_VM_OP}
def status(args):
    """Return the current status of a VM (获取虚拟机状态)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    # Status is visible to users who manage the VM.
    if not vm.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    return {'res': True, 'status': vm.status}
def status(args):
    """Return the current status of a VM (获取虚拟机状态)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    return {'res': True, 'status': vm.status}
def gpu_umount(args):
    """Detach a GPU from the VM it is currently mounted on."""
    api = GPUAPI()
    vm_api = VmAPI()
    gpu = api.get_gpu_by_id(args['gpu_id'])
    vm = vm_api.get_vm_by_uuid(gpu.vm)
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    # A GPU cannot be detached while the VM is running.
    if vm.is_running():
        return {'res': False, 'err': ERR_UMOUNT_RUNNING}
    if api.umount(args['gpu_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_GPU_UMOUNT}
def rollback_snap(args):
    """Roll a VM disk back to a snapshot (回滚快照)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    ok = api.rollback_vm_disk_snap(args['uuid'], args['snap_id'])
    return {'res': True} if ok else {'res': False, 'err': ERR_VM_ROLLBACK_SNAP}
def reset(args):
    """Reset a VM from an image (镜像重置)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    ok = api.reset_vm(args['uuid'], args['image_id'])
    return {'res': True} if ok else {'res': False, 'err': ERR_VM_RESET}
def set_snap_remarks(args):
    """Set the remarks text of a disk snapshot (设置快照备注)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    ok = api.set_vm_disk_snap_remarks(args['uuid'], args['snap_id'],
                                      args['remarks'])
    return {'res': True} if ok else {'res': False, 'err': ERR_VM_EDIT_REMARKS}
def create_snap(args):
    """Create a disk snapshot of a VM (创建快照); remarks are optional."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    remarks = args.get('remarks', '')
    ok = api.create_vm_disk_snap(args['uuid'], remarks=remarks)
    return {'res': True} if ok else {'res': False, 'err': ERR_VM_CREATE_SNAP}
def gpu_mount(args):
    """Attach a GPU to a stopped VM in the same group."""
    api = GPUAPI()
    vm_api = VmAPI()
    vm = vm_api.get_vm_by_uuid(args['vm_id'])
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    # Mounting requires the VM to be powered off.
    if vm.is_running():
        return {'res': False, 'err': ERR_MOUNT_RUNNING}
    gpu = api.get_gpu_by_id(args['gpu_id'])
    # GPU and VM must belong to the same group.
    if vm.group_id != gpu.group_id:
        return {'res': False, 'err': ERR_GPU_MOUNT}
    if api.mount(args['vm_id'], args['gpu_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_GPU_MOUNT}
def mount(args):
    """Mount a volume onto a stopped VM."""
    api = VolumeAPI()
    vm_api = VmAPI()
    vm = vm_api.get_vm_by_uuid(args['vm_uuid'])
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    # Mounting requires the VM to be powered off.
    if vm.is_running():
        return {'res': False, 'err': ERR_MOUNT_RUNNING}
    volume = api.get_volume_by_id(args['volume_id'])
    if not volume.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    if api.mount(args['vm_uuid'], args['volume_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_VOLUME_MOUNT}
def __init__(self, manager=None, vm_api=None, host_api=None, group_api=None):
    """Wire up collaborators; each is injectable and defaults to the
    real implementation when not supplied."""
    self.manager = manager if manager else GPUManager()
    self.vm_api = vm_api if vm_api else VmAPI()
    self.host_api = host_api if host_api else HostAPI()
    self.group_api = group_api if group_api else GroupAPI()
def mount(args):
    """Mount a ceph volume onto a stopped VM."""
    api = CephVolumeAPI()
    vm_api = VmAPI()
    vm = vm_api.get_vm_by_uuid(args['vm_uuid'])
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    # Mounting requires the VM to be powered off.
    if vm.is_running():
        return {'res': False, 'err': ERR_MOUNT_RUNNING}
    volume = api.get_volume_by_id(args['volume_id'])
    if not volume.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    if api.mount(args['vm_uuid'], args['volume_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_VOLUME_MOUNT}
def get(args):
    """Return detailed information about a VM (获取虚拟机详细信息)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    # Empty string when create_time is unset (replaces a bare `except:`
    # that would have hidden any unrelated error).
    if vm.create_time:
        create_time = vm.create_time.strftime('%Y-%m-%d %H:%M:%S')
    else:
        create_time = ''
    info = {
        'uuid': vm.uuid,
        'name': vm.name,
        'vcpu': vm.vcpu,
        'mem': vm.mem,
        'creator': vm.creator,
        'create_time': create_time,
        'remarks': vm.remarks,
        'deleted': vm.deleted,
        'image_id': vm.image_id,
        'image_snap': vm.image_snap,
        'image': vm.image,
        'host_id': vm.host_id,
        'host_ipv4': vm.host_ipv4,
        'group_id': vm.group_id,
        'group_name': vm.group_name,
        'center_id': vm.center_id,
        'center_name': vm.center_name,
        'vlan_id': vm.vlan_id,
        'vlan_name': vm.vlan_name,
        'mac': vm.mac,
        'ipv4': vm.ipv4,
        'ceph_id': vm.ceph_id,
        'ceph_host': vm.ceph_host,
        'ceph_pool': vm.ceph_pool
    }
    return {'res': True, 'info': info}
def setUp(self):
    """Build the full fixture graph (center -> group -> host -> vlan/ceph
    -> image) and create one real VM plus one GPU record for the tests."""
    # APIs under test.
    self.vmapi = VmAPI()
    self.gpuapi = GPUAPI()
    # Two centers; the fixtures below all hang off c1.
    self.c1 = create_center('测试中心1', '位置1', '备注1')
    self.c2 = create_center('测试中心2', '位置2', '备注2')
    self.g1 = create_group(self.c1, '测试集群1', '备注11')
    self.g2 = create_group(self.c1, '测试集群2', '备注')
    self.h1 = create_host(self.g1, '1.1.1.1')
    self.h2 = create_host(self.g2, '1.1.1.2')
    self.vt1 = create_vlantype('vlantype1')
    self.v1 = create_vlan(str(TEST_VLAN), str(TEST_BR), self.vt1)
    self.ip1 = create_ip(self.v1, TEST_MAC, TEST_IP)
    # NOTE(review): self.h1 is reassigned here, replacing the '1.1.1.1'
    # host created above — confirm the first create_host call is needed.
    self.h1 = create_host(self.g1, str(TEST_HOST), True, [self.v1])
    self.ch1 = create_ceph_host(self.c1, str(TEST_CEPH['host']), TEST_CEPH['port'], str(TEST_CEPH['uuid']))
    self.cp1 = create_ceph_image_pool(self.ch1, TEST_CEPH['pool'])
    self.it1 = create_imagetype('imagetype1')
    self.x1 = create_xml('linux', TEST_XML)
    self.i1 = create_image(self.cp1, self.x1, self.it1, 'image1', 'v0.1', TEST_IMAGE)
    self.vcpu1 = 2
    self.mem1 = 2048
    # A real VM is created on h1; tearDown is expected to clean it up.
    self.vm1 = self.vmapi.create_vm(self.i1.id, self.vcpu1, self.mem1, host_id=self.h1.id, vlan_id=self.v1.id)
    self.gpu1 = create_gpu(self.h1, '0000:84:00:0')
def migrate(args):
    """Migrate a VM to another host after a series of pre-flight checks:
    permissions, same center, different host, no mounted GPU, and
    volume/group compatibility."""
    api = VmAPI()
    host_api = HostAPI()
    gpu_api = GPUAPI()
    volume_api = VolumeAPI()

    # Validate the VM being migrated.
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    # Validate the destination host.
    host = host_api.get_host_by_id(args['host_id'])
    if not host.managed_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}

    # Source and destination must share a center (same ceph backend).
    if vm.center_id != host.center_id:
        return {'res': False, 'err': ERR_VM_MIGRATE_DIFF_CEPH}
    # Destination must not be the host the VM already runs on.
    if vm.host_id == host.id:
        return {'res': False, 'err': ERR_VM_MIGRATE_SAME_HOST}

    # VMs with a mounted GPU cannot be migrated.
    gpu_list = gpu_api.get_gpu_list_by_vm_uuid(args['uuid'])
    if len(gpu_list) > 0:
        return {'res': False, 'err': ERR_VM_MIGRATE_WITHGPU}

    # With mounted volumes, source and destination must be in one group.
    volume_list = volume_api.get_volume_list_by_vm_uuid(args['uuid'])
    if len(volume_list) > 0 and vm.group_id != host.group_id:
        return {'res': False, 'err': ERR_VM_MIGRATE_WITHVOL}

    if api.migrate_vm(args['uuid'], args['host_id']):
        return {'res': True}
    return {'res': False, 'err': ERR_VM_MIGRATE}
def __init__(self, manager=None, vm_api=None, host_api=None):
    """Wire up collaborators; each is injectable and defaults to the
    real implementation when not supplied."""
    self.manager = manager if manager else GPUManager()
    self.vm_api = vm_api if vm_api else VmAPI()
    self.host_api = host_api if host_api else HostAPI()
def get_snap_list(args):
    """List disk snapshots of a VM (虚拟机快照列表)."""
    api = VmAPI()
    vm = api.get_vm_by_uuid(args['uuid'])
    if not vm:
        return {'res': False, 'err': ERR_VM_UUID}
    if not vm.can_operate_by(args['req_user']):
        return {'res': False, 'err': ERR_AUTH_PERM}
    snaps = api.get_vm_disk_snap_list(args['uuid'])
    ret_list = [
        {
            'id': snap.id,
            'fullname': snap.fullname,
            'cephpool_id': snap.cephpool_id,
            'disk': snap.disk,
            'snap': snap.snap,
            'create_time': snap.create_time,
            'remarks': snap.remarks
        }
        for snap in snaps
    ]
    return {'res': True, 'list': ret_list}
def __init__(self, manager=None, storage_api=None, vm_api=None,
             group_api=None, quota=None):
    """Wire up collaborators; each is injectable (e.g. for tests) and
    falls back to the real ceph-backed implementation when omitted."""
    self.manager = manager if manager else CephManager()
    self.storage_api = storage_api if storage_api else CephStorageAPI()
    self.vm_api = vm_api if vm_api else VmAPI()
    self.group_api = group_api if group_api else GroupAPI()
    self.quota = quota if quota else CephQuota()
    super().__init__()
class VolumeAPI(object):
    """Volume management: create/delete/resize volumes and mount/umount
    them onto VMs as rbd-backed libvirt disks."""

    def __init__(self, manager=None, storage_api=None, vm_api=None, group_api=None, quota=None):
        # Collaborators are injectable; defaults are the real implementations.
        if not manager:
            self.manager = CephManager()
        else:
            self.manager = manager
        if not storage_api:
            self.storage_api = StorageAPI()
        else:
            self.storage_api = storage_api
        if not vm_api:
            self.vm_api = VmAPI()
        else:
            self.vm_api = vm_api
        if not group_api:
            self.group_api = GroupAPI()
        else:
            self.group_api = group_api
        if not quota:
            self.quota = CephQuota()
        else:
            self.quota = quota
        super().__init__()

    def create(self, pool_id, size, group_id=None):
        """Create a volume of `size` in the given pool; raises Error on
        missing pool or non-positive/non-int size."""
        cephpool = self.storage_api.get_pool_by_id(pool_id)
        if not cephpool:
            raise Error(ERR_VOLUME_CREATE_NOPOOL)
        if type(size) != int or size <= 0:
            raise Error(ERR_INT_VOLUME_SIZE)
        return self.manager.create_volume(cephpool, size, group_id)

    def delete(self, volume_id, force=False):
        """Delete an unmounted volume.

        The rbd image is first renamed to a timestamped 'x_...' name; if
        the DB delete then fails, the rename is rolled back.  With
        force=True the renamed image is also removed from ceph.
        """
        volume = self.get_volume_by_id(volume_id)
        if volume.vm:
            raise Error(ERR_DEL_MOUNTED_VOLUME)
        cephpool_id = volume.cephpool_id
        tmp_volume_name = 'x_{}_{}'.format(
            datetime.now().strftime("%Y%m%d%H%M%S"), volume_id)
        if self.storage_api.mv(cephpool_id, volume_id, tmp_volume_name):
            if not volume.delete():
                # DB delete failed: undo the rename so the volume stays usable.
                self.storage_api.mv(cephpool_id, tmp_volume_name, volume_id)
                return False
            if force == True:
                if not self.storage_api.rm(cephpool_id, tmp_volume_name):
                    print(ERR_CEPH_RM)
        else:
            raise Error(ERR_CEPH_MV)
        return True

    def resize(self, volume_id, size):
        """Resize a volume after validating per-pool group and volume quotas."""
        volume = self.get_volume_by_id(volume_id)
        #if not self.quota.group_quota_validate(volume.group_id, size, volume.size):
        if not self.quota.group_pool_quota_validate(
                volume.group_id, volume.cephpool_id, size, volume.size):
            raise Error(ERR_VOLUME_QUOTA_G)
        # if not self.quota.volume_quota_validate(volume.group_id, size):
        if not self.quota.volume_pool_quota_validate(volume.group_id, volume.cephpool_id, size):
            raise Error(ERR_VOLUME_QUOTA_V)
        if self.storage_api.resize(volume.cephpool_id, volume.id, size):
            return volume.resize(size)
        else:
            raise Error(ERR_CEPH_RESIZE)

    def _get_disk_xml(self, volume, dev):
        # Render the libvirt <disk> XML for an rbd-backed volume at `dev`.
        cephpool = self.storage_api.get_pool_by_id(volume.cephpool_id)
        xml = volume.xml_tpl % {
            'driver': 'qemu',
            'auth_user': cephpool.username,
            'auth_type': 'ceph',
            'auth_uuid': cephpool.uuid,
            'source_protocol': 'rbd',
            'pool': cephpool.pool,
            'name': volume.id,
            'host': cephpool.host,
            'port': cephpool.port,
            'dev': dev
        }
        return xml

    def mount(self, vm_uuid, volume_id):
        """Mount a volume onto a VM; VM and volume must share a group.
        Picks the first free vdX device name, then attaches via libvirt;
        a failed attach rolls back the DB mount."""
        volume = self.get_volume_by_id(volume_id)
        vm = self.vm_api.get_vm_by_uuid(vm_uuid)
        if vm.group_id != volume.group_id:
            return False
        disk_list = vm.get_disk_list()
        mounted_volume_list = self.get_volume_list(vm_uuid=vm_uuid)
        for v in mounted_volume_list:
            disk_list.append(v.dev)
        print(disk_list)
        # Find the first unused device name: vda, vdb, ..., vdz, vdaa, ...
        # (i is encoded base-26 with letters a-z).
        i = 0
        while True:
            j = i
            d = ''
            while True:
                d += chr(ord('a') + j % 26)
                j //= 26
                if j <= 0:
                    break
            if not 'vd' + d in disk_list:
                dev = 'vd' + d
                break
            i += 1
        xml = self._get_disk_xml(volume, dev)
        print(xml)
        if volume.mount(vm_uuid, dev):
            try:
                if self.vm_api.attach_device(vm_uuid, xml):
                    return True
            except Error as e:
                # Attach failed: roll back the DB-side mount.
                volume.umount()
                raise e
        return False

    def umount(self, volume_id):
        """Detach a volume.  If its VM still exists, detach via libvirt
        first (re-attaching on DB failure); otherwise just clear the DB."""
        volume = self.get_volume_by_id(volume_id)
        vm_uuid = volume.vm
        if self.vm_api.vm_uuid_exists(vm_uuid):
            vm = self.vm_api.get_vm_by_uuid(vm_uuid)
            disk_list = vm.get_disk_list()
            if not volume.dev in disk_list:
                # Device already gone from the domain; only DB cleanup needed.
                if volume.umount():
                    return True
                return False
            xml = self._get_disk_xml(volume, volume.dev)
            if self.vm_api.detach_device(vm_uuid, xml):
                try:
                    if volume.umount():
                        return True
                except Error as e:
                    # DB umount failed: re-attach to restore consistency.
                    self.vm_api.attach_device(vm_uuid, xml)
                    raise e
        else:
            if volume.umount():
                return True
        return False

    def set_remark(self, volume_id, content):
        volume = self.get_volume_by_id(volume_id)
        return volume.set_remark(content)

    def get_volume_list_by_pool_id(self, cephpool_id):
        # Pool lookup presumably validates cephpool_id — result unused.
        cephpool = self.storage_api.get_pool_by_id(cephpool_id)
        return self.manager.get_volume_list(cephpool_id=cephpool_id)

    def get_volume_list_by_user_id(self, user_id):
        return self.manager.get_volume_list(user_id=user_id)

    def get_volume_list_by_group_id(self, group_id):
        return self.manager.get_volume_list(group_id=group_id)

    def get_volume_list_by_vm_uuid(self, vm_uuid):
        return self.manager.get_volume_list(vm_uuid=vm_uuid)

    def get_volume_list(self, user_id=None, creator=None, cephpool_id=None, group_id=None, vm_uuid=None):
        return self.manager.get_volume_list(user_id=user_id, creator=creator,
            cephpool_id=cephpool_id, group_id=group_id, vm_uuid=vm_uuid)

    def get_volume_by_id(self, volume_id):
        return self.manager.get_volume_by_id(volume_id)

    def set_user_id(self, volume_id, user_id):
        volume = self.get_volume_by_id(volume_id)
        return volume.set_user_id(user_id)

    def set_group_id(self, volume_id, group_id):
        volume = self.get_volume_by_id(volume_id)
        return volume.set_group_id(group_id)
class GPUAPI(object):
    """GPU device API (manager-backed): lookups, remarks, and
    mount/umount of GPUs to VMs via libvirt device attach/detach."""

    def __init__(self, manager=None, vm_api=None, host_api=None, group_api=None):
        # Collaborators are injectable; defaults are the real implementations.
        if manager:
            self.manager = manager
        else:
            self.manager = GPUManager()
        if vm_api:
            self.vm_api = vm_api
        else:
            self.vm_api = VmAPI()
        if host_api:
            self.host_api = host_api
        else:
            self.host_api = HostAPI()
        if group_api:
            self.group_api = group_api
        else:
            self.group_api = GroupAPI()

    def get_gpu_list_by_host_id(self, host_id):
        # Host lookup presumably validates host_id — its result is unused.
        host = self.host_api.get_host_by_id(host_id)
        return self.manager.get_gpu_list(host_id = host_id)

    def get_gpu_list_by_group_id(self, group_id):
        # Group lookup presumably validates group_id — its result is unused.
        group = self.group_api.get_group_by_id(group_id)
        return self.manager.get_gpu_list(group_id=group_id)

    def get_gpu_by_id(self, gpu_id):
        return self.manager.get_gpu_by_id(gpu_id)

    def get_gpu_by_address(self, address):
        return self.manager.get_gpu_by_address(address)

    def get_gpu_list_by_vm_uuid(self, vm_uuid):
        return self.manager.get_gpu_list(vm_uuid=vm_uuid)

    def set_remarks(self, gpu_id, content):
        gpu = self.manager.get_gpu_by_id(gpu_id)
        return gpu.set_remarks(content)

    def mount(self, vm_id, gpu_id):
        """Mount a GPU onto a VM; GPU passthrough requires the GPU to
        live on the VM's own host.  A failed libvirt attach rolls back
        the DB-side mount."""
        gpu = self.manager.get_gpu_by_id(gpu_id)
        vm = self.vm_api.get_vm_by_uuid(vm_id)
        if vm.host_id != gpu.host_id:
            return False
        if gpu.mount(vm_id):
            if self.vm_api.attach_device(vm_id, gpu.xml_desc):
                return True
            # Attach failed: undo the DB mount.
            gpu.umount()
        return False

    def umount(self, gpu_id):
        """Detach a GPU.  If the owning VM still exists, detach via
        libvirt first (re-attaching on DB failure); otherwise only the
        DB record is cleared."""
        gpu = self.manager.get_gpu_by_id(gpu_id)
        if self.vm_api.vm_uuid_exists(gpu.vm):
            vm = self.vm_api.get_vm_by_uuid(gpu.vm)
            if vm and vm.host_id != gpu.host_id:
                return False
            if self.vm_api.detach_device(vm.uuid, gpu.xml_desc):
                if gpu.umount():
                    return True
                # DB umount failed: re-attach to restore consistency.
                self.vm_api.attach_device(vm.uuid, gpu.xml_desc)
        else:
            if gpu.umount():
                return True
        return False
class CephVolumeAPI(object):
    """Ceph volume management: quota-checked create/delete/resize and
    mount/umount of volumes onto VMs as rbd-backed libvirt disks."""

    def __init__(self, manager=None, storage_api=None, vm_api=None, group_api=None, quota=None):
        # Collaborators are injectable; defaults are the real implementations.
        if not manager:
            self.manager = CephManager()
        else:
            self.manager = manager
        if not storage_api:
            self.storage_api = CephStorageAPI()
        else:
            self.storage_api = storage_api
        if not vm_api:
            self.vm_api = VmAPI()
        else:
            self.vm_api = vm_api
        if not group_api:
            self.group_api = GroupAPI()
        else:
            self.group_api = group_api
        if not quota:
            self.quota = CephQuota()
        else:
            self.quota = quota
        super().__init__()

    def create(self, group_id, size):
        """Create a volume for a group in its center's volume pool,
        after validating group and per-volume quotas."""
        group = self.group_api.get_group_by_id(group_id)
        if not self.quota.group_quota_validate(group_id, size):
            raise Error(ERR_VOLUME_QUOTA_G)
        if not self.quota.volume_quota_validate(group_id, size):
            raise Error(ERR_VOLUME_QUOTA_V)
        cephpool = self.storage_api.get_volume_pool_by_center_id(group.center_id)
        if not cephpool:
            raise Error(ERR_VOLUME_CREATE_NOPOOL)
        if type(size) != int or size <= 0:
            raise Error(ERR_INT_VOLUME_SIZE)
        return self.manager.create_volume(cephpool, size, group_id)

    def delete(self, volume_id, force=False):
        """Delete an unmounted volume.  The rbd image is renamed to
        'x_<id>' first; a failed DB delete rolls the rename back.  With
        force=True the renamed image is also removed from ceph."""
        volume = self.get_volume_by_id(volume_id)
        if volume.vm:
            raise Error(ERR_DEL_MOUNTED_VOLUME)
        cephpool_id = volume.cephpool_id
        tmp_volume_name = 'x_' + volume_id
        if self.storage_api.mv(cephpool_id, volume_id, tmp_volume_name):
            if not volume.delete():
                # DB delete failed: undo the rename.
                self.storage_api.mv(cephpool_id, tmp_volume_name, volume_id)
                return False
            if force == True:
                if not self.storage_api.rm(cephpool_id, tmp_volume_name):
                    print(ERR_CEPH_RM)
        else:
            raise Error(ERR_CEPH_MV)
        return True

    def resize(self, volume_id, size):
        """Resize a volume after validating group and volume quotas."""
        volume = self.get_volume_by_id(volume_id)
        if not self.quota.group_quota_validate(volume.group_id, size, volume.size):
            raise Error(ERR_VOLUME_QUOTA_G)
        if not self.quota.volume_quota_validate(volume.group_id, size):
            raise Error(ERR_VOLUME_QUOTA_V)
        if self.storage_api.resize(volume.cephpool_id, volume.id, size):
            return volume.resize(size)
        else:
            raise Error(ERR_CEPH_RESIZE)

    def _get_disk_xml(self, volume, dev):
        # Render the libvirt <disk> XML for an rbd-backed volume at `dev`.
        cephpool = self.storage_api.get_pool_by_id(volume.cephpool_id)
        xml = volume.xml_tpl % {
            'type': 'network',
            'device': 'disk',
            'driver': 'qemu',
            'auth_user': cephpool.username,
            'auth_type': 'ceph',
            'auth_uuid': cephpool.uuid,
            'source_protocol': 'rbd',
            'pool': cephpool.pool,
            'name': volume.id,
            'host': cephpool.host,
            'port': cephpool.port,
            'dev': dev
        }
        return xml

    def mount(self, vm_uuid, volume_id):
        """Mount a volume onto a VM in the same group.  Picks the first
        free vdX device name, then attaches via libvirt; a failed attach
        rolls back the DB mount."""
        volume = self.get_volume_by_id(volume_id)
        vm = self.vm_api.get_vm_by_uuid(vm_uuid)
        if vm.group_id != volume.group_id:
            return False
        disk_list = vm.get_disk_list()
        mounted_volume_list = self.get_volume_list(vm_uuid=vm_uuid)
        for v in mounted_volume_list:
            disk_list.append(v.dev)
        print(disk_list)
        # First unused device name: vda, vdb, ..., vdz, vdaa, ...
        # (i encoded base-26 with letters a-z).
        i = 0
        while True:
            j = i
            d = ''
            while True:
                d += chr(ord('a') + j % 26)
                j //= 26
                if j <= 0:
                    break
            if not 'vd' + d in disk_list:
                dev = 'vd' + d
                break
            i+=1
        xml = self._get_disk_xml(volume, dev)
        print(xml)
        if volume.mount(vm_uuid, dev):
            try:
                if self.vm_api.attach_device(vm_uuid, xml):
                    return True
            except Error as e:
                # Attach failed: roll back the DB-side mount.
                volume.umount()
                raise e
        return False

    def umount(self, volume_id):
        """Detach a volume.  If its VM still exists, detach via libvirt
        first (re-attaching on DB failure); otherwise just clear the DB."""
        volume = self.get_volume_by_id(volume_id)
        vm_uuid = volume.vm
        if self.vm_api.vm_uuid_exists(vm_uuid):
            vm = self.vm_api.get_vm_by_uuid(vm_uuid)
            disk_list = vm.get_disk_list()
            if not volume.dev in disk_list:
                # Device already gone from the domain; DB cleanup only.
                if volume.umount():
                    return True
                return False
            xml = self._get_disk_xml(volume, volume.dev)
            if self.vm_api.detach_device(vm_uuid, xml):
                try:
                    if volume.umount():
                        return True
                except Error as e:
                    # DB umount failed: re-attach to restore consistency.
                    self.vm_api.attach_device(vm_uuid, xml)
                    raise e
        else:
            if volume.umount():
                return True
        return False

    def set_remark(self, volume_id, content):
        volume = self.get_volume_by_id(volume_id)
        return volume.set_remark(content)

    def get_volume_list_by_pool_id(self, cephpool_id):
        # Pool lookup presumably validates cephpool_id — result unused.
        cephpool = self.storage_api.get_pool_by_id(cephpool_id)
        return self.manager.get_volume_list(cephpool_id=cephpool_id)

    def get_volume_list_by_user_id(self, user_id):
        return self.manager.get_volume_list(user_id=user_id)

    def get_volume_list_by_group_id(self, group_id):
        return self.manager.get_volume_list(group_id=group_id)

    def get_volume_list_by_vm_uuid(self, vm_uuid):
        return self.manager.get_volume_list(vm_uuid=vm_uuid)

    def get_volume_list(self, user_id=None, creator=None, cephpool_id=None, group_id=None, vm_uuid=None):
        return self.manager.get_volume_list(user_id=user_id, creator=creator,
            cephpool_id=cephpool_id, group_id=group_id, vm_uuid=vm_uuid)

    def get_volume_by_id(self, volume_id):
        return self.manager.get_volume_by_id(volume_id)

    def set_user_id(self, volume_id, user_id):
        volume = self.get_volume_by_id(volume_id)
        return volume.set_user_id(user_id)

    def set_group_id(self, volume_id, group_id):
        volume = self.get_volume_by_id(volume_id)
        return volume.set_group_id(group_id)
class DeviceTest(TestCase):
    """Integration tests for GPU mount/umount; requires a reachable host
    (ssh/virsh) and a ceph cluster (rbd) — not a pure unit test."""

    def setUp(self):
        # APIs under test.
        self.vmapi = VmAPI()
        self.gpuapi = GPUAPI()
        # Fixture graph: center -> group -> host -> vlan/ceph -> image.
        self.c1 = create_center('测试中心1', '位置1', '备注1')
        self.c2 = create_center('测试中心2', '位置2', '备注2')
        self.g1 = create_group(self.c1, '测试集群1', '备注11')
        self.g2 = create_group(self.c1, '测试集群2', '备注')
        self.h1 = create_host(self.g1, '1.1.1.1')
        self.h2 = create_host(self.g2, '1.1.1.2')
        self.vt1 = create_vlantype('vlantype1')
        self.v1 = create_vlan(str(TEST_VLAN), str(TEST_BR), self.vt1)
        self.ip1 = create_ip(self.v1, TEST_MAC, TEST_IP)
        # NOTE(review): self.h1 is reassigned, replacing the '1.1.1.1'
        # host created above — confirm the first create_host is needed.
        self.h1 = create_host(self.g1, str(TEST_HOST), True, [self.v1])
        self.ch1 = create_ceph_host(self.c1, str(TEST_CEPH['host']), TEST_CEPH['port'], str(TEST_CEPH['uuid']))
        self.cp1 = create_ceph_image_pool(self.ch1, TEST_CEPH['pool'])
        self.it1 = create_imagetype('imagetype1')
        self.x1 = create_xml('linux', TEST_XML)
        self.i1 = create_image(self.cp1, self.x1, self.it1, 'image1', 'v0.1', TEST_IMAGE)
        self.vcpu1 = 2
        self.mem1 = 2048
        # A real VM is created on h1 for the device tests.
        self.vm1 = self.vmapi.create_vm(self.i1.id, self.vcpu1, self.mem1, host_id=self.h1.id, vlan_id=self.v1.id)
        self.gpu1 = create_gpu(self.h1, '0000:84:00:0')

    def tearDown(self):
        # Best-effort cleanup of the libvirt domain and its rbd image;
        # command results are deliberately ignored.
        cmd = 'ssh %s virsh destroy %s' % (self.h1.ipv4, self.vm1.uuid)
        r, info = subprocess.getstatusoutput(cmd)
        cmd = 'ssh %s virsh undefine %s' % (self.h1.ipv4, self.vm1.uuid)
        r, info = subprocess.getstatusoutput(cmd)
        cmd1 = 'ssh %s rbd rm %s/%s' % (self.cp1.host.host, self.cp1.pool, self.vm1.uuid)
        r1, info1 = subprocess.getstatusoutput(cmd1)

    def test_set_remarks(self):
        # Remarks round-trip through the API into the DB record.
        remarks = 'testseresgfsts'
        self.assertTrue(self.gpuapi.set_remarks(self.gpu1.id, remarks))
        db = DBGPU.objects.get(id=self.gpu1.id)
        self.assertEqual(db.remarks, remarks)

    def test_mount(self):
        # Mount records the VM uuid on the GPU; umount clears it again.
        mounted = self.gpuapi.mount(self.vm1.uuid, self.gpu1.id)
        self.assertTrue(mounted)
        db = DBGPU.objects.get(id=self.gpu1.id)
        self.assertEqual(db.vm, self.vm1.uuid)
        if mounted:
            self.assertTrue(self.gpuapi.umount(self.gpu1.id))
            db = DBGPU.objects.get(id=self.gpu1.id)
            self.assertEqual(db.vm, None)
from compute.api import CenterAPI, GroupAPI, VmAPI, HostAPI from compute.vm.vm import VIR_DOMAIN_HOST_DOWN from device.api import GPUAPI from volume.api import VolumeAPI from api.error import Error from api.error import ERROR_CN from .api import MonitoringAPI api = MonitoringAPI() center_api = CenterAPI() group_api = GroupAPI() vm_api = VmAPI() host_api = HostAPI() gpuapi = GPUAPI() volumeapi = VolumeAPI() def run_ha_monitoring(): """ 虚拟机高可用定时监控 lzx: 2018-09-25 """ global center_api, group_api, vm_api, gpuapi, volumeapi group_list = group_api.get_group_list() vm_list = []
class GPUAPI(object):
    """GPU device API backed directly by DBGPU model queries (wraps rows
    in GPU objects); mount/umount go through libvirt attach/detach."""

    def __init__(self, manager=None, vm_api=None, host_api=None):
        # Collaborators are injectable; defaults are the real implementations.
        if manager:
            self.manager = manager
        else:
            self.manager = GPUManager()
        if vm_api:
            self.vm_api = vm_api
        else:
            self.vm_api = VmAPI()
        if host_api:
            self.host_api = host_api
        else:
            self.host_api = HostAPI()

    def get_gpu_list_by_host_id(self, host_id):
        # Host lookup presumably validates host_id — its result is unused.
        host = self.host_api.get_host_by_id(host_id)
        db_list = DBGPU.objects.filter(host_id = host_id)
        ret_list = []
        for db in db_list:
            ret_list.append(GPU(db=db))
        return ret_list

    def get_gpu_list_by_group_id(self, group_id):
        db_list = DBGPU.objects.filter(host__group_id = group_id)
        ret_list = []
        for db in db_list:
            ret_list.append(GPU(db=db))
        return ret_list

    def get_gpu_by_id(self, gpu_id):
        db = DBGPU.objects.filter(pk = gpu_id)
        if not db.exists():
            raise Error(ERR_GPU_ID)
        return GPU(db=db[0])

    def get_gpu_by_address(self, address):
        db = DBGPU.objects.filter(address = address)
        if not db.exists():
            raise Error(ERR_GPU_ADDRESS)
        return GPU(db=db[0])

    def get_gpu_list_by_vm_uuid(self, vm_uuid):
        db_list = DBGPU.objects.filter(vm = vm_uuid)
        ret_list = []
        for db in db_list:
            ret_list.append(GPU(db=db))
        return ret_list

    def set_remarks(self, gpu_id, content):
        gpu = self.get_gpu_by_id(gpu_id)
        return gpu.set_remarks(content)

    def mount(self, vm_id, gpu_id):
        """Mount a GPU onto a VM; passthrough requires the GPU to live
        on the VM's own host.  A failed attach rolls back the DB mount."""
        gpu = self.get_gpu_by_id(gpu_id)
        vm = self.vm_api.get_vm_by_uuid(vm_id)
        if vm.host_id != gpu.host_id:
            return False
        if gpu.mount(vm_id):
            if self.vm_api.attach_device(vm_id, gpu.xml_desc):
                return True
            # Attach failed: undo the DB mount.
            gpu.umount()
        return False

    def umount(self, gpu_id):
        """Detach a GPU.  If the owning VM still exists, detach via
        libvirt first (re-attaching on DB failure); otherwise only the
        DB record is cleared."""
        gpu = self.get_gpu_by_id(gpu_id)
        if self.vm_api.vm_uuid_exists(gpu.vm):
            vm = self.vm_api.get_vm_by_uuid(gpu.vm)
            if vm and vm.host_id != gpu.host_id:
                return False
            if self.vm_api.detach_device(vm.uuid, gpu.xml_desc):
                if gpu.umount():
                    return True
                # DB umount failed: re-attach to restore consistency.
                self.vm_api.attach_device(vm.uuid, gpu.xml_desc)
        else:
            if gpu.umount():
                return True
        return False
class DeviceTest(TestCase):
    """Integration tests for GPU mount/umount; requires a reachable host
    (ssh/virsh) and a ceph cluster (rbd) — not a pure unit test."""

    def setUp(self):
        # APIs under test.
        self.vmapi = VmAPI()
        self.gpuapi = GPUAPI()
        # Fixture graph: center -> group -> host -> vlan/ceph -> image.
        self.c1 = create_center('测试中心1', '位置1', '备注1')
        self.c2 = create_center('测试中心2', '位置2', '备注2')
        self.g1 = create_group(self.c1, '测试集群1', '备注11')
        self.g2 = create_group(self.c1, '测试集群2', '备注')
        self.h1 = create_host(self.g1, '1.1.1.1')
        self.h2 = create_host(self.g2, '1.1.1.2')
        self.vt1 = create_vlantype('vlantype1')
        self.v1 = create_vlan(str(TEST_VLAN), str(TEST_BR), self.vt1)
        self.ip1 = create_ip(self.v1, TEST_MAC, TEST_IP)
        # NOTE(review): self.h1 is reassigned, replacing the '1.1.1.1'
        # host created above — confirm the first create_host is needed.
        self.h1 = create_host(self.g1, str(TEST_HOST), True, [self.v1])
        self.ch1 = create_ceph_host(self.c1, str(TEST_CEPH['host']), TEST_CEPH['port'], str(TEST_CEPH['uuid']))
        self.cp1 = create_ceph_image_pool(self.ch1, TEST_CEPH['pool'])
        self.it1 = create_imagetype('imagetype1')
        self.x1 = create_xml('linux', TEST_XML)
        self.i1 = create_image(self.cp1, self.x1, self.it1, 'image1', 'v0.1', TEST_IMAGE)
        self.vcpu1 = 2
        self.mem1 = 2048
        # A real VM is created on h1 for the device tests.
        self.vm1 = self.vmapi.create_vm(self.i1.id, self.vcpu1, self.mem1, host_id=self.h1.id, vlan_id=self.v1.id)
        self.gpu1 = create_gpu(self.h1, '0000:84:00:0')

    def tearDown(self):
        # Best-effort cleanup of the libvirt domain and its rbd image;
        # command results are deliberately ignored.
        cmd = 'ssh %s virsh destroy %s' % (self.h1.ipv4, self.vm1.uuid)
        r, info = subprocess.getstatusoutput(cmd)
        cmd = 'ssh %s virsh undefine %s' % (self.h1.ipv4, self.vm1.uuid)
        r, info = subprocess.getstatusoutput(cmd)
        cmd1 = 'ssh %s rbd rm %s/%s' % (self.cp1.host.host, self.cp1.pool, self.vm1.uuid)
        r1, info1 = subprocess.getstatusoutput(cmd1)

    def test_set_remarks(self):
        # Remarks round-trip through the API into the DB record.
        remarks = 'testseresgfsts'
        self.assertTrue(self.gpuapi.set_remarks(self.gpu1.id, remarks))
        db = DBGPU.objects.get(id = self.gpu1.id)
        self.assertEqual(db.remarks, remarks)

    def test_mount(self):
        # Mount records the VM uuid on the GPU; umount clears it again.
        mounted = self.gpuapi.mount(self.vm1.uuid, self.gpu1.id)
        self.assertTrue(mounted)
        db = DBGPU.objects.get(id = self.gpu1.id)
        self.assertEqual(db.vm, self.vm1.uuid)
        if mounted:
            self.assertTrue(self.gpuapi.umount(self.gpu1.id))
            db = DBGPU.objects.get(id=self.gpu1.id)
            self.assertEqual(db.vm, None)