def test_vm_info(self):
    """Create a VM from a template and verify every field vm_lookup() returns."""
    model.templates_create({'name': u'test', 'cdrom': fake_iso})
    task = model.vms_create({
        'name': u'test-vm',
        'template': '/plugins/kimchi/templates/test'
    })
    wait_task(model.task_lookup, task['id'])
    vms = model.vms_get_list()
    # Two VMs: the one just created plus the pre-existing default VM of the
    # test libvirt driver
    self.assertEquals(2, len(vms))
    self.assertIn(u'test-vm', vms)
    # Full set of top-level keys expected in the lookup result
    keys = set(
        ('name', 'state', 'stats', 'uuid', 'memory', 'cpus', 'screenshot',
         'icon', 'graphics', 'users', 'groups', 'access', 'persistent'))
    stats_keys = set(
        ('cpu_utilization', 'mem_utilization', 'net_throughput',
         'net_throughput_peak', 'io_throughput', 'io_throughput_peak'))
    info = model.vm_lookup(u'test-vm')
    self.assertEquals(keys, set(info.keys()))
    self.assertEquals('shutoff', info['state'])
    self.assertEquals('test-vm', info['name'])
    # Memory defaults come from the template defaults ('old' profile)
    self.assertEquals(get_template_default('old', 'memory'), info['memory'])
    self.assertEquals(1, info['cpus'])
    self.assertEquals('plugins/kimchi/images/icon-vm.png', info['icon'])
    self.assertEquals(stats_keys, set(info['stats'].keys()))
    self.assertEquals('vnc', info['graphics']['type'])
    self.assertEquals('127.0.0.1', info['graphics']['listen'])
def test_vm_info(self):
    """Create a VM from a template and verify every field vm_lookup() returns."""
    model.templates_create({'name': u'test', 'cdrom': fake_iso})
    task = model.vms_create({'name': u'test-vm',
                             'template': '/plugins/kimchi/templates/test'})
    wait_task(model.task_lookup, task['id'])
    vms = model.vms_get_list()
    # Two VMs: the one just created plus the pre-existing default VM of the
    # test libvirt driver
    self.assertEquals(2, len(vms))
    self.assertIn(u'test-vm', vms)
    # Full set of top-level keys expected in the lookup result
    keys = set(('name', 'state', 'stats', 'uuid', 'memory', 'cpus',
                'screenshot', 'icon', 'graphics', 'users', 'groups',
                'access', 'persistent'))
    stats_keys = set(('cpu_utilization', 'net_throughput',
                      'net_throughput_peak', 'io_throughput',
                      'io_throughput_peak'))
    info = model.vm_lookup(u'test-vm')
    self.assertEquals(keys, set(info.keys()))
    self.assertEquals('shutoff', info['state'])
    self.assertEquals('test-vm', info['name'])
    # Memory defaults come from the template defaults ('old' profile)
    self.assertEquals(get_template_default('old', 'memory'), info['memory'])
    self.assertEquals(1, info['cpus'])
    self.assertEquals('images/icon-vm.png', info['icon'])
    self.assertEquals(stats_keys, set(info['stats'].keys()))
    self.assertEquals('vnc', info['graphics']['type'])
    self.assertEquals('127.0.0.1', info['graphics']['listen'])
def add_vm(name):
    """Create a VM named *name* from the 'test' template and wait until the
    asynchronous creation task completes."""
    payload = json.dumps({'name': name,
                          'template': '/plugins/kimchi/templates/test'})
    resp = request(host, ssl_port, '/plugins/kimchi/vms', payload, 'POST')
    task = json.loads(resp.read())
    wait_task(model.task_lookup, task['id'])
def test_get_vms(self): vms = json.loads(self.request('/vms').read()) # test_rest.py uses MockModel() which connects to libvirt URI # test:///default. By default this driver already has one VM created self.assertEquals(1, len(vms)) # Create a template as a base for our VMs req = json.dumps({'name': 'test', 'cdrom': fake_iso}) resp = self.request('/templates', req, 'POST') self.assertEquals(201, resp.status) test_users = ['root'] test_groups = ['wheel'] # Now add a couple of VMs to the mock model for i in xrange(10): name = 'vm-%i' % i req = json.dumps({'name': name, 'template': '/templates/test', 'users': test_users, 'groups': test_groups}) resp = self.request('/vms', req, 'POST') self.assertEquals(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) vms = json.loads(self.request('/vms').read()) self.assertEquals(11, len(vms)) vm = json.loads(self.request('/vms/vm-1').read()) self.assertEquals('vm-1', vm['name']) self.assertEquals('shutoff', vm['state']) self.assertEquals([], vm['users']) self.assertEquals([], vm['groups'])
def test_screenshot_refresh(self):
    """A second screenshot request made shortly after the first must be
    served from cache: identical content-type, content-length and
    last-modified headers."""
    # Create a VM
    req = json.dumps({'name': 'test', 'cdrom': fake_iso})
    request(host, ssl_port, '/plugins/kimchi/templates', req, 'POST')
    # FIX: the template was created under /plugins/kimchi/templates, so the
    # VM must reference it there (the original used '/templates/test',
    # unlike the sibling variants of this test)
    req = json.dumps({'name': 'test-vm',
                      'template': '/plugins/kimchi/templates/test'})
    resp = request(host, ssl_port, '/plugins/kimchi/vms', req, 'POST')
    task = json.loads(resp.read())
    wait_task(model.task_lookup, task['id'])
    # Test screenshot refresh for running vm
    request(host, ssl_port, '/plugins/kimchi/vms/test-vm/start', '{}',
            'POST')
    resp = request(host, ssl_port,
                   '/plugins/kimchi/vms/test-vm/screenshot')
    self.assertEquals(200, resp.status)
    self.assertEquals('image/png', resp.getheader('content-type'))
    resp1 = request(host, ssl_port, '/plugins/kimchi/vms/test-vm')
    rspBody = resp1.read()
    testvm_Data = json.loads(rspBody)
    # FIX: the API returns a relative screenshot URI; prefix '/' so the
    # follow-up request uses an absolute path (as the sibling variants do)
    screenshotURL = '/' + testvm_Data['screenshot']
    time.sleep(5)
    resp2 = request(host, ssl_port, screenshotURL)
    self.assertEquals(200, resp2.status)
    # Cached screenshot: headers must match the first response exactly
    self.assertEquals(resp2.getheader('content-type'),
                      resp.getheader('content-type'))
    self.assertEquals(resp2.getheader('content-length'),
                      resp.getheader('content-length'))
    self.assertEquals(resp2.getheader('last-modified'),
                      resp.getheader('last-modified'))
def test_screenshot_refresh(self):
    """A second screenshot request made shortly after the first must be
    served from cache: identical content-type/length/last-modified."""
    # Create a VM
    req = json.dumps({'name': 'test', 'cdrom': fake_iso})
    request(host, ssl_port, '/plugins/kimchi/templates', req, 'POST')
    req = json.dumps({
        'name': 'test-vm',
        'template': '/plugins/kimchi/templates/test'
    })
    resp = request(host, ssl_port, '/plugins/kimchi/vms', req, 'POST')
    task = json.loads(resp.read())
    wait_task(model.task_lookup, task['id'])
    # Test screenshot refresh for running vm
    request(host, ssl_port, '/plugins/kimchi/vms/test-vm/start', '{}',
            'POST')
    resp = request(host, ssl_port,
                   '/plugins/kimchi/vms/test-vm/screenshot')
    self.assertEquals(200, resp.status)
    self.assertEquals('image/png', resp.getheader('content-type'))
    resp1 = request(host, ssl_port, '/plugins/kimchi/vms/test-vm')
    rspBody = resp1.read()
    testvm_Data = json.loads(rspBody)
    # The API returns a relative URI; prefix '/' for an absolute request
    screenshotURL = '/' + testvm_Data['screenshot']
    time.sleep(5)
    resp2 = request(host, ssl_port, screenshotURL)
    self.assertEquals(200, resp2.status)
    # Cached screenshot: headers must match the first response exactly
    self.assertEquals(resp2.getheader('content-type'),
                      resp.getheader('content-type'))
    self.assertEquals(resp2.getheader('content-length'),
                      resp.getheader('content-length'))
    self.assertEquals(resp2.getheader('last-modified'),
                      resp.getheader('last-modified'))
def test_scsi_fc_storage(self):
    """Create a SCSI FC pool, build a template from one of its LUNs, then
    run a full VM lifecycle (create/start/poweroff/delete) on it."""
    # Create scsi fc pool
    req = json.dumps({
        'name': 'scsi_fc_pool',
        'type': 'scsi',
        'source': {
            'adapter_name': 'scsi_host2'
        }
    })
    resp = self.request('/storagepools', req, 'POST')
    self.assertEquals(201, resp.status)
    # Test create vms using lun of this pool
    # activate the storage pool
    resp = self.request('/storagepools/scsi_fc_pool/activate', '{}',
                        'POST')
    # Create template fails because SCSI volume is missing
    tmpl_params = {
        'name': 'test_fc_pool',
        'cdrom': fake_iso,
        'storagepool': '/storagepools/scsi_fc_pool'
    }
    req = json.dumps(tmpl_params)
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(400, resp.status)
    # Choose SCSI volume to create template
    resp = self.request('/storagepools/scsi_fc_pool/storagevolumes')
    lun_name = json.loads(resp.read())[0]['name']
    tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}]
    req = json.dumps(tmpl_params)
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    # Create vm in scsi pool
    req = json.dumps({
        'name': 'test-vm',
        'template': '/templates/test_fc_pool'
    })
    resp = self.request('/vms', req, 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    # Start the VM
    resp = self.request('/vms/test-vm/start', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('running', vm['state'])
    # Force poweroff the VM
    resp = self.request('/vms/test-vm/poweroff', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('shutoff', vm['state'])
    # Delete the VM
    resp = self.request('/vms/test-vm', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
def test_vm_livemigrate_persistent_API(self):
    """Live-migrate a persistent VM through the REST API and verify the
    domain exists -- and is persistent -- on the remote host."""
    patch_auth()
    inst = model.Model(libvirt_uri='qemu:///system',
                       objstore_loc=self.tmp_store)
    host = '127.0.0.1'
    port = get_free_port('http')
    ssl_port = get_free_port('https')
    cherrypy_port = get_free_port('cherrypy_port')
    with RollbackContext() as rollback:
        test_server = run_server(host, port, ssl_port, test_mode=True,
                                 cherrypy_port=cherrypy_port, model=inst)
        rollback.prependDefer(test_server.stop)
        self.request = partial(request, host, ssl_port)
        self.create_vm_test()
        # NOTE(review): the server runs with the local 'inst' model but the
        # code below operates on self.inst -- confirm both refer to the
        # same model instance
        rollback.prependDefer(utils.rollback_wrapper, self.inst.vm_delete,
                              u'test_vm_migrate')
        # removing cdrom because it is not shared storage and will make
        # the migration fail
        dev_list = self.inst.vmstorages_get_list('test_vm_migrate')
        self.inst.vmstorage_delete('test_vm_migrate', dev_list[0])
        try:
            self.inst.vm_start('test_vm_migrate')
        except Exception, e:
            self.fail('Failed to start the vm, reason: %s' % e.message)
        migrate_url = "/plugins/kimchi/vms/%s/migrate" % 'test_vm_migrate'
        req = json.dumps({'remote_host': KIMCHI_LIVE_MIGRATION_TEST,
                          'user': '******'})
        resp = self.request(migrate_url, req, 'POST')
        self.assertEquals(202, resp.status)
        task = json.loads(resp.read())
        wait_task(self._task_lookup, task['id'])
        task = json.loads(
            self.request(
                '/plugins/kimchi/tasks/%s' % task['id'], '{}'
            ).read()
        )
        self.assertEquals('finished', task['status'])
        try:
            # Check the migrated domain on the remote libvirt connection,
            # then clean it up there
            remote_conn = self.get_remote_conn()
            rollback.prependDefer(remote_conn.close)
            remote_vm = remote_conn.lookupByName('test_vm_migrate')
            self.assertTrue(remote_vm.isPersistent())
            remote_vm.destroy()
            remote_vm.undefine()
        except Exception, e:
            self.fail('Migration test failed: %s' % e.message)
def test_iso_scan_shallow(self):
    """An ISO volume in a regular pool shows up in the kimchi_isos scan
    pool with distro metadata, can seed a template, and the scan pool
    itself rejects deactivate/delete."""
    # fake environment preparation
    self._create_pool('pool-3')
    self.request('/storagepools/pool-3/activate', '{}', 'POST')
    params = {'name': 'fedora.iso',
              'capacity': 1073741824,  # 1 GiB
              'type': 'file',
              'format': 'iso'}
    task_info = model.storagevolumes_create('pool-3', params)
    wait_task(self._task_lookup, task_info['id'])
    storagevolume = json.loads(self.request(
        '/storagepools/kimchi_isos/storagevolumes/').read())[0]
    self.assertEquals('fedora.iso', storagevolume['name'])
    self.assertEquals('iso', storagevolume['format'])
    self.assertEquals('/var/lib/libvirt/images/fedora.iso',
                      storagevolume['path'])
    self.assertEquals(1073741824, storagevolume['capacity'])  # 1 GiB
    self.assertEquals(0, storagevolume['allocation'])
    self.assertEquals('17', storagevolume['os_version'])
    self.assertEquals('fedora', storagevolume['os_distro'])
    self.assertEquals(True, storagevolume['bootable'])
    # Create a template
    # In real model os distro/version can be omitted
    # as we will scan the iso
    req = json.dumps({'name': 'test',
                      'cdrom': storagevolume['path'],
                      'os_distro': storagevolume['os_distro'],
                      'os_version': storagevolume['os_version']})
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    # Verify the template
    t = json.loads(self.request('/templates/test').read())
    self.assertEquals('test', t['name'])
    self.assertEquals('fedora', t['os_distro'])
    self.assertEquals('17', t['os_version'])
    self.assertEquals(get_template_default('old', 'memory'), t['memory'])
    # Deactivate or destroy scan pool return 405
    resp = self.request('/storagepools/kimchi_isos/storagevolumes'
                        '/deactivate', '{}', 'POST')
    self.assertEquals(405, resp.status)
    resp = self.request('/storagepools/kimchi_isos/storagevolumes',
                        '{}', 'DELETE')
    self.assertEquals(405, resp.status)
    # Delete the template
    resp = self.request('/templates/%s' % t['name'], '{}', 'DELETE')
    self.assertEquals(204, resp.status)
    resp = self.request('/storagepools/pool-3/deactivate', '{}', 'POST')
    self.assertEquals(200, resp.status)
    self._delete_pool('pool-3')
def test_vm_customise_storage(self):
    """VM creation may override the template's storage pool; the template
    itself stays unchanged and the volume follows the VM lifecycle."""
    # Create a Template
    req = json.dumps({
        'name': 'test',
        'cdrom': fake_iso,
        'disks': [{
            'size': 1
        }]
    })
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    # Create alternate storage
    req = json.dumps({
        'name': 'alt',
        'capacity': 1024,
        'allocated': 512,
        'path': '/tmp',
        'type': 'dir'
    })
    resp = self.request('/storagepools', req, 'POST')
    self.assertEquals(201, resp.status)
    resp = self.request('/storagepools/alt/activate', req, 'POST')
    self.assertEquals(200, resp.status)
    # Create a VM
    req = json.dumps({
        'name': 'test-vm',
        'template': '/templates/test',
        'storagepool': '/storagepools/alt'
    })
    resp = self.request('/vms', req, 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    resp = self.request('/vms/test-vm', {}, 'GET')
    vm_info = json.loads(resp.read())
    # Test template not changed after vm customise its pool
    t = json.loads(self.request('/templates/test').read())
    self.assertEquals(t['storagepool'], '/storagepools/default-pool')
    # Verify the volume was created
    vol_uri = '/storagepools/alt/storagevolumes/%s-0.img' % vm_info['uuid']
    resp = self.request(vol_uri)
    vol = json.loads(resp.read())
    # Template disk size 1 (GiB) -> capacity of 1 << 30 bytes
    self.assertEquals(1 << 30, vol['capacity'])
    # Delete the VM
    resp = self.request('/vms/test-vm', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
    # Verify the volume was deleted
    self.assertHTTPStatus(404, vol_uri)
def test_scsi_fc_storage(self):
    """Create a SCSI FC pool, build a template from one of its LUNs, then
    run a full VM lifecycle (create/start/poweroff/delete) on it."""
    # Create scsi fc pool
    req = json.dumps({'name': 'scsi_fc_pool', 'type': 'scsi',
                      'source': {'adapter_name': 'scsi_host2'}})
    resp = self.request('/storagepools', req, 'POST')
    self.assertEquals(201, resp.status)
    # Test create vms using lun of this pool
    # activate the storage pool
    resp = self.request('/storagepools/scsi_fc_pool/activate', '{}',
                        'POST')
    # Create template fails because SCSI volume is missing
    tmpl_params = {'name': 'test_fc_pool', 'cdrom': fake_iso,
                   'storagepool': '/storagepools/scsi_fc_pool'}
    req = json.dumps(tmpl_params)
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(400, resp.status)
    # Choose SCSI volume to create template
    resp = self.request('/storagepools/scsi_fc_pool/storagevolumes')
    lun_name = json.loads(resp.read())[0]['name']
    tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}]
    req = json.dumps(tmpl_params)
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    # Create vm in scsi pool
    req = json.dumps({'name': 'test-vm',
                      'template': '/templates/test_fc_pool'})
    resp = self.request('/vms', req, 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    # Start the VM
    resp = self.request('/vms/test-vm/start', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('running', vm['state'])
    # Force poweroff the VM
    resp = self.request('/vms/test-vm/poweroff', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('shutoff', vm['state'])
    # Delete the VM
    resp = self.request('/vms/test-vm', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
def test_packages_update(self):
    """Package-update listing, per-package lookup fields and the swupdate
    task shape."""
    pkgs = model.packagesupdate_get_list()
    self.assertEquals(3, len(pkgs))
    expected_fields = ('package_name', 'repository', 'arch', 'version')
    for pkg_name in pkgs:
        details = model.packageupdate_lookup(pkg_name)
        for field in expected_fields:
            self.assertIn(field, details.keys())
    # Kick off a software update and check the returned task structure
    task = model.host_swupdate()
    self.assertEquals(sorted([u'id', u'message', u'status', u'target_uri']),
                      sorted(task.keys()))
    wait_task(model.task_lookup, task['id'])
def test_unnamed_vms(self):
    """VMs created without a name get auto-generated names test-vm-<i>."""
    # Template used as the base for the unnamed VMs
    tmpl_body = json.dumps({'name': 'test', 'cdrom': fake_iso})
    resp = self.request('/templates', tmpl_body, 'POST')
    self.assertEquals(201, resp.status)
    # Spawn five VMs without specifying a name
    for idx in xrange(1, 6):
        body = json.dumps({'template': '/templates/test'})
        task = json.loads(self.request('/vms', body, 'POST').read())
        wait_task(self._task_lookup, task['id'])
        # Each VM must be reachable under its generated name
        resp = self.request('/vms/test-vm-%i' % idx, {}, 'GET')
        self.assertEquals(resp.status, 200)
    # Five new VMs plus the pre-existing default one
    total = len(json.loads(self.request('/vms').read()))
    self.assertEquals(6, total)
def test_screenshot_refresh(self):
    """Screenshot lifecycle: 404 while shut off, cached while running,
    405 on DELETE, and 404 again after poweroff or VM deletion."""
    # Create a VM
    req = json.dumps({'name': 'test', 'cdrom': fake_iso})
    resp = self.request('/templates', req, 'POST')
    req = json.dumps({'name': 'test-vm', 'template': '/templates/test'})
    resp = self.request('/vms', req, 'POST')
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    # Test screenshot for shut-off state vm
    resp = self.request('/vms/test-vm/screenshot')
    self.assertEquals(404, resp.status)
    # Test screenshot for running vm
    resp = self.request('/vms/test-vm/start', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    resp = self.request(vm['screenshot'], method='HEAD')
    self.assertEquals(200, resp.status)
    self.assertTrue(resp.getheader('Content-type').startswith('image'))
    # Test screenshot sub-resource redirect
    resp = self.request('/vms/test-vm/screenshot')
    self.assertEquals(200, resp.status)
    self.assertEquals('image/png', resp.getheader('content-type'))
    lastMod1 = resp.getheader('last-modified')
    # Take another screenshot instantly and compare the last Modified date
    resp = self.request('/vms/test-vm/screenshot')
    lastMod2 = resp.getheader('last-modified')
    # Identical timestamps prove the screenshot was served from cache
    self.assertEquals(lastMod2, lastMod1)
    resp = self.request('/vms/test-vm/screenshot', '{}', 'DELETE')
    self.assertEquals(405, resp.status)
    # No screenshot after stopped the VM
    self.request('/vms/test-vm/poweroff', '{}', 'POST')
    resp = self.request('/vms/test-vm/screenshot')
    self.assertEquals(404, resp.status)
    # Picture link not available after VM deleted
    self.request('/vms/test-vm/start', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    img_lnk = vm['screenshot']
    self.request('/vms/test-vm', '{}', 'DELETE')
    resp = self.request(img_lnk)
    self.assertEquals(404, resp.status)
def test_create_debugreport(self):
    """Create a debug report asynchronously, then rename it via PUT and
    expect a 303 redirect to the new resource."""
    req = json.dumps({'name': 'report1'})
    with RollbackContext() as rollback:
        resp = request(host, ssl_port, '/debugreports', req, 'POST')
        self.assertEquals(202, resp.status)
        task = json.loads(resp.read())
        # make sure the debugreport doesn't exist until the
        # the task is finished
        wait_task(self._task_lookup, task['id'])
        # Clean up under the post-rename name; the PUT below renames
        # report1 -> report2
        rollback.prependDefer(self._report_delete, 'report2')
        resp = request(host, ssl_port, '/debugreports/report1')
        debugreport = json.loads(resp.read())
        self.assertEquals("report1", debugreport['name'])
        self.assertEquals(200, resp.status)
        req = json.dumps({'name': 'report2'})
        resp = request(host, ssl_port, '/debugreports/report1', req, 'PUT')
        # 303 See Other: the report now lives at its new URI
        self.assertEquals(303, resp.status)
def test_vm_customise_storage(self):
    """VM creation may override the template's storage pool; the template
    itself stays unchanged and the volume follows the VM lifecycle."""
    # Create a Template
    req = json.dumps({'name': 'test', 'cdrom': fake_iso,
                      'disks': [{'size': 1}]})
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    # Create alternate storage
    req = json.dumps({'name': 'alt', 'capacity': 1024, 'allocated': 512,
                      'path': '/tmp', 'type': 'dir'})
    resp = self.request('/storagepools', req, 'POST')
    self.assertEquals(201, resp.status)
    resp = self.request('/storagepools/alt/activate', req, 'POST')
    self.assertEquals(200, resp.status)
    # Create a VM
    req = json.dumps({'name': 'test-vm', 'template': '/templates/test',
                      'storagepool': '/storagepools/alt'})
    resp = self.request('/vms', req, 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    resp = self.request('/vms/test-vm', {}, 'GET')
    vm_info = json.loads(resp.read())
    # Test template not changed after vm customise its pool
    t = json.loads(self.request('/templates/test').read())
    self.assertEquals(t['storagepool'], '/storagepools/default-pool')
    # Verify the volume was created
    vol_uri = '/storagepools/alt/storagevolumes/%s-0.img' % vm_info['uuid']
    resp = self.request(vol_uri)
    vol = json.loads(resp.read())
    # Template disk size 1 (GiB) -> capacity of 1 << 30 bytes
    self.assertEquals(1 << 30, vol['capacity'])
    # Delete the VM
    resp = self.request('/vms/test-vm', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
    # Verify the volume was deleted
    self.assertHTTPStatus(404, vol_uri)
def test_upload(self):
    """Upload a volume file: a 3 MiB file is accepted (202) while a 5 MiB
    file exceeds the request-body limit (413)."""
    with RollbackContext() as rollback:
        url = "https://%s:%s/storagepools/default-pool/storagevolumes" % \
            (host, ssl_port)
        # Create a file with 3M to upload
        vol_path = '/tmp/3m-file'
        with open(vol_path, 'wb') as fd:
            # Seek-and-write makes a sparse file of the exact target size
            fd.seek(3*1024*1024-1)
            fd.write("\0")
        rollback.prependDefer(os.remove, vol_path)
        with open(vol_path, 'rb') as fd:
            r = requests.post(url,
                              files={'file': fd},
                              verify=False,
                              headers=fake_auth_header())
        self.assertEquals(r.status_code, 202)
        task = r.json()
        wait_task(self._task_lookup, task['id'], 15)
        # The uploaded volume must be reachable at the task's target URI
        uri = '/storagepools/default-pool/storagevolumes/%s'
        resp = self.request(uri % task['target_uri'].split('/')[-1])
        self.assertEquals(200, resp.status)
        # Create a file with 5M to upload
        # Max body size is set to 4M so the upload will fail with 413
        vol_path = '/tmp/5m-file'
        with open(vol_path, 'wb') as fd:
            fd.seek(5*1024*1024-1)
            fd.write("\0")
        rollback.prependDefer(os.remove, vol_path)
        with open(vol_path, 'rb') as fd:
            r = requests.post(url,
                              files={'file': fd},
                              verify=False,
                              headers=fake_auth_header())
        self.assertEquals(r.status_code, 413)
def test_host_actions(self):
    """Host shutdown/reboot endpoints plus the full software-update task
    flow (list, per-package lookup, swupdate task to completion)."""
    def _task_lookup(taskid):
        # Local helper: fetch current task state through the REST API
        return json.loads(
            self.request('/plugins/kimchi/tasks/%s' % taskid).read()
        )

    resp = self.request('/plugins/kimchi/host/shutdown', '{}', 'POST')
    self.assertEquals(200, resp.status)
    resp = self.request('/plugins/kimchi/host/reboot', '{}', 'POST')
    self.assertEquals(200, resp.status)
    # Test system update
    resp = self.request('/plugins/kimchi/host/packagesupdate', None, 'GET')
    pkgs = json.loads(resp.read())
    self.assertEquals(3, len(pkgs))
    pkg_keys = ['package_name', 'repository', 'arch', 'version']
    for p in pkgs:
        name = p['package_name']
        resp = self.request('/plugins/kimchi/host/packagesupdate/' + name,
                            None, 'GET')
        info = json.loads(resp.read())
        self.assertEquals(sorted(pkg_keys), sorted(info.keys()))
    resp = self.request('/plugins/kimchi/host/swupdate', '{}', 'POST')
    task = json.loads(resp.read())
    task_params = [u'id', u'message', u'status', u'target_uri']
    self.assertEquals(sorted(task_params), sorted(task.keys()))
    resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None,
                        'GET')
    task_info = json.loads(resp.read())
    self.assertEquals(task_info['status'], 'running')
    wait_task(_task_lookup, task_info['id'])
    resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None,
                        'GET')
    task_info = json.loads(resp.read())
    self.assertEquals(task_info['status'], 'finished')
    self.assertIn(u'All packages updated', task_info['message'])
    # After a successful update no packages remain pending
    pkgs = model.packagesupdate_get_list()
    self.assertEquals(0, len(pkgs))
def test_vm_info(self):
    """Create a VM from a template and verify every field vm_lookup() returns."""
    model.templates_create({"name": u"test", "cdrom": fake_iso})
    task = model.vms_create({"name": u"test-vm",
                             "template": "/plugins/kimchi/templates/test"})
    wait_task(model.task_lookup, task["id"])
    vms = model.vms_get_list()
    # Two VMs: the one just created plus the pre-existing default VM of the
    # test libvirt driver
    self.assertEquals(2, len(vms))
    self.assertIn(u"test-vm", vms)
    # Full set of top-level keys expected in the lookup result
    keys = set(
        (
            "name",
            "state",
            "stats",
            "uuid",
            "memory",
            "cpus",
            "screenshot",
            "icon",
            "graphics",
            "users",
            "groups",
            "access",
            "persistent",
        )
    )
    stats_keys = set(
        ("cpu_utilization", "net_throughput", "net_throughput_peak",
         "io_throughput", "io_throughput_peak")
    )
    info = model.vm_lookup(u"test-vm")
    self.assertEquals(keys, set(info.keys()))
    self.assertEquals("shutoff", info["state"])
    self.assertEquals("test-vm", info["name"])
    # Memory defaults come from the template defaults ('old' profile)
    self.assertEquals(get_template_default("old", "memory"), info["memory"])
    self.assertEquals(1, info["cpus"])
    self.assertEquals("plugins/kimchi/images/icon-vm.png", info["icon"])
    self.assertEquals(stats_keys, set(info["stats"].keys()))
    self.assertEquals("vnc", info["graphics"]["type"])
    self.assertEquals("127.0.0.1", info["graphics"]["listen"])
def test_host_actions(self):
    """Host shutdown/reboot endpoints plus the full software-update task
    flow (list, per-package lookup, swupdate task to completion)."""
    def _task_lookup(taskid):
        # Local helper: fetch current task state through the REST API
        return json.loads(
            self.request('/plugins/kimchi/tasks/%s' % taskid).read())

    resp = self.request('/plugins/kimchi/host/shutdown', '{}', 'POST')
    self.assertEquals(200, resp.status)
    resp = self.request('/plugins/kimchi/host/reboot', '{}', 'POST')
    self.assertEquals(200, resp.status)
    # Test system update
    resp = self.request('/plugins/kimchi/host/packagesupdate', None, 'GET')
    pkgs = json.loads(resp.read())
    self.assertEquals(3, len(pkgs))
    pkg_keys = ['package_name', 'repository', 'arch', 'version']
    for p in pkgs:
        name = p['package_name']
        resp = self.request('/plugins/kimchi/host/packagesupdate/' + name,
                            None, 'GET')
        info = json.loads(resp.read())
        self.assertEquals(sorted(pkg_keys), sorted(info.keys()))
    resp = self.request('/plugins/kimchi/host/swupdate', '{}', 'POST')
    task = json.loads(resp.read())
    task_params = [u'id', u'message', u'status', u'target_uri']
    self.assertEquals(sorted(task_params), sorted(task.keys()))
    resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None,
                        'GET')
    task_info = json.loads(resp.read())
    self.assertEquals(task_info['status'], 'running')
    wait_task(_task_lookup, task_info['id'])
    resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None,
                        'GET')
    task_info = json.loads(resp.read())
    self.assertEquals(task_info['status'], 'finished')
    self.assertIn(u'All packages updated', task_info['message'])
    # After a successful update no packages remain pending
    pkgs = model.packagesupdate_get_list()
    self.assertEquals(0, len(pkgs))
def test_create_vm_with_img_based_template(self):
    """A VM built from an image-based template creates exactly one storage
    volume in the default pool."""
    def list_volumes():
        # Current volumes of the default pool via the REST API
        uri = '/storagepools/default-pool/storagevolumes'
        return json.loads(self.request(uri).read())

    self.assertEquals(0, len(list_volumes()))
    # Template backed by an empty image file
    mock_base = '/tmp/mock.img'
    open(mock_base, 'w').close()
    tmpl_body = json.dumps({'name': 'test', 'disks': [{'base': mock_base}]})
    resp = self.request('/templates', tmpl_body, 'POST')
    self.assertEquals(201, resp.status)
    vm_body = json.dumps({'template': '/templates/test'})
    resp = self.request('/vms', vm_body, 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    # Test storage volume created with backing store of base file
    self.assertEquals(1, len(list_volumes()))
def test_tasks(self):
    """Task listing, filtering by status/target_uri, and per-task state
    after completion or mid-progress."""
    id1 = add_task('/tasks/1', self._async_op, model.objstore)
    id2 = add_task('/tasks/2', self._except_op, model.objstore)
    id3 = add_task('/tasks/3', self._intermid_op, model.objstore)

    # All three tasks are running and match the target_uri pattern
    quoted_uri = urllib2.quote('^/tasks/*', safe="")
    query = 'status=running&target_uri=%s' % quoted_uri
    running = json.loads(self.request('/tasks?%s' % query).read())
    self.assertEquals(3, len(running))

    # The unfiltered listing contains all three ids
    all_tasks = json.loads(self.request('/tasks').read())
    listed_ids = [int(t['id']) for t in all_tasks]
    self.assertEquals(set([id1, id2, id3]) - set(listed_ids), set([]))

    # A task whose operation raised ends up 'failed'
    wait_task(self._task_lookup, id2)
    failed = json.loads(self.request('/tasks/%s' % id2).read())
    self.assertEquals(sorted(['id', 'status', 'message', 'target_uri']),
                      sorted(failed.keys()))
    self.assertEquals('failed', failed['status'])

    # An intermediate task reports progress while still running
    wait_task(self._task_lookup, id3)
    ongoing = json.loads(self.request('/tasks/%s' % id3).read())
    self.assertEquals('in progress', ongoing['message'])
    self.assertEquals('running', ongoing['status'])
def test_debugreport_download(self):
    """Generate a debug report and download it both through the /content
    sub-resource and through the report's own 'uri' field."""
    req = json.dumps({'name': 'report1'})
    with RollbackContext() as rollback:
        resp = request(host, ssl_port, '/plugins/gingerbase/debugreports',
                       req, 'POST')
        self.assertEquals(202, resp.status)
        task = json.loads(resp.read())
        # make sure the debugreport doesn't exist until the
        # the task is finished
        wait_task(self._task_lookup, task['id'], 20)
        rollback.prependDefer(self._report_delete, 'report1')
        resp = request(host, ssl_port,
                       '/plugins/gingerbase/debugreports/report1')
        debugreport = json.loads(resp.read())
        self.assertEquals("report1", debugreport['name'])
        self.assertEquals(200, resp.status)
        resp = request(host, ssl_port,
                       '/plugins/gingerbase/debugreports/report1/content')
        self.assertEquals(200, resp.status)
        resp = request(host, ssl_port,
                       '/plugins/gingerbase/debugreports/report1')
        debugre = json.loads(resp.read())
        # 'uri' is returned relative; prefix '/' for the download request
        resp = request(host, ssl_port, '/' + debugre['uri'])
        self.assertEquals(200, resp.status)
def test_screenshot_refresh(self):
    """A second screenshot request made shortly after the first must be
    served from cache: identical content-type/length/last-modified."""
    # Create a VM
    req = json.dumps({"name": "test", "cdrom": fake_iso})
    request(host, ssl_port, "/plugins/kimchi/templates", req, "POST")
    req = json.dumps({"name": "test-vm",
                      "template": "/plugins/kimchi/templates/test"})
    resp = request(host, ssl_port, "/plugins/kimchi/vms", req, "POST")
    task = json.loads(resp.read())
    wait_task(model.task_lookup, task["id"])
    # Test screenshot refresh for running vm
    request(host, ssl_port, "/plugins/kimchi/vms/test-vm/start", "{}",
            "POST")
    resp = request(host, ssl_port,
                   "/plugins/kimchi/vms/test-vm/screenshot")
    self.assertEquals(200, resp.status)
    self.assertEquals("image/png", resp.getheader("content-type"))
    resp1 = request(host, ssl_port, "/plugins/kimchi/vms/test-vm")
    rspBody = resp1.read()
    testvm_Data = json.loads(rspBody)
    # The API returns a relative URI; prefix '/' for an absolute request
    screenshotURL = "/" + testvm_Data["screenshot"]
    time.sleep(5)
    resp2 = request(host, ssl_port, screenshotURL)
    self.assertEquals(200, resp2.status)
    # Cached screenshot: headers must match the first response exactly
    self.assertEquals(resp2.getheader("content-type"),
                      resp.getheader("content-type"))
    self.assertEquals(resp2.getheader("content-length"),
                      resp.getheader("content-length"))
    self.assertEquals(resp2.getheader("last-modified"),
                      resp.getheader("last-modified"))
def test_host_actions(self):
    """Host shutdown/reboot endpoints plus the full software-update task
    flow (list, per-package lookup, swupdate task to completion)."""
    def _task_lookup(taskid):
        # Local helper: fetch current task state through the REST API
        return json.loads(self.request("/tasks/%s" % taskid).read())

    resp = self.request("/host/shutdown", "{}", "POST")
    self.assertEquals(200, resp.status)
    resp = self.request("/host/reboot", "{}", "POST")
    self.assertEquals(200, resp.status)
    # Test system update
    resp = self.request("/host/packagesupdate", None, "GET")
    pkgs = json.loads(resp.read())
    self.assertEquals(3, len(pkgs))
    pkg_keys = ["package_name", "repository", "arch", "version"]
    for p in pkgs:
        name = p["package_name"]
        resp = self.request("/host/packagesupdate/" + name, None, "GET")
        info = json.loads(resp.read())
        self.assertEquals(sorted(pkg_keys), sorted(info.keys()))
    resp = self.request("/host/swupdate", "{}", "POST")
    task = json.loads(resp.read())
    task_params = [u"id", u"message", u"status", u"target_uri"]
    self.assertEquals(sorted(task_params), sorted(task.keys()))
    resp = self.request("/tasks/" + task[u"id"], None, "GET")
    task_info = json.loads(resp.read())
    self.assertEquals(task_info["status"], "running")
    wait_task(_task_lookup, task_info["id"])
    resp = self.request("/tasks/" + task[u"id"], None, "GET")
    task_info = json.loads(resp.read())
    self.assertEquals(task_info["status"], "finished")
    self.assertIn(u"All packages updated", task_info["message"])
    # After a successful update no packages remain pending
    pkgs = model.packagesupdate_get_list()
    self.assertEquals(0, len(pkgs))
def test_async_tasks(self):
    """AsyncTask status/message transitions for quick, failing, abnormal
    (callback-reported) and continuous operations."""
    class task_except(Exception):
        pass

    def abnormal_op(cb, params):
        # The operation raises, but the failure is reported through the
        # callback instead of propagating
        try:
            raise task_except
        except:
            cb("Exception raised", False)

    taskid = AsyncTask('', self._quick_op, 'Hello').id
    wait_task(self._task_lookup, taskid)
    self.assertEquals('finished', self._task_lookup(taskid)['status'])
    self.assertEquals('Hello', self._task_lookup(taskid)['message'])

    # A delayed failing op stays 'running' with the default message first
    params = {
        'delay': 3,
        'result': False,
        'message': 'It was not meant to be'
    }
    taskid = AsyncTask('', self._long_op, params).id
    self.assertEquals('running', self._task_lookup(taskid)['status'])
    self.assertEquals('The request is being processing.',
                      self._task_lookup(taskid)['message'])
    wait_task(self._task_lookup, taskid)
    self.assertEquals('failed', self._task_lookup(taskid)['status'])
    self.assertEquals('It was not meant to be',
                      self._task_lookup(taskid)['message'])

    taskid = AsyncTask('', abnormal_op, {}).id
    wait_task(self._task_lookup, taskid)
    self.assertEquals('Exception raised',
                      self._task_lookup(taskid)['message'])
    self.assertEquals('failed', self._task_lookup(taskid)['status'])

    taskid = AsyncTask('', self._continuous_ops, {'result': True}).id
    self.assertEquals('running', self._task_lookup(taskid)['status'])
    wait_task(self._task_lookup, taskid, timeout=10)
    self.assertEquals('finished', self._task_lookup(taskid)['status'])
def test_async_tasks(self):
    """AsyncTask status/message transitions for quick, failing, abnormal
    (callback-reported) and continuous operations."""
    class task_except(Exception):
        pass

    def abnormal_op(cb, params):
        # The operation raises, but the failure is reported through the
        # callback instead of propagating
        try:
            raise task_except
        except:
            cb("Exception raised", False)

    taskid = AsyncTask('', self._quick_op, 'Hello').id
    wait_task(self._task_lookup, taskid)
    self.assertEquals('finished', self._task_lookup(taskid)['status'])
    self.assertEquals('Hello', self._task_lookup(taskid)['message'])

    # A delayed failing op stays 'running' with the default message first
    params = {'delay': 3, 'result': False,
              'message': 'It was not meant to be'}
    taskid = AsyncTask('', self._long_op, params).id
    self.assertEquals('running', self._task_lookup(taskid)['status'])
    self.assertEquals('The request is being processing.',
                      self._task_lookup(taskid)['message'])
    wait_task(self._task_lookup, taskid)
    self.assertEquals('failed', self._task_lookup(taskid)['status'])
    self.assertEquals('It was not meant to be',
                      self._task_lookup(taskid)['message'])

    taskid = AsyncTask('', abnormal_op, {}).id
    wait_task(self._task_lookup, taskid)
    self.assertEquals('Exception raised',
                      self._task_lookup(taskid)['message'])
    self.assertEquals('failed', self._task_lookup(taskid)['status'])

    taskid = AsyncTask('', self._continuous_ops, {'result': True}).id
    self.assertEquals('running', self._task_lookup(taskid)['status'])
    wait_task(self._task_lookup, taskid, timeout=10)
    self.assertEquals('finished', self._task_lookup(taskid)['status'])
def _do_volume_test(self, model, host, ssl_port, pool_name):
    """Run the storage-volume REST scenarios against *pool_name*.

    Covers: create by 'capacity', resize (grow and shrink), wipe, clone,
    delete, create by uploaded 'file', and create by remote 'url'.
    Read-only pool types are expected to reject every create with 400.
    """
    def _task_lookup(taskid):
        # Poll the task resource so wait_task() can watch its status.
        return json.loads(self.request('/tasks/%s' % taskid).read())

    uri = '/storagepools/%s/storagevolumes' % pool_name.encode('utf-8')
    resp = self.request(uri)
    self.assertEquals(200, resp.status)

    resp = self.request('/storagepools/%s' % pool_name.encode('utf-8'))
    pool_info = json.loads(resp.read())
    with RollbackContext() as rollback:
        # Create storage volume with 'capacity'
        vol = 'test-volume'
        vol_uri = uri + '/' + vol
        req = json.dumps({'name': vol, 'format': 'raw',
                          'capacity': 1073741824})  # 1 GiB
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            # Read-only pools reject volume creation outright.
            self.assertEquals(400, resp.status)
        else:
            # Register cleanup before asserting, so a failed assertion
            # still removes the volume.
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, vol)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())['id']
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request('/tasks/%s' % task_id).read())
            self.assertEquals('finished', status['status'])
            vol_info = json.loads(self.request(vol_uri).read())
            # NOTE(review): these look like they were meant to be
            # assertEquals() checks but are plain assignments, so the
            # created volume's attributes are never actually verified --
            # confirm intent before changing.
            vol_info['name'] = vol
            vol_info['format'] = 'raw'
            vol_info['capacity'] = 1073741824

            # Resize the storage volume: increase its capacity to 2 GiB
            req = json.dumps({'size': 2147483648})  # 2 GiB
            resp = self.request(vol_uri + '/resize', req, 'POST')
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(2147483648, storagevolume['capacity'])

            # Resize the storage volume: decrease its capacity to 512 MiB
            # FIXME: Due a libvirt bug it is not possible to decrease the
            # volume capacity
            # For reference:
            # - https://bugzilla.redhat.com/show_bug.cgi?id=1021802
            req = json.dumps({'size': 536870912})  # 512 MiB
            resp = self.request(vol_uri + '/resize', req, 'POST')
            # It is only possible when using MockModel
            if isinstance(model, MockModel):
                self.assertEquals(200, resp.status)
                storagevolume = json.loads(self.request(vol_uri).read())
                self.assertEquals(536870912, storagevolume['capacity'])
            else:
                self.assertEquals(500, resp.status)

            # Wipe the storage volume
            resp = self.request(vol_uri + '/wipe', '{}', 'POST')
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(0, storagevolume['allocation'])

            # Clone the storage volume
            vol_info = json.loads(self.request(vol_uri).read())
            resp = self.request(vol_uri + '/clone', '{}', 'POST')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            cloned_vol_name = task['target_uri'].split('/')[-1]
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  cloned_vol_name)
            wait_task(_task_lookup, task['id'])
            task = json.loads(
                self.request('/tasks/%s' % task['id']).read())
            self.assertEquals('finished', task['status'])
            resp = self.request(
                uri + '/' + cloned_vol_name.encode('utf-8'))
            self.assertEquals(200, resp.status)
            cloned_vol = json.loads(resp.read())
            self.assertNotEquals(vol_info['name'], cloned_vol['name'])
            self.assertNotEquals(vol_info['path'], cloned_vol['path'])
            # Aside from name/path/allocation, the clone must be an
            # exact copy of the source volume.
            for key in ['name', 'path', 'allocation']:
                del vol_info[key]
                del cloned_vol[key]
            self.assertEquals(vol_info, cloned_vol)

            # Delete the storage volume
            resp = self.request(vol_uri, '{}', 'DELETE')
            self.assertEquals(204, resp.status)
            resp = self.request(vol_uri)
            self.assertEquals(404, resp.status)

        # Create storage volume with 'file'
        filepath = os.path.join(paths.get_prefix(), 'COPYING.LGPL')
        url = 'https://%s:%s' % (host, ssl_port) + uri
        with open(filepath, 'rb') as fd:
            r = requests.post(url, files={'file': fd}, verify=False,
                              headers=fake_auth_header())
        if pool_info['type'] in READONLY_POOL_TYPE:
            self.assertEquals(r.status_code, 400)
        else:
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  'COPYING.LGPL')
            self.assertEquals(r.status_code, 202)
            task = r.json()
            wait_task(_task_lookup, task['id'])
            resp = self.request(uri + '/COPYING.LGPL')
            self.assertEquals(200, resp.status)

        # Create storage volume with 'url'
        url = 'https://github.com/kimchi-project/kimchi/raw/master/COPYING'
        req = json.dumps({'url': url})
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  'COPYING')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            wait_task(_task_lookup, task['id'])
            resp = self.request(uri + '/COPYING')
            self.assertEquals(200, resp.status)
def test_nonroot_access(self):
    """Walk the REST API as a non-root user and verify the ACLs.

    Privileged host/debug/template operations must return 403; network
    and storage-pool collections are readable but not writable; and the
    VM listing is restricted to guests the user is authorized for.
    """
    # Non-root users can not access static host information
    resp = self.request("/host", "{}", "GET")
    self.assertEquals(403, resp.status)

    # Non-root users can not access host stats
    resp = self.request("/host/stats", "{}", "GET")
    self.assertEquals(403, resp.status)

    # Non-root users can not reboot/shutdown host system
    resp = self.request("/host/reboot", "{}", "POST")
    self.assertEquals(403, resp.status)
    resp = self.request("/host/shutdown", "{}", "POST")
    self.assertEquals(403, resp.status)

    # Non-root users can not get or debug reports
    resp = self.request("/debugreports", "{}", "GET")
    self.assertEquals(403, resp.status)
    resp = self.request("/debugreports", "{}", "POST")
    self.assertEquals(403, resp.status)

    # Non-root users can not create or delete network (only get)
    resp = self.request("/networks", "{}", "GET")
    self.assertEquals(200, resp.status)
    resp = self.request("/networks", "{}", "POST")
    self.assertEquals(403, resp.status)
    resp = self.request("/networks/default/activate", "{}", "POST")
    self.assertEquals(403, resp.status)
    resp = self.request("/networks/default", "{}", "DELETE")
    self.assertEquals(403, resp.status)

    # Non-root users can not create or delete storage pool (only get)
    resp = self.request("/storagepools", "{}", "GET")
    self.assertEquals(200, resp.status)
    resp = self.request("/storagepools", "{}", "POST")
    self.assertEquals(403, resp.status)
    resp = self.request("/storagepools/default/activate", "{}", "POST")
    self.assertEquals(403, resp.status)
    resp = self.request("/storagepools/default", "{}", "DELETE")
    self.assertEquals(403, resp.status)

    # Non-root users can not get, create, update or delete a template
    resp = self.request("/templates", "{}", "GET")
    self.assertEquals(403, resp.status)
    req = json.dumps({"name": "test", "cdrom": fake_iso})
    resp = self.request("/templates", req, "POST")
    self.assertEquals(403, resp.status)
    resp = self.request("/templates/test", "{}", "PUT")
    self.assertEquals(403, resp.status)
    resp = self.request("/templates/test", "{}", "DELETE")
    self.assertEquals(403, resp.status)

    # Non-root users can only get vms authorized to them
    model.templates_create({"name": u"test", "cdrom": fake_iso})

    task_info = model.vms_create({"name": u"test-me",
                                  "template": "/templates/test"})
    wait_task(model.task_lookup, task_info["id"])
    # Grant the fake (current) user direct access to 'test-me'.
    model.vm_update(u"test-me",
                    {"users": [kimchi.mockmodel.fake_user.keys()[0]],
                     "groups": []})

    task_info = model.vms_create({"name": u"test-usera",
                                  "template": "/templates/test"})
    wait_task(model.task_lookup, task_info["id"])
    # 'test-usera' belongs to some other non-root user only.
    non_root = list(set(model.users_get_list()) - set(["root"]))[0]
    model.vm_update(u"test-usera", {"users": [non_root], "groups": []})

    task_info = model.vms_create({"name": u"test-groupa",
                                  "template": "/templates/test"})
    wait_task(model.task_lookup, task_info["id"])
    # 'test-groupa' is authorized through group membership.
    a_group = model.groups_get_list()[0]
    model.vm_update(u"test-groupa", {"groups": [a_group]})

    resp = self.request("/vms", "{}", "GET")
    self.assertEquals(200, resp.status)
    vms_data = json.loads(resp.read())
    # Only the guests authorized to this user are listed.
    self.assertEquals([u"test-groupa", u"test-me"],
                      sorted([v["name"] for v in vms_data]))
    resp = self.request("/vms", req, "POST")
    self.assertEquals(403, resp.status)

    # Create a vm using mockmodel directly to test Resource access
    task_info = model.vms_create({"name": "kimchi-test",
                                  "template": "/templates/test"})
    wait_task(model.task_lookup, task_info["id"])
    resp = self.request("/vms/kimchi-test", "{}", "PUT")
    self.assertEquals(403, resp.status)
    resp = self.request("/vms/kimchi-test", "{}", "DELETE")
    self.assertEquals(403, resp.status)

    # Non-root users can only update VMs authorized by them
    resp = self.request("/vms/test-me/start", "{}", "POST")
    self.assertEquals(200, resp.status)
    resp = self.request("/vms/test-usera/start", "{}", "POST")
    self.assertEquals(403, resp.status)

    model.template_delete("test")
    model.vm_delete("test-me")
def _do_volume_test(self, model, host, ssl_port, pool_name):
    """Run the storage-volume REST scenarios against *pool_name*.

    Covers: create by 'capacity', resize (grow and shrink), wipe, clone,
    delete, chunked upload of a volume's content, and create by remote
    'url'.  Read-only pool types must reject every create with 400.
    """
    def _task_lookup(taskid):
        # Poll the task resource so wait_task() can watch its status.
        return json.loads(self.request("/tasks/%s" % taskid).read())

    uri = "/storagepools/%s/storagevolumes" % pool_name.encode("utf-8")
    resp = self.request(uri)
    self.assertEquals(200, resp.status)

    resp = self.request("/storagepools/%s" % pool_name.encode("utf-8"))
    pool_info = json.loads(resp.read())
    with RollbackContext() as rollback:
        # Create storage volume with 'capacity'
        vol = "test-volume"
        vol_uri = uri + "/" + vol
        req = json.dumps({"name": vol, "format": "raw",
                          "capacity": 1073741824})  # 1 GiB
        resp = self.request(uri, req, "POST")
        if pool_info["type"] in READONLY_POOL_TYPE:
            # Read-only pools reject volume creation outright.
            self.assertEquals(400, resp.status)
        else:
            # Register cleanup before asserting, so a failed assertion
            # still removes the volume.
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, vol)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())["id"]
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request("/tasks/%s" % task_id).read())
            self.assertEquals("finished", status["status"])
            vol_info = json.loads(self.request(vol_uri).read())
            # NOTE(review): these look like they were meant to be
            # assertEquals() checks but are plain assignments, so the
            # created volume's attributes are never actually verified --
            # confirm intent before changing.
            vol_info["name"] = vol
            vol_info["format"] = "raw"
            vol_info["capacity"] = 1073741824

            # Resize the storage volume: increase its capacity to 2 GiB
            req = json.dumps({"size": 2147483648})  # 2 GiB
            resp = self.request(vol_uri + "/resize", req, "POST")
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(2147483648, storagevolume["capacity"])

            # Resize the storage volume: decrease its capacity to 512 MiB
            # FIXME: Due a libvirt bug it is not possible to decrease the
            # volume capacity
            # For reference:
            # - https://bugzilla.redhat.com/show_bug.cgi?id=1021802
            req = json.dumps({"size": 536870912})  # 512 MiB
            resp = self.request(vol_uri + "/resize", req, "POST")
            # It is only possible when using MockModel
            if isinstance(model, MockModel):
                self.assertEquals(200, resp.status)
                storagevolume = json.loads(self.request(vol_uri).read())
                self.assertEquals(536870912, storagevolume["capacity"])
            else:
                self.assertEquals(500, resp.status)

            # Wipe the storage volume
            resp = self.request(vol_uri + "/wipe", "{}", "POST")
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(0, storagevolume["allocation"])

            # Clone the storage volume
            vol_info = json.loads(self.request(vol_uri).read())
            resp = self.request(vol_uri + "/clone", "{}", "POST")
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            cloned_vol_name = task["target_uri"].split("/")[-1]
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  cloned_vol_name)
            wait_task(_task_lookup, task["id"])
            task = json.loads(
                self.request("/tasks/%s" % task["id"]).read())
            self.assertEquals("finished", task["status"])
            resp = self.request(
                uri + "/" + cloned_vol_name.encode("utf-8"))
            self.assertEquals(200, resp.status)
            cloned_vol = json.loads(resp.read())
            self.assertNotEquals(vol_info["name"], cloned_vol["name"])
            self.assertNotEquals(vol_info["path"], cloned_vol["path"])
            # Aside from name/path/allocation, the clone must be an
            # exact copy of the source volume.
            for key in ["name", "path", "allocation"]:
                del vol_info[key]
                del cloned_vol[key]
            self.assertEquals(vol_info, cloned_vol)

            # Delete the storage volume
            resp = self.request(vol_uri, "{}", "DELETE")
            self.assertEquals(204, resp.status)
            resp = self.request(vol_uri)
            self.assertEquals(404, resp.status)

        # Storage volume upload
        # It is done through a sequence of POST and several PUT requests
        filename = "COPYING.LGPL"
        filepath = os.path.join(paths.get_prefix(), filename)
        filesize = os.stat(filepath).st_size

        # Create storage volume for upload
        req = json.dumps({"name": filename, "format": "raw",
                          "capacity": filesize, "upload": True})
        resp = self.request(uri, req, "POST")
        if pool_info["type"] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, filename)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())["id"]
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request("/tasks/%s" % task_id).read())
            self.assertEquals("ready for upload", status["message"])

            # Upload volume content
            url = "https://%s:%s" % (host, ssl_port) + uri + "/" + filename

            # Create a file with 5M to upload
            # Max body size is set to 4M so the upload will fail with 413
            newfile = "/tmp/5m-file"
            with open(newfile, "wb") as fd:
                # Seek-then-write creates a sparse 5 MiB file cheaply.
                fd.seek(5 * 1024 * 1024 - 1)
                fd.write("\0")
            rollback.prependDefer(os.remove, newfile)

            with open(newfile, "rb") as fd:
                with open(newfile + ".tmp", "wb") as tmp_fd:
                    data = fd.read()
                    tmp_fd.write(data)
                with open(newfile + ".tmp", "rb") as tmp_fd:
                    r = requests.put(
                        url,
                        data={"chunk_size": len(data)},
                        files={"chunk": tmp_fd},
                        verify=False,
                        headers=fake_auth_header(),
                    )
                    self.assertEquals(r.status_code, 413)

            # Do upload
            index = 0
            chunk_size = 2 * 1024
            content = ""
            with open(filepath, "rb") as fd:
                # Send the file 2 KiB at a time; each chunk goes through
                # a scratch .tmp file handed to requests as multipart.
                while True:
                    with open(filepath + ".tmp", "wb") as tmp_fd:
                        fd.seek(index * chunk_size)
                        data = fd.read(chunk_size)
                        tmp_fd.write(data)
                    with open(filepath + ".tmp", "rb") as tmp_fd:
                        r = requests.put(
                            url,
                            data={"chunk_size": len(data)},
                            files={"chunk": tmp_fd},
                            verify=False,
                            headers=fake_auth_header(),
                        )
                        self.assertEquals(r.status_code, 200)
                    content += data
                    index = index + 1
                    # A short read means we just sent the last chunk.
                    if len(data) < chunk_size:
                        break
            rollback.prependDefer(os.remove, filepath + ".tmp")
            resp = self.request(uri + "/" + filename)
            self.assertEquals(200, resp.status)
            uploaded_path = json.loads(resp.read())["path"]
            # The stored volume must match what was sent, byte for byte.
            with open(uploaded_path) as fd:
                uploaded_content = fd.read()
            self.assertEquals(content, uploaded_content)

        # Create storage volume with 'url'
        url = "https://github.com/kimchi-project/kimchi/raw/master/COPYING"
        req = json.dumps({"url": url})
        resp = self.request(uri, req, "POST")
        if pool_info["type"] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  "COPYING")
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            wait_task(_task_lookup, task["id"])
            resp = self.request(uri + "/COPYING")
            self.assertEquals(200, resp.status)
def _do_volume_test(self, model, host, ssl_port, pool_name):
    """Run the storage-volume REST scenarios against *pool_name*.

    Covers: create by 'capacity', resize (grow and shrink), wipe, clone,
    delete, chunked upload of a volume's content, and create by remote
    'url'.  Read-only pool types must reject every create with 400.
    """
    def _task_lookup(taskid):
        # Poll the task resource so wait_task() can watch its status.
        return json.loads(self.request('/tasks/%s' % taskid).read())

    uri = '/storagepools/%s/storagevolumes' % pool_name.encode('utf-8')
    resp = self.request(uri)
    self.assertEquals(200, resp.status)

    resp = self.request('/storagepools/%s' % pool_name.encode('utf-8'))
    pool_info = json.loads(resp.read())
    with RollbackContext() as rollback:
        # Create storage volume with 'capacity'
        vol = 'test-volume'
        vol_uri = uri + '/' + vol
        req = json.dumps({
            'name': vol,
            'format': 'raw',
            'capacity': 1073741824
        })  # 1 GiB
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            # Read-only pools reject volume creation outright.
            self.assertEquals(400, resp.status)
        else:
            # Register cleanup before asserting, so a failed assertion
            # still removes the volume.
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, vol)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())['id']
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request('/tasks/%s' % task_id).read())
            self.assertEquals('finished', status['status'])
            vol_info = json.loads(self.request(vol_uri).read())
            # NOTE(review): these look like they were meant to be
            # assertEquals() checks but are plain assignments, so the
            # created volume's attributes are never actually verified --
            # confirm intent before changing.
            vol_info['name'] = vol
            vol_info['format'] = 'raw'
            vol_info['capacity'] = 1073741824

            # Resize the storage volume: increase its capacity to 2 GiB
            req = json.dumps({'size': 2147483648})  # 2 GiB
            resp = self.request(vol_uri + '/resize', req, 'POST')
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(2147483648, storagevolume['capacity'])

            # Resize the storage volume: decrease its capacity to 512 MiB
            # FIXME: Due a libvirt bug it is not possible to decrease the
            # volume capacity
            # For reference:
            # - https://bugzilla.redhat.com/show_bug.cgi?id=1021802
            req = json.dumps({'size': 536870912})  # 512 MiB
            resp = self.request(vol_uri + '/resize', req, 'POST')
            # It is only possible when using MockModel
            if isinstance(model, MockModel):
                self.assertEquals(200, resp.status)
                storagevolume = json.loads(self.request(vol_uri).read())
                self.assertEquals(536870912, storagevolume['capacity'])
            else:
                self.assertEquals(500, resp.status)

            # Wipe the storage volume
            resp = self.request(vol_uri + '/wipe', '{}', 'POST')
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(0, storagevolume['allocation'])

            # Clone the storage volume
            vol_info = json.loads(self.request(vol_uri).read())
            resp = self.request(vol_uri + '/clone', '{}', 'POST')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            cloned_vol_name = task['target_uri'].split('/')[-1]
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  cloned_vol_name)
            wait_task(_task_lookup, task['id'])
            task = json.loads(
                self.request('/tasks/%s' % task['id']).read())
            self.assertEquals('finished', task['status'])
            resp = self.request(
                uri + '/' + cloned_vol_name.encode('utf-8'))
            self.assertEquals(200, resp.status)
            cloned_vol = json.loads(resp.read())
            self.assertNotEquals(vol_info['name'], cloned_vol['name'])
            self.assertNotEquals(vol_info['path'], cloned_vol['path'])
            # Aside from name/path/allocation, the clone must be an
            # exact copy of the source volume.
            for key in ['name', 'path', 'allocation']:
                del vol_info[key]
                del cloned_vol[key]
            self.assertEquals(vol_info, cloned_vol)

            # Delete the storage volume
            resp = self.request(vol_uri, '{}', 'DELETE')
            self.assertEquals(204, resp.status)
            resp = self.request(vol_uri)
            self.assertEquals(404, resp.status)

        # Storage volume upload
        # It is done through a sequence of POST and several PUT requests
        filename = 'COPYING.LGPL'
        filepath = os.path.join(paths.get_prefix(), filename)
        filesize = os.stat(filepath).st_size

        # Create storage volume for upload
        req = json.dumps({
            'name': filename,
            'format': 'raw',
            'capacity': filesize,
            'upload': True
        })
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, filename)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())['id']
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request('/tasks/%s' % task_id).read())
            self.assertEquals('ready for upload', status['message'])

            # Upload volume content
            url = 'https://%s:%s' % (host, ssl_port) + uri + '/' + filename

            # Create a file with 5M to upload
            # Max body size is set to 4M so the upload will fail with 413
            newfile = '/tmp/5m-file'
            with open(newfile, 'wb') as fd:
                # Seek-then-write creates a sparse 5 MiB file cheaply.
                fd.seek(5 * 1024 * 1024 - 1)
                fd.write("\0")
            rollback.prependDefer(os.remove, newfile)

            with open(newfile, 'rb') as fd:
                with open(newfile + '.tmp', 'wb') as tmp_fd:
                    data = fd.read()
                    tmp_fd.write(data)
                with open(newfile + '.tmp', 'rb') as tmp_fd:
                    r = requests.put(url,
                                     data={'chunk_size': len(data)},
                                     files={'chunk': tmp_fd},
                                     verify=False,
                                     headers=fake_auth_header())
                    self.assertEquals(r.status_code, 413)

            # Do upload
            index = 0
            chunk_size = 2 * 1024
            content = ''
            with open(filepath, 'rb') as fd:
                # Send the file 2 KiB at a time; each chunk goes through
                # a scratch .tmp file handed to requests as multipart.
                while True:
                    with open(filepath + '.tmp', 'wb') as tmp_fd:
                        fd.seek(index * chunk_size)
                        data = fd.read(chunk_size)
                        tmp_fd.write(data)
                    with open(filepath + '.tmp', 'rb') as tmp_fd:
                        r = requests.put(url,
                                         data={'chunk_size': len(data)},
                                         files={'chunk': tmp_fd},
                                         verify=False,
                                         headers=fake_auth_header())
                        self.assertEquals(r.status_code, 200)
                    content += data
                    index = index + 1
                    # A short read means we just sent the last chunk.
                    if len(data) < chunk_size:
                        break
            rollback.prependDefer(os.remove, filepath + '.tmp')
            resp = self.request(uri + '/' + filename)
            self.assertEquals(200, resp.status)
            uploaded_path = json.loads(resp.read())['path']
            # The stored volume must match what was sent, byte for byte.
            with open(uploaded_path) as fd:
                uploaded_content = fd.read()
            self.assertEquals(content, uploaded_content)

        # Create storage volume with 'url'
        url = 'https://github.com/kimchi-project/kimchi/raw/master/COPYING'
        req = json.dumps({'url': url})
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  'COPYING')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            wait_task(_task_lookup, task['id'])
            resp = self.request(uri + '/COPYING')
            self.assertEquals(200, resp.status)
def test_nonroot_access(self):
    """Walk the plugin REST API as a non-root user and verify the ACLs.

    Privileged host/debug/template operations must return 403; network
    and storage-pool collections are readable but not writable; and the
    VM listing is restricted to guests the user is authorized for.
    """
    # Non-root users can not access static host information
    resp = self.request('/plugins/kimchi/host', '{}', 'GET')
    self.assertEquals(403, resp.status)

    # Non-root users can not access host stats
    resp = self.request('/plugins/kimchi/host/stats', '{}', 'GET')
    self.assertEquals(403, resp.status)

    # Non-root users can not reboot/shutdown host system
    resp = self.request('/plugins/kimchi/host/reboot', '{}', 'POST')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/host/shutdown', '{}', 'POST')
    self.assertEquals(403, resp.status)

    # Non-root users can not get or debug reports
    resp = self.request('/plugins/kimchi/debugreports', '{}', 'GET')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/debugreports', '{}', 'POST')
    self.assertEquals(403, resp.status)

    # Non-root users can not create or delete network (only get)
    resp = self.request('/plugins/kimchi/networks', '{}', 'GET')
    self.assertEquals(200, resp.status)
    resp = self.request('/plugins/kimchi/networks', '{}', 'POST')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/networks/default/activate',
                        '{}', 'POST')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/networks/default', '{}',
                        'DELETE')
    self.assertEquals(403, resp.status)

    # Non-root users can not create or delete storage pool (only get)
    resp = self.request('/plugins/kimchi/storagepools', '{}', 'GET')
    self.assertEquals(200, resp.status)
    resp = self.request('/plugins/kimchi/storagepools', '{}', 'POST')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/storagepools/default/activate',
                        '{}', 'POST')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/storagepools/default', '{}',
                        'DELETE')
    self.assertEquals(403, resp.status)

    # Non-root users can not get, create, update or delete a template
    resp = self.request('/plugins/kimchi/templates', '{}', 'GET')
    self.assertEquals(403, resp.status)
    req = json.dumps({'name': 'test', 'cdrom': fake_iso})
    resp = self.request('/plugins/kimchi/templates', req, 'POST')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/templates/test', '{}', 'PUT')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/templates/test', '{}', 'DELETE')
    self.assertEquals(403, resp.status)

    # Non-root users can only get vms authorized to them
    model.templates_create({'name': u'test', 'cdrom': fake_iso})

    task_info = model.vms_create({
        'name': u'test-me',
        'template': '/plugins/kimchi/templates/test'
    })
    wait_task(model.task_lookup, task_info['id'])
    # Grant the fake (current) user direct access to 'test-me'.
    model.vm_update(u'test-me', {
        'users': [mockmodel.fake_user.keys()[0]],
        'groups': []
    })

    task_info = model.vms_create({
        'name': u'test-usera',
        'template': '/plugins/kimchi/templates/test'
    })
    wait_task(model.task_lookup, task_info['id'])
    # 'test-usera' belongs to some other non-root user only.
    non_root = list(set(model.users_get_list()) - set(['root']))[0]
    model.vm_update(u'test-usera', {'users': [non_root], 'groups': []})

    task_info = model.vms_create({
        'name': u'test-groupa',
        'template': '/plugins/kimchi/templates/test'
    })
    wait_task(model.task_lookup, task_info['id'])
    # 'test-groupa' is authorized through group membership.
    a_group = model.groups_get_list()[0]
    model.vm_update(u'test-groupa', {'groups': [a_group]})

    resp = self.request('/plugins/kimchi/vms', '{}', 'GET')
    self.assertEquals(200, resp.status)
    vms_data = json.loads(resp.read())
    # Only the guests authorized to this user are listed.
    self.assertEquals([u'test-groupa', u'test-me'],
                      sorted([v['name'] for v in vms_data]))
    resp = self.request('/plugins/kimchi/vms', req, 'POST')
    self.assertEquals(403, resp.status)

    # Create a vm using mockmodel directly to test Resource access
    task_info = model.vms_create({
        'name': 'kimchi-test',
        'template': '/plugins/kimchi/templates/test'
    })
    wait_task(model.task_lookup, task_info['id'])
    resp = self.request('/plugins/kimchi/vms/kimchi-test', '{}', 'PUT')
    self.assertEquals(403, resp.status)
    resp = self.request('/plugins/kimchi/vms/kimchi-test', '{}', 'DELETE')
    self.assertEquals(403, resp.status)

    # Non-root users can only update VMs authorized by them
    resp = self.request('/plugins/kimchi/vms/test-me/start', '{}', 'POST')
    self.assertEquals(200, resp.status)
    resp = self.request('/plugins/kimchi/vms/test-usera/start', '{}',
                        'POST')
    self.assertEquals(403, resp.status)

    model.template_delete('test')
    model.vm_delete('test-me')
def _do_volume_test(self, model, host, ssl_port, pool_name):
    """Run the storage-volume REST scenarios against *pool_name*.

    Covers: create by 'capacity', resize (grow and shrink), wipe, clone,
    delete, chunked upload of a volume's content, and create by remote
    'url'.  Read-only pool types must reject every create with 400.
    """
    def _task_lookup(taskid):
        # Poll the task resource so wait_task() can watch its status.
        return json.loads(self.request('/tasks/%s' % taskid).read())

    uri = '/storagepools/%s/storagevolumes' % pool_name.encode('utf-8')
    resp = self.request(uri)
    self.assertEquals(200, resp.status)

    resp = self.request('/storagepools/%s' % pool_name.encode('utf-8'))
    pool_info = json.loads(resp.read())
    with RollbackContext() as rollback:
        # Create storage volume with 'capacity'
        vol = 'test-volume'
        vol_uri = uri + '/' + vol
        req = json.dumps({'name': vol, 'format': 'raw',
                          'capacity': 1073741824})  # 1 GiB
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            # Read-only pools reject volume creation outright.
            self.assertEquals(400, resp.status)
        else:
            # Register cleanup before asserting, so a failed assertion
            # still removes the volume.
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, vol)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())['id']
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request('/tasks/%s' % task_id).read())
            self.assertEquals('finished', status['status'])
            vol_info = json.loads(self.request(vol_uri).read())
            # NOTE(review): these look like they were meant to be
            # assertEquals() checks but are plain assignments, so the
            # created volume's attributes are never actually verified --
            # confirm intent before changing.
            vol_info['name'] = vol
            vol_info['format'] = 'raw'
            vol_info['capacity'] = 1073741824

            # Resize the storage volume: increase its capacity to 2 GiB
            req = json.dumps({'size': 2147483648})  # 2 GiB
            resp = self.request(vol_uri + '/resize', req, 'POST')
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(2147483648, storagevolume['capacity'])

            # Resize the storage volume: decrease its capacity to 512 MiB
            # FIXME: Due a libvirt bug it is not possible to decrease the
            # volume capacity
            # For reference:
            # - https://bugzilla.redhat.com/show_bug.cgi?id=1021802
            req = json.dumps({'size': 536870912})  # 512 MiB
            resp = self.request(vol_uri + '/resize', req, 'POST')
            # It is only possible when using MockModel
            if isinstance(model, MockModel):
                self.assertEquals(200, resp.status)
                storagevolume = json.loads(self.request(vol_uri).read())
                self.assertEquals(536870912, storagevolume['capacity'])
            else:
                self.assertEquals(500, resp.status)

            # Wipe the storage volume
            resp = self.request(vol_uri + '/wipe', '{}', 'POST')
            self.assertEquals(200, resp.status)
            storagevolume = json.loads(self.request(vol_uri).read())
            self.assertEquals(0, storagevolume['allocation'])

            # Clone the storage volume
            vol_info = json.loads(self.request(vol_uri).read())
            resp = self.request(vol_uri + '/clone', '{}', 'POST')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            cloned_vol_name = task['target_uri'].split('/')[-1]
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  cloned_vol_name)
            wait_task(_task_lookup, task['id'])
            task = json.loads(
                self.request('/tasks/%s' % task['id']).read())
            self.assertEquals('finished', task['status'])
            resp = self.request(
                uri + '/' + cloned_vol_name.encode('utf-8'))
            self.assertEquals(200, resp.status)
            cloned_vol = json.loads(resp.read())
            self.assertNotEquals(vol_info['name'], cloned_vol['name'])
            self.assertNotEquals(vol_info['path'], cloned_vol['path'])
            # Aside from name/path/allocation, the clone must be an
            # exact copy of the source volume.
            for key in ['name', 'path', 'allocation']:
                del vol_info[key]
                del cloned_vol[key]
            self.assertEquals(vol_info, cloned_vol)

            # Delete the storage volume
            resp = self.request(vol_uri, '{}', 'DELETE')
            self.assertEquals(204, resp.status)
            resp = self.request(vol_uri)
            self.assertEquals(404, resp.status)

        # Storage volume upload
        # It is done through a sequence of POST and several PUT requests
        filename = 'COPYING.LGPL'
        filepath = os.path.join(paths.get_prefix(), filename)
        filesize = os.stat(filepath).st_size

        # Create storage volume for upload
        req = json.dumps({'name': filename, 'format': 'raw',
                          'capacity': filesize, 'upload': True})
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(rollback_wrapper,
                                  model.storagevolume_delete,
                                  pool_name, filename)
            self.assertEquals(202, resp.status)
            task_id = json.loads(resp.read())['id']
            wait_task(_task_lookup, task_id)
            status = json.loads(
                self.request('/tasks/%s' % task_id).read())
            self.assertEquals('ready for upload', status['message'])

            # Upload volume content
            url = 'https://%s:%s' % (host, ssl_port) + uri + '/' + filename

            # Create a file with 5M to upload
            # Max body size is set to 4M so the upload will fail with 413
            newfile = '/tmp/5m-file'
            with open(newfile, 'wb') as fd:
                # Seek-then-write creates a sparse 5 MiB file cheaply.
                fd.seek(5*1024*1024-1)
                fd.write("\0")
            rollback.prependDefer(os.remove, newfile)

            with open(newfile, 'rb') as fd:
                with open(newfile + '.tmp', 'wb') as tmp_fd:
                    data = fd.read()
                    tmp_fd.write(data)
                with open(newfile + '.tmp', 'rb') as tmp_fd:
                    r = requests.put(url,
                                     data={'chunk_size': len(data)},
                                     files={'chunk': tmp_fd},
                                     verify=False,
                                     headers=fake_auth_header())
                    self.assertEquals(r.status_code, 413)

            # Do upload
            index = 0
            chunk_size = 2 * 1024
            content = ''
            with open(filepath, 'rb') as fd:
                # Send the file 2 KiB at a time; each chunk goes through
                # a scratch .tmp file handed to requests as multipart.
                while True:
                    with open(filepath + '.tmp', 'wb') as tmp_fd:
                        fd.seek(index*chunk_size)
                        data = fd.read(chunk_size)
                        tmp_fd.write(data)
                    with open(filepath + '.tmp', 'rb') as tmp_fd:
                        r = requests.put(url,
                                         data={'chunk_size': len(data)},
                                         files={'chunk': tmp_fd},
                                         verify=False,
                                         headers=fake_auth_header())
                        self.assertEquals(r.status_code, 200)
                    content += data
                    index = index + 1
                    # A short read means we just sent the last chunk.
                    if len(data) < chunk_size:
                        break
            rollback.prependDefer(os.remove, filepath + '.tmp')
            resp = self.request(uri + '/' + filename)
            self.assertEquals(200, resp.status)
            uploaded_path = json.loads(resp.read())['path']
            # The stored volume must match what was sent, byte for byte.
            with open(uploaded_path) as fd:
                uploaded_content = fd.read()
            self.assertEquals(content, uploaded_content)

        # Create storage volume with 'url'
        url = 'https://github.com/kimchi-project/kimchi/raw/master/COPYING'
        req = json.dumps({'url': url})
        resp = self.request(uri, req, 'POST')
        if pool_info['type'] in READONLY_POOL_TYPE:
            self.assertEquals(400, resp.status)
        else:
            rollback.prependDefer(model.storagevolume_delete, pool_name,
                                  'COPYING')
            self.assertEquals(202, resp.status)
            task = json.loads(resp.read())
            wait_task(_task_lookup, task['id'])
            resp = self.request(uri + '/COPYING')
            self.assertEquals(200, resp.status)
def test_edit_vm(self):
    """Exercise PUT /vms/<name>: valid and invalid edits of cpus, memory,
    graphics password, name, users and groups.

    Flow: create template + VM, start it, then probe each editable
    attribute with both accepted and rejected values.
    """
    # Create a template and a VM from it (VM creation is an async task)
    req = json.dumps({'name': 'test', 'cdrom': fake_iso})
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    req = json.dumps({'name': 'vm-1', 'template': '/templates/test'})
    resp = self.request('/vms', req, 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    vm = json.loads(self.request('/vms/vm-1').read())
    self.assertEquals('vm-1', vm['name'])
    # Start the VM: some edits behave differently while it is running
    resp = self.request('/vms/vm-1/start', '{}', 'POST')
    self.assertEquals(200, resp.status)
    # Unknown attributes are rejected
    req = json.dumps({'unsupported-attr': 'attr'})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    # Renaming a running VM is rejected
    req = json.dumps({'name': 'new-vm'})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    # cpus can be changed while running
    req = json.dumps({'cpus': 3})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(200, resp.status)
    # Check if there is support to memory hotplug, once vm is running
    resp = self.request('/config/capabilities').read()
    conf = json.loads(resp)
    req = json.dumps({'memory': 2048})
    resp = self.request('/vms/vm-1', req, 'PUT')
    if conf['mem_hotplug_support']:
        self.assertEquals(200, resp.status)
    else:
        self.assertEquals(400, resp.status)
    # Set a graphics password with no expiration
    req = json.dumps({"graphics": {'passwd': "abcdef"}})
    resp = self.request('/vms/vm-1', req, 'PUT')
    info = json.loads(resp.read())
    self.assertEquals('abcdef', info["graphics"]["passwd"])
    self.assertEquals(None, info["graphics"]["passwdValidTo"])
    # Power off, then set a password with an expiration time
    resp = self.request('/vms/vm-1/poweroff', '{}', 'POST')
    self.assertEquals(200, resp.status)
    req = json.dumps({"graphics": {'passwd': "123456",
                                   'passwdValidTo': 20}})
    resp = self.request('/vms/vm-1', req, 'PUT')
    info = json.loads(resp.read())
    self.assertEquals('123456', info["graphics"]["passwd"])
    # remaining validity may already have decreased, so <= 20
    self.assertGreaterEqual(20, info["graphics"]["passwdValidTo"])
    # Invalid values for name, cpus and memory are all rejected
    req = json.dumps({'name': 12})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    req = json.dumps({'name': ''})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    req = json.dumps({'cpus': -2})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    req = json.dumps({'cpus': 'four'})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    req = json.dumps({'memory': 100})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    req = json.dumps({'memory': 'ten gigas'})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    # A mix containing a forbidden attribute (UUID) fails as a whole
    req = json.dumps({'name': 'new-name', 'cpus': 5,
                      'UUID': 'notallowed'})
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(400, resp.status)
    # A valid combined edit (non-ASCII name) redirects to the new URI
    params = {'name': u'∨м-црdαtеd', 'cpus': 5, 'memory': 3072}
    req = json.dumps(params)
    resp = self.request('/vms/vm-1', req, 'PUT')
    self.assertEquals(303, resp.status)
    vm = json.loads(self.request('/vms/∨м-црdαtеd', req).read())
    for key in params.keys():
        self.assertEquals(params[key], vm[key])
    # change only VM users - groups are not changed (default is empty)
    resp = self.request('/users', '{}', 'GET')
    users = json.loads(resp.read())
    req = json.dumps({'users': users})
    resp = self.request('/vms/∨м-црdαtеd', req, 'PUT')
    self.assertEquals(200, resp.status)
    info = json.loads(self.request('/vms/∨м-црdαtеd', '{}').read())
    self.assertEquals(users, info['users'])
    # change only VM groups - users are not changed (default is empty)
    resp = self.request('/groups', '{}', 'GET')
    groups = json.loads(resp.read())
    req = json.dumps({'groups': groups})
    resp = self.request('/vms/∨м-црdαtеd', req, 'PUT')
    self.assertEquals(200, resp.status)
    info = json.loads(self.request('/vms/∨м-црdαtеd', '{}').read())
    self.assertEquals(groups, info['groups'])
    # change VM users (wrong value) and groups
    # when an error occurs, everything fails and nothing is changed
    req = json.dumps({'users': ['userdoesnotexist'], 'groups': []})
    resp = self.request('/vms/∨м-црdαtеd', req, 'PUT')
    self.assertEquals(400, resp.status)
    # change VM users and groups (wrong value)
    # when an error occurs, everything fails and nothing is changed
    req = json.dumps({'users': [], 'groups': ['groupdoesnotexist']})
    resp = self.request('/vms/∨м-црdαtеd', req, 'PUT')
    self.assertEquals(400, resp.status)
def test_vm_lifecycle(self):
    """Walk a VM through its full lifecycle over the REST API.

    Covers: template + VM creation, backing volume checks, start,
    screenshot, clone (rejected while running, accepted when off),
    snapshot create/list/lookup/revert/delete, suspend/resume, and
    final deletion of VM, template and volume.
    """
    # Create a Template
    req = json.dumps({'name': 'test', 'disks': [{'size': 1}],
                      'icon': 'images/icon-debian.png',
                      'cdrom': fake_iso})
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)
    # Create a VM (async task; wait for it to complete)
    req = json.dumps({'name': 'test-vm', 'template': '/templates/test'})
    resp = self.request('/vms', req, 'POST')
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    self.assertEquals(202, resp.status)
    # Verify the VM
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('shutoff', vm['state'])
    self.assertEquals('images/icon-debian.png', vm['icon'])
    # Verify the volume was created (1 GiB, used by our VM)
    vol_uri = '/storagepools/default-pool/storagevolumes/%s-0.img'
    resp = self.request(vol_uri % vm['uuid'])
    vol = json.loads(resp.read())
    self.assertEquals(1 << 30, vol['capacity'])
    self.assertEquals(['test-vm'], vol['used_by'])
    # Start the VM
    resp = self.request('/vms/test-vm/start', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('running', vm['state'])
    # Test screenshot
    resp = self.request(vm['screenshot'], method='HEAD')
    self.assertEquals(200, resp.status)
    self.assertTrue(resp.getheader('Content-type').startswith('image'))
    # Clone a running VM is rejected
    resp = self.request('/vms/test-vm/clone', '{}', 'POST')
    self.assertEquals(400, resp.status)
    # Force poweroff the VM
    resp = self.request('/vms/test-vm/poweroff', '{}', 'POST')
    vm = json.loads(self.request('/vms/test-vm').read())
    self.assertEquals('shutoff', vm['state'])
    # Test create VM with same name fails with 400
    req = json.dumps({'name': 'test-vm', 'template': '/templates/test'})
    resp = self.request('/vms', req, 'POST')
    self.assertEquals(400, resp.status)
    # Clone a VM (allowed while shutoff)
    resp = self.request('/vms/test-vm/clone', '{}', 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    task = json.loads(self.request('/tasks/%s' % task['id'],
                                   '{}').read())
    self.assertEquals('finished', task['status'])
    clone_vm_name = task['target_uri'].split('/')[-2]
    # raw string so \d is a regex digit class, not a string escape
    self.assertTrue(re.match(r'test-vm-clone-\d+', clone_vm_name))
    # Clone must match the original except for name and uuid
    resp = self.request('/vms/test-vm', '{}')
    original_vm_info = json.loads(resp.read())
    resp = self.request('/vms/%s' % clone_vm_name, '{}')
    self.assertEquals(200, resp.status)
    clone_vm_info = json.loads(resp.read())
    self.assertNotEqual(original_vm_info['name'], clone_vm_info['name'])
    del original_vm_info['name']
    del clone_vm_info['name']
    self.assertNotEqual(original_vm_info['uuid'], clone_vm_info['uuid'])
    del original_vm_info['uuid']
    del clone_vm_info['uuid']
    self.assertEquals(original_vm_info, clone_vm_info)
    # Create a snapshot on a stopped VM
    params = {'name': 'test-snap'}
    resp = self.request('/vms/test-vm/snapshots', json.dumps(params),
                        'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    wait_task(self._task_lookup, task['id'])
    task = json.loads(self.request('/tasks/%s' % task['id']).read())
    self.assertEquals('finished', task['status'])
    # Look up a non-existing snapshot
    resp = self.request('/vms/test-vm/snapshots/snap404', '{}', 'GET')
    self.assertEquals(404, resp.status)
    # Look up a snapshot
    resp = self.request('/vms/test-vm/snapshots/%s' % params['name'],
                        '{}', 'GET')
    self.assertEquals(200, resp.status)
    snap = json.loads(resp.read())
    self.assertTrue(int(time.time()) >= int(snap['created']))
    self.assertEquals(params['name'], snap['name'])
    self.assertEquals(u'', snap['parent'])
    self.assertEquals(u'shutoff', snap['state'])
    resp = self.request('/vms/test-vm/snapshots', '{}', 'GET')
    self.assertEquals(200, resp.status)
    snaps = json.loads(resp.read())
    self.assertEquals(1, len(snaps))
    # Look up current snapshot (the one created above)
    resp = self.request('/vms/test-vm/snapshots/current', '{}', 'GET')
    self.assertEquals(200, resp.status)
    snap = json.loads(resp.read())
    self.assertEquals(params['name'], snap['name'])
    # Create a second snapshot with a server-generated name
    resp = self.request('/vms/test-vm/snapshots', '{}', 'POST')
    self.assertEquals(202, resp.status)
    task = json.loads(resp.read())
    snap_name = task['target_uri'].split('/')[-1]
    wait_task(self._task_lookup, task['id'])
    resp = self.request('/tasks/%s' % task['id'], '{}', 'GET')
    task = json.loads(resp.read())
    self.assertEquals('finished', task['status'])
    resp = self.request('/vms/test-vm/snapshots', '{}', 'GET')
    self.assertEquals(200, resp.status)
    snaps = json.loads(resp.read())
    self.assertEquals(2, len(snaps))
    # Look up current snapshot (the one created above)
    resp = self.request('/vms/test-vm/snapshots/current', '{}', 'GET')
    self.assertEquals(200, resp.status)
    snap = json.loads(resp.read())
    self.assertEquals(snap_name, snap['name'])
    # Revert to the first snapshot; VM state must match snapshot state
    resp = self.request('/vms/test-vm/snapshots/%s/revert' %
                        params['name'], '{}', 'POST')
    self.assertEquals(200, resp.status)
    snap = json.loads(resp.read())
    resp = self.request('/vms/test-vm', '{}', 'GET')
    self.assertEquals(200, resp.status)
    vm = json.loads(resp.read())
    self.assertEquals(vm['state'], snap['state'])
    resp = self.request('/vms/test-vm/snapshots/current', '{}', 'GET')
    self.assertEquals(200, resp.status)
    current_snap = json.loads(resp.read())
    self.assertEquals(snap, current_snap)
    # Delete a snapshot
    resp = self.request('/vms/test-vm/snapshots/%s' % params['name'],
                        '{}', 'DELETE')
    self.assertEquals(204, resp.status)
    # Suspend: rejected while shutoff, allowed once running
    resp = self.request('/vms/test-vm', '{}', 'GET')
    self.assertEquals(200, resp.status)
    vm = json.loads(resp.read())
    self.assertEquals(vm['state'], 'shutoff')
    resp = self.request('/vms/test-vm/suspend', '{}', 'POST')
    self.assertEquals(400, resp.status)
    resp = self.request('/vms/test-vm/start', '{}', 'POST')
    self.assertEquals(200, resp.status)
    resp = self.request('/vms/test-vm', '{}', 'GET')
    self.assertEquals(200, resp.status)
    vm = json.loads(resp.read())
    self.assertEquals(vm['state'], 'running')
    resp = self.request('/vms/test-vm/suspend', '{}', 'POST')
    self.assertEquals(200, resp.status)
    resp = self.request('/vms/test-vm', '{}', 'GET')
    self.assertEquals(200, resp.status)
    vm = json.loads(resp.read())
    self.assertEquals(vm['state'], 'paused')
    # Resume the VM
    resp = self.request('/vms/test-vm/resume', '{}', 'POST')
    self.assertEquals(200, resp.status)
    resp = self.request('/vms/test-vm', '{}', 'GET')
    self.assertEquals(200, resp.status)
    vm = json.loads(resp.read())
    self.assertEquals(vm['state'], 'running')
    # Delete the VM
    resp = self.request('/vms/test-vm', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
    # Delete the Template
    resp = self.request('/templates/test', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
    # Verify the volume was deleted
    self.assertHTTPStatus(404, vol_uri % vm['uuid'])
def test_vm_graphics(self):
    """Verify VM graphics configuration via POST /vms.

    Covers: default graphics (vnc on 127.0.0.1), explicit type/listen,
    IPv6 listen address, type with default listen, and rejection of
    invalid type or listen values.
    """
    # Create a Template
    req = json.dumps({'name': 'test', 'cdrom': fake_iso})
    resp = self.request('/templates', req, 'POST')
    self.assertEquals(201, resp.status)

    def _create_vm(graphics=None):
        # POST a VM creation request; omit 'graphics' to exercise
        # the server-side defaults.
        body = {'name': 'test-vm', 'template': '/templates/test'}
        if graphics is not None:
            body['graphics'] = graphics
        return self.request('/vms', json.dumps(body), 'POST')

    def _verify_and_delete(resp, expected_type, expected_listen):
        # Wait for the async creation, check the resulting graphics
        # settings, then delete the VM so the name can be reused.
        self.assertEquals(202, resp.status)
        task = json.loads(resp.read())
        wait_task(self._task_lookup, task['id'])
        vm = json.loads(self.request('/vms/test-vm').read())
        self.assertEquals(expected_listen, vm['graphics']['listen'])
        self.assertEquals(expected_type, vm['graphics']['type'])
        resp = self.request('/vms/test-vm', '{}', 'DELETE')
        self.assertEquals(204, resp.status)

    # Create a VM with default args
    _verify_and_delete(_create_vm(), 'vnc', '127.0.0.1')
    # Create a VM with specified graphics type and listen
    _verify_and_delete(_create_vm({'type': 'vnc', 'listen': '127.0.0.1'}),
                       'vnc', '127.0.0.1')
    # Create a VM with listen as ipv6 address
    _verify_and_delete(_create_vm({'type': 'spice', 'listen': 'fe00::0'}),
                       'spice', 'fe00::0')
    # Create a VM with specified graphics type and default listen
    _verify_and_delete(_create_vm({'type': 'spice'}), 'spice', '127.0.0.1')
    # Try to create a VM with invalid graphics type
    resp = _create_vm({'type': 'invalid'})
    self.assertEquals(400, resp.status)
    # Try to create a VM with invalid graphics listen
    resp = _create_vm({'type': 'spice', 'listen': 'invalid'})
    self.assertEquals(400, resp.status)
    # Delete the Template
    resp = self.request('/templates/test', '{}', 'DELETE')
    self.assertEquals(204, resp.status)
def test_vm_storage_devices(self):
    """Exercise the /vms/<name>/storages collection.

    Covers: default devices of a new VM, invalid attach combinations
    (nonexistent path, path+volume together, pool without volume),
    attaching a disk by pool/vol, attaching and updating a cdrom, and
    detaching. Cleanup is deferred through RollbackContext.
    """
    with RollbackContext() as rollback:
        # Create a template as a base for our VMs
        req = json.dumps({'name': 'test', 'cdrom': fake_iso})
        resp = self.request('/templates', req, 'POST')
        self.assertEquals(201, resp.status)
        # Delete the template
        rollback.prependDefer(self.request, '/templates/test', '{}',
                              'DELETE')
        # Create a VM with default args
        req = json.dumps({'name': 'test-vm',
                          'template': '/templates/test'})
        resp = self.request('/vms', req, 'POST')
        self.assertEquals(202, resp.status)
        task = json.loads(resp.read())
        wait_task(self._task_lookup, task['id'])
        # Delete the VM
        rollback.prependDefer(self.request, '/vms/test-vm', '{}',
                              'DELETE')
        # Check storage devices: a fresh VM has one cdrom and one disk
        resp = self.request('/vms/test-vm/storages', '{}', 'GET')
        devices = json.loads(resp.read())
        self.assertEquals(2, len(devices))
        dev_types = []
        for d in devices:
            self.assertIn(u'type', d.keys())
            self.assertIn(u'dev', d.keys())
            self.assertIn(u'path', d.keys())
            dev_types.append(d['type'])
        self.assertEquals(['cdrom', 'disk'], sorted(dev_types))
        # Attach cdrom with nonexistent iso
        req = json.dumps({'dev': 'hdx', 'type': 'cdrom',
                          'path': '/tmp/nonexistent.iso'})
        resp = self.request('/vms/test-vm/storages', req, 'POST')
        self.assertEquals(400, resp.status)
        # Create temp storage pool
        req = json.dumps({'name': 'tmp', 'capacity': 1024,
                          'allocated': 512, 'path': '/tmp',
                          'type': 'dir'})
        resp = self.request('/storagepools', req, 'POST')
        self.assertEquals(201, resp.status)
        resp = self.request('/storagepools/tmp/activate', req, 'POST')
        self.assertEquals(200, resp.status)
        # 'name' is required for this type of volume
        req = json.dumps({'capacity': 1024, 'allocation': 512,
                          'type': 'disk', 'format': 'raw'})
        resp = self.request('/storagepools/tmp/storagevolumes', req,
                            'POST')
        self.assertEquals(400, resp.status)
        req = json.dumps({'name': "attach-volume", 'capacity': 1024,
                          'allocation': 512, 'type': 'disk',
                          'format': 'raw'})
        resp = self.request('/storagepools/tmp/storagevolumes', req,
                            'POST')
        self.assertEquals(202, resp.status)
        time.sleep(1)
        # Attach cdrom with both path and volume specified
        open('/tmp/existent.iso', 'w').close()
        req = json.dumps({'dev': 'hdx', 'type': 'cdrom',
                          'pool': 'tmp', 'vol': 'attach-volume',
                          'path': '/tmp/existent.iso'})
        resp = self.request('/vms/test-vm/storages', req, 'POST')
        self.assertEquals(400, resp.status)
        # Attach disk with both path and volume specified
        req = json.dumps({'dev': 'hdx', 'type': 'disk',
                          'pool': 'tmp', 'vol': 'attach-volume',
                          'path': '/tmp/existent.iso'})
        resp = self.request('/vms/test-vm/storages', req, 'POST')
        self.assertEquals(400, resp.status)
        # Attach disk with only pool specified
        req = json.dumps({'dev': 'hdx', 'type': 'cdrom',
                          'pool': 'tmp'})
        resp = self.request('/vms/test-vm/storages', req, 'POST')
        self.assertEquals(400, resp.status)
        # Attach disk with pool and vol specified
        req = json.dumps({'type': 'disk', 'pool': 'tmp',
                          'vol': 'attach-volume'})
        resp = self.request('/vms/test-vm/storages', req, 'POST')
        self.assertEquals(201, resp.status)
        cd_info = json.loads(resp.read())
        self.assertEquals('disk', cd_info['type'])
        # Attach a cdrom with existent dev name
        req = json.dumps({'type': 'cdrom',
                          'path': '/tmp/existent.iso'})
        resp = self.request('/vms/test-vm/storages', req, 'POST')
        self.assertEquals(201, resp.status)
        cd_info = json.loads(resp.read())
        cd_dev = cd_info['dev']
        self.assertEquals('cdrom', cd_info['type'])
        self.assertEquals('/tmp/existent.iso', cd_info['path'])
        # Delete the file and cdrom
        rollback.prependDefer(self.request, '/vms/test-vm/storages/hdx',
                              '{}', 'DELETE')
        os.remove('/tmp/existent.iso')
        # Change path of storage cdrom
        cdrom = u'http://fedora.mirrors.tds.net/pub/fedora/releases/20/'\
                'Live/x86_64/Fedora-Live-Desktop-x86_64-20-1.iso'
        req = json.dumps({'path': cdrom})
        resp = self.request('/vms/test-vm/storages/' + cd_dev, req,
                            'PUT')
        self.assertEquals(200, resp.status)
        cd_info = json.loads(resp.read())
        # compare only the URL path; host/scheme may be rewritten
        self.assertEquals(urlparse.urlparse(cdrom).path,
                          urlparse.urlparse(cd_info['path']).path)
        # Test GET
        devs = json.loads(self.request('/vms/test-vm/storages').read())
        self.assertEquals(4, len(devs))
        # Detach storage cdrom
        resp = self.request('/vms/test-vm/storages/' + cd_dev, '{}',
                            'DELETE')
        self.assertEquals(204, resp.status)
        # Test GET
        devs = json.loads(self.request('/vms/test-vm/storages').read())
        self.assertEquals(3, len(devs))
        # fix: body must be the JSON string '{}', not a dict, matching
        # every other self.request call in this file (an HTTP body is
        # bytes/str, never a dict)
        resp = self.request('/storagepools/tmp/deactivate', '{}',
                            'POST')
        self.assertEquals(200, resp.status)
        resp = self.request('/storagepools/tmp', '{}', 'DELETE')
        self.assertEquals(204, resp.status)
def test_vm_iface(self):
    """Exercise the /vms/<name>/ifaces collection.

    Covers: default interface of a new VM, attaching an interface
    (with and without the required 'model'), updating its MAC, and
    detaching it. Cleanup is deferred through RollbackContext.
    """
    with RollbackContext() as rollback:
        # Create a template as a base for our VMs
        req = json.dumps({'name': 'test', 'cdrom': fake_iso})
        resp = self.request('/templates', req, 'POST')
        self.assertEquals(201, resp.status)
        # Delete the template
        rollback.prependDefer(self.request, '/templates/test', '{}',
                              'DELETE')
        # Create a VM with default args
        req = json.dumps({'name': 'test-vm',
                          'template': '/templates/test'})
        resp = self.request('/vms', req, 'POST')
        self.assertEquals(202, resp.status)
        task = json.loads(resp.read())
        wait_task(self._task_lookup, task['id'])
        # Delete the VM
        rollback.prependDefer(self.request, '/vms/test-vm', '{}',
                              'DELETE')
        # Create a network
        req = json.dumps({'name': 'test-network', 'connection': 'nat',
                          'net': '127.0.1.0/24'})
        resp = self.request('/networks', req, 'POST')
        self.assertEquals(201, resp.status)
        # Delete the network
        rollback.prependDefer(self.request, '/networks/test-network',
                              '{}', 'DELETE')
        # A fresh VM has exactly one interface on the default network,
        # with a 17-char MAC (six colon-separated octets) and the
        # template's default NIC model
        ifaces = json.loads(self.request('/vms/test-vm/ifaces').read())
        self.assertEquals(1, len(ifaces))
        for iface in ifaces:
            res = json.loads(self.request('/vms/test-vm/ifaces/%s' %
                                          iface['mac']).read())
            self.assertEquals('default', res['network'])
            self.assertEquals(17, len(res['mac']))
            self.assertEquals(get_template_default('old', 'nic_model'),
                              res['model'])
        # try to attach an interface without specifying 'model'
        req = json.dumps({'type': 'network'})
        resp = self.request('/vms/test-vm/ifaces', req, 'POST')
        self.assertEquals(400, resp.status)
        # attach network interface to vm
        req = json.dumps({"type": "network",
                          "network": "test-network",
                          "model": "virtio"})
        resp = self.request('/vms/test-vm/ifaces', req, 'POST')
        self.assertEquals(201, resp.status)
        iface = json.loads(resp.read())
        self.assertEquals('test-network', iface['network'])
        self.assertEquals(17, len(iface['mac']))
        self.assertEquals('virtio', iface['model'])
        self.assertEquals('network', iface['type'])
        # update vm interface: changing the MAC moves the resource to a
        # new URI, hence the 303 redirect
        newMacAddr = '54:50:e3:44:8a:af'
        req = json.dumps({"network": "default", "model": "virtio",
                          "type": "network", "mac": newMacAddr})
        resp = self.request('/vms/test-vm/ifaces/%s' % iface['mac'],
                            req, 'PUT')
        self.assertEquals(303, resp.status)
        iface = json.loads(self.request('/vms/test-vm/ifaces/%s' %
                                        newMacAddr).read())
        self.assertEquals(newMacAddr, iface['mac'])
        # detach network interface from vm
        resp = self.request('/vms/test-vm/ifaces/%s' % iface['mac'],
                            '{}', 'DELETE')
        self.assertEquals(204, resp.status)