class AuditdispTests(unittest.TestCase):
    """Tests for the audit dispatcher (audispd) model wrapper."""

    @mock.patch('wok.plugins.ginger.model.auditdisp.'
                'AuditdispModel.get_auditdisp_conf')
    def test_get_auditdisp_conf(self, mock_conf):
        """
        Unittest to get audit dispatcher conf
        """
        expected_conf = {
            'overflow_action': 'SYSLOG',
            'priority_boost': '4',
            'q_depth': '150',
            'max_restarts': '10',
            'name_format': 'HOSTNAME',
        }
        fake_open = mock.mock_open()
        # Patch the module-level open() so no real conf file is touched.
        with mock.patch('wok.plugins.ginger.model.utils.open', fake_open,
                        create=True):
            dispatcher = AuditdispModel()
            mock_conf.return_value = expected_conf
            self.assertEqual(dispatcher.get_auditdisp_conf(), expected_conf)

    @mock.patch('wok.plugins.ginger.model.utils.write_to_conf')
    def test_write_to_conf(self, mock_write):
        """
        Unit test to write data to audisp conf file.
        """
        conf_text = ('q_depth = 150\n'
                     'overflow_action = SYSLOG\n'
                     'priority_boost = 4\n'
                     'max_restarts = 10\n'
                     'name_format = HOSTNAME\n'
                     '#name = mydomain\n')
        conf_path = '/etc/audisp/abc'
        fake_open = mock.mock_open(read_data=conf_text)
        with mock.patch('wok.plugins.ginger.model.utils.open', fake_open,
                        create=True):
            mock_write.return_value = {}
            # Smoke test: the call must complete against the mocked file;
            # no explicit assertion was made in the original either.
            write_to_conf("dummy", "23", conf_path)

    @mock.patch('wok.plugins.ginger.model.auditdisp.del_lines_of_attribute')
    @mock.patch('wok.plugins.ginger.model.auditdisp.write_to_conf')
    @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
    def test_update(self, mock_write, mock_del):
        """
        Unit test to update data to audisp conf file.
        """
        update_params = {"priority_boost": "34", "q_depth": "13"}
        mock_del.return_value = {}
        mock_write.return_value = {}
        dispatcher = AuditdispModel()
        dispatcher.update("audit", update_params)
def running_root_and_remoteserver_defined():
    """
    Report whether remote-server tests may run: the process must be
    root AND a remote server environment must be configured.
    Short-circuits, so the environment check is skipped for non-root.
    """
    is_root = running_as_root()
    return is_root and remoteserver_environment_defined()
class LibvirtEventsTests(unittest.TestCase):
    """Exercise libvirt domain-event callbacks through a full VM lifecycle."""

    def setUp(self):
        # NOTE(review): tempfile.mktemp() is deprecated/race-prone; it only
        # generates a name here and the objstore creates the file itself,
        # so it is kept for compatibility.
        self.tmp_store = tempfile.mktemp()

    def tearDown(self):
        # The store file only exists if the test actually built a Model
        # (e.g. it is never created when the test is skipped mid-way), so
        # guard the unlink to avoid masking the real failure with an OSError.
        if os.path.exists(self.tmp_store):
            os.unlink(self.tmp_store)

    def domain_event_lifecycle_cb(self, conn, dom, event, detail, *args):
        """
        Callback to handle Domain (VMs) events - VM Livecycle.

        Maps the numeric libvirt (event, detail) pair onto human-readable
        strings and appends the resulting JSON record to the event store.
        """
        evStrings = ("Defined", "Undefined", "Started", "Suspended",
                     "Resumed", "Stopped", "Shutdown", "PMSuspended",
                     "Crashed")
        evDetails = (("Added", "Updated"),
                     ("Removed", ),
                     ("Booted", "Migrated", "Restored", "Snapshot", "Wakeup"),
                     ("Paused", "Migrated", "IOError", "Watchdog", "Restored",
                      "Snapshot", "API error"),
                     ("Unpaused", "Migrated", "Snapshot"),
                     ("Shutdown", "Destroyed", "Crashed", "Migrated", "Saved",
                      "Failed", "Snapshot"),
                     ("Finished", ),
                     ("Memory", "Disk"),
                     # BUG FIX: this entry was ("Panicked") -- a parenthesized
                     # string, not a tuple -- so a "Crashed" event would report
                     # a single character (e.g. 'P') as its detail. The
                     # trailing comma makes it a proper 1-tuple.
                     ("Panicked", ))
        data = {
            'domain': dom.name(),
            'event': evStrings[event],
            'event_detail': evDetails[event][detail]
        }
        _store_event('%s|%s' % (_get_next_event_id(), json.dumps(data)))

    def domain_event_reboot_cb(self, conn, dom, *args):
        """
        Callback to handle Domain (VMs) events - VM Reboot.
        """
        data = {'domain': dom.name(), 'event': 'Rebooted'}
        _store_event('%s|%s' % (_get_next_event_id(), json.dumps(data)))

    @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
    def test_events_vm_lifecycle(self):
        """Walk a VM through define/start/suspend/resume/reboot/poweroff/
        delete and check each emitted event record."""
        inst = model.Model(objstore_loc=self.tmp_store)
        self.objstore = inst.objstore
        conn = inst.conn.get()

        # Create a template and VM to test, and start lifecycle tests
        with RollbackContext() as rollback:
            # Register the most common Libvirt domain events to be handled.
            event_map = [(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                          self.domain_event_lifecycle_cb),
                         (libvirt.VIR_DOMAIN_EVENT_ID_REBOOT,
                          self.domain_event_reboot_cb)]
            for event, event_cb in event_map:
                ev_id = conn.domainEventRegisterAny(None, event, event_cb,
                                                    None)
                rollback.prependDefer(conn.domainEventDeregisterAny, ev_id)

            # Create a template
            template_params = {
                'name': 'ttest',
                'source_media': {'type': 'disk', 'path': UBUNTU_ISO}
            }
            inst.templates_create(template_params)
            rollback.prependDefer(inst.template_delete, 'ttest')

            # Create a VM (guest)
            vm_params = {'name': 'kimchi-vm1',
                         'template': '/plugins/kimchi/templates/ttest'}
            task = inst.vms_create(vm_params)
            inst.task_wait(task['id'], 10)
            task = inst.task_lookup(task['id'])
            self.assertEquals('finished', task['status'])
            # Events are delivered asynchronously; give libvirt time to fire.
            time.sleep(5)

            # Check event of domain definition (addition)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Defined', res['event'])
            self.assertEquals('Added', res['event_detail'])

            # Start the VM and check the event
            inst.vm_start('kimchi-vm1')
            time.sleep(5)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Started', res['event'])
            self.assertEquals('Booted', res['event_detail'])

            # Suspend the VM and check the event
            inst.vm_suspend('kimchi-vm1')
            time.sleep(5)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Suspended', res['event'])
            self.assertEquals('Paused', res['event_detail'])

            # Resume the VM and check the event
            inst.vm_resume('kimchi-vm1')
            time.sleep(5)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Resumed', res['event'])
            self.assertEquals('Unpaused', res['event_detail'])

            # Reboot the VM and check the event (reboot has no detail field)
            inst.vm_reset('kimchi-vm1')
            time.sleep(5)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Rebooted', res['event'])

            # PowerOff (hard stop) the VM and check the event
            inst.vm_poweroff('kimchi-vm1')
            time.sleep(5)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Stopped', res['event'])
            self.assertEquals('Destroyed', res['event_detail'])

            # Delete the VM and check the event
            inst.vm_delete('kimchi-vm1')
            time.sleep(5)
            res = json.loads(_get_event(str(_get_event_id())))
            self.assertEquals('kimchi-vm1', res['domain'])
            self.assertEquals('Undefined', res['event'])
            self.assertEquals('Removed', res['event_detail'])
class ModelTests(unittest.TestCase): def setUp(self): self.tmp_store = '/tmp/kimchi-store-test' def tearDown(self): # FIXME: Tests using 'test:///default' URI should be moved to # test_rest or test_mockmodel to avoid overriding problems LibvirtConnection._connections['test:///default'] = {} os.unlink(self.tmp_store) def test_vm_info(self): inst = model.Model('test:///default', self.tmp_store) vms = inst.vms_get_list() self.assertEquals(1, len(vms)) self.assertEquals('test', vms[0]) keys = set(('name', 'state', 'stats', 'uuid', 'memory', 'cpus', 'screenshot', 'icon', 'graphics', 'users', 'groups', 'access', 'persistent')) stats_keys = set(('cpu_utilization', 'mem_utilization', 'net_throughput', 'net_throughput_peak', 'io_throughput', 'io_throughput_peak')) info = inst.vm_lookup('test') self.assertEquals(keys, set(info.keys())) self.assertEquals('running', info['state']) self.assertEquals('test', info['name']) self.assertEquals(2048, info['memory']) self.assertEquals(2, info['cpus']) self.assertEquals(None, info['icon']) self.assertEquals(stats_keys, set(info['stats'].keys())) self.assertRaises(NotFoundError, inst.vm_lookup, 'nosuchvm') self.assertEquals([], info['users']) self.assertEquals([], info['groups']) self.assertTrue(info['persistent']) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_lifecycle(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: vol_params = {'name': u'test-vol', 'capacity': 1024} task = inst.storagevolumes_create(u'default', vol_params) rollback.prependDefer(inst.storagevolume_delete, u'default', vol_params['name']) inst.task_wait(task['id']) task = inst.task_lookup(task['id']) self.assertEquals('finished', task['status']) vol = inst.storagevolume_lookup(u'default', vol_params['name']) params = {'name': 'test', 'disks': [{'base': vol['path'], 'size': 1}], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = 
{'name': 'kimchi-vm', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) rollback.prependDefer(inst.vm_delete, 'kimchi-vm') inst.task_wait(task['id'], 10) task = inst.task_lookup(task['id']) self.assertEquals('finished', task['status']) vms = inst.vms_get_list() self.assertTrue('kimchi-vm' in vms) inst.vm_start('kimchi-vm') info = inst.vm_lookup('kimchi-vm') self.assertEquals('running', info['state']) self.assertRaises(InvalidOperation, inst.vmsnapshots_create, u'kimchi-vm') inst.vm_poweroff(u'kimchi-vm') vm = inst.vm_lookup(u'kimchi-vm') empty_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') self.assertEquals({}, empty_snap) # this snapshot should be deleted when its VM is deleted params = {'name': u'mysnap'} task = inst.vmsnapshots_create(u'kimchi-vm', params) inst.task_wait(task['id']) task = inst.task_lookup(task['id']) self.assertEquals('finished', task['status']) self.assertRaises(NotFoundError, inst.vmsnapshot_lookup, u'kimchi-vm', u'foobar') snap = inst.vmsnapshot_lookup(u'kimchi-vm', params['name']) self.assertTrue(int(time.time()) >= int(snap['created'])) self.assertEquals(vm['state'], snap['state']) self.assertEquals(params['name'], snap['name']) self.assertEquals(u'', snap['parent']) snaps = inst.vmsnapshots_get_list(u'kimchi-vm') self.assertEquals([params['name']], snaps) current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') self.assertEquals(snap, current_snap) task = inst.vmsnapshots_create(u'kimchi-vm') snap_name = task['target_uri'].split('/')[-1] rollback.prependDefer(inst.vmsnapshot_delete, u'kimchi-vm', snap_name) inst.task_wait(task['id']) task = inst.task_lookup(task['id']) self.assertEquals('finished', task['status']) snaps = inst.vmsnapshots_get_list(u'kimchi-vm') self.assertEquals(sorted([params['name'], snap_name], key=unicode.lower), snaps) snap = inst.vmsnapshot_lookup(u'kimchi-vm', snap_name) current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') self.assertEquals(snap, current_snap) # update vm name 
inst.vm_update('kimchi-vm', {'name': u'kimchi-vm-new'}) # Look up the first created snapshot from the renamed vm snap = inst.vmsnapshot_lookup(u'kimchi-vm-new', params['name']) # snapshot revert to the first created vm result = inst.vmsnapshot_revert(u'kimchi-vm-new', params['name']) self.assertEquals(result, [u'kimchi-vm', snap['name']]) vm = inst.vm_lookup(u'kimchi-vm') self.assertEquals(vm['state'], snap['state']) current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') self.assertEquals(params['name'], current_snap['name']) self.assertRaises(NotFoundError, inst.vmsnapshot_delete, u'kimchi-vm', u'foobar') # suspend and resume the VM info = inst.vm_lookup(u'kimchi-vm') self.assertEquals(info['state'], 'shutoff') self.assertRaises(InvalidOperation, inst.vm_suspend, u'kimchi-vm') inst.vm_start(u'kimchi-vm') info = inst.vm_lookup(u'kimchi-vm') self.assertEquals(info['state'], 'running') inst.vm_suspend(u'kimchi-vm') info = inst.vm_lookup(u'kimchi-vm') self.assertEquals(info['state'], 'paused') self.assertRaises(InvalidParameter, inst.vm_update, u'kimchi-vm', {'name': 'foo'}) inst.vm_resume(u'kimchi-vm') info = inst.vm_lookup(u'kimchi-vm') self.assertEquals(info['state'], 'running') self.assertRaises(InvalidOperation, inst.vm_resume, u'kimchi-vm') # leave the VM suspended to make sure a paused VM can be # deleted correctly inst.vm_suspend(u'kimchi-vm') vms = inst.vms_get_list() self.assertFalse('kimchi-vm' in vms) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_image_based_template(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: vol = 'base-vol.img' params = {'name': vol, 'capacity': 1073741824, # 1 GiB 'allocation': 1048576, # 1 MiB 'format': 'qcow2'} task_id = inst.storagevolumes_create('default', params)['id'] rollback.prependDefer(inst.storagevolume_delete, 'default', vol) inst.task_wait(task_id) self.assertEquals('finished', inst.task_lookup(task_id)['status']) vol_path = 
inst.storagevolume_lookup('default', vol)['path'] # Hack the model objstore to add a new template # It is needed as the image file must be a bootable image when # using model # As it is difficult to create one on test runtime, inject a # template with an empty image file to the objstore to test the # feature tmpl_name = "img-tmpl" tmpl_info = {"cpus": 1, "cdrom": "", "graphics": {"type": "vnc", "listen": "127.0.0.1"}, "networks": ["default"], "memory": 1024, "folder": [], "icon": "images/icon-vm.png", "os_distro": "unknown", "os_version": "unknown", "disks": [{"base": vol_path, "size": 10}], "storagepool": "/plugins/kimchi/storagepools/default"} with inst.objstore as session: session.store('template', tmpl_name, tmpl_info, get_kimchi_version()) params = {'name': 'kimchi-vm', 'template': '/plugins/kimchi/templates/img-tmpl'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-vm') vms = inst.vms_get_list() self.assertTrue('kimchi-vm' in vms) inst.vm_start('kimchi-vm') rollback.prependDefer(inst.vm_poweroff, 'kimchi-vm') info = inst.vm_lookup('kimchi-vm') self.assertEquals('running', info['state']) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_graphics(self): inst = model.Model(objstore_loc=self.tmp_store) params = {'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO} inst.templates_create(params) with RollbackContext() as rollback: params = {'name': 'kimchi-vnc', 'template': '/plugins/kimchi/templates/test'} task1 = inst.vms_create(params) inst.task_wait(task1['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-vnc') info = inst.vm_lookup('kimchi-vnc') self.assertEquals('vnc', info['graphics']['type']) self.assertEquals('127.0.0.1', info['graphics']['listen']) graphics = {'type': 'spice', 'listen': '127.0.0.1'} params = {'name': 'kimchi-spice', 'template': '/plugins/kimchi/templates/test', 'graphics': graphics} task2 = inst.vms_create(params) inst.task_wait(task2['id']) 
rollback.prependDefer(inst.vm_delete, 'kimchi-spice') info = inst.vm_lookup('kimchi-spice') self.assertEquals('spice', info['graphics']['type']) self.assertEquals('127.0.0.1', info['graphics']['listen']) inst.template_delete('test') @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_ifaces(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: params = {'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') # Create a network net_name = 'test-network' net_args = {'name': net_name, 'connection': 'nat', 'subnet': '127.0.100.0/24'} inst.networks_create(net_args) rollback.prependDefer(inst.network_delete, net_name) inst.network_activate(net_name) rollback.prependDefer(inst.network_deactivate, net_name) for vm_name in ['kimchi-ifaces', 'kimchi-ifaces-running']: params = {'name': vm_name, 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, vm_name) ifaces = inst.vmifaces_get_list(vm_name) self.assertEquals(1, len(ifaces)) iface = inst.vmiface_lookup(vm_name, ifaces[0]) self.assertEquals(17, len(iface['mac'])) self.assertEquals("default", iface['network']) self.assertIn("model", iface) # attach network interface to vm iface_args = {"type": "network", "network": "test-network", "model": "virtio"} mac = inst.vmifaces_create(vm_name, iface_args) # detach network interface from vm rollback.prependDefer(inst.vmiface_delete, vm_name, mac) self.assertEquals(17, len(mac)) iface = inst.vmiface_lookup(vm_name, mac) self.assertEquals("network", iface["type"]) self.assertEquals("test-network", iface['network']) self.assertEquals("virtio", iface["model"]) # attach network interface to vm without providing model iface_args = {"type": "network", "network": "test-network"} mac = inst.vmifaces_create(vm_name, iface_args) 
rollback.prependDefer(inst.vmiface_delete, vm_name, mac) iface = inst.vmiface_lookup(vm_name, mac) self.assertEquals("network", iface["type"]) self.assertEquals("test-network", iface['network']) # update vm interface newMacAddr = '54:50:e3:44:8a:af' iface_args = {"mac": newMacAddr} inst.vmiface_update(vm_name, mac, iface_args) iface = inst.vmiface_lookup(vm_name, newMacAddr) self.assertEquals(newMacAddr, iface['mac']) # undo mac address change iface_args = {"mac": mac} inst.vmiface_update(vm_name, newMacAddr, iface_args) iface = inst.vmiface_lookup(vm_name, mac) self.assertEquals(mac, iface['mac']) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_disk(self): disk_path = os.path.join(TMP_DIR, 'existent2.iso') open(disk_path, 'w').close() modern_disk_bus = osinfo.get_template_default('modern', 'disk_bus') def _attach_disk(expect_bus=modern_disk_bus): disk_args = {"type": "disk", "pool": pool, "vol": vol} disk = inst.vmstorages_create(vm_name, disk_args) storage_list = inst.vmstorages_get_list(vm_name) self.assertEquals(prev_count + 1, len(storage_list)) # Check the bus type to be 'virtio' disk_info = inst.vmstorage_lookup(vm_name, disk) self.assertEquals(u'disk', disk_info['type']) self.assertEquals(vol_path, disk_info['path']) self.assertEquals(expect_bus, disk_info['bus']) return disk inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: path = os.path.join(TMP_DIR, 'kimchi-images') pool = 'test-pool' vol = 'test-volume.img' vol_path = "%s/%s" % (path, vol) if not os.path.exists(path): os.mkdir(path) rollback.prependDefer(shutil.rmtree, path) args = {'name': pool, 'path': path, 'type': 'dir'} inst.storagepools_create(args) rollback.prependDefer(inst.storagepool_delete, pool) # Activate the pool before adding any volume inst.storagepool_activate(pool) rollback.prependDefer(inst.storagepool_deactivate, pool) params = {'name': vol, 'capacity': 1073741824, # 1 GiB 'allocation': 536870912, # 512 MiB 'format': 
'qcow2'} task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) inst.task_wait(task_id) vm_name = 'kimchi-cdrom' params = {'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': vm_name, 'template': '/plugins/kimchi/templates/test'} task1 = inst.vms_create(params) inst.task_wait(task1['id']) rollback.prependDefer(inst.vm_delete, vm_name) prev_count = len(inst.vmstorages_get_list(vm_name)) self.assertEquals(1, prev_count) # Volume format with mismatched type raise error cdrom_args = {"type": "cdrom", "pool": pool, "vol": vol} self.assertRaises(InvalidParameter, inst.vmstorages_create, vm_name, cdrom_args) # Cold plug and unplug a disk disk = _attach_disk() inst.vmstorage_delete(vm_name, disk) # Hot plug a disk inst.vm_start(vm_name) disk = _attach_disk() # VM disk still there after powered off inst.vm_poweroff(vm_name) disk_info = inst.vmstorage_lookup(vm_name, disk) self.assertEquals(u'disk', disk_info['type']) inst.vmstorage_delete(vm_name, disk) # Specifying pool and path at same time will fail disk_args = {"type": "disk", "pool": pool, "vol": vol, "path": disk_path} self.assertRaises( InvalidParameter, inst.vmstorages_create, vm_name, disk_args) old_distro_iso = TMP_DIR + 'rhel4_8.iso' iso_gen.construct_fake_iso(old_distro_iso, True, '4.8', 'rhel') vm_name = 'kimchi-ide-bus-vm' params = {'name': 'old_distro_template', 'disks': [], 'cdrom': old_distro_iso} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'old_distro_template') params = { 'name': vm_name, 'template': '/plugins/kimchi/templates/old_distro_template' } task2 = inst.vms_create(params) inst.task_wait(task2['id']) rollback.prependDefer(inst.vm_delete, vm_name) # Need to check the right disk_bus for old distro disk = _attach_disk(osinfo.get_template_default('old', 'disk_bus')) inst.vmstorage_delete('kimchi-ide-bus-vm', disk) 
# Hot plug IDE bus disk does not work inst.vm_start(vm_name) self.assertRaises(InvalidOperation, _attach_disk) inst.vm_poweroff(vm_name) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_cdrom(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: vm_name = 'kimchi-cdrom' params = {'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': vm_name, 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, vm_name) prev_count = len(inst.vmstorages_get_list(vm_name)) self.assertEquals(1, prev_count) # dummy .iso files iso_path = os.path.join(TMP_DIR, 'existent.iso') iso_path2 = os.path.join(TMP_DIR, 'existent2.iso') open(iso_path, 'w').close() rollback.prependDefer(os.remove, iso_path) open(iso_path2, 'w').close() rollback.prependDefer(os.remove, iso_path2) wrong_iso_path = '/nonexistent.iso' # Create a cdrom cdrom_args = {"type": "cdrom", "path": iso_path} cdrom_dev = inst.vmstorages_create(vm_name, cdrom_args) storage_list = inst.vmstorages_get_list(vm_name) self.assertEquals(prev_count + 1, len(storage_list)) # Get cdrom info cd_info = inst.vmstorage_lookup(vm_name, cdrom_dev) self.assertEquals(u'cdrom', cd_info['type']) self.assertEquals(iso_path, cd_info['path']) # update path of existing cd with # non existent iso self.assertRaises(InvalidParameter, inst.vmstorage_update, vm_name, cdrom_dev, {'path': wrong_iso_path}) # Make sure CD ROM still exists after failure cd_info = inst.vmstorage_lookup(vm_name, cdrom_dev) self.assertEquals(u'cdrom', cd_info['type']) self.assertEquals(iso_path, cd_info['path']) # update path of existing cd with existent iso of shutoff vm inst.vmstorage_update(vm_name, cdrom_dev, {'path': iso_path2}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) self.assertEquals(iso_path2, 
cdrom_info['path']) # update path of existing cd with existent iso of running vm inst.vm_start(vm_name) inst.vmstorage_update(vm_name, cdrom_dev, {'path': iso_path}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) self.assertEquals(iso_path, cdrom_info['path']) # eject cdrom cdrom_dev = inst.vmstorage_update(vm_name, cdrom_dev, {'path': ''}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) self.assertEquals('', cdrom_info['path']) inst.vm_poweroff(vm_name) # removing non existent cdrom self.assertRaises(NotFoundError, inst.vmstorage_delete, vm_name, "fakedev") # removing valid cdrom inst.vmstorage_delete(vm_name, cdrom_dev) storage_list = inst.vmstorages_get_list(vm_name) self.assertEquals(prev_count, len(storage_list)) # Create a new cdrom using a remote iso valid_remote_iso_path = utils.get_remote_iso_path() cdrom_args = {"type": "cdrom", "path": valid_remote_iso_path} cdrom_dev = inst.vmstorages_create(vm_name, cdrom_args) storage_list = inst.vmstorages_get_list(vm_name) self.assertEquals(prev_count + 1, len(storage_list)) # Update remote-backed cdrom with the same ISO inst.vmstorage_update(vm_name, cdrom_dev, {'path': valid_remote_iso_path}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) cur_cdrom_path = re.sub(":80/", '/', cdrom_info['path']) self.assertEquals(valid_remote_iso_path, cur_cdrom_path) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_storage_provisioning(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: params = {'name': 'test', 'disks': [{'size': 1}], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': 'test-vm-1', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'test-vm-1') vm_info = inst.vm_lookup(params['name']) disk_path = '%s/%s-0.img' % ( inst.storagepool_lookup('default')['path'], 
vm_info['uuid']) self.assertTrue(os.access(disk_path, os.F_OK)) self.assertFalse(os.access(disk_path, os.F_OK)) def _create_template_conf_with_disk_format(self, vol_format): if vol_format is None: conf_file_data = "[main]\n\n[storage]\n\n[[disk.0]]\n" \ "#format = \n\n[graphics]\n\n[processor]\n" else: conf_file_data = "[main]\n\n[storage]\n\n[[disk.0]]\n" \ "format = %s\n\n[graphics]\n\n[processor]\n"\ % vol_format config_file = os.path.join(paths.conf_dir, 'template.conf') config_bkp_file = \ os.path.join(paths.conf_dir, 'template.conf-unit_test_bkp') os.rename(config_file, config_bkp_file) with open(config_file, 'w') as f: f.write(conf_file_data) osinfo.defaults = osinfo._get_tmpl_defaults() def _restore_template_conf_file(self): config_file = os.path.join(paths.conf_dir, 'template.conf') config_bkp_file = \ os.path.join(paths.conf_dir, 'template.conf-unit_test_bkp') os.rename(config_bkp_file, config_file) osinfo.defaults = osinfo._get_tmpl_defaults() def _get_disk_format_from_vm(self, vm, conn): dom = VMModel.get_vm(vm, conn) xml = dom.XMLDesc(0) xpath = "/domain/devices/disk[@device='disk']/driver/@type" return xpath_get_text(xml, xpath)[0] @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_template_get_default_vol_format_from_conf(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: self._create_template_conf_with_disk_format('vmdk') rollback.prependDefer(self._restore_template_conf_file) params = {'name': 'test', 'disks': [{'size': 1}], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': 'test-vm-1', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'test-vm-1') created_disk_format = self._get_disk_format_from_vm( 'test-vm-1', inst.conn ) self.assertEqual(created_disk_format, 'vmdk') @unittest.skipUnless(utils.running_as_root(), 'Must 
be run as root') def test_template_creates_user_defined_vol_format_instead_default(self): inst = model.Model(objstore_loc=self.tmp_store) default_vol = 'vmdk' user_vol = 'raw' with RollbackContext() as rollback: self._create_template_conf_with_disk_format(default_vol) rollback.prependDefer(self._restore_template_conf_file) params = {'name': 'test', 'disks': [{'size': 1, 'format': user_vol}], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': 'test-vm-1', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'test-vm-1') created_disk_format = self._get_disk_format_from_vm( 'test-vm-1', inst.conn ) self.assertEqual(created_disk_format, user_vol) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_template_uses_qcow2_format_if_no_user_or_default_defined(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: self._create_template_conf_with_disk_format(None) rollback.prependDefer(self._restore_template_conf_file) params = {'name': 'test', 'disks': [{'size': 1}], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': 'test-vm-1', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'test-vm-1') created_disk_format = self._get_disk_format_from_vm( 'test-vm-1', inst.conn ) self.assertEqual(created_disk_format, 'qcow2') def test_vm_memory_hotplug(self): config.set("authentication", "method", "pam") inst = model.Model(None, objstore_loc=self.tmp_store) orig_params = {'name': 'test', 'memory': 1024, 'cdrom': UBUNTU_ISO} inst.templates_create(orig_params) with RollbackContext() as rollback: params = {'name': 'kimchi-vm1', 'template': '/plugins/kimchi/templates/test'} task1 = inst.vms_create(params) 
inst.task_wait(task1['id']) rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, 'kimchi-vm1') # Start vm inst.vm_start('kimchi-vm1') rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, 'kimchi-vm1') # Hotplug memory, only available in Libvirt >= 1.2.14 params = {'memory': 2048} if inst.capabilities_lookup()['mem_hotplug_support']: inst.vm_update('kimchi-vm1', params) rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, 'kimchi-vm1') self.assertEquals(params['memory'], inst.vm_lookup('kimchi-vm1')['memory']) else: self.assertRaises(InvalidOperation, inst.vm_update, 'kimchi-vm1', params) def test_vm_edit(self): config.set("authentication", "method", "pam") inst = model.Model(None, objstore_loc=self.tmp_store) # template disk format must be qcow2 because vmsnapshot # only supports this format orig_params = {'name': 'test', 'memory': 1024, 'cpus': 1, 'cdrom': UBUNTU_ISO, 'disks': [{'size': 1, 'format': 'qcow2'}]} inst.templates_create(orig_params) with RollbackContext() as rollback: params_1 = {'name': 'kimchi-vm1', 'template': '/plugins/kimchi/templates/test'} params_2 = {'name': 'kimchi-vm2', 'template': '/plugins/kimchi/templates/test'} task1 = inst.vms_create(params_1) inst.task_wait(task1['id']) rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, 'kimchi-vm1') task2 = inst.vms_create(params_2) inst.task_wait(task2['id']) rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, 'kimchi-vm2') vms = inst.vms_get_list() self.assertTrue('kimchi-vm1' in vms) # make sure "vm_update" works when the domain has a snapshot inst.vmsnapshots_create(u'kimchi-vm1') # update vm graphics when vm is not running inst.vm_update(u'kimchi-vm1', {"graphics": {"passwd": "123456"}}) inst.vm_start('kimchi-vm1') rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, 'kimchi-vm1') vm_info = inst.vm_lookup(u'kimchi-vm1') self.assertEquals('123456', vm_info['graphics']["passwd"]) self.assertEquals(None, 
vm_info['graphics']["passwdValidTo"]) # update vm graphics when vm is running inst.vm_update(u'kimchi-vm1', {"graphics": {"passwd": "abcdef", "passwdValidTo": 20}}) vm_info = inst.vm_lookup(u'kimchi-vm1') self.assertEquals('abcdef', vm_info['graphics']["passwd"]) self.assertGreaterEqual(20, vm_info['graphics']['passwdValidTo']) info = inst.vm_lookup('kimchi-vm1') self.assertEquals('running', info['state']) params = {'name': 'new-vm'} self.assertRaises(InvalidParameter, inst.vm_update, 'kimchi-vm1', params) # change VM users and groups, when wm is running. inst.vm_update(u'kimchi-vm1', {'users': ['root'], 'groups': ['root']}) vm_info = inst.vm_lookup(u'kimchi-vm1') self.assertEquals(['root'], vm_info['users']) self.assertEquals(['root'], vm_info['groups']) # change VM users and groups by removing all elements, # when wm is running. inst.vm_update(u'kimchi-vm1', {'users': [], 'groups': []}) vm_info = inst.vm_lookup(u'kimchi-vm1') self.assertEquals([], vm_info['users']) self.assertEquals([], vm_info['groups']) inst.vm_poweroff('kimchi-vm1') self.assertRaises(OperationFailed, inst.vm_update, 'kimchi-vm1', {'name': 'kimchi-vm2'}) params = {'name': u'пeω-∨м', 'cpus': 4, 'memory': 2048} inst.vm_update('kimchi-vm1', params) rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, u'пeω-∨м') self.assertEquals(info['uuid'], inst.vm_lookup(u'пeω-∨м')['uuid']) info = inst.vm_lookup(u'пeω-∨м') for key in params.keys(): self.assertEquals(params[key], info[key]) # change only VM users - groups are not changed (default is empty) users = inst.users_get_list()[:3] inst.vm_update(u'пeω-∨м', {'users': users}) self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['groups']) # change only VM groups - users are not changed (default is empty) groups = inst.groups_get_list()[:2] inst.vm_update(u'пeω-∨м', {'groups': groups}) self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) self.assertEquals(groups, 
inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users and groups by adding a new element to each one users.append(pwd.getpwuid(os.getuid()).pw_name) groups.append(grp.getgrgid(os.getgid()).gr_name) inst.vm_update(u'пeω-∨м', {'users': users, 'groups': groups}) self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users (wrong value) and groups # when an error occurs, everything fails and nothing is changed self.assertRaises(InvalidParameter, inst.vm_update, u'пeω-∨м', {'users': ['userdoesnotexist'], 'groups': []}) self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users and groups (wrong value) # when an error occurs, everything fails and nothing is changed self.assertRaises(InvalidParameter, inst.vm_update, u'пeω-∨м', {'users': [], 'groups': ['groupdoesnotexist']}) self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users and groups by removing all elements inst.vm_update(u'пeω-∨м', {'users': [], 'groups': []}) self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['users']) self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['groups']) def test_get_interfaces(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) expected_ifaces = netinfo.all_favored_interfaces() ifaces = inst.interfaces_get_list() self.assertEquals(len(expected_ifaces), len(ifaces)) for name in expected_ifaces: iface = inst.interface_lookup(name) self.assertEquals(iface['name'], name) self.assertIn('type', iface) self.assertIn('status', iface) self.assertIn('ipaddr', iface) self.assertIn('netmask', iface) def test_async_tasks(self): class task_except(Exception): pass def quick_op(cb, message): cb(message, True) def long_op(cb, params): time.sleep(params.get('delay', 3)) cb(params.get('message', ''), params.get('result', False)) def 
abnormal_op(cb, params): try: raise task_except except: cb("Exception raised", False) def continuous_ops(cb, params): cb("step 1 OK") time.sleep(2) cb("step 2 OK") time.sleep(2) cb("step 3 OK", params.get('result', True)) inst = model.Model('test:///default', objstore_loc=self.tmp_store) taskid = add_task('', quick_op, inst.objstore, 'Hello') inst.task_wait(taskid) self.assertEquals(1, taskid) self.assertEquals('finished', inst.task_lookup(taskid)['status']) self.assertEquals('Hello', inst.task_lookup(taskid)['message']) taskid = add_task('', long_op, inst.objstore, {'delay': 3, 'result': False, 'message': 'It was not meant to be'}) self.assertEquals(2, taskid) self.assertEquals('running', inst.task_lookup(taskid)['status']) self.assertEquals('OK', inst.task_lookup(taskid)['message']) inst.task_wait(taskid) self.assertEquals('failed', inst.task_lookup(taskid)['status']) self.assertEquals('It was not meant to be', inst.task_lookup(taskid)['message']) taskid = add_task('', abnormal_op, inst.objstore, {}) inst.task_wait(taskid) self.assertEquals('Exception raised', inst.task_lookup(taskid)['message']) self.assertEquals('failed', inst.task_lookup(taskid)['status']) taskid = add_task('', continuous_ops, inst.objstore, {'result': True}) self.assertEquals('running', inst.task_lookup(taskid)['status']) inst.task_wait(taskid, timeout=10) self.assertEquals('finished', inst.task_lookup(taskid)['status']) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_delete_running_vm(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: params = {'name': u'test', 'disks': [], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': u'kīмсhī-∨м', 'template': u'/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, u'kīмсhī-∨м') inst.vm_start(u'kīмсhī-∨м') 
self.assertEquals(inst.vm_lookup(u'kīмсhī-∨м')['state'], 'running') rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, u'kīмсhī-∨м') inst.vm_delete(u'kīмсhī-∨м') vms = inst.vms_get_list() self.assertFalse(u'kīмсhī-∨м' in vms) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_list_sorted(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: params = {'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO} inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': 'kimchi-vm', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-vm') vms = inst.vms_get_list() self.assertEquals(vms, sorted(vms, key=unicode.lower)) def test_vm_clone(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) all_vm_names = inst.vms_get_list() name = all_vm_names[0] original_vm = inst.vm_lookup(name) if original_vm['state'] == u'shutoff': inst.vm_start(name) # the VM 'test' should be running by now, so we can't clone it yet self.assertRaises(InvalidParameter, inst.vm_clone, name) with RollbackContext() as rollback: inst.vm_poweroff(name) rollback.prependDefer(inst.vm_start, name) # create two simultaneous clones of the same VM # and make sure both of them complete successfully task1 = inst.vm_clone(name) task2 = inst.vm_clone(name) clone1_name = task1['target_uri'].split('/')[-2] rollback.prependDefer(inst.vm_delete, clone1_name) clone2_name = task2['target_uri'].split('/')[-2] rollback.prependDefer(inst.vm_delete, clone2_name) inst.task_wait(task1['id']) task1 = inst.task_lookup(task1['id']) self.assertEquals('finished', task1['status']) inst.task_wait(task2['id']) task2 = inst.task_lookup(task2['id']) self.assertEquals('finished', task2['status']) # update the original VM info because its state has changed original_vm = inst.vm_lookup(name) clone_vm = 
inst.vm_lookup(clone1_name) self.assertNotEqual(original_vm['name'], clone_vm['name']) self.assertTrue(re.match(u'%s-clone-\d+' % original_vm['name'], clone_vm['name'])) del original_vm['name'] del clone_vm['name'] self.assertNotEqual(original_vm['uuid'], clone_vm['uuid']) del original_vm['uuid'] del clone_vm['uuid'] # compare all VM settings except the ones already compared # (and removed) above (i.e. 'name' and 'uuid') self.assertEquals(original_vm, clone_vm) def test_use_test_host(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) with RollbackContext() as rollback: params = { 'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO, 'storagepool': '/plugins/kimchi/storagepools/default-pool', 'domain': 'test', 'arch': 'i686' } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': 'kimchi-vm', 'template': '/plugins/kimchi/templates/test'} task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-vm') vms = inst.vms_get_list() self.assertTrue('kimchi-vm' in vms) def test_get_distros(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) distros = inst.distros_get_list() for d in distros: distro = inst.distro_lookup(d) self.assertIn('name', distro) self.assertIn('os_distro', distro) self.assertIn('os_version', distro) self.assertIn('os_arch', distro) self.assertIn('path', distro) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_deep_scan(self): inst = model.Model(None, objstore_loc=self.tmp_store) with RollbackContext() as rollback: deep_path = os.path.join(TMP_DIR, 'deep-scan') subdir_path = os.path.join(deep_path, 'isos') if not os.path.exists(subdir_path): os.makedirs(subdir_path) ubuntu_iso = os.path.join(deep_path, 'ubuntu12.04.iso') sles_iso = os.path.join(subdir_path, 'sles10.iso') iso_gen.construct_fake_iso(ubuntu_iso, True, '12.04', 'ubuntu') iso_gen.construct_fake_iso(sles_iso, True, '10', 'sles') args 
= {'name': 'kimchi-scanning-pool', 'path': deep_path, 'type': 'kimchi-iso'} inst.storagepools_create(args) rollback.prependDefer(shutil.rmtree, deep_path) rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_deactivate, args['name']) time.sleep(1) volumes = inst.storagevolumes_get_list(args['name']) self.assertEquals(len(volumes), 2)
def running_root_and_remoteserver_defined():
    """Return whether the suite runs as root AND a remote server is configured.

    Used by skip decorators: both preconditions must hold for the remote
    tests to be runnable.  Short-circuits exactly like the original
    ``and`` expression: the remote-server check is only evaluated when
    running as root.
    """
    if not running_as_root():
        return False
    return remoteserver_environment_defined()
# NOTE(review): extraction-collapsed module prelude of a filesystem test file — imports,
# root-vs-non-root path constants (TESTFILE/TESTDIR/NFSSHARE), and the head of create_file()
# (builds a 10M file via dd and formats it ext4); the def is truncated at this chunk
# boundary ("if rc:" has no body here), so it is kept byte-identical rather than rewritten.
import os import mock import unittest from wok.exception import OperationFailed, MissingParameter, InvalidParameter from wok.rollbackcontext import RollbackContext from wok.utils import run_command import tests.utils as utils import wok.plugins.ginger.model.filesystem as filesystem from wok.plugins.ginger.model.fs_utils import _parse_df_output TESTFILE = {True: "/testfile", False: "/tmp/testfile"}[utils.running_as_root()] TESTDIR = {True: "/test", False: "/tmp/test"}[utils.running_as_root()] NFSSHARE = {True: "/var/ftp/nfs1", False: "/tmp/ftp/nfs1"}[utils.running_as_root()] def create_file(self): fcmd = ["dd", "if=/dev/zero", "of=%s" % TESTFILE, "bs=10M", "count=1"] fout, err, rc = run_command(fcmd) if rc: self.assertRaises(OperationFailed) fscmd = ["mkfs.ext4", TESTFILE, "-F"] fsout, err, rc = run_command(fscmd) if rc:
class FileSystemTests(unittest.TestCase):
    """Tests for the ginger filesystem model: listing, local/NFS mounts and
    `df` output parsing.  Mock targets, decorator order and every runtime
    string are preserved from the original."""

    def test_get_fs_list(self):
        """Listing mounted filesystems returns a (possibly empty) list."""
        fs_model = filesystem.FileSystemsModel()
        mounted = fs_model.get_list()
        self.assertGreaterEqual(len(mounted), 0)

    @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
    def test_mount_local_fs(self):
        """Mounting a local loopback file grows the filesystem list by one."""
        fs_model = filesystem.FileSystemsModel()
        fs_resource = filesystem.FileSystemModel()
        create_file(self)
        before = fs_model.get_list()
        with RollbackContext() as rollback:
            fs_model.create({'type': 'local',
                             'blk_dev': TESTFILE,
                             'mount_point': TESTDIR,
                             'persistent': False})
            rollback.prependDefer(fs_resource.delete, TESTDIR)
            after = fs_model.get_list()
            self.assertEqual(len(after), len(before) + 1)
        delete_file(self)

    @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
    def test_mount_existing_fs_fails(self):
        """A second mount on an already-used mount point raises OperationFailed."""
        fs_model = filesystem.FileSystemsModel()
        fs_resource = filesystem.FileSystemModel()
        create_file(self)
        with RollbackContext() as rollback:
            fs_model.create({'type': 'local',
                             'blk_dev': TESTFILE,
                             'mount_point': TESTDIR,
                             'persistent': False})
            rollback.prependDefer(fs_resource.delete, TESTDIR)
            with self.assertRaises(OperationFailed):
                fs_model.create({'type': 'local',
                                 'blk_dev': TESTFILE,
                                 'mount_point': TESTDIR,
                                 'persistent': False})
        delete_file(self)

    def test_df_parser(self):
        """_parse_df_output() splits `df` output into per-mount dicts."""
        # NOTE(review): the original column alignment of this fixture was lost
        # when the file was reformatted; the parser is assumed to split on
        # whitespace, so single spaces are equivalent — confirm.
        df_out = """Filesystem Type 1K-blocks Used Available Use% Mounted on
devtmpfs devtmpfs 3875092 0 3875092 0% /dev
tmpfs tmpfs 3886100 1772 3884328 1% /dev/shm
tmpfs tmpfs 3886100 1120 3884980 1% /run
tmpfs tmpfs 3886100 0 3886100 0% /sys/fs/cgroup
/dev/mapper/fedora-root ext4 51475068 18194240 30643004 38% /
tmpfs tmpfs 3886100 25748 3860352 1% /tmp
/dev/sda1 ext4 487652 162811 295145 36% /boot
/dev/mapper/fedora-home ext4 247613436 26170364 208841936 12% /home"""
        first = _parse_df_output(df_out)[0]
        # Mixed types below ('size'/'avail' as int, 'used'/'use%' as str)
        # mirror what the parser actually produces.
        if first['filesystem'] != 'devtmpfs':
            self.fail("Parsing of df failed : filesystem")
        if first['size'] != 3875092:
            self.fail("Parsing of df failed : size")
        if first['used'] != '0':
            self.fail("Parsing of df failed : used")
        if first['avail'] != 3875092:
            self.fail("Parsing of df failed : avail")
        if first['use%'] != '0%':
            self.fail("Parsing of df failed : use%")
        if first['mounted_on'] != '/dev':
            self.fail("Parsing of df failed : mounted on")

    @mock.patch('wok.plugins.ginger.model.fs_utils.nfsmount', autospec=True)
    @mock.patch('wok.plugins.ginger.model.fs_utils.make_persist',
                autospec=True)
    def test_nfs_mount(self, mock_make_persist, mock_nfsmount):
        """NFS create() delegates to nfsmount() and make_persist()."""
        fs_model = filesystem.FileSystemsModel()
        server = 'localhost'
        fs_model.create({'type': 'nfs',
                         'server': server,
                         'share': NFSSHARE,
                         'mount_point': TESTDIR,
                         'mount_options': ''})
        mock_nfsmount.assert_called_once_with(server, NFSSHARE, TESTDIR, '')
        mock_make_persist.assert_called_once_with(server + ':' + NFSSHARE,
                                                  TESTDIR, '')

    def test_nfs_mount_missing_type(self):
        """Omitting 'type' raises MissingParameter."""
        fs_model = filesystem.FileSystemsModel()
        params = {'server': 'localhost',
                  'share': NFSSHARE,
                  'mount_point': TESTDIR}
        self.assertRaises(MissingParameter, fs_model.create, params)

    def test_nfs_mount_invalid_type(self):
        """An unknown filesystem type raises InvalidParameter."""
        fs_model = filesystem.FileSystemsModel()
        params = {'type': 'invalid',
                  'server': 'localhost',
                  'share': NFSSHARE,
                  'mount_point': TESTDIR}
        self.assertRaises(InvalidParameter, fs_model.create, params)

    def test_nfs_mount_missing_server(self):
        """Omitting 'server' for an NFS mount raises MissingParameter."""
        fs_model = filesystem.FileSystemsModel()
        params = {'type': 'nfs',
                  'share': NFSSHARE,
                  'mount_point': TESTDIR}
        self.assertRaises(MissingParameter, fs_model.create, params)

    def test_nfs_mount_missing_share(self):
        """Omitting 'share' for an NFS mount raises MissingParameter."""
        fs_model = filesystem.FileSystemsModel()
        params = {'type': 'nfs',
                  'server': 'localhost',
                  'mount_point': TESTDIR}
        self.assertRaises(MissingParameter, fs_model.create, params)

    def test_nfs_mount_missing_mountpoint(self):
        """Omitting 'mount_point' for an NFS mount raises MissingParameter."""
        fs_model = filesystem.FileSystemsModel()
        params = {'type': 'nfs',
                  'server': 'localhost',
                  'share': NFSSHARE}
        self.assertRaises(MissingParameter, fs_model.create, params)
# NOTE(review): duplicate, extraction-collapsed copy of the filesystem test module prelude
# (license tail, imports, path constants, create_file head).  Because the collapsed line
# begins with a '#', the whole physical line is a comment in this rendition; kept
# byte-identical since create_file() is again truncated at the chunk boundary.
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import os import mock import unittest from wok.exception import OperationFailed, MissingParameter, InvalidParameter from wok.rollbackcontext import RollbackContext from wok.utils import run_command import tests.utils as utils import wok.plugins.ginger.model.filesystem as filesystem from wok.plugins.ginger.model.fs_utils import _parse_df_output TESTFILE = {True: "/testfile", False: "/tmp/testfile"}[utils.running_as_root()] TESTDIR = {True: "/test", False: "/tmp/test"}[utils.running_as_root()] NFSSHARE = { True: "/var/ftp/nfs1", False: "/tmp/ftp/nfs1" }[utils.running_as_root()] def create_file(self): fcmd = ["dd", "if=/dev/zero", "of=%s" % TESTFILE, "bs=10M", "count=1"] fout, err, rc = run_command(fcmd) if rc: self.assertRaises(OperationFailed) fscmd = ["mkfs.ext4", TESTFILE, "-F"] fsout, err, rc = run_command(fscmd)
class PartitionTests(unittest.TestCase):
    """Tests for the ginger disk-partition models (list/create/change/
    delete/format/lookup), with the low-level utils mocked out."""

    def setUp(self):
        # Separate object store ('…_ginger') so tests never touch wok's own.
        objstore_loc = config.get_object_store() + '_ginger'
        self._objstore = ObjectStore(objstore_loc)
        self.task_model = TaskModel(objstore=self._objstore)

    @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
    def test_get_part_list(self):
        """Partition listing returns a (possibly empty) list."""
        parts = diskparts.PartitionsModel()
        parts_list = parts.get_list()
        self.assertGreaterEqual(len(parts_list), 0)

    def test_create_part_missing_device(self):
        """create() without 'devname' raises MissingParameter."""
        parts = diskparts.PartitionsModel()
        size = 10
        params = {'partsize': size}
        self.assertRaises(MissingParameter, parts.create, params)

    def test_create_part_missing_size(self):
        """create() without 'partsize' raises MissingParameter."""
        parts = diskparts.PartitionsModel()
        dev = '/dev/sdb'
        params = {'devname': dev}
        self.assertRaises(MissingParameter, parts.create, params)

    @mock.patch('wok.plugins.ginger.model.utils.create_disk_part',
                autospec=True)
    def test_create_part(self, mock_create_part):
        """create() delegates to create_disk_part with device and size."""
        parts = diskparts.PartitionsModel()
        dev = '/dev/sdb'
        size = 10
        params = {'devname': dev, 'partsize': size}
        # BUG FIX: configure the mock *before* invoking create().  The
        # original assigned return_value after the call, so create() only
        # ever saw a bare MagicMock result (cf. test_change_part_type,
        # which sets it up front).
        mock_create_part.return_value = 'sdb1'
        parts.create(params)
        mock_create_part.assert_called_with(dev, size)

    @mock.patch('wok.plugins.ginger.model.utils.change_part_type',
                autospec=True)
    def test_change_part_type(self, mock_change_type):
        """change_type() delegates to change_part_type."""
        part = diskparts.PartitionModel(objstore=self._objstore)
        part_name = 'sdb1'
        part_type = '82'  # renamed from 'type' to avoid shadowing the builtin
        mock_change_type.return_value = 'sdb1'
        part.change_type(part_name, part_type)
        mock_change_type.assert_called_with(part_name, part_type)

    @mock.patch('wok.plugins.ginger.model.utils.delete_part', autospec=True)
    def test_delete_part(self, mock_delete_part):
        """delete() delegates to delete_part."""
        part = diskparts.PartitionModel(objstore=self._objstore)
        part_name = 'sdb1'
        part.delete(part_name)
        mock_delete_part.assert_called_with(part_name)

    @mock.patch('wok.plugins.ginger.model.utils._makefs', autospec=True)
    @mock.patch('wok.plugins.ginger.model.utils._is_mntd', autospec=True)
    def test_format_part(self, mock_is_mntd, mock_makefs):
        """format() on an unmounted partition runs _makefs via a task."""
        mock_is_mntd.return_value = False
        part = diskparts.PartitionModel(objstore=self._objstore)
        name = 'a_partition_name'
        fstype = 'ext4'
        task_obj = part.format(name, fstype)
        self.task_model.wait(task_obj.get('id'))
        mock_makefs.assert_called_with(fstype, name)

    @mock.patch('wok.plugins.ginger.model.diskparts.get_partition_details',
                autospec=True)
    def test_lookup_invalid_part_returns_404(self, mock_get_part_details):
        """lookup() propagates NotFoundError for an unknown partition."""
        mock_get_part_details.side_effect = iter([NotFoundError])
        part = diskparts.PartitionModel(objstore=self._objstore)
        with self.assertRaises(NotFoundError):
            part.lookup('/a/invalid/partition')
class Audisp_PluginsTests(unittest.TestCase):
    """Tests for the audisp plugins models.  Mock targets, decorator order
    and every runtime string are preserved from the original."""

    @mock.patch('wok.plugins.ginger.model.audisp_plugins.'
                'PluginsModel.get_list')
    def test_get_list_success(self, mock_get):
        """The plugin name returned by the (mocked) model appears in get_list()."""
        plugin = 'dummy1'
        mock_get.return_value = plugin
        out = PluginsModel().get_list()
        self.assertIn(plugin, out)

    @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
    def test_get_list_invalid_plugin(self):
        """A made-up plugin name is not present in the real plugin list."""
        out = PluginsModel().get_list()
        self.assertNotIn('abcd', out)

    @mock.patch('wok.plugins.ginger.model.audisp_plugins.'
                'PluginModel.get_plugin_info')
    def test_get_plugin_info(self, mock_info):
        """get_plugin_info() returns a dict carrying the requested name."""
        exp_out = {'details': {'direction': 'out',
                               'format': 'string',
                               'args': '0640 /var/run/audispd_events',
                               'active': 'no',
                               'path': 'builtin_af_unix',
                               'type': 'builtin'},
                   'name': u'dummy1'}
        name = "dummy1"
        open_mock = mock.mock_open()
        with mock.patch('wok.plugins.ginger.model.utils.open',
                        open_mock, create=True):
            mock_info.return_value = exp_out
            out = PluginModel().get_plugin_info(name)
            self.assertEquals(name, out['name'])

    @mock.patch('os.path.isfile')
    def test_get_plugin_info_invalid(self, mock_isfile):
        """A plugin whose conf file is absent raises NotFoundError."""
        mock_isfile.return_value = False
        audisp_plugin = PluginModel()
        self.assertRaises(NotFoundError,
                          audisp_plugin.get_plugin_info, "dummy1")

    @mock.patch('wok.plugins.ginger.model.utils.'
                'write_to_conf')
    def test_write_to_conf(self, mock_write):
        """write_to_conf runs cleanly against a mocked plugin conf file."""
        data = ('active = yes\n'
                'direction = out\n'
                'path = /usr/sbin/sedispatch\n'
                'type = always\n'
                'format = string\n')
        name = 'dummy1'
        path_to_plugin = '/etc/audisp/plugins.d' + "/" + name + ".conf"
        open_mock = mock.mock_open(read_data=data)
        with mock.patch('wok.plugins.ginger.model.utils.open',
                        open_mock, create=True):
            mock_write.return_value = {}
            write_to_conf("active", "no", path_to_plugin)

    @mock.patch('os.path.isfile')
    @mock.patch('wok.plugins.ginger.model.'
                'audisp_plugins.write_to_conf')
    @mock.patch('wok.plugins.ginger.model.'
                'audisp_plugins.del_lines_of_attribute')
    def test_update(self, mock_del, mock_write, mock_os):
        """update() succeeds when the conf-file helpers are mocked out."""
        mock_os.return_value = True
        mock_del.return_value = {}
        mock_write.return_value = {}
        PluginModel().update('af_uncvc', {"active": "yes"})