def __init__(self, ipaddress, username, password, port, vsan):
    """Fake fabric connection constructor.

    Succeeds silently in normal-test mode; otherwise simulates a
    connection failure by raising ProcessExecutionError.
    """
    connected = GlobalVars._is_normal_test
    if connected:
        return
    raise processutils.ProcessExecutionError(
        "Unable to connect to fabric")
def _fake_execute(*args, **kwargs):
    """Simulate tgt-admin failing because the ACL is already gone."""
    failure = putils.ProcessExecutionError(
        exit_code=1,
        stdout='',
        stderr='this access control rule does not exist',
        cmd='tgt-admin --force --delete')
    raise failure
def fake_execute(*args, **kwargs):
    """Return canned output unless the second positional arg is 'bad'."""
    marker = args[1]
    if marker != 'bad':
        return 'fakecontents', None
    raise processutils.ProcessExecutionError()
def exec_side_effect(*cmd, **kwargs):
    """Always fail with a 'Device or resource busy' execution error."""
    busy_error = processutils.ProcessExecutionError()
    # Attach the message the caller inspects before raising.
    busy_error.message = "Device or resource busy"
    raise busy_error
def fake_execute(obj, *cmd, **kwargs):
    """Fake LVM command runner for brick/LVM tests.

    Joins *cmd* into a comma-separated string and dispatches on it,
    returning canned ``(stdout, stderr)`` output for the vgs/lvs/
    lvdisplay/pvs invocations the LVM helper issues, or raising
    ProcessExecutionError to simulate command failures.  Unknown
    commands raise AssertionError so new call sites are noticed.
    """
    cmd_string = ', '.join(cmd)
    data = "\n"
    if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' ==
            cmd_string):
        data = " fake-vg\n"
        data += " some-other-vg\n"
    elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' ==
            cmd_string):
        data = " fake-vg\n"
    elif 'env, LC_ALL=C, vgs, --version' in cmd_string:
        data = " LVM version: 2.02.95(2) (2012-03-06)\n"
    elif ('env, LC_ALL=C, vgs, --noheadings, -o, uuid, fake-vg' in
            cmd_string):
        data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
    elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
            '-o, name,size,free,lv_count,uuid, ' \
            '--separator, :, --nosuffix' in cmd_string:
        # Full VG report: return early for the specific VG being queried.
        data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
                "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
        if 'test-prov-cap-vg-unit' in cmd_string:
            return (data, "")
        data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
                "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
        if 'test-prov-cap-vg-no-unit' in cmd_string:
            return (data, "")
        data = " fake-vg:10.00:10.00:0:"\
            "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
        if 'fake-vg' in cmd_string:
            return (data, "")
        # No specific VG requested: report all fake VGs.
        data += " fake-vg-2:10.00:10.00:0:"\
            "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
        data += " fake-vg-3:10.00:10.00:0:"\
            "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
    elif ('env, LC_ALL=C, lvs, --noheadings, '
          '--unit=g, -o, vg_name,name,size, --nosuffix, '
          'fake-vg/lv-nothere' in cmd_string):
        # Simulate lvs failing on a missing LV (older LVM wording).
        raise processutils.ProcessExecutionError(
            stderr="One or more specified logical volume(s) not found.")
    elif ('env, LC_ALL=C, lvs, --noheadings, '
          '--unit=g, -o, vg_name,name,size, --nosuffix, '
          'fake-vg/lv-newerror' in cmd_string):
        # Simulate lvs failing on a missing LV (newer LVM wording).
        raise processutils.ProcessExecutionError(
            stderr="Failed to find logical volume \"fake-vg/lv-newerror\"")
    elif ('env, LC_ALL=C, lvs, --noheadings, '
          '--unit=g, -o, vg_name,name,size' in cmd_string):
        if 'fake-unknown' in cmd_string:
            raise processutils.ProcessExecutionError(
                stderr="One or more volume(s) not found.")
        if 'test-prov-cap-vg-unit' in cmd_string:
            data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
            data += " fake-vg fake-volume-1 1.00g\n"
            data += " fake-vg fake-volume-2 2.00g\n"
        elif 'test-prov-cap-vg-no-unit' in cmd_string:
            data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
            data += " fake-vg fake-volume-1 1.00\n"
            data += " fake-vg fake-volume-2 2.00\n"
        elif 'test-found-lv-name' in cmd_string:
            data = " fake-vg test-found-lv-name 9.50\n"
        else:
            data = " fake-vg fake-1 1.00g\n"
            data += " fake-vg fake-2 1.00g\n"
    elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
            cmd_string):
        # LV attribute query: 'w' vs 'ow' distinguishes snapshot origins.
        if 'test-volumes' in cmd_string:
            data = ' wi-a-'
        else:
            data = ' owi-a-'
    elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string:
        data = " fake-vg|/dev/sda|10.00|1.00\n"
        data += " fake-vg|/dev/sdb|10.00|1.00\n"
        data += " fake-vg|/dev/sdc|10.00|8.99\n"
        data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
    elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
            ', -o, size,data_percent, --separator, :' in cmd_string:
        # Thin-pool size / usage query.
        if 'test-prov-cap-pool' in cmd_string:
            data = " 9.5:20\n"
        else:
            data = " 9:12\n"
    elif 'lvcreate, -T, -L, ' in cmd_string:
        pass
    elif 'lvcreate, -T, -V, ' in cmd_string:
        pass
    elif 'lvcreate, --name, ' in cmd_string:
        pass
    else:
        raise AssertionError('unexpected command called: %s' % cmd_string)
    return (data, "")
def _try_failing(self):
    """Count the attempt, then fail unconditionally."""
    self.attempts += 1
    raise processutils.ProcessExecutionError("Fail everytime")
    # NOTE: unreachable, preserved from the original helper.
    return True
def test_reraise_false(self):
    """With reraise disabled, the context manager swallows the error."""
    handler = srb.handle_process_execution_error(
        message='', info_message='', reraise=False)
    with handler:
        raise processutils.ProcessExecutionError(description='Oops')
def fake_missing_execute(self, *cmd, **kwargs):
    """Error when trying to call rootwrap drv_cfg"""
    missing = putils.ProcessExecutionError("Test missing drv_cfg.")
    raise missing
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
    """Execute cli with status update.

    Executes CLI commands such as cfgsave where status return is
    expected.

    :param cmd_list: command tokens to join and run over SSH
    :param check_exit_code: when True, a non-zero exit status raises
                            ProcessExecutionError
    :param attempts: number of times to retry the command before
                     giving up
    :returns: True on success
    :raises: processutils.ProcessExecutionError when all attempts fail
    """
    # Reject shell metacharacters before building the command line.
    utils.check_ssh_injection(cmd_list)
    command = ' '.join(cmd_list)

    # Lazily create the SSH connection pool on first use.
    if not self.sshpool:
        self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                         self.switch_port,
                                         None,
                                         self.switch_user,
                                         self.switch_pwd,
                                         self.switch_key,
                                         min_size=1,
                                         max_size=5)
    stdin, stdout, stderr = None, None, None
    LOG.debug("Executing command via ssh: %s", command)
    last_exception = None
    try:
        with self.sshpool.item() as ssh:
            while attempts > 0:
                attempts -= 1
                try:
                    stdin, stdout, stderr = ssh.exec_command(command)
                    # Answer the interactive confirmation prompt.
                    stdin.write("%s\n" % zone_constant.YES)
                    channel = stdout.channel
                    exit_status = channel.recv_exit_status()
                    LOG.debug("Exit Status from ssh: %s", exit_status)
                    # exit_status == -1 if no exit code was returned
                    if exit_status != -1:
                        LOG.debug('Result was %s', exit_status)
                        if check_exit_code and exit_status != 0:
                            raise processutils.ProcessExecutionError(
                                exit_code=exit_status,
                                stdout=stdout,
                                stderr=stderr,
                                cmd=command)
                        else:
                            return True
                    else:
                        return True
                except Exception as e:
                    # Remember the failure and back off briefly before
                    # the next retry.
                    LOG.exception(_LE('Error executing SSH command.'))
                    last_exception = e
                    greenthread.sleep(random.randint(20, 500) / 100.0)
            LOG.debug("Handling error case after "
                      "SSH: %s", last_exception)
            # All attempts exhausted: re-raise with the last failure's
            # details when available, otherwise a generic SSH error.
            try:
                raise processutils.ProcessExecutionError(
                    exit_code=last_exception.exit_code,
                    stdout=last_exception.stdout,
                    stderr=last_exception.stderr,
                    cmd=last_exception.cmd)
            except AttributeError:
                raise processutils.ProcessExecutionError(
                    exit_code=-1,
                    stdout="",
                    stderr="Error running SSH command",
                    cmd=command)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error executing command via ssh: %s"), e)
    finally:
        # Always release the channel streams.
        if stdin:
            stdin.flush()
            stdin.close()
        if stdout:
            stdout.close()
        if stderr:
            stderr.close()
def test_get_journalctl_output_fail(self, mock_execute):
    """A failing journalctl surfaces as CommandExecutionError."""
    failure = processutils.ProcessExecutionError()
    mock_execute.side_effect = failure
    self.assertRaises(errors.CommandExecutionError,
                      self._get_journalctl_output, mock_execute)
class HostMountStateTestCase(test.NoDBTestCase):
    """Tests for mount._HostMountState mount/umount bookkeeping."""

    @mock.patch('os.path.ismount',
                side_effect=[False, True, True, True, True])
    def test_init(self, mock_ismount):
        """State is rebuilt from the guests' disks at startup."""
        # Test that we initialise the state of MountManager correctly at
        # startup
        def fake_disk(disk):
            # disk is a (source_type, path_components) pair.
            libvirt_disk = libvirt_config.LibvirtConfigGuestDisk()
            libvirt_disk.source_type = disk[0]
            libvirt_disk.source_path = os.path.join(*disk[1])
            return libvirt_disk

        def mock_guest(uuid, disks):
            guest = mock.create_autospec(libvirt_guest.Guest)
            guest.uuid = uuid
            guest.get_all_disks.return_value = map(fake_disk, disks)
            return guest

        local_dir = '/local'
        mountpoint_a = '/mnt/a'
        mountpoint_b = '/mnt/b'

        guests = map(
            mock_guest,
            [uuids.instance_a, uuids.instance_b],
            [
                # Local file root disk and a volume on each of mountpoints
                # a and b
                [
                    ('file', (local_dir, uuids.instance_a, 'disk')),
                    ('file', (mountpoint_a, 'vola1')),
                    ('file', (mountpoint_b, 'volb1')),
                ],
                # Local LVM root disk and a volume on each of mountpoints
                # a and b
                [
                    ('block', ('/dev', 'vg', uuids.instance_b + '_disk')),
                    ('file', (mountpoint_a, 'vola2')),
                    ('file', (mountpoint_b, 'volb2')),
                ]
            ])

        host = mock.create_autospec(libvirt_host.Host)
        host.list_guests.return_value = guests

        m = mount._HostMountState(host, 0)

        self.assertEqual([mountpoint_a, mountpoint_b],
                         sorted(m.mountpoints.keys()))

        self.assertSetEqual(set([('vola1', uuids.instance_a),
                                 ('vola2', uuids.instance_b)]),
                            m.mountpoints[mountpoint_a].attachments)
        self.assertSetEqual(set([('volb1', uuids.instance_a),
                                 ('volb2', uuids.instance_b)]),
                            m.mountpoints[mountpoint_b].attachments)

    @staticmethod
    def _get_clean_hostmountstate():
        # list_guests returns no guests: _HostMountState initial state is
        # clean.
        host = mock.create_autospec(libvirt_host.Host)
        host.list_guests.return_value = []
        return mount._HostMountState(host, 0)

    def _sentinel_mount(self, m, vol, mountpoint=mock.sentinel.mountpoint,
                        instance=None):
        # Convenience wrapper: mount with sentinel arguments.
        if instance is None:
            instance = mock.sentinel.instance
            instance.uuid = uuids.instance
        m.mount(mock.sentinel.fstype, mock.sentinel.export, vol,
                mountpoint, instance,
                [mock.sentinel.option1, mock.sentinel.option2])

    def _sentinel_umount(self, m, vol, mountpoint=mock.sentinel.mountpoint,
                         instance=mock.sentinel.instance):
        # Convenience wrapper: umount with sentinel arguments.
        m.umount(vol, mountpoint, instance)

    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.privsep.fs.mount')
    @mock.patch('nova.privsep.fs.umount')
    @mock.patch('os.path.ismount',
                side_effect=[False, True, True, True])
    def test_mount_umount(self, mock_ismount, mock_umount, mock_mount,
                          mock_ensure_tree):
        """Two volumes on one export: single mount, single umount."""
        # Mount 2 different volumes from the same export. Test that we only
        # mount and umount once.
        m = self._get_clean_hostmountstate()

        # Mount vol_a from export
        self._sentinel_mount(m, mock.sentinel.vol_a)
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])

        # Mount vol_b from export. We shouldn't have mounted again
        self._sentinel_mount(m, mock.sentinel.vol_b)
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])

        # Unmount vol_a. We shouldn't have unmounted
        self._sentinel_umount(m, mock.sentinel.vol_a)
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])

        # Unmount vol_b. We should have umounted.
        self._sentinel_umount(m, mock.sentinel.vol_b)
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])

    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.privsep.fs.mount')
    @mock.patch('nova.privsep.fs.umount')
    @mock.patch('os.path.ismount',
                side_effect=[False, True, True, True])
    @mock.patch('nova.privsep.path.rmdir')
    def test_mount_umount_multi_attach(self, mock_rmdir, mock_ismount,
                                       mock_umount, mock_mount,
                                       mock_ensure_tree):
        """One volume, two instances: single mount, single umount."""
        # Mount a volume from a single export for 2 different instances.
        # Test that we only mount and umount once.
        m = self._get_clean_hostmountstate()

        instance_a = mock.sentinel.instance_a
        instance_a.uuid = uuids.instance_a
        instance_b = mock.sentinel.instance_b
        instance_b.uuid = uuids.instance_b

        # Mount vol_a for instance_a
        self._sentinel_mount(m, mock.sentinel.vol_a, instance=instance_a)
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_mount.reset_mock()

        # Mount vol_a for instance_b. We shouldn't have mounted again
        self._sentinel_mount(m, mock.sentinel.vol_a, instance=instance_b)
        mock_mount.assert_not_called()

        # Unmount vol_a for instance_a. We shouldn't have unmounted
        self._sentinel_umount(m, mock.sentinel.vol_a, instance=instance_a)
        mock_umount.assert_not_called()

        # Unmount vol_a for instance_b. We should have umounted.
        self._sentinel_umount(m, mock.sentinel.vol_a, instance=instance_b)
        mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])

    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.privsep.fs.mount')
    @mock.patch('nova.privsep.fs.umount')
    @mock.patch('os.path.ismount',
                side_effect=[False, False, True, False, False, True])
    @mock.patch('nova.privsep.path.rmdir')
    def test_mount_concurrent(self, mock_rmdir, mock_ismount, mock_umount,
                              mock_mount, mock_ensure_tree):
        """Concurrent mounts of one export never double-mount."""
        # This is 2 tests in 1, because the first test is the precondition
        # for the second.

        # The first test is that if 2 threads call mount simultaneously,
        # only one of them will call mount

        # The second test is that we correctly handle the case where we
        # delete a lock after umount. During the umount of the first test,
        # which will delete the lock when it completes, we start 2 more
        # threads which both call mount. These threads are holding a lock
        # which is about to be deleted. We test that they still don't race,
        # and only one of them calls mount.
        m = self._get_clean_hostmountstate()

        def mount_a():
            # Mount vol_a from export
            self._sentinel_mount(m, mock.sentinel.vol_a)
            ThreadController.current().waitpoint('mounted')
            self._sentinel_umount(m, mock.sentinel.vol_a)

        def mount_b():
            # Mount vol_b from export
            self._sentinel_mount(m, mock.sentinel.vol_b)
            self._sentinel_umount(m, mock.sentinel.vol_b)

        def mount_c():
            self._sentinel_mount(m, mock.sentinel.vol_c)

        def mount_d():
            self._sentinel_mount(m, mock.sentinel.vol_d)

        ctl_a = ThreadController(mount_a)
        ctl_b = ThreadController(mount_b)
        ctl_c = ThreadController(mount_c)
        ctl_d = ThreadController(mount_d)

        def trap_mount(*args, **kwargs):
            # Conditionally wait at a waitpoint named after the command
            # we're executing
            ThreadController.current().waitpoint('mount')

        def trap_umount(*args, **kwargs):
            # Conditionally wait at a waitpoint named after the command
            # we're executing
            ThreadController.current().waitpoint('umount')

        mock_mount.side_effect = trap_mount
        mock_umount.side_effect = trap_umount

        # Run the first thread until it's blocked while calling mount
        ctl_a.runto('mount')
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])

        # Start the second mount, and ensure it's got plenty of opportunity
        # to race.
        ctl_b.start()
        time.sleep(0.01)
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_not_called()

        # Allow ctl_a to complete its mount
        ctl_a.runto('mounted')
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_not_called()

        # Allow ctl_b to finish. We should not have done a umount
        ctl_b.finish()
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_not_called()

        # Allow ctl_a to start umounting. We haven't executed rmdir yet,
        # because we've blocked during umount
        ctl_a.runto('umount')
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])
        mock_rmdir.assert_not_called()

        # While ctl_a is umounting, simultaneously start both ctl_c and
        # ctl_d, and ensure they have an opportunity to race
        ctl_c.start()
        ctl_d.start()
        time.sleep(0.01)

        # Allow a, c, and d to complete
        for ctl in (ctl_a, ctl_c, ctl_d):
            ctl.finish()

        # We should have completed the previous umount, then remounted
        # exactly once
        mock_ensure_tree.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint)])
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2]),
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])

    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.privsep.fs.mount')
    @mock.patch('nova.privsep.fs.umount')
    @mock.patch('os.path.ismount',
                side_effect=[False, False, True, True, True, False])
    @mock.patch('nova.privsep.path.rmdir')
    def test_mount_concurrent_no_interfere(self, mock_rmdir, mock_ismount,
                                           mock_umount, mock_mount,
                                           mock_ensure_tree):
        """Mounts of different exports proceed independently."""
        # Test that concurrent calls to mount volumes in different exports
        # run concurrently
        m = self._get_clean_hostmountstate()

        def mount_a():
            # Mount vol on mountpoint a
            self._sentinel_mount(m, mock.sentinel.vol,
                                 mock.sentinel.mountpoint_a)
            ThreadController.current().waitpoint('mounted')
            self._sentinel_umount(m, mock.sentinel.vol,
                                  mock.sentinel.mountpoint_a)

        def mount_b():
            # Mount vol on mountpoint b
            self._sentinel_mount(m, mock.sentinel.vol,
                                 mock.sentinel.mountpoint_b)
            self._sentinel_umount(m, mock.sentinel.vol,
                                  mock.sentinel.mountpoint_b)

        ctl_a = ThreadController(mount_a)
        ctl_b = ThreadController(mount_b)

        ctl_a.runto('mounted')
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint_a,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_mount.reset_mock()

        ctl_b.finish()
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint_b,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_umount.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint_b)])
        mock_umount.reset_mock()

        ctl_a.finish()
        mock_umount.assert_has_calls(
            [mock.call(mock.sentinel.mountpoint_a)])

    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.privsep.fs.mount')
    @mock.patch('nova.privsep.fs.umount',
                side_effect=processutils.ProcessExecutionError())
    @mock.patch('os.path.ismount',
                side_effect=[False, True, True, True, False])
    @mock.patch('nova.privsep.path.rmdir')
    def test_mount_after_failed_umount(self, mock_rmdir, mock_ismount,
                                       mock_umount, mock_mount,
                                       mock_ensure_tree):
        """A failed umount leaves the mountpoint tracked as mounted."""
        # Test that MountManager correctly tracks state when umount fails.
        # Test that when umount fails a subsequent mount doesn't try to
        # remount it.
        m = self._get_clean_hostmountstate()

        # Mount vol_a
        self._sentinel_mount(m, mock.sentinel.vol_a)
        mock_mount.assert_has_calls([
            mock.call(mock.sentinel.fstype, mock.sentinel.export,
                      mock.sentinel.mountpoint,
                      [mock.sentinel.option1, mock.sentinel.option2])
        ])
        mock_mount.reset_mock()

        # Umount vol_a. The umount command will fail.
        self._sentinel_umount(m, mock.sentinel.vol_a)
        mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])

        # We should not have called rmdir, because umount failed
        mock_rmdir.assert_not_called()

        # Mount vol_a again. We should not have called mount, because umount
        # failed.
        self._sentinel_mount(m, mock.sentinel.vol_a)
        mock_mount.assert_not_called()

        # Prevent future failure of umount
        mock_umount.side_effect = None

        # Umount vol_a successfully
        self._sentinel_umount(m, mock.sentinel.vol_a)
        mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])

    @mock.patch.object(mount.LOG, 'error')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('nova.privsep.fs.mount')
    @mock.patch('os.path.ismount')
    @mock.patch('nova.privsep.fs.umount')
    def test_umount_log_failure(self, mock_umount, mock_ismount, mock_mount,
                                mock_ensure_tree, mock_LOG_error):
        """A failed umount is logged at error level."""
        mock_umount.side_effect = mount.processutils.ProcessExecutionError(
            None, None, None, 'umount', 'umount: device is busy.')
        mock_ismount.side_effect = [False, True, True]

        m = self._get_clean_hostmountstate()

        self._sentinel_mount(m, mock.sentinel.vol_a)
        self._sentinel_umount(m, mock.sentinel.vol_a)

        mock_LOG_error.assert_called()
def fake_execute(obj, *cmd, **kwargs):  # noqa
    """Fake LVM command runner honouring lvm_suppress_fd_warnings.

    Builds the expected 'env' prefix from the driver configuration,
    joins *cmd* into a comma-separated string, and returns canned
    ``(stdout, stderr)`` for the vgs/lvs/lvdisplay/pvs invocations the
    LVM helper issues, raising ProcessExecutionError for simulated
    failures.  Unknown commands raise AssertionError so new call sites
    are noticed.
    """
    # Commands are prefixed differently when fd warnings are suppressed.
    if obj.configuration.lvm_suppress_fd_warnings:
        _lvm_prefix = 'env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, '
    else:
        _lvm_prefix = 'env, LC_ALL=C, '
    cmd_string = ', '.join(cmd)
    data = "\n"
    if (_lvm_prefix + 'vgs, --noheadings, --unit=g, -o, name' ==
            cmd_string):
        data = " fake-vg\n"
        data += " some-other-vg\n"
    elif (_lvm_prefix + 'vgs, --noheadings, -o, name, fake-vg' ==
            cmd_string):
        data = " fake-vg\n"
    elif _lvm_prefix + 'vgs, --version' in cmd_string:
        data = " LVM version: 2.02.103(2) (2012-03-06)\n"
    elif (_lvm_prefix + 'vgs, --noheadings, -o, uuid, fake-vg' in
            cmd_string):
        data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
    elif (_lvm_prefix + 'vgs, --noheadings, --unit=g, '
          '-o, name,size,free,lv_count,uuid, '
          '--separator, :, --nosuffix' in cmd_string):
        # Full VG report: return early for the specific VG being queried.
        data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
                "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
        if 'test-prov-cap-vg-unit' in cmd_string:
            return (data, "")
        data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
                "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
        if 'test-prov-cap-vg-no-unit' in cmd_string:
            return (data, "")
        data = " fake-vg:10.00:10.00:0:"\
            "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
        if 'fake-vg' in cmd_string:
            return (data, "")
        # No specific VG requested: report all fake VGs.
        data += " fake-vg-2:10.00:10.00:0:"\
            "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
        data += " fake-vg-3:10.00:10.00:0:"\
            "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
    elif (_lvm_prefix + 'lvs, --noheadings, '
          '--unit=g, -o, vg_name,name,size, --nosuffix, '
          'fake-vg/lv-nothere' in cmd_string):
        # Simulate lvs failing on a missing LV (older LVM wording).
        raise processutils.ProcessExecutionError(
            stderr="One or more specified logical volume(s) not found.")
    elif (_lvm_prefix + 'lvs, --noheadings, '
          '--unit=g, -o, vg_name,name,size, --nosuffix, '
          'fake-vg/lv-newerror' in cmd_string):
        # Simulate lvs failing on a missing LV (newer LVM wording).
        raise processutils.ProcessExecutionError(
            stderr="Failed to find logical volume \"fake-vg/lv-newerror\"")
    elif (_lvm_prefix + 'lvs, --noheadings, '
          '--unit=g, -o, vg_name,name,size' in cmd_string):
        if 'fake-unknown' in cmd_string:
            raise processutils.ProcessExecutionError(
                stderr="One or more volume(s) not found.")
        if 'test-prov-cap-vg-unit' in cmd_string:
            data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
            data += " fake-vg fake-volume-1 1.00g\n"
            data += " fake-vg fake-volume-2 2.00g\n"
        elif 'test-prov-cap-vg-no-unit' in cmd_string:
            data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
            data += " fake-vg fake-volume-1 1.00\n"
            data += " fake-vg fake-volume-2 2.00\n"
        elif 'test-found-lv-name' in cmd_string:
            data = " fake-vg test-found-lv-name 9.50\n"
        else:
            data = " fake-vg fake-1 1.00g\n"
            data += " fake-vg fake-2 1.00g\n"
    elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Attr' in
            cmd_string):
        # LV attribute query: distinguish plain, snapshot, open and
        # origin volumes by their attr string.
        if 'test-volumes' in cmd_string:
            data = ' wi-a-'
        elif 'snapshot' in cmd_string:
            data = ' swi-a-s--'
        elif 'open' in cmd_string:
            data = ' -wi-ao---'
        else:
            data = ' owi-a-'
    elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Origin' in
            cmd_string):
        # Origin query: only snapshots report a parent volume.
        if 'snapshot' in cmd_string:
            data = ' fake-volume-1'
        else:
            data = ' '
    elif _lvm_prefix + 'pvs, --noheadings' in cmd_string:
        data = " fake-vg|/dev/sda|10.00|1.00\n"
        data += " fake-vg|/dev/sdb|10.00|1.00\n"
        data += " fake-vg|/dev/sdc|10.00|8.99\n"
        data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
        # Without --ignoreskippedcluster, pvs fails on clustered VGs.
        if '--ignoreskippedcluster' not in cmd_string:
            raise processutils.ProcessExecutionError(
                stderr="Skipping clustered volume group",
                stdout=data,
                exit_code=5)
    elif _lvm_prefix + 'lvs, --noheadings, --unit=g' \
            ', -o, size,data_percent, --separator, :' in cmd_string:
        # Thin-pool size / usage query.
        if 'test-prov-cap-pool' in cmd_string:
            data = " 9.5:20\n"
        else:
            data = " 9:12\n"
    elif 'lvcreate, -T, -L, ' in cmd_string:
        pass
    elif 'lvcreate, -T, -V, ' in cmd_string:
        pass
    elif 'lvcreate, -n, ' in cmd_string:
        pass
    elif 'lvcreate, --name, ' in cmd_string:
        pass
    elif 'lvextend, -L, ' in cmd_string:
        pass
    else:
        raise AssertionError('unexpected command called: %s' % cmd_string)
    return (data, "")
def fail(*args, **kwargs):
    """Stub that always fails with ProcessExecutionError."""
    error = processutils.ProcessExecutionError()
    raise error
def test_mkfs_with_unexpected_error(self, execute_mock):
    """An unexpected execute failure propagates out of mkfs."""
    failure = processutils.ProcessExecutionError(stderr='fake')
    execute_mock.side_effect = iter([failure])
    self.assertRaises(processutils.ProcessExecutionError, utils.mkfs,
                      'ext4', '/my/block/dev', 'ext4-vol')
class LvmTestCase(test.NoDBTestCase):
    """Tests for the libvirt LVM storage helpers."""

    def test_get_volume_size(self):
        """blockdev --getsize64 output is returned as the volume size."""
        executes = []

        def fake_execute(*cmd, **kwargs):
            executes.append(cmd)
            return 123456789, None

        expected_commands = [('blockdev', '--getsize64', '/dev/foo')]
        self.stub_out('nova.utils.execute', fake_execute)
        size = lvm.get_volume_size('/dev/foo')
        self.assertEqual(expected_commands, executes)
        self.assertEqual(123456789, size)

    @mock.patch.object(utils, 'execute',
                       side_effect=processutils.ProcessExecutionError(
                           stderr=('blockdev: cannot open /dev/foo: '
                                   'No such device or address')))
    def test_get_volume_size_not_found(self, mock_execute):
        """'No such device' maps to VolumeBDMPathNotFound."""
        self.assertRaises(exception.VolumeBDMPathNotFound,
                          lvm.get_volume_size, '/dev/foo')

    @mock.patch.object(utils, 'execute',
                       side_effect=processutils.ProcessExecutionError(
                           stderr=('blockdev: cannot open /dev/foo: '
                                   'No such file or directory')))
    def test_get_volume_size_not_found_file(self, mock_execute):
        """'No such file' also maps to VolumeBDMPathNotFound."""
        self.assertRaises(exception.VolumeBDMPathNotFound,
                          lvm.get_volume_size, '/dev/foo')

    @mock.patch.object(libvirt_utils, 'path_exists', return_value=True)
    @mock.patch.object(utils, 'execute',
                       side_effect=processutils.ProcessExecutionError(
                           stderr='blockdev: i am sad in other ways'))
    def test_get_volume_size_unexpectd_error(self, mock_execute,
                                             mock_path_exists):
        """Other blockdev failures are re-raised unchanged."""
        self.assertRaises(processutils.ProcessExecutionError,
                          lvm.get_volume_size, '/dev/foo')

    def test_lvm_clear(self):
        """clear_volume issues the expected dd/shred commands."""
        def fake_lvm_size(path):
            # Returns whatever the enclosing test set lvm_size to.
            return lvm_size

        def fake_execute(*cmd, **kwargs):
            executes.append(cmd)

        self.stub_out('nova.virt.libvirt.storage.lvm.get_volume_size',
                      fake_lvm_size)
        self.stub_out('nova.utils.execute', fake_execute)

        # Test the correct dd commands are run for various sizes
        lvm_size = 1
        executes = []
        expected_commands = [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v1',
                              'seek=0', 'count=1', 'conv=fdatasync')]
        lvm.clear_volume('/dev/v1')
        self.assertEqual(expected_commands, executes)

        lvm_size = 1024
        executes = []
        expected_commands = [('dd', 'bs=1024', 'if=/dev/zero',
                              'of=/dev/v2', 'seek=0', 'count=1',
                              'conv=fdatasync')]
        lvm.clear_volume('/dev/v2')
        self.assertEqual(expected_commands, executes)

        lvm_size = 1025
        executes = []
        expected_commands = [('dd', 'bs=1024', 'if=/dev/zero',
                              'of=/dev/v3', 'seek=0', 'count=1',
                              'conv=fdatasync')]
        expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v3',
                               'seek=1024', 'count=1', 'conv=fdatasync')]
        lvm.clear_volume('/dev/v3')
        self.assertEqual(expected_commands, executes)

        lvm_size = 1048576
        executes = []
        expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero',
                              'of=/dev/v4', 'seek=0', 'count=1',
                              'oflag=direct')]
        lvm.clear_volume('/dev/v4')
        self.assertEqual(expected_commands, executes)

        lvm_size = 1048577
        executes = []
        expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero',
                              'of=/dev/v5', 'seek=0', 'count=1',
                              'oflag=direct')]
        expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v5',
                               'seek=1048576', 'count=1',
                               'conv=fdatasync')]
        lvm.clear_volume('/dev/v5')
        self.assertEqual(expected_commands, executes)

        lvm_size = 1234567
        executes = []
        expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero',
                              'of=/dev/v6', 'seek=0', 'count=1',
                              'oflag=direct')]
        expected_commands += [('dd', 'bs=1024', 'if=/dev/zero',
                               'of=/dev/v6', 'seek=1024', 'count=181',
                               'conv=fdatasync')]
        expected_commands += [('dd', 'bs=1', 'if=/dev/zero', 'of=/dev/v6',
                               'seek=1233920', 'count=647',
                               'conv=fdatasync')]
        lvm.clear_volume('/dev/v6')
        self.assertEqual(expected_commands, executes)

        # Test volume_clear_size limits the size
        lvm_size = 10485761
        CONF.set_override('volume_clear_size', '1', 'libvirt')
        executes = []
        expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero',
                              'of=/dev/v7', 'seek=0', 'count=1',
                              'oflag=direct')]
        lvm.clear_volume('/dev/v7')
        self.assertEqual(expected_commands, executes)

        CONF.set_override('volume_clear_size', '2', 'libvirt')
        lvm_size = 1048576
        executes = []
        expected_commands = [('dd', 'bs=1048576', 'if=/dev/zero',
                              'of=/dev/v9', 'seek=0', 'count=1',
                              'oflag=direct')]
        lvm.clear_volume('/dev/v9')
        self.assertEqual(expected_commands, executes)

        # Test volume_clear=shred
        CONF.set_override('volume_clear', 'shred', 'libvirt')
        CONF.set_override('volume_clear_size', '0', 'libvirt')
        lvm_size = 1048576
        executes = []
        expected_commands = [('shred', '-n3', '-s1048576', '/dev/va')]
        lvm.clear_volume('/dev/va')
        self.assertEqual(expected_commands, executes)

        CONF.set_override('volume_clear', 'shred', 'libvirt')
        CONF.set_override('volume_clear_size', '1', 'libvirt')
        lvm_size = 10485761
        executes = []
        expected_commands = [('shred', '-n3', '-s1048576', '/dev/vb')]
        lvm.clear_volume('/dev/vb')
        self.assertEqual(expected_commands, executes)

        # Test volume_clear=none does nothing
        CONF.set_override('volume_clear', 'none', 'libvirt')
        executes = []
        expected_commands = []
        lvm.clear_volume('/dev/vc')
        self.assertEqual(expected_commands, executes)

    @mock.patch.object(utils, 'execute',
                       side_effect=processutils.ProcessExecutionError(
                           stderr=('blockdev: cannot open /dev/foo: '
                                   'No such file or directory')))
    def test_lvm_clear_ignore_lvm_not_found(self, mock_execute):
        """Clearing a missing volume is silently ignored."""
        lvm.clear_volume('/dev/foo')

    def test_fail_remove_all_logical_volumes(self):
        """One failed lvremove still removes the rest and then raises."""
        def fake_execute(*args, **kwargs):
            # Only vol2 fails; vol1 and vol3 are removed normally.
            if 'vol2' in args:
                raise processutils.ProcessExecutionError('Error')

        with test.nested(
                mock.patch.object(lvm, 'clear_volume'),
                mock.patch.object(libvirt_utils, 'execute',
                                  side_effect=fake_execute)) as (
                mock_clear, mock_execute):
            self.assertRaises(exception.VolumesNotRemoved,
                              lvm.remove_volumes,
                              ['vol1', 'vol2', 'vol3'])
            # All three volumes were attempted despite the failure.
            self.assertEqual(3, mock_execute.call_count)
def test_defaults(self):
    """A bare ProcessExecutionError renders placeholder defaults.

    The string form should contain 'None\\n' (unset fields) and
    'code: -\\n' (unset exit code).
    """
    err = processutils.ProcessExecutionError()
    # assertIn reports the actual text on failure, unlike
    # assertTrue('x' in y) which only reports "False is not true".
    self.assertIn('None\n', six.text_type(err))
    self.assertIn('code: -\n', six.text_type(err))
class APITestCase(test.NoDBTestCase):
    """Tests for the nova.virt.disk.api resize/extend helpers.

    Command execution (``utils.execute`` / ``processutils.execute``) and the
    VFS/mount layers are patched out, so no real images or block devices are
    touched; the tests only verify which commands would have been run.
    """

    @mock.patch.object(localfs.VFSLocalFS, 'get_image_fs', autospec=True,
                       return_value='')
    def test_can_resize_need_fs_type_specified(self, mock_image_fs):
        # An empty string from the filesystem probe means the image's
        # filesystem type is unknown, so it must not be extendable.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        image = imgmodel.LocalFileImage(imgfile.name, imgmodel.FORMAT_QCOW2)
        self.assertFalse(api.is_image_extendable(image))
        self.assertTrue(mock_image_fs.called)

    @mock.patch.object(utils, 'execute', autospec=True)
    def test_is_image_extendable_raw(self, mock_exec):
        # For raw images extendability is checked with e2label; the mocked
        # execute succeeding makes the image count as extendable.
        imgfile = tempfile.NamedTemporaryFile()
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW)
        self.addCleanup(imgfile.close)
        self.assertTrue(api.is_image_extendable(image))
        mock_exec.assert_called_once_with('e2label', imgfile)

    @mock.patch('oslo_concurrency.processutils.execute', autospec=True)
    def test_resize2fs_success(self, mock_exec):
        # resize2fs runs e2fsck first (exit codes 0-2 are acceptable) and
        # then resize2fs with exit code checking disabled.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        api.resize2fs(imgfile)
        mock_exec.assert_has_calls([
            mock.call('e2fsck', '-fp', imgfile, check_exit_code=[0, 1, 2]),
            mock.call('resize2fs', imgfile, check_exit_code=False)])

    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('nova.privsep.fs.resize2fs')
    def test_resize2fs_success_as_root(self, mock_resize, mock_exec):
        # With run_as_root=True the privsep helper is used instead of
        # direct command execution.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        api.resize2fs(imgfile, run_as_root=True)
        mock_exec.assert_not_called()
        mock_resize.assert_called()

    @mock.patch('oslo_concurrency.processutils.execute', autospec=True,
                side_effect=processutils.ProcessExecutionError("fs error"))
    def test_resize2fs_e2fsck_fails(self, mock_exec):
        # An e2fsck failure is swallowed by resize2fs (no exception
        # propagates), and resize2fs itself is never attempted.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        api.resize2fs(imgfile)
        mock_exec.assert_called_once_with('e2fsck', '-fp', imgfile,
                                          check_exit_code=[0, 1, 2])

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'is_image_extendable', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'resize2fs', autospec=True)
    @mock.patch.object(mount.Mount, 'instance_for_format')
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_extend_qcow_success(self, mock_exec, mock_inst, mock_resize,
                                 mock_extendable, mock_can_resize):
        # qcow2 extend with resize_fs_using_block_device=True: the image is
        # grown via qemu-img, then mounted and resized on the block device.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        device = "/dev/sdh"
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2)
        self.flags(resize_fs_using_block_device=True)
        mounter = FakeMount.instance_for_format(image, None, None)
        mounter.device = device
        mock_inst.return_value = mounter

        with test.nested(
                mock.patch.object(mounter, 'get_dev', autospec=True,
                                  return_value=True),
                mock.patch.object(mounter, 'unget_dev', autospec=True),
        ) as (mock_get_dev, mock_unget_dev):
            api.extend(image, imgsize)

        mock_can_resize.assert_called_once_with(imgfile, imgsize)
        mock_exec.assert_called_once_with('qemu-img', 'resize', imgfile,
                                          imgsize)
        mock_extendable.assert_called_once_with(image)
        mock_inst.assert_called_once_with(image, None, None)
        mock_resize.assert_called_once_with(mounter.device, run_as_root=True,
                                            check_exit_code=[0])
        mock_get_dev.assert_called_once_with()
        mock_unget_dev.assert_called_once_with()

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'is_image_extendable', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_extend_qcow_no_resize(self, mock_execute, mock_extendable,
                                   mock_can_resize_image):
        # With resize_fs_using_block_device=False only qemu-img runs; the
        # filesystem is not resized, so extendability is never checked.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2)
        self.flags(resize_fs_using_block_device=False)
        api.extend(image, imgsize)
        mock_can_resize_image.assert_called_once_with(imgfile, imgsize)
        mock_execute.assert_called_once_with('qemu-img', 'resize', imgfile,
                                             imgsize)
        self.assertFalse(mock_extendable.called)

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch('nova.privsep.libvirt.ploop_resize')
    def test_extend_ploop(self, mock_ploop_resize, mock_can_resize_image):
        # Ploop images are resized through the privsep ploop helper.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10 * units.Gi
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_PLOOP)
        api.extend(image, imgsize)
        mock_can_resize_image.assert_called_once_with(image.path, imgsize)
        mock_ploop_resize.assert_called_once_with(imgfile, imgsize)

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'resize2fs', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_extend_raw_success(self, mock_exec, mock_resize,
                                mock_can_resize):
        # Raw extend: qemu-img grows the file, e2label checks the fs, and
        # resize2fs runs without root (no block-device mount needed).
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW)
        api.extend(image, imgsize)
        mock_exec.assert_has_calls([
            mock.call('qemu-img', 'resize', imgfile, imgsize),
            mock.call('e2label', image.path)])
        mock_resize.assert_called_once_with(imgfile, run_as_root=False,
                                            check_exit_code=[0])
        mock_can_resize.assert_called_once_with(imgfile, imgsize)

    # First 7 hex chars of the hash of each supported filesystem's mkfs
    # command, used as the expected ephemeral-file extension.
    HASH_VFAT = utils.get_hash_str(api.FS_FORMAT_VFAT)[:7]
    HASH_EXT4 = utils.get_hash_str(api.FS_FORMAT_EXT4)[:7]
    HASH_NTFS = utils.get_hash_str(api.FS_FORMAT_NTFS)[:7]

    def test_get_file_extension_for_os_type(self):
        # Default mapping: no os_type -> vfat, linux -> ext4,
        # windows -> ntfs.
        self.assertEqual(self.HASH_VFAT,
                         api.get_file_extension_for_os_type(None, None))
        self.assertEqual(self.HASH_EXT4,
                         api.get_file_extension_for_os_type('linux', None))
        self.assertEqual(self.HASH_NTFS,
                         api.get_file_extension_for_os_type('windows', None))

    def test_get_file_extension_for_os_type_with_overrides(self):
        # "a74d253" is the first 7 hex chars of the hash of
        # 'custom mkfs command'; a default-command override applies to
        # every os_type.
        with mock.patch('nova.virt.disk.api._DEFAULT_MKFS_COMMAND',
                        'custom mkfs command'):
            self.assertEqual(
                "a74d253",
                api.get_file_extension_for_os_type('linux', None))
            self.assertEqual(
                "a74d253",
                api.get_file_extension_for_os_type('windows', None))
            self.assertEqual(
                "a74d253",
                api.get_file_extension_for_os_type('osx', None))

        # A per-os override only affects that os_type; the others keep the
        # built-in defaults.
        with mock.patch.dict(api._MKFS_COMMAND,
                             {'osx': 'custom mkfs command'}, clear=True):
            self.assertEqual(
                self.HASH_VFAT,
                api.get_file_extension_for_os_type(None, None))
            self.assertEqual(
                self.HASH_EXT4,
                api.get_file_extension_for_os_type('linux', None))
            self.assertEqual(
                self.HASH_NTFS,
                api.get_file_extension_for_os_type('windows', None))
            self.assertEqual(
                "a74d253",
                api.get_file_extension_for_os_type('osx', None))
def test_with_description(self):
    """A caller-supplied description appears in the stringified error."""
    description = 'The Narwhal Bacons at Midnight'
    err = processutils.ProcessExecutionError(description=description)
    # assertIn reports both operands on failure, unlike
    # assertTrue(x in y).
    self.assertIn(description, six.text_type(err))
def f():
    # Raise from inside the context manager so the enclosing test can
    # verify that handle_process_execution_error re-raises the original
    # exception when reraise=True.
    with srb.handle_process_execution_error(message='',
                                            info_message='',
                                            reraise=True):
        raise processutils.ProcessExecutionError(description='Oops')
def test_with_exit_code(self):
    """The exit code shows up in the stringified error, even when it is
    the falsy value 0.
    """
    exit_code = 0
    err = processutils.ProcessExecutionError(exit_code=exit_code)
    # assertIn gives a clearer failure message than assertTrue(x in y).
    self.assertIn(str(exit_code), six.text_type(err))
class APITestCase(test.NoDBTestCase):
    """Tests for the nova.virt.disk.api resize/extend helpers.

    This variant patches ``oslo_concurrency.processutils.execute`` directly
    and the nova.privsep helpers, so no real images, filesystems or block
    devices are touched.
    """

    @mock.patch.object(localfs.VFSLocalFS, 'get_image_fs', autospec=True,
                       return_value='')
    def test_can_resize_need_fs_type_specified(self, mock_image_fs):
        # An empty string from the filesystem probe means the image's
        # filesystem type is unknown, so it must not be extendable.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        image = imgmodel.LocalFileImage(imgfile.name, imgmodel.FORMAT_QCOW2)
        self.assertFalse(api.is_image_extendable(image))
        self.assertTrue(mock_image_fs.called)

    @mock.patch('oslo_concurrency.processutils.execute', autospec=True)
    def test_is_image_extendable_raw(self, mock_exec):
        # For raw images extendability is checked with e2label; the mocked
        # execute succeeding makes the image count as extendable.
        imgfile = tempfile.NamedTemporaryFile()
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW)
        self.addCleanup(imgfile.close)
        self.assertTrue(api.is_image_extendable(image))
        mock_exec.assert_called_once_with('e2label', imgfile)

    @mock.patch('oslo_concurrency.processutils.execute', autospec=True)
    def test_resize2fs_success(self, mock_exec):
        # resize2fs runs e2fsck first (exit codes 0-2 are acceptable) and
        # then resize2fs with exit code checking disabled.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        api.resize2fs(imgfile)
        mock_exec.assert_has_calls([
            mock.call('e2fsck', '-fp', imgfile, check_exit_code=[0, 1, 2]),
            mock.call('resize2fs', imgfile, check_exit_code=False)])

    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('nova.privsep.fs.resize2fs')
    @mock.patch('nova.privsep.fs.e2fsck')
    def test_resize2fs_success_as_root(self, mock_fsck, mock_resize,
                                       mock_exec):
        # With run_as_root=True both fsck and resize go through the
        # privsep helpers instead of direct command execution.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        api.resize2fs(imgfile, run_as_root=True)
        mock_exec.assert_not_called()
        mock_resize.assert_called()
        mock_fsck.assert_called()

    @mock.patch('oslo_concurrency.processutils.execute', autospec=True,
                side_effect=processutils.ProcessExecutionError("fs error"))
    def test_resize2fs_e2fsck_fails(self, mock_exec):
        # An e2fsck failure is swallowed by resize2fs (no exception
        # propagates), and resize2fs itself is never attempted.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        api.resize2fs(imgfile)
        mock_exec.assert_called_once_with('e2fsck', '-fp', imgfile,
                                          check_exit_code=[0, 1, 2])

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'is_image_extendable', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'resize2fs', autospec=True)
    @mock.patch.object(mount.Mount, 'instance_for_format')
    @mock.patch('oslo_concurrency.processutils.execute', autospec=True)
    def test_extend_qcow_success(self, mock_exec, mock_inst, mock_resize,
                                 mock_extendable, mock_can_resize):
        # qcow2 extend with resize_fs_using_block_device=True: the image is
        # grown via qemu-img, then mounted and resized on the block device.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        device = "/dev/sdh"
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2)
        self.flags(resize_fs_using_block_device=True)
        mounter = FakeMount.instance_for_format(image, None, None)
        mounter.device = device
        mock_inst.return_value = mounter

        with test.nested(
                mock.patch.object(mounter, 'get_dev', autospec=True,
                                  return_value=True),
                mock.patch.object(mounter, 'unget_dev', autospec=True),
        ) as (mock_get_dev, mock_unget_dev):
            api.extend(image, imgsize)

        mock_can_resize.assert_called_once_with(imgfile, imgsize)
        mock_exec.assert_called_once_with('qemu-img', 'resize', imgfile,
                                          imgsize)
        mock_extendable.assert_called_once_with(image)
        mock_inst.assert_called_once_with(image, None, None)
        mock_resize.assert_called_once_with(mounter.device, run_as_root=True,
                                            check_exit_code=[0])
        mock_get_dev.assert_called_once_with()
        mock_unget_dev.assert_called_once_with()

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'is_image_extendable', autospec=True)
    @mock.patch('oslo_concurrency.processutils.execute', autospec=True)
    def test_extend_qcow_no_resize(self, mock_execute, mock_extendable,
                                   mock_can_resize_image):
        # With resize_fs_using_block_device=False only qemu-img runs; the
        # filesystem is not resized, so extendability is never checked.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_QCOW2)
        self.flags(resize_fs_using_block_device=False)
        api.extend(image, imgsize)
        mock_can_resize_image.assert_called_once_with(imgfile, imgsize)
        mock_execute.assert_called_once_with('qemu-img', 'resize', imgfile,
                                             imgsize)
        self.assertFalse(mock_extendable.called)

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch('nova.privsep.libvirt.ploop_resize')
    def test_extend_ploop(self, mock_ploop_resize, mock_can_resize_image):
        # Ploop images are resized through the privsep ploop helper.
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10 * units.Gi
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_PLOOP)
        api.extend(image, imgsize)
        mock_can_resize_image.assert_called_once_with(image.path, imgsize)
        mock_ploop_resize.assert_called_once_with(imgfile, imgsize)

    @mock.patch.object(api, 'can_resize_image', autospec=True,
                       return_value=True)
    @mock.patch.object(api, 'resize2fs', autospec=True)
    @mock.patch('oslo_concurrency.processutils.execute', autospec=True)
    def test_extend_raw_success(self, mock_exec, mock_resize,
                                mock_can_resize):
        # Raw extend: qemu-img grows the file, e2label checks the fs, and
        # resize2fs runs without root (no block-device mount needed).
        imgfile = tempfile.NamedTemporaryFile()
        self.addCleanup(imgfile.close)
        imgsize = 10
        image = imgmodel.LocalFileImage(imgfile, imgmodel.FORMAT_RAW)
        api.extend(image, imgsize)
        mock_exec.assert_has_calls([
            mock.call('qemu-img', 'resize', imgfile, imgsize),
            mock.call('e2label', image.path)])
        mock_resize.assert_called_once_with(imgfile, run_as_root=False,
                                            check_exit_code=[0])
        mock_can_resize.assert_called_once_with(imgfile, imgsize)
def test_with_cmd(self):
    """The failing command appears in the stringified error."""
    cmd = 'telinit'
    err = processutils.ProcessExecutionError(cmd=cmd)
    # assertIn gives a clearer failure message than assertTrue(x in y).
    self.assertIn(cmd, six.text_type(err))
def execute(self, *cmd, **kwargs):
    """Run *cmd* via the rootwrap daemon client, with retries.

    Accepts the same kwargs as ``processutils.execute`` (loglevel,
    log_errors, process_input, delay_on_retry, attempts, check_exit_code)
    and returns an ``(out, err)`` tuple on success.

    Raises ``processutils.ProcessExecutionError`` when the command's
    return code is not in ``check_exit_code`` and checking is enabled,
    after the configured number of attempts is exhausted.
    """
    # NOTE(dims): This method is to provide compatibility with the
    # processutils.execute interface. So that calling daemon or direct
    # rootwrap to honor the same set of flags in kwargs and to ensure
    # that we don't regress any current behavior.
    cmd = [str(c) for c in cmd]
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    process_input = kwargs.pop('process_input', None)
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False

    # Normalize check_exit_code: a bool toggles checking entirely, an int
    # is promoted to a single-element list; the comparison below always
    # works on a list of acceptable codes.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    # Mask passwords before anything is logged.
    sanitized_cmd = strutils.mask_password(' '.join(cmd))
    LOG.info(_LI('Executing RootwrapDaemonHelper.execute '
                 'cmd=[%(cmd)r] kwargs=[%(kwargs)r]'),
             {'cmd': sanitized_cmd, 'kwargs': kwargs})

    while attempts > 0:
        attempts -= 1
        try:
            start_time = time.time()
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)

            (returncode, out, err) = self.client.execute(
                cmd, process_input)

            end_time = time.time() - start_time
            LOG.log(loglevel,
                    'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
                    'in %(end_time)0.3fs',
                    {'sanitized_cmd': sanitized_cmd,
                     'return_code': returncode,
                     'end_time': end_time})

            if not ignore_exit_code and returncode not in check_exit_code:
                # Mask stdout/stderr too before they end up in the
                # exception (and possibly in logs).
                out = strutils.mask_password(out)
                err = strutils.mask_password(err)
                raise processutils.ProcessExecutionError(
                    exit_code=returncode,
                    stdout=out,
                    stderr=err,
                    cmd=sanitized_cmd)
            return (out, err)
        # NOTE(review): `err` here shadows the stderr value unpacked
        # above, and `format` shadows the builtin — both only within
        # this handler's scope.
        except processutils.ProcessExecutionError as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == processutils.LOG_ALL_ERRORS or (
                    log_errors == processutils.LOG_FINAL_ERROR and
                    not attempts):
                format = _('%(desc)r\ncommand: %(cmd)r\n'
                           'exit code: %(code)r\nstdout: %(stdout)r\n'
                           'stderr: %(stderr)r')
                LOG.log(loglevel, format, {"desc": err.description,
                                           "cmd": err.cmd,
                                           "code": err.exit_code,
                                           "stdout": err.stdout,
                                           "stderr": err.stderr})
            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    # Random 0.2-2.0s backoff between attempts.
                    time.sleep(random.randint(20, 200) / 100.0)
def test_with_stderr(self):
    """Captured stderr output appears in the stringified error."""
    stderr = 'Cottonian library'
    err = processutils.ProcessExecutionError(stderr=stderr)
    # assertIn gives a clearer failure message than assertTrue(x in y).
    self.assertIn(stderr, six.text_type(err))
def _fake_execute(*args, **kwargs):
    """Stand-in for execute() that always fails like a missing target."""
    failure = putils.ProcessExecutionError(
        exit_code=1,
        stdout='',
        stderr="can't find the target",
        cmd='tgt-admin --force --delete')
    raise failure
def fake_execute_raises(*cmd, **kwargs):
    """Stand-in for execute() that unconditionally raises a canned error."""
    failure = processutils.ProcessExecutionError(
        cmd=['this', 'is', 'a', 'command'],
        exit_code=42,
        stdout='stdout',
        stderr='stderr')
    raise failure
def _fake_execute_wrong_message(*args, **kwargs):
    """Stand-in for execute() that fails with an unrecognized message."""
    failure = putils.ProcessExecutionError(
        cmd='tgt-admin --force --delete',
        exit_code=1,
        stdout='',
        stderr='this is not the error you are looking for')
    raise failure
def fake_execute(*args, **kwargs):
    """Stand-in for execute() that fails only when 'vol2' is an argument."""
    should_fail = 'vol2' in args
    if should_fail:
        raise processutils.ProcessExecutionError('Error')
def umount_raise_func(*args, **kwargs):
    """Stand-in for execute() that fails only for umount commands."""
    command = args[0]
    if command == 'umount':
        raise processutils.ProcessExecutionError('error')
class TestCinderConnector(base.TestCase):
    """Tests for the Cinder volume connector.

    The cinderclient and the os-brick connector are replaced by fakes so
    no Cinder API or host block devices are touched.

    NOTE: ``mock.patch`` decorators inject mock arguments bottom-up
    (innermost decorator first). The mock parameter names in the original
    version of these tests were listed top-down and therefore mis-named
    the injected mocks; they are renamed here to match the actual
    injection order. None of the renamed parameters are referenced in
    the test bodies, so behavior is unchanged.
    """

    def setUp(self):
        base.TestCase.setUp(self)
        self.connector = osbrickconnector.CinderConnector()
        self.connector.cinderclient = fake_client.FakeCinderClient()

    @staticmethod
    def _fake_attachments():
        """Return an attachment list matching a volume attached to this
        host, as the disconnect paths expect to find it.
        """
        return [{u'server_id': u'123',
                 u'attachment_id': u'123',
                 u'attached_at': u'2016-05-20T09:19:57.000000',
                 u'host_name': utils.get_hostname(),
                 u'device': None,
                 u'id': u'123'}]

    def test_connect_volume(self):
        """Connecting a volume records exactly one attachment."""
        fake_cinder_volume = fake_object.FakeCinderVolume()
        self.connector._connect_volume = mock.MagicMock()
        self.connector.connect_volume(fake_cinder_volume)
        self.assertEqual(1, len(fake_cinder_volume.attachments))

    @mock.patch.object(osbrickconnector, 'brick_get_connector',
                       return_value=fake_client.FakeOSBrickConnector())
    @mock.patch.object(utils, 'execute')
    def test_disconnect_volume(self, mock_execute, mock_brick_connector):
        """A clean disconnect of an attached volume returns None."""
        attachments = self._fake_attachments()
        fake_cinder_volume = \
            fake_object.FakeCinderVolume(attachments=attachments)
        self.connector._get_connection_info = mock.MagicMock()
        self.connector.cinderclient.volumes.detach = mock.MagicMock()
        self.assertIsNone(
            self.connector.disconnect_volume(fake_cinder_volume))

    @mock.patch.object(osbrickconnector, 'brick_get_connector_properties',
                       mock_get_connector_properties)
    @mock.patch.object(utils, 'execute')
    @mock.patch('fuxi.tests.unit.fake_client.FakeCinderClient.Volumes'
                '.initialize_connection',
                side_effect=cinder_exception.ClientException(500))
    def test_disconnect_volume_no_connection_info(self, mock_init_conn,
                                                  mock_execute):
        """A failure fetching connection info propagates to the caller."""
        attachments = self._fake_attachments()
        fake_cinder_volume = \
            fake_object.FakeCinderVolume(attachments=attachments)
        self.assertRaises(cinder_exception.ClientException,
                          self.connector.disconnect_volume,
                          fake_cinder_volume)

    @mock.patch.object(osbrickconnector, 'brick_get_connector',
                       return_value=fake_client.FakeOSBrickConnector())
    @mock.patch.object(osbrickconnector.CinderConnector,
                       '_get_connection_info',
                       return_value={'driver_volume_type': 'fake_proto',
                                     'data': {'path': '/dev/0'}})
    @mock.patch.object(utils, 'execute')
    @mock.patch('fuxi.tests.unit.fake_client.FakeOSBrickConnector'
                '.disconnect_volume',
                side_effect=processutils.ProcessExecutionError())
    def test_disconnect_volume_osbrick_disconnect_failed(
            self, mock_disconnect_vol, mock_execute, mock_conn_info,
            mock_brick_connector):
        """An os-brick disconnect failure propagates to the caller."""
        attachments = self._fake_attachments()
        fake_cinder_volume = \
            fake_object.FakeCinderVolume(attachments=attachments)
        self.assertRaises(processutils.ProcessExecutionError,
                          self.connector.disconnect_volume,
                          fake_cinder_volume)

    @mock.patch('fuxi.tests.unit.fake_client.FakeCinderClient.Volumes'
                '.detach',
                side_effect=cinder_exception.ClientException(500))
    @mock.patch.object(osbrickconnector, 'brick_get_connector',
                       return_value=fake_client.FakeOSBrickConnector())
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(osbrickconnector.CinderConnector,
                       '_get_connection_info',
                       return_value={'driver_volume_type': 'fake_proto',
                                     'data': {'path': '/dev/0'}})
    def test_disconnect_volume_detach_failed(self, mock_conn_info,
                                             mock_execute,
                                             mock_brick_connector,
                                             mock_detach):
        """A Cinder detach failure propagates to the caller."""
        attachments = self._fake_attachments()
        fake_cinder_volume = \
            fake_object.FakeCinderVolume(attachments=attachments)
        self.assertRaises(cinder_exception.ClientException,
                          self.connector.disconnect_volume,
                          fake_cinder_volume)

    def test_get_device_path(self):
        """The device path is the volume link dir joined with the id."""
        fake_cinder_volume = fake_object.FakeCinderVolume()
        self.assertEqual(
            os.path.join(constants.VOLUME_LINK_DIR, fake_cinder_volume.id),
            self.connector.get_device_path(fake_cinder_volume))