def __enter__(self):
    rc, out, err = execCmd([_SSH_AGENT.cmd], raw=True)
    if rc != 0:
        raise V2VError('Error init ssh-agent, exit code: %r'
                       ', out: %r, err: %r' % (rc, out, err))

    m = self._ssh_auth_re.match(out)
    # looking for: SSH_AUTH_SOCK=/tmp/ssh-VEE74ObhTWBT/agent.29917
    self._auth = {m.group(1): m.group(2)}
    self._agent_pid = m.group(3)

    try:
        rc, out, err = execCmd([_SSH_ADD.cmd], env=self._auth)
    except:
        self._kill_agent()
        raise

    if rc != 0:
        # 1 = general fail
        # 2 = no agent
        if rc != 2:
            self._kill_agent()
        raise V2VError('Error init ssh-add, exit code: %r'
                       ', out: %r, err: %r' % (rc, out, err))
def testWriteLargeData(self):
    data = """The Doctor: Davros, if you had created a virus in your
laboratory, something contagious and infectious that killed on contact,
a virus that would destroy all other forms of life; would you allow its
use?

Davros: It is an interesting conjecture.

The Doctor: Would you do it?

Davros: The only living thing... The microscopic organism... reigning
supreme... A fascinating idea.

The Doctor: But would you do it?

Davros: Yes; yes. To hold in my hand, a capsule that contained such
power. To know that life and death on such a scale was my choice. To
know that the tiny pressure on my thumb, enough to break the glass,
would end everything. Yes! I would do it! That power would set me up
above the gods! And through the Daleks, I shall have that power!
"""  # (C) BBC - Doctor Who
    data = data * 100
    p = commands.execCmd([EXT_CAT], sync=False)
    self.log.info("Writing data to the process' stdin")
    p.stdin.write(data)
    p.stdin.flush()
    self.log.info("Data written, reading it back")
    self.assertEqual(p.stdout.read(len(data)), data)
def snapshotScheduleDisable():
    command = [_snapSchedulerPath.cmd, "disable_force"]
    rc, out, err = commands.execCmd(command)
    if rc not in [0, SNAP_SCHEDULER_ALREADY_DISABLED_RC]:
        raise ge.GlusterDisableSnapshotScheduleFailedException(rc)
    return True
def _kill_agent(self):
    rc, out, err = execCmd([_SSH_AGENT.cmd, '-k'],
                           env={'SSH_AGENT_PID': self._agent_pid})
    if rc != 0:
        logging.error('Error killing ssh-agent (PID=%r), exit code: %r'
                      ', out: %r, err: %r' % (self._agent_pid, rc, out, err))
def volumeStatvfs(volumeName, host=GLUSTER_VOL_HOST,
                  port=GLUSTER_VOL_PORT,
                  protocol=GLUSTER_VOL_PROTOCOL):
    module = "vdsm.gluster.gfapi"
    command = [sys.executable, '-m', module, '-v', volumeName,
               '-p', str(port), '-H', host, '-t', protocol, '-c', 'statvfs']

    # to include /usr/share/vdsm in python path
    env = os.environ.copy()
    env['PYTHONPATH'] = "%s:%s" % (
        env.get("PYTHONPATH", ""), constants.P_VDSM)
    env['PYTHONPATH'] = ":".join(map(os.path.abspath,
                                     env['PYTHONPATH'].split(":")))

    rc, out, err = commands.execCmd(command, raw=True, env=env)
    if rc != 0:
        raise ge.GlfsStatvfsException(rc, [out], [err])
    res = json.loads(out)
    return os.statvfs_result((res['f_bsize'],
                              res['f_frsize'],
                              res['f_blocks'],
                              res['f_bfree'],
                              res['f_bavail'],
                              res['f_files'],
                              res['f_ffree'],
                              res['f_favail'],
                              res['f_flag'],
                              res['f_namemax']))
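# A minimal sketch of how the statvfs result returned above can be consumed;
# the numbers are hypothetical sample values, given in the same order in
# which volumeStatvfs() builds the tuple from the gfapi JSON output.
def _statvfs_usage_example():
    res = os.statvfs_result((4096, 4096, 26214400, 13107200, 13107200,
                             6553600, 6553600, 6553600, 4096, 255))
    total_bytes = res.f_blocks * res.f_frsize  # volume capacity
    free_bytes = res.f_bavail * res.f_frsize   # space usable by callers
    return total_bytes, free_bytes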
def testExec(self):
    """
    Tests that execCmd execs and returns the correct ret code
    """
    ret, out, err = commands.execCmd([EXT_ECHO])

    self.assertEqual(ret, 0)
def run_dd(self, args):
    cmd = [constants.EXT_DD]
    cmd.extend(args)
    rc, out, err = commands.execCmd(cmd, raw=True, data=self.data)
    assert rc == 0, "Process failed: rc={} err={}".format(rc, err)
    assert err != '', "No data from stderr"
    return out
def cleanup_transient_repository(*args):
    """
    cleanup-transient-repository
    Cleanup the unused transient disks present in the repository.
    (NOTE: it is recommended to NOT execute this command when the vdsm
    daemon is running)
    """
    if len(args) > 1:
        raise ExtraArgsError()

    transient_images = set(glob.glob(os.path.join(TRANSIENT_DISKS_REPO, "*")))

    if len(transient_images) == 0:
        return  # Nothing to do

    cmd_ret, cmd_out, cmd_err = execCmd([_fuser.cmd] + list(transient_images))

    # According to: "fuser returns a non-zero return code if none of the
    # specified files is accessed or in case of a fatal error. If at least
    # one access has been found, fuser returns zero." we can discard the
    # return code.
    # NOTE: the list of open files is printed to cmd_err with an extra ":"
    # character appended (removed by [:-1]).
    open_transient_images = set(x[:-1] for x in cmd_err)

    for image_path in transient_images - open_transient_images:
        # NOTE: This could cause a race with the creation of a virtual
        # machine with a transient disk (if vdsm is running).
        try:
            os.unlink(image_path)
        except OSError as e:
            if e.errno != os.errno.ENOENT:
                raise
def testV2VOutput(self):
    cmd = [FAKE_VIRT_V2V.cmd,
           '-v', '-x',
           '-ic', self.vpx_url,
           '-o', 'vdsm',
           '-of', 'raw',
           '-oa', 'sparse',
           '--vdsm-image-uuid', self.image_id_a,
           '--vdsm-vol-uuid', self.volume_id_a,
           '--vdsm-image-uuid', self.image_id_b,
           '--vdsm-vol-uuid', self.volume_id_b,
           '--password-file', '/tmp/mypass',
           '--vdsm-vm-uuid', self.job_id,
           '--vdsm-ovf-output', '/usr/local/var/run/vdsm/v2v',
           '--machine-readable',
           '-os', '/rhev/data-center/%s/%s' % (self.pool_id,
                                               self.domain_id),
           self.vm_name]

    rc, output, error = execCmd(cmd, raw=True)
    self.assertEqual(rc, 0)

    with io.open('fake-virt-v2v.out', 'rb') as f:
        self.assertEqual(output, f.read())

    with io.open('fake-virt-v2v.err', 'rb') as f:
        self.assertEqual(error, f.read())
def removeEphemeralBridge(bridgeName):
    rc, out, err = commands.execCmd(
        [EXT_IP, 'link', 'del', bridgeName, 'type', 'bridge'])
    if rc != 0:
        raise EnvironmentError(
            'Failed to remove ephemeral dummy bridge. Err: %s' % err)
def testWaitCond(self):
    p = commands.execCmd([EXT_SLEEP, str(EXECCMD_TIMEOUT + 1)], sync=False)
    startTime = time.time()
    p.wait(cond=lambda: time.time() - startTime > TIMEOUT)
    duration = time.time() - startTime
    self.assertLess(duration, EXECCMD_TIMEOUT + TIMEOUT)
    self.assertGreater(duration, EXECCMD_TIMEOUT)
    p.kill()
def __exit__(self, *args):
    rc, out, err = execCmd([_SSH_ADD.cmd, '-d'], env=self._auth)
    if rc != 0:
        logging.error('Error deleting ssh-add, exit code: %r'
                      ', out: %r, err: %r' % (rc, out, err))
    self._kill_agent()
def upload(url, path, headers={}):
    cmd = [constants.EXT_CURL_IMG_WRAP, "--upload"]
    cmd.extend(_headersToOptions(headers) + [path, url])

    rc, out, err = commands.execCmd(cmd)
    if rc != 0:
        raise CurlError(rc, out, err)
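# The helper used above and in head() below is not shown here; this is a
# plausible sketch of what a function like _headersToOptions could look
# like (an assumption for illustration, not the actual implementation):
# turn a headers dict into curl "--header" options.
def _headersToOptions(headers):
    options = []
    for name, value in headers.items():
        options.extend(("--header", "%s: %s" % (name, value)))
    return options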
def testStdErr(self):
    """
    Tests that execCmd correctly returns the standard error of the prog
    it executes.
    """
    cmd = ["sh", "-c", "echo it works! >&2"]
    ret, stdout, stderr = commands.execCmd(cmd)
    self.assertEqual(stderr[0].decode("ascii"), "it works!")
def attach(self):
    if self._path is not None:
        raise AssertionError("Device is attached: %s" % self)
    cmd = ["losetup", "--find", "--show", self._backing_file]
    rc, out, err = commands.execCmd(cmd, raw=True)
    if rc != 0:
        raise cmdutils.Error(cmd, rc, out, err)
    self._path = out.strip().decode("ascii")
def _validate_module(name):
    if not os.path.exists('/sys/module/' + name):
        cmd_modprobe = [modprobe.cmd, name]
        rc, out, err = commands.execCmd(cmd_modprobe, sudo=True)
        if rc != 0:
            raise SkipTest("This test requires %s module "
                           "(failed to load module: rc=%s, out=%s, err=%s)"
                           % (name, rc, out, err))
def test_limit_rss():
    # This should fail to allocate about 100 MiB.
    script = "s = 100 * 1024**2 * 'x'"
    cmd = ["python", "-c", script]
    cmd = cmdutils.prlimit(cmd, address_space=100 * 1024**2)
    rc, out, err = commands.execCmd(cmd, raw=True)
    assert rc == 1
    assert b"MemoryError" in err
def physicalVolumeList():
    rc, out, err = commands.execCmd([_pvsCommandPath.cmd,
                                     "--reportformat", "json",
                                     "--units", "b",
                                     "--nosuffix",
                                     "-o", "pv_name,vg_name"])
    if rc:
        raise ge.GlusterCmdExecFailedException(rc, out, err)
    return json.loads("".join(out))["report"][0]["pv"]
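# A small illustration of the JSON shape physicalVolumeList() parses; the
# device names below are hypothetical sample data, shaped like the output
# of "pvs --reportformat json --units b --nosuffix -o pv_name,vg_name".
def _physical_volume_list_example():
    out = ['{"report": [{"pv": ['
           '{"pv_name": "/dev/sdb", "vg_name": "vg-brick1"},'
           '{"pv_name": "/dev/sdc", "vg_name": ""}'
           ']}]}']
    # Returns a list of dicts, one per physical volume.
    return json.loads("".join(out))["report"][0]["pv"]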
def testSudo(self):
    """
    Tests that when running with sudo the user really is root (or other
    desired user).
    """
    cmd = [EXT_WHOAMI]
    checkSudo(cmd)
    ret, stdout, stderr = commands.execCmd(cmd, sudo=True)
    self.assertEqual(stdout[0].decode("ascii"), SUDO_USER)
def copyFromImage(dstImgPath, methodArgs):
    fileObj = methodArgs['fileObj']
    bytes_left = total_size = methodArgs['length']

    cmd = [constants.EXT_DD, "if=%s" % dstImgPath, "bs=%s" % constants.MEGAB,
           "count=%s" % (total_size // constants.MEGAB + 1)]

    p = commands.execCmd(cmd, sync=False)
    p.blocking = True
    with commands.terminating(p):
        _copyData(p.stdout, fileObj, bytes_left)
def testStdOut(self):
    """
    Tests that execCmd correctly returns the standard output of the prog
    it executes.
    """
    line = "All I wanted was to have some pizza, hang out with dad, " + \
           "and not let your weirdness mess up my day"
    # (C) Nickelodeon - Invader Zim
    ret, stdout, stderr = commands.execCmd((EXT_ECHO, line))
    self.assertEqual(stdout[0].decode("ascii"), line)
def testNice(self):
    cmd = ["sleep", "10"]
    proc = commands.execCmd(cmd, nice=10, sync=False)
    try:
        time.sleep(0.2)
        nice = pidstat(proc.pid).nice
        self.assertEqual(nice, 10)
    finally:
        proc.kill()
        proc.wait()
def testValidInputFalse(self):
    """Test that it works when given valid but incorrect input."""
    count = 802
    with tempfile.NamedTemporaryFile() as f:
        cmd = [EXT_DD, "bs=1", "if=/dev/urandom", 'of=%s' % f.name,
               'count=%d' % count]
        rc, out, err = commands.execCmd(cmd)

    self.assertFalse(misc.validateDDBytes(err, count + 1))
def _run(self, args, data=None):
    rc, out, err = commands.execCmd(
        args,
        data=data,
        raw=True,
        # We do tiny io, no need to run this on another CPU.
        resetCpuAffinity=False)
    if rc != 0:
        # Do not spam the log with received binary data
        raise cmdutils.Error(args, rc, "[suppressed]", err)

    return out
def _createVG(vgName, deviceList, stripeSize):
    # bz#1198568: Blivet always creates vg with 1MB stripe size
    # Workaround: Till blivet fixes the issue, use vgcreate command
    devices = ','.join([device.path for device in deviceList])
    rc, out, err = commands.execCmd([_vgCreateCommandPath.cmd,
                                     '-s', '%sk' % stripeSize,
                                     vgName, devices])
    if rc:
        raise ge.GlusterHostStorageDeviceVGCreateFailedException(
            vgName, devices, stripeSize, rc, out, err)
    blivetEnv.reset()
    return blivetEnv.devicetree.getDeviceByName(vgName)
def test(self):
    data = """Striker: You are a Time Lord, a lord of time.
Are there lords in such a small domain?

The Doctor: And where do you function?

Striker: Eternity. The endless wastes of eternity.
"""  # (C) BBC - Doctor Who
    p = commands.execCmd([EXT_CAT], sync=False)
    self.log.info("Writing data to the process' stdin")
    p.stdin.write(data)
    p.stdin.flush()
    self.log.info("Data written, reading it back")
    self.assertEqual(p.stdout.read(len(data)), data)
def head(url, headers={}):
    # Cannot be moved out because _curl.cmd is lazy-evaluated
    cmd = [_curl.cmd] + CURL_OPTIONS + ["--head", url]
    cmd.extend(_headersToOptions(headers))

    rc, out, err = commands.execCmd(cmd)
    if rc != 0:
        raise CurlError(rc, out, err)

    # Parse and return headers
    return dict([x.split(": ", 1) for x in out[1:-1]])
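# A minimal sketch of the parsing performed by head() above, applied to
# hypothetical "curl --head" output already split into lines (execCmd
# returns stdout as a list of lines when raw is not set): the first line
# is the status line and the last one is empty, hence the out[1:-1] slice.
def _head_parsing_example():
    out = ["HTTP/1.1 200 OK",
           "Content-Length: 1024",
           "Content-Type: application/octet-stream",
           ""]
    return dict([x.split(": ", 1) for x in out[1:-1]])
    # {'Content-Length': '1024', 'Content-Type': 'application/octet-stream'}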
def test_limit_cpu():
    # This takes 6 seconds on i7-5600U CPU @ 2.60GHz. We assume that it will
    # never take less than 1 second. Increase n if this starts to fail
    # randomly.
    script = """
n = 2**27
while n:
    n -= 1
"""
    cmd = ["python", "-c", script]
    cmd = cmdutils.prlimit(cmd, cpu_time=1)
    rc, out, err = commands.execCmd(cmd, raw=True)
    assert rc == -9
def _createPV(deviceList, alignment):
    for dev in deviceList:
        # bz#1178705: Blivet always creates pv with 1MB dataalignment
        # Workaround: Till blivet fixes the issue, we use lvm pvcreate
        rc, out, err = commands.execCmd([_pvCreateCommandPath.cmd,
                                         '--dataalignment',
                                         '%sk' % alignment,
                                         dev.path])
        if rc:
            raise ge.GlusterHostStorageDevicePVCreateFailedException(
                dev.path, alignment, rc, out, err)
    _reset_blivet(blivetEnv)
    return _getDeviceList([dev.name for dev in deviceList])
def verify_pattern(path, format, offset=512, len=1024, pattern=5):
    read_cmd = 'read -P %d -s 0 -l %d %d %d' % (pattern, len, offset, len)
    cmd = ['qemu-io', '-f', format, '-c', read_cmd, path]
    rc, out, err = commands.execCmd(cmd, raw=True)
    # Older qemu-io (2.10) used to exit with zero exit code and "Pattern
    # verification" error in stdout. In 2.12, non-zero code is returned when
    # pattern verification fails.
    if b"Pattern verification failed" in out:
        raise VerificationError(
            "Verification of volume %s failed. Pattern 0x%x not found at "
            "offset %s" % (path, pattern, offset))
    if rc != 0 or err != b"":
        raise cmdutils.Error(cmd, rc, out, err)
def processesStop():
    command = ["/bin/sh", _stopAllProcessesPath.cmd]
    rc, out, err = commands.execCmd(command)
    if rc:
        raise ge.GlusterProcessesStopFailedException(rc)
def func():
    proc = commands.execCmd(["sleep", str(timeout)], sync=False)
    return utils.AsyncProcessOperation(proc)
def test_normal(self, cmd):
    rc, out, _ = commands.execCmd(cmd(('echo', 'hello world')))
    assert rc == 0
    assert out[0].decode() == 'hello world'
def test_io_class(self, cmd):
    rc, out, _ = commands.execCmd(cmd(('ionice',)), ioclass=2,
                                  ioclassdata=3)
    assert rc == 0
    assert out[0].decode().strip() == 'best-effort: prio 3'
def _runHooksDir(data, dir, vmconf={}, raiseError=True, params={},
                 hookType=_DOMXML_HOOK):
    scripts = _scriptsPerDir(dir)
    scripts.sort()

    if not scripts:
        return data

    data_fd, data_filename = tempfile.mkstemp()
    try:
        if hookType == _DOMXML_HOOK:
            os.write(data_fd, data or '')
        elif hookType == _JSON_HOOK:
            os.write(data_fd, json.dumps(data))
        os.close(data_fd)

        scriptenv = os.environ.copy()

        # Update the environment using params and custom configuration
        env_update = [params.iteritems(),
                      vmconf.get('custom', {}).iteritems()]

        # Encode custom properties to UTF-8 and save them to scriptenv
        # Pass str objects (byte-strings) without any conversion
        for k, v in itertools.chain(*env_update):
            try:
                if isinstance(v, unicode):
                    scriptenv[k] = v.encode('utf-8')
                else:
                    scriptenv[k] = v
            except UnicodeDecodeError:
                pass

        if vmconf.get('vmId'):
            scriptenv['vmId'] = vmconf.get('vmId')
        ppath = scriptenv.get('PYTHONPATH', '')
        hook = pkgutil.get_loader('vdsm.hook').filename
        scriptenv['PYTHONPATH'] = ':'.join(ppath.split(':') + [hook])
        if hookType == _DOMXML_HOOK:
            scriptenv['_hook_domxml'] = data_filename
        elif hookType == _JSON_HOOK:
            scriptenv['_hook_json'] = data_filename

        errorSeen = False
        for s in scripts:
            rc, out, err = commands.execCmd([s], raw=True, env=scriptenv)
            logging.info('%s: rc=%s err=%s', s, rc, err)
            if rc != 0:
                errorSeen = True

            if rc == 2:
                break
            elif rc > 2:
                logging.warn('hook returned unexpected return code %s', rc)

        if errorSeen and raiseError:
            raise exception.HookError(err)

        with open(data_filename) as f:
            final_data = f.read()
    finally:
        os.unlink(data_filename)
    if hookType == _DOMXML_HOOK:
        return final_data
    elif hookType == _JSON_HOOK:
        return json.loads(final_data)
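# A minimal sketch of the hook protocol that _runHooksDir() above drives,
# seen from a hook script's side: the domain XML travels through a
# temporary file whose name is exported in the _hook_domxml environment
# variable, and the script reports back through its exit status
# (0 = success, 2 = stop processing further hooks, other non-zero values
# are treated as errors). This standalone example is illustrative only;
# the transformation applied to the XML is left as a no-op placeholder.
def _example_domxml_hook_main():
    domxml_path = os.environ['_hook_domxml']
    with open(domxml_path) as f:
        domxml = f.read()
    # ... inspect or rewrite the XML here (left unchanged in this sketch) ...
    with open(domxml_path, 'w') as f:
        f.write(domxml)
    return 0  # exit status reported back to _runHooksDir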
def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE,
                raidParams={}):
    def _getDeviceList(devNameList):
        return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1])
                for devName in devNameList]

    def _createPV(deviceList, alignment):
        for dev in deviceList:
            # bz#1178705: Blivet always creates pv with 1MB dataalignment
            # Workaround: Till blivet fixes the issue, we use lvm pvcreate
            rc, out, err = commands.execCmd([_pvCreateCommandPath.cmd,
                                             '--dataalignment',
                                             '%sk' % alignment,
                                             dev.path])
            if rc:
                raise ge.GlusterHostStorageDevicePVCreateFailedException(
                    dev.path, alignment, rc, out, err)
        _reset_blivet(blivetEnv)
        return _getDeviceList([dev.name for dev in deviceList])

    def _createVG(vgName, deviceList, stripeSize):
        # bz#1198568: Blivet always creates vg with 1MB stripe size
        # Workaround: Till blivet fixes the issue, use vgcreate command
        devices = ','.join([device.path for device in deviceList])
        rc, out, err = commands.execCmd([_vgCreateCommandPath.cmd,
                                         '-s', '%sk' % stripeSize,
                                         vgName, devices])
        if rc:
            raise ge.GlusterHostStorageDeviceVGCreateFailedException(
                vgName, devices, stripeSize, rc, out, err)
        blivetEnv.reset()
        return blivetEnv.devicetree.getDeviceByName(vgName)

    def _createThinPool(poolName, vg, alignment, poolMetaDataSize,
                        poolDataSize):
        metaName = "meta-%s" % poolName
        vgPoolName = "%s/%s" % (vg.name, poolName)
        metaLv = LVMLogicalVolumeDevice(
            metaName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolMetaDataSize))
        poolLv = LVMLogicalVolumeDevice(
            poolName, parents=[vg],
            size=blivet.size.Size('%d KiB' % poolDataSize))
        blivetEnv.createDevice(metaLv)
        blivetEnv.createDevice(poolLv)
        blivetEnv.doIt()

        # bz#1100514: LVM2 currently only supports physical extent sizes
        # that are a power of 2. Till that support is available we need
        # to use lvconvert to achieve that.
        # bz#1179826: blivet doesn't support lvconvert functionality.
        # Workaround: Till the bz gets fixed, lvconvert command is used
        rc, out, err = commands.execCmd([_lvconvertCommandPath.cmd,
                                         '--chunksize', '%sK' % alignment,
                                         '--thinpool', vgPoolName,
                                         '--poolmetadata',
                                         "%s/%s" % (vg.name, metaName),
                                         '--poolmetadataspar', 'n',
                                         '-y'])
        if rc:
            raise ge.GlusterHostStorageDeviceLVConvertFailedException(
                vg.path, alignment, rc, out, err)

        rc, out, err = commands.execCmd([_lvchangeCommandPath.cmd,
                                         '--zero', 'n', vgPoolName])
        if rc:
            raise ge.GlusterHostStorageDeviceLVChangeFailedException(
                vgPoolName, rc, out, err)
        _reset_blivet(blivetEnv)
        return blivetEnv.devicetree.getDeviceByName(poolLv.name)

    if os.path.ismount(mountPoint):
        raise ge.GlusterHostStorageMountPointInUseException(mountPoint)

    vgName = "vg-" + brickName
    poolName = "pool-" + brickName
    poolDataSize = 0
    count = 0
    raidType = raidParams.get('type')
    metaDataSizeKib = DEFAULT_METADATA_SIZE_KB
    if raidType == '6':
        count = raidParams['pdCount'] - 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = alignment
    elif raidType == '10':
        count = raidParams['pdCount'] // 2
        alignment = raidParams['stripeSize'] * count
        chunkSize = DEFAULT_CHUNK_SIZE_KB
    else:
        # Device type is JBOD
        alignment = DEFAULT_CHUNK_SIZE_KB
        chunkSize = DEFAULT_CHUNK_SIZE_KB

    blivetEnv = blivet.Blivet()
    _reset_blivet(blivetEnv)

    # get the devices list from the device name
    deviceList = _getDeviceList(devNameList)

    # raise an error when any device not actually found in the given list
    notFoundList = set(devNameList).difference(
        set([dev.name for dev in deviceList]))
    if notFoundList:
        raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList)

    # raise an error when any device is used already in the given list
    inUseList = set(devNameList).difference(
        set([not _canCreateBrick(dev) or dev.name for dev in deviceList]))
    if inUseList:
        raise ge.GlusterHostStorageDeviceInUseException(inUseList)

    pvDeviceList = _createPV(deviceList, alignment)
    vg = _createVG(vgName, pvDeviceList, alignment)

    # The following calculation is based on the redhat storage performance doc
    # http://docbuilder.usersys.redhat.com/22522
    # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance

    # create ~16GB metadata LV (metaDataSizeKib) that has a size which is
    # a multiple of RAID stripe width if it is > minimum vg size
    # otherwise allocate a minimum of 0.5% of the data device size
    # and create data LV (poolDataSize) that has a size which is
    # a multiple of stripe width.
    vgSizeKib = int(_getDeviceSize(vg, 'KiB'))
    if _getDeviceSize(vg) < MIN_VG_SIZE:
        metaDataSizeKib = vgSizeKib * MIN_METADATA_PERCENT
    poolDataSize = vgSizeKib - metaDataSizeKib

    metaDataSizeKib = (metaDataSizeKib - (metaDataSizeKib % alignment))
    poolDataSize = (poolDataSize - (poolDataSize % alignment))

    # Creating a thin pool from the data LV and the metadata LV
    # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool
    #     --poolmetadata VOLGROUP/metadata_device_name
    pool = _createThinPool(poolName, vg, chunkSize, metaDataSizeKib,
                           poolDataSize)

    # Size of the thin LV should be same as the size of Thinpool to avoid
    # over allocation. Refer bz#1412455 for more info.
    if six.PY2:
        thinlv = LVMThinLogicalVolumeDevice(
            brickName, parents=[pool],
            size=blivet.size.Size('%d KiB' % poolDataSize),
            grow=True)
    else:
        thinlv = LVMLogicalVolumeDevice(
            brickName, parents=[pool],
            size=blivet.size.Size('%d KiB' % poolDataSize),
            grow=True, seg_type="thin")

    blivetEnv.createDevice(thinlv)
    blivetEnv.doIt()

    if fsType != DEFAULT_FS_TYPE:
        log.error("fstype %s is currently unsupported" % fsType)
        raise ge.GlusterHostStorageDeviceMkfsFailedException(fsType)

    if six.PY2:
        get_format = blivet.formats.getFormat  # pylint: disable=no-member
    else:
        get_format = blivet.formats.get_format  # pylint: disable=no-member

    format = get_format(DEFAULT_FS_TYPE, device=thinlv.path,
                        mountopts=DEFAULT_MOUNT_OPTIONS)
    format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"]
    if raidParams.get('type') == '6':
        format._defaultFormatOptions += [
            "-d", "sw=%s,su=%sk" % (count, raidParams.get('stripeSize'))]
    blivetEnv.formatDevice(thinlv, format)
    blivetEnv.doIt()

    try:
        os.makedirs(mountPoint)
    except OSError as e:
        if errno.EEXIST != e.errno:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostStorageDeviceMakeDirsFailedException(
                err=[errMsg])
    thinlv.format.setup(mountpoint=mountPoint)
    blivetEnv.doIt()

    # bz#1230495: lvm devices are invisible and appear only after vgscan
    # Workaround: Till the bz gets fixed, we use vgscan to refresh LVM devices
    rc, out, err = commands.execCmd([_vgscanCommandPath.cmd])
    if rc:
        raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err)
    fstab.FsTab().add(thinlv.path, mountPoint,
                      DEFAULT_FS_TYPE, mntOpts=[DEFAULT_MOUNT_OPTIONS])

    # If selinux is enabled, set correct selinux labels on the brick.
    if selinux.is_selinux_enabled():
        rc, out, err = commands.execCmd([_semanageCommandPath.cmd,
                                         'fcontext', '-a', '-t',
                                         'glusterd_brick_t', mountPoint])
        if rc:
            raise ge.GlusterHostFailedToSetSelinuxContext(
                mountPoint, rc, out, err)
        try:
            # mountPoint can be of 'unicode' type when it's passed through
            # jsonrpc. restorecon calls into a C API that needs a char *.
            # Thus, it is necessary to encode unicode to a utf-8 string.
            selinux.restorecon(mountPoint.encode('utf-8'), recursive=True)
        except OSError as e:
            errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
            raise ge.GlusterHostFailedToRunRestorecon(mountPoint, err=errMsg)
    return _getDeviceDict(thinlv)
def _run_cmd(cmd, cwd=None):
    rc, out, err = commands.execCmd(cmd, raw=True, cwd=cwd)
    if rc != 0:
        raise cmdutils.Error(cmd, rc, out, err)
    return out
def write_pattern(path, format, offset=512, len=1024, pattern=5):
    write_cmd = 'write -P %d %d %d' % (pattern, offset, len)
    cmd = ['qemu-io', '-f', format, '-c', write_cmd, path]
    rc, out, err = commands.execCmd(cmd, raw=True)
    if rc != 0:
        raise cmdutils.Error(cmd, rc, out, err)
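# A minimal usage sketch tying write_pattern() and verify_pattern() above
# together; the image path is a hypothetical example and must point at an
# existing raw image for qemu-io to operate on.
def _pattern_roundtrip_example():
    path = "/var/tmp/test-image.raw"
    write_pattern(path, "raw", offset=512, len=1024, pattern=5)
    # Raises VerificationError if the pattern is not found at the offset.
    verify_pattern(path, "raw", offset=512, len=1024, pattern=5)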
def systemctl_stop(self, name):
    return commands.execCmd([
        self._exes.systemctl.cmd,
        'stop',
        name,
    ], raw=True)
def testExistingNotInPaths(self):
    """Tests if CommandPath can find the executable like the 'which' unix
    tool"""
    cp = cmdutils.CommandPath('sh', 'utter nonsense')
    _, stdout, _ = commands.execCmd(['which', 'sh'])
    self.assertIn(cp.cmd.encode(), stdout)
def test_set_sid(self, cmd):
    cmd_args = (sys.executable, '-c',
                'from __future__ import print_function;import os;'
                'print(os.getsid(os.getpid()))')
    rc, out, _ = commands.execCmd(cmd(cmd_args), setsid=True)
    assert int(out[0]) != os.getsid(os.getpid())
def test_sudo(self, cmd):
    rc, out, _ = commands.execCmd(cmd(('grep', 'Uid', '/proc/self/status')),
                                  sudo=True)
    assert rc == 0
    assert int(out[0].split()[2]) == 0
def test_panic(self):
    cmd = [sys.executable, "panic_helper.py"]
    rc, out, err = commands.execCmd(cmd)
    self.assertEqual(rc, -9)
def _sleep(self, t):
    proc = commands.execCmd(["sleep", str(t)], sync=False)
    return utils.AsyncProcessOperation(proc)
def configure_passwd():
    script = (str(_SASLPASSWD2), '-p', '-a', 'libvirt', SASL_USERNAME)
    rc, _, err = commands.execCmd(script, data=libvirt_password())
    if rc != 0:
        raise RuntimeError("Set password failed: %s" % (err,))
def systemctl_stop(name):
    return commands.execCmd([_SYSTEMCTL.cmd, 'stop', name])
def testCommunicate(self):
    data = ("The trouble with the world is that the stupid are cocksure "
            "and the intelligent are full of doubt")
    p = commands.execCmd([EXT_DD], data=data, sync=False)
    p.stdin.close()
    self.assertEqual(p.stdout.read(len(data)).strip(), data)
def run_client(self, host, port, protocol):
    cmd = ['openssl', 's_client', '-connect', '%s:%s' % (host, port),
           '-CAfile', CRT_FILE, '-cert', CRT_FILE, '-key', KEY_FILE,
           protocol]
    rc, _, _ = execCmd(cmd)
    return rc
def testNoCommandWithAffinity(self):
    rc, _, _ = commands.execCmd(["I.DONT.EXIST"])
    self.assertNotEqual(rc, 0)
def _run_command(args):
    cmd = [_UDEVADM.cmd]
    cmd.extend(args)
    rc, out, err = commands.execCmd(cmd, raw=True)
    if rc != 0:
        raise cmdutils.Error(cmd, rc, out, err)
def test_nice(self, cmd):
    rc, out, _ = commands.execCmd(cmd(('cat', '/proc/self/stat')), nice=7)
    assert rc == 0
    assert int(out[0].split()[18]) == 7
def setUp(self):
    self.proc = commands.execCmd([EXT_SLEEP, "2"], sync=False)
    self.proc_poll = self.proc.poll
    self.proc_kill = self.proc.kill
    self.proc_wait = self.proc.wait