def disk_format_gpt(self, host_uuid, idisk_dict, is_cinder_device):
    disk_node = idisk_dict.get('device_path')

    utils.disk_wipe(disk_node)
    utils.execute('parted', disk_node, 'mklabel', 'gpt')

    if is_cinder_device:
        LOG.debug("Removing .node_cinder_lvm_config_complete_file")
        try:
            os.remove(constants.NODE_CINDER_LVM_CONFIG_COMPLETE_FILE)
        except OSError:
            LOG.error(".node_cinder_lvm_config_complete_file not present.")

    # On SX ensure wipe succeeds before DB is updated.
    # Flag file is used to mark wiping in progress.
    try:
        os.remove(constants.DISK_WIPE_IN_PROGRESS_FLAG)
    except OSError:
        # it's ok if file is not present.
        pass

    # We need to send the updated info about the host disks back to
    # the conductor.
    idisk_update = self.idisk_get()
    ctxt = context.get_admin_context()
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)
    rpcapi.idisk_update_by_ihost(ctxt, host_uuid, idisk_update)

def test_check_exit_code_boolean(self):
    utils.execute('/usr/bin/env', 'false', check_exit_code=False)
    self.assertRaises(exception.ProcessExecutionError,
                      utils.execute,
                      '/usr/bin/env', 'false', check_exit_code=True)

def mkswap(dev, label='swap1'):
    """Execute mkswap on a device."""
    utils.execute('mkswap', '-L', label, dev, run_as_root=True,
                  check_exit_code=[0])

def dd(src, dst):
    """Execute dd from src to dst."""
    utils.execute('dd', 'if=%s' % src, 'of=%s' % dst, 'bs=1M',
                  'oflag=direct', run_as_root=True, check_exit_code=[0])

def discovery(portal_address, portal_port):
    """Do iSCSI discovery on portal."""
    utils.execute('iscsiadm', '-m', 'discovery', '-t', 'st', '-p',
                  '%s:%s' % (portal_address, portal_port),
                  run_as_root=True, check_exit_code=[0])

def logout_iscsi(portal_address, portal_port, target_iqn):
    """Logout from an iSCSI target."""
    utils.execute('iscsiadm', '-m', 'node', '-p',
                  '%s:%s' % (portal_address, portal_port),
                  '-T', target_iqn, '--logout',
                  run_as_root=True, check_exit_code=[0])

def login_iscsi(portal_address, portal_port, target_iqn):
    """Login to an iSCSI target."""
    utils.execute('iscsiadm', '-m', 'node', '-p',
                  '%s:%s' % (portal_address, portal_port),
                  '-T', target_iqn, '--login',
                  run_as_root=True, check_exit_code=[0])
    # Ensure the login completes before callers touch the device.
    time.sleep(3)

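# A minimal usage sketch for the iSCSI helpers above, assuming they are
# called in discovery -> login -> logout order. The portal address, port,
# and IQN below are hypothetical placeholders, not values from this repo.
def _example_iscsi_session():
    portal, port = '192.168.1.10', 3260
    iqn = 'iqn.2000-01.com.example:target1'
    discovery(portal, port)          # populate the node database
    login_iscsi(portal, port, iqn)   # attach the target's block devices
    try:
        pass  # use the attached block device here
    finally:
        logout_iscsi(portal, port, iqn)
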
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info."""
    if not os.path.exists(path):
        return QemuImgInfo()

    out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
                             'qemu-img', 'info', path)
    return QemuImgInfo(out)

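# A minimal sketch of how qemu_img_info() might be consumed. The path is a
# placeholder, and the file_format/virtual_size attributes are assumptions
# based on the usual oslo-style QemuImgInfo parser, not verified here.
def _example_image_check(path='/tmp/example.qcow2'):
    info = qemu_img_info(path)
    LOG.info("image format=%s virtual-size=%s" %
             (info.file_format, info.virtual_size))
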
def make_partitions(dev, root_mb, swap_mb):
    """Create partitions for root and swap on a disk device."""
    # Lead in with 1MB to allow room for the partition table itself,
    # otherwise the way sfdisk adjusts doesn't shift the partition up
    # to compensate, and we lose the space.
    # http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/
    # raring/view/head:/fdisk/sfdisk.c#L1940
    stdin_command = ('1,%d,83;\n,%d,82;\n0,0;\n0,0;\n' % (root_mb, swap_mb))
    utils.execute('sfdisk', '-uM', dev, process_input=stdin_command,
                  run_as_root=True, attempts=3, check_exit_code=[0])
    # avoid "device is busy"
    time.sleep(3)

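# Worked example of the sfdisk input built above (sizes are in MB because
# of -uM), assuming root_mb=512 and swap_mb=1024. Each "start,size,id" line
# describes one primary partition; 0x83 is Linux, 0x82 is Linux swap:
#
#   1,512,83;   -> partition 1: starts at 1MB, 512MB, Linux root
#   ,1024,82;   -> partition 2: starts right after, 1024MB, swap
#   0,0;        -> partition 3: unused
#   0,0;        -> partition 4: unused
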
def block_uuid(dev):
    """Get UUID of a block device."""
    out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
                           run_as_root=True, check_exit_code=[0])
    return out.strip()

def test_no_retry_on_success(self):
    fd, tmpfilename = tempfile.mkstemp()
    _, tmpfilename2 = tempfile.mkstemp()
    try:
        fp = os.fdopen(fd, 'w+')
        fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
        fp.close()
        os.chmod(tmpfilename, 0o755)
        utils.execute(tmpfilename,
                      tmpfilename2,
                      process_input='foo'.encode('utf-8'),
                      attempts=2)
    finally:
        os.unlink(tmpfilename)
        os.unlink(tmpfilename2)

def device_wipe(device):
    """Wipe the beginning and the end of a device, partition or disk."""

    # Wipe well known GPT table entries, if any.
    trycmd('wipefs', '-f', '-a', device)
    execute('udevadm', 'settle')

    # Wipe any other tables at the beginning of the device.
    out, err = trycmd('dd', 'if=/dev/zero',
                      'of=%s' % device,
                      'bs=512', 'count=2048',
                      'conv=fdatasync')
    LOG.info("Wiped beginning of device: %s - %s" % (out, err))

    # Get size of disk.
    size, __ = trycmd('blockdev', '--getsz', device)
    size = size.rstrip()

    if size and size.isdigit():
        # Wipe at the end of device.
        out, err = trycmd('dd', 'if=/dev/zero',
                          'of=%s' % device,
                          'bs=512', 'count=2048',
                          'seek=%s' % (int(size) - 2048),
                          'conv=fdatasync')
        LOG.info("Wiped end of device: %s - %s" % (out, err))

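# Arithmetic check for the tail wipe above, using a hypothetical 1 GiB
# device: blockdev --getsz reports the size in 512-byte sectors, so
# size == 2097152; the seek value positions dd at sector
# 2097152 - 2048 = 2095104, and bs=512 count=2048 then zeroes exactly
# the last 1 MiB of the device.
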
def test_mkfs(self):
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev')
    utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev')
    utils.execute('mkswap', '/my/swap/block/dev')
    self.mox.ReplayAll()

    utils.mkfs('ext4', '/my/block/dev')
    utils.mkfs('msdos', '/my/msdos/block/dev')
    utils.mkfs('swap', '/my/swap/block/dev')

def get_k8s_secret(secret_name, namespace=None):
    try:
        cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
               'get', 'secrets', secret_name]
        if namespace:
            cmd.append('--namespace=%s' % namespace)
        stdout, _ = cutils.execute(*cmd, run_as_root=False)
    except exception.ProcessExecutionError as e:
        if "not found" in e.stderr.lower():
            return None
        raise exception.SysinvException(
            "Error getting secret: %s in namespace: %s, "
            "Details: %s" % (secret_name, namespace, str(e)))
    return stdout

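# A minimal usage sketch; the secret name and namespace are hypothetical
# placeholders. get_k8s_secret() returns the raw kubectl output, or None
# when the secret does not exist in the given namespace.
def _example_secret_lookup():
    secret = get_k8s_secret('my-registry-secret', namespace='kube-system')
    if secret is None:
        LOG.info("secret not found")
    else:
        LOG.info("secret found: %s" % secret)
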
def _crushmap_rule_add(self, tier, replicate_by):
    """Add a tier crushmap rule."""
    crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_APPLIED)
    if not os.path.isfile(crushmap_flag_file):
        reason = "Cannot add any additional rules."
        raise exception.CephCrushMapNotApplied(reason=reason)

    default_root_name = self._format_root_name(self._default_tier)
    root_name = self._format_root_name(tier)
    if root_name == default_root_name:
        raise exception.CephCrushRuleAlreadyExists(
            tier=tier, rule='default')

    # get the current rule count
    rule_is_present, rule_name, rule_count = self._crush_rule_status(
        root_name)
    if rule_is_present:
        raise exception.CephCrushRuleAlreadyExists(
            tier=tier, rule=rule_name)

    # NOTE: The Ceph API only supports simple single step rule creation.
    # Because of this we need to update the crushmap the hard way.
    tmp_crushmap_bin_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                         "crushmap_rule_update.bin")
    tmp_crushmap_txt_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                         "crushmap_rule_update.txt")

    # Extract the crushmap
    cmd = ["ceph", "osd", "getcrushmap", "-o", tmp_crushmap_bin_file]
    stdout, __ = cutils.execute(*cmd, run_as_root=False)

    if os.path.exists(tmp_crushmap_bin_file):
        # Decompile the crushmap
        cmd = ["crushtool",
               "-d", tmp_crushmap_bin_file,
               "-o", tmp_crushmap_txt_file]
        stdout, __ = cutils.execute(*cmd, run_as_root=False)

        if os.path.exists(tmp_crushmap_txt_file):
            # Add the custom rule
            with open(tmp_crushmap_txt_file, 'r') as fp:
                contents = fp.readlines()

            self._insert_crush_rule(contents, root_name,
                                    rule_name, rule_count, replicate_by)

            with open(tmp_crushmap_txt_file, 'w') as fp:
                contents = "".join(contents)
                fp.write(contents)

            # Compile the crush map
            cmd = ["crushtool",
                   "-c", tmp_crushmap_txt_file,
                   "-o", tmp_crushmap_bin_file]
            stdout, __ = cutils.execute(*cmd, run_as_root=False)

            # Load the new crushmap
            LOG.info("Loading updated crushmap with elements for "
                     "crushmap root: %s" % root_name)
            cmd = ["ceph", "osd", "setcrushmap",
                   "-i", tmp_crushmap_bin_file]
            stdout, __ = cutils.execute(*cmd, run_as_root=False)

    # cleanup
    if os.path.exists(tmp_crushmap_txt_file):
        os.remove(tmp_crushmap_txt_file)
    if os.path.exists(tmp_crushmap_bin_file):
        os.remove(tmp_crushmap_bin_file)

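# The round-trip above follows the standard Ceph manual-edit workflow:
#   ceph osd getcrushmap -o map.bin   # export the compiled crushmap
#   crushtool -d map.bin -o map.txt   # decompile it to editable text
#   (edit map.txt)                    # insert the new rule
#   crushtool -c map.txt -o map.bin   # recompile
#   ceph osd setcrushmap -i map.bin   # load the updated map back
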
def ipv_delete(self, ipv_dict):
    """Delete LVM physical volume

    Also delete Logical volume Group if PV is last in group

    :param ipv_dict: values for physical volume object
    :returns: pass or fail
    """
    LOG.info("Deleting PV: %s" % (ipv_dict))

    if ipv_dict['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
        # disable LIO targets before cleaning up volumes
        # as they may keep the volumes busy
        LOG.info("Clearing LIO configuration")
        cutils.execute('targetctl', 'clear', run_as_root=True)
        # Note: targets are restored from config file by Cinder
        # on restart. Restarts should be done after 'cinder-volumes'
        # re-configuration

    # Check if LVG exists
    stdout, __ = cutils.execute('vgs', '--reportformat', 'json',
                                run_as_root=True)
    data = json.loads(stdout)['report']
    LOG.debug("ipv_delete vgs data: %s" % data)
    vgs = []
    for vgs_entry in data:
        if type(vgs_entry) == dict and 'vg' in vgs_entry.keys():
            vgs = vgs_entry['vg']
            break
    for vg in vgs:
        if vg['vg_name'] == ipv_dict['lvm_vg_name']:
            break
    else:
        LOG.info("VG %s not found, "
                 "skipping removal" % ipv_dict['lvm_vg_name'])
        vg = None

    # Remove all volumes from volume group before deleting any PV from it
    # (without proper pvmove the data will get corrupted anyway, so better
    # we remove the data while the group is still clean)
    if vg:
        LOG.info("Removing all volumes "
                 "from LVG %s" % ipv_dict['lvm_vg_name'])
        # VG exists, should not give any errors
        # (make sure no FD is open when running this)
        # TODO(oponcea): Run pvmove if multiple PVs are
        # associated with the same LVG to avoid data loss
        cutils.execute('lvremove', ipv_dict['lvm_vg_name'], '-f',
                       run_as_root=True)

    # Check if PV exists
    stdout, __ = cutils.execute('pvs', '--reportformat', 'json',
                                run_as_root=True)
    data = json.loads(stdout)['report']
    LOG.debug("ipv_delete pvs data: %s" % data)
    pvs = []
    for pvs_entry in data:
        if type(pvs_entry) == dict and 'pv' in pvs_entry.keys():
            pvs = pvs_entry['pv']
            break
    for pv in pvs:
        if (pv['vg_name'] == ipv_dict['lvm_vg_name'] and
                pv['pv_name'] == ipv_dict['lvm_pv_name']):
            break
    else:
        pv = None

    # Removing PV. VG goes down with it if last PV is removed from it
    if pv:
        parm = {'dev': ipv_dict['lvm_pv_name'],
                'vg': ipv_dict['lvm_vg_name']}
        if (pv['vg_name'] == ipv_dict['lvm_vg_name'] and
                pv['pv_name'] == ipv_dict['lvm_pv_name']):
            LOG.info("Removing PV %(dev)s "
                     "from LVG %(vg)s" % parm)
            cutils.execute('pvremove', ipv_dict['lvm_pv_name'],
                           '--force', '--force', '-y',
                           run_as_root=True)
        else:
            LOG.warn("PV %(dev)s from LVG %(vg)s not found, "
                     "nothing to remove!" % parm)

    try:
        cutils.disk_wipe(ipv_dict['idisk_device_node'])
        # Clean up the directory used by the volume group otherwise VG
        # creation will fail without a reboot
        vgs, __ = cutils.execute('vgs', '--noheadings', '-o', 'vg_name',
                                 run_as_root=True)
        vgs = [v.strip() for v in vgs.split("\n")]
        if ipv_dict['lvm_vg_name'] not in vgs:
            cutils.execute('rm', '-rf',
                           '/dev/%s' % ipv_dict['lvm_vg_name'])
    except exception.ProcessExecutionError as e:
        LOG.warning("Continuing after wipe command returned exit code: "
                    "%(exit_code)s stdout: %(stdout)s err: %(stderr)s" %
                    {'exit_code': e.exit_code,
                     'stdout': e.stdout,
                     'stderr': e.stderr})

    LOG.info("Deleting PV: %s completed" % (ipv_dict))

def convert_image(source, dest, out_format, run_as_root=False):
    """Convert image to other format."""
    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
    utils.execute(*cmd, run_as_root=run_as_root)

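# Hypothetical usage: convert a qcow2 image to raw (paths are placeholders).
#   convert_image('/tmp/disk.qcow2', '/tmp/disk.raw', 'raw')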