def test_no_exception_raised_for_execute(self, mock_exec):
    # Make sure we can call ironic_lib.utils.execute() even though we
    # didn't mock it. We do mock processutils.execute() so we don't
    # actually execute anything.
    utils.execute("ls")
    utils.execute("echo")
    self.assertEqual(2, mock_exec.call_count)
def _fix_gpt_structs(device, node_uuid):
    """Checks backup GPT data structures and moves them to end of the device

    :param device: The device path.
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    """
    try:
        output, err = utils.execute('partprobe', device,
                                    use_standard_locale=True,
                                    run_as_root=True)
        search_str = "fix the GPT to use all of the space"
        if search_str in err:
            utils.execute('sgdisk', '-e', device, run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to fix GPT data structures on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
def test_no_retry_on_success(self):
    fd, tmpfilename = tempfile.mkstemp()
    _, tmpfilename2 = tempfile.mkstemp()
    try:
        fp = os.fdopen(fd, "w+")
        fp.write("""#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
""")
        fp.close()
        os.chmod(tmpfilename, 0o755)
        try:
            utils.execute(tmpfilename,
                          tmpfilename2,
                          process_input=b"foo",
                          attempts=2)
        except OSError as e:
            if e.errno == errno.EACCES:
                self.skipTest("Permissions error detected. "
                              "Are you running with a noexec /tmp?")
            else:
                raise
    finally:
        os.unlink(tmpfilename)
        os.unlink(tmpfilename2)
def fix_gpt_partition(node_uuid, device, is_gpt_partitioned=None):
    """Fix GPT partition layout on the given device, if required.

    :param node_uuid: UUID of the Node.
    :param device: The device path.
    :param is_gpt_partitioned: Whether the device is known to be GPT
        partitioned; detected automatically when not provided.
    :raises: InstanceDeployFailure if exception is caught.
    """
    try:
        LOG.debug("fix_gpt_partition starting with is_gpt_partitioned=%s",
                  is_gpt_partitioned)
        if not is_gpt_partitioned:
            is_gpt_partitioned = _is_disk_gpt_partitioned(device, node_uuid)
        LOG.debug("fix_gpt_partition detected is_gpt_partitioned=%s",
                  is_gpt_partitioned)
        if is_gpt_partitioned:
            LOG.debug("fix_gpt_partition fixing GPT structures")
            _fix_gpt_structs(device, node_uuid)
            create_option = '0:-%dMB:0' % MAX_CONFIG_DRIVE_SIZE_MB
            utils.execute('sgdisk', '-n', create_option, device,
                          run_as_root=True)
    except Exception as e:
        msg = (_('Failed to fix GPT partition on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
    finally:
        LOG.debug("fix_gpt_partition done")
def _get_labelled_partition(device, label, node_uuid):
    """Check and return if partition with given label exists

    :param device: The device path.
    :param label: Partition label
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: block device file for partition if it exists; otherwise it
        returns None.
    """
    try:
        utils.execute('partprobe', device, run_as_root=True)
        label_arg = 'LABEL=%s' % label
        output, err = utils.execute('blkid', '-o', 'device', device,
                                    '-t', label_arg, check_exit_code=[0, 2],
                                    use_standard_locale=True,
                                    run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to retrieve partition labels on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)

    if output:
        if len(output.split()) > 1:
            raise exception.InstanceDeployFailure(
                _('More than one config drive exists on device %(device)s '
                  'for node %(node)s.')
                % {'device': device, 'node': node_uuid})

        return output.rstrip()
def test_execute_use_standard_locale_with_env_variables(self, execute_mock):
    utils.execute('foo', use_standard_locale=True,
                  env_variables={'foo': 'bar'})
    execute_mock.assert_called_once_with('foo',
                                         env_variables={'LC_ALL': 'C',
                                                        'foo': 'bar'})
def destroy_disk_metadata(dev, node_uuid):
    """Destroy metadata structures on node's disk.

    Ensure that node's disk magic strings are wiped without zeroing the
    entire drive. To do this we use the wipefs tool from util-linux.

    :param dev: Path for the device to work on.
    :param node_uuid: Node's uuid. Used for logging.
    """
    # NOTE(NobodyCam): This is needed to work around bug:
    # https://bugs.launchpad.net/ironic/+bug/1317647
    LOG.debug("Start destroy disk metadata for node %(node)s.",
              {'node': node_uuid})
    try:
        utils.execute('wipefs', '--force', '--all', dev,
                      run_as_root=True,
                      use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        # NOTE(zhenguo): Check if --force option is supported for wipefs,
        # if not, we should try without it.
        if '--force' in str(e):
            utils.execute('wipefs', '--all', dev,
                          run_as_root=True,
                          use_standard_locale=True)
        else:
            raise e

    LOG.info(_LI("Disk metadata on %(dev)s successfully destroyed for node "
                 "%(node)s"), {'dev': dev, 'node': node_uuid})
def test_no_retry_on_success(self):
    fd, tmpfilename = tempfile.mkstemp()
    _, tmpfilename2 = tempfile.mkstemp()
    try:
        fp = os.fdopen(fd, 'w+')
        fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
        fp.close()
        os.chmod(tmpfilename, 0o755)
        try:
            utils.execute(tmpfilename,
                          tmpfilename2,
                          process_input=b'foo',
                          attempts=2)
        except OSError as e:
            if e.errno == errno.EACCES:
                self.skipTest("Permissions error detected. "
                              "Are you running with a noexec /tmp?")
            else:
                raise
    finally:
        os.unlink(tmpfilename)
        os.unlink(tmpfilename2)
def _get_labelled_partition(device_path, label, node_uuid):
    """Check and return if partition with given label exists

    :param device_path: The device path.
    :param label: Partition label
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: block device file for partition if it exists; otherwise it
        returns None.
    """
    try:
        utils.execute('partprobe', device_path, run_as_root=True)

        # lsblk command
        output, err = utils.execute('lsblk', '-Po', 'name,label', device_path,
                                    check_exit_code=[0, 1],
                                    use_standard_locale=True,
                                    run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to retrieve partition labels on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device_path, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)

    found_part = None
    if output:
        for device in output.split('\n'):
            dev = {key: value for key, value in
                   (v.split('=', 1) for v in shlex.split(device))}
            if not dev:
                continue
            if dev['LABEL'] == label:
                if found_part:
                    found_2 = '/dev/%(part)s' % {'part': dev['NAME'].strip()}
                    found = [found_part, found_2]
                    raise exception.InstanceDeployFailure(
                        _('More than one partition with label "%(label)s" '
                          'exists on device %(device)s for node %(node)s: '
                          '%(found)s.') %
                        {'label': label, 'device': device_path,
                         'node': node_uuid, 'found': ' and '.join(found)})
                found_part = '/dev/%(part)s' % {'part': dev['NAME'].strip()}

    return found_part
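# Illustrative sketch, not part of the module above: how one 'lsblk -Po
# name,label' output line is turned into a dict by the shlex-based
# comprehension in _get_labelled_partition(). The sample line is hypothetical.
import shlex

sample_line = 'NAME="sda2" LABEL="config-2"'
parsed = {key: value for key, value in
          (v.split('=', 1) for v in shlex.split(sample_line))}
assert parsed == {'NAME': 'sda2', 'LABEL': 'config-2'}
# A matching label would then yield the partition path '/dev/sda2'.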
def get_uefi_disk_identifier(dev):
    """Get the uuid from the disk being exposed by the ramdisk.

    This uuid is appended to the pxe config which will then be set as the
    root and load the bootx64.efi file using chainloader and boot the
    machine. This is helpful in deployments to nodes with multiple disks.

    https://wiki.gentoo.org/wiki/GRUB2/Chainloading

    :param dev: Path for the already populated disk device.
    :raises InstanceDeployFailure: Image is not UEFI bootable.
    :returns: The UUID of the partition.
    """
    partition_id = None
    try:
        report, _ = utils.execute('fdisk', '-l', dev, run_as_root=True)
    except processutils.ProcessExecutionError as e:
        msg = _('Failed to find the partition on the disk %s ') % e
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
    for line in report.splitlines():
        if line.startswith(dev) and 'EFI System' in line:
            vals = line.split()
            partition_id = vals[0]
    try:
        lsblk_output, _ = utils.execute('lsblk', '-PbioUUID', partition_id,
                                        run_as_root=True)
        disk_identifier = lsblk_output.split("=")[1].strip()
        disk_identifier = disk_identifier.strip('"')
    except processutils.ProcessExecutionError as e:
        raise exception.InstanceDeployFailure("Image is not UEFI bootable. "
                                              "Error: %s " % e)
    return disk_identifier
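# Illustrative sketch (assumption, not part of the module above): how the
# UUID is extracted from the 'lsblk -PbioUUID' output in
# get_uefi_disk_identifier(). The sample output string is hypothetical.
sample_lsblk_output = 'UUID="ABCD-1234"\n'

disk_identifier = sample_lsblk_output.split("=")[1].strip()
disk_identifier = disk_identifier.strip('"')
assert disk_identifier == 'ABCD-1234'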
def block_uuid(dev):
    """Get UUID of a block device.

    Try to fetch the UUID, if that fails, try to fetch the PARTUUID.
    """
    out, _err = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
                              run_as_root=True,
                              check_exit_code=[0])
    if not out:
        LOG.debug('Falling back to partition UUID as the block device UUID '
                  'was not found while examining %(device)s',
                  {'device': dev})
        out, _err = utils.execute('blkid', '-s', 'PARTUUID', '-o', 'value',
                                  dev, run_as_root=True,
                                  check_exit_code=[0])
    return out.strip()
def _exec(self, *args):
    # NOTE(lucasagomes): utils.execute() is already a wrapper on top
    #                    of processutils.execute() which raises specific
    #                    exceptions. It also logs any failure so we don't
    #                    need to log it again here.
    utils.execute('parted', '-a', self._alignment, '-s', self._device, '--',
                  'unit', 'MiB', *args, check_exit_code=[0],
                  use_standard_locale=True, run_as_root=True)
def test_execute_with_root_helper_run_as_root(self):
    with mock.patch.object(processutils, 'execute',
                           autospec=True) as execute_mock:
        utils.execute('foo', run_as_root=True)
        execute_mock.assert_called_once_with(
            'foo', run_as_root=True,
            root_helper=CONF.ironic_lib.root_helper)
def test_can_mock_execute(self, mock_exec):
    # NOTE(jlvillal): We had discovered an issue where mocking wasn't
    # working because we had used a mock to block access to the execute
    # functions. This caused us to "mock a mock" and didn't work correctly.
    # We want to make sure that we can mock our execute functions even with
    # our "block execute" code.
    utils.execute("ls")
    utils.execute("echo")
    self.assertEqual(2, mock_exec.call_count)
def setUp(self):
    super(RealFilePartitioningTestCase, self).setUp()
    # NOTE(dtantsur): no parted utility on gate-ironic-python26
    try:
        utils.execute('parted', '--version')
    except OSError as exc:
        self.skipTest('parted utility was not found: %s' % exc)
    self.file = tempfile.NamedTemporaryFile(delete=False)
    # NOTE(ifarkas): the file needs to be closed, so fuser won't report
    #                any usage
    self.file.close()
    # NOTE(dtantsur): 20 MiB file with zeros
    utils.execute('dd', 'if=/dev/zero', 'of=%s' % self.file.name,
                  'bs=1', 'count=0', 'seek=20MiB')
def destroy_disk_metadata(dev, node_uuid):
    """Destroy metadata structures on node's disk.

    Ensure that node's disk appears to be blank without zeroing the entire
    drive. To do this we will zero the first 18KiB to clear MBR / GPT data
    and the last 18KiB to clear GPT and other metadata like LVM, veritas,
    MDADM, DMRAID, etc.
    """
    # NOTE(NobodyCam): This is needed to work around bug:
    # https://bugs.launchpad.net/ironic/+bug/1317647
    LOG.debug("Start destroy disk metadata for node %(node)s.",
              {'node': node_uuid})
    try:
        utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
                      'bs=512', 'count=36', run_as_root=True,
                      check_exit_code=[0])
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to erase beginning of disk for node "
                          "%(node)s. Command: %(command)s. "
                          "Error: %(error)s."),
                      {'node': node_uuid,
                       'command': err.cmd,
                       'error': err.stderr})

    # now wipe the end of the disk.
    # get end of disk seek value
    try:
        block_sz = get_dev_block_size(dev)
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to get disk block count for node "
                          "%(node)s. Command: %(command)s. "
                          "Error: %(error)s."),
                      {'node': node_uuid,
                       'command': err.cmd,
                       'error': err.stderr})
    else:
        seek_value = block_sz - 36
        try:
            utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
                          'bs=512', 'count=36', 'seek=%d' % seek_value,
                          run_as_root=True, check_exit_code=[0])
        except processutils.ProcessExecutionError as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to erase the end of the disk on node "
                              "%(node)s. Command: %(command)s. "
                              "Error: %(error)s."),
                          {'node': node_uuid,
                           'command': err.cmd,
                           'error': err.stderr})
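# Worked example (assuming 512-byte sectors, as reported by
# 'blockdev --getsz'): destroy_disk_metadata() above clears 36 sectors, i.e.
# 18 KiB, at the start of the device and the same amount at the end. For a
# hypothetical 20 GiB disk the tail wipe would seek to sector 41943004.
SECTOR_SIZE = 512
SECTORS_TO_WIPE = 36

wiped_bytes = SECTOR_SIZE * SECTORS_TO_WIPE
assert wiped_bytes == 18 * 1024  # 18 KiB

block_count = (20 * 1024 * 1024 * 1024) // SECTOR_SIZE  # 20 GiB disk
seek_value = block_count - SECTORS_TO_WIPE
assert seek_value == 41943004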
def _test_execute_with_log_stdout(self, log_mock, log_stdout=None):
    with mock.patch.object(processutils, "execute") as execute_mock:
        execute_mock.return_value = ("stdout", "stderr")
        if log_stdout is not None:
            utils.execute("foo", log_stdout=log_stdout)
        else:
            utils.execute("foo")
        execute_mock.assert_called_once_with("foo")
        name, args, kwargs = log_mock.debug.mock_calls[1]
        if log_stdout is False:
            self.assertEqual(2, log_mock.debug.call_count)
            self.assertNotIn("stdout", args[0])
        else:
            self.assertEqual(3, log_mock.debug.call_count)
            self.assertIn("stdout", args[0])
def _test_execute_with_log_stdout(self, log_mock, log_stdout=None):
    with mock.patch.object(processutils, 'execute',
                           autospec=True) as execute_mock:
        execute_mock.return_value = ('stdout', 'stderr')
        if log_stdout is not None:
            utils.execute('foo', log_stdout=log_stdout)
        else:
            utils.execute('foo')
        execute_mock.assert_called_once_with('foo')
        name, args, kwargs = log_mock.debug.mock_calls[1]
        if log_stdout is False:
            self.assertEqual(2, log_mock.debug.call_count)
            self.assertNotIn('stdout', args[0])
        else:
            self.assertEqual(3, log_mock.debug.call_count)
            self.assertIn('stdout', args[0])
def _wait_for_disk_to_become_available(self, retries, max_retries, pids,
                                       stderr):
    retries[0] += 1
    if retries[0] > max_retries:
        raise loopingcall.LoopingCallDone()

    try:
        # NOTE(ifarkas): fuser returns a non-zero return code if none of
        #                the specified files is accessed
        out, err = utils.execute('fuser', self._device,
                                 check_exit_code=[0, 1], run_as_root=True)

        if not out and not err:
            raise loopingcall.LoopingCallDone()
        else:
            if err:
                stderr[0] = err
            if out:
                pids_match = re.search(self._fuser_pids_re, out)
                pids[0] = pids_match.group()
    except processutils.ProcessExecutionError as exc:
        LOG.warning(_LW('Failed to check the device %(device)s with fuser:'
                        ' %(err)s'), {'device': self._device, 'err': exc})
def list_partitions(device):
    """Get partitions information from given device.

    :param device: The device path.
    :returns: list of dictionaries (one per partition) with keys:
              number, start, end, size (in MiB), filesystem, flags
    """
    output = utils.execute(
        'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
        use_standard_locale=True, run_as_root=True)[0]
    if isinstance(output, bytes):
        output = output.decode("utf-8")
    lines = [line for line in output.split('\n') if line.strip()][2:]
    # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
    fields = ('number', 'start', 'end', 'size', 'filesystem', 'flags')
    result = []
    for line in lines:
        match = _PARTED_PRINT_RE.match(line)
        if match is None:
            LOG.warning(_LW("Partition information from parted for device "
                            "%(device)s does not match "
                            "expected format: %(line)s"),
                        dict(device=device, line=line))
            continue
        # Cast int fields to ints (some are floats and we round them down)
        groups = [int(float(x)) if i < 4 else x
                  for i, x in enumerate(match.groups())]
        result.append(dict(zip(fields, groups)))
    return result
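# Illustrative sketch: how a machine-readable parted line maps to the dict
# produced by list_partitions() above. The regular expression below is an
# assumption standing in for the module-level _PARTED_PRINT_RE, which is not
# shown in this excerpt; only the parsing flow is what the function relies on.
import re

_PARTED_PRINT_RE = re.compile(
    r"^(\d+):([\d\.]+)MiB:([\d\.]+)MiB:([\d\.]+)MiB:(\w*):[^:]*:(\w*);?$")

line = "1:1.00MiB:501MiB:500MiB:ext4::boot"
match = _PARTED_PRINT_RE.match(line)
fields = ('number', 'start', 'end', 'size', 'filesystem', 'flags')
groups = [int(float(x)) if i < 4 else x
          for i, x in enumerate(match.groups())]
print(dict(zip(fields, groups)))
# -> {'number': 1, 'start': 1, 'end': 501, 'size': 500,
#     'filesystem': 'ext4', 'flags': 'boot'}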
def get_disk_identifier(dev):
    """Get the disk identifier from the disk being exposed by the ramdisk.

    This disk identifier is appended to the pxe config which will then be
    used by chain.c32 to detect the correct disk to chainload. This is
    helpful in deployments to nodes with multiple disks.

    http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:

    :param dev: Path for the already populated disk device.
    :raises OSError: When the hexdump binary is unavailable.
    :returns: The Disk Identifier.
    """
    disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4',
                                    '-e', '''\"0x%08x\"''',
                                    dev, run_as_root=True,
                                    check_exit_code=[0],
                                    attempts=5, delay_on_retry=True)
    return disk_identifier[0]
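# Worked example with hypothetical signature bytes: the hexdump invocation in
# get_disk_identifier() prints the 4-byte MBR disk signature at offset 440 as
# a single '0x%08x' value. hexdump interprets the bytes in host byte order;
# little-endian is assumed here. The same formatting in pure Python:
import struct

signature_bytes = b"\x78\x56\x34\x12"        # as stored on disk (hypothetical)
(signature,) = struct.unpack("<I", signature_bytes)
print("0x%08x" % signature)                  # -> 0x12345678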
def count_mbr_partitions(device):
    """Count the number of primary and logical partitions on a MBR

    :param device: The device path.
    :returns: A tuple with the number of primary partitions and logical
              partitions.
    :raise: ValueError if the device does not have a valid MBR partition
            table.
    """
    # -d do not update the kernel table
    # -s print a summary of the partition table
    output, err = utils.execute('partprobe', '-d', '-s', device,
                                run_as_root=True, use_standard_locale=True)
    if 'msdos' not in output:
        raise ValueError('The device %s does not have a valid MBR '
                         'partition table' % device)
    # Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7>
    # The partitions with number > 4 (and inside <>) are logical partitions
    output = output.replace('<', '').replace('>', '')
    partitions = [int(s) for s in output.split() if s.isdigit()]

    return (sum(i < 5 for i in partitions), sum(i > 4 for i in partitions))
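# Worked example: how the sample partprobe summary quoted in the comment
# above is reduced to a (primary, logical) tuple by count_mbr_partitions().
output = '/dev/vdb: msdos partitions 1 2 3 <5 6 7>'
output = output.replace('<', '').replace('>', '')
partitions = [int(s) for s in output.split() if s.isdigit()]
print((sum(i < 5 for i in partitions), sum(i > 4 for i in partitions)))
# -> (3, 3)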
def _is_disk_gpt_partitioned(device, node_uuid):
    """Checks if the disk is GPT partitioned

    :param device: The device path.
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: Boolean. Returns True if disk is GPT partitioned
    """
    try:
        stdout, _stderr = utils.execute('blkid', '-p', '-o', 'value',
                                        '-s', 'PTTYPE', device,
                                        use_standard_locale=True,
                                        run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to retrieve partition table type for disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)

    return (stdout.lower().strip() == 'gpt')
def _is_disk_larger_than_max_size(device, node_uuid):
    """Check if total disk size exceeds 2TB msdos limit

    :param device: device path.
    :param node_uuid: node's uuid. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: True if total disk size exceeds 2TB. Returns False otherwise.
    """
    try:
        disksize_bytes, err = utils.execute('blockdev', '--getsize64',
                                            device,
                                            use_standard_locale=True,
                                            run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to get size of disk %(disk)s for node %(node)s. '
                 'Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)

    disksize_mb = int(disksize_bytes.strip()) // 1024 // 1024

    return disksize_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR
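# Worked example with a hypothetical 3 TiB disk: the byte count reported on
# stdout by 'blockdev --getsize64' is converted to MiB and compared against
# the 2 TiB msdos limit. The value of MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR is an
# assumption here (2 TiB expressed in MiB); the constant itself is defined
# elsewhere in the module.
MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152  # 2 TiB in MiB (assumed value)

disksize_bytes = '3298534883328\n'           # 3 TiB, as printed on stdout
disksize_mb = int(disksize_bytes.strip()) // 1024 // 1024
print(disksize_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR)  # -> True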
def list_partitions(device):
    """Get partitions information from given device.

    :param device: The device path.
    :returns: list of dictionaries (one per partition) with keys:
              number, start, end, size (in MiB), filesystem, flags
    """
    output = utils.execute(
        'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
        use_standard_locale=True, run_as_root=True)[0]
    if isinstance(output, bytes):
        output = output.decode("utf-8")
    lines = [line for line in output.split('\n') if line.strip()][2:]
    # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
    fields = ('number', 'start', 'end', 'size', 'filesystem', 'flags')
    result = []
    for line in lines:
        match = _PARTED_PRINT_RE.match(line)
        if match is None:
            LOG.warning("Partition information from parted for device "
                        "%(device)s does not match "
                        "expected format: %(line)s",
                        dict(device=device, line=line))
            continue
        # Cast int fields to ints (some are floats and we round them down)
        groups = [int(float(x)) if i < 4 else x
                  for i, x in enumerate(match.groups())]
        result.append(dict(zip(fields, groups)))
    return result
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info."""
    if not os.path.exists(path):
        return imageutils.QemuImgInfo()

    out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
                             'qemu-img', 'info', path)
    return imageutils.QemuImgInfo(out)
def get_dev_block_size(dev):
    """Get the device size in 512 byte sectors."""
    block_sz, cmderr = utils.execute('blockdev', '--getsz', dev,
                                     run_as_root=True, check_exit_code=[0])
    return int(block_sz)
def block_uuid(dev):
    """Get UUID of a block device."""
    out, _err = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
                              run_as_root=True, check_exit_code=[0])
    return out.strip()
def destroy_disk_metadata(dev, node_uuid):
    """Destroy metadata structures on node's disk.

    Ensure that node's disk magic strings are wiped without zeroing the
    entire drive. To do this we use the wipefs tool from util-linux.

    :param dev: Path for the device to work on.
    :param node_uuid: Node's uuid. Used for logging.
    """
    # NOTE(NobodyCam): This is needed to work around bug:
    # https://bugs.launchpad.net/ironic/+bug/1317647
    LOG.debug("Start destroy disk metadata for node %(node)s.",
              {'node': node_uuid})
    try:
        utils.execute('wipefs', '--force', '--all', dev,
                      run_as_root=True,
                      use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        with excutils.save_and_reraise_exception() as ctxt:
            # NOTE(zhenguo): Check if --force option is supported for wipefs,
            # if not, we should try without it.
            if '--force' in str(e):
                ctxt.reraise = False
                utils.execute('wipefs', '--all', dev,
                              run_as_root=True,
                              use_standard_locale=True)

    LOG.info(_LI("Disk metadata on %(dev)s successfully destroyed for node "
                 "%(node)s"), {'dev': dev, 'node': node_uuid})
def _fix_gpt_structs(device, node_uuid):
    """Checks backup GPT data structures and moves them to end of the device

    :param device: The device path.
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    """
    try:
        output, err = utils.execute('partprobe', device,
                                    use_standard_locale=True,
                                    run_as_root=True)
        search_str = "fix the GPT to use all of the space"
        if search_str in err:
            utils.execute('sgdisk', '-e', device, run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to fix GPT data structures on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
def get_disk_identifier(dev):
    """Get the disk identifier from the disk being exposed by the ramdisk.

    This disk identifier is appended to the pxe config which will then be
    used by chain.c32 to detect the correct disk to chainload. This is
    helpful in deployments to nodes with multiple disks.

    http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:

    :param dev: Path for the already populated disk device.
    :returns: The Disk Identifier.
    """
    disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4',
                                    '-e', '''\"0x%08x\"''',
                                    dev, run_as_root=True,
                                    check_exit_code=[0],
                                    attempts=5, delay_on_retry=True)
    return disk_identifier[0]
def count_mbr_partitions(device):
    """Count the number of primary and logical partitions on a MBR

    :param device: The device path.
    :returns: A tuple with the number of primary partitions and logical
              partitions.
    :raise: ValueError if the device does not have a valid MBR partition
            table.
    """
    # -d do not update the kernel table
    # -s print a summary of the partition table
    output, err = utils.execute('partprobe', '-d', '-s', device,
                                run_as_root=True, use_standard_locale=True)
    if 'msdos' not in output:
        raise ValueError('The device %s does not have a valid MBR '
                         'partition table' % device)
    # Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7>
    # The partitions with number > 4 (and inside <>) are logical partitions
    output = output.replace('<', '').replace('>', '')
    partitions = [int(s) for s in output.split() if s.isdigit()]

    return (sum(i < 5 for i in partitions), sum(i > 4 for i in partitions))
def _wait_for_disk_to_become_available(self, retries, max_retries, pids,
                                       stderr):
    retries[0] += 1
    if retries[0] > max_retries:
        raise loopingcall.LoopingCallDone()

    try:
        # NOTE(ifarkas): fuser returns a non-zero return code if none of
        #                the specified files is accessed
        out, err = utils.execute('fuser', self._device,
                                 check_exit_code=[0, 1], run_as_root=True)

        if not out and not err:
            raise loopingcall.LoopingCallDone()
        else:
            if err:
                stderr[0] = err
            if out:
                pids_match = re.search(self._fuser_pids_re, out)
                pids[0] = pids_match.group()
    except processutils.ProcessExecutionError as exc:
        LOG.warning(_LW('Failed to check the device %(device)s with fuser:'
                        ' %(err)s'),
                    {'device': self._device, 'err': exc})
def _is_disk_gpt_partitioned(device, node_uuid):
    """Checks if the disk is GPT partitioned

    :param device: The device path.
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: Boolean. Returns True if disk is GPT partitioned
    """
    try:
        stdout, _stderr = utils.execute(
            'blkid', '-p', '-o', 'value', '-s', 'PTTYPE', device,
            use_standard_locale=True, run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to retrieve partition table type for disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)

    return (stdout.lower().strip() == 'gpt')
def _is_disk_larger_than_max_size(device, node_uuid):
    """Check if total disk size exceeds 2TB msdos limit

    :param device: device path.
    :param node_uuid: node's uuid. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    :returns: True if total disk size exceeds 2TB. Returns False otherwise.
    """
    try:
        disksize_bytes = utils.execute('blockdev', '--getsize64', device,
                                       use_standard_locale=True,
                                       run_as_root=True)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to get size of disk %(disk)s for node %(node)s. '
                 'Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)

    disksize_mb = int(disksize_bytes[0].strip()) // 1024 // 1024

    return disksize_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR
def test_execute_without_root_helper(self):
    with mock.patch.object(processutils, 'execute') as execute_mock:
        utils.execute('foo', run_as_root=False)
        execute_mock.assert_called_once_with('foo', run_as_root=False)
def execute(*cmd, **kwargs):
    """Convenience wrapper around ironic_lib's execute() method.

    Executes and logs results from a system command.
    """
    return ironic_utils.execute(*cmd, **kwargs)
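# Usage sketch for the wrapper above; the command and keyword arguments are
# only illustrative and mirror how other call sites in this collection use
# execute(). Keyword arguments are passed straight through to
# ironic_lib's execute().
out, err = execute('ls', '-l', '/tmp', attempts=2)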
def test_execute_not_use_standard_locale(self, execute_mock):
    utils.execute("foo", use_standard_locale=False,
                  env_variables={"foo": "bar"})
    execute_mock.assert_called_once_with("foo",
                                         env_variables={"foo": "bar"})
def create_config_drive_partition(node_uuid, device, configdrive):
    """Create a partition for config drive

    Checks if the device is GPT or MBR partitioned and creates config drive
    partition accordingly.

    :param node_uuid: UUID of the Node.
    :param device: The device path.
    :param configdrive: Base64 encoded Gzipped configdrive content or
        configdrive HTTP URL.
    :raises: InstanceDeployFailure if config drive size exceeds maximum limit
        or if it fails to create config drive.
    """
    confdrive_file = None
    try:
        config_drive_part = _get_labelled_partition(device,
                                                    CONFIGDRIVE_LABEL,
                                                    node_uuid)

        confdrive_mb, confdrive_file = _get_configdrive(configdrive,
                                                        node_uuid)
        if confdrive_mb > MAX_CONFIG_DRIVE_SIZE_MB:
            raise exception.InstanceDeployFailure(
                _('Config drive size exceeds maximum limit of 64MiB. '
                  'Size of the given config drive is %(size)d MiB for '
                  'node %(node)s.')
                % {'size': confdrive_mb, 'node': node_uuid})

        LOG.debug("Adding config drive partition %(size)d MiB to "
                  "device: %(dev)s for node %(node)s",
                  {'dev': device, 'size': confdrive_mb, 'node': node_uuid})

        if config_drive_part:
            LOG.debug("Configdrive for node %(node)s exists at "
                      "%(part)s",
                      {'node': node_uuid, 'part': config_drive_part})
        else:
            cur_parts = set(part['number'] for part in
                            list_partitions(device))

            if _is_disk_gpt_partitioned(device, node_uuid):
                _fix_gpt_structs(device, node_uuid)
                create_option = '0:-%dMB:0' % MAX_CONFIG_DRIVE_SIZE_MB
                utils.execute('sgdisk', '-n', create_option, device,
                              run_as_root=True)
            else:
                # Check if the disk has 4 partitions. The MBR based disk
                # cannot have more than 4 partitions.
                # TODO(stendulker): One can use logical partitions to create
                # a config drive if there are 3 primary partitions.
                # https://bugs.launchpad.net/ironic/+bug/1561283
                try:
                    pp_count, lp_count = count_mbr_partitions(device)
                except ValueError as e:
                    raise exception.InstanceDeployFailure(
                        _('Failed to check the number of primary partitions '
                          'present on %(dev)s for node %(node)s. Error: '
                          '%(error)s') % {'dev': device, 'node': node_uuid,
                                          'error': e})
                if pp_count > 3:
                    raise exception.InstanceDeployFailure(
                        _('Config drive cannot be created for node '
                          '%(node)s. Disk (%(dev)s) uses MBR partitioning '
                          'and already has %(parts)d primary partitions.')
                        % {'node': node_uuid, 'dev': device,
                           'parts': pp_count})

                # Check if disk size exceeds 2TB msdos limit
                startlimit = '-%dMiB' % MAX_CONFIG_DRIVE_SIZE_MB
                endlimit = '-0'
                if _is_disk_larger_than_max_size(device, node_uuid):
                    # Need to create a small partition at 2TB limit
                    LOG.warning(_LW("Disk size is larger than 2TB for "
                                    "node %(node)s. Creating config drive "
                                    "at the end of the disk %(disk)s."),
                                {'node': node_uuid, 'disk': device})
                    startlimit = (MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR -
                                  MAX_CONFIG_DRIVE_SIZE_MB - 1)
                    endlimit = MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR - 1

                utils.execute('parted', '-a', 'optimal', '-s', '--',
                              device, 'mkpart', 'primary', 'ext2',
                              startlimit, endlimit, run_as_root=True)

            upd_parts = set(part['number'] for part in
                            list_partitions(device))
            new_part = set(upd_parts) - set(cur_parts)
            if len(new_part) != 1:
                raise exception.InstanceDeployFailure(
                    _('Disk partitioning failed on device %(device)s. '
                      'Unable to retrieve config drive partition '
                      'information.') % {'device': device})

            if is_iscsi_device(device, node_uuid):
                config_drive_part = '%s-part%s' % (device, new_part.pop())
            else:
                config_drive_part = '%s%s' % (device, new_part.pop())

            # NOTE(vdrok): the partition was created successfully, let's wait
            # for it to appear in /dev.
            LOG.debug('Waiting for the config drive partition %(part)s '
                      'on node %(node)s to be ready for writing.',
                      {'part': config_drive_part, 'node': node_uuid})
            utils.execute('udevadm', 'settle',
                          '--exit-if-exists=%s' % config_drive_part)

        dd(confdrive_file, config_drive_part)
        LOG.info(_LI("Configdrive for node %(node)s successfully "
                     "copied onto partition %(part)s"),
                 {'node': node_uuid, 'part': config_drive_part})

    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to create config drive on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
    finally:
        # If the configdrive was requested make sure we delete the file
        # after copying the content to the partition
        if confdrive_file:
            utils.unlink_without_raise(confdrive_file)
def test_execute_without_root_helper_run_as_root(self):
    CONF.set_override("root_helper", None, group="ironic_lib")
    with mock.patch.object(processutils, "execute",
                           autospec=True) as execute_mock:
        utils.execute("foo", run_as_root=True)
        execute_mock.assert_called_once_with("foo", run_as_root=False)
def create_config_drive_partition(node_uuid, device, configdrive):
    """Create a partition for config drive

    Checks if the device is GPT or MBR partitioned and creates config drive
    partition accordingly.

    :param node_uuid: UUID of the Node.
    :param device: The device path.
    :param configdrive: Base64 encoded Gzipped configdrive content or
        configdrive HTTP URL.
    :raises: InstanceDeployFailure if config drive size exceeds maximum limit
        or if it fails to create config drive.
    """
    confdrive_file = None
    try:
        config_drive_part = _get_labelled_partition(device,
                                                    CONFIGDRIVE_LABEL,
                                                    node_uuid)

        confdrive_mb, confdrive_file = _get_configdrive(configdrive,
                                                        node_uuid)
        if confdrive_mb > MAX_CONFIG_DRIVE_SIZE_MB:
            raise exception.InstanceDeployFailure(
                _('Config drive size exceeds maximum limit of 64MiB. '
                  'Size of the given config drive is %(size)d MiB for '
                  'node %(node)s.')
                % {'size': confdrive_mb, 'node': node_uuid})

        LOG.debug("Adding config drive partition %(size)d MiB to "
                  "device: %(dev)s for node %(node)s",
                  {'dev': device, 'size': confdrive_mb, 'node': node_uuid})

        fix_gpt_partition(device, node_uuid)
        if config_drive_part:
            LOG.debug("Configdrive for node %(node)s exists at "
                      "%(part)s",
                      {'node': node_uuid, 'part': config_drive_part})
        else:
            cur_parts = set(part['number'] for part in
                            list_partitions(device))

            if _is_disk_gpt_partitioned(device, node_uuid):
                create_option = '0:-%dMB:0' % MAX_CONFIG_DRIVE_SIZE_MB
                utils.execute('sgdisk', '-n', create_option, device,
                              run_as_root=True)
            else:
                # Check if the disk has 4 partitions. The MBR based disk
                # cannot have more than 4 partitions.
                # TODO(stendulker): One can use logical partitions to create
                # a config drive if there are 3 primary partitions.
                # https://bugs.launchpad.net/ironic/+bug/1561283
                try:
                    pp_count, lp_count = count_mbr_partitions(device)
                except ValueError as e:
                    raise exception.InstanceDeployFailure(
                        _('Failed to check the number of primary partitions '
                          'present on %(dev)s for node %(node)s. Error: '
                          '%(error)s') % {'dev': device, 'node': node_uuid,
                                          'error': e})
                if pp_count > 3:
                    raise exception.InstanceDeployFailure(
                        _('Config drive cannot be created for node '
                          '%(node)s. Disk (%(dev)s) uses MBR partitioning '
                          'and already has %(parts)d primary partitions.')
                        % {'node': node_uuid, 'dev': device,
                           'parts': pp_count})

                # Check if disk size exceeds 2TB msdos limit
                startlimit = '-%dMiB' % MAX_CONFIG_DRIVE_SIZE_MB
                endlimit = '-0'
                if _is_disk_larger_than_max_size(device, node_uuid):
                    # Need to create a small partition at 2TB limit
                    LOG.warning("Disk size is larger than 2TB for "
                                "node %(node)s. Creating config drive "
                                "at the end of the disk %(disk)s.",
                                {'node': node_uuid, 'disk': device})
                    startlimit = (MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR -
                                  MAX_CONFIG_DRIVE_SIZE_MB - 1)
                    endlimit = MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR - 1

                utils.execute('parted', '-a', 'optimal', '-s', '--',
                              device, 'mkpart', 'primary', 'fat32',
                              startlimit, endlimit, run_as_root=True)

                # Parted uses fsync to tell the kernel to sync file io
                # however on ramdisks in ramfs, this is an explicit no-op.
                # Explicitly call sync so when the kernel attempts to read
                # the partition table from disk, it is less likely that the
                # write is still in buffer cache pending write to disk.
                LOG.debug('Explicitly calling sync to force buffer/cache '
                          'flush.')
                utils.execute('sync')

            # Make sure any additions to the partitioning are reflected in
            # the kernel.
            LOG.debug('Waiting until udev event queue is empty')
            utils.execute('udevadm', 'settle')
            try:
                utils.execute('partprobe', device, run_as_root=True,
                              attempts=CONF.disk_utils.partprobe_attempts)
                # Also verify that the partitioning is correct now.
                utils.execute('sgdisk', '-v', device, run_as_root=True)
            except processutils.ProcessExecutionError as exc:
                LOG.warning('Failed to verify GPT partitioning after '
                            'creating the configdrive partition: %s', exc)

            upd_parts = set(part['number'] for part in
                            list_partitions(device))
            new_part = set(upd_parts) - set(cur_parts)
            if len(new_part) != 1:
                raise exception.InstanceDeployFailure(
                    _('Disk partitioning failed on device %(device)s. '
                      'Unable to retrieve config drive partition '
                      'information.') % {'device': device})

            if is_iscsi_device(device, node_uuid):
                config_drive_part = '%s-part%s' % (device, new_part.pop())
            elif is_nvme_device(device):
                config_drive_part = '%sp%s' % (device, new_part.pop())
            else:
                config_drive_part = '%s%s' % (device, new_part.pop())

            LOG.debug('Waiting until udev event queue is empty')
            utils.execute('udevadm', 'settle')

            # NOTE(vsaienko): check that the device actually exists,
            # it is not handled by udevadm when using ISCSI, for more info
            # see: https://bugs.launchpad.net/ironic/+bug/1673731
            # Do not use 'udevadm settle --exit-if-exist' here
            LOG.debug('Waiting for the config drive partition %(part)s '
                      'on node %(node)s to be ready for writing.',
                      {'part': config_drive_part, 'node': node_uuid})
            utils.execute('test', '-e', config_drive_part,
                          check_exit_code=[0], attempts=15,
                          delay_on_retry=True)

        dd(confdrive_file, config_drive_part)
        LOG.info("Configdrive for node %(node)s successfully "
                 "copied onto partition %(part)s",
                 {'node': node_uuid, 'part': config_drive_part})

    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to create config drive on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
    finally:
        # If the configdrive was requested make sure we delete the file
        # after copying the content to the partition
        if confdrive_file:
            utils.unlink_without_raise(confdrive_file)
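# Worked example for the >2TB MBR branch above: the parted start/end limits
# chosen for the config drive partition. MAX_CONFIG_DRIVE_SIZE_MB is 64 per
# the error message in the function; the value of
# MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR is an assumption (2 TiB in MiB).
MAX_CONFIG_DRIVE_SIZE_MB = 64
MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152  # assumed value

startlimit = MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR - MAX_CONFIG_DRIVE_SIZE_MB - 1
endlimit = MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR - 1
print(startlimit, endlimit)   # -> 2097087 2097151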
def test_execute_get_root_helper(self):
    with mock.patch.object(processutils, 'execute') as execute_mock:
        helper = utils._get_root_helper()
        utils.execute('foo', run_as_root=True)
        execute_mock.assert_called_once_with('foo', run_as_root=True,
                                             root_helper=helper)
def convert_image(source, dest, out_format, run_as_root=False):
    """Convert image to other format."""
    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
    utils.execute(*cmd, run_as_root=run_as_root,
                  prlimit=QEMU_IMG_LIMITS)
def convert_image(source, dest, out_format, run_as_root=False):
    """Convert image to other format."""
    cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
    utils.execute(*cmd, run_as_root=run_as_root)
def test_execute_not_use_standard_locale(self, execute_mock):
    utils.execute('foo', use_standard_locale=False,
                  env_variables={'foo': 'bar'})
    execute_mock.assert_called_once_with('foo',
                                         env_variables={'foo': 'bar'})
def test_execute_with_root_helper(self):
    with mock.patch.object(processutils, "execute",
                           autospec=True) as execute_mock:
        utils.execute("foo", run_as_root=False)
        execute_mock.assert_called_once_with("foo", run_as_root=False)
def test_execute_with_root_helper(self):
    with mock.patch.object(processutils, 'execute') as execute_mock:
        utils.execute('foo', run_as_root=False)
        execute_mock.assert_called_once_with('foo', run_as_root=False)
def test_execute_with_root_helper_run_as_root(self):
    with mock.patch.object(processutils, "execute",
                           autospec=True) as execute_mock:
        utils.execute("foo", run_as_root=True)
        execute_mock.assert_called_once_with(
            "foo", run_as_root=True,
            root_helper=CONF.ironic_lib.root_helper)
def test_execute_without_root_helper_run_as_root(self):
    CONF.set_override('root_helper', None, group='ironic_lib')
    with mock.patch.object(processutils, 'execute') as execute_mock:
        utils.execute('foo', run_as_root=True)
        execute_mock.assert_called_once_with('foo', run_as_root=False)
def test_execute_use_standard_locale_with_env_variables(self, execute_mock):
    utils.execute("foo", use_standard_locale=True,
                  env_variables={"foo": "bar"})
    execute_mock.assert_called_once_with("foo",
                                         env_variables={"LC_ALL": "C",
                                                        "foo": "bar"})
def create_config_drive_partition(node_uuid, device, configdrive):
    """Create a partition for config drive

    Checks if the device is GPT or MBR partitioned and creates config drive
    partition accordingly.

    :param node_uuid: UUID of the Node.
    :param device: The device path.
    :param configdrive: Base64 encoded Gzipped configdrive content or
        configdrive HTTP URL.
    :raises: InstanceDeployFailure if config drive size exceeds maximum limit
        or if it fails to create config drive.
    """
    confdrive_file = None
    try:
        config_drive_part = get_labelled_partition(
            device, disk_utils.CONFIGDRIVE_LABEL, node_uuid)

        confdrive_mb, confdrive_file = get_configdrive(configdrive,
                                                       node_uuid)
        if confdrive_mb > MAX_CONFIG_DRIVE_SIZE_MB:
            raise exception.InstanceDeployFailure(
                'Config drive size exceeds maximum limit of 64MiB. '
                'Size of the given config drive is %(size)d MiB for '
                'node %(node)s.'
                % {'size': confdrive_mb, 'node': node_uuid})

        LOG.debug("Adding config drive partition %(size)d MiB to "
                  "device: %(dev)s for node %(node)s",
                  {'dev': device, 'size': confdrive_mb, 'node': node_uuid})

        disk_utils.fix_gpt_partition(device, node_uuid)
        if config_drive_part:
            LOG.debug("Configdrive for node %(node)s exists at "
                      "%(part)s",
                      {'node': node_uuid, 'part': config_drive_part})
        else:
            cur_parts = set(part['number'] for part in
                            disk_utils.list_partitions(device))

            if disk_utils.get_partition_table_type(device) == 'gpt':
                create_option = '0:-%dMB:0' % MAX_CONFIG_DRIVE_SIZE_MB
                utils.execute('sgdisk', '-n', create_option, device,
                              run_as_root=True)
            else:
                # Check if the disk has 4 partitions. The MBR based disk
                # cannot have more than 4 partitions.
                # TODO(stendulker): One can use logical partitions to create
                # a config drive if there are 3 primary partitions.
                # https://bugs.launchpad.net/ironic/+bug/1561283
                try:
                    pp_count, lp_count = disk_utils.count_mbr_partitions(
                        device)
                except ValueError as e:
                    raise exception.InstanceDeployFailure(
                        'Failed to check the number of primary partitions '
                        'present on %(dev)s for node %(node)s. Error: '
                        '%(error)s' % {'dev': device, 'node': node_uuid,
                                       'error': e})
                if pp_count > 3:
                    raise exception.InstanceDeployFailure(
                        'Config drive cannot be created for node %(node)s. '
                        'Disk (%(dev)s) uses MBR partitioning and already '
                        'has %(parts)d primary partitions.'
                        % {'node': node_uuid, 'dev': device,
                           'parts': pp_count})

                # Check if disk size exceeds 2TB msdos limit
                startlimit = '-%dMiB' % MAX_CONFIG_DRIVE_SIZE_MB
                endlimit = '-0'
                if _is_disk_larger_than_max_size(device, node_uuid):
                    # Need to create a small partition at 2TB limit
                    LOG.warning("Disk size is larger than 2TB for "
                                "node %(node)s. Creating config drive "
                                "at the end of the disk %(disk)s.",
                                {'node': node_uuid, 'disk': device})
                    startlimit = (MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR -
                                  MAX_CONFIG_DRIVE_SIZE_MB - 1)
                    endlimit = MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR - 1

                utils.execute('parted', '-a', 'optimal', '-s', '--',
                              device, 'mkpart', 'primary', 'fat32',
                              startlimit, endlimit, run_as_root=True)

            # Trigger device rescan
            disk_utils.trigger_device_rescan(device)

            upd_parts = set(part['number'] for part in
                            disk_utils.list_partitions(device))
            new_part = set(upd_parts) - set(cur_parts)
            if len(new_part) != 1:
                raise exception.InstanceDeployFailure(
                    'Disk partitioning failed on device %(device)s. '
                    'Unable to retrieve config drive partition information.'
                    % {'device': device})

            config_drive_part = disk_utils.partition_index_to_path(
                device, new_part.pop())

            disk_utils.udev_settle()

            # NOTE(vsaienko): check that the device actually exists,
            # it is not handled by udevadm when using ISCSI, for more info
            # see: https://bugs.launchpad.net/ironic/+bug/1673731
            # Do not use 'udevadm settle --exit-if-exist' here
            LOG.debug('Waiting for the config drive partition %(part)s '
                      'on node %(node)s to be ready for writing.',
                      {'part': config_drive_part, 'node': node_uuid})
            utils.execute('test', '-e', config_drive_part, attempts=15,
                          delay_on_retry=True)

        disk_utils.dd(confdrive_file, config_drive_part)
        LOG.info("Configdrive for node %(node)s successfully "
                 "copied onto partition %(part)s",
                 {'node': node_uuid, 'part': config_drive_part})

    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = ('Failed to create config drive on disk %(disk)s '
               'for node %(node)s. Error: %(error)s' %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
    finally:
        # If the configdrive was requested make sure we delete the file
        # after copying the content to the partition
        if confdrive_file:
            utils.unlink_without_raise(confdrive_file)
def test_check_exit_code_boolean(self):
    utils.execute("/usr/bin/env", "false", check_exit_code=False)
    self.assertRaises(processutils.ProcessExecutionError,
                      utils.execute,
                      "/usr/bin/env", "false",
                      check_exit_code=True)