def _do_create_volume(self, volume):
    """Create a volume on given glusterfs_share.

    :param volume: volume reference
    """
    volume_path = self.local_path(volume)
    volume_size = volume.size

    LOG.debug("creating new volume at %s", volume_path)

    if os.path.exists(volume_path):
        msg = _('file already exists at %s') % volume_path
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if self.configuration.nas_volume_prov_type == 'thin':
        self._create_qcow2_file(volume_path, volume_size)
    else:
        try:
            self._fallocate(volume_path, volume_size)
        except processutils.ProcessExecutionError as exc:
            if 'Operation not supported' in exc.stderr:
                LOG.warning(_LW('Fallocate not supported by current '
                                'version of glusterfs. So falling '
                                'back to dd.'))
                self._create_regular_file(volume_path, volume_size)
            else:
                fileutils.delete_if_exists(volume_path)
                raise

    self._set_rw_permissions_for_all(volume_path)

def unrescue_instance(self, instance):
    self.power_off(instance)

    root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
    rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                           rescue=True)

    if (instance.vm_state == vm_states.RESCUED and
            not (rescue_vhd_path and root_vhd_path)):
        err_msg = _('Missing instance root and/or rescue image. '
                    'The instance cannot be unrescued.')
        raise vmutils.HyperVException(err_msg)

    vm_gen = self._vmutils.get_vm_gen(instance.name)
    controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

    self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                 is_physical=False)
    if rescue_vhd_path:
        self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                     is_physical=False)
        fileutils.delete_if_exists(rescue_vhd_path)
    self._attach_drive(instance.name, root_vhd_path, 0,
                       self._ROOT_DISK_CTRL_ADDR, controller_type)
    self._detach_config_drive(instance.name, rescue=True, delete=True)

    # Reattach the configdrive, if it exists.
    configdrive_path = self._pathutils.lookup_configdrive_path(
        instance.name)
    if configdrive_path:
        self.attach_config_drive(instance, configdrive_path, vm_gen)

    self.power_on(instance)

def unrescue_instance(self, instance):
    self.power_off(instance)

    root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
    rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                           rescue=True)

    if (instance.vm_state == vm_states.RESCUED and
            not (rescue_vhd_path and root_vhd_path)):
        err_msg = _('Missing instance root and/or rescue image.')
        raise exception.InstanceNotRescuable(reason=err_msg,
                                             instance_id=instance.uuid)

    vm_gen = self._vmutils.get_vm_generation(instance.name)
    controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

    self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                 is_physical=False)
    if rescue_vhd_path:
        self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                     is_physical=False)
        fileutils.delete_if_exists(rescue_vhd_path)
    self._attach_drive(instance.name, root_vhd_path, 0,
                       self._ROOT_DISK_CTRL_ADDR, controller_type)
    self._detach_config_drive(instance.name, rescue=True, delete=True)

    # Reattach the configdrive, if it exists and is not already attached.
    configdrive_path = self._pathutils.lookup_configdrive_path(
        instance.name)
    if configdrive_path and not self._vmutils.is_disk_attached(
            configdrive_path, is_physical=False):
        self.attach_config_drive(instance, configdrive_path, vm_gen)

    self.power_on(instance)

def delete_volume(self, volume):
    """Deletes a logical volume."""
    if not volume.provider_location:
        LOG.warning('Volume %s does not have provider_location '
                    'specified, skipping', volume.name)
        return

    self._ensure_share_mounted(volume.provider_location)
    volume_dir = self._local_volume_dir(volume)
    active_image = self.get_active_image_from_info(volume)
    mounted_path = os.path.join(volume_dir, active_image)
    if os.access(self.local_path(volume), os.F_OK):
        img_info = self._qemu_img_info(self.local_path(volume),
                                       volume.name)
        if (img_info.backing_file and
                (self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME in
                 img_info.backing_file)):
            # This is an overlay volume, call cache cleanup
            self._remove_from_vol_cache(img_info.backing_file,
                                        ".child-" + volume.id, volume)

    self._execute('rm', '-f', mounted_path,
                  run_as_root=self._execute_as_root)

    # If an exception (e.g. timeout) occurred during delete_snapshot, the
    # base volume may linger around, so just delete it if it exists
    base_volume_path = self._local_path_volume(volume)
    fileutils.delete_if_exists(base_volume_path)

    info_path = self._local_path_volume_info(volume)
    fileutils.delete_if_exists(info_path)

def _remove_from_vol_cache(self, cache_file_path, ref_suffix, volume):
    """Removes a reference and possibly the volume from the volume cache.

    This method removes the ref_id reference (soft link) from the cache.
    If no other references exist the cached volume itself is removed,
    too.

    :param cache_file_path: file path to the volume in the cache
    :param ref_suffix: the id-based suffix of the cache file reference
    :param volume: the volume whose share defines the cache to address
    """
    # NOTE(kaisers): As the cache_file_path may be a relative path we use
    # cache dir and file name to ensure absolute paths in all operations.
    cache_path = os.path.join(self._local_volume_dir(volume),
                              self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME)
    cache_file_name = os.path.basename(cache_file_path)
    # Delete the reference.
    LOG.debug("Deleting cache reference %(cfp)s%(rs)s",
              {"cfp": cache_file_path, "rs": ref_suffix})
    fileutils.delete_if_exists(os.path.join(cache_path,
                                            cache_file_name + ref_suffix))
    # If no other reference exists, remove the cache entry.
    for file in os.listdir(cache_path):
        if fnmatch.fnmatch(file, cache_file_name + ".*"):
            # Found another reference file, keep the cache entry.
            LOG.debug("Cached volume %(file)s still has at least one "
                      "reference: %(ref)s",
                      {"file": cache_file_name, "ref": file})
            return
    # No other reference found, remove the cache entry.
    LOG.debug("Removing cached volume %(cvol)s as no more references for "
              "this cached volume exist.",
              {"cvol": os.path.join(cache_path, cache_file_name)})
    fileutils.delete_if_exists(os.path.join(cache_path, cache_file_name))

def test_create_configdrive_vfat(self):
    CONF.set_override('config_drive_format', 'vfat')
    imagefile = None
    try:
        self.mox.StubOutWithMock(utils, 'mkfs')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(utils, 'trycmd')

        utils.mkfs('vfat', mox.IgnoreArg(),
                   label='config-2').AndReturn(None)
        utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
                     mox.IgnoreArg(),
                     run_as_root=True).AndReturn((None, None))
        utils.execute('umount', mox.IgnoreArg(),
                      run_as_root=True).AndReturn(None)

        self.mox.ReplayAll()

        with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
            (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
            os.close(fd)
            c.make_drive(imagefile)

        # NOTE(mikal): we can't check for a VFAT output here because the
        # filesystem creation stuff has been mocked out because it
        # requires root permissions
    finally:
        if imagefile:
            fileutils.delete_if_exists(imagefile)

def test_create_configdrive_iso(self):
    CONF.set_override('config_drive_format', 'iso9660')
    imagefile = None
    try:
        self.mox.StubOutWithMock(utils, 'execute')

        utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
                      '-allow-lowercase', '-allow-multidot', '-l',
                      '-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r',
                      '-V', 'config-2', mox.IgnoreArg(), attempts=1,
                      run_as_root=False).AndReturn(None)

        self.mox.ReplayAll()

        with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
            (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
            os.close(fd)
            c.make_drive(imagefile)
    finally:
        if imagefile:
            fileutils.delete_if_exists(imagefile)

def store(self, project_id, function, data, md5sum=None):
    """Store the function package data to local file system.

    :param project_id: Project ID.
    :param function: Function ID.
    :param data: Package file content.
    :param md5sum: The MD5 provided by the user.
    :return: MD5 value of the package.
    """
    LOG.debug('Store package, function: %s, project: %s', function,
              project_id)

    project_path = os.path.join(self.base_path, project_id)
    fileutils.ensure_tree(project_path)

    # Check md5
    md5_actual = common.md5(content=data)
    if md5sum and md5_actual != md5sum:
        raise exc.InputException("Package md5 mismatch.")

    # The md5 is contained in the package path.
    new_func_zip = os.path.join(project_path, '%s.zip.new' % function)
    func_zip = os.path.join(project_path,
                            PACKAGE_NAME_TEMPLATE % (function, md5_actual))

    # Store package
    with open(new_func_zip, 'wb') as fd:
        fd.write(data)

    if not zipfile.is_zipfile(new_func_zip):
        fileutils.delete_if_exists(new_func_zip)
        raise exc.InputException("Package is not a valid ZIP package.")

    os.rename(new_func_zip, func_zip)

    return md5_actual

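# A minimal, self-contained sketch of the write-validate-rename pattern
# that store() above relies on: write to a ".new" name, validate, then
# os.rename() so readers never observe a half-written package. The
# function name store_package() and the path layout are illustrative
# assumptions, not part of the original API.
import hashlib
import os
import zipfile


def store_package(base_path, name, data):
    os.makedirs(base_path, exist_ok=True)
    md5_actual = hashlib.md5(data).hexdigest()
    new_path = os.path.join(base_path, '%s.zip.new' % name)
    final_path = os.path.join(base_path, '%s_%s.zip' % (name, md5_actual))

    with open(new_path, 'wb') as fd:
        fd.write(data)

    if not zipfile.is_zipfile(new_path):
        # Clean up the partial file before failing.
        os.unlink(new_path)
        raise ValueError("Package is not a valid ZIP package.")

    # Atomic when source and destination are on the same filesystem.
    os.rename(new_path, final_path)
    return md5_actual
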
def _get_rbd_handle(self, connection_properties):
    try:
        user = connection_properties['auth_username']
        pool, volume = connection_properties['name'].split('/')
        cluster_name = connection_properties['cluster_name']
        monitor_ips = connection_properties['hosts']
        monitor_ports = connection_properties['ports']
        # NOTE: cinder no longer passes keyring data in the connection
        # properties as of the victoria release. See OSSN-0085. But
        # cinderlib does, so we must keep the code related to the keyring.
        keyring = connection_properties.get('keyring')
    except (KeyError, ValueError):
        msg = _("Connect volume failed, malformed connection properties.")
        raise exception.BrickException(msg=msg)

    conf = self._create_ceph_conf(monitor_ips, monitor_ports,
                                  str(cluster_name), user, keyring)
    try:
        rbd_client = linuxrbd.RBDClient(user, pool, conffile=conf,
                                        rbd_cluster_name=str(cluster_name))
        rbd_volume = linuxrbd.RBDVolume(rbd_client, volume)
        rbd_handle = linuxrbd.RBDVolumeIOWrapper(
            linuxrbd.RBDImageMetadata(rbd_volume, pool, user, conf))
    except Exception:
        fileutils.delete_if_exists(conf)
        raise

    return rbd_handle

def unrescue_instance(self, instance):
    self.power_off(instance)

    root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name)
    rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name,
                                                           rescue=True)

    if (instance.vm_state == vm_states.RESCUED and
            not (rescue_vhd_path and root_vhd_path)):
        err_msg = _("Missing instance root and/or rescue image. "
                    "The instance cannot be unrescued.")
        raise vmutils.HyperVException(err_msg)

    vm_gen = self._vmutils.get_vm_gen(instance.name)
    controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

    self._vmutils.detach_vm_disk(instance.name, root_vhd_path,
                                 is_physical=False)
    if rescue_vhd_path:
        self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path,
                                     is_physical=False)
        fileutils.delete_if_exists(rescue_vhd_path)
    self._attach_drive(instance.name, root_vhd_path, 0,
                       self._ROOT_DISK_CTRL_ADDR, controller_type)
    self._detach_config_drive(instance.name, rescue=True, delete=True)

    # Reattach the configdrive, if it exists.
    configdrive_path = self._pathutils.lookup_configdrive_path(
        instance.name)
    if configdrive_path:
        self.attach_config_drive(instance, configdrive_path, vm_gen)

    self.power_on(instance)

def disconnect_volume(self, connection_properties, device_info,
                      force=False, ignore_errors=False):
    """Disconnect a volume.

    :param connection_properties: The dictionary that describes all
                                  of the target volume attributes.
    :type connection_properties: dict
    :param device_info: historical difference, but same as connection_props
    :type device_info: dict
    """
    do_local_attach = connection_properties.get('do_local_attach',
                                                self.do_local_attach)
    if do_local_attach:
        conf = device_info.get('conf') if device_info else None
        root_device = self._find_root_device(connection_properties, conf)
        if root_device:
            # TODO(stephenfin): Update to the unified 'rbd device unmap'
            # command introduced in ceph 13.0 (commit 6a57358add1157629a6d)
            # when we drop support for earlier versions
            cmd = ['rbd', 'unmap', root_device]
            cmd += self._get_rbd_args(connection_properties, conf)
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)
            if conf:
                rbd_privsep.delete_if_exists(conf)
    else:
        if device_info:
            rbd_handle = device_info.get('path', None)
            if rbd_handle is not None:
                fileutils.delete_if_exists(rbd_handle.rbd_conf)
                rbd_handle.close()

def test_create_configdrive_vfat(self, mock_trycmd, mock_execute,
                                 mock_mkfs):
    CONF.set_override('config_drive_format', 'vfat')
    imagefile = None
    try:
        with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
            (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
            os.close(fd)
            c.make_drive(imagefile)

        mock_mkfs.assert_called_once_with('vfat', mock.ANY,
                                          label='config-2')
        mock_trycmd.assert_called_once_with('mount', '-o', mock.ANY,
                                            mock.ANY, mock.ANY,
                                            run_as_root=True)
        mock_execute.assert_called_once_with('umount', mock.ANY,
                                             run_as_root=True)

        # NOTE(mikal): we can't check for a VFAT output here because the
        # filesystem creation stuff has been mocked out because it
        # requires root permissions
    finally:
        if imagefile:
            fileutils.delete_if_exists(imagefile)

def _do_create_volume(self, volume):
    """Create a volume on given glusterfs_share.

    :param volume: volume reference
    """
    volume_path = self.local_path(volume)
    volume_size = volume['size']

    LOG.debug("creating new volume at %s", volume_path)

    if os.path.exists(volume_path):
        msg = _('file already exists at %s') % volume_path
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if self.configuration.nas_volume_prov_type == 'thin':
        self._create_qcow2_file(volume_path, volume_size)
    else:
        try:
            self._fallocate(volume_path, volume_size)
        except processutils.ProcessExecutionError as exc:
            if 'Operation not supported' in exc.stderr:
                # Log a runtime warning rather than emitting a Python
                # warnings.warn(), which would not reach the service log.
                LOG.warning('Fallocate not supported by current version '
                            'of glusterfs. So falling back to dd.')
                self._create_regular_file(volume_path, volume_size)
            else:
                fileutils.delete_if_exists(volume_path)
                raise

    self._set_rw_permissions_for_all(volume_path)

def delete_volume(self, volume):
    """Driver entry point for destroying existing volumes."""
    vol_name = volume['name']
    vhd_path = self.local_path(volume)

    self._tgt_utils.remove_wt_disk(vol_name)
    fileutils.delete_if_exists(vhd_path)

def _get_rbd_handle(self, connection_properties):
    try:
        user = connection_properties['auth_username']
        pool, volume = connection_properties['name'].split('/')
        cluster_name = connection_properties.get('cluster_name')
        monitor_ips = connection_properties.get('hosts')
        monitor_ports = connection_properties.get('ports')
        keyring = connection_properties.get('keyring')
    except (KeyError, ValueError):
        # NOTE: a missing key raises KeyError and a malformed 'name'
        # raises ValueError; IndexError would never be raised here.
        msg = _("Connect volume failed, malformed connection properties")
        raise exception.BrickException(msg=msg)

    conf = self._create_ceph_conf(monitor_ips, monitor_ports,
                                  str(cluster_name), user, keyring)
    try:
        rbd_client = linuxrbd.RBDClient(user, pool, conffile=conf,
                                        rbd_cluster_name=str(cluster_name))
        rbd_volume = linuxrbd.RBDVolume(rbd_client, volume)
        rbd_handle = linuxrbd.RBDVolumeIOWrapper(
            linuxrbd.RBDImageMetadata(rbd_volume, pool, user, conf))
    except Exception:
        fileutils.delete_if_exists(conf)
        raise

    return rbd_handle

def _get_rbd_handle(self, connection_properties):
    try:
        user = connection_properties['auth_username']
        pool, volume = connection_properties['name'].split('/')
        cluster_name = connection_properties['cluster_name']
        monitor_ips = connection_properties['hosts']
        monitor_ports = connection_properties['ports']
        keyring = connection_properties.get('keyring')
    except (KeyError, ValueError):
        msg = _("Connect volume failed, malformed connection properties.")
        raise exception.BrickException(msg=msg)

    conf = self._create_ceph_conf(monitor_ips, monitor_ports,
                                  str(cluster_name), user, keyring)
    try:
        rbd_client = linuxrbd.RBDClient(user, pool, conffile=conf,
                                        rbd_cluster_name=str(cluster_name))
        rbd_volume = linuxrbd.RBDVolume(rbd_client, volume)
        rbd_handle = linuxrbd.RBDVolumeIOWrapper(
            linuxrbd.RBDImageMetadata(rbd_volume, pool, user, conf))
    except Exception:
        fileutils.delete_if_exists(conf)
        raise

    return rbd_handle

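# A generic sketch of the cleanup-on-failure idiom shared by the
# _get_rbd_handle() variants above: the temporary config file is created
# first, and any failure while building the handle deletes it before
# re-raising, so failed attempts leave no orphaned temp files behind. On
# success the conf must outlive the call and is removed at disconnect
# time. make_conf() and build_handle() are illustrative stand-ins, not
# os-brick APIs.
import os
import tempfile

from oslo_utils import fileutils


def make_conf(contents):
    fd, path = tempfile.mkstemp(suffix='.conf')
    with os.fdopen(fd, 'w') as f:
        f.write(contents)
    return path


def open_with_conf(contents, build_handle):
    conf = make_conf(contents)
    try:
        return build_handle(conf)
    except Exception:
        # The handle never materialized; remove the temp conf, re-raise.
        fileutils.delete_if_exists(conf)
        raise
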
def delete_volume(self, volume):
    """Deletes a logical volume."""
    if not volume.provider_location:
        LOG.warning('Volume %s does not have provider_location '
                    'specified, skipping', volume.name)
        return

    self._ensure_share_mounted(volume.provider_location)
    volume_dir = self._local_volume_dir(volume)
    mounted_path = os.path.join(volume_dir,
                                self.get_active_image_from_info(volume))

    self._execute('rm', '-f', mounted_path,
                  run_as_root=self._execute_as_root)

    # If an exception (e.g. timeout) occurred during delete_snapshot, the
    # base volume may linger around, so just delete it if it exists
    base_volume_path = self._local_path_volume(volume)
    fileutils.delete_if_exists(base_volume_path)

    info_path = self._local_path_volume_info(volume)
    fileutils.delete_if_exists(info_path)

def test_transit_nets_cfg_invalid_file_format(self):
    self.plugging_driver._cfg_file = fileutils.write_to_tempfile(
        ("""{
             'EDGENAT': {
                 'gateway_ip': '1.109.100.254',
                 'cidr_exposed': '1.109.100.1/24',
                 'segmentation_id': 1066
             }
         }
         {
             'EDGENATBackup': {
                 'gateway_ip': '1.209.200.254',
                 'cidr_exposed': '1.209.200.1/24',
                 'segmentation_id': 1066
             }
         }""").encode('utf-8'))
    # TODO(thbachman): couldn't get assertRaises to work here,
    # so used this construct instead
    try:
        # just accessing the member should trigger the exception
        self.plugging_driver.transit_nets_cfg
        self.assertTrue(False)
    except aci_vlan.AciDriverConfigInvalidFileFormat:
        self.assertTrue(True)
    fileutils.delete_if_exists(self.plugging_driver._cfg_file)

def disable(self, sig='9', get_stop_command=None):
    pid = self.pid

    if self.active:
        if get_stop_command:
            cmd = get_stop_command(self.get_pid_file_name())
            ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
            ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
                                     run_as_root=self.run_as_root)
        else:
            cmd = self.get_kill_cmd(sig, pid)
            utils.execute(cmd, run_as_root=self.run_as_root)

        # In the case of shutting down, remove the pid file
        if sig == '9':
            fileutils.delete_if_exists(self.get_pid_file_name())
    elif pid:
        LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
                  'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
                                        'signal': sig})
    else:
        LOG.debug('No process started for %s', self.uuid)

def delete_invalid_files(self):
    """
    Removes any invalid cache entries
    """
    for path in self.get_cache_files(self.invalid_dir):
        fileutils.delete_if_exists(path)
        LOG.info(_LI("Removed invalid cache file %s"), path)

def delete_volume(self, volume):
    """Driver entry point for destroying existing volumes."""
    vol_name = volume.name
    vhd_path = self.local_path(volume)

    self._tgt_utils.remove_wt_disk(vol_name)
    fileutils.delete_if_exists(vhd_path)

def disconnect_volume(self, connection_properties, device_info,
                      force=False, ignore_errors=False):
    """Disconnect a volume.

    :param connection_properties: The dictionary that describes all
                                  of the target volume attributes.
    :type connection_properties: dict
    :param device_info: historical difference, but same as connection_props
    :type device_info: dict
    """
    do_local_attach = connection_properties.get('do_local_attach',
                                                self.do_local_attach)
    if do_local_attach:
        pool, volume = connection_properties['name'].split('/')
        dev_name = RBDConnector.get_rbd_device_name(pool, volume)
        cmd = ['rbd', 'unmap', dev_name]
        cmd += self._get_rbd_args(connection_properties)
        self._execute(*cmd, root_helper=self._root_helper,
                      run_as_root=True)
    else:
        if device_info:
            rbd_handle = device_info.get('path', None)
            if rbd_handle is not None:
                fileutils.delete_if_exists(rbd_handle.rbd_conf)
                rbd_handle.close()

def test_create_configdrive_iso(self, mock_execute):
    CONF.set_override('config_drive_format', 'iso9660')
    imagefile = None
    try:
        with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
            (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
            os.close(fd)
            c.make_drive(imagefile)

        mock_execute.assert_called_once_with('genisoimage', '-o',
                                             mock.ANY, '-ldots',
                                             '-allow-lowercase',
                                             '-allow-multidot', '-l',
                                             '-publisher', mock.ANY,
                                             '-quiet', '-J', '-r', '-V',
                                             'config-2', mock.ANY,
                                             attempts=1,
                                             run_as_root=False)
    finally:
        if imagefile:
            fileutils.delete_if_exists(imagefile)

def temporary_file(*args: str, **kwargs) -> Generator[str, None, None]:
    tmp = None
    try:
        tmp = create_temporary_file(*args, **kwargs)
        yield tmp
    finally:
        if tmp:
            fileutils.delete_if_exists(tmp)

def temporary_file(*args, **kwargs):
    tmp = None
    try:
        tmp = create_temporary_file(*args, **kwargs)
        yield tmp
    finally:
        if tmp:
            fileutils.delete_if_exists(tmp)

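# A sketch of how a generator like temporary_file() above is typically
# consumed: decorated with contextlib.contextmanager so the finally block
# runs when the with-statement exits, even on error. The helper
# create_temporary_file_demo() is stubbed with tempfile.mkstemp() as an
# assumption about the real helper's behavior; the _demo names are
# illustrative, not part of the original module.
import contextlib
import os
import tempfile

from oslo_utils import fileutils


def create_temporary_file_demo(suffix=''):
    fd, path = tempfile.mkstemp(suffix=suffix)
    os.close(fd)
    return path


@contextlib.contextmanager
def temporary_file_demo(suffix=''):
    tmp = None
    try:
        tmp = create_temporary_file_demo(suffix)
        yield tmp
    finally:
        if tmp:
            fileutils.delete_if_exists(tmp)


# Usage: the scratch file is deleted even if the body raises.
# with temporary_file_demo(suffix='.img') as path:
#     with open(path, 'wb') as f:
#         f.write(b'scratch data')
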
def delete_all_queued_images(self):
    """
    Removes all queued image files and any attributes about the images
    """
    files = [f for f in self.get_cache_files(self.queue_dir)]
    for file in files:
        fileutils.delete_if_exists(file)
    return len(files)

def delete_queued_image(self, image_id):
    """
    Removes a specific queued image file and any attributes about the image

    :param image_id: Image ID
    """
    path = self.get_image_filepath(image_id, 'queue')
    fileutils.delete_if_exists(path)

def replace_xenserver_image_with_coalesced_vhd(image_file):
    with temporary_dir() as tempdir:
        extract_targz(image_file, tempdir)
        chain = discover_vhd_chain(tempdir)
        fix_vhd_chain(chain)
        coalesced = coalesce_chain(chain)
        fileutils.delete_if_exists(image_file)
        os.rename(coalesced, image_file)

def _delete_vm_console_log(self, instance):
    console_log_files = self._pathutils.get_vm_console_log_paths(
        instance.name)

    vm_log_writer = self._vm_log_writers.get(instance.uuid)
    if vm_log_writer:
        vm_log_writer.join()

    for log_file in console_log_files:
        fileutils.delete_if_exists(log_file)

def create_ks_config(self):
    tname = self.get_ks_template_path()
    cname = self.get_dhcp_config_path()
    fileutils.delete_if_exists(cname)
    context = {
        'ks_opts': self.opts,
    }
    with open(cname, 'w') as f:
        config = self.build_config(context, tname)
        f.write(config)

def disconnect_volume(self, connection_properties, device_info,
                      force=False, ignore_errors=False):
    pool, volume = connection_properties['name'].split('/')
    conf_file = device_info['conf']
    dev_name = self.get_rbd_device_name(pool, volume)
    cmd = ['rbd', 'unmap', dev_name, '--conf', conf_file]
    cmd += self._get_rbd_args(connection_properties)
    self._execute(*cmd, root_helper=self._root_helper,
                  run_as_root=True)
    fileutils.delete_if_exists(conf_file)

def create_pxe_config(self):
    tname = self.get_pxe_template_path()
    cname = self.get_pxe_config_path(self.mac_addr)
    fileutils.delete_if_exists(cname)
    context = {
        'pxe_opts': self.opts,
    }
    with open(cname, 'w') as f:
        config = self.build_config(context, tname)
        f.write(config)

def make_persistent_password_file(path, password):
    """Writes a file containing a password until deleted."""
    try:
        fileutils.delete_if_exists(path)
        with open(path, 'wb') as file:
            os.chmod(path, 0o600)
            file.write(password.encode())
        return path
    except Exception as e:
        fileutils.delete_if_exists(path)
        raise exception.PasswordFileFailedToCreate(error=e)

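# A hedged sketch of a slightly tighter variant of the pattern above:
# passing the mode to os.open() closes the brief window in which the file
# exists with default permissions before chmod() runs. write_secret_file()
# is an illustrative name, not part of the original module.
import os


def write_secret_file(path, secret):
    try:
        os.unlink(path)
    except FileNotFoundError:
        pass
    # Create the file with 0600 permissions from the very start.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(secret.encode())
    except Exception:
        # Best-effort cleanup of the partial file before re-raising.
        try:
            os.unlink(path)
        except FileNotFoundError:
            pass
        raise
    return path
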
def update_head_file(config):
    '''Update HEAD file with the latest branch head.'''
    if _use_separate_migration_branches(config):
        # Kill any HEAD(S) files because we don't rely on them for
        # branch-aware chains anymore
        files_to_remove = [
            _get_head_file_path(config), _get_heads_file_path(config)
        ]
        for file_ in files_to_remove:
            fileutils.delete_if_exists(file_)
        return

    _update_head_file(config)

def _can_fallocate(self):
    """Check once per class, whether fallocate(1) is available,
    and that the instances directory supports fallocate(2).
    """
    can_fallocate = getattr(self.__class__, "can_fallocate", None)
    if can_fallocate is None:
        _out, err = utils.trycmd("fallocate", "-n", "-l", "1",
                                 self.path + ".fallocate_test")
        fileutils.delete_if_exists(self.path + ".fallocate_test")
        can_fallocate = not err
        self.__class__.can_fallocate = can_fallocate
    if not can_fallocate:
        LOG.error(_LE("Unable to preallocate image at path: "
                      "%(path)s"), {"path": self.path})
    return can_fallocate

def connect_volume(self, connection_properties):
    # NOTE(e0ne): sanity check if ceph-common is installed.
    self._setup_rbd_class()

    # Extract connection parameters and generate config file
    try:
        user = connection_properties['auth_username']
        pool, volume = connection_properties['name'].split('/')
        cluster_name = connection_properties.get('cluster_name')
        monitor_ips = connection_properties.get('hosts')
        monitor_ports = connection_properties.get('ports')
        keyring = connection_properties.get('keyring')
    except (KeyError, ValueError):
        # NOTE: missing keys raise KeyError and a malformed 'name' raises
        # ValueError; IndexError would never be raised here.
        msg = 'Malformed connection properties'
        raise exception.BrickException(msg)

    conf = self._create_ceph_conf(monitor_ips, monitor_ports,
                                  str(cluster_name), user, keyring)

    link_name = self.get_rbd_device_name(pool, volume)
    real_path = os.path.realpath(link_name)

    try:
        # Map RBD volume if it's not already mapped
        if not os.path.islink(link_name) or not os.path.exists(real_path):
            cmd = ['rbd', 'map', volume, '--pool', pool, '--conf', conf]
            cmd += self._get_rbd_args(connection_properties)
            stdout, stderr = self._execute(*cmd,
                                           root_helper=self._root_helper,
                                           run_as_root=True)
            real_path = stdout.strip()
            # The host may not have RBD installed, and therefore won't
            # create the symlinks, ensure they exist
            if self.containerized:
                self._ensure_link(real_path, link_name)
    except Exception as exec_exception:
        try:
            try:
                self._unmap(real_path, conf, connection_properties)
            finally:
                fileutils.delete_if_exists(conf)
        except Exception:
            exc = traceback.format_exc()
            print('Exception occurred while cleaning up after connection '
                  'error\n%s' % exc)
        finally:
            raise exception.BrickException('Error connecting volume: %s' %
                                           six.text_type(exec_exception))

    return {'path': real_path,
            'conf': conf,
            'type': 'block'}

def temporary_file(self, suffix=None, *args, **kwargs):
    """Creates a random, temporary, closed file, returning the file's path.

    It's different from tempfile.NamedTemporaryFile which returns an open
    file descriptor.
    """
    tmp_file_path = None
    try:
        tmp_file_path = self.create_temporary_file(suffix, *args, **kwargs)
        yield tmp_file_path
    finally:
        if tmp_file_path:
            fileutils.delete_if_exists(tmp_file_path)

def update_head_files(config):
    '''Update HEAD files with the latest branch heads.'''
    head_map = _get_heads_map(config)
    contract_head = _get_contract_head_file_path(config)
    expand_head = _get_expand_head_file_path(config)
    with open(contract_head, 'w+') as f:
        f.write(head_map[CONTRACT_BRANCH] + '\n')
    with open(expand_head, 'w+') as f:
        f.write(head_map[EXPAND_BRANCH] + '\n')

    old_head_file = _get_head_file_path(config)
    old_heads_file = _get_heads_file_path(config)
    for file_ in (old_head_file, old_heads_file):
        fileutils.delete_if_exists(file_)

def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Copy the volume to the specified image."""
    disk_format = self._tgt_utils.get_supported_disk_format()
    temp_vhd_path = os.path.join(CONF.image_conversion_dir,
                                 str(image_meta['id']) + '.' + disk_format)

    try:
        with self._temporary_snapshot(volume['name']) as tmp_snap_name:
            # qemu-img cannot access VSS snapshots, for which reason it
            # must be exported first.
            self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path)
        image_utils.upload_volume(context, image_service, image_meta,
                                  temp_vhd_path, 'vhd')
    finally:
        fileutils.delete_if_exists(temp_vhd_path)

def _can_fallocate(self):
    """Check once per class, whether fallocate(1) is available,
    and that the instances directory supports fallocate(2).
    """
    can_fallocate = getattr(self.__class__, 'can_fallocate', None)
    if can_fallocate is None:
        test_path = self.path + '.fallocate_test'
        _out, err = utils.trycmd('fallocate', '-l', '1', test_path)
        fileutils.delete_if_exists(test_path)
        can_fallocate = not err
        self.__class__.can_fallocate = can_fallocate

    if not can_fallocate:
        LOG.warning(_LW('Unable to preallocate image at path: '
                        '%(path)s'), {'path': self.path})
    return can_fallocate

def disable(self, sig='9'):
    pid = self.pid

    if self.active:
        cmd = ['kill', '-%s' % (sig), pid]
        utils.execute(cmd, run_as_root=True)
        # In the case of shutting down, remove the pid file
        if sig == '9':
            fileutils.delete_if_exists(self.get_pid_file_name())
    elif pid:
        LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
                  'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
                                        'signal': sig})
    else:
        LOG.debug('No process started for %s', self.uuid)

def update_head_file(config):
    '''Update HEAD file with the latest branch head.'''
    head_file = _get_head_file_path(config)
    if _use_separate_migration_branches(config):
        # Kill any HEAD(S) files because we don't rely on them for
        # branch-aware chains anymore
        files_to_remove = [head_file, _get_heads_file_path(config)]
        for file_ in files_to_remove:
            fileutils.delete_if_exists(file_)
        return

    script = alembic_script.ScriptDirectory.from_config(config)
    head = script.get_heads()
    with open(head_file, 'w+') as f:
        f.write('\n'.join(head))

def test_remove_dir(self):
    tmpdir = tempfile.mktemp()
    os.mkdir(tmpdir)
    try:
        with fileutils.remove_path_on_error(
                tmpdir,
                lambda path: fileutils.delete_if_exists(path, os.rmdir)):
            raise Exception
    except Exception:
        self.assertFalse(os.path.exists(tmpdir))

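# A short sketch of the semantics the test above exercises, assuming the
# oslo.utils behavior that delete_if_exists() swallows a missing path and
# accepts a custom remove callable (os.rmdir for directories).
import os
import tempfile

from oslo_utils import fileutils

tmpdir = tempfile.mkdtemp()
# Directories need os.rmdir instead of the default os.unlink.
fileutils.delete_if_exists(tmpdir, os.rmdir)
# Calling again is a no-op: a missing path is ignored, not an error.
fileutils.delete_if_exists(tmpdir, os.rmdir)
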
def disable(self, sig='9', get_stop_command=None):
    pid = self.pid

    if self.active:
        if get_stop_command:
            cmd = get_stop_command(self.get_pid_file_name())
            ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
            ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
        else:
            cmd = ['kill', '-%s' % (sig), pid]
            utils.execute(cmd, run_as_root=True)
        # In the case of shutting down, remove the pid file
        if sig == '9':
            fileutils.delete_if_exists(self.get_pid_file_name())
    elif pid:
        LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring '
                  'signal %(signal)s', {'uuid': self.uuid, 'pid': pid,
                                        'signal': sig})
    else:
        LOG.debug('No process started for %s', self.uuid)

def _test_copy_volume_to_image(self, supported_format):
    drv = self._driver

    vol = db_fakes.get_fake_volume_info()
    image_meta = db_fakes.get_fake_image_meta()

    fake_get_supported_format = lambda x: supported_format

    self.stubs.Set(os.path, 'exists', lambda x: False)
    self.stubs.Set(drv, 'local_path', self.fake_local_path)
    self.stubs.Set(windows_utils.WindowsUtils, 'get_supported_format',
                   fake_get_supported_format)

    self.mox.StubOutWithMock(fileutils, 'ensure_tree')
    self.mox.StubOutWithMock(fileutils, 'delete_if_exists')
    self.mox.StubOutWithMock(image_utils, 'upload_volume')
    self.mox.StubOutWithMock(windows_utils.WindowsUtils, 'copy_vhd_disk')
    self.mox.StubOutWithMock(vhdutils.VHDUtils, 'convert_vhd')

    fileutils.ensure_tree(CONF.image_conversion_dir)
    temp_vhd_path = os.path.join(CONF.image_conversion_dir,
                                 str(image_meta['id']) + "." +
                                 supported_format)
    upload_image = temp_vhd_path

    windows_utils.WindowsUtils.copy_vhd_disk(self.fake_local_path(vol),
                                             temp_vhd_path)
    if supported_format == 'vhdx':
        upload_image = upload_image[:-1]
        vhdutils.VHDUtils.convert_vhd(temp_vhd_path, upload_image,
                                      constants.VHD_TYPE_DYNAMIC)

    image_utils.upload_volume(None, None, image_meta, upload_image, 'vhd')

    fileutils.delete_if_exists(temp_vhd_path)
    fileutils.delete_if_exists(upload_image)

    self.mox.ReplayAll()

    drv.copy_volume_to_image(None, vol, None, image_meta)

def disable(self, sig="9", get_stop_command=None): pid = self.pid if self.active: if get_stop_command: cmd = get_stop_command(self.get_pid_file_name()) ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env) else: cmd = ["kill", "-%s" % (sig), pid] utils.execute(cmd, run_as_root=True) # In the case of shutting down, remove the pid file if sig == "9": fileutils.delete_if_exists(self.get_pid_file_name()) elif pid: LOG.debug( "Process for %(uuid)s pid %(pid)d is stale, ignoring " "signal %(signal)s", {"uuid": self.uuid, "pid": pid, "signal": sig}, ) else: LOG.debug("No process started for %s", self.uuid)
def delete_volume(self, volume):
    """Deletes a logical volume."""
    if not volume["provider_location"]:
        LOG.warning(_LW("Volume %s does not have provider_location "
                        "specified, skipping"), volume["name"])
        return

    self._ensure_share_mounted(volume["provider_location"])
    volume_dir = self._local_volume_dir(volume)
    mounted_path = os.path.join(volume_dir,
                                self.get_active_image_from_info(volume))

    self._execute("rm", "-f", mounted_path,
                  run_as_root=self._execute_as_root)

    # If an exception (e.g. timeout) occurred during delete_snapshot, the
    # base volume may linger around, so just delete it if it exists
    base_volume_path = self._local_path_volume(volume)
    fileutils.delete_if_exists(base_volume_path)

    info_path = self._local_path_volume_info(volume)
    fileutils.delete_if_exists(info_path)