def _ensure(self):
    self._v1_compat = self.snap and len(self.snap['disks']) and \
            isinstance(self.snap['disks'][0], dict) and \
            'snapshot' in self.snap['disks'][0]
    if self.snap:
        disks = []
        snaps = []
        try:
            # @todo: create disks concurrently
            for disk_snap in self.snap['disks']:
                if self._v1_compat:
                    disk_snap = disk_snap['snapshot']
                snap = storage2.snapshot(disk_snap)
                snaps.append(snap)

            if self.disks:
                if len(self.disks) != len(snaps):
                    raise storage2.StorageError(
                            'Volume disks count is not equal to '
                            'snapshot disks count')
                self.disks = map(storage2.volume, self.disks)

            # Attach snapshots to self.disks (if present) or to new empty volumes
            disks = self.disks or [storage2.volume(type=s['type']) for s in snaps]
            for disk, snap in zip(disks, snaps):
                disk.snap = snap
        except:
            with util.capture_exception(logger=LOG):
                for disk in disks:
                    disk.destroy()

        self.disks = disks

        if self._v1_compat:
            # Some old snapshots stored '/dev/vgname' instead of the bare VG name
            self.vg = os.path.basename(self.snap['vg'])
        else:
            self.vg = self.snap['vg']
        self.level = int(self.snap['level'])
        self.pv_uuid = self.snap['pv_uuid']
        self.lvm_group_cfg = self.snap['lvm_group_cfg']

        self.snap = None

    self._check_attr('level')
    self._check_attr('vg')
    self._check_attr('disks')

    assert int(self.level) in (0, 1, 5, 10), 'Unknown raid level: %s' % self.level

    # Make sure autoassembly is disabled before attaching disks
    self._disable_autoassembly()

    disks = []
    for disk in self.disks:
        disk = storage2.volume(disk)
        disk.ensure()
        disks.append(disk)
    self.disks = disks

    disks_devices = [disk.device for disk in self.disks]

    if self.lvm_group_cfg:
        time.sleep(2)  # Give the device manager time to settle
        try:
            raid_device = mdadm.mdfind(*disks_devices)
        except storage2.StorageError:
            raid_device = mdadm.findname()
            """
            if self.level in (1, 10):
                for disk in disks_devices:
                    mdadm.mdadm('misc', None, disk,
                                zero_superblock=True, force=True)
                try:
                    kwargs = dict(force=True, metadata='default',
                                  level=self.level, assume_clean=True,
                                  raid_devices=len(disks_devices))
                    mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                except:
                    if self.level == 10 and self._v1_compat:
                        self._v1_repair_raid10(raid_device)
                    else:
                        raise
            else:
            """
            mdadm.mdadm('assemble', raid_device, *disks_devices)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        # Restore vg config
        vg_restore_file = tempfile.mktemp()
        with open(vg_restore_file, 'w') as f:
            f.write(base64.b64decode(self.lvm_group_cfg))

        # Ensure RAID physical volume
        try:
            lvm2.pvs(raid_device)
        except:
            lvm2.pvcreate(raid_device, uuid=self.pv_uuid,
                          restorefile=vg_restore_file)
        finally:
            lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
            os.remove(vg_restore_file)

        # Check that logical volume exists
        lv_infos = lvm2.lvs(self.vg)
        if not lv_infos:
            raise storage2.StorageError(
                    'No logical volumes found in %s vol. group' % self.vg)
        lv_name = lv_infos.popitem()[1].lv_name
        self.device = lvm2.lvpath(self.vg, lv_name)

        # Activate volume group
        lvm2.vgchange(self.vg, available='y')

        # Wait for logical volume device file
        util.wait_until(lambda: os.path.exists(self.device),
                        timeout=120, logger=LOG,
                        error_text='Logical volume %s not found' % self.device)
    else:
        raid_device = mdadm.findname()
        kwargs = dict(force=True, level=self.level, assume_clean=True,
                      raid_devices=len(disks_devices), metadata='default')
        mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
        mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        lvm2.pvcreate(raid_device, force=True)
        self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

        lvm2.vgcreate(self.vg, raid_device)
        out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
        try:
            clean_out = out.strip().split('\n')[-1].strip()
            vol = re.match(self.lv_re, clean_out).group(1)
            self.device = lvm2.lvpath(self.vg, vol)
        except:
            e = 'Logical volume creation failed: %s\n%s' % (out, err)
            raise Exception(e)

        self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

    self.raid_pv = raid_device
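
# --- Illustrative sketch (not part of the class) -------------------------------
# The two snapshot-config layouts that the _v1_compat check above distinguishes.
# Key names are taken from the code; all values, and the 'loop' disk type, are
# hypothetical placeholders.
_EXAMPLE_V1_RAID_SNAP = {
    'vg': '/dev/vgname',                   # old snapshots stored the full /dev path
    'level': 10,
    'pv_uuid': 'EXAMPLE-PV-UUID',
    'lvm_group_cfg': 'BASE64-OF-VG-CONFIG',
    # each disk entry wraps the actual snapshot config in a 'snapshot' key
    'disks': [{'snapshot': {'type': 'loop'}}],
}
_EXAMPLE_V2_RAID_SNAP = {
    'vg': 'vgname',                        # bare volume group name
    'level': 10,
    'pv_uuid': 'EXAMPLE-PV-UUID',
    'lvm_group_cfg': 'BASE64-OF-VG-CONFIG',
    # disk snapshot configs are stored directly
    'disks': [{'type': 'loop'}],
}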
def _ensure(self):
    self._v1_compat = self.snap and len(self.snap['disks']) and \
            isinstance(self.snap['disks'][0], dict) and \
            'snapshot' in self.snap['disks'][0]
    if self.snap:
        disks = []
        snaps = []
        try:
            # @todo: create disks concurrently
            for disk_snap in self.snap['disks']:
                if self._v1_compat:
                    disk_snap = disk_snap['snapshot']
                snap = storage2.snapshot(disk_snap)
                snaps.append(snap)

            if self.disks:
                if len(self.disks) != len(snaps):
                    raise storage2.StorageError(
                            'Volume disks count is not equal to '
                            'snapshot disks count')
                self.disks = map(storage2.volume, self.disks)

            # Attach snapshots to self.disks (if present) or to new empty volumes
            disks = self.disks or [storage2.volume(type=s['type']) for s in snaps]
            for disk, snap in zip(disks, snaps):
                disk.snap = snap
        except:
            with util.capture_exception(logger=LOG):
                for disk in disks:
                    disk.destroy()

        self.disks = disks

        if self._v1_compat:
            # Some old snapshots stored '/dev/vgname' instead of the bare VG name
            self.vg = os.path.basename(self.snap['vg'])
        else:
            self.vg = self.snap['vg']
        self.level = int(self.snap['level'])
        self.pv_uuid = self.snap['pv_uuid']
        self.lvm_group_cfg = self.snap['lvm_group_cfg']

        self.snap = None

    self._check_attr('level')
    self._check_attr('vg')
    self._check_attr('disks')

    assert int(self.level) in (0, 1, 5, 10), 'Unknown raid level: %s' % self.level

    disks = []
    for disk in self.disks:
        disk = storage2.volume(disk)
        disk.ensure()
        disks.append(disk)
    self.disks = disks

    disks_devices = [disk.device for disk in self.disks]

    if self.lvm_group_cfg:
        try:
            raid_device = mdadm.mdfind(*disks_devices)
        except storage2.StorageError:
            raid_device = mdadm.findname()
            """
            if self.level in (1, 10):
                for disk in disks_devices:
                    mdadm.mdadm('misc', None, disk,
                                zero_superblock=True, force=True)
                try:
                    kwargs = dict(force=True, metadata='default',
                                  level=self.level, assume_clean=True,
                                  raid_devices=len(disks_devices))
                    mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                except:
                    if self.level == 10 and self._v1_compat:
                        self._v1_repair_raid10(raid_device)
                    else:
                        raise
            else:
            """
            mdadm.mdadm('assemble', raid_device, *disks_devices)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        # Restore vg config
        vg_restore_file = tempfile.mktemp()
        with open(vg_restore_file, 'w') as f:
            f.write(base64.b64decode(self.lvm_group_cfg))

        # Ensure RAID physical volume
        try:
            lvm2.pvs(raid_device)
        except:
            lvm2.pvcreate(raid_device, uuid=self.pv_uuid,
                          restorefile=vg_restore_file)
        finally:
            lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
            os.remove(vg_restore_file)

        # Check that logical volume exists
        lv_infos = lvm2.lvs(self.vg)
        if not lv_infos:
            raise storage2.StorageError(
                    'No logical volumes found in %s vol. group' % self.vg)
        lv_name = lv_infos.popitem()[1].lv_name
        self.device = lvm2.lvpath(self.vg, lv_name)

        # Activate volume group
        lvm2.vgchange(self.vg, available='y')

        # Wait for logical volume device file
        util.wait_until(lambda: os.path.exists(self.device),
                        timeout=120, logger=LOG,
                        error_text='Logical volume %s not found' % self.device)
    else:
        raid_device = mdadm.findname()
        kwargs = dict(force=True, level=self.level, assume_clean=True,
                      raid_devices=len(disks_devices), metadata='default')
        mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
        mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        lvm2.pvcreate(raid_device, force=True)
        self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

        lvm2.vgcreate(self.vg, raid_device)
        out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
        try:
            clean_out = out.strip().split('\n')[-1].strip()
            vol = re.match(self.lv_re, clean_out).group(1)
            self.device = lvm2.lvpath(self.vg, vol)
        except:
            e = 'Logical volume creation failed: %s\n%s' % (out, err)
            raise Exception(e)

        self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

    self.raid_pv = raid_device
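
# --- Illustrative sketch (not part of the class) -------------------------------
# How the lvcreate-output parsing in the non-snapshot branch above is expected to
# behave. self.lv_re is defined elsewhere in the class; the pattern below is an
# assumption matching typical lvcreate output such as: Logical volume "lvol0" created
import re as _example_re

_EXAMPLE_LV_RE = _example_re.compile(r'Logical volume "([^"]+)" created')

def _example_parse_lv_name(out):
    # Take the last non-empty line of lvcreate output and pull out the LV name,
    # mirroring the try-block in the else branch above.
    clean_out = out.strip().split('\n')[-1].strip()
    return _EXAMPLE_LV_RE.match(clean_out).group(1)

assert _example_parse_lv_name('  Logical volume "lvol0" created\n') == 'lvol0'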
def _ensure(self):
    def get_lv_size_kwarg(size):
        kwd = dict()
        if '%' in str(size):
            kwd['extents'] = size
        else:
            try:
                int(size)
                kwd['size'] = '%sG' % size
            except:
                kwd['size'] = size
        return kwd

    if self.snap:
        pvs = []
        try:
            for snap in self.snap['pv_snaps']:
                snap = storage2.snapshot(snap)
                vol = storage2.volume(type=snap.type, snap=snap)
                vol.ensure()
                pvs.append(vol)
        except:
            for pv in pvs:
                pv.destroy()
            raise
        self.pvs = pvs
        self.vg = self.snap['vg']
        self.name = self.snap['name']

    pv_volumes = []
    for pv_volume in self.pvs:
        pv_volume = storage2.volume(pv_volume)
        pv_volume.ensure()

        pvs = lvm2.pvs()
        if pv_volume.device not in pvs:
            if pv_volume.mounted_to():
                pv_volume.umount()
            lvm2.pvcreate(pv_volume.device)
        pv_volumes.append(pv_volume)
    self.pvs = pv_volumes

    self._check_attr('vg')

    try:
        lv_info = self._lvinfo()
    except lvm2.NotFound:
        self._check_attr('size')

        try:
            lvm2.vgs(self.vg)
        except lvm2.NotFound:
            lvm2.vgcreate(self.vg, *[disk.device for disk in self.pvs])

        kwds = {'name': self.name}
        kwds.update(get_lv_size_kwarg(self.size))

        lvm2.lvcreate(self.vg, **kwds)
        lv_info = self._lvinfo()

    self._config.update({'device': lv_info.lv_path, 'snap': None})

    pvs_to_extend_vg = []
    for pv in self.pvs:
        pv_info = lvm2.pvs(pv.device).popitem()[1]
        if not pv_info.vg_name:
            pvs_to_extend_vg.append(pv_info.pv_name)
            continue
        if os.path.basename(self.vg) != pv_info.vg_name:
            raise storage2.StorageError(
                    'Can not add physical volume %s to volume group %s: '
                    'already in volume group %s' % (
                    pv_info.pv_name, self.vg, pv_info.vg_name))

    if pvs_to_extend_vg:
        lvm2.vgextend(self.vg, *pvs_to_extend_vg)
        lvm2.lvextend(self.device, **get_lv_size_kwarg(self.size))
        if self.is_fs_created():
            fs = storage2.filesystem(self.fstype)
            if fs.features.get('resizable'):
                fs.resize(self.device)

    if lv_info.lv_attr[4] == '-':
        lvm2.lvchange(self.device, available='y')
        util.wait_until(lambda: os.path.exists(self.device),
                        sleep=1, timeout=30,
                        start_text='Waiting for device %s' % self.device,
                        error_text='Device %s not available' % self.device)
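
# --- Illustrative sketch (not part of the class) -------------------------------
# The size-to-lvcreate-keyword mapping implemented by get_lv_size_kwarg() above.
# A standalone equivalent is used here so the example is runnable on its own.
def _example_size_kwarg(size):
    kwd = dict()
    if '%' in str(size):
        kwd['extents'] = size           # percentage sizes are passed as extents
    else:
        try:
            int(size)
            kwd['size'] = '%sG' % size  # bare numbers are treated as gigabytes
        except ValueError:
            kwd['size'] = size          # anything else (e.g. '512M') is passed as-is
    return kwd

assert _example_size_kwarg('80%FREE') == {'extents': '80%FREE'}
assert _example_size_kwarg(10) == {'size': '10G'}
assert _example_size_kwarg('512M') == {'size': '512M'}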
def _ensure(self):
    def get_lv_size_kwarg(size):
        kwd = dict()
        if '%' in str(size):
            kwd['extents'] = size
        else:
            try:
                int(size)
                kwd['size'] = '%sG' % size
            except:
                kwd['size'] = size
        return kwd

    if self.snap:
        pvs = []
        try:
            for snap in self.snap['pv_snaps']:
                snap = storage2.snapshot(snap)
                vol = storage2.volume(type=snap.type, snap=snap)
                vol.ensure()
                pvs.append(vol)
        except:
            for pv in pvs:
                pv.destroy()
            raise
        self.pvs = pvs
        self.vg = self.snap['vg']
        self.name = self.snap['name']

    pv_volumes = []
    for pv_volume in self.pvs:
        pv_volume = storage2.volume(pv_volume)
        pv_volume.ensure()

        pvs = lvm2.pvs()
        if pv_volume.device not in pvs:
            pv_volume.umount()
            lvm2.pvcreate(pv_volume.device)
        pv_volumes.append(pv_volume)
    self.pvs = pv_volumes

    self._check_attr('vg')

    try:
        lv_info = self._lvinfo()
    except lvm2.NotFound:
        self._check_attr('size')

        try:
            lvm2.vgs(self.vg)
        except lvm2.NotFound:
            lvm2.vgcreate(self.vg, *[disk.device for disk in self.pvs])

        kwds = {'name': self.name}
        kwds.update(get_lv_size_kwarg(self.size))

        lvm2.lvcreate(self.vg, **kwds)
        lv_info = self._lvinfo()

    self._config.update({
        'device': lv_info.lv_path,
        'snap': None
    })

    pvs_to_extend_vg = []
    for pv in self.pvs:
        pv_info = lvm2.pvs(pv.device)[pv.device]
        if not pv_info.vg_name:
            pvs_to_extend_vg.append(pv.device)
            continue
        if os.path.basename(self.vg) != pv_info.vg_name:
            raise storage2.StorageError(
                    'Can not add physical volume %s to volume group %s: '
                    'already in volume group %s' % (
                    pv.device, self.vg, pv_info.vg_name))

    if pvs_to_extend_vg:
        lvm2.vgextend(self.vg, *pvs_to_extend_vg)
        lvm2.lvextend(self.device, **get_lv_size_kwarg(self.size))
        if self.is_fs_created():
            self.fscreated = True
            fs = storage2.filesystem(self.fstype)
            if fs.features.get('resizable'):
                fs.resize(self.device)

    if lv_info.lv_attr[4] == '-':
        lvm2.lvchange(self.device, available='y')
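
# --- Illustrative sketch (not part of the class) -------------------------------
# The volume attributes the LVM _ensure() variants above rely on. Key names come
# from the code; the values and the 'loop' disk type are hypothetical placeholders.
_EXAMPLE_LVM_VOLUME_CONFIG = {
    'vg': 'data',                          # required (_check_attr('vg'))
    'name': 'vol0',                        # logical volume name
    'size': '100%FREE',                    # '%' -> extents; bare number N -> 'NG'; else passed as-is
    'pvs': [{'type': 'loop', 'size': 1}],  # backing physical volumes
    'snap': None,                          # or {'pv_snaps': [...], 'vg': ..., 'name': ...} to restore
}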