Example #1
    def _snapshot(self, description, tags, **kwds):
        # Freeze I/O on the device-mapper device (if it exists) so that
        # all physical volumes are snapshotted in a consistent state.
        active = os.path.exists(self.device)
        if active:
            coreutils.dmsetup('suspend', self.device)
        try:
            if not description:
                description = self.id
            # ${index} is a per-volume placeholder filled in by
            # storage2.concurrent_snapshot()
            description += ' PV-${index}'
            pv_snaps = storage2.concurrent_snapshot(self.pvs, description,
                                                    tags, **kwds)
            return storage2.snapshot(type='lvm',
                                     pv_snaps=pv_snaps,
                                     vg=self.vg,
                                     name=self.name,
                                     size=self.size)
        finally:
            # Always resume I/O, even if snapshotting failed
            if active:
                coreutils.dmsetup('resume', self.device)
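
The suspend/snapshot/resume discipline above is what keeps the per-PV snapshots mutually consistent. As a rough standalone illustration of the same pattern, here is a minimal sketch that drives the dmsetup CLI directly; the dmsetup() wrapper mirrors what coreutils.dmsetup appears to do, and take_snapshot is a hypothetical callable standing in for the real snapshot work (both are assumptions, not the surrounding codebase's API):

    import os
    import subprocess

    def dmsetup(action, device):
        # Thin wrapper over the dmsetup CLI (needs root), mirroring the
        # coreutils.dmsetup helper used above -- an assumption.
        subprocess.check_call(['dmsetup', action, device])

    def snapshot_consistently(device, take_snapshot):
        # take_snapshot is a hypothetical zero-argument callable that does
        # the actual snapshot work while I/O on `device` is frozen.
        active = os.path.exists(device)
        if active:
            dmsetup('suspend', device)
        try:
            return take_snapshot()
        finally:
            # Always thaw, even if the snapshot failed
            if active:
                dmsetup('resume', device)

Suspending a device-mapper device flushes in-flight writes and blocks new I/O, which is why a snapshot taken in between is crash-consistent.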
Example #2
    def _snapshot(self, description, tags, **kwds):
        # Flush dirty buffers, then freeze I/O on the RAID device so that
        # every underlying disk is snapshotted in a consistent state.
        coreutils.sync()
        lvm2.dmsetup('suspend', self.device)
        try:
            # ${index} is a per-disk placeholder filled in by
            # storage2.concurrent_snapshot()
            description = 'Raid%s disk ${index}%s' % (
                self.level,
                '. %s' % description if description else '')
            disks_snaps = storage2.concurrent_snapshot(volumes=self.disks,
                                                       description=description,
                                                       tags=tags,
                                                       **kwds)

            return storage2.snapshot(type='raid',
                                     disks=disks_snaps,
                                     lvm_group_cfg=lvm2.backup_vg_config(
                                         self.vg),
                                     level=self.level,
                                     pv_uuid=self.pv_uuid,
                                     vg=self.vg)
        finally:
            # Always resume I/O, even if snapshotting failed
            lvm2.dmsetup('resume', self.device)
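
Unlike the LVM example, the RAID snapshot also records the volume group metadata via lvm2.backup_vg_config, so the LVM layout can be recreated when the snapshot is restored. A minimal standalone sketch of what such a helper might do, using the stock vgcfgbackup command (the function name and temp-file path here are assumptions, not the actual lvm2 module):

    import subprocess

    def backup_vg_config(vg_name):
        # vgcfgbackup -f writes a plain-text backup of the VG metadata;
        # returning its contents lets the caller store it in the snapshot.
        path = '/tmp/%s.vgcfg' % vg_name
        subprocess.check_call(['vgcfgbackup', '-f', path, vg_name])
        with open(path) as fp:
            return fp.read()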
Example #3
    def _grow(self, new_vol, **growth):
        if int(self.level) in (0, 10):
            raise storage2.StorageError("Raid%s doesn't support growth" %
                                        self.level)

        disk_growth = growth.get('disks')

        current_len = len(self.disks)
        new_len = int(growth.get('disks_count', 0))
        increase_disk_count = new_len and new_len != current_len

        new_vol.lvm_group_cfg = self.lvm_group_cfg
        new_vol.pv_uuid = self.pv_uuid

        growed_disks = []
        added_disks = []
        try:
            if disk_growth:

                def _grow(index, disk, cfg, queue):
                    try:
                        ret = disk.grow(resize_fs=False, **cfg)
                        queue.put(dict(index=index, result=ret))
                    except:
                        e = sys.exc_info()[1]
                        queue.put(dict(index=index, error=e))

                # Concurrently grow each descendant disk
                queue = Queue.Queue()
                pool = []
                for index, disk_cfg_or_obj in enumerate(self.disks):
                    # We use index to save disk order in raid disks
                    disk = storage2.volume(disk_cfg_or_obj)

                    t = threading.Thread(
                        name='Raid %s disk %s grower' % (self.id, disk.id),
                        target=_grow,
                        args=(index, disk, disk_growth, queue))
                    t.daemon = True
                    t.start()
                    pool.append(t)

                for thread in pool:
                    thread.join()

                # Get disks growth results
                res = []
                while True:
                    try:
                        res.append(queue.get_nowait())
                    except Queue.Empty:
                        break

                res.sort(key=lambda p: p['index'])
                growed_disks = [r['result'] for r in res if 'result' in r]

                # Validate concurrent growth results
                assert len(res) == len(self.disks), (
                    "Not enough data in concurrent raid disks grow result")

                if not all('result' in r for r in res):
                    errors = '\n'.join(
                        str(r['error']) for r in res if 'error' in r)
                    raise storage2.StorageError('Failed to grow raid disks.'
                                                ' Errors: \n%s' % errors)

                assert len(growed_disks) == len(self.disks), (
                    "Got malformed disks growth result (not enough data).")

                new_vol.disks = growed_disks
                new_vol.pv_uuid = self.pv_uuid
                new_vol.lvm_group_cfg = self.lvm_group_cfg

                new_vol.ensure()

            if increase_disk_count:
                if not disk_growth:
                    """ It means we have original disks in self.disks
                            We need to snapshot it and make new disks.
                    """
                    new_vol.disks = []
                    snaps = storage2.concurrent_snapshot(
                        self.disks,
                        'Raid %s temp snapshot No.${index} (for growth)' %
                        self.id,
                        tags=dict(temp='1'))
                    try:
                        for disk, snap in zip(self.disks, snaps):
                            new_disk = disk.clone()
                            new_disk.snap = snap
                            new_vol.disks.append(new_disk)
                            new_disk.ensure()
                    finally:
                        for s in snaps:
                            try:
                                s.destroy()
                            except:
                                e = sys.exc_info()[1]
                                LOG.debug(
                                    'Failed to remove temporary snapshot: %s' %
                                    e)

                    new_vol.ensure()

                existing_raid_disk = new_vol.disks[0]
                add_disks_count = new_len - current_len
                for _ in range(add_disks_count):
                    disk_to_add = existing_raid_disk.clone()
                    added_disks.append(disk_to_add)
                    disk_to_add.ensure()

                added_disks_devices = [d.device for d in added_disks]
                mdadm.mdadm('manage',
                            new_vol.raid_pv,
                            add=True,
                            *added_disks_devices)
                new_vol.disks.extend(added_disks)

                mdadm.mdadm('grow', new_vol.raid_pv, raid_devices=new_len)

            mdadm.mdadm('misc',
                        None,
                        new_vol.raid_pv,
                        wait=True,
                        raise_exc=False)
            mdadm.mdadm('grow', new_vol.raid_pv, size='max')
            mdadm.mdadm('misc',
                        None,
                        new_vol.raid_pv,
                        wait=True,
                        raise_exc=False)

            lvm2.pvresize(new_vol.raid_pv)
            try:
                lvm2.lvresize(new_vol.device, extents='100%VG')
            except:
                e = sys.exc_info()[1]
                if (int(self.level) == 1 and 'matches existing size' in str(e)
                        and not disk_growth):
                    LOG.debug('Raid1 actual size has not changed')
                else:
                    raise
        except:
            # Remember the original exception so that cleanup failures
            # below don't mask it
            err_type, err_val, trace = sys.exc_info()
            if growed_disks or added_disks:
                LOG.debug(
                    "Removing %s successfully grown disks and "
                    "%s additional disks", len(growed_disks), len(added_disks))
                for disk in itertools.chain(growed_disks, added_disks):
                    try:
                        disk.destroy(force=True)
                    except:
                        e = sys.exc_info()[1]
                        LOG.error('Failed to remove raid disk: %s' % e)

            # Re-raise the original exception with its traceback
            # (Python 2 three-expression raise)
            raise err_type, err_val, trace
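
The core of the disk-growth step is a fan-out/collect pattern: one thread per disk, results pushed onto a queue tagged with the disk's index so order survives, then a post-join validation pass. A condensed standalone sketch of that pattern (Python 3 spelling here; grow_fn is a hypothetical stand-in for disk.grow):

    import queue
    import threading

    def grow_concurrently(disks, grow_fn):
        # Run grow_fn on every disk in parallel and collect the results
        # in the original disk order, raising if any worker failed.
        results = queue.Queue()

        def worker(index, disk):
            try:
                results.put((index, 'result', grow_fn(disk)))
            except Exception as e:
                results.put((index, 'error', e))

        threads = [threading.Thread(target=worker, args=(i, d), daemon=True)
                   for i, d in enumerate(disks)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # Indices are unique, so sorting restores the original disk order
        collected = sorted(results.get_nowait() for _ in range(len(disks)))
        errors = [val for _, kind, val in collected if kind == 'error']
        if errors:
            raise RuntimeError('Failed to grow disks: %s' % errors)
        return [val for _, kind, val in collected]

Once the per-disk growth succeeds, the example finishes with the sequence visible above: an mdadm grow to the new size, a wait for the array to settle, then pvresize and lvresize so the LVM stack picks up the extra capacity.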