Example no. 1
 def _snapshot(self, description, tags, **kwds):
     snapshot = self._create_snapshot(self.id, description,
                                      kwds.get('nowait', True))
     return storage2.snapshot(type='cinder',
                              id=snapshot.id,
                              description=description,
                              tags=tags)
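A minimal usage sketch for this hook (the public snapshot() wrapper signature, the import path, and the volume id are assumptions inferred from how these snippets call each other):

    from scalarizr import storage2  # import path assumed

    vol = storage2.volume(type='cinder', id='vol-0123abcd')  # hypothetical id
    vol.ensure()
    # nowait defaults to True (return as soon as the snapshot is registered);
    # passing nowait=False is assumed to block until Cinder finishes.
    snap = vol.snapshot('nightly backup', tags={'scope': 'backup'}, nowait=False)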
Example no. 2
	def _snapshot(self, description, tags, **kwds):
		lvm_snap = self._lvm_volume.lvm_snapshot(size='100%FREE')
		try:
			snap = storage2.snapshot(type='eph')
			snap.path = os.path.join(
							self.cloudfs_dir, snap.id + '.manifest.ini')

			lvm_snap_vol = storage2.volume(
							device=lvm_snap.device,
							mpoint=tempfile.mkdtemp())
			lvm_snap_vol.ensure(mount=True)

			df_info = filetool.df()
			df = filter(lambda x: x.mpoint == lvm_snap_vol.mpoint, df_info)

			snap.size = df[0].used

			try:
				transfer = cloudfs.LargeTransfer(
								src=lvm_snap_vol.mpoint + '/',
								dst=snap.path,
								tar_it=True,
								gzip_it=True,
								tags=tags)
				transfer.run()
			finally:
				lvm_snap_vol.umount()
				os.rmdir(lvm_snap_vol.mpoint)
		finally:
			lvm_snap.destroy()

		return snap
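The umount/rmdir cleanup inside nested try/finally blocks above is a recurring pattern in these examples. A sketch of the same bracket as a reusable context manager, assuming the volume object exposes ensure(mount=True) and umount() as the storage2 volumes here do:

    import contextlib
    import os
    import tempfile

    @contextlib.contextmanager
    def temporary_mpoint(volume):
        # Mount the volume at a throwaway directory and always clean up.
        volume.mpoint = tempfile.mkdtemp()
        volume.ensure(mount=True)
        try:
            yield volume.mpoint
        finally:
            try:
                volume.umount()
            finally:
                os.rmdir(volume.mpoint)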
Example no. 3
    def _snapshot(self, description, tags, **kwds):
        snap = storage2.snapshot(type='eph')
        lvm_snap = self._lvm_volume.lvm_snapshot(size='100%FREE')

        t = threading.Thread(target=snap.upload_lvm_snapshot, args=(lvm_snap, tags, self.cloudfs_dir))
        t.start()
        return snap
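Unlike Example no. 2, this variant uploads in a background thread and returns the snapshot immediately, so callers may receive a snapshot whose data is still in flight. A polling sketch; the status() method and its terminal state names are assumptions about the storage2 snapshot interface, not confirmed by these snippets:

    import time

    def wait_uploaded(snap, timeout=3600, interval=5):
        # Hypothetical: poll until the upload reaches a terminal state.
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = snap.status()
            if status in ('completed', 'failed'):
                return status
            time.sleep(interval)
        raise Exception('snapshot %s upload timed out' % snap.id)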
Example no. 4
    def lvm_snapshot(self, name=None, size=None):
        long_kwds = {
                'name': name or '%ssnap' % self.name,
                'snapshot': '%s/%s' % (self.vg, self.name)
        }
        if size:
            size=str(size)
            if '%' in size:
                long_kwds['extents'] = size
            else:
                long_kwds['size'] = size
        else:
            long_kwds['extents'] = '1%ORIGIN'

        lvol = '%s/%s' % (self.vg, long_kwds['name'])
        if lvol in lvm2.lvs():
            lvm2.lvremove(lvol)
        lvm2.lvcreate(**long_kwds)
        lv_info = lvm2.lvs(lvol).values()[0]

        return storage2.snapshot(
                        type='lvm_native',
                        name=lv_info.lv_name,
                        vg=lv_info.vg_name,
                        device=lv_info.lv_path)
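Note the defaults: without an explicit size the snapshot LV gets a copy-on-write area of extents='1%ORIGIN' (1% of the origin volume), and a stale snapshot LV with the same name is removed before creation. A minimal calling sketch, assuming data_vol is an ensured storage2 LVM volume:

    lvm_snap = data_vol.lvm_snapshot(size='100%FREE')  # use all free VG extents
    try:
        pass  # mount lvm_snap.device somewhere and copy the data off
    finally:
        lvm_snap.destroy()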
Example no. 5
 def _snapshot(self, description, tags, **kwds):
     snapshot = self._create_snapshot(self.id, description,
                                      kwds.get('nowait', True))
     return storage2.snapshot(
         type='cinder',
         id=snapshot.id,
         description=snapshot.display_description,
         tags=tags)
Example no. 6
 def _run(self):
     self.snapshot = storage2.snapshot(self.snapshot)
     if self.volume:
         self.volume = storage2.volume(self.volume)
         self.volume.snap = self.snapshot
         self.volume.ensure()
     else:
         self.volume = self.snapshot.restore()
     return self.volume
Example no. 7
	def _snapshot(self, description, tags, **kwds):
		conf = self._eph_vol.config()
		del conf['id']
		eph_snap = self._eph_pvd.snapshot_factory(description, **conf)		
		eph_snap = self._eph_pvd.create_snapshot(self._eph_vol, eph_snap, **kwds)
		
		snap = storage2.snapshot(type='eph')
		snap._config.update(eph_snap.config())
		snap._eph_pvd = self._eph_pvd
		return snap
Example no. 8
	def _ensure(self):
		# The snap should be applied after layout: download and extract data.
		# This could be done on an already ensured volume.
		# Example: resync slave data

		if not self._lvm_volume:
			if isinstance(self.disk, basestring) and \
					self.disk.startswith('/dev/sd'):
				self.disk = storage2.volume(
						type='ec2_ephemeral', 
						name='ephemeral0')
			self._lvm_volume = storage2.volume(
					type='lvm',
					pvs=[self.disk],
					size=self.size + 'VG',
					vg=self.vg,
					name='data')

		self._lvm_volume.ensure()
		self.device = self._lvm_volume.device

		if self.snap:
			self.snap = storage2.snapshot(self.snap)
			self.mkfs()
			tmp_mpoint = not self.mpoint
			if tmp_mpoint:
				tmp_mpoint = tempfile.mkdtemp()
				self.mpoint = tmp_mpoint

			transfer = cloudfs.LargeTransfer(self.snap.path, self.mpoint + '/')
			try:
				self.mount()
				if hasattr(self.snap, 'size'):
					df_info = filetool.df()
					df = filter(lambda x: x.mpoint == self.mpoint, df_info)[0]
					if df.free < self.snap.size:
						raise storage2.StorageError('Not enough free space'
								' on device %s to restore snapshot.' %
								self.device)

				transfer.run()
			except:
				e = sys.exc_info()[1]
				raise storage2.StorageError("Snapshot restore error: %s" % e)
			finally:
				try:
					self.umount()
				finally:
					if tmp_mpoint:
						self.mpoint = None
						os.rmdir(tmp_mpoint)

			self.snap = None
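This restore path sizes the check with filetool.df(); later revisions (Examples no. 18 and 24) use coreutils.statvfs instead. A stdlib-only sketch of the same free-space lookup:

    import os

    def free_bytes(mpoint):
        # Equivalent of the df/statvfs lookups above, stdlib only.
        st = os.statvfs(mpoint)
        return st.f_bavail * st.f_frsize  # bytes available to non-root users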
Example no. 9
    def _snapshot(self, description, tags, **kwds):
        '''
        @type nowait: bool
        @param nowait: Do not wait for snapshot completion. Default: True
        '''

        self._check_connection()
        snapshot = self._create_snapshot(self.id, kwds.get('nowait', True))
        return storage2.snapshot(type='csvol',
                                 id=snapshot.id,
                                 description=description,
                                 tags=tags)
Example no. 10
    def _snapshot(self, description, tags, **kwds):
        '''
        @type nowait: bool
        @param nowait: Do not wait for snapshot completion. Default: True
        '''

        self._check_ec2()
        snapshot = self._create_snapshot(self.id, description, tags, kwds.get('nowait', True))
        return storage2.snapshot(
                        type='ebs',
                        id=snapshot.id,
                        description=snapshot.description,
                        tags=tags)
Example no. 11
 def _snapshot(self, description, tags, **kwds):
     active = os.path.exists(self.device)
     if active:
         coreutils.dmsetup('suspend', self.device)
     try:
         if not description:
             description = self.id
         description += ' PV-${index}'
         pv_snaps = storage2.concurrent_snapshot(self.pvs, description,
                                                 tags, **kwds)
         return storage2.snapshot(type='lvm',
                                  pv_snaps=pv_snaps,
                                  vg=self.vg,
                                  name=self.name,
                                  size=self.size)
     finally:
         if active:
             coreutils.dmsetup('resume', self.device)
Example no. 12
	def _snapshot(self, description, tags, **kwds):
		active = os.path.exists(self.device)
		if active:
			coreutils.dmsetup('suspend', self.device)
		try:
			if not description:
				description = self.id
			description += ' PV-${index}'
			pv_snaps = storage2.concurrent_snapshot(self.pvs, 
									description, tags, **kwds)
			return storage2.snapshot(
					type='lvm',
					pv_snaps=pv_snaps,
					vg=self.vg,
					name=self.name,
					size=self.size)
		finally:
			if active:
				coreutils.dmsetup('resume', self.device) 
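Examples no. 11 and 12 bracket the concurrent PV snapshots with dmsetup suspend/resume so the logical volume is frozen while its disks are snapshotted. The same bracket written once as a context manager; a sketch, with the coreutils import path assumed:

    import contextlib
    import os

    from scalarizr.linux import coreutils  # import path assumed

    @contextlib.contextmanager
    def suspended(device):
        # Freeze I/O on the device-mapper node for the duration of the block.
        active = os.path.exists(device)
        if active:
            coreutils.dmsetup('suspend', device)
        try:
            yield
        finally:
            if active:
                coreutils.dmsetup('resume', device)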
Example no. 13
    def _snapshot(self, description, tags, **kwds):
        coreutils.sync()
        lvm2.dmsetup('suspend', self.device)
        try:
            description = 'Raid%s disk ${index}%s' % (self.level, \
                                            '. %s' % description if description else '')
            disks_snaps = storage2.concurrent_snapshot(volumes=self.disks,
                                                       description=description,
                                                       tags=tags,
                                                       **kwds)

            return storage2.snapshot(type='raid',
                                     disks=disks_snaps,
                                     lvm_group_cfg=lvm2.backup_vg_config(
                                         self.vg),
                                     level=self.level,
                                     pv_uuid=self.pv_uuid,
                                     vg=self.vg)
        finally:
            lvm2.dmsetup('resume', self.device)
Example no. 14
    def _snapshot(self, description, tags, **kwds):
        coreutils.sync()
        lvm2.dmsetup('suspend', self.device)
        try:
            description = 'Raid%s disk ${index}%s' % (self.level, \
                                            '. %s' % description if description else '')
            disks_snaps = storage2.concurrent_snapshot(
                    volumes=self.disks,
                    description=description,
                    tags=tags, **kwds
            )

            return storage2.snapshot(
                    type='raid',
                    disks=disks_snaps,
                    lvm_group_cfg=lvm2.backup_vg_config(self.vg),
                    level=self.level,
                    pv_uuid=self.pv_uuid,
                    vg=self.vg
            )
        finally:
            lvm2.dmsetup('resume', self.device)
Example no. 15
    def lvm_snapshot(self, name=None, size=None):
        long_kwds = {
            'name': name or '%ssnap' % self.name,
            'snapshot': '%s/%s' % (self.vg, self.name)
        }
        if size:
            size = str(size)
            if '%' in size:
                long_kwds['extents'] = size
            else:
                long_kwds['size'] = size
        else:
            long_kwds['extents'] = '1%ORIGIN'

        lvol = '%s/%s' % (self.vg, long_kwds['name'])
        if lvol in lvm2.lvs():
            lvm2.lvremove(lvol)
        lvm2.lvcreate(**long_kwds)
        lv_info = lvm2.lvs(lvol).values()[0]

        return storage2.snapshot(type='lvm_native',
                                 name=lv_info.lv_name,
                                 vg=lv_info.vg_name,
                                 device=lv_info.lv_path)
Example no. 16
    def _ensure(self):
        self._v1_compat = self.snap and len(self.snap['disks']) and \
                                        isinstance(self.snap['disks'][0], dict) and \
                                        'snapshot' in self.snap['disks'][0]
        if self.snap:
            disks = []
            snaps = []
            try:
                # @todo: create disks concurrently
                for disk_snap in self.snap['disks']:
                    if self._v1_compat:
                        disk_snap = disk_snap['snapshot']
                    snap = storage2.snapshot(disk_snap)
                    snaps.append(snap)

                if self.disks:
                    if len(self.disks) != len(snaps):
                        raise storage2.StorageError(
                            'Volume disks count is not equal to '
                            'snapshot disks count')
                    self.disks = map(storage2.volume, self.disks)

                # Mixing snapshots to self.volumes (if exist) or empty volumes
                disks = self.disks or [
                    storage2.volume(type=s['type']) for s in snaps
                ]

                for disk, snap in zip(disks, snaps):
                    disk.snap = snap

            except:
                with util.capture_exception(logger=LOG):
                    for disk in disks:
                        disk.destroy()

            self.disks = disks

            if self._v1_compat:
                # in some old snapshots vg occurred as /dev/vgname
                self.vg = os.path.basename(self.snap['vg'])
            else:
                self.vg = self.snap['vg']
            self.level = int(self.snap['level'])
            self.pv_uuid = self.snap['pv_uuid']
            self.lvm_group_cfg = self.snap['lvm_group_cfg']

            self.snap = None

        self._check_attr('level')
        self._check_attr('vg')
        self._check_attr('disks')

        assert int(self.level) in (0, 1, 5,
                                   10), 'Unknown raid level: %s' % self.level

        # Making sure autoassembly is disabled before attaching disks
        self._disable_autoassembly()

        disks = []
        for disk in self.disks:
            disk = storage2.volume(disk)
            disk.ensure()
            disks.append(disk)
        self.disks = disks

        disks_devices = [disk.device for disk in self.disks]

        if self.lvm_group_cfg:
            time.sleep(2)  # Give a time to device manager
            try:
                raid_device = mdadm.mdfind(*disks_devices)
            except storage2.StorageError:
                raid_device = mdadm.findname()
                """
                if self.level in (1, 10):
                        for disk in disks_devices:
                                mdadm.mdadm('misc', None, disk,
                                                        zero_superblock=True, force=True)

                        try:
                                kwargs = dict(force=True, metadata='default',
                                                          level=self.level, assume_clean=True,
                                                          raid_devices=len(disks_devices))
                                mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                        except:
                                if self.level == 10 and self._v1_compat:
                                        self._v1_repair_raid10(raid_device)
                                else:
                                        raise
                else:
                """
                mdadm.mdadm('assemble', raid_device, *disks_devices)
                mdadm.mdadm('misc',
                            None,
                            raid_device,
                            wait=True,
                            raise_exc=False)

            # Restore vg config
            vg_restore_file = tempfile.mktemp()
            with open(vg_restore_file, 'w') as f:
                f.write(base64.b64decode(self.lvm_group_cfg))

            # Ensure RAID physical volume
            try:
                lvm2.pvs(raid_device)
            except:
                lvm2.pvcreate(raid_device,
                              uuid=self.pv_uuid,
                              restorefile=vg_restore_file)
            finally:
                lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
                os.remove(vg_restore_file)

            # Check that logical volume exists
            lv_infos = lvm2.lvs(self.vg)
            if not lv_infos:
                raise storage2.StorageError(
                    'No logical volumes found in %s vol. group' % self.vg)
            lv_name = lv_infos.popitem()[1].lv_name
            self.device = lvm2.lvpath(self.vg, lv_name)

            # Activate volume group
            lvm2.vgchange(self.vg, available='y')

            # Wait for logical volume device file
            util.wait_until(lambda: os.path.exists(self.device),
                            timeout=120,
                            logger=LOG,
                            error_text='Logical volume %s not found' %
                            self.device)

        else:
            raid_device = mdadm.findname()
            kwargs = dict(force=True,
                          level=self.level,
                          assume_clean=True,
                          raid_devices=len(disks_devices),
                          metadata='default')
            mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

            lvm2.pvcreate(raid_device, force=True)
            self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

            lvm2.vgcreate(self.vg, raid_device)

            out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
            try:
                clean_out = out.strip().split('\n')[-1].strip()
                vol = re.match(self.lv_re, clean_out).group(1)
                self.device = lvm2.lvpath(self.vg, vol)
            except:
                e = 'Logical volume creation failed: %s\n%s' % (out, err)
                raise Exception(e)

            self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

        self.raid_pv = raid_device
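The two branches above mirror restore versus first boot: with lvm_group_cfg present, the existing array is located via mdadm.mdfind (or reassembled), the physical volume is recreated with its saved UUID, and the volume group metadata is restored from the base64-encoded backup; otherwise a fresh array, PV, VG and a single LV spanning 100%FREE are created, and the VG config is backed up for future restores.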
Example no. 17
	def _destroy(self):
		for snap in self.pv_snaps:
			if isinstance(snap, dict):
				snap = storage2.snapshot(**snap)
			snap.destroy()
Example no. 18
    def _ensure(self):
        # The snap should be applied after layout: download and extract data.
        # This could be done on an already ensured volume.
        # Example: resync slave data

        if not self._lvm_volume:
            # First of all, merge self config and snapshot config
            self.snap = storage2.snapshot(self.snap) if self.snap else None

            for attr in ('fstype', 'size', 'vg', 'mpoint'):
                if not getattr(self, attr, None):
                    if not self.snap or not getattr(self.snap, attr, None):
                        raise storage2.StorageError('Missing ephemeral volume attribute "%s"' % attr)
                    setattr(self, attr, getattr(self.snap, attr))
            if not (self.disk or self.disks):
                raise storage2.StorageError('Missing "disk" or "disks" attribute')

            if self.disk:
                self.disk = storage2.volume(self.disk)
                # Compatibility with storage v1
                if self.disk.device and self.disk.type == 'base':
                    if self.disk.device.startswith('/dev/sd'):
                        self.disk = storage2.volume(type='ec2_ephemeral', name='ephemeral0')
                    elif 'google' in self.disk.device:
                        self.disk = storage2.volume(type='gce_ephemeral', name='ephemeral-disk-0')

            self._lvm_volume = storage2.volume(
                            type='lvm',
                            pvs=[self.disk] if self.disk else self.disks,
                            size=self.size + 'VG',
                            vg=self.vg,
                            name='data')

        self._lvm_volume.ensure()
        self.device = self._lvm_volume.device
        # To allow ensure(mkfs=True, mount=True) after volume passed
        # scalarizr 1st initialization
        self.fscreated = self.is_fs_created()

        if self.snap:
            self.snap = storage2.snapshot(self.snap)
            # umount device to allow filesystem re-creation
            if self.mounted_to():
                self.umount()
            self.mkfs(force=True)

            tmp_mpoint = not self.mpoint
            if tmp_mpoint:
                tmp_mpoint = tempfile.mkdtemp()
                self.mpoint = tmp_mpoint

            try:
                transfer = cloudfs.LargeTransfer(self.snap.path, self.mpoint + '/')
                self.mount()
                if hasattr(self.snap, 'data_size'):
                    fs_free = coreutils.statvfs(self.mpoint)['avail']
                    if fs_free < int(self.snap.data_size):
                        raise storage2.StorageError('Not enough free space'
                                        ' on device %s to restore snapshot.' %
                                        self.device)

                result = transfer.run()
                if result.get('failed'):
                    err = result['failed'][0]['exc_info'][1]
                    raise storage2.StorageError('Failed to download snapshot '
                                                'data. %s' % err)
            except:
                e = sys.exc_info()[1]
                raise storage2.StorageError("Snapshot restore error: %s" % e)
            finally:
                try:
                    self.umount()
                finally:
                    if tmp_mpoint:
                        self.mpoint = None
                        os.rmdir(tmp_mpoint)

            self.snap = None
Example no. 19
	def _ensure(self):
		def get_lv_size_kwarg(size):
			kwd = dict()
			if '%' in str(size):
				kwd['extents'] = size
			else:
				try:
					int(size)
					kwd['size'] = '%sG' % size
				except:
					kwd['size'] = size
			return kwd

		if self.snap:
			pvs = []
			try:
				for snap in self.snap['pv_snaps']:
					snap = storage2.snapshot(snap)
					vol = storage2.volume(type=snap.type, snap=snap)
					vol.ensure()
					pvs.append(vol)
			except:
				for pv in pvs:
					pv.destroy()
				raise
			self.pvs = pvs
			self.vg = self.snap['vg']
			self.name = self.snap['name']
		
		pv_volumes = []
		for pv_volume in self.pvs:
			pv_volume = storage2.volume(pv_volume)
			pv_volume.ensure()

			pvs = lvm2.pvs()
			if pv_volume.device not in pvs:
				pv_volume.umount()
				lvm2.pvcreate(pv_volume.device)
			pv_volumes.append(pv_volume)
		self.pvs = pv_volumes

		self._check_attr('vg')
		try:
			lv_info = self._lvinfo()
		except lvm2.NotFound:
			self._check_attr('size')
			
			try:
				lvm2.vgs(self.vg)
			except lvm2.NotFound:
				lvm2.vgcreate(self.vg, *[disk.device for disk in self.pvs])

			kwds = {'name': self.name}
			kwds.update(get_lv_size_kwarg(self.size))

			lvm2.lvcreate(self.vg, **kwds)
			lv_info = self._lvinfo()

		self._config.update({
			'device': lv_info.lv_path,
			'snap': None
		})

		pvs_to_extend_vg = []
		for pv in self.pvs:
			pv_info = lvm2.pvs(pv.device)[pv.device]

			if not pv_info.vg_name:
				pvs_to_extend_vg.append(pv.device)
				continue

			if os.path.basename(self.vg) != pv_info.vg_name:
				raise storage2.StorageError(
					'Can not add physical volume %s to volume group %s: already'
					' in volume group %s' %
					(pv.device, self.vg, pv_info.vg_name))

		if pvs_to_extend_vg:
			lvm2.vgextend(self.vg, *pvs_to_extend_vg)
			lvm2.lvextend(self.device, **get_lv_size_kwarg(self.size))
			if self.is_fs_created():
				self.fscreated = True
				fs = storage2.filesystem(self.fstype)
				if fs.features.get('resizable'):
					fs.resize(self.device)

		if lv_info.lv_attr[4] == '-':
			lvm2.lvchange(self.device, available='y')
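The nested get_lv_size_kwarg helper picks between lvcreate's --extents and --size: percentage strings become extents, bare integers are read as gigabytes, and anything else is passed through as a size string. If the helper were lifted to module scope, it would behave like this:

    assert get_lv_size_kwarg('80%VG') == {'extents': '80%VG'}
    assert get_lv_size_kwarg(10) == {'size': '10G'}
    assert get_lv_size_kwarg('512M') == {'size': '512M'}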
Example no. 20
    def _ensure(self):
        self._v1_compat = self.snap and len(self.snap['disks']) and \
                                        isinstance(self.snap['disks'][0], dict) and \
                                        'snapshot' in self.snap['disks'][0]
        if self.snap:
            disks = []
            snaps = []
            try:
                # @todo: create disks concurrently
                for disk_snap in self.snap['disks']:
                    if self._v1_compat:
                        disk_snap = disk_snap['snapshot']
                    snap = storage2.snapshot(disk_snap)
                    snaps.append(snap)

                if self.disks:
                    if len(self.disks) != len(snaps):
                        raise storage2.StorageError('Volume disks count is not equal to '
                                                                                'snapshot disks count')
                    self.disks = map(storage2.volume, self.disks)

                # Mixing snapshots to self.volumes (if exist) or empty volumes
                disks = self.disks or [storage2.volume(type=s['type']) for s in snaps]

                for disk, snap in zip(disks, snaps):
                    disk.snap = snap

            except:
                with util.capture_exception(logger=LOG):
                    for disk in disks:
                        disk.destroy()

            self.disks = disks

            if self._v1_compat:
                # in some old snapshots vg occurred as /dev/vgname
                self.vg = os.path.basename(self.snap['vg'])
            else:
                self.vg = self.snap['vg']
            self.level = int(self.snap['level'])
            self.pv_uuid = self.snap['pv_uuid']
            self.lvm_group_cfg = self.snap['lvm_group_cfg']

            self.snap = None

        self._check_attr('level')
        self._check_attr('vg')
        self._check_attr('disks')

        assert int(self.level) in (0,1,5,10), 'Unknown raid level: %s' % self.level

        disks = []
        for disk in self.disks:
            disk = storage2.volume(disk)
            disk.ensure()
            disks.append(disk)
        self.disks = disks

        disks_devices = [disk.device for disk in self.disks]

        if self.lvm_group_cfg:
            try:
                raid_device = mdadm.mdfind(*disks_devices)
            except storage2.StorageError:
                raid_device = mdadm.findname()
                """
                if self.level in (1, 10):
                        for disk in disks_devices:
                                mdadm.mdadm('misc', None, disk,
                                                        zero_superblock=True, force=True)

                        try:
                                kwargs = dict(force=True, metadata='default',
                                                          level=self.level, assume_clean=True,
                                                          raid_devices=len(disks_devices))
                                mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                        except:
                                if self.level == 10 and self._v1_compat:
                                        self._v1_repair_raid10(raid_device)
                                else:
                                        raise
                else:
                """
                mdadm.mdadm('assemble', raid_device, *disks_devices)
                mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

            # Restore vg config
            vg_restore_file = tempfile.mktemp()
            with open(vg_restore_file, 'w') as f:
                f.write(base64.b64decode(self.lvm_group_cfg))

            # Ensure RAID physical volume
            try:
                lvm2.pvs(raid_device)
            except:
                lvm2.pvcreate(raid_device, uuid=self.pv_uuid,
                                        restorefile=vg_restore_file)
            finally:
                lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
                os.remove(vg_restore_file)


            # Check that logical volume exists
            lv_infos = lvm2.lvs(self.vg)
            if not lv_infos:
                raise storage2.StorageError(
                        'No logical volumes found in %s vol. group' % self.vg)
            lv_name = lv_infos.popitem()[1].lv_name
            self.device = lvm2.lvpath(self.vg, lv_name)

            # Activate volume group
            lvm2.vgchange(self.vg, available='y')

            # Wait for logical volume device file
            util.wait_until(lambda: os.path.exists(self.device),
                                    timeout=120, logger=LOG,
                                    error_text='Logical volume %s not found' % self.device)

        else:
            raid_device = mdadm.findname()
            kwargs = dict(force=True, level=self.level, assume_clean=True,
                                      raid_devices=len(disks_devices), metadata='default')
            mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

            lvm2.pvcreate(raid_device, force=True)
            self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

            lvm2.vgcreate(self.vg, raid_device)

            out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
            try:
                clean_out = out.strip().split('\n')[-1].strip()
                vol = re.match(self.lv_re, clean_out).group(1)
                self.device = lvm2.lvpath(self.vg, vol)
            except:
                e = 'Logical volume creation failed: %s\n%s' % (out, err)
                raise Exception(e)

            self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

        self.raid_pv = raid_device
Example no. 21
    def _ensure(self):

        garbage_can = []
        zone = os.path.basename(__node__['gce']['zone'])
        project_id = __node__['gce']['project_id']
        server_name = __node__['server_id']

        try:
            connection = __node__['gce']['compute_connection']
        except:
            """ No connection, implicit check """
            try:
                self._check_attr('name')
            except:
                raise storage2.StorageError(
                    'Disk is not created yet, and GCE connection'
                    ' is unavailable')
            device = gce_util.devicename_to_device(self.name)
            if not device:
                raise storage2.StorageError(
                    "Disk is not attached and GCE connection is unavailable")

            self.device = device
        else:

            try:
                # TODO(spike): raise VolumeNotExistsError when the disk referenced by link does not exist
                create = False
                if not self.link:
                    # Disk does not exist, create it first
                    create_request_body = dict(name=self.name,
                                               sizeGb=self.size)
                    if self.snap:
                        self.snap = storage2.snapshot(self.snap)
                        create_request_body['sourceSnapshot'] = self.snap.link
                    create = True
                else:
                    self._check_attr('zone')
                    if self.zone != zone:
                        # Volume is in different zone, snapshot it,
                        # create new volume from this snapshot, then attach
                        temp_snap = self.snapshot('volume')
                        garbage_can.append(temp_snap)
                        new_name = self.name + zone
                        create_request_body = dict(
                            name=new_name,
                            sizeGb=self.size,
                            sourceSnapshot=temp_snap.link)
                        create = True

                attach = False
                if create:
                    disk_name = create_request_body['name']
                    LOG.debug('Creating new GCE disk %s' % disk_name)
                    op = connection.disks().insert(
                        project=project_id,
                        zone=zone,
                        body=create_request_body).execute()
                    gce_util.wait_for_operation(connection, project_id,
                                                op['name'], zone)
                    disk_dict = connection.disks().get(disk=disk_name,
                                                       project=project_id,
                                                       zone=zone).execute()
                    self.id = disk_dict['id']
                    self.link = disk_dict['selfLink']
                    self.zone = zone
                    self.name = disk_name
                    attach = True

                else:
                    if self.last_attached_to and self.last_attached_to != server_name:
                        LOG.debug(
                            "Making sure that disk %s detached from previous attachment place."
                            % self.name)
                        gce_util.ensure_disk_detached(connection, project_id,
                                                      zone,
                                                      self.last_attached_to,
                                                      self.link)

                    attachment_inf = self._attachment_info(connection)
                    if attachment_inf:
                        disk_devicename = attachment_inf['deviceName']
                    else:
                        attach = True

                if attach:
                    LOG.debug('Attaching disk %s to current instance' %
                              self.name)
                    op = connection.instances().attachDisk(
                        instance=server_name,
                        project=project_id,
                        zone=zone,
                        body=dict(deviceName=self.name,
                                  source=self.link,
                                  mode="READ_WRITE",
                                  type="PERSISTENT")).execute()
                    gce_util.wait_for_operation(connection,
                                                project_id,
                                                op['name'],
                                                zone=zone)
                    disk_devicename = self.name

                device = gce_util.devicename_to_device(disk_devicename)
                if not device:
                    raise storage2.StorageError(
                        "Disk should be attached, but corresponding"
                        " device not found in system")
                self.device = device
                self.last_attached_to = server_name
                self.snap = None

            finally:
                # Perform cleanup
                for garbage in garbage_can:
                    try:
                        garbage.destroy(force=True)
                    except:
                        pass
Example no. 22
 def _snapshot(self, description, tags, **kwds):
     snapfile = '%s.snap.%s' % (self.file, self._uniq())
     shutil.copy(self.file, snapfile)
     return storage2.snapshot(type='loop', file=snapfile)
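The loop-volume snapshot is just a copy of the backing file under a unique name. A standalone equivalent; the timestamp suffix stands in for the undocumented _uniq() helper:

    import shutil
    import time

    def snapshot_loop_file(path):
        # Copy the backing file aside, imitating _uniq() with a timestamp.
        snapfile = '%s.snap.%s' % (path, int(time.time()))
        shutil.copy(path, snapfile)
        return snapfile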
Example no. 23
    def _ensure(self):

        garbage_can = []
        zone = os.path.basename(__node__['gce']['zone'])
        project_id = __node__['gce']['project_id']
        server_name = __node__['server_id']

        try:
            connection = __node__['gce'].connect_compute()
        except:
            e = sys.exc_info()[1]
            LOG.debug('Can not get GCE connection: %s' % e)
            """ No connection, implicit check """
            try:
                self._check_attr('name')
            except:
                raise storage2.StorageError('Disk is not created yet, and GCE connection is unavailable')
            device = gce_util.devicename_to_device(self.name)
            if not device:
                raise storage2.StorageError("Disk is not attached and GCE connection is unavailable")

            self.device = device
        else:
            LOG.debug('Successfully created connection to cloud engine')
            try:
                create = False
                if not self.link:
                    # Disk does not exist, create it first
                    create_request_body = dict(name=self.name)
                    if self.snap:
                        snap_dict = dict(self.snap)
                        snap_dict['type'] = STORAGE_TYPE
                        self.snap = storage2.snapshot(snap_dict)
                        LOG.debug('Ensuring that snapshot is ready, before creating disk from it')
                        gce_util.wait_snapshot_ready(self.snap)
                        create_request_body['sourceSnapshot'] = to_current_api_version(self.snap.link)
                    else:
                        create_request_body['sizeGb'] = self.size

                    create = True
                else:
                    self._check_attr('zone')
                    LOG.debug('Checking that disk already exists')
                    try:
                        disk_dict = connection.disks().get(disk=self.name, project=project_id,
                                                                            zone=zone).execute()
                        self.link = disk_dict['selfLink']
                    except HttpError, e:
                        code = int(e.resp['status'])
                        if code == 404:
                            raise storage2.VolumeNotExistsError(self.name)
                        else:
                            raise

                    if self.zone != zone:
                        # Volume is in different zone, snapshot it,
                        # create new volume from this snapshot, then attach
                        temp_snap = self.snapshot('volume')
                        garbage_can.append(temp_snap)
                        new_name = self.name + zone
                        create_request_body = dict(name=new_name,
                                                   sourceSnapshot=to_current_api_version(temp_snap.link))
                        create = True

                attach = False
                if create:
                    disk_name = create_request_body['name']
                    LOG.debug('Creating new GCE disk %s' % disk_name)
                    op = connection.disks().insert(project=project_id,
                                                   zone=zone,
                                                   body=create_request_body).execute()
                    gce_util.wait_for_operation(connection, project_id, op['name'], zone)
                    disk_dict = connection.disks().get(disk=disk_name,
                                                       project=project_id,
                                                       zone=zone).execute()
                    self.id = disk_dict['id']
                    self.link = disk_dict['selfLink']
                    self.zone = zone
                    self.name = disk_name
                    attach = True

                else:
                    if self.last_attached_to and self.last_attached_to != server_name:
                        LOG.debug("Making sure that disk %s detached from previous attachment place." % self.name)
                        try:
                            gce_util.ensure_disk_detached(connection,
                                                          project_id,
                                                          zone,
                                                          self.last_attached_to,
                                                          self.link)
                        except:
                            e = sys.exc_info()[1]
                            if 'resource was not found' in str(e):
                                raise storage2.VolumeNotExistsError(self.link)
                            raise
                        
                    attachment_inf = self._attachment_info(connection)
                    if attachment_inf:
                        disk_devicename = attachment_inf['deviceName']
                    else:
                        attach = True

                if attach:
                    LOG.debug('Attaching disk %s to current instance' % self.name)
                    try:
                        op = connection.instances().attachDisk(instance=server_name, project=project_id,
                                            zone=zone, body=dict(deviceName=self.name,
                                                                    source=self.link,
                                                                    mode="READ_WRITE",
                                                                    type="PERSISTENT")).execute()
                    except:
                        e = sys.exc_info()[1]
                        if 'resource was not found' in str(e):
                            raise storage2.VolumeNotExistsError(self.link)
                        raise

                    gce_util.wait_for_operation(connection, project_id, op['name'], zone=zone)
                    disk_devicename = self.name

                for i in range(10):
                    device = gce_util.devicename_to_device(disk_devicename)
                    if device:
                        break
                    LOG.debug('Device not found in system. Retrying in 1s.')
                    time.sleep(1)
                else:
                    raise storage2.StorageError("Disk should be attached, but corresponding device not found in system")

                self.device = device
                self.last_attached_to = server_name
                self.snap = None

            finally:
                # Perform cleanup
                for garbage in garbage_can:
                    try:
                        garbage.destroy(force=True)
                    except:
                        e = sys.exc_info()[1]
                        LOG.debug('Failed to destroy temporary storage object %s: %s', garbage, e)
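Note the for/else retry above: the else branch runs only when the loop completes without break, i.e. when the device never appeared within ten one-second retries, which is exactly when the StorageError should fire.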
Example no. 24
    def _ensure(self):
        # The snap should be applied after layout: download and extract data.
        # This could be done on an already ensured volume.
        # Example: resync slave data

        if not self._lvm_volume:
            # First of all, merge self config and snapshot config
            self.snap = storage2.snapshot(self.snap) if self.snap else None

            for attr in ('disk', 'fstype', 'size', 'vg', 'mpoint'):
                if not getattr(self, attr, None):
                    if not self.snap or not getattr(self.snap, attr, None):
                        raise storage2.StorageError(
                            'Missing ephemeral volume attribute "%s"' % attr)
                    setattr(self, attr, getattr(self.snap, attr))

            self.disk = storage2.volume(self.disk)
            # Compatibility with storage v1
            if self.disk.device and self.disk.type == 'base':
                if self.disk.device.startswith('/dev/sd'):
                    self.disk = storage2.volume(type='ec2_ephemeral',
                                                name='ephemeral0')
                elif 'google' in self.disk.device:
                    self.disk = storage2.volume(type='gce_ephemeral',
                                                name='ephemeral-disk-0')

            self._lvm_volume = storage2.volume(type='lvm',
                                               pvs=[self.disk],
                                               size=self.size + 'VG',
                                               vg=self.vg,
                                               name='data')

        self._lvm_volume.ensure()
        self.device = self._lvm_volume.device
        # To allow ensure(mkfs=True, mount=True) after volume passed
        # scalarizr 1st initialization
        self.fscreated = self.is_fs_created()

        if self.snap:
            self.snap = storage2.snapshot(self.snap)
            # umount device to allow filesystem re-creation
            if self.mounted_to():
                self.umount()
            self.mkfs(force=True)

            tmp_mpoint = not self.mpoint
            if tmp_mpoint:
                tmp_mpoint = tempfile.mkdtemp()
                self.mpoint = tmp_mpoint

            try:
                transfer = cloudfs.LargeTransfer(self.snap.path,
                                                 self.mpoint + '/')
                self.mount()
                if hasattr(self.snap, 'data_size'):
                    fs_free = coreutils.statvfs(self.mpoint)['avail']
                    if fs_free < int(self.snap.data_size):
                        raise storage2.StorageError(
                            'Not enough free space'
                            ' on device %s to restore snapshot.' % self.device)

                result = transfer.run()
                if result.get('failed'):
                    err = result['failed'][0]['exc_info'][1]
                    raise storage2.StorageError('Failed to download snapshot '
                                                'data. %s' % err)
            except:
                e = sys.exc_info()[1]
                raise storage2.StorageError("Snapshot restore error: %s" % e)
            finally:
                try:
                    self.umount()
                finally:
                    if tmp_mpoint:
                        self.mpoint = None
                        os.rmdir(tmp_mpoint)

            self.snap = None
Example no. 25
    def _ensure(self):
        def get_lv_size_kwarg(size):
            kwd = dict()
            if '%' in str(size):
                kwd['extents'] = size
            else:
                try:
                    int(size)
                    kwd['size'] = '%sG' % size
                except:
                    kwd['size'] = size
            return kwd

        if self.snap:
            pvs = []
            try:
                for snap in self.snap['pv_snaps']:
                    snap = storage2.snapshot(snap)
                    vol = storage2.volume(type=snap.type, snap=snap)
                    vol.ensure()
                    pvs.append(vol)
            except:
                for pv in pvs:
                    pv.destroy()
                raise
            self.pvs = pvs
            self.vg = self.snap['vg']
            self.name = self.snap['name']

        pv_volumes = []
        for pv_volume in self.pvs:
            pv_volume = storage2.volume(pv_volume)
            pv_volume.ensure()

            pvs = lvm2.pvs()
            if pv_volume.device not in pvs:
                if pv_volume.mounted_to():
                    pv_volume.umount()
                lvm2.pvcreate(pv_volume.device)
            pv_volumes.append(pv_volume)
        self.pvs = pv_volumes

        self._check_attr('vg')
        try:
            lv_info = self._lvinfo()
        except lvm2.NotFound:
            self._check_attr('size')

            try:
                lvm2.vgs(self.vg)
            except lvm2.NotFound:
                lvm2.vgcreate(self.vg, *[disk.device for disk in self.pvs])

            kwds = {'name': self.name}
            kwds.update(get_lv_size_kwarg(self.size))

            lvm2.lvcreate(self.vg, **kwds)
            lv_info = self._lvinfo()

        self._config.update({'device': lv_info.lv_path, 'snap': None})

        pvs_to_extend_vg = []
        for pv in self.pvs:
            pv_info = lvm2.pvs(pv.device).popitem()[1]

            if not pv_info.vg_name:
                pvs_to_extend_vg.append(pv_info.pv_name)
                continue

            if os.path.basename(self.vg) != pv_info.vg_name:
                raise storage2.StorageError(
                    'Can not add physical volume %s to volume group %s: already'
                    ' in volume group %s' %
                    (pv_info.pv_name, self.vg, pv_info.vg_name))

        if pvs_to_extend_vg:
            lvm2.vgextend(self.vg, *pvs_to_extend_vg)
            lvm2.lvextend(self.device, **get_lv_size_kwarg(self.size))
            if self.is_fs_created():
                fs = storage2.filesystem(self.fstype)
                if fs.features.get('resizable'):
                    fs.resize(self.device)

        if lv_info.lv_attr[4] == '-':
            lvm2.lvchange(self.device, available='y')
            util.wait_until(lambda: os.path.exists(self.device),
                            sleep=1,
                            timeout=30,
                            start_text='Waiting for device %s' % self.device,
                            error_text='Device %s not available' % self.device)
Example no. 26
    def _ensure(self):

        garbage_can = []
        zone = os.path.basename(__node__['gce']['zone'])
        project_id = __node__['gce']['project_id']
        server_name = __node__['server_id']

        try:
            connection = __node__['gce']['compute_connection']
        except:
            """ No connection, implicit check """
            try:
                self._check_attr('name')
            except:
                raise storage2.StorageError('Disk is not created yet, and GCE connection'
                                            ' is unavailable')
            device = gce_util.devicename_to_device(self.name)
            if not device:
                raise storage2.StorageError("Disk is not attached and GCE connection is unavailable")

            self.device = device
        else:

            try:
                # TODO(spike): raise VolumeNotExistsError when the disk referenced by link does not exist
                create = False
                if not self.link:
                    # Disk does not exist, create it first
                    create_request_body = dict(name=self.name, sizeGb=self.size)
                    if self.snap:
                        self.snap = storage2.snapshot(self.snap)
                        create_request_body['sourceSnapshot'] = self.snap.link
                    create = True
                else:
                    self._check_attr('zone')
                    if self.zone != zone:
                        # Volume is in different zone, snapshot it,
                        # create new volume from this snapshot, then attach
                        temp_snap = self.snapshot('volume')
                        garbage_can.append(temp_snap)
                        new_name = self.name + zone
                        create_request_body = dict(name=new_name,
                                                   sizeGb=self.size,
                                                   sourceSnapshot=temp_snap.link)
                        create = True

                attach = False
                if create:
                    disk_name = create_request_body['name']
                    LOG.debug('Creating new GCE disk %s' % disk_name)
                    op = connection.disks().insert(project=project_id,
                                                   zone=zone,
                                                   body=create_request_body).execute()
                    gce_util.wait_for_operation(connection, project_id, op['name'], zone)
                    disk_dict = connection.disks().get(disk=disk_name,
                                                       project=project_id,
                                                       zone=zone).execute()
                    self.id = disk_dict['id']
                    self.link = disk_dict['selfLink']
                    self.zone = zone
                    self.name = disk_name
                    attach = True

                else:
                    if self.last_attached_to and self.last_attached_to != server_name:
                        LOG.debug("Making sure that disk %s detached from previous attachment place." % self.name)
                        gce_util.ensure_disk_detached(connection, project_id, zone, self.last_attached_to, self.link)

                    attachment_inf = self._attachment_info(connection)
                    if attachment_inf:
                        disk_devicename = attachment_inf['deviceName']
                    else:
                        attach = True

                if attach:
                    LOG.debug('Attaching disk %s to current instance' % self.name)
                    op = connection.instances().attachDisk(
                                            instance=server_name,
                                            project=project_id,
                                            zone=zone,
                                            body=dict(
                                                            deviceName=self.name,
                                                            source=self.link,
                                                            mode="READ_WRITE",
                                                            type="PERSISTENT"
                                            )).execute()
                    gce_util.wait_for_operation(connection, project_id, op['name'], zone=zone)
                    disk_devicename = self.name

                device = gce_util.devicename_to_device(disk_devicename)
                if not device:
                    raise storage2.StorageError("Disk should be attached, but corresponding"
                                                                            " device not found in system")
                self.device = device
                self.last_attached_to = server_name
                self.snap = None

            finally:
                # Perform cleanup
                for garbage in garbage_can:
                    try:
                        garbage.destroy(force=True)
                    except:
                        pass
Example no. 27
	def _run(self):
		if self.backup_type:
			self._check_backup_type()
		rst_volume = None
		exc_info = None
		'''
		# Create custom my.cnf
		# XXX: it's not a good thing to do, but we need these hacks,
		# because when the handler calls restore.run() my.cnf is not patched yet
		shutil.copy(__mysql__['my.cnf'], '/tmp/my.cnf')
		mycnf = metaconf.Configuration('mysql')
		mycnf.read('/tmp/my.cnf')
		try:
			mycnf.options('mysqld')
		except metaconf.NoPathError:
			mycnf.add('mysqld')
		mycnf.set('mysqld/datadir', __mysql__['data_dir'])
		mycnf.set('mysqld/log-bin', __mysql__['binlog_dir'])
		mycnf.write('/tmp/my.cnf')
		'''
		
		my_defaults = my_print_defaults('mysqld')
		self._data_dir = os.path.normpath(my_defaults['datadir'])
		LOG.info('_run: datadir is "%s"' % self._data_dir)
		self._log_bin = os.path.normpath(my_defaults['log_bin'])
		if self._log_bin.startswith('/'):
			self._binlog_dir = os.path.dirname(self._log_bin)
		
		try:
			if self.snapshot:
				LOG.info('Creating restore volume from snapshot')
				if self.volume:
					# Clone volume object
					self.volume = storage2.volume(self.volume)
					rst_volume = self.volume.clone()
					rst_volume.snap = self.snapshot
				else:
					self.snapshot = storage2.snapshot(self.snapshot)
					rst_volume = storage2.volume(type=self.snapshot.type, 
											snap=self.snapshot)
				rst_volume.tags.update({'tmp': 1})
				rst_volume.mpoint = self.backup_dir
				rst_volume.ensure(mount=True)

	
			if not os.listdir(self.backup_dir):
				msg = 'Failed to find any backups in %s'
				raise Error(msg, self.backup_dir)
			
			backups = sorted(os.listdir(self.backup_dir))
			LOG.info('Preparing the base backup')
			base = backups.pop(0)
			target_dir = os.path.join(self.backup_dir, base)
			innobackupex(target_dir, 
						apply_log=True, 
						redo_only=True,
						user=__mysql__['root_user'],
						password=__mysql__['root_password'])
			for inc in backups:
				LOG.info('Preparing incremental backup %s', inc)
				innobackupex(target_dir,
							apply_log=True, 
							redo_only=True, 
							incremental_dir=os.path.join(self.backup_dir, inc),
							user=__mysql__['root_user'],
							password=__mysql__['root_password'])
			LOG.info('Preparing the full backup')
			innobackupex(target_dir, 
						apply_log=True, 
						user=__mysql__['root_user'],
						password=__mysql__['root_password'])
			
			LOG.info('Copying backup to datadir')
			self._mysql_init.stop()
			self._start_copyback()
			try:
				innobackupex(target_dir, copy_back=True)
				coreutils.chown_r(self._data_dir, 
								'mysql', 'mysql')
				self._mysql_init.start()
				self._commit_copyback()
			except:
				self._rollback_copyback()
				raise
		except:
			exc_info = sys.exc_info()
		finally:
			if rst_volume:
				LOG.info('Destroying restore volume')
				try:
					rst_volume.destroy(force=True)
				except:
					msg = 'Failed to destroy volume %s: %s'
					LOG.warn(msg, rst_volume.id, sys.exc_info()[1])
		if exc_info:
			raise exc_info[0], exc_info[1], exc_info[2]
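The prepare order here follows xtrabackup's incremental-restore rules: the base backup and every incremental are applied with redo_only=True, which replays committed transactions but leaves uncommitted ones for the next increment, and only the final apply-log pass without redo_only performs the rollback phase before copy_back returns the files to the datadir.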
Example no. 28
    def _ensure(self):

        garbage_can = []
        zone = os.path.basename(__node__['gce']['zone'])
        project_id = __node__['gce']['project_id']
        server_name = __node__['server_id']

        try:
            connection = __node__['gce'].connect_compute()
        except:
            e = sys.exc_info()[1]
            LOG.debug('Can not get GCE connection: %s' % e)
            """ No connection, implicit check """
            try:
                self._check_attr('name')
            except:
                raise storage2.StorageError(
                    'Disk is not created yet, and GCE connection is unavailable'
                )
            device = gce_util.devicename_to_device(self.name)
            if not device:
                raise storage2.StorageError(
                    "Disk is not attached and GCE connection is unavailable")

            self.device = device
        else:
            LOG.debug('Successfully created connection to cloud engine')
            try:
                create = False
                if not self.link:
                    # Disk does not exist, create it first
                    create_request_body = dict(name=self.name)
                    if self.snap:
                        snap_dict = dict(self.snap)
                        snap_dict['type'] = STORAGE_TYPE
                        self.snap = storage2.snapshot(snap_dict)
                        LOG.debug(
                            'Ensuring that snapshot is ready, before creating disk from it'
                        )
                        gce_util.wait_snapshot_ready(self.snap)
                        create_request_body[
                            'sourceSnapshot'] = to_current_api_version(
                                self.snap.link)
                    else:
                        create_request_body['sizeGb'] = self.size

                    create = True
                else:
                    self._check_attr('zone')
                    LOG.debug('Checking that disk already exists')
                    try:
                        disk_dict = connection.disks().get(
                            disk=self.name, project=project_id,
                            zone=zone).execute()
                        self.link = disk_dict['selfLink']
                    except HttpError, e:
                        code = int(e.resp['status'])
                        if code == 404:
                            raise storage2.VolumeNotExistsError(self.name)
                        else:
                            raise

                    if self.zone != zone:
                        # Volume is in different zone, snapshot it,
                        # create new volume from this snapshot, then attach
                        temp_snap = self.snapshot('volume')
                        garbage_can.append(temp_snap)
                        new_name = self.name + zone
                        create_request_body = dict(
                            name=new_name,
                            sourceSnapshot=to_current_api_version(
                                temp_snap.link))
                        create = True

                attach = False
                if create:
                    disk_name = create_request_body['name']
                    if "pd-standard" != self.disk_type:
                        disk_type = gce_util.get_disktype(
                            conn=connection,
                            project_id=project_id,
                            zone=zone,
                            disktype=self.disk_type)
                        create_request_body.update(
                            {'type': disk_type['selfLink']})

                    LOG.debug('Creating new GCE disk %s' % disk_name)
                    op = connection.disks().insert(
                        project=project_id,
                        zone=zone,
                        body=create_request_body).execute()
                    gce_util.wait_for_operation(connection, project_id,
                                                op['name'], zone)
                    disk_dict = connection.disks().get(disk=disk_name,
                                                       project=project_id,
                                                       zone=zone).execute()
                    self.id = disk_dict['id']
                    self.link = disk_dict['selfLink']
                    self.zone = zone
                    self.name = disk_name
                    attach = True

                else:
                    if self.last_attached_to and self.last_attached_to != server_name:
                        LOG.debug(
                            "Making sure that disk %s detached from previous attachment place."
                            % self.name)
                        try:
                            gce_util.ensure_disk_detached(
                                connection, project_id, zone,
                                self.last_attached_to, self.link)
                        except:
                            e = sys.exc_info()[1]
                            if 'resource was not found' in str(e):
                                raise storage2.VolumeNotExistsError(self.link)
                            raise

                    attachment_inf = self._attachment_info(connection)
                    if attachment_inf:
                        disk_devicename = attachment_inf['deviceName']
                    else:
                        attach = True

                if attach:
                    LOG.debug('Attaching disk %s to current instance' %
                              self.name)
                    try:
                        op = connection.instances().attachDisk(
                            instance=server_name,
                            project=project_id,
                            zone=zone,
                            body=dict(deviceName=self.name,
                                      source=self.link,
                                      mode="READ_WRITE",
                                      type="PERSISTENT")).execute()
                    except:
                        e = sys.exc_info()[1]
                        if 'resource was not found' in str(e):
                            raise storage2.VolumeNotExistsError(self.link)
                        raise

                    gce_util.wait_for_operation(connection,
                                                project_id,
                                                op['name'],
                                                zone=zone)
                    disk_devicename = self.name

                for i in range(10):
                    device = gce_util.devicename_to_device(disk_devicename)
                    if device:
                        break
                    LOG.debug('Device not found in system. Retrying in 1s.')
                    time.sleep(1)
                else:
                    raise storage2.StorageError(
                        "Disk should be attached, but corresponding device not found in system"
                    )

                self.device = device
                self.last_attached_to = server_name
                self.snap = None

            finally:
                # Perform cleanup
                for garbage in garbage_can:
                    try:
                        garbage.destroy(force=True)
                    except:
                        e = sys.exc_info()[1]
                        LOG.debug(
                            'Failed to destroy temporary storage object %s: %s',
                            garbage, e)