Example #1
0
	def _create(self, volume, snapshot, snap_lv, tranzit_path,  complete_cb):
		"""
		Stream the contents of LVM snapshot volume `snap_lv` into compressed
		chunks and upload them to the volume's snapshot backend.

		Pipeline: mount snap_lv read-only -> tar | pigz/gzip -> self._split
		(writes '<snapshot.id>.data' chunks into `tranzit_path`) -> two
		self._uploader threads pushing chunks to volume.snap_backend['path'].

		On success marks the snapshot COMPLETED in self._state_map; on any
		failure marks it FAILED and logs the exception. Worker-thread errors
		are surfaced through self._inner_exc_info and re-raised here.

		NOTE(review): `complete_cb` is not used anywhere in this body --
		confirm whether completion is signalled solely via self._return_ev.
		"""
		try:
			chunk_prefix = '%s.data' % snapshot.id
			snapshot.path = None
			# Temporary mountpoint for the read-only snapshot volume.
			snap_mpoint = mkdtemp()
			try:
				opts = []
				if volume.fstype == 'xfs':
					# XFS refuses to mount a snapshot with a duplicate UUID
					# while the origin is mounted; 'nouuid' bypasses the check.
					opts += ['-o', 'nouuid,ro']
				mount(snap_lv, snap_mpoint, opts)
				tar_cmd = ['tar', 'cp', '-C', snap_mpoint, '.']

				# Prefer pigz (parallel gzip) when installed; fall back to gzip.
				pigz_bins = whereis('pigz')
				compress_cmd = [pigz_bins[0] if pigz_bins else 'gzip', '-5']

				self._logger.debug("Creating and compressing snapshot data.")
				tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
				compress = subprocess.Popen(compress_cmd, stdin=tar.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
				tar.stdout.close() # Allow tar to receive a SIGPIPE if compress exits.
				# Splitter thread cuts the compressed stream into chunk files
				# under tranzit_path for the uploader threads to pick up.
				split = threading.Thread(target=self._split, name='split', 
						  args=(compress.stdout, tranzit_path, chunk_prefix, snapshot))
				split.start()

				# Two uploaders drain chunks concurrently with compression.
				uploaders = []
				for i in range(2):
					uploader = threading.Thread(name="Uploader-%s" % i, target=self._uploader, 
											  args=(volume.snap_backend['path'], snapshot))
					self._logger.debug("Starting uploader '%s'", uploader.getName())
					
					uploader.start()
					uploaders.append(uploader)
				self._logger.debug('uploaders started. waiting compress')

				compress.wait()
				self._logger.debug('compress completed (code: %s). waiting split', compress.returncode)
				if compress.returncode:
					raise StorageError('Compress process terminated with exit code %s. <err>: %s' % (compress.returncode, compress.stderr.read()))				
					
				split.join()
				self._logger.debug('split completed. waiting uploaders')

				for uploader in uploaders:
					uploader.join()
				self._logger.debug('uploaders completed')

				# Re-raise any exception captured inside a worker thread,
				# preserving its original traceback (Python 2 three-arg raise).
				if self._inner_exc_info:
					t, e, s = self._inner_exc_info
					raise t, e, s

			finally:
				# Always release waiters and tear down mount + snapshot LV,
				# even when the pipeline failed part-way.
				self._return_ev.set()				
				umount(snap_mpoint, options=('-f',))
				os.rmdir(snap_mpoint)
				self._lvm.remove_lv(snap_lv)
				self._inner_exc_info = None
			self._state_map[snapshot.id] = Snapshot.COMPLETED
		except (Exception, BaseException), e:
			self._state_map[snapshot.id] = Snapshot.FAILED
			self._logger.exception('Snapshot creation failed. %s' % e)
Example #2
0
	def handle_request(self, req_message, resp_message):
		"""
		Quiesce Cassandra, unmount its storage device and answer with a
		freshly-created snapshot id of the storage volume.
		"""
		self.umounted = False
		self.device_name = cassandra.ini.get(CNF_SECTION, OPT_STORAGE_DEVICE_NAME)

		# Stop the service and flush pending writes before taking
		# the device offline.
		cassandra.stop_service()
		system2('sync', shell=True)

		fstool.umount(self.device_name)
		self.umounted = True

		# The volume is quiescent now -- snapshot it.
		vol_id = cassandra.ini.get(CNF_SECTION, OPT_STORAGE_VOLUME_ID)
		snap_id = cassandra.create_snapshot(vol_id)
		resp_message.body.update({
			'status': 'ok',
			'snapshot_id': snap_id,
			'timestamp': time.strftime('%Y-%m-%d %H-%M')
		})
Example #3
0
	def restore(self, queue, volume, download_finished):
		"""
		Reassemble downloaded snapshot chunks and unpack them onto `volume`.

		Mounts `volume` on a temporary directory, then pipes the chunk stream
		(fed by self.concat_chunks from `queue` until `download_finished`)
		through gzip -d | tar x onto that mountpoint.

		Raises Exception if the decompressor or tar exits non-zero.
		"""
		tmp_mpoint = mkdtemp()
		volume.mount(tmp_mpoint)
		try:
			# Prefer pigz (parallel gzip) when installed -- use its resolved
			# path, consistent with snapshot creation; fall back to gzip.
			pigz_bins = whereis('pigz')
			cmd1 = (pigz_bins[0] if pigz_bins else 'gzip', '-d')
			cmd2 = ('tar', 'px', '-C', tmp_mpoint)

			compressor = subprocess.Popen(cmd1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
			tar	 = subprocess.Popen(cmd2, stdin=compressor.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
			compressor.stdout.close() # Allow decompressor to receive a SIGPIPE if tar exits.
			self.concat_chunks(queue, download_finished, compressor.stdin)

			# EOF on stdin lets the decompressor finish and exit.
			compressor.stdin.close()
			r_code = compressor.wait()
			if r_code:
				raise Exception('Archiver finished with return code %s' % r_code)

			r_code = tar.wait()
			if r_code:
				raise Exception('Tar finished with return code %s' % r_code)
		finally:
			umount(mpoint=tmp_mpoint, options=('-f', ))
			# Don't leak the temporary mountpoint directory.
			os.rmdir(tmp_mpoint)
Example #4
0
	def _umount_detach_delete_volume(self, devname, volume):
		"""
		Tear down an EBS volume: unmount its device, detach it from the
		instance, then delete it. Order matters -- each step requires the
		previous one to have completed.
		"""
		fstool.umount(devname)
		ebstool.detach_volume(None, volume, logger=self._logger)
		volume.delete()
Example #5
0
	def rebundle(self):
		"""
		Build a GCE machine image from the running system and register it.

		Steps: create a raw disk image file sized after the root filesystem,
		partition and format it, rsync the live root filesystem into it,
		clean it up (special devices, rolebuilder user, fstab UUID), pack it
		as <role>.tar.gz, upload to a throwaway GCS bucket and register it
		via the Compute images API. Returns '<project>/images/<image-name>'.

		NOTE(review): the cleanup block at the end (objects/buckets delete)
		references `tmp_bucket_name` and `arch_name`, which are only bound
		once the upload section runs -- if rebundling fails earlier, that
		`finally` raises NameError instead of the original error. Confirm
		and restructure separately.
		"""
		rebundle_dir = tempfile.mkdtemp()

		try:
			pl = bus.platform
			proj_id = pl.get_numeric_project_id()
			proj_name = pl.get_project_id()
			cloudstorage = pl.new_storage_client()

			tmp_mount_dir = os.path.join(rebundle_dir, 'root')
			os.makedirs(tmp_mount_dir)

			image_name	= 'disk.raw'
			image_path	= os.path.join(rebundle_dir, image_name)

			# Stats for the root ('/') filesystem entry from df output.
			root = filter(lambda x: x.mpoint == '/', filetool.df())[0]

			LOG.debug('Creating image file %s' % image_path)
			# Sparse file sized to the root fs plus 1 slack unit
			# (root.size presumably in KB, giving bytes -- TODO confirm).
			with open(image_path, 'w') as f:
				f.truncate(root.size*1024 + 1*1024)

			try:

				LOG.debug('Creating partition table on image')
				system2(('parted', image_path, 'mklabel', 'msdos'))
				# Partition end in MB (Python 2 integer division of KB size).
				system2(('parted', image_path, 'mkpart', 'primary', 'ext2', 1, str(root.size/1024)))

				# Map disk image
				out = system2(('kpartx', '-av', image_path))[0]
				try:
					# kpartx exposes the image's partition as
					# /dev/mapper/loopNp1; recover the loop device from output.
					loop = re.search('(/dev/loop\d+)', out).group(1)
					root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

					LOG.info('Creating filesystem')
					fstool.mkfs(root_dev_name, 'ext4')
					# Fresh UUID; the copied /etc/fstab is patched to it below.
					dev_uuid = uuid.uuid4()
					system2(('tune2fs', '-U', str(dev_uuid), root_dev_name))

					fstool.mount(root_dev_name, tmp_mount_dir)
					try:
						# Exclude every mountpoint except '/' so foreign
						# filesystems don't end up inside the image.
						lines = system2(('/bin/mount', '-l'))[0].splitlines()
						exclude_dirs = set()
						for line in lines:
							mpoint = line.split()[2]
							if mpoint != '/':
								exclude_dirs.add(mpoint)

						exclude_dirs.update(self.exclude_dirs)

						rsync = filetool.Rsync()
						rsync.source('/').dest(tmp_mount_dir).sparse()
						rsync.hardlinks().archive().times()
						rsync.exclude([os.path.join(ex, '**') for ex in exclude_dirs])
						rsync.exclude(self.exclude_files)
						rsync.exclude(self._excludes)
						LOG.info('Copying root filesystem to image')
						rsync.execute()

						LOG.info('Cleanup image')
						self._create_spec_devices(tmp_mount_dir)

						LOG.debug('Removing roles-builder user')
						# userdel must run inside the image, hence the
						# chroot driven through an interactive shell.
						sh = pexpect.spawn('/bin/sh')
						try:
							sh.sendline('chroot %s' % tmp_mount_dir)
							sh.expect('#')
							sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
							sh.expect('#')
						finally:
							sh.close()

						""" Patch fstab"""
						# Point the root entry at the image's new UUID.
						fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
						if os.path.exists(fstab_path):
							with open(fstab_path) as f:
								fstab = f.read()

							new_fstab = re.sub('UUID=\S+\s+/\s+(.*)', 'UUID=%s / \\1' % dev_uuid, fstab)

							with open(fstab_path, 'w') as f:
								f.write(new_fstab)

					finally:
						fstool.umount(device=root_dev_name)
				finally:
					# Unmap the image's partitions.
					system2(('kpartx', '-d', image_path))

				LOG.info('Compressing image.')
				arch_name = '%s.tar.gz' % self._role_name.lower()
				arch_path = os.path.join(rebundle_dir, arch_name)

				tar = filetool.Tar()
				tar.create().gzip().sparse()
				tar.archive(arch_path)
				tar.add(image_name, rebundle_dir)
				system2(str(tar), shell=True)

			finally:
				os.unlink(image_path)

			try:
				LOG.info('Uploading compressed image to cloud storage')
				uploader = transfer.Transfer(logger=LOG)
				tmp_bucket_name = 'scalr-images-%s-%s' % (
									random.randint(1,1000000), int(time.time()))

				try:
					remote_path = 'gcs://%s/' % tmp_bucket_name
					uploader.upload((arch_path,), remote_path)
				except:
					# Best-effort cleanup of a partially-created bucket,
					# then re-raise the original upload failure.
					try:
						objs = cloudstorage.objects()
						objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
					except:
						pass

					cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
					raise

			finally:
				os.unlink(arch_path)

		finally:
			shutil.rmtree(rebundle_dir)

		try:
			# GCE image names may not contain underscores.
			goog_image_name = self._role_name.lower().replace('_', '-')
			LOG.info('Registering new image %s' % goog_image_name)
			# TODO: check duplicate names
			compute = pl.new_compute_client()

			image_url = 'http://storage.googleapis.com/%s/%s' % (
											tmp_bucket_name, arch_name)
			req_body = dict(
				name=goog_image_name,
				sourceType='RAW',
				rawDisk=dict(
					containerType='TAR',
					source=image_url
				)
			)

			req = compute.images().insert(project=proj_id, body=req_body)
			operation = req.execute()['name']

			LOG.info('Waiting for image to register')
			def image_is_ready():
				# Poll the insert operation; surface aggregated API errors.
				req = compute.operations().get(project=proj_id, operation=operation)
				res = req.execute()
				if res['status'] == 'DONE':
					if res.get('error'):
						errors = []
						for e in res['error']['errors']:
							err_text = '%s: %s' % (e['code'], e['message'])
							errors.append(err_text)
						raise Exception('\n'.join(errors))
					return True
				return False
			wait_until(image_is_ready, logger=LOG, timeout=600)

		finally:
			# The source archive is no longer needed once registration ends.
			objs = cloudstorage.objects()
			objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
			cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

		return '%s/images/%s' % (proj_name, goog_image_name)