Example #1
	def on_DbMsr_CreateBackup(self, message):
		LOG.debug("on_DbMsr_CreateBackup")

		# Disabled alternative implementation that uses the backup/operation API:
		'''
		bak = backup.backup(
				type='mysqldump', 
				file_per_database=True,
				tmpdir=__mysql__['tmp_dir'],
				cloudfsdir=self._platform.scalrfs.backups(__mysql__['behavior']),
				chunk_size=__mysql__['mysqldump_chunk_size'])
		restore = None

		try:
			op = operation(name=self._op_backup, phases=[{
				'name': self._phase_backup, 
				'steps': [self._phase_backup]
			}])
			op.define()
			with op.phase(self._phase_backup):
				with op.step(self._phase_backup):
					restore = bak.run()
					
					# Example of the restore.files structure:
					# - type: mysqldump
					# - files:
					#   - size: 1234567
					#     path: s3://farm-2121-44/backups/mysql/20120314.tar.gz.part0
					#   - size: 3524567
					#     path: s3://farm-2121-44/backups/mysql/20120314.tar.gz.part1
					#result = [dict(path=path, size=size) for path, size in zip(cloud_files, sizes)]
			op.ok(data=restore.files)
	
			# Notify Scalr
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = __mysql__['behavior'],
				status = 'ok',
				backup_parts = restore.files
			))
		except:
			exc = sys.exc_info()[1]
			LOG.exception(exc)
			
			# Notify Scalr about error
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = __mysql__['behavior'],
				status = 'error',
				last_error = str(exc)
			))

		'''


		tmp_basedir = __mysql__['tmp_dir']
		if not os.path.exists(tmp_basedir):
			os.makedirs(tmp_basedir)		
		backup_path = None
		tmpdir = None
		try:
			# Get databases list
			databases = self.root_client.list_databases()
			
			op = operation(name=self._op_backup, phases=[{
				'name': self._phase_backup
			}])
			op.define()			

			with op.phase(self._phase_backup):
				# Dump all databases
				LOG.info("Dumping all databases")
				tmpdir = tempfile.mkdtemp(dir=tmp_basedir)

				backup_filename = 'mysql-backup-%s.tar.gz' % time.strftime('%Y-%m-%d-%H:%M:%S') 
				backup_path = os.path.join(tmpdir, backup_filename)
				
				# Creating archive 
				backup = tarfile.open(backup_path, 'w:gz')
				mysqldump = mysql_svc.MySQLDump(root_user=__mysql__['root_user'],
									root_password=__mysql__['root_password'])
				dump_options = __mysql__['mysqldump_options'].split(' ')

				def _single_backup(db_name):
					dump_path = os.path.join(tmpdir, db_name + '.sql') 
					mysqldump.create(db_name, dump_path, dump_options)
					backup.add(dump_path, os.path.basename(dump_path))

				make_backup_steps(databases, op, _single_backup)
						
				backup.close()
				
			with op.step(self._step_upload_to_cloud_storage):
				# Creating list of full paths to archive chunks
				if os.path.getsize(backup_path) > __mysql__['mysqldump_chunk_size']:
					parts = [os.path.join(tmpdir, fname) for fname in
							filetool.split(backup_path, backup_filename,
								__mysql__['mysqldump_chunk_size'], tmpdir)]
				else:
					parts = [backup_path]
				sizes = [os.path.getsize(fname) for fname in parts]
						
				cloud_storage_path = self._platform.scalrfs.backups('mysql')
				LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
				trn = transfer.Transfer()
				cloud_files = trn.upload(parts, cloud_storage_path)
				LOG.info("Mysql backup uploaded to cloud storage under %s/%s", 
								cloud_storage_path, backup_filename)

			result = [dict(path=path, size=size) for path, size in zip(cloud_files, sizes)]
			op.ok(data=result)
			
			# Notify Scalr
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = __mysql__['behavior'],
				status = 'ok',
				backup_parts = result
			))
						
		except BaseException, e:
			LOG.exception(e)
			
			# Notify Scalr about error
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = __mysql__['behavior'],
				status = 'error',
				last_error = str(e)
			))
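
Both this example and Example #3 call a make_backup_steps() helper that is not part of the listing. A minimal sketch of what it might do, assuming op.step() accepts an arbitrary step title (an assumption; the real helper in the handler module may differ):

	def make_backup_steps(databases, op, backup_fn):
		# Hypothetical sketch: run the per-database backup callback
		# inside one operation step per database.
		for db_name in databases:
			with op.step("Backup database '%s'" % db_name):
				backup_fn(db_name)
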
Example #2
	def on_DbMsr_CreateBackup(self, message):
		tmpdir = backup_path = None
		try:
			op = operation(name=self._op_backup, phases=[{
				'name': self._phase_backup,
				'steps': [self._step_copy_database_file,
						self._step_upload_to_cloud_storage]
			}])
			op.define()

			with op.phase(self._phase_backup):

				with op.step(self._step_copy_database_file):
					# Flush redis data on disk before creating backup
					LOG.info("Dumping Redis data on disk")
					self.redis_instances.save_all()

					# Dump all databases
					LOG.info("Dumping all databases")
					tmpdir = tempfile.mkdtemp()

					# Defining archive name and path
					backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S') + '.tar.gz'
					backup_path = os.path.join('/tmp', backup_filename)
					dbs = [r.db_path for r in self.redis_instances]

					# Creating archive 
					backup = tarfile.open(backup_path, 'w:gz')

					for src_path in dbs:
						fname = os.path.basename(src_path)
						dump_path = os.path.join(tmpdir, fname)
						if not os.path.exists(src_path):
							LOG.info('%s DB file %s does not exist. Nothing to back up.', BEHAVIOUR, src_path)
						else:
							shutil.copyfile(src_path, dump_path)
							backup.add(dump_path, fname)
					backup.close()

					# Creating list of full paths to archive chunks
					if os.path.getsize(backup_path) > BACKUP_CHUNK_SIZE:
						parts = [os.path.join(tmpdir, fname) for fname in
								split(backup_path, backup_filename, BACKUP_CHUNK_SIZE, tmpdir)]
					else:
						parts = [backup_path]
					sizes = [os.path.getsize(fname) for fname in parts]

				with op.step(self._step_upload_to_cloud_storage):

					cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
					LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
					trn = transfer.Transfer()
					cloud_files = trn.upload(parts, cloud_storage_path)
					LOG.info("%s backup uploaded to cloud storage under %s/%s" %
					         (BEHAVIOUR, cloud_storage_path, backup_filename))

			result = [dict(path=path, size=size) for path, size in zip(cloud_files, sizes)]
			op.ok(data=result)

			# Notify Scalr
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = BEHAVIOUR,
				status = 'ok',
				backup_parts = result
			))

		except BaseException, e:
			LOG.exception(e)

			# Notify Scalr about error
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = BEHAVIOUR,
				status = 'error',
				last_error = str(e)
			))
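
The split() call above (and filetool.split() in Example #1) is also not shown in the listing. Below is a rough, self-contained sketch with the same call shape, split(path, filename, chunk_size, dest_dir), returning chunk file names that the callers join back onto tmpdir. It is an illustration rather than the project's actual implementation; the .partN suffix is taken from the commented sample paths in Example #1.

	import os

	def split(path, filename, chunk_size, dest_dir):
		# Illustrative sketch: cut `path` into pieces of at most
		# chunk_size bytes, named <filename>.part0, <filename>.part1, ...
		# inside dest_dir, and return the chunk file names.
		names = []
		index = 0
		with open(path, 'rb') as src:
			while True:
				data = src.read(chunk_size)
				if not data:
					break
				name = '%s.part%s' % (filename, index)
				with open(os.path.join(dest_dir, name), 'wb') as dst:
					dst.write(data)
				names.append(name)
				index += 1
		return names
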
Example #3
	def on_DbMsr_CreateBackup(self, message):
		# TODO: consider moving most of this into the Postgresql class
		tmpdir = backup_path = None
		try:
			# Get databases list
			psql = PSQL(user=self.postgresql.root_user.name)
			databases = psql.list_pg_databases()
			if 'template0' in databases:
				databases.remove('template0')
			
			
			op = operation(name=self._op_backup, phases=[{
				'name': self._phase_backup
			}])
			op.define()			
			
			with op.phase(self._phase_backup):
			
				if not os.path.exists(self._tmp_path):
					os.makedirs(self._tmp_path)
					
				# Defining archive name and path
				backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S') + '.tar.gz'
				backup_path = os.path.join(self._tmp_path, backup_filename)
				
				# Creating archive 
				backup = tarfile.open(backup_path, 'w:gz')
	
				# Dump all databases
				self._logger.info("Dumping all databases")
				tmpdir = tempfile.mkdtemp(dir=self._tmp_path)		
				rchown(self.postgresql.root_user.name, tmpdir)

				def _single_backup(db_name):
					dump_path = os.path.join(tmpdir, db_name + '.sql')
					pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
					su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
					err = system2(su_args)[1]
					if err:
						raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
					backup.add(dump_path, os.path.basename(dump_path))	

				make_backup_steps(databases, op, _single_backup)						

				backup.close()
				
				with op.step(self._step_upload_to_cloud_storage):
					# Creating list of full paths to archive chunks
					if os.path.getsize(backup_path) > BACKUP_CHUNK_SIZE:
						parts = [os.path.join(tmpdir, fname) for fname in
								split(backup_path, backup_filename, BACKUP_CHUNK_SIZE, tmpdir)]
					else:
						parts = [backup_path]
					sizes = [os.path.getsize(fname) for fname in parts]
						
					cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
					self._logger.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
					trn = transfer.Transfer()
					cloud_files = trn.upload(parts, cloud_storage_path)
					self._logger.info("Postgresql backup uploaded to cloud storage under %s/%s", 
									cloud_storage_path, backup_filename)
			
			result = [dict(path=path, size=size) for path, size in zip(cloud_files, sizes)]
			op.ok(data=result)
				
			# Notify Scalr
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = BEHAVIOUR,
				status = 'ok',
				backup_parts = result
			))
						
		except BaseException, e:
			self._logger.exception(e)
			
			# Notify Scalr about error
			self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
				db_type = BEHAVIOUR,
				status = 'error',
				last_error = str(e)
			))
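
None of the three handlers removes tmpdir or backup_path after the upload, so each backup leaves its local dump files behind. Assuming the temporary files are no longer needed once the result message is sent (the archive now lives in cloud storage), a finally clause along these lines could close out each try block; it relies only on os and shutil, which the examples already use (shutil appears in Example #2, os throughout):

		finally:
			# Sketch: remove local artifacts whether the upload succeeded
			# or failed; both names are initialized to None before the try.
			if backup_path and os.path.exists(backup_path):
				os.remove(backup_path)
			if tmpdir and os.path.exists(tmpdir):
				shutil.rmtree(tmpdir, ignore_errors=True)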