def i_upload_it_with_intentional_interrupt(step):
    world.destination = STORAGES[STORAGE]["url"]
    world.driver = STORAGES[STORAGE]["driver"]()

    lt = LargeTransfer(world.sources[0], world.destination,
                       chunk_size=20, num_workers=2)
    # Kill the transfer from its own completion callback, leaving a partial
    # upload and its manifest behind for the assertion steps that follow
    lt.on(transfer_complete=lambda *args: lt.kill())
    lt.run()

    world.manifest_url = os.path.join(world.destination, lt.transfer_id)

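# For contrast, an uninterrupted upload step would follow the same shape.
# A minimal sketch, assuming the same STORAGES fixture and lettuce `world`
# object used above; the function name itself is hypothetical:
def i_upload_it(step):
    world.destination = STORAGES[STORAGE]["url"]
    world.driver = STORAGES[STORAGE]["driver"]()

    lt = LargeTransfer(world.sources[0], world.destination,
                       chunk_size=20, num_workers=2)
    lt.run()  # blocks until every chunk has been uploaded

    # The manifest describing the uploaded chunks lives under the transfer id
    world.manifest_url = os.path.join(world.destination, lt.transfer_id)
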
def _run(self):
    client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                   __mysql__['root_password'])
    self._databases = client.list_databases()

    # Stream the dumps through LargeTransfer without tarring them first
    transfer = LargeTransfer(self._gen_src, self._gen_dst, 'upload',
                             tar_it=False, chunk_size=self.chunk_size)
    transfer.run()

    # Build a mysqldump restore from the chunks that reached cloud storage
    return backup.restore(type='mysqldump',
                          files=transfer.result()['completed'])

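# The _gen_src/_gen_dst pair above are generators feeding the transfer.
# A hypothetical sketch of what they might look like, assuming LargeTransfer
# accepts a generator of file-like sources plus a generator of remote
# prefixes; the mysqldump invocation and the _dst_prefix attribute are
# assumptions for illustration, not taken from the snippet above:
import subprocess

def _gen_src(self):
    for db in self._databases:
        # Stream each database straight out of mysqldump, no temp files
        proc = subprocess.Popen(
            ['mysqldump',
             '--user', __mysql__['root_user'],
             '--password=%s' % __mysql__['root_password'],
             db],
            stdout=subprocess.PIPE)
        yield proc.stdout

def _gen_dst(self):
    while True:
        yield self._dst_prefix  # one remote prefix per yielded source
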
def do_backup(op):
    try:
        # Flush Redis data to disk before uploading the dump files
        self.redis_instances.save_all()
        dbs = [r.db_path for r in self.redis_instances if r.db_path]

        cloud_storage_path = bus.platform.scalrfs.backups(BEHAVIOUR)  #? __node__.platform
        LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
        transfer = LargeTransfer(dbs, cloud_storage_path)
        result = transfer.run()
        result = transfer_result_to_backup_result(result)

        # Notify Scalr
        node.__node__.messaging.send(
            DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
            dict(db_type=BEHAVIOUR,
                 status='ok',
                 backup_parts=result))
        return result  #?
    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error
        node.__node__.messaging.send(
            DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
            dict(db_type=BEHAVIOUR,
                 status='error',
                 last_error=str(e)))

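# Several of these handlers funnel the LargeTransfer manifest through
# transfer_result_to_backup_result() before reporting to Scalr. Its body is
# not shown in this collection; a plausible reconstruction, assuming the
# manifest layout used inline in the Postgres variants below
# (manifest['files'][N]['chunks'] with each chunk indexed as c[0] = name and
# c[2] = size, and manifest.cloudfs_path pointing at the manifest object):
def transfer_result_to_backup_result(manifest):
    # Flatten the chunk lists into the [{'path': ..., 'size': ...}, ...]
    # structure Scalr expects as backup_parts
    base = os.path.dirname(manifest.cloudfs_path)
    return [dict(path=os.path.join(base, chunk[0]), size=chunk[2])
            for f in manifest['files']
            for chunk in f['chunks']]
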
def do_backup(op):
    tmpdir = None
    dumps = []
    tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
    try:
        # Get databases list
        psql = PSQL(user=self.postgresql.root_user.name)
        databases = psql.list_pg_databases()
        if 'template0' in databases:
            databases.remove('template0')

        if not os.path.exists(tmp_path):
            os.makedirs(tmp_path)

        # Dump all databases
        LOG.info("Dumping all databases")
        tmpdir = tempfile.mkdtemp(dir=tmp_path)
        chown_r(tmpdir, self.postgresql.root_user.name)

        def _single_backup(db_name):
            dump_path = tmpdir + os.sep + db_name + '.sql'
            pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
            su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
            err = system2(su_args)[1]
            if err:
                raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  #?
            dumps.append(dump_path)

        for db_name in databases:
            _single_backup(db_name)

        cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

        suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
        backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

        LOG.info("Uploading backup to %s with tags %s", cloud_storage_path, backup_tags)
        trn = LargeTransfer(dumps, cloud_storage_path, tags=backup_tags)
        manifest = trn.run()
        LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)

        # Notify Scalr
        result = transfer_result_to_backup_result(manifest)
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                dict(db_type=BEHAVIOUR,
                                     status='ok',
                                     backup_parts=result))
        return result
    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                dict(db_type=BEHAVIOUR,
                                     status='error',
                                     last_error=str(e)))

def on_DbMsr_CreateBackup(self, message):
    try:
        op = operation(name=self._op_backup, phases=[{
            'name': self._phase_backup,
            'steps': [self._step_copy_database_file,
                      self._step_upload_to_cloud_storage]
        }])
        op.define()

        with op.phase(self._phase_backup):

            with op.step(self._step_copy_database_file):
                # Flush redis data on disk before creating backup
                LOG.info("Dumping Redis data on disk")
                self.redis_instances.save_all()
                dbs = [r.db_path for r in self.redis_instances]

            with op.step(self._step_upload_to_cloud_storage):
                cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
                LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)

                transfer = LargeTransfer(dbs, cloud_storage_path)
                result = transfer.run()
                result = handlers.transfer_result_to_backup_result(result)

        op.ok(data=result)

        # Notify Scalr
        self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
            db_type=BEHAVIOUR,
            status='ok',
            backup_parts=result
        ))
    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error
        self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
            db_type=BEHAVIOUR,
            status='error',
            last_error=str(e)
        ))

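# The operation API used in this handler follows a define / phase / step / ok
# lifecycle. A minimal skeleton distilled from the handler above; the phase
# and step names are placeholders, not the real self._op_* attributes:
op = operation(name='backup', phases=[{
    'name': 'backup',
    'steps': ['copy', 'upload']
}])
op.define()

with op.phase('backup'):
    with op.step('copy'):
        pass  # gather the files to upload (e.g. Redis dump paths)
    with op.step('upload'):
        result = LargeTransfer(dbs, cloud_storage_path).run()  # as above

op.ok(data=result)  # presumably marks the operation finished with its payload
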
def on_DbMsr_CreateBackup(self, message):
    #TODO: Think about how to move most of this into the Postgresql class
    # Retrieve password for scalr pg user
    tmpdir = backup_path = None
    try:
        # Get databases list
        psql = PSQL(user=self.postgresql.root_user.name)
        databases = psql.list_pg_databases()
        if 'template0' in databases:
            databases.remove('template0')

        op = operation(name=self._op_backup, phases=[{
            'name': self._phase_backup
        }])
        op.define()

        with op.phase(self._phase_backup):

            if not os.path.exists(self._tmp_path):
                os.makedirs(self._tmp_path)

            # Defining archive name and path
            backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S') + '.tar.gz'
            backup_path = os.path.join(self._tmp_path, backup_filename)

            # Creating archive
            backup = tarfile.open(backup_path, 'w:gz')

            # Dump all databases
            LOG.info("Dumping all databases")
            tmpdir = tempfile.mkdtemp(dir=self._tmp_path)
            chown_r(tmpdir, self.postgresql.root_user.name)

            def _single_backup(db_name):
                dump_path = tmpdir + os.sep + db_name + '.sql'
                pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                err = system2(su_args)[1]
                if err:
                    raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
                backup.add(dump_path, os.path.basename(dump_path))

            make_backup_steps(databases, op, _single_backup)

            backup.close()

            with op.step(self._step_upload_to_cloud_storage):
                # Creating list of full paths to archive chunks
                #if os.path.getsize(backup_path) > __postgresql__['pgdump_chunk_size']:
                #    parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, __postgresql__['pgdump_chunk_size'], tmpdir)]
                #else:
                #    parts = [backup_path]
                #sizes = [os.path.getsize(file) for file in parts]

                cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
                LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
                trn = LargeTransfer(backup_path, cloud_storage_path)
                manifest = trn.run()
                LOG.info("Postgresql backup uploaded to cloud storage under %s/%s",
                         cloud_storage_path, backup_filename)

                result = list(dict(path=os.path.join(cloud_storage_path, c[0]), size=c[2])
                              for c in manifest['files'][0]['chunks'])
                op.ok(data=result)

        # Notify Scalr
        self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
            db_type=BEHAVIOUR,
            status='ok',
            backup_parts=result
        ))
    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error
        self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
            db_type=BEHAVIOUR,
            status='error',
            last_error=str(e)
        ))

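# make_backup_steps() is called above but not defined in this collection.
# A hypothetical reconstruction, assuming it simply wraps each per-database
# dump in its own operation step so progress is reported per database; only
# the call signature comes from the handler above, the step name is made up:
def make_backup_steps(databases, op, single_backup):
    for db_name in databases:
        with op.step("Backup '%s'" % db_name):
            single_backup(db_name)
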
def do_backup(op):
    tmpdir = backup_path = None
    tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
    try:
        # Get databases list
        psql = PSQL(user=self.postgresql.root_user.name)
        databases = psql.list_pg_databases()
        if 'template0' in databases:
            databases.remove('template0')

        if not os.path.exists(tmp_path):
            os.makedirs(tmp_path)

        # Defining archive name and path
        backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S') + '.tar.gz'
        backup_path = os.path.join(tmp_path, backup_filename)

        # Creating archive
        backup_obj = tarfile.open(backup_path, 'w:gz')

        # Dump all databases
        LOG.info("Dumping all databases")
        tmpdir = tempfile.mkdtemp(dir=tmp_path)
        chown_r(tmpdir, self.postgresql.root_user.name)

        def _single_backup(db_name):
            dump_path = tmpdir + os.sep + db_name + '.sql'
            pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
            su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
            err = system2(su_args)[1]
            if err:
                raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  #?
            backup_obj.add(dump_path, os.path.basename(dump_path))

        for db_name in databases:
            _single_backup(db_name)

        backup_obj.close()

        # Creating list of full paths to archive chunks
        #if os.path.getsize(backup_path) > __postgresql__['pgdump_chunk_size']:
        #    parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, __postgresql__['pgdump_chunk_size'], tmpdir)]
        #else:
        #    parts = [backup_path]
        #sizes = [os.path.getsize(file) for file in parts]

        cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

        suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
        backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

        LOG.info("Uploading backup to %s with tags %s", cloud_storage_path, backup_tags)
        trn = LargeTransfer(backup_path, cloud_storage_path, tags=backup_tags)
        manifest = trn.run()
        LOG.info("Postgresql backup uploaded to cloud storage under %s/%s",
                 cloud_storage_path, backup_filename)

        result = list(dict(path=os.path.join(os.path.dirname(manifest.cloudfs_path), c[0]),
                           size=c[2])
                      for c in manifest['files'][0]['chunks'])

        # Notify Scalr
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                dict(db_type=BEHAVIOUR,
                                     status='ok',
                                     backup_parts=result))
        return result  #?
    except (Exception, BaseException), e:
        LOG.exception(e)
        # Notify Scalr about error
        __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                dict(db_type=BEHAVIOUR,
                                     status='error',
                                     last_error=str(e)))
