Example #1
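Dumps every database except template0 to per-database .sql files via pg_dump run as the PostgreSQL root user, uploads the dumps to cloud storage with LargeTransfer, and reports the outcome to Scalr through node messaging.
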
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')
                
                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)       
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
                    dumps.append(dump_path)


                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

                suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))
                trn = LargeTransfer(dumps, cloud_storage_path, tags=backup_tags)
                manifest = trn.run()
                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                    
                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='ok',
                                             backup_parts=result))

                return result
                            
            except (Exception, BaseException), e:
                LOG.exception(e)
                
                # Notify Scalr about error
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='error',
                                             last_error=str(e)))
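
For reference, the command _single_backup hands to system2() expands to a plain su call. A sketch with illustrative values (the resolved paths behind the PG_DUMP and SU_EXEC constants and the user/database names are assumptions, not values from the examples):

    # Illustrative expansion of _single_backup's command line; the binary
    # paths and names below are stand-ins for PG_DUMP, SU_EXEC and the
    # runtime values, which scalarizr resolves elsewhere.
    pg_args = '%s %s --no-privileges -f %s' % (
        '/usr/bin/pg_dump', 'mydb', '/pg_storage/tmp/tmpAbC12/mydb.sql')
    su_args = ['/bin/su', '-', 'postgres', '-c', pg_args]
    # i.e. su - postgres -c "/usr/bin/pg_dump mydb --no-privileges -f /pg_storage/tmp/tmpAbC12/mydb.sql"
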
Example #2
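A later revision of do_backup: the same per-database dump logic, but the upload goes through the asynchronous largetransfer.Upload API with a progress callback, the transfer is terminated on failure, and the temporary directory is always removed in a finally block.
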
    def do_backup(self):
        tmpdir = None
        dumps = []
        tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')

            if not os.path.exists(tmp_path):
                os.makedirs(tmp_path)

            # Dump all databases
            LOG.info("Dumping all databases")
            tmpdir = tempfile.mkdtemp(dir=tmp_path)
            chown_r(tmpdir, self.postgresql.root_user.name)

            def _single_backup(db_name):
                dump_path = tmpdir + os.sep + db_name + '.sql'
                pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                err = system2(su_args)[1]
                if err:
                    raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
                dumps.append(dump_path)

            for db_name in databases:
                _single_backup(db_name)

            cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

            suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
            backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

            LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))

            def progress_cb(progress):
                LOG.debug('Uploading %s bytes' % progress)

            uploader = largetransfer.Upload(dumps, cloud_storage_path, progress_cb=progress_cb)
            try:
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                LOG.debug(manifest.data)

                return transfer_result_to_backup_result(manifest)
            except:
                uploader.terminate()
                raise
        finally:
            if tmpdir:
                shutil.rmtree(tmpdir, ignore_errors=True)
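
transfer_result_to_backup_result() itself is not shown on this page. A plausible sketch, reconstructed from the inline manifest handling in Example #6 below (the chunk-tuple layout, name at index 0 and size at index 2, is taken from there; everything else is an assumption):

    import os

    def transfer_result_to_backup_result(manifest):
        # Mirror Example #6: chunk[0] is the chunk name, chunk[2] its size.
        base = os.path.dirname(manifest.cloudfs_path)
        return [dict(path=os.path.join(base, chunk[0]), size=chunk[2])
                for chunk in manifest['files'][0]['chunks']]
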
Example #3
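Creates a PostgreSQL data bundle: puts the running server into online-backup mode, takes a storage snapshot, ends backup mode, and waits for the snapshot to reach a terminal state before returning it.
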
	def _create_snapshot(self):
		self._logger.info("Creating PostgreSQL data bundle")
		psql = PSQL()
		if self.postgresql.service.running:
			psql.start_backup()
		
		system2('sync', shell=True)
		# Creating storage snapshot
		snap = self._create_storage_snapshot()
		if self.postgresql.service.running:
			psql.stop_backup()
		
		wait_until(lambda: snap.state in (Snapshot.CREATED, Snapshot.COMPLETED, Snapshot.FAILED))
		if snap.state == Snapshot.FAILED:
			raise HandlerError('postgresql storage snapshot creation failed. See log for more details')
		
		self._logger.info('PostgreSQL data bundle created\n  snapshot: %s', snap.id)
		return snap
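
wait_until() is a scalarizr utility not defined here. A minimal sketch of the polling behaviour this example relies on (the signature beyond the predicate, and the timeout handling, are assumptions):

    import time

    def wait_until(condition, sleep=1, timeout=None):
        # Poll the predicate until it turns truthy or the timeout expires.
        started = time.time()
        while not condition():
            if timeout is not None and time.time() - started > timeout:
                raise Exception('wait_until: timed out after %s seconds' % timeout)
            time.sleep(sleep)
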
Example #4
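A DbMsr_CreateBackup message handler that packs all database dumps into a single timestamped .tar.gz archive, tracks progress through the operation API (op.define, op.phase, op.step, make_backup_steps), uploads the archive with LargeTransfer, and notifies Scalr; the chunk-splitting logic is commented out in this revision.
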
    def on_DbMsr_CreateBackup(self, message):
        #TODO: Think about how to move most of this into the Postgresql class
        # Retrieve password for scalr pg user
        tmpdir = backup_path = None
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')
            
            
            op = operation(name=self._op_backup, phases=[{
                'name': self._phase_backup
            }])
            op.define()         
            
            with op.phase(self._phase_backup):
            
                if not os.path.exists(self._tmp_path):
                    os.makedirs(self._tmp_path)
                    
                # Defining archive name and path
                backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S')+'.tar.gz'
                backup_path = os.path.join(self._tmp_path, backup_filename)
                
                # Creating archive 
                backup = tarfile.open(backup_path, 'w:gz')
    
                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=self._tmp_path)       
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
                    backup.add(dump_path, os.path.basename(dump_path))  

                make_backup_steps(databases, op, _single_backup)                        

                backup.close()
                
                with op.step(self._step_upload_to_cloud_storage):
                    # Creating list of full paths to archive chunks
                    #if os.path.getsize(backup_path) > __postgresql__['pgdump_chunk_size']:
                    #    parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, __postgresql__['pgdump_chunk_size'], tmpdir)]
                    #else:
                    #    parts = [backup_path]
                    #sizes = [os.path.getsize(file) for file in parts]

                    cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
                    LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)

                    trn = LargeTransfer(backup_path, cloud_storage_path)
                    manifest = trn.run()
                    LOG.info("Postgresql backup uploaded to cloud storage under %s/%s",
                                    cloud_storage_path, backup_filename)
            
            result = list(dict(path=os.path.join(cloud_storage_path, c[0]), size=c[2]) for c in
                            manifest['files'][0]['chunks'])
            op.ok(data=result)
                
            # Notify Scalr
            self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
                db_type = BEHAVIOUR,
                status = 'ok',
                backup_parts = result
            ))
                        
        except (Exception, BaseException), e:
            LOG.exception(e)
            
            # Notify Scalr about error
            self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
                db_type = BEHAVIOUR,
                status = 'error',
                last_error = str(e)
            ))
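
make_backup_steps() replaces the plain for loop of Examples #1, #6 and #7 with one operation step per database. A plausible sketch based on how it is called here (the step-name format is an assumption):

    def make_backup_steps(databases, op, backup_fn):
        # One progress-reporting operation step per database dump.
        for db_name in databases:
            with op.step("Backup '%s'" % db_name):
                backup_fn(db_name)
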
Example #5
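Another revision of the same handler: here the archive is split into pgdump_chunk_size pieces with split() when it grows too large, uploaded chunk by chunk via transfer.Transfer, and the result list is built by zipping the uploaded paths with their sizes.
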
    def on_DbMsr_CreateBackup(self, message):
        #TODO: Think about how to move most of this into the Postgresql class
        # Retrieve password for scalr pg user
        tmpdir = backup_path = None
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')

            op = operation(name=self._op_backup,
                           phases=[{
                               'name': self._phase_backup
                           }])
            op.define()

            with op.phase(self._phase_backup):

                if not os.path.exists(self._tmp_path):
                    os.makedirs(self._tmp_path)

                # Defining archive name and path
                backup_filename = time.strftime(
                    '%Y-%m-%d-%H:%M:%S') + '.tar.gz'
                backup_path = os.path.join(self._tmp_path, backup_filename)

                # Creating archive
                backup = tarfile.open(backup_path, 'w:gz')

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=self._tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (
                        PG_DUMP, db_name, dump_path)
                    su_args = [
                        SU_EXEC, '-', self.postgresql.root_user.name, '-c',
                        pg_args
                    ]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError(
                            'Error while dumping database %s: %s' %
                            (db_name, err))
                    backup.add(dump_path, os.path.basename(dump_path))

                make_backup_steps(databases, op, _single_backup)

                backup.close()

                with op.step(self._step_upload_to_cloud_storage):
                    # Creating list of full paths to archive chunks
                    if os.path.getsize(
                            backup_path) > __postgresql__['pgdump_chunk_size']:
                        parts = [
                            os.path.join(tmpdir, file) for file in split(
                                backup_path, backup_filename,
                                __postgresql__['pgdump_chunk_size'], tmpdir)
                        ]
                    else:
                        parts = [backup_path]
                    sizes = [os.path.getsize(file) for file in parts]

                    cloud_storage_path = self._platform.scalrfs.backups(
                        BEHAVIOUR)
                    LOG.info("Uploading backup to cloud storage (%s)",
                             cloud_storage_path)
                    trn = transfer.Transfer()
                    cloud_files = trn.upload(parts, cloud_storage_path)
                    LOG.info(
                        "Postgresql backup uploaded to cloud storage under %s/%s",
                        cloud_storage_path, backup_filename)

            result = list(
                dict(path=path, size=size)
                for path, size in zip(cloud_files, sizes))
            op.ok(data=result)

            # Notify Scalr
            self.send_message(
                DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

        except (Exception, BaseException), e:
            LOG.exception(e)

            # Notify Scalr about error
            self.send_message(
                DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
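
split() is only called in this example, with the signature split(path, name, chunk_size, dst_dir) and a return value of chunk names relative to dst_dir. A plausible sketch of such a splitter (the chunk-naming scheme is an assumption):

    import os

    def split(path, name, chunk_size, dst_dir):
        # Cut the file at `path` into chunk_size pieces inside dst_dir
        # and return the chunk file names.
        chunk_names = []
        index = 0
        with open(path, 'rb') as src:
            while True:
                data = src.read(chunk_size)
                if not data:
                    break
                chunk_name = '%s.part%02d' % (name, index)
                with open(os.path.join(dst_dir, chunk_name), 'wb') as dst:
                    dst.write(data)
                chunk_names.append(chunk_name)
                index += 1
        return chunk_names
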
Example #6
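A do_backup variant that tars the dumps into one timestamped archive, uploads it with LargeTransfer under a master/slave purpose tag, and derives the backup_parts list directly from the transfer manifest.
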
        def do_backup(op):
            tmpdir = backup_path = None
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')
                
                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)
                    
                # Defining archive name and path
                backup_filename = time.strftime('%Y-%m-%d-%H:%M:%S')+'.tar.gz'
                backup_path = os.path.join(tmp_path, backup_filename)
                
                # Creating archive 
                backup_obj = tarfile.open(backup_path, 'w:gz')

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)       
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))
                    backup_obj.add(dump_path, os.path.basename(dump_path))  

                for db_name in databases:
                    _single_backup(db_name)
                       
                backup_obj.close()
                
                # Creating list of full paths to archive chunks
                #if os.path.getsize(backup_path) > __postgresql__['pgdump_chunk_size']:
                #    parts = [os.path.join(tmpdir, file) for file in split(backup_path, backup_filename, __postgresql__['pgdump_chunk_size'], tmpdir)]
                #else:
                #    parts = [backup_path]
                #sizes = [os.path.getsize(file) for file in parts]

                cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

                suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))
                trn = LargeTransfer(backup_path, cloud_storage_path, tags=backup_tags)
                manifest = trn.run()
                LOG.info("Postgresql backup uploaded to cloud storage under %s/%s",
                                cloud_storage_path, backup_filename)
                
                result = list(dict(path=os.path.join(os.path.dirname(manifest.cloudfs_path), c[0]), size=c[2]) for c in
                                manifest['files'][0]['chunks'])
                    
                # Notify Scalr
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='ok',
                                             backup_parts=result))

                return result
                            
            except (Exception, BaseException), e:
                LOG.exception(e)
                
                # Notify Scalr about error
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='error',
                                             last_error=str(e)))
Example #7
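Functionally close to Example #2: per-database dumps uploaded through largetransfer.Upload with a progress callback, but without the finally cleanup, and with the Scalr notification sent from inside do_backup before returning.
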
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')

                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (
                        PG_DUMP, db_name, dump_path)
                    su_args = [
                        SU_EXEC, '-', self.postgresql.root_user.name, '-c',
                        pg_args
                    ]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError(
                            'Error while dumping database %s: %s' %
                            (db_name, err))
                    dumps.append(dump_path)

                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(
                    BEHAVIOUR)

                suffix = 'master' if int(
                    __postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" %
                         (cloud_storage_path, backup_tags))

                def progress_cb(progress):
                    LOG.debug('Uploading %s bytes' % progress)

                uploader = largetransfer.Upload(dumps,
                                                cloud_storage_path,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info(
                    "Postgresql backup uploaded to cloud storage under %s",
                    cloud_storage_path)
                LOG.debug(manifest.data)

                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result

            except (Exception, BaseException), e:
                LOG.exception(e)

                # Notify Scalr about error
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))