Example 1
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            self.transfer = cloudfs.LargeTransfer(self._gen_src,
                                                  self._dst,
                                                  streamer=None,
                                                  chunk_size=self.chunk_size)
        result = self.transfer.run()
        if not result:
            raise Error("Error while transfering to cloud storage")

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump stderr: %s", err)

        map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        result = transfer_result_to_backup_result(result)
        return result
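For context, _gen_src and self._popens are not shown in this listing. Below is a minimal sketch, not from the source, of what such a generator might look like, assuming each database is dumped through a mysqldump subprocess whose Popen handle is stashed for the log_stderr pass above, and that the transfer consumes readable streams. The command line and the db_name attribute are illustrative assumptions:

    import subprocess

    def _gen_src(self):
        for db_name in self._databases:
            # Illustrative command line; the real one is not shown in these examples
            popen = subprocess.Popen(['mysqldump', db_name],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
            popen.db_name = db_name   # log_stderr in later examples reads this
            self._popens.append(popen)
            yield popen.stdout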
Example 2
        def do_backup(op):
            try:
                self.redis_instances.save_all()
                dbs = [r.db_path for r in self.redis_instances if r.db_path]

                cloud_storage_path = bus.platform.scalrfs.backups(
                    BEHAVIOUR)  #? __node__.platform
                LOG.info("Uploading backup to cloud storage (%s)",
                         cloud_storage_path)
                transfer = LargeTransfer(dbs, cloud_storage_path)
                result = transfer.run()
                result = transfer_result_to_backup_result(result)

                node.__node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result  #?

            except (Exception, BaseException) as e:
                LOG.exception(e)

                # Notify Scalr about error
                node.__node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
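Distilled from this handler (and repeated throughout these examples), the DBMSR_CREATE_BACKUP_RESULT message carries one of two payload shapes; BEHAVIOUR, result, and e come from the surrounding code:

    # Success: backup_parts is the list built by transfer_result_to_backup_result()
    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result)

    # Failure: last_error is the stringified exception
    dict(db_type=BEHAVIOUR, status='error', last_error=str(e))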
Example 3
        def do_backup(op):
            try:
                self.redis_instances.save_all()
                dbs = [r.db_path for r in self.redis_instances if r.db_path]

                cloud_storage_path = bus.platform.scalrfs.backups(
                    BEHAVIOUR)  #? __node__.platform
                LOG.info("Uploading backup to cloud storage (%s)",
                         cloud_storage_path)

                def progress_cb(progress):
                    LOG.debug('Uploading %s bytes' % progress)

                uploader = largetransfer.Upload(dbs,
                                                cloud_storage_path,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                result = transfer_result_to_backup_result(manifest)

                node.__node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result  #?

            except (Exception, BaseException) as e:
                LOG.exception(e)

                # Notify Scalr about error
                node.__node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
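The largetransfer.Upload calls above follow a start/wait/read pattern. Here is a distilled sketch of that lifecycle, inferred only from how these examples use the API; the terminate-on-error step appears in later examples, and sources, cloud_storage_path, and progress_cb stand in for the caller's values:

    uploader = largetransfer.Upload(sources, cloud_storage_path,
                                    progress_cb=progress_cb)
    uploader.apply_async()       # start the transfer in the background
    try:
        uploader.join()          # block until every chunk is uploaded
        manifest = uploader.manifest
    except BaseException:
        uploader.terminate()     # stop background workers before re-raising
        raise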
Example 4
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')
                
                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                    su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  #?
                    dumps.append(dump_path)


                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

                suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))
                trn = LargeTransfer(dumps, cloud_storage_path, tags=backup_tags)
                manifest = trn.run()
                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                    
                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='ok',
                                             backup_parts=result))

                return result
                            
            except (Exception, BaseException) as e:
                LOG.exception(e)
                
                # Notify Scalr about error
                __node__.messaging.send(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                                        dict(db_type=BEHAVIOUR,
                                             status='error',
                                             last_error=str(e)))
Example 5
    def do_backup(self):
        tmpdir = None
        dumps = []
        tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')

            if not os.path.exists(tmp_path):
                os.makedirs(tmp_path)

            # Dump all databases
            LOG.info("Dumping all databases")
            tmpdir = tempfile.mkdtemp(dir=tmp_path)
            chown_r(tmpdir, self.postgresql.root_user.name)

            def _single_backup(db_name):
                dump_path = tmpdir + os.sep + db_name + '.sql'
                pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                err = system2(su_args)[1]
                if err:
                    raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  # ?
                dumps.append(dump_path)

            for db_name in databases:
                _single_backup(db_name)

            cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

            suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
            backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

            LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))

            def progress_cb(progress):
                LOG.debug('Uploading %s bytes' % progress)

            uploader = largetransfer.Upload(dumps, cloud_storage_path, progress_cb=progress_cb)
            try:
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                LOG.debug(manifest.data)

                return transfer_result_to_backup_result(manifest)
            except:
                uploader.terminate()
                raise
        finally:
            if tmpdir:
                shutil.rmtree(tmpdir, ignore_errors=True)
Example 6
def test_transfer_result_to_backup_result():
    mnf = mock.Mock(files=[{
        'chunks': [['f101.part', None, 1048576], ['f102.part', None, 5942]]
    }, {
        'chunks': [['f201.part', None, 65403]]
    }],
                    cloudfs_path='s3://path/to/backup/manifest.json')

    result = handlers.transfer_result_to_backup_result(mnf)

    assert len(result) == 3
    assert result[0] == dict(path='s3://path/to/backup/f101.part',
                             size=1048576)
    assert result[1] == dict(path='s3://path/to/backup/f102.part', size=5942)
    assert result[2] == dict(path='s3://path/to/backup/f201.part', size=65403)
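This test pins down the function's contract: every chunk triple in the manifest becomes a dict whose path joins the manifest's directory with the chunk name. A minimal implementation consistent with the test, assuming the triples are (name, checksum, size) and nothing else about the real code:

    import os

    def transfer_result_to_backup_result(manifest):
        base = os.path.dirname(manifest.cloudfs_path)  # e.g. 's3://path/to/backup'
        parts = []
        for file_ in manifest.files:
            for name, _checksum, size in file_['chunks']:
                parts.append(dict(path=os.path.join(base, name), size=size))
        return parts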
Example 7
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            src_gen = self._gen_src()
            transfer_id = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
            self.transfer = largetransfer.Upload(src_gen,
                                                 self._dst,
                                                 chunk_size=self.chunk_size,
                                                 transfer_id=transfer_id)
        self.transfer.apply_async()
        self.transfer.join()
        result = self.transfer.manifest

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s",
                          popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err

        mysqldump_results = map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" +
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump',
                              cloudfs_source=result.cloudfs_path,
                              parts=parts,
                              description=self.description,
                              tags=self.tags)
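The transfer_id above is just a compact UTC timestamp; for example:

    >>> import datetime
    >>> datetime.datetime(2014, 3, 7, 15, 4, 5).strftime('%Y%m%d%H%M%S')
    '20140307150405'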
Example 8
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            self.transfer = cloudfs.LargeTransfer(self._gen_src,
                                                  self._dst,
                                                  streamer=None,
                                                  chunk_size=self.chunk_size)
        result = self.transfer.run()
        if not result:
            raise Error("Error while transfering to cloud storage")

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s",
                          popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err

        mysqldump_results = map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" +
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump',
                              cloudfs_source=result.cloudfs_path,
                              parts=parts,
                              description=self.description,
                              tags=self.tags)
Example 9
    def on_DbMsr_CreateBackup(self, message):
        try:
            op = operation(name=self._op_backup, phases=[{
                                                         'name': self._phase_backup,
                                                         'steps': [self._step_copy_database_file,
                                                                   self._step_upload_to_cloud_storage]
                                                         }])
            op.define()

            with op.phase(self._phase_backup):

                with op.step(self._step_copy_database_file):
                    # Flush redis data on disk before creating backup
                    LOG.info("Dumping Redis data on disk")
                    self.redis_instances.save_all()
                    dbs = [r.db_path for r in self.redis_instances]

                with op.step(self._step_upload_to_cloud_storage):
                    cloud_storage_path = self._platform.scalrfs.backups(BEHAVIOUR)
                    LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)
                    transfer = LargeTransfer(dbs, cloud_storage_path)
                    result = transfer.run()
                    result = handlers.transfer_result_to_backup_result(result)

            op.ok(data=result)

            # Notify Scalr
            self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
                db_type=BEHAVIOUR,
                status='ok',
                backup_parts=result))

        except (Exception, BaseException) as e:
            LOG.exception(e)

            # Notify Scalr about error
            self.send_message(DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT, dict(
                db_type=BEHAVIOUR,
                status='error',
                last_error=str(e)))
Example 10
    def do_backup(self):
        self.redis_instances.save_all()
        dbs = [r.db_path for r in self.redis_instances if r.db_path]

        cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)
        LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)

        def progress_cb(progress):
            LOG.debug('Uploading %s bytes' % progress)

        uploader = largetransfer.Upload(dbs,
                                        cloud_storage_path,
                                        progress_cb=progress_cb)
        try:
            uploader.apply_async()
            uploader.join()
            manifest = uploader.manifest

            return transfer_result_to_backup_result(manifest)
        except:
            uploader.terminate()
            raise
Example 11
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')

                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (
                        PG_DUMP, db_name, dump_path)
                    su_args = [
                        SU_EXEC, '-', self.postgresql.root_user.name, '-c',
                        pg_args
                    ]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError(
                            'Error while dumping database %s: %s' %
                            (db_name, err))  #?
                    dumps.append(dump_path)

                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(
                    BEHAVIOUR)

                suffix = 'master' if int(
                    __postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" %
                         (cloud_storage_path, backup_tags))

                def progress_cb(progress):
                    LOG.debug('Uploading %s bytes' % progress)

                uploader = largetransfer.Upload(dumps,
                                                cloud_storage_path,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info(
                    "Postgresql backup uploaded to cloud storage under %s",
                    cloud_storage_path)
                LOG.debug(manifest.data)

                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result

            except (Exception, BaseException) as e:
                LOG.exception(e)

                # Notify Scalr about error
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))