Example #1
    def run(self):
        try:
            self.oplogs['backup'] = Oplog(self.mongodump_oplog['file'],
                                          self.do_gzip(), 'a+',
                                          self.flush_docs, self.flush_secs)
            self.oplogs['tailed'] = Oplog(self.tailed_oplog['file'],
                                          self.do_gzip())
            logging.info("Resolving oplog for %s to max ts: %s" %
                         (self.uri, self.max_end_ts))
            self.state.set('running', True)
            self.state.set('first_ts', self.mongodump_oplog['first_ts'])
            if not self.state.get('first_ts'):
                self.state.set('first_ts', self.tailed_oplog['first_ts'])
            for change in decode_file_iter(
                    self.oplogs['tailed'],
                    CodecOptions(unicode_decode_error_handler="ignore")):
                self.last_ts = change['ts']
                if (not self.mongodump_oplog['last_ts']
                        or self.last_ts > self.mongodump_oplog['last_ts']):
                    if self.last_ts < self.max_end_ts:
                        self.oplogs['backup'].add(change)
                        self.changes += 1
                    elif self.last_ts > self.max_end_ts:
                        break

            self.state.set('count',
                           self.mongodump_oplog['count'] + self.changes)
            self.state.set('last_ts', self.last_ts)
            self.state.set('running', False)
            self.exit_code = 0
        except Exception as e:
            raise Error("Resolving of oplogs failed! Error: %s" % e)
Example #2
    def server_version(self):
        status = self.admin_command('serverStatus')
        try:
            if 'version' in status:
                version = status['version'].split('-')[0]
                # NOTE: the components stay strings, e.g. ('3', '4', '10')
                return tuple(version.split('.'))
        except Exception as e:
            raise Error("Unable to determine version from serverStatus! Error: %s" % e)
Example #3
    def run(self):
        try:
            thread_count = self.threads()
            self._pool   = Pool(processes=thread_count)
            logging.info("Archiving backup directories with pool of %i thread(s)" % thread_count)
        except Exception as e:
            logging.fatal("Could not start pool! Error: %s" % e)
            raise Error(e)
Example #4
    def __init__(self, config, sharding, db):
        self.config       = config
        self.sharding     = sharding
        self.db           = db
        self.max_lag_secs = self.config.replication.max_lag_secs

        self.replsets      = {}
        self.replset_conns = {}

        # Check Sharding class:
        if not isinstance(self.sharding, Sharding):
            raise Error("'sharding' field is not an instance of class: 'Sharding'!")

        # Get a DB connection
        if isinstance(self.db, DB):
            self.connection = self.db.connection()
            if not self.db.is_mongos() and not self.db.is_configsvr():
                raise Error('MongoDB connection is not to a mongos or configsvr!')
        else:
            raise Error("'db' field is not an instance of class: 'DB'!")
Example #5
class Tar(Task):
    def __init__(self, manager, config, timer, base_dir, backup_dir, **kwargs):
        super(Tar, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir, **kwargs)
        self.compression_method = self.config.archive.tar.compression
        self.binary             = "tar"

        self._pool   = None
        self._pooled = []

    def done(self, done_dir):
        if done_dir in self._pooled:
            logging.debug("Archiving completed for: %s" % done_dir)
            self._pooled.remove(done_dir)
        else:
            raise OperationError("Unexpected response from tar thread: %s" % done_dir)

    def wait(self):
        if len(self._pooled) > 0:
            self._pool.close()
            while len(self._pooled):
                logging.debug("Waiting for %i tar thread(s) to stop" % len(self._pooled))
                sleep(2)
            self._pool.terminate()
            logging.debug("Stopped all tar threads")
            self.stopped = True
            self.running = False

    def run(self):
        try:
            thread_count = self.threads()
            self._pool   = Pool(processes=thread_count)
            logging.info("Archiving backup directories with pool of %i thread(s)" % thread_count)
        except Exception as e:
            logging.fatal("Could not start pool! Error: %s" % e)
            raise Error(e)

        if os.path.isdir(self.backup_dir):
            try:
                self.running = True
                for backup_dir in os.listdir(self.backup_dir):
                    subdir_name = os.path.join(self.backup_dir, backup_dir)
                    if not os.path.isdir(os.path.join(subdir_name, "dump")):
                        continue
                    output_file = "%s.tar" % subdir_name
                    if self.do_gzip():
                        output_file  = "%s.tgz" % subdir_name
                    self._pool.apply_async(
                        TarThread(subdir_name, output_file, self.compression(),
                                  self.verbose, self.binary).run,
                        callback=self.done)
                    self._pooled.append(subdir_name)
            except Exception as e:
                self._pool.terminate()
                logging.fatal("Could not create tar archiving thread! Error: %s" % e)
                raise Error(e)
            finally:
Example #6
    def run(self, *args):
        if self._task and len(self.notifications) > 0:
            try:
                logging.info("Sending %i notification(s) to: %s" %
                             (len(self.notifications), self._task.server))
                while len(self.notifications) > 0:
                    try:
                        (success, message) = self.notifications.pop()
                        state = self._task.failed
                        if success is True:
                            state = self._task.success
                        self._task.run(state, message)
                    except NotifyError:
                        continue
            except Exception as e:
                raise Error(e)
Example #7
class Stage(object):
    def __init__(self, stage_name, manager, config, timers, base_dir, backup_dir, **kwargs):
        self.stage_name = stage_name
        self.manager    = manager
        self.config     = config
        self.timers     = timers
        self.base_dir   = base_dir
        self.backup_dir = backup_dir
        self.args       = kwargs

        self.running   = False
        self.stopped   = False
        self.completed = False

        self.stage  = "mongodb_consistent_backup.%s" % self.stage_name
        self.module = None
        self.task   = "none"
        self._task  = None

    def init(self):
        mod_class = None
        if self.task == "none":
            logging.info("%s stage disabled, skipping" % self.stage_name)
            return
        try:
            module    = sys.modules["%s.%s" % (self.stage, self.task.capitalize())]
            mod_class = getattr(module, self.task.capitalize())
        # sys.modules lookup raises KeyError; getattr() raises AttributeError
        except (LookupError, AttributeError) as e:
            raise OperationError('Could not load task %s: %s' % (self.task, e))
        if mod_class:
            self._task = mod_class(
                self.manager,
                self.config,
                self.timers,
                self.base_dir,
                self.backup_dir,
                **self.args
            )
            if isinstance(self._task, Task):
                logging.debug("Loaded stage %s with task %s" % (self.stage, self.task.capitalize()))
            else:
                raise Error("Loaded class must be child of mongodb_consistent_backup.Pipeline.Task!")
Example #8
    def __init__(self, config, db):
        self.config         = config
        self.db             = db
        self.read_pref_tags = self.config.replication.read_pref_tags
        self.max_lag_secs   = self.config.replication.max_lag_secs
        self.min_priority   = self.config.replication.min_priority
        self.max_priority   = self.config.replication.max_priority
        self.hidden_only    = self.config.replication.hidden_only
        self.preferred_members = []
        if self.config.replication.preferred_members:
            self.preferred_members = self.config.replication.preferred_members.split(",")
            logging.debug("Preferred members: %s" % self.preferred_members)

        self.state_primary   = 1
        self.state_secondary = 2
        self.state_arbiter   = 7
        self.hidden_weight   = 0.20
        self.pri0_weight     = 0.10

        self.replset      = True
        self.rs_config    = None
        self.rs_status    = None
        self.primary      = None
        self.secondary    = None
        self.mongo_config = None

        self.replset_summary = {}

        # Get a DB connection
        try:
            if isinstance(self.db, DB):
                self.connection = self.db.connection()
            else:
                raise Error("'db' field is not an instance of class: 'DB'!")
        except Exception as e:
            logging.fatal("Could not get DB connection! Error: %s" % e)
            raise OperationError(e)
Example #9
    def close(self, code=None, frame=None):
        raise Error("Must define a .close() method when using %s class!" %
                    self.__class__.__name__)
Example #10
    def run(self):
        raise Error("Must define a .run() method when using %s class!" %
                    self.__class__.__name__)
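Examples #9 and #10 are abstract-method guards: the base class raises Error unless a subclass supplies its own .close() and .run(). A minimal sketch of a conforming subclass; the Error class below is a stand-in for the project's exception type, and NoopMethod is purely illustrative:

import logging


class Error(Exception):
    # Stand-in for the project's Error class (assumption for this sketch).
    pass


class BaseMethod(object):
    def run(self):
        raise Error("Must define a .run() method when using %s class!" %
                    self.__class__.__name__)

    def close(self, code=None, frame=None):
        raise Error("Must define a .close() method when using %s class!" %
                    self.__class__.__name__)


class NoopMethod(BaseMethod):
    # Hypothetical subclass that satisfies the run/close contract.
    def run(self):
        logging.info("NoopMethod.run() called")

    def close(self, code=None, frame=None):
        logging.info("NoopMethod.close() called (code=%s)" % code)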
Example #11
class Tar(Task):
    def __init__(self, manager, config, timer, base_dir, backup_dir, **kwargs):
        super(Tar, self).__init__(self.__class__.__name__, manager, config,
                                  timer, base_dir, backup_dir, **kwargs)
        self.compression_method = self.config.archive.tar.compression
        self.binary = self.config.archive.tar.binary

        self._pool = None
        self._pooled = []

        self.threads(self.config.archive.tar.threads)
        self._all_threads_successful = True

    def done(self, result):
        success = result["success"]
        message = result["message"]
        error = result["error"]
        directory = result["directory"]
        exit_code = result["exit_code"]

        if success:
            if directory in self._pooled:
                logging.debug("Archiving completed for: %s" % directory)
            else:
                logging.warning(
                    "Tar thread claimed success, but delivered unexpected response %s for directory %s. "
                    "Assuming failure anyway." % (message, directory))
                self._all_threads_successful = False
        else:
            self._all_threads_successful = False
            logging.error(
                "Tar thread failed for directory %s: %s; Exit code %s; Error %s"
                % (directory, message, exit_code, error))
        self._pooled.remove(directory)

    def wait(self):
        if len(self._pooled) > 0:
            self._pool.close()
            while len(self._pooled):
                logging.debug("Waiting for %i tar thread(s) to stop" %
                              len(self._pooled))
                sleep(2)
            self._pool.terminate()
            logging.debug("Stopped all tar threads")
            self.stopped = True
            self.running = False

    def run(self):
        try:
            self._pool = Pool(processes=self.threads())
            logging.info(
                "Archiving backup directories with pool of %i thread(s)" %
                self.threads())
        except Exception as e:
            logging.fatal("Could not start pool! Error: %s" % e)
            raise Error(e)

        if os.path.isdir(self.backup_dir):
            try:
                self.running = True
                for backup_dir in os.listdir(self.backup_dir):
                    subdir_name = os.path.join(self.backup_dir, backup_dir)
                    if not os.path.isdir(os.path.join(subdir_name, "dump")):
                        continue
                    output_file = "%s.tar" % subdir_name
                    if self.do_gzip():
                        output_file = "%s.tgz" % subdir_name
                    self._pool.apply_async(TarThread(subdir_name, output_file,
                                                     self.compression(),
                                                     self.verbose,
                                                     self.binary).run,
                                           callback=self.done)
                    self._pooled.append(subdir_name)
            except Exception as e:
                self._pool.terminate()
                logging.fatal(
                    "Could not create tar archiving thread! Error: %s" % e)
                raise Error(e)
            finally: