def run(self):
    """Start an oplog TailThread for every replica set being backed up.

    For each shard, finds a secondary, prepares the oplog dump file and
    shared OplogState, then launches a TailThread and blocks until that
    tailer reports itself running before moving on to the next shard.

    Raises:
        OperationError: if a tailer process exits before reporting running.
    """
    if not self.enabled():
        logging.info("Oplog tailer is disabled, skipping")
        return
    logging.info("Starting oplog tailers on all replica sets (options: compression=%s, status_secs=%i)" % (
        self.compression(),
        self.status_secs
    ))
    self.timer.start(self.timer_name)
    for shard in self.replsets:
        tail_stop = Event()
        secondary = self.replsets[shard].find_secondary()
        mongo_uri = secondary['uri']
        shard_name = mongo_uri.replset
        oplog_file = self.prepare_oplog_files(shard_name)
        oplog_state = OplogState(self.manager, mongo_uri, oplog_file)
        thread = TailThread(
            self.backup_stop,
            tail_stop,
            mongo_uri,
            self.config,
            self.timer,
            oplog_file,
            oplog_state,
            self.do_gzip()
        )
        self.shards[shard] = {
            'stop': tail_stop,
            'thread': thread,
            'state': oplog_state
        }
        self.shards[shard]['thread'].start()
        # Wait for the tailer to flag itself running. A process's exitcode
        # is None while it is still alive, so any non-None value here means
        # the tailer died before it started tailing. Checking truthiness
        # (the old "if exitcode:") missed a clean exit code of 0 and spun
        # in this loop forever; "is not None" catches every early death.
        while not oplog_state.get('running'):
            exitcode = self.shards[shard]['thread'].exitcode
            if exitcode is not None:
                raise OperationError("Oplog tailer for %s failed with exit code %i!" % (mongo_uri, exitcode))
            sleep(0.5)
def run(self):
    """Start an oplog TailThread for every replica set being backed up.

    For each shard, finds a secondary, prepares the oplog dump file and
    shared OplogState, then launches a TailThread and blocks until that
    tailer reports itself running before moving on to the next shard.

    Raises:
        OperationError: if a tailer process exits before reporting running.
    """
    logging.info("Starting oplog tailers on all replica sets (options: compression=%s, status_secs=%i)" % (
        self.compression(),
        self.status_secs
    ))
    self.timer.start(self.timer_name)
    for shard in self.replsets:
        stop = Event()
        secondary = self.replsets[shard].find_secondary()
        mongo_uri = secondary['uri']
        shard_name = mongo_uri.replset
        oplog_file = self.prepare_oplog_files(shard_name)
        oplog_state = OplogState(self.manager, mongo_uri, oplog_file)
        thread = TailThread(
            stop,
            mongo_uri,
            self.config,
            self.timer,
            oplog_file,
            oplog_state,
            self.do_gzip()
        )
        self.shards[shard] = {
            'stop': stop,
            'thread': thread,
            'state': oplog_state
        }
        self.shards[shard]['thread'].start()
        # Wait for the tailer to flag itself running. Previously this loop
        # had no liveness check: if the TailThread process died before
        # setting 'running' we would poll forever. exitcode is None while
        # the process is alive, so any non-None value (including 0) means
        # the tailer died early and we must abort instead of spinning.
        while not oplog_state.get('running'):
            exitcode = self.shards[shard]['thread'].exitcode
            if exitcode is not None:
                raise OperationError("Oplog tailer for %s failed with exit code %i!" % (mongo_uri, exitcode))
            sleep(0.5)
def run(self):
    """Run mongodump backups of a secondary from each shard, then (if
    present) a single SCCC/non-replset config server, serially.

    Shard dumps run in parallel via MongodumpThread and are joined by
    self.wait(); the legacy config server, when configured as a plain
    host dict, is dumped afterwards in a single thread.

    Returns:
        The backup summary (self._summary).

    Raises:
        OperationError: if no dump threads could be started.
    """
    self.timer.start(self.timer_name)

    # backup a secondary from each shard:
    for shard in self.replsets:
        secondary = self.replsets[shard].find_secondary()
        mongo_uri = secondary['uri']
        self.states[shard] = OplogState(self.manager, mongo_uri)
        thread = MongodumpThread(
            self.states[shard],
            mongo_uri,
            self.timer,
            self.user,
            self.password,
            self.authdb,
            self.backup_dir,
            self.binary,
            self.threads(),
            self.do_gzip,
            self.verbose
        )
        self.dump_threads.append(thread)

    # An empty replset map means nothing to back up; idiomatic emptiness
    # test replaces the old "not len(...) > 0".
    if not self.dump_threads:
        raise OperationError('No backup threads started!')

    logging.info(
        "Starting backups using mongodump %s (options: compression=%s, threads_per_dump=%i)" % (
            self.version,
            self.compression(),
            self.threads()
        ))
    for thread in self.dump_threads:
        thread.start()
    self.wait()

    # backup a single sccc/non-replset config server, if exists:
    if self.sharding:
        config_server = self.sharding.get_config_server()
        # A dict (rather than a replset object) indicates the legacy
        # non-replset config server topology.
        if config_server and isinstance(config_server, dict):
            logging.info("Using non-replset backup method for config server mongodump")
            # NOTE(review): port 27019 (the conventional configsvr port) is
            # hard-coded here; confirm the config server cannot run elsewhere.
            mongo_uri = MongoUri(config_server['host'], 27019, 'configsvr')
            self.states['configsvr'] = OplogState(self.manager, mongo_uri)
            self.dump_threads = [MongodumpThread(
                self.states['configsvr'],
                mongo_uri,
                self.timer,
                self.user,
                self.password,
                self.authdb,
                self.backup_dir,
                self.binary,
                self.threads(),
                self.do_gzip,
                self.verbose
            )]
            self.dump_threads[0].start()
            self.dump_threads[0].join()

    self.completed = True
    return self._summary
def run(self):
    """Fetch oplog changes for every shard up to a common end timestamp.

    Requires a completed backup (self.backup_summary): each shard's getter
    starts from that shard's last backed-up timestamp and reads forward to
    the latest 'last_ts' seen across all shards, so every shard's oplog is
    consistent to the same point in time. One SimpleOplogGetterThread is
    started per shard and all are joined via self.wait().

    Returns:
        The getter summary (self._summary).

    Raises:
        OperationError: if there is no backup summary to resume from,
            or (via self.wait()) if any getter thread fails.
    """
    if not self.enabled():
        logging.info("Oplog getter is disabled, skipping")
        return
    logging.info(
        "Starting oplog getter for all replica sets (options: compression=%s, status_secs=%i)" % (
            self.compression(),
            self.status_secs
        ))
    self.timer.start(self.timer_name)

    if not self.backup_summary:
        # Fixed grammar in the user-facing error ("cannot gathered").
        raise OperationError(
            "Oplogs cannot be gathered without a successful backup first.")

    # Determine the time when the last shard completed its backup, because we need all changes
    # across all other shards since whenever they finished until then
    logging.debug("Finding latest finished backup timestamp")
    need_changes_until_ts = None
    for shard in self.replsets:
        ts = self.backup_summary[shard].get('last_ts')
        logging.debug("Shard %s's has changes up to %s" % (shard, ts))
        if need_changes_until_ts is None or ts > need_changes_until_ts:
            need_changes_until_ts = ts
    logging.info("Getting oplogs for all shards up to %s" % need_changes_until_ts)

    for shard in self.replsets:
        getter_stop = Event()
        secondary = self.replsets[shard].find_secondary()
        mongo_uri = secondary['uri']
        shard_name = mongo_uri.replset
        # Each shard resumes from where its own dump stopped.
        need_changes_since_ts = self.backup_summary[shard].get('last_ts')
        oplog_file = self.prepare_oplog_files(shard_name)
        oplog_state = OplogState(self.manager, mongo_uri, oplog_file)
        thread = SimpleOplogGetterThread(
            self.backup_stop,
            getter_stop,
            mongo_uri,
            self.config,
            self.timer,
            oplog_file,
            oplog_state,
            self.do_gzip(),
            need_changes_since_ts,
            need_changes_until_ts)
        self.shards[shard] = {
            'stop': getter_stop,
            'thread': thread,
            'state': oplog_state
        }
        self.worker_threads.append(thread)
        logging.debug("Starting thread %s to write %s oplog to %s" % (
            thread.name, mongo_uri, oplog_file))
        thread.start()

    # Wait for all threads to complete; wait() would have raised an error
    # if not all of them completed normally.
    self.wait()
    self.completed = True
    self.stopped = True
    self.get_summaries()
    return self._summary
def run(self): self.timer.start(self.timer_name) # backup a secondary from each shard: for shard in self.replsets: try: secondary = self.replsets[shard].find_secondary() mongo_uri = secondary['uri'] self.states[shard] = OplogState(self.manager, mongo_uri) thread = MongodumpThread(self.states[shard], mongo_uri, self.timer, self.config, self.backup_dir, self.version, self.threads(), self.do_gzip()) self.dump_threads.append(thread) except Exception, e: logging.error("Failed to get secondary for shard %s: %s" % (shard, e)) raise e