Example #1
File: record.py  Project: llvtt/flashback
    def record(self):
        """record the activities in the multithreading way"""
        start_utc_secs = utils.now_in_utc_secs()
        end_utc_secs = utils.now_in_utc_secs() + self.config["duration_secs"]
        # We'll dump the recorded activities to `files`.
        files = {
            "oplog": open(self.config["oplog_output_file"], "wb")
        }
        tailer_names = []
        profiler_output_files = []
        # open a file for each profiler client, append client name as suffix
        for client_name in self.profiler_clients:
            # create a file for each (client,db)
            for db in self.config["target_databases"]:
                tailer_name = "%s_%s" % (db, client_name)
                tailer_names.append(tailer_name)
                profiler_output_files.append(tailer_name)
                files[tailer_name] = open(tailer_name, "wb")
                
        tailer_names.append("oplog")
        state = MongoQueryRecorder.RecordingState(tailer_names)
        
        # Create a series of worker threads to track and dump mongodb
        # activities. On return, these threads have already started.
        workers_info = self._generate_workers(files, state, start_utc_secs,
                                              end_utc_secs)
        timer_control = self._periodically_report_status(state)

        # Wait until the due time arrives
        while all(s.alive for s in state.tailer_states.values()) \
                and (utils.now_in_utc_secs() < end_utc_secs) \
                and not self.force_quit:
            time.sleep(1)

        state.timeout = True

        self._join_workers(state, workers_info)
        timer_control.set()  # stop status report
        utils.LOG.info("Preliminary recording completed!")

        for f in files.values():
            f.close()

        # Fill the missing insert op details from the oplog
        merge.merge_to_final_output(
            oplog_output_file=self.config["oplog_output_file"],
            profiler_output_files=profiler_output_files,
            output_file=self.config["output_file"])
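The stop loop above polls `state.tailer_states.values()` and sets `state.timeout`, which implies a small shared-state holder behind `MongoQueryRecorder.RecordingState`. A minimal sketch of what it could look like, with a hypothetical attribute layout inferred only from the calls above:

    import threading

    class RecordingState(object):
        """Hypothetical sketch of the state holder used above: one
        liveness flag per tailer plus a shared timeout flag."""

        class TailerState(object):
            def __init__(self):
                self.alive = True  # cleared by the worker thread on exit

        def __init__(self, tailer_names):
            self.timeout = False  # set by record() when it is time to stop
            self.lock = threading.Lock()  # guards updates from workers
            # One entry per tailer, keyed by name ("oplog",
            # "<db>_<client>", ...), matching the
            # `state.tailer_states.values()` checks in the loop above.
            self.tailer_states = dict(
                (name, RecordingState.TailerState()) for name in tailer_names)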
Example #2
    def record(self):
        """record the activities in the multithreading way"""
        start_utc_secs = utils.now_in_utc_secs()
        end_utc_secs = utils.now_in_utc_secs() + self.config["duration_secs"]
        # We'll dump the recorded activities to `files`.
        files = {"oplog": open(self.config["oplog_output_file"], "wb")}
        tailer_names = []
        profiler_output_files = []
        # open a file for each profiler client, append client name as suffix
        for client_name in self.profiler_clients:
            # create a file for each (client,db)
            for db in self.config["target_databases"]:
                tailer_name = "%s_%s" % (db, client_name)
                tailer_names.append(tailer_name)
                profiler_output_files.append(tailer_name)
                files[tailer_name] = open(tailer_name, "wb")

        tailer_names.append("oplog")
        state = MongoQueryRecorder.RecordingState(tailer_names)

        # Create a series of worker threads to track and dump mongodb
        # activities. On return, these threads have already started.
        workers_info = self._generate_workers(files, state, start_utc_secs,
                                              end_utc_secs)
        timer_control = self._periodically_report_status(state)

        # Wait until the due time arrives
        while all(s.alive for s in state.tailer_states.values()) \
                and (utils.now_in_utc_secs() < end_utc_secs) \
                and not self.force_quit:
            time.sleep(1)

        state.timeout = True

        self._join_workers(state, workers_info)
        timer_control.set()  # stop status report
        utils.LOG.info("Preliminary recording completed!")

        for f in files.values():
            f.close()

        # Fill the missing insert op details from the oplog
        merge.merge_to_final_output(
            oplog_output_file=self.config["oplog_output_file"],
            profiler_output_files=profiler_output_files,
            output_file=self.config["output_file"])
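Both variants bound the recording window with `utils.now_in_utc_secs()`. Assuming the helper simply returns whole seconds of Unix time (which is UTC by definition), it could be as small as:

    import time

    def now_in_utc_secs():
        # Hypothetical sketch: time.time() returns seconds since the
        # Unix epoch (UTC), so truncating to int yields whole UTC seconds.
        return int(time.time())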
Example #3
File: record.py  Project: liukai/flashback
    def record(self):
        """record the activities in the multithreading way"""
        start_utc_secs = utils.now_in_utc_secs()
        end_utc_secs = utils.now_in_utc_secs() + self.config["duration_secs"]
        state = MongoQueryRecorder.RecordingState()
        # We'll dump the recorded activities to `files`.
        files = [
            open(self.config["oplog_output_file"], "wb"),
            open(self.config["profiler_output_file"], "wb")
        ]

        # Create a series of worker threads to track and dump mongodb
        # activities. On return, these threads have already started.
        workers_info = self._generate_workers(files, state, start_utc_secs,
                                              end_utc_secs)
        timer_control = self._periodically_report_status(state)

        # Wait until the due time arrives
        while all(s.alive for s in state.tailer_states) \
                and (utils.now_in_utc_secs() < end_utc_secs) \
                and not self.force_quit:
            time.sleep(1)

        state.timeout = True

        self._join_workers(state, workers_info)
        timer_control.set()  # stop status report
        utils.LOG.info("Preliminary recording completed!")

        for f in files:
            f.close()

        # Fill the missing insert op details from the oplog
        merge.merge_to_final_output(self.config["oplog_output_file"],
                                    self.config["profiler_output_file"],
                                    self.config["output_file"])
Example #4
    def record(self):
        """Record the activities in a multithreaded way"""
        start_utc_secs = utils.now_in_utc_secs()
        end_utc_secs = utils.now_in_utc_secs() + self.config["duration_secs"]

        # If overwrite_output_file setting is False, determine the actual name
        # of the output files
        if not self.config["overwrite_output_file"]:
            self.config.update(self.get_noncolliding_config())

        # We'll dump the recorded activities to `files`.
        files = {"oplog": open(self.config["oplog_output_file"], "wb")}
        tailer_names = []
        profiler_output_files = []

        # Open a file for each profiler client, append client name as suffix
        for client_name in self.profiler_clients:
            # create a file for each (client,db)
            for db in self.config["target_databases"]:
                tailer_name = "%s_%s" % (db, client_name)
                tailer_names.append(tailer_name)
                fname = "%s_%s" % (self.config["output_file"], tailer_name)
                profiler_output_files.append(fname)
                files[tailer_name] = open(fname, "wb")
        tailer_names.append("oplog")

        state = MongoQueryRecorder.RecordingState(tailer_names)

        # Create the working threads that handle tracking and dumping of
        # mongodb activities. On return, these threads will have already
        # started.
        workers_info = self._generate_workers(files, state, start_utc_secs,
                                              end_utc_secs)
        timer_control = self._periodically_report_status(state)

        # Wait until the due time arrives
        while all(s.alive for s in state.tailer_states.values()) \
                and (utils.now_in_utc_secs() < end_utc_secs) \
                and not self.force_quit:
            time.sleep(1)

        # Log the reason for stopping
        utils.LOG.debug(
            "Stopping the recording! All tailers alive: %s; End time passed: %s; Force quit requested: %s.",
            all(s.alive for s in state.tailer_states.values()),
            (utils.now_in_utc_secs() >= end_utc_secs), self.force_quit)

        # Indicate that it's time to stop
        state.timeout = True

        # Wait until all the workers finish
        self._join_workers(state, workers_info)

        # Stop reporting the status
        timer_control.set()
        utils.LOG.info("Preliminary recording completed!")

        # Close all the file handlers
        for f in files.values():
            f.close()

        # Fill the missing insert op details from the oplog
        merge.merge_to_final_output(
            oplog_output_file=self.config["oplog_output_file"],
            profiler_output_files=profiler_output_files,
            output_file=self.config["output_file"])
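Examples #4 and #5 call `get_noncolliding_config()` so an existing recording is not clobbered. One plausible implementation appends a numeric suffix until the names are unused; this is a hypothetical free-function sketch, whereas the real method takes no arguments and reads `self.config`:

    import os

    def get_noncolliding_config(config):
        """Hypothetical sketch: return replacement output-file names
        that do not collide with files already on disk."""
        updates = {}
        for key in ("output_file", "oplog_output_file"):
            candidate = config[key]
            suffix = 0
            while os.path.exists(candidate):
                suffix += 1
                candidate = "%s-%d" % (config[key], suffix)
            updates[key] = candidate
        return updates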
Example #5
File: record.py  Project: aayjaa/flashback
    def record(self):
        """Record the activities in a multithreaded way"""
        start_utc_secs = utils.now_in_utc_secs()
        end_utc_secs = utils.now_in_utc_secs() + self.config["duration_secs"]

        # If overwrite_output_file setting is False, determine the actual name
        # of the output files
        if not self.config["overwrite_output_file"]:
            self.config.update(self.get_noncolliding_config())

        # We'll dump the recorded activities to `files`.
        files = {
            "oplog": open(self.config["oplog_output_file"], "wb")
        }
        tailer_names = []
        profiler_output_files = []

        # Open a file for each profiler client, append client name as suffix
        for client_name in self.profiler_clients:
            # create a file for each (client,db)
            for db in self.config["target_databases"]:
                tailer_name = "%s_%s" % (db, client_name)
                tailer_names.append(tailer_name)
                fname = "%s_%s" % (self.config["output_file"], tailer_name)
                profiler_output_files.append(fname)
                files[tailer_name] = open(fname, "wb")
        tailer_names.append("oplog")

        state = MongoQueryRecorder.RecordingState(tailer_names)

        # Create the working threads that handle tracking and dumping of
        # mongodb activities. On return, these threads will have already
        # started.
        workers_info = self._generate_workers(files, state, start_utc_secs,
                                              end_utc_secs)
        timer_control = self._periodically_report_status(state)

        # Wait until the due time arrives
        while all(s.alive for s in state.tailer_states.values()) \
                and (utils.now_in_utc_secs() < end_utc_secs) \
                and not self.force_quit:
            time.sleep(1)

        # Log the reason for stopping
        utils.LOG.debug("Stopping the recording! All tailers alive: %s; End time passed: %s; Force quit requested: %s.",
            all(s.alive for s in state.tailer_states.values()),
            (utils.now_in_utc_secs() >= end_utc_secs),
            self.force_quit
        )

        # Indicate that it's time to stop
        state.timeout = True

        # Wait until all the workers finish
        self._join_workers(state, workers_info)

        # Stop reporting the status
        timer_control.set()
        utils.LOG.info("Preliminary recording completed!")

        # Close all the file handlers
        for f in files.values():
            f.close()

        # Fill the missing insert op details from the oplog
        merge.merge_to_final_output(
            oplog_output_file=self.config["oplog_output_file"],
            profiler_output_files=profiler_output_files,
            output_file=self.config["output_file"])
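Finally, `_join_workers` has to let every tailer thread drain its output once `state.timeout` is set. A minimal sketch of that join-with-grace-period pattern (hypothetical; the real method also receives `state` and per-worker metadata):

    def join_workers(worker_threads, grace_secs=30):
        # Hypothetical sketch: give each worker a bounded time to flush
        # and exit after state.timeout has been set.
        for worker in worker_threads:
            worker.join(grace_secs)
            if worker.is_alive():
                # join() timed out; the thread never observed the stop flag.
                print("worker %s did not stop within %d secs"
                      % (worker.name, grace_secs))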