def copy(self, progress):
    # type: (_HistoryFreeProgressLogger) -> None
    """
    Copy every ``(oid, tid, state)`` record produced by iterating
    *self* into the destination, reporting each record to *progress*.

    All records are restored under a single transaction metadata
    object: ``self.trans_meta`` is created lazily when the first
    record arrives and ``tpc_begin`` is invoked exactly once.
    """
    for oid, tid, state in self:
        begin = perf_counter()
        if self.trans_meta is None:
            # First record seen: start the single covering transaction.
            self.trans_meta = _HistoryFreeTransactionMetaData()
            self.trans_meta.first_oid = oid
            self.tpc.tpc_begin(self.trans_meta, tid)
        self.trans_meta.last_oid = oid
        record_size, was_blob = self.restore_one(
            self.trans_meta,
            oid, tid, state
        )
        # Accumulate per-transaction statistics. The *_since_last_log
        # counters are presumably reset by the logging machinery —
        # NOTE(review): confirm against before_major_log()/the logger.
        self.trans_meta.num_records += 1
        self.trans_meta.num_records_since_last_log += 1
        self.trans_meta.num_blobs_since_last_log += was_blob
        self.trans_meta.record_size_since_last_log += record_size
        now = perf_counter()
        # Each record counts as one copied unit containing one record.
        progress.copied_one(now, now - begin, self.trans_meta,
                            1, record_size, was_blob)
    # Perform the final commit if needed.
    self.before_major_log()
def copy(self, progress):
    # type: (_ProgressLogger) -> None
    """
    Iterate the source transactions and copy each one, notifying
    *progress* with per-transaction timing and size information.
    """
    for txn in self.storage_it:
        start = perf_counter()
        record_count, byte_size, blob_count = self(txn)
        finished = perf_counter()
        progress.copied_one(
            finished, finished - start,
            txn, record_count, byte_size, blob_count)
def run_populate():
    """
    Script helper: enable DEBUG logging, print the process PID, run
    :func:`populate`, and print the elapsed wall-clock time.
    """
    import logging
    import os

    logging.basicConfig(level=logging.DEBUG)
    print("PID", os.getpid())
    start = perf_counter()
    populate()
    print("Duration", perf_counter() - start)
def copyTransactionsFrom(self, other):
    """
    Copy all the data from *other* into this storage.

    Chooses a copier implementation based on whether this storage
    keeps history and whether *other* supports record-at-a-time
    iteration, drives it with a progress logger, and always closes
    the copier.
    """
    # Just the interface, not the attribute, in case we have a
    # partial proxy.
    other_has_record_iternext = IRecordIter.providedBy(other) # pylint:disable=no-value-for-parameter
    # The history-free fast path requires record iteration support
    # from the source; otherwise (or when we keep history) fall back
    # to the transaction-by-transaction copier.
    copier_factory = _HistoryFreeCopier
    if self.tpc.keep_history or not other_has_record_iternext:
        copier_factory = _HistoryPreservingCopier
    logger.info(
        "Copying transactions to %s "
        "from %s (supports IStorageCurrentRecordIteration? %s) "
        "using %s",
        self.tpc, other, other_has_record_iternext, copier_factory,
    )

    copier = copier_factory(other, self.blobhelper, self.tpc, self.restore)
    try:
        # Counting can itself be expensive, so log before doing it.
        logger.info("Counting the %s to copy.", copier.units)
        num_txns = len(copier)
        logger.info("Copying %d %s%s",
                    num_txns, copier.units, copier.initial_log_suffix)
        progress = copier.ProgressLogger(num_txns, copier)
        copier.copy(progress)
    finally:
        copier.close()
    now = perf_counter()
    logger.info(
        "Copied transactions: %s",
        progress.display_at(now))
def copied_one(self, now, copy_duration, trans,
               num_txn_records, txn_byte_size, num_txn_blobs):
    # type: (float, float, Any, int, int, int) -> None
    """
    Account for one copied unit and emit throttled progress logs.

    :param now: Timestamp (``perf_counter``) taken after the copy.
    :param copy_duration: Seconds spent copying this unit.
    :param trans: The transaction (or metadata) just copied; only
        passed through to the log-formatting helpers.
    :param num_txn_records: Number of records in this unit.
    :param txn_byte_size: Number of bytes copied for this unit.
    :param num_txn_blobs: Number of blobs copied for this unit.
    """
    entire_stats = self._entire_stats
    interval_stats = self._interval_stats
    # Both the whole-run and the current-interval stats advance.
    entire_stats.units_copied += 1
    interval_stats.units_copied += 1
    total_units_copied = self._entire_stats.units_copied
    entire_stats.total_size += txn_byte_size
    interval_stats.total_size += txn_byte_size

    # Minor (DEBUG) logging only happens when debug is enabled and
    # the _should_minor_log() throttle says it is due.
    if self.debug_enabled and self._should_minor_log(
            now, total_units_copied, txn_byte_size, num_txn_records,
            copy_duration
    ):
        self.minor_log_at = now + self.minor_log_interval
        self.do_minor_log(trans, num_txn_records, txn_byte_size,
                          num_txn_blobs, copy_duration)

    if self._should_major_log(now, total_units_copied):
        # Let the copier do its pre-log work (e.g. a commit) first;
        # that can take a while, so re-read the clock afterwards
        # before scheduling the next major log.
        self.copier.before_major_log()
        now = perf_counter()
        self.log_at = now + self.log_interval
        self.__major_log(
            now,
            self.transaction_display(trans, num_txn_records,
                                     txn_byte_size, num_txn_blobs))
        # A major log starts a fresh statistics interval.
        self._interval_stats = self._IntervalStats(now)
def __init__(self, num_txns, copier):
    """
    :param num_txns: Total number of units that will be copied.
    :param copier: The owning copier driving this logger.
    """
    self.num_txns = num_txns # type: int
    self.copier = copier # type: _AbstractCopier
    started = perf_counter()
    # The whole-run and the current-interval statistics start together.
    self._entire_stats = self._IntervalStats(started)
    self._interval_stats = self._IntervalStats(started)
    # Schedule the first major and minor log messages.
    self.log_at = started + self.log_interval
    self.minor_log_at = started + self.minor_log_interval
    self.debug_enabled = (
        logger.isEnabledFor(logging.DEBUG)
        or type(self).debug_enabled
    )
def __init__(self, num_txns, other_storage, copy):
    """
    :param num_txns: Total number of transactions to copy.
    :param other_storage: The source storage; handed to *copy*.
    :param copy: Callable ``copy(other_storage, trans)`` that copies
        a single transaction.
    """
    self.num_txns = num_txns
    started = perf_counter()
    make_stats = self._IntervalStats
    self._entire_stats = make_stats(started)
    self._interval_stats = make_stats(started)
    # Schedule the first major and minor log messages.
    self.log_at = started + self.log_interval
    self.minor_log_at = started + self.minor_log_interval
    self.debug_enabled = logger.isEnabledFor(logging.DEBUG)
    self._other_storage = other_storage
    self._copy = copy
def bench_multiunion_no_overlap(loops):
    """
    Benchmark ``OidTidMap._multiunion`` over 2000 maps of 250
    disjoint oids each; return total seconds across *loops* runs.
    """
    from relstorage._inthashmap import OidTidMap
    # 2000 maps of 250 unique oids
    # 29.3ms with the BTree sorting
    # 25.2ms with the stdlib sort/unique/erase approach, but copying into a new result
    # vector.
    # 24.7ms when returning the vector in place.
    # Most of the time here is probably in the final C++->Python conversion.
    next_oid = 0
    maps = []
    for _map_num in range(2000):
        current = OidTidMap()
        maps.append(current)
        for _ in range(250):
            next_oid += 1
            current[next_oid] = next_oid

    total = 0
    for _ in range(loops):
        start = perf_counter()
        OidTidMap._multiunion(maps)
        total += perf_counter() - start
    return total
def copyTransactionsFrom(self, other):
    """
    Copy every transaction from *other* into this storage, logging
    progress and always closing the source iterator when possible.
    """
    logger.info("Counting the transactions to copy.")
    other_it = other.iterator()
    logger.debug("Opened the other iterator: %s", other_it)
    num_txns, other_it = self.__get_num_txns_to_copy(other, other_it)
    logger.info("Copying %d transactions", num_txns)

    progress = _ProgressLogger(num_txns, other, self.__copy_transaction)
    try:
        for trans in other_it:
            progress(trans)
    finally:
        # Not every iterator implementation offers close(); best effort.
        close = getattr(other_it, 'close', None)
        if close is not None:
            close()
    now = perf_counter()
    logger.info("Copied transactions: %s", progress.display_at(now))
def __call__(self, trans):
    """
    Copy the single transaction *trans* via the configured copy
    callable, then report the result and elapsed time.
    """
    started = perf_counter()
    outcome = self._copy(self._other_storage, trans)
    finished = perf_counter()
    self._copied(finished, finished - started, trans, outcome)