def add_new_changeset(self, changeset):
    """Add the new CHANGESET to the graph and also to the databases."""

    # Trace the addition when debug-level logging is enabled.
    if logger.is_on(logger.DEBUG):
        logger.debug('Adding changeset %r' % (changeset,))

    # Record the changeset in the in-memory graph first, then persist it.
    self.add_changeset(changeset)
    self.store_changeset(changeset)
def summarize_symbol_transforms(self):
    """Log a NORMAL-level summary of all recorded symbol transforms.

    For each (old_name, new_name) pair in self.symbol_transform_counts,
    report how many files were affected.  A new_name of None means the
    symbol was ignored rather than renamed.  Nothing is logged if there
    were no transforms or if NORMAL logging is disabled.
    """
    if self.symbol_transform_counts and logger.is_on(logger.NORMAL):
        logger.normal('Summary of symbol transforms:')
        # sorted() instead of items() + list.sort(): dict.items() is a
        # view object on Python 3 and has no .sort() method; sorted()
        # behaves identically on Python 2.
        for ((old_name, new_name), count) in \
                sorted(self.symbol_transform_counts.items()):
            if new_name is None:
                logger.normal(' "%s" ignored in %d files' % (old_name, count,))
            else:
                logger.normal(
                    ' "%s" transformed to "%s" in %d files'
                    % (old_name, new_name, count,)
                )
def delete_changeset(self, changeset):
    """Remove CHANGESET from the graph and also from the databases.

    Note that CHANGESET is deliberately *not* removed from
    self._cvs_item_to_changeset_id: in practice the CVSItems in
    CHANGESET are always re-added as part of a new CHANGESET, which
    overwrites the stale entries anyway."""

    if logger.is_on(logger.DEBUG):
        logger.debug('Removing changeset %r' % (changeset,))

    changeset_id = changeset.id
    del self[changeset_id]
    del self._changeset_db[changeset_id]
def check_for_garbage():
    """Run a collection pass and log any unreachable objects found.

    The garbage collector is normally turned off for this program
    because no circular dependencies are created, so running it would be
    a waste of time.  This sanity check forces one collection and emits
    a DEBUG-level report if anything unreachable turned up.

    The gc calls are wrapped in try/except for consistency with the
    other check_for_garbage variants: alternative Python implementations
    implement garbage collection differently, and any resulting
    AttributeError/NotImplementedError is deliberately ignored.
    """
    try:
        gc.set_debug(gc.DEBUG_SAVEALL)
        gc_count = gc.collect()
        if gc_count:
            if logger.is_on(logger.DEBUG):
                logger.debug(
                    'INTERNAL: %d unreachable object(s) were garbage collected:'
                    % (gc_count,)
                )
                for g in gc.garbage:
                    logger.debug(' %s' % (g,))
            # Drop the saved garbage so it can't accumulate across calls.
            del gc.garbage[:]
    except (AttributeError, NotImplementedError):
        # Other Python implementations implement garbage collection
        # differently, so if errors occur just ignore them.
        pass
def check_for_garbage():
    # The garbage collector has been turned off because this program
    # shouldn't need it (no circular dependencies are created), making
    # it a waste of time.  As a sanity check, force one collection pass
    # here and report anything unreachable at debug level.
    gc.set_debug(gc.DEBUG_SAVEALL)
    collected = gc.collect()
    if not collected:
        return
    if logger.is_on(logger.DEBUG):
        logger.debug(
            'INTERNAL: %d unreachable object(s) were garbage collected:'
            % (collected,)
        )
        for garbage_obj in gc.garbage:
            logger.debug(' %s' % (garbage_obj,))
    del gc.garbage[:]
def summarize_symbol_transforms(self):
    """Log a NORMAL-level summary of all recorded symbol transforms.

    Each (old_name, new_name) -> count entry in
    self.symbol_transform_counts is reported; new_name is None for
    symbols that were ignored rather than renamed.  Silent when there
    are no transforms or NORMAL logging is off.
    """
    if self.symbol_transform_counts and logger.is_on(logger.NORMAL):
        logger.normal('Summary of symbol transforms:')
        # Use sorted() rather than calling .sort() on the result of
        # items(): on Python 3, dict.items() returns a view with no
        # .sort() method, while sorted() works on both 2 and 3.
        transforms = sorted(self.symbol_transform_counts.items())
        for ((old_name, new_name), count) in transforms:
            if new_name is None:
                logger.normal(' "%s" ignored in %d files' % (
                    old_name, count,
                ))
            else:
                logger.normal(' "%s" transformed to "%s" in %d files' % (
                    old_name, new_name, count,
                ))
def check_for_garbage(self):
    """Check for any unreachable objects.

    Generate a DEBUG-level warning if any were found."""

    try:
        gc.set_debug(gc.DEBUG_SAVEALL)
        collected = gc.collect()
        if collected:
            if logger.is_on(logger.DEBUG):
                logger.debug(
                    'INTERNAL: %d unreachable object(s) were garbage collected:'
                    % (collected,)
                )
                for obj in gc.garbage:
                    logger.debug(' %s' % (obj,))
            del gc.garbage[:]
    except (AttributeError, NotImplementedError):
        # Garbage collection works differently on other Python
        # implementations; errors of this kind are simply ignored.
        pass
def get(self, timestamp, change_expected):
    """Return a reasonable timestamp derived from TIMESTAMP.

    Push TIMESTAMP into the future if necessary to ensure that it is at
    least one second later than every other timestamp that has been
    returned by previous calls to this method.

    If CHANGE_EXPECTED is not True, then log a message if the timestamp
    has to be changed."""

    if timestamp > self.max_timestamp:
        # A timestamp in the future is assumed to be bogus.  Pull the
        # result back to just after the last value handed out, so that a
        # single bad timestamp cannot drag all later ones forward.
        #
        # This is not a complete cure for bogus timestamps: such a
        # timestamp still affects changeset ordering, and a changeset
        # carrying one is held back until all changesets with earlier
        # timestamps are committed, even if other changesets with even
        # earlier timestamps depend on this one.
        self.timestamp += 1.0
        if not change_expected:
            logger.warn(
                'Timestamp "%s" is in the future; changed to "%s".'
                % (
                    time.asctime(time.gmtime(timestamp)),
                    time.asctime(time.gmtime(self.timestamp)),
                )
            )
    elif timestamp < self.timestamp + 1.0:
        # Too close to (or before) the previous result; bump by one
        # second to keep the returned sequence strictly increasing.
        self.timestamp += 1.0
        if not change_expected and logger.is_on(logger.VERBOSE):
            logger.verbose(
                'Timestamp "%s" adjusted to "%s" to ensure monotonicity.'
                % (
                    time.asctime(time.gmtime(timestamp)),
                    time.asctime(time.gmtime(self.timestamp)),
                )
            )
    else:
        self.timestamp = timestamp

    return self.timestamp
def check_for_garbage(self):
    """Check for any unreachable objects.

    Generate a DEBUG-level warning if any were found."""

    try:
        gc.set_debug(gc.DEBUG_SAVEALL)
        unreachable = gc.collect()
        if not unreachable:
            return
        if logger.is_on(logger.DEBUG):
            logger.debug(
                'INTERNAL: %d unreachable object(s) were garbage collected:'
                % (unreachable,)
            )
            for item in gc.garbage:
                logger.debug(' %s' % (item,))
        del gc.garbage[:]
    except (AttributeError, NotImplementedError):
        # Other Python implementations implement garbage collection
        # differently, so if errors occur just ignore them.
        pass
def get(self, timestamp, change_expected):
    """Return a reasonable timestamp derived from TIMESTAMP.

    Push TIMESTAMP into the future if necessary to ensure that it is at
    least one second later than every other timestamp that has been
    returned by previous calls to this method.

    If CHANGE_EXPECTED is not True, then log a message if the timestamp
    has to be changed."""

    # The smallest value we are still allowed to hand out.
    next_allowed = self.timestamp + 1.0

    if timestamp > self.max_timestamp:
        # Timestamps beyond max_timestamp are assumed bogus; shift them
        # backwards so a single bad value cannot force all subsequent
        # timestamps further into the future.  (This does not fully
        # solve the bogus-timestamp problem: such a timestamp still
        # affects changeset ordering, delaying its changeset until all
        # earlier-stamped changesets commit, even when changesets with
        # even earlier timestamps depend on this one.)
        self.timestamp = next_allowed
        if not change_expected:
            logger.warn(
                'Timestamp "%s" is in the future; changed to "%s".' % (
                    time.asctime(time.gmtime(timestamp)),
                    time.asctime(time.gmtime(self.timestamp)),
                )
            )
    elif timestamp < next_allowed:
        # Enforce strict monotonicity with a one-second step.
        self.timestamp = next_allowed
        if not change_expected and logger.is_on(logger.VERBOSE):
            logger.verbose(
                'Timestamp "%s" adjusted to "%s" to ensure monotonicity.' % (
                    time.asctime(time.gmtime(timestamp)),
                    time.asctime(time.gmtime(self.timestamp)),
                )
            )
    else:
        self.timestamp = timestamp

    return self.timestamp
def check_for_garbage():
    # The garbage collector has been turned off: this program creates
    # no circular dependencies, so collection would only waste time.
    # As a sanity check, run one collection pass anyway and emit a
    # debug-level warning if any unreachable objects are discovered.
    try:
        gc.set_debug(gc.DEBUG_SAVEALL)
        n_collected = gc.collect()
        if n_collected:
            if logger.is_on(logger.DEBUG):
                logger.debug(
                    'INTERNAL: %d unreachable object(s) were garbage collected:'
                    % (n_collected,)
                )
                for leftover in gc.garbage:
                    logger.debug(' %s' % (leftover,))
            del gc.garbage[:]
    except (AttributeError, NotImplementedError):
        # Alternative Python implementations handle garbage collection
        # differently; ignore any such errors.
        pass