def create_50k_index(self):
    """Create a knit index file with 50,000 entries.

    This isn't super realistic, but it *is* big :)
    The file 'test.kndx' will be created.
    """
    entry_size = 200
    tip = generate_ids.gen_revision_id('*****@*****.**')
    # First entry is a fulltext at offset 0; everything after is a delta.
    versions = [(tip, ('fulltext',), 0, entry_size, [])]
    offset = entry_size
    merge_base = None
    for idx in xrange(49999):
        if merge_base is None:
            parents = [tip]
        else:
            parents = [tip, merge_base]
        # Every 8th entry arranges for the *next* entry to be a merge.
        if idx % 8 == 0:
            merge_base = tip
        else:
            merge_base = None
        tip = generate_ids.gen_revision_id('*****@*****.**')
        versions.append((tip, ('line-delta',), offset, entry_size, parents))
        offset += entry_size
    t = self.get_transport()
    kndx = knit._KnitIndex(t, 'test.kndx', 'w',
                           create=True, delay_create=True)
    kndx.add_versions(versions)
def build_20k_dirstate_with_parents(self, num_parents):
    """Build a DirState file with 20k records and N parents.

    With 1 parent, this is equivalent to after a simple commit. With 2 it
    is equivalent to after a merge.
    """
    # Every record claims to have last changed in the same revision,
    # which supposedly occurs in the history of the current trees.
    last_changed_id = generate_ids.gen_revision_id('*****@*****.**')
    parent_revision_ids = [generate_ids.gen_revision_id('*****@*****.**')
                           for _ in xrange(num_parents)]
    # Start with a dirstate file that has no parents at all.
    state = self.build_helper([(10, 0), (10, 0), (10, 20)])
    try:
        # Invasively update the internals of DirState for speed: the
        # only public api takes RevisionTree objects, which would
        # require building real inventories, etc.
        if num_parents > 0:
            for entry in state._iter_entries():
                (minikind, fingerprint, size,
                 is_exec, packed_stat) = entry[1][0]
                # One extra record per parent, each pointing at the
                # same last-changed revision.
                for _ in parent_revision_ids:
                    entry[1].append((minikind, fingerprint, size,
                                     is_exec, last_changed_id))
        state._parents = parent_revision_ids
        state._ghosts = []
        state.save()
    finally:
        state.unlock()
    return state
def assertGenRevisionId(self, regex, username, timestamp=None):
    """gen_revision_id should create a revision id matching the regex"""
    rev_id = generate_ids.gen_revision_id(username, timestamp)
    self.assertContainsRe(rev_id, '^' + regex + '$')
    # Must be a utf8 revision id (str), never a unicode object.
    self.assertIsInstance(rev_id, str)
    # gen_revision_id promises ascii-only ids; decode raises otherwise.
    rev_id.decode('ascii')
def assertGenRevisionId(self, regex, username, timestamp=None):
    """gen_revision_id should create a revision id matching the regex"""
    revision_id = generate_ids.gen_revision_id(username, timestamp)
    anchored = '^' + regex + '$'
    self.assertContainsRe(revision_id, anchored)
    # A utf8 revision_id is a str; a unicode object would be a bug.
    self.assertIsInstance(revision_id, str)
    # Decoding as ascii proves the id is pure ascii, as promised.
    revision_id.decode('ascii')
def regenerate_default_revid(repository, revid):
    """Generate a revision id for the rebase of an existing revision.

    :param repository: Repository in which the revision is present.
    :param revid: Revision id of the revision that is being rebased.
    :return: new revision id.
    """
    # The null revision rebases onto itself.
    if revid == NULL_REVISION:
        return NULL_REVISION
    original = repository.get_revision(revid)
    # Reuse the original committer and timestamp so the new id is
    # derived from the same inputs as the old one.
    return gen_revision_id(original.committer, original.timestamp)
def gen_revision_id(self):
    """Generate a revision id.

    Subclasses may override this to produce deterministic ids say.
    """
    committer_info = self.command.committer
    # Perhaps 'who' being the person running the import is ok? If so,
    # it might be a bit quicker and give slightly better compression?
    who = self._format_name_email("committer", committer_info[0],
                                  committer_info[1])
    return generate_ids.gen_revision_id(who, committer_info[2])