Example #1
File: bzr.py Project: jelmer/wikkid
 def _get_final_text(self, content, f, parent_revision):
     current_rev = f.last_modified_in_revision
     wt = self.tree
     current_lines = wt.get_file_lines(f.file_id)
     basis = self.branch.repository.revision_tree(parent_revision)
     basis_lines = basis.get_file_lines(f.file_id)
     # Determine the line ending used by the current file content.
     ending = get_line_ending(current_lines)
     # Break the new content into lines.
     new_lines = split_lines(content)
     # Look at the end of the first string.
     new_ending = get_line_ending(new_lines)
     if ending != new_ending:
         # I know this is horribly inefficient, but let's get it working
         # first.
         content = normalize_line_endings(content, ending)
         new_lines = split_lines(content)
     # If the content doesn't end with a newline, add one.
     if len(new_lines) > 0 and not new_lines[-1].endswith(ending):
         new_lines[-1] += ending
     merge = Merge3(basis_lines, new_lines, current_lines)
     result = list(merge.merge_lines())  # merged lines, with conflict markers where regions overlap
     conflicted = ('>>>>>>>' + ending) in result
     if conflicted:
         raise UpdateConflicts(''.join(result), current_rev)
     return result
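The three-way merge above is driven by Merge3; a minimal sketch of its behaviour, assuming bzrlib is importable (the standalone merge3 package on PyPI exposes the same interface):

from bzrlib.merge3 import Merge3
from bzrlib.osutils import split_lines

base = split_lines("one\ntwo\nthree\n")
ours = split_lines("one\nTWO\nthree\n")      # edit to line 2
theirs = split_lines("one\ntwo\nTHREE\n")    # edit to line 3

# Non-overlapping edits merge cleanly; overlapping edits produce
# '<<<<<<<' / '=======' / '>>>>>>>' blocks, which is exactly what
# _get_final_text scans for before raising UpdateConflicts.
merged = list(Merge3(base, ours, theirs).merge_lines())
print("".join(merged))    # one / TWO / THREE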
Example #2
    def insert_record_stream(self, stream):
        """Insert a record stream into this versioned file.

        :param stream: A stream of records to insert. 
        :return: None
        :seealso VersionedFile.get_record_stream:
        """
        adapters = {}
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key[0]], self)
            # adapt to non-tuple interface
            parents = [parent[0] for parent in record.parents]
            if record.storage_kind == 'fulltext':
                self.add_lines(record.key[0], parents,
                    split_lines(record.get_bytes_as('fulltext')))
            else:
                adapter_key = record.storage_kind, 'fulltext'
                try:
                    adapter = adapters[adapter_key]
                except KeyError:
                    adapter_factory = adapter_registry.get(adapter_key)
                    adapter = adapter_factory(self)
                    adapters[adapter_key] = adapter
                lines = split_lines(adapter.get_bytes(
                    record, record.get_bytes_as(record.storage_kind)))
                try:
                    self.add_lines(record.key[0], parents, lines)
                except RevisionAlreadyPresent:
                    pass
Example #3
    def insert_record_stream(self, stream):
        """Insert a record stream into this versioned file.

        :param stream: A stream of records to insert. 
        :return: None
        :seealso VersionedFile.get_record_stream:
        """
        adapters = {}
        for record in stream:
            # Raise an error when a record is missing.
            if record.storage_kind == 'absent':
                raise RevisionNotPresent([record.key[0]], self)
            # adapt to non-tuple interface
            parents = [parent[0] for parent in record.parents]
            if record.storage_kind == 'fulltext':
                self.add_lines(record.key[0], parents,
                               split_lines(record.get_bytes_as('fulltext')))
            else:
                adapter_key = record.storage_kind, 'fulltext'
                try:
                    adapter = adapters[adapter_key]
                except KeyError:
                    adapter_factory = adapter_registry.get(adapter_key)
                    adapter = adapter_factory(self)
                    adapters[adapter_key] = adapter
                lines = split_lines(
                    adapter.get_bytes(record,
                                      record.get_bytes_as(
                                          record.storage_kind)))
                try:
                    self.add_lines(record.key[0], parents, lines)
                except RevisionAlreadyPresent:
                    pass
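A hedged sketch of how such a stream is typically produced and consumed; the source and target names are placeholders, and source.keys() assumes a VersionedFiles-style store:

# Copy every text from one store into another via a record stream.
keys = source.keys()
stream = source.get_record_stream(keys, 'unordered', True)  # include delta closure
target.insert_record_stream(stream)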
Example #4
 def insert_record_stream(self, stream):
     adapters = {}
     for record in stream:
         # Raise an error when a record is missing.
         if record.storage_kind == 'absent':
             raise errors.RevisionNotPresent([record.key[0]], self)
         # adapt to non-tuple interface
         if record.storage_kind == 'fulltext':
             self.add_lines(
                 record.key, None,
                 osutils.split_lines(record.get_bytes_as('fulltext')))
         else:
             adapter_key = record.storage_kind, 'fulltext'
             try:
                 adapter = adapters[adapter_key]
             except KeyError:
                 adapter_factory = adapter_registry.get(adapter_key)
                 adapter = adapter_factory(self)
                 adapters[adapter_key] = adapter
             lines = osutils.split_lines(
                 adapter.get_bytes(record,
                                   record.get_bytes_as(
                                       record.storage_kind)))
             try:
                 self.add_lines(record.key, None, lines)
             except RevisionAlreadyPresent:
                 pass
Example #5
def make_delta(source_bytes, target_bytes):
    """Create a delta from source to target."""
    if type(source_bytes) is not str:
        raise TypeError("source is not a str")
    if type(target_bytes) is not str:
        raise TypeError("target is not a str")
    line_locations = LinesDeltaIndex(osutils.split_lines(source_bytes))
    delta, _ = line_locations.make_delta(osutils.split_lines(target_bytes), bytes_length=len(target_bytes))
    return "".join(delta)
Example #6
def make_delta(source_bytes, target_bytes):
    """Create a delta from source to target."""
    if type(source_bytes) is not str:
        raise TypeError('source is not a str')
    if type(target_bytes) is not str:
        raise TypeError('target is not a str')
    line_locations = LinesDeltaIndex(osutils.split_lines(source_bytes))
    delta, _ = line_locations.make_delta(osutils.split_lines(target_bytes),
                                         bytes_length=len(target_bytes))
    return ''.join(delta)
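Both variants of make_delta lean on the split_lines contract assumed throughout these examples; a minimal illustration, assuming bzrlib.osutils is importable:

from bzrlib import osutils

# Unlike str.splitlines(), split_lines keeps the terminating newline on each
# line, so joining the pieces reproduces the original text byte for byte.
lines = osutils.split_lines('a\nb\nc\n')
print(lines)                          # ['a\n', 'b\n', 'c\n']
assert ''.join(lines) == 'a\nb\nc\n'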
Example #7
    def force_break_corrupt(self, corrupt_info_lines):
        """Release a lock that has been corrupted.

        This is very similar to force_break, except it doesn't assume that
        self.peek() can work.

        :param corrupt_info_lines: the lines of the corrupted info file, used
            to check that the lock hasn't changed between reading the (corrupt)
            info file and calling force_break_corrupt.
        """
        # XXX: this copes with unparseable info files, but what about missing
        # info files?  Or missing lock dirs?
        self._check_not_locked()
        tmpname = '%s/broken.%s.tmp' % (self.path, rand_chars(20))
        self.transport.rename(self._held_dir, tmpname)
        # check that we actually broke the right lock, not someone else;
        # there's a small race window between checking it and doing the
        # rename.
        broken_info_path = tmpname + self.__INFO_NAME
        broken_content = self.transport.get_bytes(broken_info_path)
        broken_lines = osutils.split_lines(broken_content)
        if broken_lines != corrupt_info_lines:
            raise LockBreakMismatch(self, broken_lines, corrupt_info_lines)
        self.transport.delete(broken_info_path)
        self.transport.rmdir(tmpname)
        result = lock.LockResult(self.transport.abspath(self.path))
        for hook in self.hooks['lock_broken']:
            hook(result)
Example #8
    def force_break_corrupt(self, corrupt_info_lines):
        """Release a lock that has been corrupted.

        This is very similar to force_break, except it doesn't assume that
        self.peek() can work.

        :param corrupt_info_lines: the lines of the corrupted info file, used
            to check that the lock hasn't changed between reading the (corrupt)
            info file and calling force_break_corrupt.
        """
        # XXX: this copes with unparseable info files, but what about missing
        # info files?  Or missing lock dirs?
        self._check_not_locked()
        tmpname = '%s/broken.%s.tmp' % (self.path, rand_chars(20))
        self.transport.rename(self._held_dir, tmpname)
        # check that we actually broke the right lock, not someone else;
        # there's a small race window between checking it and doing the
        # rename.
        broken_info_path = tmpname + self.__INFO_NAME
        broken_content = self.transport.get_bytes(broken_info_path)
        broken_lines = osutils.split_lines(broken_content)
        if broken_lines != corrupt_info_lines:
            raise LockBreakMismatch(self, broken_lines, corrupt_info_lines)
        self.transport.delete(broken_info_path)
        self.transport.rmdir(tmpname)
        result = lock.LockResult(self.transport.abspath(self.path))
        for hook in self.hooks['lock_broken']:
            hook(result)
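A hedged sketch of the calling side, modelled on how break_lock would hand the corrupt lines back; the file_lines attribute on LockCorrupt is an assumption, not confirmed by the snippet above:

try:
    holder_info = lock_dir.peek()
except LockCorrupt as e:
    # peek() could not parse the info file; pass its raw lines back so
    # force_break_corrupt can confirm the lock has not changed meanwhile.
    lock_dir.force_break_corrupt(e.file_lines)  # attribute name assumed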
Example #9
File: bzr.py Project: ilius/starcal
def getShortStatByTrees(repo, old_tree, tree):
	files_changed = 0
	insertions = 0
	deletions = 0
	####
	tree.lock_read()
	for (
		file_id,
		(old_path, new_path),
		changed_content,
		versioned,
		parent,
		name,
		(old_kind, new_kind),
		executable,
	) in tree.iter_changes(old_tree):
		if changed_content:
			#for kind in (old_kind, new_kind):
			#	if not kind in (None, "file", "symlink", "directory"):
			#		print("kind", old_kind, new_kind)
			if new_kind in ("file", "symlink"):
				files_changed += 1
				text = tree.get_file_text(file_id)
				if "\x00" not in text[:1024]:## FIXME
					if old_kind is None:
						insertions += len(split_lines(text))
					elif old_kind in ("file", "symlink"):
						old_text = old_tree.get_file_text(file_id)
						seq = SequenceMatcher(
							None,
							split_lines(old_text),
							split_lines(text),
						)
						for op, i1, i2, j1, j2 in seq.get_opcodes():
							if op == "equal":
								continue
							#if not op in ("insert", "delete", "replace"):
							#	print("op", op)
							insertions += (j2 - j1)
							deletions += (i2 - i1)
			elif new_kind is None:
				if old_kind in ("file", "symlink"):
					files_changed += 1
					old_text = old_tree.get_file_text(file_id)
					if "\x00" not in old_text[:1024]:## FIXME
						deletions += len(split_lines(old_text))
	return files_changed, insertions, deletions
Example #10
File: bzr.py Project: ilius/starcal
def getShortStatByTrees(repo, old_tree, tree):
    files_changed = 0
    insertions = 0
    deletions = 0
    ####
    tree.lock_read()
    for (
            file_id,
            (old_path, new_path),
            changed_content,
            versioned,
            parent,
            name,
            (old_kind, new_kind),
            executable,
    ) in tree.iter_changes(old_tree):
        if changed_content:
            #for kind in (old_kind, new_kind):
            #	if not kind in (None, "file", "symlink", "directory"):
            #		print("kind", old_kind, new_kind)
            if new_kind in ("file", "symlink"):
                files_changed += 1
                text = tree.get_file_text(file_id)
                if "\x00" not in text[:1024]:  ## FIXME
                    if old_kind is None:
                        insertions += len(split_lines(text))
                    elif old_kind in ("file", "symlink"):
                        old_text = old_tree.get_file_text(file_id)
                        seq = SequenceMatcher(
                            None,
                            split_lines(old_text),
                            split_lines(text),
                        )
                        for op, i1, i2, j1, j2 in seq.get_opcodes():
                            if op == "equal":
                                continue
                            #if not op in ("insert", "delete", "replace"):
                            #	print("op", op)
                            insertions += (j2 - j1)
                            deletions += (i2 - i1)
            elif new_kind is None:
                if old_kind in ("file", "symlink"):
                    files_changed += 1
                    old_text = old_tree.get_file_text(file_id)
                    if "\x00" not in old_text[:1024]:  ## FIXME
                        deletions += len(split_lines(old_text))
    return files_changed, insertions, deletions
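A hedged usage sketch for the helper above; the branch location and the choice of parent revision are placeholders. Note that the helper takes a read lock on tree and never releases it, so callers presumably unlock or discard the tree themselves.

from bzrlib.branch import Branch

branch = Branch.open('.')
repo = branch.repository
new_tree = repo.revision_tree(branch.last_revision())
old_tree = repo.revision_tree(new_tree.get_parent_ids()[0])
files_changed, insertions, deletions = getShortStatByTrees(repo, old_tree, new_tree)
print('%d files changed, %d insertions(+), %d deletions(-)'
      % (files_changed, insertions, deletions))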
Example #11
 def from_info_file_bytes(cls, info_file_bytes):
     """Construct from the contents of the held file."""
     lines = osutils.split_lines(info_file_bytes)
     try:
         stanza = rio.read_stanza(lines)
     except ValueError as e:
         mutter('Corrupt lock info file: %r', lines)
         raise LockCorrupt("could not parse lock info file: " + str(e),
             lines)
Example #12
 def from_info_file_bytes(cls, info_file_bytes):
     """Construct from the contents of the held file."""
     lines = osutils.split_lines(info_file_bytes)
     try:
         stanza = rio.read_stanza(lines)
     except ValueError as e:
         mutter('Corrupt lock info file: %r', lines)
         raise LockCorrupt("could not parse lock info file: " + str(e),
                           lines)
Example #13
 def _edit_file(self, file_id, work_tree_lines):
     """
     :param file_id: id of the file to edit.
     :param work_tree_lines: Line contents of the file in the working tree.
     :return: (lines, change_region_count), where lines is the new line
         content of the file, and change_region_count is the number of
         changed regions.
     """
     lines = osutils.split_lines(self.change_editor.edit_file(file_id))
     return lines, self._count_changed_regions(work_tree_lines, lines)
Example #14
 def _edit_file(self, file_id, work_tree_lines):
     """
     :param file_id: id of the file to edit.
     :param work_tree_lines: Line contents of the file in the working tree.
     :return: (lines, change_region_count), where lines is the new line
         content of the file, and change_region_count is the number of
         changed regions.
     """
     lines = osutils.split_lines(self.change_editor.edit_file(file_id))
     return lines, self._count_changed_regions(work_tree_lines, lines)
Example #15
def getShortStatByTrees(repo, old_tree, tree):
    files_changed = 0
    insertions = 0
    deletions = 0
    ####
    tree.lock_read()
    for (file_id, (old_path, new_path), changed_content, versioned, parent,
            name, (old_kind, new_kind), executable) in tree.iter_changes(old_tree):
        if changed_content:
            #for kind in (old_kind, new_kind):
            #    if not kind in (None, 'file', 'symlink', 'directory'):
            #        print('kind', old_kind, new_kind)
            if new_kind in ('file', 'symlink'):
                files_changed += 1
                text = tree.get_file_text(file_id)
                if '\x00' not in text[:1024]:  ## FIXME
                    if old_kind is None:
                        insertions += len(split_lines(text))
                    elif old_kind in ('file', 'symlink'):
                        old_text = old_tree.get_file_text(file_id)
                        seq = SequenceMatcher(
                            None,
                            split_lines(old_text),
                            split_lines(text),
                        )
                        for op, i1, i2, j1, j2 in seq.get_opcodes():
                            if op == 'equal':
                                continue
                            #if not op in ('insert', 'delete', 'replace'):
                            #    print('op', op)
                            insertions += (j2 - j1)
                            deletions += (i2 - i1)
            elif new_kind is None:
                if old_kind in ('file', 'symlink'):
                    files_changed += 1
                    old_text = old_tree.get_file_text(file_id)
                    if '\x00' not in old_text[:1024]:  ## FIXME
                        deletions += len(split_lines(old_text))
    return files_changed, insertions, deletions
Example #16
def getShortStatByTrees(repo, old_tree, tree):
    files_changed = 0
    insertions = 0
    deletions = 0
    ####
    tree.lock_read()
    for (file_id, (old_path, new_path), changed_content, versioned, parent,
            name, (old_kind, new_kind), executable) in tree.iter_changes(old_tree):
        if changed_content:
            #for kind in (old_kind, new_kind):
            #    if not kind in (None, 'file', 'symlink', 'directory'):
            #        print('kind', old_kind, new_kind)
            if new_kind in ('file', 'symlink'):
                files_changed += 1
                text = tree.get_file_text(file_id)
                if '\x00' not in text[:1024]:  ## FIXME
                    if old_kind is None:
                        insertions += len(split_lines(text))
                    elif old_kind in ('file', 'symlink'):
                        old_text = old_tree.get_file_text(file_id)
                        seq = SequenceMatcher(
                            None,
                            split_lines(old_text),
                            split_lines(text),
                        )
                        for op, i1, i2, j1, j2 in seq.get_opcodes():
                            if op == 'equal':
                                continue
                            #if not op in ('insert', 'delete', 'replace'):
                            #    print('op', op)
                            insertions += (j2 - j1)
                            deletions += (i2 - i1)
            elif new_kind is None:
                if old_kind in ('file', 'symlink'):
                    files_changed += 1
                    old_text = old_tree.get_file_text(file_id)
                    if '\x00' not in old_text[:1024]:  ## FIXME
                        deletions += len(split_lines(old_text))
    return files_changed, insertions, deletions
Example #17
    def add_special_text(self, key, parent_keys, text):
        """Add a specific text to the graph.

        This is used to add a text which is not otherwise present in the
        versioned file. (e.g. a WorkingTree injecting 'current:' into the
        graph to annotate the edited content.)

        :param key: The key to use to request this text be annotated
        :param parent_keys: The parents of this text
        :param text: A string containing the content of the text
        """
        self._parent_map[key] = parent_keys
        self._text_cache[key] = osutils.split_lines(text)
        self._heads_provider = None
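A hedged sketch of the pattern the docstring describes, roughly how a working tree annotates uncommitted content; the annotator, file_id and revision names are placeholders:

annotator = branch.repository.texts.get_annotator()
text_key = (file_id, 'current:')
annotator.add_special_text(text_key, [(file_id, last_committed_revision)],
                           working_tree_text)
for key, line in annotator.annotate_flat(text_key):
    # key[-1] is the revision id held responsible for this line.
    print(key[-1], line)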
Example #18
 def _flush_insert(self, start_linenum, end_linenum, new_lines, out_lines, index_lines):
     """Add an 'insert' request to the data stream."""
     bytes_to_insert = "".join(new_lines[start_linenum:end_linenum])
     insert_length = len(bytes_to_insert)
     # Each insert instruction is at most 127 bytes long
     for start_byte in xrange(0, insert_length, 127):
         insert_count = min(insert_length - start_byte, 127)
         out_lines.append(chr(insert_count))
         # Don't index the 'insert' instruction
         index_lines.append(False)
         insert = bytes_to_insert[start_byte : start_byte + insert_count]
         as_lines = osutils.split_lines(insert)
         out_lines.extend(as_lines)
         index_lines.extend([True] * len(as_lines))
Example #19
    def make_repo_with_extra_ghost_index(self):
        """Make a corrupt repository.
        
        It will contain one revision, 'revision-id'.  The knit index will claim
        that it has one parent, 'incorrect-parent', but the revision text will
        claim it has no parents.

        Note: only the *cache* of the knit index is corrupted.  Thus the
        corruption will only last while the repository is locked.  For this
        reason, the returned repo is locked.
        """
        if not isinstance(self.repository_format, RepositoryFormatKnit):
            # XXX: Broken revision graphs can happen to weaves too, but they're
            # pretty deprecated.  Ideally these tests should apply to any repo
            # where repo.revision_graph_can_have_wrong_parents() is True, but
            # at the moment we only know how to corrupt knit repos.
            raise TestNotApplicable(
                "%s isn't a knit format" % self.repository_format)

        repo = self.make_repository('broken')
        repo.lock_write()
        repo.start_write_group()
        try:
            inv = inventory.Inventory(revision_id='revision-id')
            inv.root.revision = 'revision-id'
            inv_sha1 = repo.add_inventory('revision-id', inv, [])
            if repo.supports_rich_root():
                root_id = inv.root.file_id
                repo.texts.add_lines((root_id, 'revision-id'), [], [])
            revision = _mod_revision.Revision('revision-id',
                committer='*****@*****.**', timestamp=0,
                inventory_sha1=inv_sha1, timezone=0, message='message',
                parent_ids=[])
            # Manually add the revision text using the RevisionStore API, with
            # bad parents.
            rev_text = repo._serializer.write_revision_to_string(revision)
            repo.revisions.add_lines((revision.revision_id,),
                [('incorrect-parent',)],
                osutils.split_lines(rev_text))
        except:
            repo.abort_write_group()
            repo.unlock()
            raise
        else:
            repo.commit_write_group()
            repo.unlock()

        repo.lock_write()
        self.addCleanup(repo.unlock)
        return repo
Example #20
    def make_repo_with_extra_ghost_index(self):
        """Make a corrupt repository.

        It will contain one revision, 'revision-id'.  The knit index will claim
        that it has one parent, 'incorrect-parent', but the revision text will
        claim it has no parents.

        Note: only the *cache* of the knit index is corrupted.  Thus the
        corruption will only last while the repository is locked.  For this
        reason, the returned repo is locked.
        """
        if not isinstance(self.repository_format, RepositoryFormatKnit):
            # XXX: Broken revision graphs can happen to weaves too, but they're
            # pretty deprecated.  Ideally these tests should apply to any repo
            # where repo.revision_graph_can_have_wrong_parents() is True, but
            # at the moment we only know how to corrupt knit repos.
            raise TestNotApplicable(
                "%s isn't a knit format" % self.repository_format)

        repo = self.make_repository('broken')
        repo.lock_write()
        repo.start_write_group()
        try:
            inv = inventory.Inventory(revision_id='revision-id')
            inv.root.revision = 'revision-id'
            inv_sha1 = repo.add_inventory('revision-id', inv, [])
            if repo.supports_rich_root():
                root_id = inv.root.file_id
                repo.texts.add_lines((root_id, 'revision-id'), [], [])
            revision = _mod_revision.Revision('revision-id',
                committer='*****@*****.**', timestamp=0,
                inventory_sha1=inv_sha1, timezone=0, message='message',
                parent_ids=[])
            # Manually add the revision text using the RevisionStore API, with
            # bad parents.
            rev_text = repo._serializer.write_revision_to_string(revision)
            repo.revisions.add_lines((revision.revision_id,),
                [('incorrect-parent',)],
                osutils.split_lines(rev_text))
        except:
            repo.abort_write_group()
            repo.unlock()
            raise
        else:
            repo.commit_write_group()
            repo.unlock()

        repo.lock_write()
        self.addCleanup(repo.unlock)
        return repo
Example #21
 def insert_record_stream(self, stream):
     adapters = {}
     for record in stream:
         # Raise an error when a record is missing.
         if record.storage_kind == 'absent':
             raise errors.RevisionNotPresent([record.key[0]], self)
         # adapt to non-tuple interface
         if record.storage_kind == 'fulltext':
             self.add_lines(record.key, None,
                 osutils.split_lines(record.get_bytes_as('fulltext')))
         else:
             adapter_key = record.storage_kind, 'fulltext'
             try:
                 adapter = adapters[adapter_key]
             except KeyError:
                 adapter_factory = adapter_registry.get(adapter_key)
                 adapter = adapter_factory(self)
                 adapters[adapter_key] = adapter
             lines = osutils.split_lines(adapter.get_bytes(
                 record, record.get_bytes_as(record.storage_kind)))
             try:
                 self.add_lines(record.key, None, lines)
             except RevisionAlreadyPresent:
                 pass
Example #22
 def _flush_insert(self, start_linenum, end_linenum, new_lines, out_lines,
                   index_lines):
     """Add an 'insert' request to the data stream."""
     bytes_to_insert = ''.join(new_lines[start_linenum:end_linenum])
     insert_length = len(bytes_to_insert)
     # Each insert instruction is at most 127 bytes long
     for start_byte in xrange(0, insert_length, 127):
         insert_count = min(insert_length - start_byte, 127)
         out_lines.append(chr(insert_count))
         # Don't index the 'insert' instruction
         index_lines.append(False)
         insert = bytes_to_insert[start_byte:start_byte + insert_count]
         as_lines = osutils.split_lines(insert)
         out_lines.extend(as_lines)
         index_lines.extend([True] * len(as_lines))
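The 127-byte chunking above is easy to verify in isolation; a minimal sketch with no bzrlib types involved:

payload = 'x' * 300
instructions = []
for start in range(0, len(payload), 127):
    chunk = payload[start:start + 127]
    # Each insert instruction is one length byte followed by the raw bytes.
    instructions.append(chr(len(chunk)) + chunk)
print([ord(piece[0]) for piece in instructions])    # [127, 127, 46]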
Example #23
def import_git_blob(repo, mapping, path, blob, inv, parent_invs, executable):
    """Import a git blob object into a bzr repository.

    :param repo: bzr repository
    :param path: Path in the tree
    :param blob: A git blob
    """
    file_id = mapping.generate_file_id(path)
    text_revision = inv.revision_id
    repo.texts.add_lines((file_id, text_revision),
        [(file_id, p[file_id].revision) for p in parent_invs if file_id in p],
        osutils.split_lines(blob.data))
    ie = inv.add_path(path, "file", file_id)
    ie.revision = text_revision
    ie.text_size = len(blob.data)
    ie.text_sha1 = osutils.sha_string(blob.data)
    ie.executable = executable
Example #24
 def get_file_lines(self, revision_id, file_id):
     record = self.repo.texts.get_record_stream([(file_id, revision_id)],
         'unordered', True).next()
     if record.storage_kind == 'absent':
         raise errors.RevisionNotPresent(record.key, self.repo)
     return osutils.split_lines(record.get_bytes_as('fulltext'))
Example #25
 def get_lines(self, key):
     return osutils.split_lines(self.repository.get_inventory_xml(key))
Example #26
 def get_lines(self, key):
     return osutils.split_lines(self.repository.get_revision_xml(key))
Example #27
 def get_lines(self, key):
     return osutils.split_lines(self.repository.get_inventory_xml(key))
Example #28
 def get_lines(self, key):
     return osutils.split_lines(self.repository.get_signature_text(key))
Example #29
 def get_lines(self, key):
     return osutils.split_lines(self.repository.get_revision_xml(key))
Example #30
 def get_file_lines(self, revision_id, file_id):
     """Get the lines stored for a file in a given revision."""
     revtree = self.repo.revision_tree(revision_id)
     return osutils.split_lines(revtree.get_file_text(file_id))
Example #31
 def get_lines(self, key):
     return osutils.split_lines(self.repository.get_signature_text(key))
Example #32
 def _get_lines(self, file_id):
     """Get the lines for a file-id."""
     return osutils.split_lines(self._get_data(file_id))
Example #33
 def get_file_lines(self, file_id):
     return osutils.split_lines(self.get_file_text(file_id))