Code Example #1
File: base.py Project: binarydud/GitPython
	def resolve_blobs(self, iter_blobs):
		"""Resolve the blobs given in blob iterator. This will effectively remove the
		index entries of the respective path at all non-null stages and add the given
		blob as new stage null blob.

		For each path there may only be one blob, otherwise a ValueError will be raised
		claiming the path is already at stage 0.

		:raise ValueError: if one of the blobs already existed at stage 0
		:return: self

		:note:
			You will have to write the index manually once you are done, i.e.
			index.resolve_blobs(blobs).write()
		"""
		for blob in iter_blobs:
			stage_null_key = (blob.path, 0)
			if stage_null_key in self.entries:
				raise ValueError( "Path %r already exists at stage 0" % blob.path )
			# END assert blob is not stage 0 already

			# delete all possible stages
			for stage in (1, 2, 3):
				try:
					del self.entries[(blob.path, stage)]
				except KeyError:
					pass
				# END ignore key errors
			# END for each possible stage

			self.entries[stage_null_key] = IndexEntry.from_blob(blob)
		# END for each blob

		return self
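
A minimal usage sketch for resolve_blobs during a merge conflict, assuming a GitPython Repo whose index has unmerged entries; the repository path and the choice of the 'ours' side (stage 2) are illustrative:

from git import Repo

repo = Repo('/path/to/repo')  # hypothetical repository path
index = repo.index

# unmerged_blobs() maps each conflicted path to a list of (stage, Blob)
# pairs; keep one blob per path, here the 'ours' side at stage 2
chosen = [blob
          for stage_blobs in index.unmerged_blobs().values()
          for stage, blob in stage_blobs
          if stage == 2]

# remove all non-null stages, record each blob at stage 0, and write the
# index manually as the docstring requires
index.resolve_blobs(iter(chosen)).write()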
Code Example #2
File: base.py Project: drtagkim/GitPython
    def resolve_blobs(self, iter_blobs):
        """Resolve the blobs given in blob iterator. This will effectively remove the
        index entries of the respective path at all non-null stages and add the given
        blob as new stage null blob.

        For each path there may only be one blob, otherwise a ValueError will be raised
        claiming the path is already at stage 0.

        :raise ValueError: if one of the blobs already existed at stage 0
        :return: self

        :note:
            You will have to write the index manually once you are done, i.e.
            index.resolve_blobs(blobs).write()
        """
        for blob in iter_blobs:
            stage_null_key = (blob.path, 0)
            if stage_null_key in self.entries:
                raise ValueError("Path %r already exists at stage 0" %
                                 blob.path)
            # END assert blob is not stage 0 already

            # delete all possible stages
            for stage in (1, 2, 3):
                try:
                    del self.entries[(blob.path, stage)]
                except KeyError:
                    pass
                # END ignore key errors
            # END for each possible stage

            self.entries[stage_null_key] = IndexEntry.from_blob(blob)
        # END for each blob

        return self
Code Example #3
def read_cache(stream):
    """Read a cache file from the given stream
    :return: tuple(version, entries_dict, extension_data, content_sha)
        * version is the integer version number
        * entries is a dictionary which maps a (path, stage) tuple to the
            IndexEntry at that path and stage
        * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
        * content_sha is a 20 byte sha on all cache file contents"""
    version, num_entries = read_header(stream)
    count = 0
    entries = dict()

    # localize the stream methods for a slight speedup in the read loop
    read = stream.read
    tell = stream.tell
    while count < num_entries:
        beginoffset = tell()
        # ctime/mtime are kept as raw 8-byte fields (seconds + nanoseconds)
        ctime = unpack(">8s", read(8))[0]
        mtime = unpack(">8s", read(8))[0]
        (dev, ino, mode, uid, gid, size, sha, flags) = \
            unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
        path_size = flags & CE_NAMEMASK
        path = read(path_size)

        # each entry record is zero-padded to 8-byte alignment, measured
        # from the start of the entry; skip the padding bytes
        real_size = ((tell() - beginoffset + 8) & ~7)
        read((beginoffset + real_size) - tell())
        entry = IndexEntry(
            (mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
        # entry_key would be the method to use, but we save the effort
        entries[(path, entry.stage)] = entry
        count += 1
    # END for each entry

    # the footer contains extension data and a sha on the content so far
    # Keep the extension footer, and verify we have a sha at the end
    # Extension data format is:
    # 4 bytes ID
    # 4 bytes length of chunk
    # repeated 0 - N times
    extension_data = stream.read(~0)    # ~0 == -1: read everything up to EOF
    assert len(extension_data) > 19, \
        "Index Footer was not at least a sha on content as it was only %i bytes in size" % len(extension_data)

    content_sha = extension_data[-20:]

    # truncate the sha in the end as we will dynamically create it anyway
    extension_data = extension_data[:-20]

    return (version, entries, extension_data, content_sha)
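
Since content_sha is a sha over everything that precedes it in the cache file, it can be verified without parsing any entries; a small sketch, with the index path being illustrative:

import hashlib

# the last 20 bytes of the index file are a SHA-1 over everything before them
with open('/path/to/repo/.git/index', 'rb') as fd:
    raw = fd.read()

content, stored_sha = raw[:-20], raw[-20:]
assert hashlib.sha1(content).digest() == stored_sha, "index checksum mismatch"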
Code Example #4
def read_entry(stream):
    """Return: One entry of the given stream"""
    beginoffset = stream.tell()
    read = stream.read
    ctime = unpack(">8s", read(8))[0]
    mtime = unpack(">8s", read(8))[0]
    (dev, ino, mode, uid, gid, size, sha, flags) = \
        unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
    path_size = flags & CE_NAMEMASK
    path = read(path_size)

    # skip the zero padding that aligns each entry record to 8 bytes
    real_size = ((stream.tell() - beginoffset + 8) & ~7)
    read((beginoffset + real_size) - stream.tell())
    return IndexEntry(
        (mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
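
The alignment arithmetic above can be checked in isolation; a small sketch of the rounding rule, where 62 bytes is the fixed portion of an entry (8 + 8 + 4 * 6 + 20 + 2) and entry_disk_size is a hypothetical helper:

def entry_disk_size(path_size, fixed=62):
    # an entry occupies its fixed fields plus the path, rounded up to a
    # multiple of 8 with at least one padding byte after the path
    return (fixed + path_size + 8) & ~7

assert entry_disk_size(5) == 72  # 67 bytes consumed -> 5 padding bytes
assert entry_disk_size(2) == 72  # 64 bytes consumed -> 8 padding bytes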
Code Example #5
File: base.py Project: binarydud/GitPython
	@classmethod
	def new(cls, repo, *tree_sha):
		""" Merge the given treeish revisions into a new index which is returned.
		This method behaves like git-read-tree --aggressive when doing the merge.

		:param repo: The repository the treeish objects are located in.

		:param tree_sha:
			20 byte or 40 byte tree sha or tree objects 

		:return:
			New IndexFile instance. Its path will be undefined. 
			If you intend to write such a merged Index, supply an alternate file_path 
			to its 'write' method."""
		base_entries = aggressive_tree_merge(repo.odb, [to_bin_sha(str(t)) for t in tree_sha])
		
		inst = cls(repo)
		# convert to entries dict
		entries = dict(izip(((e.path, e.stage) for e in base_entries), 
							(IndexEntry.from_base(e) for e in base_entries)))
		
		inst.entries = entries
		return inst
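
A short usage sketch for merging two trees into a fresh index, assuming a GitPython Repo; the repository path and the 'feature' revision name are illustrative:

import os
from git import Repo, IndexFile

repo = Repo('/path/to/repo')  # hypothetical path

# merge two trees aggressively into a new, path-less IndexFile
merged = IndexFile.new(repo, repo.commit('HEAD').tree,
                       repo.commit('feature').tree)

# the instance has no path of its own, so supply one explicitly when writing
merged.write(os.path.join(repo.git_dir, 'merged_index'))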
Code Example #6
File: base.py Project: drtagkim/GitPython
    def add(self,
            items,
            force=True,
            fprogress=lambda *args: None,
            path_rewriter=None,
            write=True):
        """Add files from the working tree, specific blobs or BaseIndexEntries
        to the index. 

        :param items:
            Multiple types of items are supported, types can be mixed within one call.
            Different types imply a different handling. File paths may generally be
            relative or absolute.

            - path string
                strings denote a relative or absolute path into the repository pointing to
                an existing file, e.g. CHANGES, lib/myfile.ext, '/home/gitrepo/lib/myfile.ext'.

                Paths provided like this must exist. When added, they will be written
                into the object database.

                Path strings may contain globs, such as 'lib/__init__*', or can be directories
                like 'lib'; the latter will add all files within the directory and
                its subdirectories.

                This equals a straight git-add.

                They are added at stage 0

            - Blob or Submodule object
                Blobs are added as they are, assuming a valid mode is set.
                The file they refer to may or may not exist in the file system, but
                must be a path relative to our repository.

                If their sha is null (40*0), their path must exist in the file system
                relative to the git repository, as an object will be created from
                the data at the path.
                The handling then very much equals the way string paths are processed, except that
                the mode you have set will be kept. This allows you to create symlinks
                by setting the mode accordingly and writing the target of the symlink
                directly into the file. This equals a default Linux symlink which
                is not dereferenced automatically, except that it can also be created on
                filesystems that do not support symlinks.

                Please note that globs or directories are not allowed in Blob objects.

                They are added at stage 0

            - BaseIndexEntry or type
                Handling equals that of Blob objects, but the stage may be
                explicitly set. Please note that index entries require binary shas.

        :param force:
            **CURRENTLY INEFFECTIVE**
            If True, files that would otherwise be ignored or excluded are
            added anyway.
            As opposed to the git-add command, we enable this flag by default
            as the API user usually wants the item to be added even though
            it might be excluded.

        :param fprogress:
            Function with signature f(path, done=False, item=item) called for each
            path to be added, once just before it is added (done=False) and
            once after it was added (done=True).
            item is set to the actual item we handle, either a path or a BaseIndexEntry.
            Please note that the processed path is not guaranteed to be present
            in the index already, as the index is currently being processed.

        :param path_rewriter:
            Function with signature (string) func(BaseIndexEntry), returning the
            path to be actually recorded for the object created from
            entry.path. This allows you to write an index which
            is not identical to the layout of the actual files on your hard-disk.
            If not None and ``items`` contain plain paths, these paths will be
            converted to Entries beforehand and passed to the path_rewriter.
            Please note that entry.path is relative to the git repository.

        :param write:
            If True, the index will be written once it was altered. Otherwise
            the changes only exist in memory and are not available to git commands.
        
        :return:
            List(BaseIndexEntries) representing the entries just actually added.

        :raise OSError:
            if a supplied Path did not exist. Please note that BaseIndexEntry
            Objects that do not have a null sha will be added even if their paths
            do not exist.
        """
        # sort the entries into strings and Entries, Blobs are converted to entries
        # automatically
        # paths can be git-added, for everything else we use git-update-index
        entries_added = list()
        paths, entries = self._preprocess_add_items(items)
        if paths and path_rewriter:
            for path in paths:
                abspath = os.path.abspath(path)
                gitrelative_path = abspath[len(self.repo.working_tree_dir) +
                                           1:]
                blob = Blob(self.repo, Blob.NULL_BIN_SHA,
                            stat_mode_to_index_mode(os.stat(abspath).st_mode),
                            to_native_path_linux(gitrelative_path))
                entries.append(BaseIndexEntry.from_blob(blob))
            # END for each path
            del paths[:]
        # END rewrite paths

        def store_path(filepath):
            """Store file at filepath in the database and return the base index entry"""
            st = os.lstat(filepath)  # handles non-symlinks as well
            stream = None
            if S_ISLNK(st.st_mode):
                stream = StringIO(os.readlink(filepath))
            else:
                stream = open(filepath, 'rb')
            # END handle stream
            fprogress(filepath, False, filepath)
            istream = self.repo.odb.store(
                IStream(Blob.type, st.st_size, stream))
            fprogress(filepath, True, filepath)
            return BaseIndexEntry(
                (stat_mode_to_index_mode(st.st_mode), istream.binsha, 0,
                 to_native_path_linux(filepath)))

        # END utility method

        # HANDLE PATHS
        if paths:
            assert len(entries_added) == 0
            added_files = list()
            for filepath in self._iter_expand_paths(paths):
                entries_added.append(store_path(filepath))
            # END for each filepath
        # END path handling

        # HANDLE ENTRIES
        if entries:
            null_mode_entries = [e for e in entries if e.mode == 0]
            if null_mode_entries:
                raise ValueError(
                    "At least one Entry has a null-mode - please use index.remove to remove files for clarity"
                )
            # END null mode should be removed

            # HANDLE ENTRY OBJECT CREATION
            # create objects if required, otherwise go with the existing shas
            null_entries_indices = [
                i for i, e in enumerate(entries)
                if e.binsha == Object.NULL_BIN_SHA
            ]
            if null_entries_indices:
                for ei in null_entries_indices:
                    null_entry = entries[ei]
                    new_entry = store_path(null_entry.path)

                    # update null entry
                    entries[ei] = BaseIndexEntry(
                        (null_entry.mode, new_entry.binsha, null_entry.stage,
                         null_entry.path))
                # END for each entry index
            # END null_entry handling

            # REWRITE PATHS
            # If we have to rewrite the entries, do so now, after we have generated
            # all object sha's
            if path_rewriter:
                for i, e in enumerate(entries):
                    entries[i] = BaseIndexEntry(
                        (e.mode, e.binsha, e.stage, path_rewriter(e)))
                # END for each entry
            # END handle path rewriting

            # just go through the remaining entries and provide progress info
            for i, entry in enumerate(entries):
                progress_sent = i in null_entries_indices
                if not progress_sent:
                    fprogress(entry.path, False, entry)
                    fprogress(entry.path, True, entry)
                # END handle progress
            # END for each entry
            entries_added.extend(entries)
        # END if there are base entries

        # FINALIZE
        # add the new entries to this instance
        for entry in entries_added:
            self.entries[(entry.path, 0)] = IndexEntry.from_base(entry)

        if write:
            self.write()
        # END handle write

        return entries_added
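
A brief usage sketch for the path_rewriter hook, assuming an existing repository containing a 'lib' directory; the 'vendor/' prefix is illustrative:

from git import Repo

repo = Repo('/path/to/repo')  # hypothetical path
index = repo.index

# record every added file under a 'vendor/' prefix in the index while
# leaving the working tree layout untouched
added = index.add(['lib'],
                  path_rewriter=lambda entry: 'vendor/' + entry.path,
                  write=True)
print("%i entries added" % len(added))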
Code Example #7
File: base.py Project: binarydud/GitPython
	def add(self, items, force=True, fprogress=lambda *args: None, path_rewriter=None, 
				write=True):
		"""Add files from the working tree, specific blobs or BaseIndexEntries
		to the index. 

		:param items:
			Multiple types of items are supported, types can be mixed within one call.
			Different types imply a different handling. File paths may generally be
			relative or absolute.

			- path string
				strings denote a relative or absolute path into the repository pointing to
				an existing file, e.g. CHANGES, lib/myfile.ext, '/home/gitrepo/lib/myfile.ext'.

				Paths provided like this must exist. When added, they will be written
				into the object database.

				Path strings may contain globs, such as 'lib/__init__*', or can be directories
				like 'lib'; the latter will add all files within the directory and
				its subdirectories.

				This equals a straight git-add.

				They are added at stage 0

			- Blob or Submodule object
				Blobs are added as they are, assuming a valid mode is set.
				The file they refer to may or may not exist in the file system, but
				must be a path relative to our repository.

				If their sha is null (40*0), their path must exist in the file system
				relative to the git repository, as an object will be created from
				the data at the path.
				The handling then very much equals the way string paths are processed, except that
				the mode you have set will be kept. This allows you to create symlinks
				by setting the mode accordingly and writing the target of the symlink
				directly into the file. This equals a default Linux symlink which
				is not dereferenced automatically, except that it can also be created on
				filesystems that do not support symlinks.

				Please note that globs or directories are not allowed in Blob objects.

				They are added at stage 0

			- BaseIndexEntry or type
				Handling equals that of Blob objects, but the stage may be
				explicitly set. Please note that index entries require binary shas.

		:param force:
			**CURRENTLY INEFFECTIVE**
			If True, files that would otherwise be ignored or excluded are
			added anyway.
			As opposed to the git-add command, we enable this flag by default
			as the API user usually wants the item to be added even though
			it might be excluded.

		:param fprogress:
			Function with signature f(path, done=False, item=item) called for each
			path to be added, once just before it is added (done=False) and
			once after it was added (done=True).
			item is set to the actual item we handle, either a path or a BaseIndexEntry.
			Please note that the processed path is not guaranteed to be present
			in the index already, as the index is currently being processed.

		:param path_rewriter:
			Function with signature (string) func(BaseIndexEntry), returning the
			path to be actually recorded for the object created from
			entry.path. This allows you to write an index which
			is not identical to the layout of the actual files on your hard-disk.
			If not None and ``items`` contain plain paths, these paths will be
			converted to Entries beforehand and passed to the path_rewriter.
			Please note that entry.path is relative to the git repository.

		:param write:
			If True, the index will be written once it was altered. Otherwise
			the changes only exist in memory and are not available to git commands.
		
		:return:
			List(BaseIndexEntries) representing the entries just actually added.

		:raise OSError:
			if a supplied Path did not exist. Please note that BaseIndexEntry
			Objects that do not have a null sha will be added even if their paths
			do not exist.
		"""
		# sort the entries into strings and Entries, Blobs are converted to entries
		# automatically
		# paths can be git-added, for everything else we use git-update-index
		entries_added = list()
		paths, entries = self._preprocess_add_items(items)
		if paths and path_rewriter:
			for path in paths:
				abspath = os.path.abspath(path)
				gitrelative_path = abspath[len(self.repo.working_tree_dir)+1:]
				blob = Blob(self.repo, Blob.NULL_BIN_SHA, 
							stat_mode_to_index_mode(os.stat(abspath).st_mode), 
							to_native_path_linux(gitrelative_path))
				entries.append(BaseIndexEntry.from_blob(blob))
			# END for each path
			del paths[:]
		# END rewrite paths


		def store_path(filepath):
			"""Store file at filepath in the database and return the base index entry"""
			st = os.lstat(filepath)		# handles non-symlinks as well
			stream = None
			if S_ISLNK(st.st_mode):
				stream = StringIO(os.readlink(filepath))
			else:
				stream = open(filepath, 'rb')
			# END handle stream
			fprogress(filepath, False, filepath)
			istream = self.repo.odb.store(IStream(Blob.type, st.st_size, stream))
			fprogress(filepath, True, filepath)
			return BaseIndexEntry((stat_mode_to_index_mode(st.st_mode), 
									istream.binsha, 0, to_native_path_linux(filepath)))
		# END utility method


		# HANDLE PATHS
		if paths:
			assert len(entries_added) == 0
			added_files = list()
			for filepath in self._iter_expand_paths(paths):
				entries_added.append(store_path(filepath))
			# END for each filepath
		# END path handling


		# HANDLE ENTRIES
		if entries:
			null_mode_entries = [ e for e in entries if e.mode == 0 ]
			if null_mode_entries:
				raise ValueError("At least one Entry has a null-mode - please use index.remove to remove files for clarity")
			# END null mode should be removed

			# HANDLE ENTRY OBJECT CREATION
			# create objects if required, otherwise go with the existing shas
			null_entries_indices = [ i for i,e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA ]
			if null_entries_indices:
				for ei in null_entries_indices:
					null_entry = entries[ei]
					new_entry = store_path(null_entry.path)
					
					# update null entry
					entries[ei] = BaseIndexEntry((null_entry.mode, new_entry.binsha, null_entry.stage, null_entry.path))
				# END for each entry index
			# END null_entry handling

			# REWRITE PATHS
			# If we have to rewrite the entries, do so now, after we have generated
			# all object sha's
			if path_rewriter:
				for i,e in enumerate(entries):
					entries[i] = BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e)))
				# END for each entry
			# END handle path rewriting

			# just go through the remaining entries and provide progress info
			for i, entry in enumerate(entries):
				progress_sent = i in null_entries_indices
				if not progress_sent:
					fprogress(entry.path, False, entry)
					fprogress(entry.path, True, entry)
				# END handle progress
			# END for each entry
			entries_added.extend(entries)
		# END if there are base entries

		# FINALIZE
		# add the new entries to this instance
		for entry in entries_added:
			self.entries[(entry.path, 0)] = IndexEntry.from_base(entry)
			
		if write:
			self.write()
		# END handle write
		
		return entries_added