Example #1
    def _set_cache_(self, attr):
        if attr == "entries":
            # read the current index
            # try memory map for speed
            lfd = LockedFD(self._file_path)
            try:
                fd = lfd.open(write=False, stream=False)
            except OSError:
                lfd.rollback()
                # in new repositories, there may be no index, which means we are empty
                self.entries = dict()
                return
            # END exception handling

            # Here it comes: on windows in python 2.5, memory maps aren't closed properly
            # Hence we are in trouble if we try to delete a file that is memory mapped,
            # which happens during read-tree.
            # In this case, we will just read the memory in directly.
            # It's insanely bad ... I am disappointed!
            allow_mmap = (os.name != 'nt' or sys.version_info[1] > 5)
            stream = file_contents_ro(fd, stream=True, allow_mmap=allow_mmap)

            try:
                self._deserialize(stream)
            finally:
                lfd.rollback()
                # The handles will be closed on destruction
            # END read from default index on demand
        else:
            super(IndexFile, self)._set_cache_(attr)
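The pattern above is the read side of the LockedFD protocol: open the descriptor read-only, hand it to file_contents_ro, and call rollback() once deserialization is done so the lock is released without ever touching the original file. Below is a minimal sketch of that lifecycle, assuming the gitdb-flavoured LockedFD and file_contents_ro used in these snippets (both importable from gitdb.util); read_index_bytes is a hypothetical helper, not part of any library.

from gitdb.util import LockedFD, file_contents_ro


def read_index_bytes(path):
    """Hypothetical helper: return the file's bytes, or b'' if it does not exist."""
    lfd = LockedFD(path)
    try:
        fd = lfd.open(write=False, stream=False)  # raises OSError if the file is missing
    except OSError:
        lfd.rollback()
        return b""  # e.g. a new repository without an index: treat as empty
    try:
        # memory-map where possible, falling back to a plain read
        stream = file_contents_ro(fd, stream=True, allow_mmap=True)
        return stream.read()
    finally:
        lfd.rollback()  # read-only access: release the lock, never commit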
Example #2
    def write(self, file_path=None, ignore_tree_extension_data=False):
        """Write the current state to our file path or to the given one

        :param file_path:
            If None, we will write to our stored file path from which we have
            been initialized. Otherwise we write to the given file path.
            Please note that this will change the file_path of this index to
            the one you gave.

        :param ignore_tree_extension_data:
            If True, the TREE type extension data read in the index will not
            be written to disk. Use this if you have altered the index and
            would like to use git-write-tree afterwards to create a tree
            representing your written changes.
            If this data is present in the written index, git-write-tree
            will instead write the stored/cached tree.
            Alternatively, use IndexFile.write_tree() to handle this case
            automatically

        :return: self"""
        # make sure we have our entries read before getting a write lock
        # else it would be done when streaming. This can happen
        # if one doesn't change the index, but writes it right away
        self.entries
        lfd = LockedFD(file_path or self._file_path)
        stream = lfd.open(write=True, stream=True)

        self._serialize(stream, ignore_tree_extension_data)

        lfd.commit()

        # make sure we represent what we have written
        if file_path is not None:
            self._file_path = file_path

        return self
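The write side mirrors the read side: open(write=True) creates a <path>.lock file next to the target, all data goes into the lock file, and commit() swaps it in place of the original, while rollback() discards it and leaves the original untouched. A minimal sketch of that lifecycle, again assuming gitdb's LockedFD; write_atomically is a hypothetical helper used only for illustration.

from gitdb.util import LockedFD


def write_atomically(path, data):
    """Hypothetical helper: write bytes to `path` through a lock file."""
    lfd = LockedFD(path)
    stream = lfd.open(write=True, stream=True)  # creates path + '.lock'
    ok = False
    try:
        stream.write(data)
        ok = True
    finally:
        if ok:
            lfd.commit()    # move the lock file over the original
        else:
            lfd.rollback()  # drop the lock file, the original stays as it was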
Example #3
    def to_file(self, filepath):
        """Write the contents of the reflog instance to a file at the given filepath.
        :param filepath: path to file, parent directories are assumed to exist"""
        lfd = LockedFD(filepath)
        assure_directory_exists(filepath, is_file=True)

        fp = lfd.open(write=True, stream=True)
        try:
            self._serialize(fp)
            lfd.commit()
        except Exception:
            # on failure it rolls back automatically, but we make it clear
            lfd.rollback()
            raise
Example #4
    def _set_cache_(self, attr):
        if attr == "entries":
            # read the current index
            # try memory map for speed
            lfd = LockedFD(self._file_path)
            try:
                fd = lfd.open(write=False, stream=False)
            except OSError:
                lfd.rollback()
                # in new repositories, there may be no index, which means we are empty
                self.entries = dict()
                return
            # END exception handling

            stream = file_contents_ro(fd, stream=True, allow_mmap=True)

            try:
                self._deserialize(stream)
            finally:
                lfd.rollback()
                # The handles will be closed on destruction
            # END read from default index on demand
        else:
            super(IndexFile, self)._set_cache_(attr)
Example #5
    def set_reference(self, ref, logmsg=None):
        """Set ourselves to the given ref. It will stay a symbol if the ref is a Reference.
        Otherwise an Object, given as Object instance or refspec, is assumed and if valid,
        will be set which effectively detaches the reference if it was a purely
        symbolic one.

        :param ref: SymbolicReference instance, Object instance or refspec string
            Only if the ref is a SymbolicRef instance, we will point to it. Everything
            else is dereferenced to obtain the actual object.
        :param logmsg: If set to a string, the message will be used in the reflog.
            Otherwise, a reflog entry is not written for the changed reference.
            The previous commit of the entry will be the commit we point to now.

            See also: log_append()

        :return: self
        :note: This symbolic reference will not be dereferenced. For that, see
            ``set_object(...)``"""
        write_value = None
        obj = None
        if isinstance(ref, SymbolicReference):
            write_value = "ref: %s" % ref.path
        elif isinstance(ref, Object):
            obj = ref
            write_value = ref.hexsha
        elif isinstance(ref, str):
            try:
                obj = self.repo.rev_parse(ref + "^{}")  # optionally deref tags
                write_value = obj.hexsha
            except (BadObject, BadName) as e:
                raise ValueError("Could not extract object from %s" %
                                 ref) from e
            # END end try string
        else:
            raise ValueError("Unrecognized Value: %r" % ref)
        # END try commit attribute

        # typecheck
        if obj is not None and self._points_to_commits_only and obj.type != Commit.type:
            raise TypeError("Require commit, got %r" % obj)
        # END verify type

        oldbinsha = None
        if logmsg is not None:
            try:
                oldbinsha = self.commit.binsha
            except ValueError:
                oldbinsha = Commit.NULL_BIN_SHA
            # END handle non-existing
        # END retrieve old hexsha

        fpath = self.abspath
        assure_directory_exists(fpath, is_file=True)

        lfd = LockedFD(fpath)
        fd = lfd.open(write=True, stream=True)
        ok = False
        try:
            fd.write(write_value.encode('ascii') + b'\n')
            lfd.commit()
            ok = True
        finally:
            if not ok:
                lfd.rollback()
        # Adjust the reflog
        if logmsg is not None:
            self.log_append(oldbinsha, logmsg)

        return self
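The behaviour of the three accepted argument types is easiest to see in use. A hedged usage sketch, assuming GitPython is installed, the working directory is a repository, and a 'master' branch exists (all names here are illustrative):

from git import Repo

repo = Repo(".")
head = repo.head  # HEAD is a SymbolicReference

# A SymbolicReference keeps HEAD symbolic, i.e. attached to the branch.
head.set_reference(repo.heads.master, logmsg="checkout: moving to master")
print(head.is_detached)  # False

# A commit object (or a refspec string) is dereferenced and detaches HEAD.
head.set_reference(repo.heads.master.commit, logmsg="detaching at master tip")
print(head.is_detached)  # True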
Example #6
    def test_lockedfd(self):
        my_file = tempfile.mktemp()
        orig_data = b"hello"
        new_data = b"world"
        my_file_fp = open(my_file, "wb")
        my_file_fp.write(orig_data)
        my_file_fp.close()

        try:
            lfd = LockedFD(my_file)
            lockfilepath = lfd._lockfilepath()

            # cannot end before it was started
            self.assertRaises(AssertionError, lfd.rollback)
            self.assertRaises(AssertionError, lfd.commit)

            # open for writing
            assert not os.path.isfile(lockfilepath)
            wfd = lfd.open(write=True)
            assert lfd._fd is wfd
            assert os.path.isfile(lockfilepath)

            # write data and fail
            os.write(wfd, new_data)
            lfd.rollback()
            assert lfd._fd is None
            self._cmp_contents(my_file, orig_data)
            assert not os.path.isfile(lockfilepath)

            # additional call doesn't fail
            lfd.commit()
            lfd.rollback()

            # test reading
            lfd = LockedFD(my_file)
            rfd = lfd.open(write=False)
            assert os.read(rfd, len(orig_data)) == orig_data

            assert os.path.isfile(lockfilepath)
            # deletion rolls back
            del lfd
            assert not os.path.isfile(lockfilepath)

            # write data - concurrently
            lfd = LockedFD(my_file)
            olfd = LockedFD(my_file)
            assert not os.path.isfile(lockfilepath)
            wfdstream = lfd.open(write=True,
                                 stream=True)  # this time as stream
            assert os.path.isfile(lockfilepath)
            # another one fails
            self.assertRaises(IOError, olfd.open)

            wfdstream.write(new_data)
            lfd.commit()
            assert not os.path.isfile(lockfilepath)
            self._cmp_contents(my_file, new_data)

            # could test automatic _end_writing on destruction
        finally:
            os.remove(my_file)
        # END final cleanup

        # try non-existing file for reading
        lfd = LockedFD(tempfile.mktemp())
        try:
            lfd.open(write=False)
        except OSError:
            assert not os.path.exists(lfd._lockfilepath())
        else:
            self.fail("expected OSError")
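The test exercises the lock-file protocol itself: a writer creates <path>.lock exclusively, a second writer fails while that lock exists, commit() swaps the lock file in for the original, and rollback() (or destruction of the LockedFD) removes it. For illustration only, here is a minimal stand-alone sketch of that write-side protocol using just the standard library; TinyLockedFD is not the real class.

import os


class TinyLockedFD:
    """Illustrative stand-in for LockedFD's write path, not the real implementation."""

    def __init__(self, filepath):
        self._filepath = filepath
        self._lockpath = filepath + ".lock"
        self._fd = None

    def open_write(self):
        # O_EXCL makes a second concurrent writer fail while the lock file exists
        self._fd = os.open(self._lockpath, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
        return self._fd

    def commit(self):
        # swap the fully written lock file in place of the original (atomic rename)
        os.close(self._fd)
        os.replace(self._lockpath, self._filepath)
        self._fd = None

    def rollback(self):
        # discard the half-written lock file; the original is left untouched
        os.close(self._fd)
        os.remove(self._lockpath)
        self._fd = None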