Example #1
    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(
            tmpname, createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""
        tmpfile.close()

        if gothash != hash:
            if gothash != "":
                self.ui.warn(
                    _('%s: data corruption (expected %s, got %s)\n') %
                    (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True
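
All of these snippets rely on the same contract: util.atomictempfile writes to a hidden temporary file next to the target and only renames it into place when close() is called, while discard() removes the temporary without ever touching the target path. A minimal standalone sketch of that pattern, assuming the current mercurial.util API (write(), close(), discard()) and a hypothetical helper name:

from mercurial import util

def write_atomically(path, data):
    # Hypothetical helper, not part of Mercurial: write `data` (bytes) to
    # `path` all-or-nothing.  Nothing appears at `path` until close()
    # renames the temporary file into place.
    f = util.atomictempfile(path)
    try:
        f.write(data)
    except Exception:
        f.discard()  # remove the temp file; `path` is left untouched
        raise
    f.close()        # commit: atomically rename the temp file over `path`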
Example #2
    def writeIgnoreFile(self):
        eol = self.doseoln and '\r\n' or '\n'
        out = eol.join(self.ignorelines) + eol
        hasignore = os.path.exists(self.repo.vfs.join(self.ignorefile))

        try:
            f = util.atomictempfile(self.ignorefile, 'wb', createmode=None)
            f.write(out)
            f.close()
            if not hasignore:
                ret = qtlib.QuestionMsgBox(
                    _('New file created'),
                    _('TortoiseHg has created a new '
                      '.hgignore file.  Would you like to '
                      'add this file to the source code '
                      'control repository?'),
                    parent=self)
                if ret:
                    commands.add(hglib.loadui(), self.repo, self.ignorefile)
            shlib.shell_notify([self.ignorefile])
            self.ignoreFilterUpdated.emit()
        except EnvironmentError, e:
            qtlib.WarningMsgBox(_('Unable to write .hgignore file'),
                                hglib.tounicode(str(e)),
                                parent=self)
Example #3
    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + '.tmp'
        with util.atomictempfile(
                tmpname, createmode=self.repo.store.createmode) as tmpfile:
            try:
                gothash = self._getfile(tmpfile, filename, hash)
            except StoreError as err:
                self.ui.warn(err.longmessage())
                gothash = ""

        if gothash != hash:
            if gothash != "":
                self.ui.warn(
                    _('%s: data corruption (expected %s, got %s)\n') %
                    (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True
Example #4
 def removeFile(self, wfile):
     repo = self.repo
     ctx = self.ctx
     if isinstance(ctx, patchctx):
         repo.thgbackup(ctx._path)
         fp = util.atomictempfile(ctx._path, 'wb')
         try:
             if ctx._ph.comments:
                 fp.write('\n'.join(ctx._ph.comments))
                 fp.write('\n\n')
             for file in ctx._fileorder:
                 if file == wfile:
                     continue
                 for chunk in ctx._files[file]:
                     chunk.write(fp)
             fp.close()
         finally:
             del fp
         ctx.invalidate()
     else:
         fullpath = repo.wjoin(wfile)
         repo.thgbackup(fullpath)
         wasadded = wfile in repo[None].added()
         try:
             commands.revert(repo.ui, repo, fullpath, rev='.',
                             no_backup=True)
             if wasadded and os.path.exists(fullpath):
                 os.unlink(fullpath)
         except EnvironmentError:
             qtlib.InfoMsgBox(_("Unable to remove"),
                              _("Unable to remove file %s,\n"
                                "permission denied") %
                                 hglib.tounicode(wfile))
     self.fileModified.emit()
Example #5
def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local store and into the
    user cache.'''
    proto.redirect()

    path = lfutil.storepath(repo, sha)
    util.makedirs(os.path.dirname(path))
    tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

    try:
        try:
            proto.getfile(tmpfp)
            tmpfp._fp.seek(0)
            if sha != lfutil.hexsha1(tmpfp._fp):
                raise IOError(0, _('largefile contents do not match hash'))
            tmpfp.close()
            lfutil.linktousercache(repo, sha)
        except IOError, e:
            repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
                         (sha, e.strerror))
            return wireproto.pushres(1)
    finally:
        tmpfp.discard()

    return wireproto.pushres(0)
Example #6
    def update_parent(path, line, parent):
        line = line - 1 # The line number we're passed will be 1-based
        fp = None

        try:
            fp = open(path)
            data = fp.readlines()
        finally:
            if fp and not fp.closed:
                fp.close()

        #
        # line will be the last line of any continued block, go back
        # to the first removing the continuation as we go.
        #
        while data[line][0].isspace():
            data.pop(line)
            line -= 1

        assert data[line].startswith('default')

        data[line] = "default = %s\n" % parent
        if data[-1] != '\n':
            data.append('\n')

        try:
            fp = util.atomictempfile(path, 'w', 0644)
            fp.writelines(data)
            fp.rename()
        finally:
            if fp and not fp.closed:
                fp.close()
Example #7
def putlfile(repo, proto, sha):
    '''Server command for putting a largefile into a repository's local store
    and into the user cache.'''
    proto.redirect()

    path = lfutil.storepath(repo, sha)
    util.makedirs(os.path.dirname(path))
    tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

    try:
        proto.getfile(tmpfp)
        tmpfp._fp.seek(0)
        if sha != lfutil.hexsha1(tmpfp._fp):
            raise IOError(0, _('largefile contents do not match hash'))
        tmpfp.close()
        lfutil.linktousercache(repo, sha)
    except IOError as e:
        repo.ui.warn(
            _('largefiles: failed to put %s into store: %s\n') %
            (sha, e.strerror))
        return wireproto.pushres(1)
    finally:
        tmpfp.discard()

    return wireproto.pushres(0)
Example #8
def test3_oops():
    try:
        file = atomictempfile()
    except TypeError:
        print "OK"
    else:
        print "expected TypeError"
Example #9
def putlfile(repo, proto, sha):
    '''Server command for putting a largefile into a repository's local store
    and into the user cache.'''
    with proto.mayberedirectstdio() as output:
        path = lfutil.storepath(repo, sha)
        util.makedirs(os.path.dirname(path))
        tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

        try:
            for p in proto.getpayload():
                tmpfp.write(p)
            tmpfp._fp.seek(0)
            if sha != lfutil.hexsha1(tmpfp._fp):
                raise IOError(0, _(b'largefile contents do not match hash'))
            tmpfp.close()
            lfutil.linktousercache(repo, sha)
        except IOError as e:
            repo.ui.warn(
                _(b'largefiles: failed to put %s into store: %s\n') %
                (sha, e.strerror))
            return wireprototypes.pushres(1,
                                          output.getvalue() if output else b'')
        finally:
            tmpfp.discard()

    return wireprototypes.pushres(0, output.getvalue() if output else b'')
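
The putlfile variants above (Examples #5, #7 and #9) all follow the same verify-then-commit pattern: stream the payload into the atomictempfile, seek the underlying file object back to the start, re-hash it, and call close() only if the hash matches; the discard() in the finally clause is then a no-op because the file is already closed. A generic sketch of that pattern, assuming an object that behaves like util.atomictempfile in its default 'w+b' mode (including its internal _fp handle) and a hex SHA-1 string to compare against:

import hashlib

def commit_if_hash_matches(tmpfp, chunks, expected_sha1_hex):
    # Illustrative only: write the payload, verify it, and publish the
    # file only when the digest matches the expected hash.
    for chunk in chunks:
        tmpfp.write(chunk)
    tmpfp._fp.seek(0)
    digest = hashlib.sha1(tmpfp._fp.read()).hexdigest()
    if digest != expected_sha1_hex:
        tmpfp.discard()  # abort: the target file is never created
        return False
    tmpfp.close()        # commit: rename the verified temp into place
    return True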
Example #10
 def _write(self, appname, data):
     s = cPickle.dumps(data)
     f = util.atomictempfile(appname, 'wb', None)
     f.write(s)
     try:
         f.rename()
     except OSError:
         pass # silently ignore these errors
Example #11
 def _write(self, appname, data):
     s = cPickle.dumps(data)
     f = util.atomictempfile(appname, 'wb', None)
     f.write(s)
     try:
         f.close()
     except OSError:
         pass  # silently ignore these errors
Example #12
def dump(data, file_path):
    """Serialize some data to a path atomically.

    This is present because I kept corrupting my revmap by managing to hit ^C
    during the serialization of that file.
    """
    f = hgutil.atomictempfile(file_path, 'w+b', 0644)
    json.dump(_convert(data, _scrub), f)
    f.close()
Example #13
def link(src, dest):
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
Example #15
    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root.  files is a list of (filename, hash)
        tuples.  Return (success, missing), lists of files successfully
        downloaded and those not found in the store.  success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get.  (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        util.makedirs(lfutil.storepath(self.repo, ''))

        at = 0
        available = self.exists(set(hash for (_filename, hash) in files))
        for filename, hash in files:
            ui.progress(_('getting largefiles'),
                        at,
                        unit='lfile',
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(
                    _('%s: largefile %s not available from %s\n') %
                    (filename, hash, self.url))
                missing.append(filename)
                continue

            storefilename = lfutil.storepath(self.repo, hash)
            tmpfile = util.atomictempfile(
                storefilename + '.tmp', createmode=self.repo.store.createmode)

            try:
                hhash = self._getfile(tmpfile, filename, hash)
            except StoreError, err:
                ui.warn(err.longmessage())
                hhash = ""
            tmpfile.close()

            if hhash != hash:
                if hhash != "":
                    ui.warn(
                        _('%s: data corruption (expected %s, got %s)\n') %
                        (filename, hash, hhash))
                util.unlink(storefilename + '.tmp')
                missing.append(filename)
                continue

            util.rename(storefilename + '.tmp', storefilename)
            lfutil.linktousercache(self.repo, hash)
            success.append((filename, hhash))
Example #16
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
Example #17
    def test2_discard(self):
        if os.path.exists('foo'):
            os.remove('foo')
        file = atomictempfile('foo')
        (dir, basename) = os.path.split(file._tempname)

        file.write('yo\n')
        file.discard()

        self.assertFalse(os.path.isfile('foo'))
        self.assertTrue(basename not in os.listdir('.'))
Example #18
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
Example #19
def copytostoreabsolute(repo, file, hash):
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash))
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        util.copymode(file, storepath(repo, hash))
        linktousercache(repo, hash)
Example #20
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
Example #23
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
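
The link() helpers here and in Examples #13 and #27 share one idea: try a hardlink and, if that fails, produce the copy through an atomic temporary file so a half-written destination is never visible. The same idea sketched with only the standard library (an illustration, not Mercurial's actual helper; the rename is atomic on POSIX within one filesystem):

import os
import shutil
import tempfile

def link_or_copy(src, dest):
    # Try a hardlink first; fall back to copying into a temporary file in
    # the destination directory and renaming it into place.
    try:
        os.link(src, dest)
    except OSError:
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dest) or '.')
        try:
            with os.fdopen(fd, 'wb') as dstf, open(src, 'rb') as srcf:
                shutil.copyfileobj(srcf, dstf)
            os.chmod(tmp, os.stat(src).st_mode)
            os.rename(tmp, dest)
        except BaseException:
            os.unlink(tmp)
            raise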
Example #24
def test2_discard():
    if os.path.exists('foo'):
        os.remove('foo')
    file = atomictempfile('foo')
    (dir, basename) = os.path.split(file._tempname)

    file.write('yo\n')
    file.discard()

    assert not os.path.isfile('foo')
    assert basename not in os.listdir('.')
    print 'OK'
Example #25
def writefile(config, path):
    """Write the given config obj to the specified file"""
    f = util.atomictempfile(os.path.realpath(path), 'w')
    try:
        buf = cStringIO.StringIO()
        config.write(buf)
        # normalize line endings
        for line in buf.getvalue().splitlines():
            f.write(line + '\n')
        f.close()
    finally:
        del f  # unlink temp file
Example #27
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
Example #28
    def test1_simple(self):
        if os.path.exists('foo'):
            os.remove('foo')
        file = atomictempfile('foo')
        (dir, basename) = os.path.split(file._tempname)
        self.assertFalse(os.path.isfile('foo'))
        self.assertTrue(basename in glob.glob('.foo-*'))

        file.write('argh\n')
        file.close()

        self.assertTrue(os.path.isfile('foo'))
        self.assertTrue(basename not in glob.glob('.foo-*'))
Example #29
 def write_ignore_lines(self):
     if self.doseoln:
         out = [line + '\r\n' for line in self.ignorelines]
     else:
         out = [line + '\n' for line in self.ignorelines]
     try:
         f = util.atomictempfile(self.ignorefile, 'wb',
                                 createmode=None)
         f.writelines(out)
         f.rename()
     except EnvironmentError, e:
         dialog.error_dialog(self, _('Unable to write .hgignore file'),
                             hglib.tounicode(str(e)))
Example #30
    def writeIgnoreFile(self):
        eol = self.doseoln and '\r\n' or '\n'
        out = eol.join(self.ignorelines) + eol

        try:
            f = util.atomictempfile(self.ignorefile, 'wb', createmode=None)
            f.write(out)
            f.rename()
            shlib.shell_notify([self.ignorefile])
            self.ignoreFilterUpdated.emit()
        except EnvironmentError, e:
            qtlib.WarningMsgBox(_('Unable to write .hgignore file'),
                                hglib.tounicode(str(e)), parent=self)
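
Example #30 commits the .hgignore write with f.rename() while Example #2 does the same job with f.close() (compare also Examples #10 and #11, and Example #29). As far as these snapshots show, that is an API change in Mercurial's atomictempfile rather than two competing idioms: older releases published the temporary file with rename(), newer ones with close(), and discard() took over the abort role. A small compatibility shim, offered as an assumption based on these examples rather than on any official helper:

def commit_atomictempfile(f):
    # Publish an atomictempfile on either API generation: prefer the old
    # rename() if the object still has it, otherwise use close().
    if hasattr(f, 'rename'):
        f.rename()
    else:
        f.close()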
Example #32
    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root.  files is a list of (filename, hash)
        tuples.  Return (success, missing), lists of files successfully
        downloaded and those not found in the store.  success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get.  (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        util.makedirs(lfutil.storepath(self.repo, ''))

        at = 0
        available = self.exists(set(hash for (_filename, hash) in files))
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit='lfile',
                total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(_('%s: largefile %s not available from %s\n')
                        % (filename, hash, self.url))
                missing.append(filename)
                continue

            storefilename = lfutil.storepath(self.repo, hash)
            tmpfile = util.atomictempfile(storefilename + '.tmp',
                                          createmode=self.repo.store.createmode)

            try:
                hhash = self._getfile(tmpfile, filename, hash)
            except StoreError, err:
                ui.warn(err.longmessage())
                hhash = ""
            tmpfile.close()

            if hhash != hash:
                if hhash != "":
                    ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                            % (filename, hash, hhash))
                util.unlink(storefilename + '.tmp')
                missing.append(filename)
                continue

            util.rename(storefilename + '.tmp', storefilename)
            lfutil.linktousercache(self.repo, hash)
            success.append((filename, hhash))
Example #33
def test1_simple():
    if os.path.exists('foo'):
        os.remove('foo')
    file = atomictempfile('foo')
    (dir, basename) = os.path.split(file._tempname)
    assert not os.path.isfile('foo')
    assert basename in glob.glob('.foo-*')

    file.write('argh\n')
    file.close()

    assert os.path.isfile('foo')
    assert basename not in glob.glob('.foo-*')
    print 'OK'
Example #34
def savepinnednodes(repo, newpin, newunpin, fullargs):
    # take a narrowed lock so it does not affect repo lock
    with extutil.flock(repo.svfs.join('obsinhibit.lock'), 'save pinned nodes'):
        orignodes = loadpinnednodes(repo)
        nodes = set(orignodes)
        nodes |= set(newpin)
        nodes -= set(newunpin)
        with util.atomictempfile(repo.svfs.join('obsinhibit')) as f:
            f.write(''.join(nodes))

        desc = lambda s: [short(n) for n in s]
        repo.ui.log('pinnednodes', 'pinnednodes: %r newpin=%r newunpin=%r '
                    'before=%r after=%r\n', fullargs, desc(newpin),
                    desc(newunpin), desc(orignodes), desc(nodes))
Example #35
    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ""))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + ".tmp"
        tmpfile = util.atomictempfile(tmpname, createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError, err:
            self.ui.warn(err.longmessage())
            gothash = ""
Example #36
    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(tmpname,
                                      createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError, err:
            self.ui.warn(err.longmessage())
            gothash = ""
Example #37
    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root.  files is a list of (filename, hash)
        tuples.  Return (success, missing), lists of files successfully
        downloaded and those not found in the store.  success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get.  (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        for filename, hash in files:
            ui.progress(_('getting largefiles'),
                        at,
                        unit='lfile',
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            storefilename = lfutil.storepath(self.repo, hash)
            tmpfile = util.atomictempfile(
                storefilename, createmode=self.repo.store.createmode)

            try:
                hhash = binascii.hexlify(self._getfile(tmpfile, filename,
                                                       hash))
            except StoreError, err:
                ui.warn(err.longmessage())
                hhash = ""

            if hhash != hash:
                if hhash != "":
                    ui.warn(
                        _('%s: data corruption (expected %s, got %s)\n') %
                        (filename, hash, hhash))
                tmpfile.discard()  # no-op if it's already closed
                missing.append(filename)
                continue

            tmpfile.close()
            lfutil.linktousercache(self.repo, hash)
            success.append((filename, hhash))
Example #38
def _loadfileblob(repo, cachepath, path, node):
    filecachepath = os.path.join(cachepath, path, hex(node))
    if not os.path.exists(filecachepath) or os.path.getsize(
            filecachepath) == 0:
        filectx = repo.filectx(path, fileid=node)
        if filectx.node() == repo.nullid:
            repo.changelog = changelog.changelog(repo.svfs)
            filectx = repo.filectx(path, fileid=node)

        text = createfileblob(filectx)
        # TODO configurable compression engines
        text = zlib.compress(text)

        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            dirname = os.path.dirname(filecachepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except OSError as ex:
                    if ex.errno != errno.EEXIST:
                        raise

            f = None
            try:
                f = util.atomictempfile(filecachepath, b"wb")
                f.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if f:
                    f.close()
        finally:
            os.umask(oldumask)
    else:
        with open(filecachepath, b"rb") as f:
            text = f.read()
    return text
Example #39
    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root.  files is a list of (filename, hash)
        tuples.  Return (success, missing), lists of files successfully
        downloaded and those not found in the store.  success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get.  (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit='lfile',
                total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            storefilename = lfutil.storepath(self.repo, hash)
            tmpfile = util.atomictempfile(storefilename,
                                          createmode=self.repo.store.createmode)

            try:
                hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
            except StoreError, err:
                ui.warn(err.longmessage())
                hhash = ""

            if hhash != hash:
                if hhash != "":
                    ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                            % (filename, hash, hhash))
                tmpfile.discard() # no-op if it's already closed
                missing.append(filename)
                continue

            tmpfile.close()
            lfutil.linktousercache(self.repo, hash)
            success.append((filename, hhash))
Example #40
    def write(self, key, data):
        path = os.path.join(self.cachepath, key)
        dirpath = os.path.dirname(path)
        if not os.path.exists(dirpath):
            makedirs(self.cachepath, dirpath, self.uid)

        f = None
        try:
            f = util.atomictempfile(path, 'w')
            f.write(data)
        finally:
            if f:
                f.close()

        if self._validatecachelog:
            if not self._validatekey(path, 'write'):
                raise util.Abort(_("local cache write was corrupted %s") % path)

        stat = os.stat(path)
        if stat.st_uid == self.uid:
            os.chmod(path, 0o0664)
Example #41
    def write(self, key, data):
        path = os.path.join(self.cachepath, key)
        dirpath = os.path.dirname(path)
        if not os.path.exists(dirpath):
            makedirs(self.cachepath, dirpath, self.uid)

        f = None
        try:
            f = util.atomictempfile(path, 'w')
            f.write(data)
        finally:
            if f:
                f.close()

        if self._validatecachelog:
            if not self._validatekey(path, 'write'):
                raise util.Abort(
                    _("local cache write was corrupted %s") % path)

        stat = os.stat(path)
        if stat.st_uid == self.uid:
            os.chmod(path, 0o0664)
Example #42
def _loadfileblob(repo, cachepath, path, node):
    filecachepath = os.path.join(cachepath, path, hex(node))
    if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
        filectx = repo.filectx(path, fileid=node)
        if filectx.node() == nullid:
            repo.changelog = changelog.changelog(repo.svfs)
            filectx = repo.filectx(path, fileid=node)

        text = createfileblob(filectx)
        text = lz4wrapper.lzcompresshc(text)

        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            dirname = os.path.dirname(filecachepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except OSError as ex:
                    if ex.errno != errno.EEXIST:
                        raise

            f = None
            try:
                f = util.atomictempfile(filecachepath, "w")
                f.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if f:
                    f.close()
        finally:
            os.umask(oldumask)
    else:
        with open(filecachepath, "r") as f:
            text = f.read()
    return text
Example #43
    def writeIgnoreFile(self):
        eol = self.doseoln and '\r\n' or '\n'
        out = eol.join(self.ignorelines) + eol
        hasignore = os.path.exists(self.repo.join(self.ignorefile))

        try:
            f = util.atomictempfile(self.ignorefile, 'wb', createmode=None)
            f.write(out)
            f.close()
            if not hasignore:
                ret = qtlib.QuestionMsgBox(_('New file created'),
                                           _('TortoiseHg has created a new '
                                             '.hgignore file.  Would you like to '
                                             'add this file to the source code '
                                             'control repository?'), parent=self)
                if ret:
                    commands.add(ui.ui(), self.repo, self.ignorefile)
            shlib.shell_notify([self.ignorefile])
            self.ignoreFilterUpdated.emit()
        except EnvironmentError, e:
            qtlib.WarningMsgBox(_('Unable to write .hgignore file'),
                                hglib.tounicode(str(e)), parent=self)
Example #44
 def removeFile(self, wfile):
     repo = self.repo
     ctx = self.ctx
     if isinstance(ctx, patchctx):
         repo.thgbackup(ctx._path)
         fp = util.atomictempfile(ctx._path, 'wb')
         try:
             if ctx._ph.comments:
                 fp.write('\n'.join(ctx._ph.comments))
                 fp.write('\n\n')
             for file in ctx._fileorder:
                 if file == wfile:
                     continue
                 for chunk in ctx._files[file]:
                     chunk.write(fp)
             fp.close()
         finally:
             del fp
         ctx.invalidate()
     else:
         fullpath = repo.wjoin(wfile)
         repo.thgbackup(fullpath)
         wasadded = wfile in repo[None].added()
         try:
             commands.revert(repo.ui,
                             repo,
                             fullpath,
                             rev='.',
                             no_backup=True)
             if wasadded and os.path.exists(fullpath):
                 os.unlink(fullpath)
         except EnvironmentError:
             qtlib.InfoMsgBox(
                 _("Unable to remove"),
                 _("Unable to remove file %s,\n"
                   "permission denied") % hglib.tounicode(wfile))
     self.fileModified.emit()
Example #45
 def deleteSelectedChunks(self):
     'delete currently selected chunks'
     repo = self.repo
     chunks = self.diffbrowse.curchunks
     dchunks = [c for c in chunks[1:] if c.selected]
     if not dchunks:
         self.showMessage.emit(_('No deletable chunks'))
         return
     ctx = self.ctx
     kchunks = [c for c in chunks[1:] if not c.selected]
     revertall = False
     if not kchunks:
         if isinstance(ctx, patchctx):
             revertmsg = _('Completely remove file from patch?')
         else:
             revertmsg = _('Revert all file changes?')
         revertall = qtlib.QuestionMsgBox(_('No chunks remain'), revertmsg)
     if isinstance(ctx, patchctx):
         repo.thgbackup(ctx._path)
         fp = util.atomictempfile(ctx._path, 'wb')
         buf = cStringIO.StringIO()
         try:
             if ctx._ph.comments:
                 buf.write('\n'.join(ctx._ph.comments))
                 buf.write('\n\n')
             needsnewline = False
             for wfile in ctx._fileorder:
                 if wfile == self.currentFile:
                     if revertall:
                         continue
                     chunks[0].write(buf)
                     for chunk in kchunks:
                         chunk.write(buf)
                 else:
                     if buf.tell() and buf.getvalue()[-1] != '\n':
                         buf.write('\n')
                     for chunk in ctx._files[wfile]:
                         chunk.write(buf)
             fp.write(buf.getvalue())
             fp.close()
         finally:
             del fp
         ctx.invalidate()
         self.fileModified.emit()
     else:
         path = repo.wjoin(self.currentFile)
         if not os.path.exists(path):
             self.showMessage.emit(_('file has been deleted, refresh'))
             return
         if self.mtime != os.path.getmtime(path):
             self.showMessage.emit(_('file has been modified, refresh'))
             return
         repo.thgbackup(path)
         if revertall:
             commands.revert(repo.ui, repo, path, no_backup=True)
         else:
             wlock = repo.wlock()
             try:
                 # atomictemp can preserve file permission
                 wf = repo.wopener(self.currentFile, 'wb', atomictemp=True)
                 wf.write(self.diffbrowse.origcontents)
                 wf.close()
                 fp = cStringIO.StringIO()
                 chunks[0].write(fp)
                 for c in kchunks:
                     c.write(fp)
                 fp.seek(0)
                 self.runPatcher(fp, self.currentFile, False)
             finally:
                 wlock.release()
         self.fileModified.emit()
Example #46
 def mergeChunks(self, wfile, chunks):
     def isAorR(header):
         for line in header:
             if line.startswith('--- /dev/null'):
                 return True
             if line.startswith('+++ /dev/null'):
                 return True
         return False
     repo = self.repo
     ctx = self.ctx
     if isinstance(ctx, patchctx):
         if wfile in ctx._files:
             patchchunks = ctx._files[wfile]
             if isAorR(chunks[0].header) or isAorR(patchchunks[0].header):
                 qtlib.InfoMsgBox(_('Unable to merge chunks'),
                                 _('Add or remove patches must be merged '
                                   'in the working directory'))
                 return False
             # merge new chunks into existing chunks, sorting on start line
             newchunks = [chunks[0]]
             pidx = nidx = 1
             while pidx < len(patchchunks) or nidx < len(chunks):
                 if pidx == len(patchchunks):
                     newchunks.append(chunks[nidx])
                     nidx += 1
                 elif nidx == len(chunks):
                     newchunks.append(patchchunks[pidx])
                     pidx += 1
                 elif chunks[nidx].fromline < patchchunks[pidx].fromline:
                     newchunks.append(chunks[nidx])
                     nidx += 1
                 else:
                     newchunks.append(patchchunks[pidx])
                     pidx += 1
             ctx._files[wfile] = newchunks
         else:
             # add file to patch
             ctx._files[wfile] = chunks
             ctx._fileorder.append(wfile)
         repo.thgbackup(ctx._path)
         fp = util.atomictempfile(ctx._path, 'wb')
         try:
             if ctx._ph.comments:
                 fp.write('\n'.join(ctx._ph.comments))
                 fp.write('\n\n')
             for file in ctx._fileorder:
                 for chunk in ctx._files[file]:
                     chunk.write(fp)
             fp.close()
             ctx.invalidate()
             self.fileModified.emit()
             return True
         finally:
             del fp
     else:
         # Apply chunks to wfile
         repo.thgbackup(repo.wjoin(wfile))
         fp = cStringIO.StringIO()
         for c in chunks:
             c.write(fp)
         fp.seek(0)
         wlock = repo.wlock()
         try:
             return self.runPatcher(fp, wfile, True)
         finally:
             wlock.release()
Example #48
    def mergeChunks(self, wfile, chunks):
        def isAorR(header):
            for line in header:
                if line.startswith('--- /dev/null'):
                    return True
                if line.startswith('+++ /dev/null'):
                    return True
            return False

        repo = self.repo
        ctx = self.ctx
        if isinstance(ctx, patchctx):
            if wfile in ctx._files:
                patchchunks = ctx._files[wfile]
                if isAorR(chunks[0].header) or isAorR(patchchunks[0].header):
                    qtlib.InfoMsgBox(
                        _('Unable to merge chunks'),
                        _('Add or remove patches must be merged '
                          'in the working directory'))
                    return False
                # merge new chunks into existing chunks, sorting on start line
                newchunks = [chunks[0]]
                pidx = nidx = 1
                while pidx < len(patchchunks) or nidx < len(chunks):
                    if pidx == len(patchchunks):
                        newchunks.append(chunks[nidx])
                        nidx += 1
                    elif nidx == len(chunks):
                        newchunks.append(patchchunks[pidx])
                        pidx += 1
                    elif chunks[nidx].fromline < patchchunks[pidx].fromline:
                        newchunks.append(chunks[nidx])
                        nidx += 1
                    else:
                        newchunks.append(patchchunks[pidx])
                        pidx += 1
                ctx._files[wfile] = newchunks
            else:
                # add file to patch
                ctx._files[wfile] = chunks
                ctx._fileorder.append(wfile)
            repo.thgbackup(ctx._path)
            fp = util.atomictempfile(ctx._path, 'wb')
            try:
                if ctx._ph.comments:
                    fp.write('\n'.join(ctx._ph.comments))
                    fp.write('\n\n')
                for file in ctx._fileorder:
                    for chunk in ctx._files[file]:
                        chunk.write(fp)
                fp.close()
                ctx.invalidate()
                self.fileModified.emit()
                return True
            finally:
                del fp
        else:
            # Apply chunks to wfile
            repo.thgbackup(repo.wjoin(wfile))
            fp = cStringIO.StringIO()
            for c in chunks:
                c.write(fp)
            fp.seek(0)
            wlock = repo.wlock()
            try:
                return self.runPatcher(fp, wfile, True)
            finally:
                wlock.release()