Code example #1
File: __init__.py  Project: Nephyrin/bzexport
def consumestreamclone(repo, fp):
    """Apply the contents from a streaming clone file.

    This code is copied from Mercurial. Until Mercurial 3.5, this code was
    a closure in wireproto.py and not consumable by extensions.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
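
Note: the function above parses the version 1 stream-clone framing: one
"filecount totalbytes" header line, then for each file a "name\0size" line
followed by exactly size raw bytes. A minimal standalone sketch of that
framing (parse_streamclone_v1 is a hypothetical helper, not Mercurial's API):

import io

def parse_streamclone_v1(fp):
    # Header line: "<filecount> <totalbytes>\n"
    total_files, total_bytes = map(int, fp.readline().split(b' ', 1))
    for _ in range(total_files):
        # Per-file header "<name>\0<size>\n", then <size> raw bytes.
        name, size = fp.readline().split(b'\0', 1)
        yield name, fp.read(int(size))

payload = b'1 5\n' + b'data/foo.i\x005\n' + b'hello'
for name, data in parse_streamclone_v1(io.BytesIO(payload)):
    print(name, len(data))  # b'data/foo.i' 5
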
Code example #2
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(
            _('%d files to transfer, %s of data\n') %
            (filecount, util.bytecount(bytecount)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=bytecount)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(filecount):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'),
                                     handled_bytes,
                                     total=bytecount)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(
            _('transferred %s in %.1f seconds (%s/sec)\n') %
            (util.bytecount(bytecount), elapsed,
             util.bytecount(bytecount / elapsed)))
    finally:
        lock.release()
Code example #3
File: bundleclone.py  Project: djmitche/build-puppet
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "streamout" and applies it to the specified
    repository.

    Like "streamout," the status line added by the wire protocol is not handled
    by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=bytecount)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(filecount):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes, total=bytecount)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))
    finally:
        lock.release()
Code example #4
File: repack.py  Project: dothq/mozillabuild
def _deletebigpacks(repo, folder, files):
    """Deletes packfiles that are bigger than ``packs.maxpacksize``.

    Returns ``files`` with the removed files omitted."""
    maxsize = repo.ui.configbytes("packs", "maxpacksize")
    if maxsize <= 0:
        return files

    # This only considers datapacks today, but we could broaden it to include
    # historypacks.
    VALIDEXTS = [".datapack", ".dataidx"]

    # Either an oversize index or datapack will trigger cleanup of the whole
    # pack:
    oversized = {
        os.path.splitext(path)[0]
        for path, ftype, stat in files
        if (stat.st_size > maxsize and (os.path.splitext(path)[1] in VALIDEXTS)
            )
    }

    for rootfname in oversized:
        rootpath = os.path.join(folder, rootfname)
        for ext in VALIDEXTS:
            path = rootpath + ext
            repo.ui.debug('removing oversize packfile %s (%s)\n' %
                          (path, util.bytecount(os.stat(path).st_size)))
            os.unlink(path)
    return [row for row in files if os.path.basename(row[0]) not in oversized]
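
Because a pack's index and data file share a basename, an oversize hit on
either one marks the whole pack for deletion. A tiny standalone illustration
of that splitext grouping (made-up sizes, plain tuples standing in for the
(path, ftype, stat) rows):

import os

VALIDEXTS = [".datapack", ".dataidx"]
maxsize = 50
files = [("abc.datapack", 10), ("abc.dataidx", 99), ("def.datapack", 5)]

oversized = {os.path.splitext(path)[0] for path, size in files
             if size > maxsize and os.path.splitext(path)[1] in VALIDEXTS}
print(oversized)  # {'abc'}: abc.datapack and abc.dataidx are both removed
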
Code example #5
File: fix.py  Project: Smosker/mercurial
def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
    """"Constructs the list of files to be fixed at specific revisions

    It is up to the caller how to consume the work items, and the only
    dependence between them is that replacement revisions must be committed in
    topological order. Each work item represents a file in the working copy or
    in some revision that should be fixed and written back to the working copy
    or into a replacement revision.
    """
    workqueue = []
    numitems = collections.defaultdict(int)
    maxfilesize = ui.configbytes('fix', 'maxfilesize')
    for rev in revstofix:
        fixctx = repo[rev]
        match = scmutil.match(fixctx, pats, opts)
        for path in pathstofix(ui, repo, pats, opts, match, basectxs[rev],
                               fixctx):
            if path not in fixctx:
                continue
            fctx = fixctx[path]
            if fctx.islink():
                continue
            if fctx.size() > maxfilesize:
                ui.warn(
                    _('ignoring file larger than %s: %s\n') %
                    (util.bytecount(maxfilesize), path))
                continue
            workqueue.append((rev, path))
            numitems[rev] += 1
    return workqueue, numitems
Code example #6
File: blobstore.py  Project: CJX32/my_blog
def transfer(chunk):
    for obj in chunk:
        objsize = obj.get(b'size', 0)
        if self.ui.verbose:
            if action == b'download':
                msg = _(b'lfs: downloading %s (%s)\n')
            elif action == b'upload':
                msg = _(b'lfs: uploading %s (%s)\n')
            self.ui.note(
                msg % (obj.get(b'oid'), util.bytecount(objsize))
            )
        retry = self.retry
        while True:
            try:
                self._basictransfer(obj, action, localstore)
                yield 1, obj.get(b'oid')
                break
            except socket.error as ex:
                if retry > 0:
                    self.ui.note(
                        _(b'lfs: failed: %r (remaining retry %d)\n')
                        % (stringutil.forcebytestr(ex), retry)
                    )
                    retry -= 1
                    continue
                raise
Code example #7
File: repack.py  Project: davidshepherd7/dotfiles
def _deletebigpacks(repo, folder, files):
    """Deletes packfiles that are bigger than ``packs.maxpacksize``.

    Returns ``files`` with the removed files omitted."""
    maxsize = repo.ui.configbytes("packs", "maxpacksize")
    if maxsize <= 0:
        return files

    # This only considers datapacks today, but we could broaden it to include
    # historypacks.
    VALIDEXTS = [".datapack", ".dataidx"]

    # Either an oversize index or datapack will trigger cleanup of the whole
    # pack:
    oversized = set([os.path.splitext(path)[0] for path, ftype, stat in files
        if (stat.st_size > maxsize and (os.path.splitext(path)[1]
                                        in VALIDEXTS))])

    for rootfname in oversized:
        rootpath = os.path.join(folder, rootfname)
        for ext in VALIDEXTS:
            path = rootpath + ext
            repo.ui.debug('removing oversize packfile %s (%s)\n' %
                          (path, util.bytecount(os.stat(path).st_size)))
            os.unlink(path)
    return [row for row in files if os.path.basename(row[0]) not in oversized]
Code example #8
    def _batch(self, pointers, localstore, action):
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get('oid')] = obj.get('size', 0)
        topic = {
            'upload': _('lfs uploading'),
            'download': _('lfs downloading')
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _('lfs: need to transfer %d objects (%s)\n') %
                (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    if action == 'download':
                        msg = _('lfs: downloading %s (%s)\n')
                    elif action == 'upload':
                        msg = _('lfs: uploading %s (%s)\n')
                    self.ui.note(msg %
                                 (obj.get('oid'), util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _('lfs: failed: %r (remaining retry %d)\n') %
                                (ex, retry))
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool('experimental', 'lfs.worker-enable'):
            oids = worker.worker(self.ui, 0.1, transfer, (),
                                 sorted(objects, key=lambda o: o.get('oid')))
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get('oid')))

        processed = 0
        for _one, oid in oids:
            processed += sizes[oid]
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)
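
The transfer generator yields (1, oid) pairs, so the caller can tally
per-object sizes for the progress display whether the generator runs inline
or under worker.worker. A stripped-down sketch of that consumption loop
(stub transfer, made-up oids and sizes):

sizes = {'aa11': 100, 'bb22': 250}

def transfer(oids):
    for oid in oids:
        # the real code uploads or downloads the blob here
        yield 1, oid

processed = 0
for _one, oid in transfer(sorted(sizes)):
    processed += sizes[oid]
    print('processed %d of %d bytes (%s)' % (processed, sum(sizes.values()), oid))
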
Code example #9
def do_relink(src, dst, files, ui):
    def relinkfile(src, dst):
        bak = dst + b'.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)

    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0

    progress = ui.makeprogress(_(b'relinking'),
                               unit=_(b'files'),
                               total=len(files))
    pos = 0
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = open(source, b'rb')
        dfp = open(tgt, b'rb')
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            ui.debug(b'not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            progress.update(pos, item=f)
            relinked += 1
            savedbytes += sz
        except OSError as inst:
            ui.warn(b'%s: %s\n' % (tgt, stringutil.forcebytestr(inst)))

    progress.complete()

    ui.status(
        _(b'relinked %d files (%s reclaimed)\n') %
        (relinked, util.bytecount(savedbytes)))
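
relinkfile swaps the duplicate for a hardlink only after the byte-for-byte
comparison has proven both files identical. A self-contained demo of the same
rename/link/cleanup dance, with plain os.link standing in for util.oslink:

import os
import tempfile

d = tempfile.mkdtemp()
src, dst = os.path.join(d, 'src'), os.path.join(d, 'dst')
for p in (src, dst):
    with open(p, 'wb') as f:
        f.write(b'identical contents')

bak = dst + '.bak'
os.rename(dst, bak)        # keep a backup so a failed link can be undone
try:
    os.link(src, dst)      # both names now share one inode
except OSError:
    os.rename(bak, dst)
    raise
os.remove(bak)
print(os.stat(src).st_nlink)  # 2: the duplicate's bytes are reclaimed
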
Code example #10
def totalsize(self, silent=True):
    totalsize = 0
    numentries = 0
    for entry in self:
        entrysize = self.entrysize(entry)
        if entrysize == -1:
            # Entry was deleted by another process
            continue
        totalsize += entrysize
        numentries += 1
        if not silent:
            msg = "%s (size %s)\n" % (self.pathprefix + entry,
                                      util.bytecount(entrysize))
            self.ui.status(msg)
    return totalsize, numentries
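
Every example on this page renders sizes with util.bytecount. A rough
standalone approximation of its output (the real function's unit table and
precision differ in detail):

def bytecount(nbytes):
    # pick the largest unit the value reaches
    for factor, fmt in [(2**40, '%.2f TB'), (2**30, '%.2f GB'),
                        (2**20, '%.2f MB'), (2**10, '%.2f KB')]:
        if nbytes >= factor:
            return fmt % (nbytes / float(factor))
    return '%d bytes' % nbytes

print(bytecount(3670016))  # 3.50 MB
print(bytecount(512))      # 512 bytes
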
Code example #11
def do_relink(src, dst, files, ui):
    def relinkfile(src, dst):
        bak = dst + '.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)

    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0

    pos = 0
    total = len(files)
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = open(source, 'rb')
        dfp = open(tgt, 'rb')
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError as inst:
            ui.warn('%s: %s\n' % (tgt, str(inst)))

    ui.progress(_('relinking'), None)

    ui.status(
        _('relinked %d files (%s reclaimed)\n') %
        (relinked, util.bytecount(savedbytes)))
Code example #12
File: relink.py  Project: Distrotech/mercurial
def do_relink(src, dst, files, ui):
    def relinkfile(src, dst):
        bak = dst + '.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)

    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0

    pos = 0
    total = len(files)
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = open(source, 'rb')
        dfp = open(tgt, 'rb')
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError as inst:
            ui.warn('%s: %s\n' % (tgt, str(inst)))

    ui.progress(_('relinking'), None)

    ui.status(_('relinked %d files (%s reclaimed)\n') %
              (relinked, util.bytecount(savedbytes)))
Code example #13
def cachemanifestlist(ui, repo):
    cache = fastmanifestcache.getinstance(repo.store.opener, ui)
    total, numentries = cache.ondiskcache.totalsize(silent=False)
    ui.status(("cache size is: %s\n" % util.bytecount(total)))
    ui.status(("number of entries is: %s\n" % numentries))

    if ui.debugflag:
        revs = set(repo.revs("fastmanifestcached()"))
        import collections
        revstoman = collections.defaultdict(list)
        for r in revs:
            mannode = revlog.hex(repo.changelog.changelogrevision(r).manifest)
            revstoman[mannode].append(str(r))
        if revs:
            ui.status(("Most relevant cache entries appear first\n"))
            ui.status(("="*80))
            ui.status(("\nmanifest node                           |revs\n"))
            for h in cache.ondiskcache:
                l = h.replace("fast","")
                ui.status("%s|%s\n" % (l, ",".join(revstoman.get(l,[]))))
Code example #14
File: fix.py  Project: JesseDavids/mqtta
def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
    """Constructs the list of files to be fixed at specific revisions

    It is up to the caller how to consume the work items, and the only
    dependence between them is that replacement revisions must be committed in
    topological order. Each work item represents a file in the working copy or
    in some revision that should be fixed and written back to the working copy
    or into a replacement revision.

    Work items for the same revision are grouped together, so that a worker
    pool starting with the first N items in parallel is likely to finish the
    first revision's work before other revisions. This can allow us to write
    the result to disk and reduce memory footprint. At time of writing, the
    partition strategy in worker.py seems favorable to this. We also sort the
    items by ascending revision number to match the order in which we commit
    the fixes later.
    """
    workqueue = []
    numitems = collections.defaultdict(int)
    maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
    for rev in sorted(revstofix):
        fixctx = repo[rev]
        match = scmutil.match(fixctx, pats, opts)
        for path in sorted(
            pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx)
        ):
            fctx = fixctx[path]
            if fctx.islink():
                continue
            if fctx.size() > maxfilesize:
                ui.warn(
                    _(b'ignoring file larger than %s: %s\n')
                    % (util.bytecount(maxfilesize), path)
                )
                continue
            workqueue.append((rev, path))
            numitems[rev] += 1
    return workqueue, numitems
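
The ordering guarantees in the docstring mean a consumer can batch work
revision by revision. A hedged sketch of one way a caller might group the
returned (rev, path) items (toy data, not the fix extension's actual driver):

import collections

workqueue = [(1, 'a.py'), (1, 'b.py'), (2, 'a.py')]  # sorted by revision
byrev = collections.defaultdict(list)
for rev, path in workqueue:
    byrev[rev].append(path)
for rev in sorted(byrev):
    print(rev, byrev[rev])  # rev 1's files are finished before rev 2's
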
Code example #15
File: relink.py  Project: leetaizhu/Odoo_ENV_MAC_OS
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError as inst:
            ui.warn('%s: %s\n' % (tgt, str(inst)))

    ui.progress(_('relinking'), None)

    ui.status(_('relinked %d files (%s reclaimed)\n') %
              (relinked, util.bytecount(savedbytes)))

cmdtable = {
    'relink': (
        relink,
        [],
        _('[ORIGIN]')
    )
}
Code example #16
File: blobstore.py  Project: CJX32/my_blog
    def _batch(self, pointers, localstore, action):
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        with self.ui.makeprogress(topic, total=total) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )