Ejemplo n.º 1
0
    def _get_file(self, repo, path, resolved):
        """Process a request on a file.

        Return value is either a file object, or None (indicating an error).
        In either case, the headers are sent.
        """
        try:
            # resolved is a list of (name, item) pairs; the last item is
            # the file being requested.
            file_item = resolved[-1][1]
            file_item = vfs.augment_item_meta(repo,
                                              file_item,
                                              include_size=True)

            # we defer the set_header() calls until after we start writing
            # so we can still generate a 500 failure if something fails ...
            if self.request.method != 'HEAD':
                set_header = False
                # NOTE(review): opens via self.repo while the metadata call
                # above used the repo argument -- presumably the same repo;
                # confirm against the caller.
                with vfs.fopen(self.repo, file_item) as f:
                    for blob in chunkyreader(f):
                        if not set_header:
                            self._set_header(path, file_item)
                            set_header = True
                        self.write(blob)
            else:
                self._set_header(path, file_item)
        except Exception as e:
            # Any failure becomes a 500 with the exception text as the body.
            self.set_status(500)
            self.write("<h1>Server Error</h1>\n")
            self.write("%s: %s\n" % (e.__class__.__name__, str(e)))
        raise gen.Return()
Ejemplo n.º 2
0
 def cat_batch(self, refs):
     """Stream object info and data for each ref in refs via 'cat-batch'.

     For each ref, yield (oidx, type, size, chunkyreader), or
     (None, None, None, None) when the server reports the ref missing.
     Each yielded chunkyreader must be fully consumed before advancing
     this generator to the next ref.
     """
     self._require_command(b'cat-batch')
     self.check_busy()
     self._busy = b'cat-batch'
     conn = self.conn
     conn.write(b'cat-batch\n')
     # FIXME: do we want (only) binary protocol?
     # Send all requested refs up front, terminated by a blank line.
     for ref in refs:
         assert ref
         assert b'\n' not in ref
         conn.write(ref)
         conn.write(b'\n')
     conn.write(b'\n')
     # Then read one response (info line followed by data) per ref.
     for ref in refs:
         info = conn.readline()
         if info == b'missing\n':
             yield None, None, None, None
             continue
         if not (info and info.endswith(b'\n')):
             raise ClientError('Hit EOF while looking for object info: %r'
                               % info)
         oidx, oid_t, size = info.split(b' ')
         size = int(size)
         cr = chunkyreader(conn, size)
         yield oidx, oid_t, size, cr
         # The consumer must have drained cr completely; anything left
         # means the stream is out of sync.
         detritus = next(cr, None)
         if detritus:
             raise ClientError('unexpected leftover data ' + repr(detritus))
     # FIXME: confusing
     not_ok = self.check_ok()
     if not_ok:
         raise not_ok
     self._not_busy()
Ejemplo n.º 3
0
def write_file_content(fullname, n):
    """Write the content of VFS node n to the file at path fullname.

    Creates or truncates fullname and streams the node's data to it in
    chunks.
    """
    # Context manager replaces the original try/finally close.
    with open(fullname, 'wb') as outf:
        for b in chunkyreader(n.open()):
            outf.write(b)
Ejemplo n.º 4
0
Archivo: client.py Proyecto: gdt/bup
 def cat_batch(self, refs):
     """Stream object info and data for each ref in refs via 'cat-batch'.

     For each ref, yield (oidx, type, size, chunkyreader), or
     (None, None, None, None) when the server reports the ref missing.
     Each yielded chunkyreader must be fully consumed before advancing
     this generator to the next ref.
     """
     self._require_command('cat-batch')
     self.check_busy()
     self._busy = 'cat-batch'
     conn = self.conn
     conn.write('cat-batch\n')
     # FIXME: do we want (only) binary protocol?
     # Send all requested refs up front, terminated by a blank line.
     for ref in refs:
         assert ref
         assert '\n' not in ref
         conn.write(ref)
         conn.write('\n')
     conn.write('\n')
     # Then read one response (info line followed by data) per ref.
     for ref in refs:
         info = conn.readline()
         if info == 'missing\n':
             yield None, None, None, None
             continue
         if not (info and info.endswith('\n')):
             raise ClientError('Hit EOF while looking for object info: %r'
                               % info)
         oidx, oid_t, size = info.split(' ')
         size = int(size)
         cr = chunkyreader(conn, size)
         yield oidx, oid_t, size, cr
         # The consumer must have drained cr completely; anything left
         # means the stream is out of sync.
         detritus = next(cr, None)
         if detritus:
             raise ClientError('unexpected leftover data ' + repr(detritus))
     # FIXME: confusing
     not_ok = self.check_ok()
     if not_ok:
         raise not_ok
     self._not_busy()
Ejemplo n.º 5
0
Archivo: git.py Proyecto: zzmjohn/bup
    def write(self, filename, packbin):
        """Write the pack index (v2) for self.idx to filename.

        packbin is the 20-byte binary SHA-1 of the associated pack file;
        it is appended to the index, which is then terminated by the
        SHA-1 of the index contents themselves.  Returns the hex digest
        of the sorted object list (used to name the pack).
        """
        # Offsets >= 2**31 don't fit the 31-bit main offset table and go
        # in the 64-bit overflow table, so count them first.
        ofs64_count = 0
        for section in self.idx:
            for entry in section:
                if entry[2] >= 2**31:
                    ofs64_count += 1

        # Length: header + fan-out + shas-and-crcs + overflow-offsets
        index_len = 8 + (4 * 256) + (28 * self.count) + (8 * ofs64_count)
        idx_map = None
        idx_f = open(filename, 'w+b')
        try:
            idx_f.truncate(index_len)
            fdatasync(idx_f.fileno())
            # The C helper fills in the index body through the mmap.
            idx_map = mmap_readwrite(idx_f, close=False)
            try:
                count = _helpers.write_idx(filename, idx_map, self.idx,
                                           self.count)
                assert (count == self.count)
                idx_map.flush()
            finally:
                idx_map.close()
        finally:
            idx_f.close()

        # Reopen to append the pack checksum and compute the two digests:
        # one over the whole index (its trailer) and one over just the
        # sorted sha list (the pack's name).
        idx_f = open(filename, 'a+b')
        try:
            idx_f.write(packbin)
            idx_f.seek(0)
            idx_sum = Sha1()
            b = idx_f.read(8 + 4 * 256)
            idx_sum.update(b)

            obj_list_sum = Sha1()
            for b in chunkyreader(idx_f, 20 * self.count):
                idx_sum.update(b)
                obj_list_sum.update(b)
            namebase = hexlify(obj_list_sum.digest())

            for b in chunkyreader(idx_f):
                idx_sum.update(b)
            idx_f.write(idx_sum.digest())
            fdatasync(idx_f.fileno())
            return namebase
        finally:
            idx_f.close()
Ejemplo n.º 6
0
    def _write_pack_idx_v2(self, filename, idx, packbin):
        """Write the pack index (v2) for idx to filename.

        packbin is the 20-byte binary SHA-1 of the associated pack file;
        it is appended to the index, which is then terminated by the
        SHA-1 of the index contents themselves.  Returns the hex digest
        of the sorted object list (used to name the pack).
        """
        # Offsets >= 2**31 don't fit the 31-bit main offset table and go
        # in the 64-bit overflow table, so count them first.
        ofs64_count = 0
        for section in idx:
            for entry in section:
                if entry[2] >= 2**31:
                    ofs64_count += 1

        # Length: header + fan-out + shas-and-crcs + overflow-offsets
        index_len = 8 + (4 * 256) + (28 * self.count) + (8 * ofs64_count)
        idx_map = None
        idx_f = open(filename, 'w+b')
        try:
            idx_f.truncate(index_len)
            fdatasync(idx_f.fileno())
            # The C helper fills in the index body through the mmap.
            idx_map = mmap_readwrite(idx_f, close=False)
            try:
                count = _helpers.write_idx(filename, idx_map, idx, self.count)
                assert(count == self.count)
                idx_map.flush()
            finally:
                idx_map.close()
        finally:
            idx_f.close()

        # Reopen to append the pack checksum and compute the two digests:
        # one over the whole index (its trailer) and one over just the
        # sorted sha list (the pack's name).
        idx_f = open(filename, 'a+b')
        try:
            idx_f.write(packbin)
            idx_f.seek(0)
            idx_sum = Sha1()
            b = idx_f.read(8 + 4*256)
            idx_sum.update(b)

            obj_list_sum = Sha1()
            for b in chunkyreader(idx_f, 20*self.count):
                idx_sum.update(b)
                obj_list_sum.update(b)
            namebase = obj_list_sum.hexdigest()

            for b in chunkyreader(idx_f):
                idx_sum.update(b)
            idx_f.write(idx_sum.digest())
            fdatasync(idx_f.fileno())
            return namebase
        finally:
            idx_f.close()
Ejemplo n.º 7
0
def write_file_content_sparsely(fullname, n):
    """Write VFS node n's content to fullname, skipping zero runs.

    Runs of zeros (in 512-byte units) are seeked over rather than
    written, so the destination can end up sparse; the file is then
    truncated to its final logical length.
    """
    outfd = os.open(fullname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    try:
        trailing_zeros = 0
        for b in chunkyreader(n.open()):
            trailing_zeros = write_sparsely(outfd, b, 512, trailing_zeros)
        # Zeros still pending at EOF become file length via ftruncate.
        pos = os.lseek(outfd, trailing_zeros, os.SEEK_END)
        os.ftruncate(outfd, pos)
    finally:
        os.close(outfd)
Ejemplo n.º 8
0
def main(argv):
    """Entry point: dump a file's data, metadata, or .bupm stream.

    Exactly one target path of the form /branch/revision/... is
    required; --bupm and --meta are mutually exclusive modes.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    git.check_repo_or_die()

    if not extra:
        o.fatal('must specify a target')
    if len(extra) > 1:
        o.fatal('only one target file allowed')
    if opt.bupm and opt.meta:
        o.fatal('--meta and --bupm are incompatible')

    target = argv_bytes(extra[0])

    # Require at least /branch/revision in the path.
    if not re.match(br'/*[^/]+/[^/]+', target):
        o.fatal("path %r doesn't include a branch and revision" % target)

    with LocalRepo() as repo:
        resolved = vfs.resolve(repo, target, follow=False)
        leaf_name, leaf_item = resolved[-1]
        if not leaf_item:
            log('error: cannot access %r in %r\n' %
                (b'/'.join(name for name, item in resolved), target))
            sys.exit(1)

        mode = vfs.item_mode(leaf_item)

        # Switch stdout to binary mode before writing raw data.
        sys.stdout.flush()
        out = byte_stream(sys.stdout)

        if opt.bupm:
            # --bupm: dump the directory's raw .bupm metadata stream.
            if not stat.S_ISDIR(mode):
                o.fatal('%r is not a directory' % target)
            _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
            if bupm_oid:
                with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
                    out.write(meta_stream.read())
        elif opt.meta:
            # --meta: dump the encoded metadata record for the target.
            augmented = vfs.augment_item_meta(repo,
                                              leaf_item,
                                              include_size=True)
            out.write(augmented.meta.encode())
        else:
            # Default: dump the plain file's content.
            if stat.S_ISREG(mode):
                with vfs.fopen(repo, leaf_item) as f:
                    for b in chunkyreader(f):
                        out.write(b)
            else:
                o.fatal('%r is not a plain file' % target)

    if saved_errors:
        log('warning: %d errors encountered\n' % len(saved_errors))
        sys.exit(1)
Ejemplo n.º 9
0
def write_file_content_sparsely(repo, dest_path, vfs_file):
    """Write vfs_file's content (from repo) to dest_path, skipping zero runs.

    Runs of zeros (in 512-byte units) are seeked over rather than
    written, so the destination can end up sparse; the file is then
    truncated to its final logical length.
    """
    with vfs.fopen(repo, vfs_file) as inf:
        outfd = os.open(dest_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        try:
            trailing_zeros = 0
            for b in chunkyreader(inf):
                trailing_zeros = write_sparsely(outfd, b, 512, trailing_zeros)
            # Zeros still pending at EOF become file length via ftruncate.
            pos = os.lseek(outfd, trailing_zeros, os.SEEK_END)
            os.ftruncate(outfd, pos)
        finally:
            os.close(outfd)
Ejemplo n.º 10
0
def write_file_content_sparsely(repo, dest_path, vfs_file):
    """Write vfs_file's content (from repo) to dest_path, skipping zero runs.

    Runs of zeros (in 512-byte units) are seeked over rather than
    written, so the destination can end up sparse; the file is then
    truncated to its final logical length.
    """
    with vfs.fopen(repo, vfs_file) as inf:
        outfd = os.open(dest_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        try:
            trailing_zeros = 0
            for b in chunkyreader(inf):
                trailing_zeros = write_sparsely(outfd, b, 512, trailing_zeros)
            # Zeros still pending at EOF become file length via ftruncate.
            pos = os.lseek(outfd, trailing_zeros, os.SEEK_END)
            os.ftruncate(outfd, pos)
        finally:
            os.close(outfd)
Ejemplo n.º 11
0
def quick_verify(base):
    """Verify that base + '.pack' matches its trailing SHA-1.

    A pack file ends with the SHA-1 of everything before it; recompute
    that digest and raise ValueError on mismatch.
    """
    from binascii import hexlify
    # 'with' fixes the original's leaked file handle.
    with open(base + '.pack', 'rb') as f:
        f.seek(-20, 2)
        wantsum = f.read(20)
        assert len(wantsum) == 20
        f.seek(0)
        sum = Sha1()
        for b in chunkyreader(f, os.fstat(f.fileno()).st_size - 20):
            sum.update(b)
        if sum.digest() != wantsum:
            # hexlify replaces wantsum.encode('hex'), which is Python 2
            # only and raises on Python 3 bytes.
            raise ValueError('expected %r, got %r' % (hexlify(wantsum),
                                                      sum.hexdigest()))
Ejemplo n.º 12
0
def quick_verify(base):
    """Verify that base + b'.pack' matches its trailing SHA-1.

    A pack file ends with the SHA-1 of everything before it; recompute
    that digest and raise ValueError on mismatch.
    """
    # 'with' fixes the original's leaked file handle.
    with open(base + b'.pack', 'rb') as f:
        f.seek(-20, 2)
        wantsum = f.read(20)
        assert len(wantsum) == 20
        f.seek(0)
        sum = Sha1()
        for b in chunkyreader(f, os.fstat(f.fileno()).st_size - 20):
            sum.update(b)
        if sum.digest() != wantsum:
            raise ValueError('expected %r, got %r' %
                             (hexlify(wantsum), sum.hexdigest()))
Ejemplo n.º 13
0
    def _get_file(self, repo, path, resolved):
        """Process a request on a file.

        Sends Last-Modified, Content-Type, Content-Length, and Etag
        headers; unless this is a HEAD request, also streams the file
        data.
        """
        file_item = resolved[-1][1]
        file_item = vfs.augment_item_meta(repo, file_item, include_size=True)
        meta = file_item.meta
        ctype = self._guess_type(path)
        self.set_header("Last-Modified", http_date_from_utc_ns(meta.mtime))
        self.set_header("Content-Type", ctype)

        self.set_header("Content-Length", str(meta.size))
        assert len(file_item.oid) == 20
        self.set_header("Etag", hexlify(file_item.oid))
        if self.request.method != 'HEAD':
            # NOTE(review): opens via self.repo while the metadata call
            # above used the repo argument -- presumably the same repo;
            # confirm against the caller.
            with vfs.fopen(self.repo, file_item) as f:
                for blob in chunkyreader(f):
                    self.write(blob)
        raise gen.Return()
Ejemplo n.º 14
0
    def _get_file(self, path, n):
        """Process a request on a file.

        Sends Last-Modified, Content-Type, Content-Length, and Etag
        headers; unless this is a HEAD request, also streams the node's
        contents.
        """
        ctype = self._guess_type(path)
        self.set_header("Last-Modified", self.date_time_string(n.mtime))
        self.set_header("Content-Type", ctype)
        size = n.size()
        self.set_header("Content-Length", str(size))
        assert(len(n.hash) == 20)
        # NOTE(review): str.encode('hex') is Python 2 only.
        self.set_header("Etag", n.hash.encode('hex'))
        if self.request.method != 'HEAD':
            f = n.open()
            try:
                for blob in chunkyreader(f):
                    self.write(blob)
            finally:
                f.close()
        raise gen.Return()
Ejemplo n.º 15
0
Archivo: web-cmd.py Proyecto: bup/bup
    def _get_file(self, repo, path, resolved):
        """Process a request on a file.

        Sends Last-Modified, Content-Type, Content-Length, and Etag
        headers; unless this is a HEAD request, also streams the file
        data.
        """
        file_item = resolved[-1][1]
        file_item = vfs.augment_item_meta(repo, file_item, include_size=True)
        meta = file_item.meta
        ctype = self._guess_type(path)
        self.set_header("Last-Modified", http_date_from_utc_ns(meta.mtime))
        self.set_header("Content-Type", ctype)

        self.set_header("Content-Length", str(meta.size))
        assert len(file_item.oid) == 20
        # NOTE(review): bytes.encode('hex') is Python 2 only.
        self.set_header("Etag", file_item.oid.encode('hex'))
        if self.request.method != 'HEAD':
            # NOTE(review): opens via self.repo while the metadata call
            # above used the repo argument -- presumably the same repo;
            # confirm against the caller.
            with vfs.fopen(self.repo, file_item) as f:
                for blob in chunkyreader(f):
                    self.write(blob)
        raise gen.Return()
Ejemplo n.º 16
0
Archivo: git.py Proyecto: wwiowac/bup
    def _slow_get(self, id):
        """Yield the object's type, then its data in chunks, using a
        fresh 'git cat-file' invocation (the slow, uncached path)."""
        # Refuse ids that could corrupt the command line.
        assert (id.find('\n') < 0)
        assert (id.find('\r') < 0)
        assert (id[0] != '-')
        type = _git_capture(['git', 'cat-file', '-t', id]).strip()
        yield type

        p = subprocess.Popen(['git', 'cat-file', type, id],
                             stdout=subprocess.PIPE,
                             preexec_fn=_gitenv(self.repo_dir))
        for blob in chunkyreader(p.stdout):
            yield blob
        # Raises if git exited nonzero.
        _git_wait('git cat-file', p)
Ejemplo n.º 17
0
    def _slow_get(self, id):
        """Yield the object's type, then its data in chunks, using a
        fresh 'git cat-file' invocation (the slow, uncached path)."""
        # Refuse ids that could corrupt the command line.
        assert(id.find('\n') < 0)
        assert(id.find('\r') < 0)
        assert(id[0] != '-')
        type = _git_capture(['git', 'cat-file', '-t', id]).strip()
        yield type

        p = subprocess.Popen(['git', 'cat-file', type, id],
                             stdout=subprocess.PIPE,
                             preexec_fn = _gitenv(self.repo_dir))
        for blob in chunkyreader(p.stdout):
            yield blob
        # Raises if git exited nonzero.
        _git_wait('git cat-file', p)
Ejemplo n.º 18
0
Archivo: git.py Proyecto: xeyownt/bup
    def _end(self, run_midx=True):
        """Finish the current pack: patch in the object count, append the
        pack checksum, write the .idx, and rename both into the
        repository.  Returns the final name prefix, or None if no pack
        file was open.
        """
        f = self.file
        if not f: return None
        self.file = None
        try:
            self.objcache = None
            idx = self.idx
            self.idx = None

            # update object count
            f.seek(8)
            cp = struct.pack('!i', self.count)
            assert (len(cp) == 4)
            f.write(cp)

            # calculate the pack sha1sum
            f.seek(0)
            sum = Sha1()
            for b in chunkyreader(f):
                sum.update(b)
            packbin = sum.digest()
            f.write(packbin)
            fdatasync(f.fileno())
        finally:
            f.close()

        obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx,
                                               packbin)

        # The pack is named after the sha of its sorted object list.
        nameprefix = repo('objects/pack/pack-%s' % obj_list_sha)
        if os.path.exists(self.filename + '.map'):
            os.unlink(self.filename + '.map')
        os.rename(self.filename + '.pack', nameprefix + '.pack')
        os.rename(self.filename + '.idx', nameprefix + '.idx')
        try:
            # Sync the directory so the renames are durable.
            os.fsync(self.parentfd)
        except OSError as e:
            # Ignore EINVAL (*only*) since some fs don't support this (e.g. cifs).
            if e.errno != errno.EINVAL:
                raise
        finally:
            os.close(self.parentfd)

        if run_midx:
            auto_midx(repo('objects/pack'))

        if self.on_pack_finish:
            self.on_pack_finish(nameprefix)

        return nameprefix
Ejemplo n.º 19
0
    def _end(self, run_midx=True):
        """Finish the current pack: patch in the object count, append the
        pack checksum, write the .idx, and rename both into the
        repository.  Returns the final name prefix, or None if no pack
        file was open.
        """
        f = self.file
        if not f: return None
        self.file = None
        try:
            self.objcache = None
            idx = self.idx
            self.idx = None

            # update object count
            f.seek(8)
            cp = struct.pack('!i', self.count)
            assert (len(cp) == 4)
            f.write(cp)

            # calculate the pack sha1sum
            f.seek(0)
            sum = Sha1()
            for b in chunkyreader(f):
                sum.update(b)
            packbin = sum.digest()
            f.write(packbin)
            fdatasync(f.fileno())
        finally:
            f.close()

        obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx,
                                               packbin)
        # The pack is named after the sha of its sorted object list.
        nameprefix = os.path.join(self.repo_dir,
                                  'objects/pack/pack-' + obj_list_sha)
        if os.path.exists(self.filename + '.map'):
            os.unlink(self.filename + '.map')
        os.rename(self.filename + '.pack', nameprefix + '.pack')
        os.rename(self.filename + '.idx', nameprefix + '.idx')
        try:
            # Sync the directory so the renames are durable.
            os.fsync(self.parentfd)
        except OSError:
            # Best-effort: some filesystems don't support fsync on a
            # directory fd.  Narrowed from a bare "except:" which also
            # swallowed KeyboardInterrupt/SystemExit.
            pass
        finally:
            os.close(self.parentfd)

        if run_midx:
            auto_midx(os.path.join(self.repo_dir, 'objects/pack'))

        if self.on_pack_finish:
            self.on_pack_finish(nameprefix)

        return nameprefix
Ejemplo n.º 20
0
    def _end(self, run_midx=True):
        """Finish the current pack: patch in the object count, append the
        pack checksum, write the .idx, and rename both into the
        repository.  Returns the final name prefix, or None if no pack
        file was open.
        """
        f = self.file
        if not f: return None
        self.file = None
        try:
            self.objcache = None
            idx = self.idx
            self.idx = None

            # update object count
            f.seek(8)
            cp = struct.pack('!i', self.count)
            assert(len(cp) == 4)
            f.write(cp)

            # calculate the pack sha1sum
            f.seek(0)
            sum = Sha1()
            for b in chunkyreader(f):
                sum.update(b)
            packbin = sum.digest()
            f.write(packbin)
            fdatasync(f.fileno())
        finally:
            f.close()

        obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx, packbin)

        # The pack is named after the sha of its sorted object list.
        nameprefix = repo('objects/pack/pack-%s' % obj_list_sha)
        if os.path.exists(self.filename + '.map'):
            os.unlink(self.filename + '.map')
        os.rename(self.filename + '.pack', nameprefix + '.pack')
        os.rename(self.filename + '.idx', nameprefix + '.idx')
        try:
            # Sync the directory so the renames are durable.
            os.fsync(self.parentfd)
        finally:
            os.close(self.parentfd)

        if run_midx:
            auto_midx(repo('objects/pack'))

        if self.on_pack_finish:
            self.on_pack_finish(nameprefix)

        return nameprefix
Ejemplo n.º 21
0
 def sync_index(self, name):
     """Download .idx file 'name' from the server into the cache dir.

     Refuses to re-request an index that already exists locally.
     """
     #debug1('requesting %r\n' % name)
     self.check_busy()
     mkdirp(self.cachedir)
     fn = os.path.join(self.cachedir, name)
     if os.path.exists(fn):
         msg = "won't request existing .idx, try `bup bloom --check %s`" % fn
         raise ClientError(msg)
     self.conn.write('send-index %s\n' % name)
     # The server sends a 4-byte big-endian length prefix first.
     n = struct.unpack('!I', self.conn.read(4))[0]
     assert(n)
     # Write atomically so a partial download can't be mistaken for a
     # valid index.
     with atomically_replaced_file(fn, 'w') as f:
         count = 0
         for b in chunkyreader(self.conn, n):
             f.write(b)
             count += len(b)
             qprogress('Receiving index from server: %d/%d\r' % (count, n))
         self.check_ok()
Ejemplo n.º 22
0
    def send_index(self, name, f, send_size):
        """Fetch index 'name' from the server and write it to file f.

        send_size is called once with the total byte count before any
        data is written (e.g. so the caller can pre-size f).
        """
        self._require_command(b'send-index')
        #debug1('requesting %r\n' % name)
        self.check_busy()
        self.conn.write(b'send-index %s\n' % name)
        # The server sends a 4-byte big-endian length prefix first.
        n = struct.unpack('!I', self.conn.read(4))[0]
        assert(n)

        send_size(n)

        count = 0
        progress('Receiving index from server: %d/%d\r' % (count, n))
        for b in chunkyreader(self.conn, n):
            f.write(b)
            count += len(b)
            qprogress('Receiving index from server: %d/%d\r' % (count, n))
        progress('Receiving index from server: %d/%d, done.\n' % (count, n))
        self.check_ok()
Ejemplo n.º 23
0
    def get(self, id, size=False):
        """Yield the object type, and then an iterator over the data referred
        to by the id ref.  If size is true, yield (obj_type, obj_size)
        instead of just the type.

        Raises MissingObject if id doesn't exist and GitError on a
        malformed cat-file response.
        """
        # (Re)start the long-running cat-file child if it's gone or dead.
        if not self.p or self.p.poll() != None:
            self.restart()
        assert (self.p)
        poll_result = self.p.poll()
        assert (poll_result == None)
        # Only one object may be streamed at a time over the shared pipe.
        if self.inprogress:
            log('get: opening %r while %r is open\n' % (id, self.inprogress))
        assert (not self.inprogress)
        # Refuse ids that could desynchronize the protocol.
        assert (id.find('\n') < 0)
        assert (id.find('\r') < 0)
        assert (not id.startswith('-'))
        self.inprogress = id
        self.p.stdin.write('%s\n' % id)
        self.p.stdin.flush()
        hdr = self.p.stdout.readline()
        if hdr.endswith(' missing\n'):
            self.inprogress = None
            raise MissingObject(id.decode('hex'))
        spl = hdr.split(' ')
        if len(spl) != 3 or len(spl[0]) != 40:
            raise GitError('expected blob, got %r' % spl)
        hex, typ, sz = spl
        sz = int(sz)
        # Abort the child on error so a half-read object can't corrupt
        # the next request on the shared pipe.
        it = _AbortableIter(chunkyreader(self.p.stdout, sz),
                            onabort=self._abort)
        try:
            if size:
                yield typ, sz
            else:
                yield typ
            for blob in it:
                yield blob
            # cat-file terminates each object with a blank line.
            readline_result = self.p.stdout.readline()
            assert (readline_result == '\n')
            self.inprogress = None
        except Exception as e:
            it.abort()
            raise
Ejemplo n.º 24
0
Archivo: git.py Proyecto: xx4h/bup
    def get(self, id, size=False):
        """Yield the object type, and then an iterator over the data referred
        to by the id ref.  If size is true, yield (obj_type, obj_size)
        instead of just the type.

        Raises MissingObject if id doesn't exist and GitError on a
        malformed cat-file response.
        """
        # (Re)start the long-running cat-file child if it's gone or dead.
        if not self.p or self.p.poll() != None:
            self.restart()
        assert(self.p)
        poll_result = self.p.poll()
        assert(poll_result == None)
        # Only one object may be streamed at a time over the shared pipe.
        if self.inprogress:
            log('get: opening %r while %r is open\n' % (id, self.inprogress))
        assert(not self.inprogress)
        # Refuse ids that could desynchronize the protocol.
        assert(id.find('\n') < 0)
        assert(id.find('\r') < 0)
        assert(not id.startswith('-'))
        self.inprogress = id
        self.p.stdin.write('%s\n' % id)
        self.p.stdin.flush()
        hdr = self.p.stdout.readline()
        if hdr.endswith(' missing\n'):
            self.inprogress = None
            raise MissingObject(id.decode('hex'))
        spl = hdr.split(' ')
        if len(spl) != 3 or len(spl[0]) != 40:
            raise GitError('expected blob, got %r' % spl)
        hex, typ, sz = spl
        sz = int(sz)
        # Abort the child on error so a half-read object can't corrupt
        # the next request on the shared pipe.
        it = _AbortableIter(chunkyreader(self.p.stdout, sz),
                            onabort=self._abort)
        try:
            if size:
                yield typ, sz
            else:
                yield typ
            for blob in it:
                yield blob
            # cat-file terminates each object with a blank line.
            readline_result = self.p.stdout.readline()
            assert(readline_result == '\n')
            self.inprogress = None
        except Exception as e:
            it.abort()
            raise
Ejemplo n.º 25
0
Archivo: git.py Proyecto: bup/bup
    def get(self, ref):
        """Yield (oidx, type, size), followed by the data referred to by ref.
        If ref does not exist, only yield (None, None, None).

        Raises GitError on a malformed cat-file response.
        """
        # (Re)start the long-running cat-file child if it's gone or dead.
        if not self.p or self.p.poll() != None:
            self.restart()
        assert(self.p)
        poll_result = self.p.poll()
        assert(poll_result == None)
        # Only one object may be streamed at a time over the shared pipe.
        if self.inprogress:
            log('get: opening %r while %r is open\n' % (ref, self.inprogress))
        assert(not self.inprogress)
        # Refuse refs that could desynchronize the protocol.
        assert(ref.find('\n') < 0)
        assert(ref.find('\r') < 0)
        assert(not ref.startswith('-'))
        self.inprogress = ref
        self.p.stdin.write('%s\n' % ref)
        self.p.stdin.flush()
        hdr = self.p.stdout.readline()
        if hdr.endswith(' missing\n'):
            self.inprogress = None
            yield None, None, None
            return
        info = hdr.split(' ')
        if len(info) != 3 or len(info[0]) != 40:
            raise GitError('expected object (id, type, size), got %r' % info)
        oidx, typ, size = info
        size = int(size)
        # Abort the child on error so a half-read object can't corrupt
        # the next request on the shared pipe.
        it = _AbortableIter(chunkyreader(self.p.stdout, size),
                            onabort=self._abort)
        try:
            yield oidx, typ, size
            for blob in it:
                yield blob
            # cat-file terminates each object with a blank line.
            readline_result = self.p.stdout.readline()
            assert(readline_result == '\n')
            self.inprogress = None
        except Exception as e:
            it.abort()
            raise
Ejemplo n.º 26
0
Archivo: git.py Proyecto: zzmjohn/bup
    def get(self, ref):
        """Yield (oidx, type, size), followed by the data referred to by ref.
        If ref does not exist, only yield (None, None, None).

        Raises GitError on a malformed cat-file response.
        """
        # (Re)start the long-running cat-file child if it's gone or dead.
        if not self.p or self.p.poll() != None:
            self.restart()
        assert (self.p)
        poll_result = self.p.poll()
        assert (poll_result == None)
        # Only one object may be streamed at a time over the shared pipe.
        if self.inprogress:
            log('get: opening %r while %r is open\n' % (ref, self.inprogress))
        assert (not self.inprogress)
        # Refuse refs that could desynchronize the protocol.
        assert ref.find(b'\n') < 0
        assert ref.find(b'\r') < 0
        assert not ref.startswith(b'-')
        self.inprogress = ref
        self.p.stdin.write(ref + b'\n')
        self.p.stdin.flush()
        hdr = self.p.stdout.readline()
        if hdr.endswith(b' missing\n'):
            self.inprogress = None
            yield None, None, None
            return
        info = hdr.split(b' ')
        if len(info) != 3 or len(info[0]) != 40:
            raise GitError('expected object (id, type, size), got %r' % info)
        oidx, typ, size = info
        size = int(size)
        # Abort the child on error so a half-read object can't corrupt
        # the next request on the shared pipe.
        it = _AbortableIter(chunkyreader(self.p.stdout, size),
                            onabort=self._abort)
        try:
            yield oidx, typ, size
            for blob in it:
                yield blob
            # cat-file terminates each object with a blank line.
            readline_result = self.p.stdout.readline()
            assert readline_result == b'\n'
            self.inprogress = None
        except Exception as e:
            it.abort()
            raise
Ejemplo n.º 27
0
 def sync_index(self, name):
     """Download .idx file 'name' from the server into the cache dir.

     Refuses to re-request an index that already exists locally.
     """
     #debug1('requesting %r\n' % name)
     self.check_busy()
     mkdirp(self.cachedir)
     fn = os.path.join(self.cachedir, name)
     if os.path.exists(fn):
         msg = "won't request existing .idx, try `bup bloom --check %s`" % fn
         raise ClientError(msg)
     self.conn.write('send-index %s\n' % name)
     # The server sends a 4-byte big-endian length prefix first.
     n = struct.unpack('!I', self.conn.read(4))[0]
     assert (n)
     # Write atomically so a partial download can't be mistaken for a
     # valid index.
     with atomically_replaced_file(fn, 'w') as f:
         count = 0
         progress('Receiving index from server: %d/%d\r' % (count, n))
         for b in chunkyreader(self.conn, n):
             f.write(b)
             count += len(b)
             qprogress('Receiving index from server: %d/%d\r' % (count, n))
         progress('Receiving index from server: %d/%d, done.\n' %
                  (count, n))
         self.check_ok()
Ejemplo n.º 28
0
Archivo: git.py Proyecto: wwiowac/bup
    def _fast_get(self, id):
        """Yield the object's type, then its data in chunks, via the
        long-running 'git cat-file --batch' child (the fast path).

        Raises MissingObject if id doesn't exist and GitError on a
        malformed response.
        """
        # (Re)start the cat-file child if it's gone or dead.
        if not self.p or self.p.poll() != None:
            self.restart()
        assert (self.p)
        poll_result = self.p.poll()
        assert (poll_result == None)
        # Only one object may be streamed at a time over the shared pipe.
        if self.inprogress:
            log('_fast_get: opening %r while %r is open\n' %
                (id, self.inprogress))
        assert (not self.inprogress)
        # Refuse ids that could desynchronize the protocol.
        assert (id.find('\n') < 0)
        assert (id.find('\r') < 0)
        assert (not id.startswith('-'))
        self.inprogress = id
        self.p.stdin.write('%s\n' % id)
        self.p.stdin.flush()
        hdr = self.p.stdout.readline()
        if hdr.endswith(' missing\n'):
            self.inprogress = None
            raise MissingObject(id.decode('hex'))
        spl = hdr.split(' ')
        if len(spl) != 3 or len(spl[0]) != 40:
            raise GitError('expected blob, got %r' % spl)
        (hex, type, size) = spl

        # Abort the child on error so a half-read object can't corrupt
        # the next request on the shared pipe.
        it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])),
                            onabort=self._abort)
        try:
            yield type
            for blob in it:
                yield blob
            # cat-file terminates each object with a blank line.
            readline_result = self.p.stdout.readline()
            assert (readline_result == '\n')
            self.inprogress = None
        except Exception as e:
            it.abort()
            raise
Ejemplo n.º 29
0
    def _fast_get(self, id):
        """Yield the object's type, then its data in chunks, via the
        long-running 'git cat-file --batch' child (the fast path).

        Raises MissingObject if id doesn't exist and GitError on a
        malformed response.
        """
        # (Re)start the cat-file child if it's gone or dead.
        if not self.p or self.p.poll() != None:
            self._restart()
        assert(self.p)
        poll_result = self.p.poll()
        assert(poll_result == None)
        # Only one object may be streamed at a time over the shared pipe.
        if self.inprogress:
            log('_fast_get: opening %r while %r is open\n'
                % (id, self.inprogress))
        assert(not self.inprogress)
        # Refuse ids that could desynchronize the protocol.
        assert(id.find('\n') < 0)
        assert(id.find('\r') < 0)
        assert(not id.startswith('-'))
        self.inprogress = id
        self.p.stdin.write('%s\n' % id)
        self.p.stdin.flush()
        hdr = self.p.stdout.readline()
        if hdr.endswith(' missing\n'):
            self.inprogress = None
            raise MissingObject(id.decode('hex'))
        spl = hdr.split(' ')
        if len(spl) != 3 or len(spl[0]) != 40:
            raise GitError('expected blob, got %r' % spl)
        (hex, type, size) = spl

        # Abort the child on error so a half-read object can't corrupt
        # the next request on the shared pipe.
        it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])),
                           onabort = self._abort)
        try:
            yield type
            for blob in it:
                yield blob
            # cat-file terminates each object with a blank line.
            readline_result = self.p.stdout.readline()
            assert(readline_result == '\n')
            self.inprogress = None
        except Exception as e:
            it.abort()
            raise
Ejemplo n.º 30
0
# Resolve the target path in the VFS; a bad path is a fatal CLI error.
try:
    n = top.lresolve(target)
except vfs.NodeError as e:
    o.fatal(e)

if isinstance(n, vfs.FakeSymlink):
    # Source is actually /foo/what, i.e. a top-level commit
    # like /foo/latest, which is a symlink to ../.commit/SHA.
    # So dereference it.
    target = n.dereference()

if opt.bupm:
    # --bupm: dump the directory's raw .bupm metadata stream.
    if not stat.S_ISDIR(n.mode):
        o.fatal('%r is not a directory' % target)
    mfile = n.metadata_file()  # VFS file -- cannot close().
    if mfile:
        meta_stream = mfile.open()
        sys.stdout.write(meta_stream.read())
elif opt.meta:
    # --meta: dump the encoded metadata record for the target itself.
    sys.stdout.write(n.metadata().encode())
else:
    # Default: dump the plain file's content.
    if stat.S_ISREG(n.mode):
        for b in chunkyreader(n.open()):
            sys.stdout.write(b)
    else:
        o.fatal('%r is not a plain file' % target)

if saved_errors:
    log('warning: %d errors encountered\n' % len(saved_errors))
    sys.exit(1)
Ejemplo n.º 31
0
def write_file_content(repo, dest_path, vfs_file):
    """Copy the contents of vfs_file (within repo) into dest_path."""
    with vfs.fopen(repo, vfs_file) as source, open(dest_path, 'wb') as sink:
        sink.writelines(chunkyreader(source))
Ejemplo n.º 32
0
def write_to_file(inf, outf):
    """Stream every chunk read from inf into outf."""
    outf.writelines(chunkyreader(inf))
Ejemplo n.º 33
0
# Resolve the target without following a final symlink; a missing leaf
# means the path is inaccessible.
resolved = vfs.resolve(repo, target, follow=False)
leaf_name, leaf_item = resolved[-1]
if not leaf_item:
    log('error: cannot access %r in %r\n'
        % ('/'.join(name for name, item in resolved), path))
    sys.exit(1)

mode = vfs.item_mode(leaf_item)

if opt.bupm:
    # --bupm: dump the directory's raw .bupm metadata stream.
    if not stat.S_ISDIR(mode):
        o.fatal('%r is not a directory' % target)
    _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
    if bupm_oid:
        with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
            sys.stdout.write(meta_stream.read())
elif opt.meta:
    # --meta: dump the encoded metadata record for the target itself.
    augmented = vfs.augment_item_meta(repo, leaf_item, include_size=True)
    sys.stdout.write(augmented.meta.encode())
else:
    # Default: dump the plain file's content.
    if stat.S_ISREG(mode):
        with vfs.fopen(repo, leaf_item) as f:
            for b in chunkyreader(f):
                sys.stdout.write(b)
    else:
        o.fatal('%r is not a plain file' % target)

if saved_errors:
    log('warning: %d errors encountered\n' % len(saved_errors))
    sys.exit(1)
Ejemplo n.º 34
0
def write_file_content(repo, dest_path, vfs_file):
    """Copy the contents of vfs_file (within repo) into dest_path."""
    with vfs.fopen(repo, vfs_file) as source, open(dest_path, 'wb') as sink:
        sink.writelines(chunkyreader(source))
Ejemplo n.º 35
0
# Resolve the target path in the VFS; a bad path is a fatal CLI error.
try:
    n = top.lresolve(target)
except vfs.NodeError as e:
    o.fatal(e)

if isinstance(n, vfs.FakeSymlink):
    # Source is actually /foo/what, i.e. a top-level commit
    # like /foo/latest, which is a symlink to ../.commit/SHA.
    # So dereference it.
    target = n.dereference()

if opt.bupm:
    # --bupm: dump the directory's raw .bupm metadata stream.
    if not stat.S_ISDIR(n.mode):
        o.fatal('%r is not a directory' % target)
    mfile = n.metadata_file() # VFS file -- cannot close().
    if mfile:
        meta_stream = mfile.open()
        sys.stdout.write(meta_stream.read())
elif opt.meta:
    # --meta: dump the encoded metadata record for the target itself.
    sys.stdout.write(n.metadata().encode())
else:
    # Default: dump the plain file's content.
    if stat.S_ISREG(n.mode):
        for b in chunkyreader(n.open()):
            sys.stdout.write(b)
    else:
        o.fatal('%r is not a plain file' % target)

if saved_errors:
    log('warning: %d errors encountered\n' % len(saved_errors))
    sys.exit(1)
Ejemplo n.º 36
0
# Resolve the target without following a final symlink; a missing leaf
# means the path is inaccessible.
resolved = vfs.lresolve(repo, target)
leaf_name, leaf_item = resolved[-1]
if not leaf_item:
    log('error: cannot access %r in %r\n' %
        ('/'.join(name for name, item in resolved), path))
    sys.exit(1)

mode = vfs.item_mode(leaf_item)

if opt.bupm:
    # --bupm: dump the directory's raw .bupm metadata stream.
    if not stat.S_ISDIR(mode):
        o.fatal('%r is not a directory' % target)
    _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
    if bupm_oid:
        with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
            sys.stdout.write(meta_stream.read())
elif opt.meta:
    # --meta: dump the encoded metadata record for the target itself.
    augmented = vfs.augment_item_meta(repo, leaf_item, include_size=True)
    sys.stdout.write(augmented.meta.encode())
else:
    # Default: dump the plain file's content.
    if stat.S_ISREG(mode):
        with vfs.fopen(repo, leaf_item) as f:
            for b in chunkyreader(f):
                sys.stdout.write(b)
    else:
        o.fatal('%r is not a plain file' % target)

if saved_errors:
    log('warning: %d errors encountered\n' % len(saved_errors))
    sys.exit(1)
Ejemplo n.º 37
0
Archivo: ftp-cmd.py Proyecto: bup/bup
def write_to_file(inf, outf):
    """Stream every chunk read from inf into outf."""
    outf.writelines(chunkyreader(inf))
Ejemplo n.º 38
0
def pump(src, tgt, prg=lambda i: i):
    """Copy all data from src to tgt in chunks, then close both streams.

    prg is called with each chunk's length, for progress reporting.
    Bug fix: the previous default, lambda i, n: i, took two arguments
    but prg is invoked with one, so using the default always raised
    TypeError; the default now accepts a single length argument.
    """
    for blob in helpers.chunkyreader(src):
        tgt.write(blob)
        prg(len(blob))
    src.close()
    tgt.close()