Exemplo n.º 1
0
def main(argv):
    """Run the bup server loop: read commands from stdin until 'quit'."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    if extra:
        o.fatal('no arguments expected')

    debug2('bup server: reading from stdin.\n')

    # FIXME: this protocol is totally lame and not at all future-proof.
    # (Especially since we abort completely as soon as *anything* bad happens)
    sys.stdout.flush()
    conn = Conn(byte_stream(sys.stdin), byte_stream(sys.stdout))
    for raw in linereader(conn):
        line = raw.strip()
        if not line:
            continue
        debug1('bup server: command: %r\n' % line)
        words = line.split(b' ', 1)
        cmd = words[0]
        rest = words[1] if len(words) > 1 else b''
        if cmd == b'quit':
            break
        handler = commands.get(cmd)
        if not handler:
            raise Exception('unknown server command: %r\n' % line)
        handler(conn, rest)

    debug1('bup server: done\n')
Exemplo n.º 2
0
def main(argv):
    """Join chunked refs and write their reassembled content to stdout or -o."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)

    git.check_repo_or_die()

    stdin = byte_stream(sys.stdin)

    # With no refs on the command line, read them one per line from stdin.
    refs = extra if extra else linereader(stdin)

    ret = 0
    if opt.remote:
        repo = RemoteRepo(opt.remote)
    else:
        repo = LocalRepo()

    if opt.o:
        outfile = open(opt.o, 'wb')
    else:
        sys.stdout.flush()
        outfile = byte_stream(sys.stdout)

    for ref in [argv_bytes(x) for x in refs]:
        try:
            for blob in repo.join(ref):
                outfile.write(blob)
        except KeyError as e:
            # Flush data written so far before reporting the missing ref.
            outfile.flush()
            log('error: %s\n' % e)
            ret = 1

    sys.exit(ret)
Exemplo n.º 3
0
Arquivo: server.py Projeto: jmberg/bup
def main(argv):
    """Run the bup protocol server over stdin/stdout.

    Honors --force-repo (ignore client-requested repo dir) and --mode
    (restrict the command set).
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    if extra:
        o.fatal('no arguments expected')

    debug2('bup server: reading from stdin.\n')

    class ServerRepo(LocalRepo):
        def __init__(self, repo_dir):
            # With --force-repo, always use the server's default repository.
            if opt.force_repo:
                repo_dir = None
            LocalRepo.__init__(self, repo_dir)

    def _restrict(server, commands):
        # Remove every bup_server_command-marked attribute whose name is
        # not in the permitted set.  The previous version iterated the
        # attribute *names* from dir() but then inspected the name string
        # itself (so the marker check never matched) and referenced an
        # undefined 'cls' — it could never have worked.
        for name in dir(server):
            fn = getattr(server, name)
            if getattr(fn, 'bup_server_command', False):
                if name not in commands:
                    delattr(server, name)

    modes = ['unrestricted', 'append', 'read-append', 'read']
    if opt.mode is not None and opt.mode not in modes:
        o.fatal("server: invalid mode")

    BupProtocolServer(Conn(byte_stream(sys.stdin), byte_stream(sys.stdout)),
                      ServerRepo, mode=opt.mode).handle()
Exemplo n.º 4
0
def main(argv):
    """Create (default) or verify (--check) midx files for the repo."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    opt.dir = argv_bytes(opt.dir) if opt.dir else None
    opt.output = argv_bytes(opt.output) if opt.output else None

    if extra and (opt.auto or opt.force):
        o.fatal("you can't use -f/-a and also provide filenames")
    if opt.check and (not extra and not opt.auto):
        o.fatal("if using --check, you must provide filenames or -a")

    git.check_repo_or_die()

    if opt.max_files < 0:
        opt.max_files = max_files()
    assert opt.max_files >= 5

    names = [argv_bytes(x) for x in extra]

    if opt.check:
        # Verify existing midx files rather than building new ones.
        if names:
            midxes = names
        else:
            path = opt.dir or git.repo(b'objects/pack')
            debug1('midx: scanning %s\n' % path)
            midxes = glob.glob(os.path.join(path, b'*.midx'))
        for midx_name in midxes:
            check_midx(midx_name)
        if not saved_errors:
            log('All tests passed.\n')
    elif names:
        # Build one midx from the explicitly named indexes.
        sys.stdout.flush()
        do_midx(git.repo(b'objects/pack'), opt.output, names, b'',
                byte_stream(sys.stdout),
                auto=opt.auto, force=opt.force,
                print_names=opt.print)
    elif opt.auto or opt.force:
        # Scan the pack dir and build whatever midx files are needed.
        sys.stdout.flush()
        path = opt.dir or git.repo(b'objects/pack')
        debug1('midx: scanning %s\n' % path_msg(path))
        do_midx_dir(path, opt.output, byte_stream(sys.stdout),
                    auto=opt.auto, force=opt.force,
                    max_files=opt.max_files)
    else:
        o.fatal("you must use -f or -a or provide input filenames")

    if saved_errors:
        log('WARNING: %d errors encountered.\n' % len(saved_errors))
        sys.exit(1)
Exemplo n.º 5
0
def ex(cmd, **kwargs):
    """Log cmd to stderr, run it via run(), and return the result.

    If the subprocess' stderr was captured (stderr=PIPE) and is nonempty,
    forward it to our stderr.
    """
    logcmd(cmd)
    result = run(cmd, **kwargs)
    err = result.err
    if err:
        sys.stderr.flush()
        byte_stream(sys.stderr).write(err)
    return result
Exemplo n.º 6
0
def main(argv):
    """Run the interactive interface against the local repository."""
    global repo

    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    git.check_repo_or_die()
    sys.stdout.flush()
    out_stream = byte_stream(sys.stdout)
    in_stream = byte_stream(sys.stdin)
    with LocalRepo() as repo:
        present_interface(in_stream, out_stream, extra, repo)
    if saved_errors:
        log('warning: %d errors encountered\n' % len(saved_errors))
        sys.exit(1)
Exemplo n.º 7
0
def main(argv):
    """Create, list, or delete (-d) bup tags.

    With no arguments, list tags; with -d, delete one; with a name and a
    committish, create/update refs/tags/<name>.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    git.check_repo_or_die()

    tags = [t for sublist in git.tags().values() for t in sublist]

    if opt.delete:
        # git.delete_ref() doesn't complain if a ref doesn't exist.  We
        # could implement this verification but we'd need to read in the
        # contents of the tag file and pass the hash, and we already know
        # about the tag's existence via "tags".
        tag_name = argv_bytes(opt.delete)
        if not opt.force and tag_name not in tags:
            log("error: tag '%s' doesn't exist\n" % path_msg(tag_name))
            sys.exit(1)
        tag_file = b'refs/tags/%s' % tag_name
        git.delete_ref(tag_file)
        sys.exit(0)

    if not extra:
        # No arguments: list every tag, one per line.
        for t in tags:
            sys.stdout.flush()
            out = byte_stream(sys.stdout)
            out.write(t)
            out.write(b'\n')
        sys.exit(0)
    elif len(extra) != 2:
        o.fatal('expected commit ref and hash')

    tag_name, commit = map(argv_bytes, extra[:2])
    if not tag_name:
        o.fatal("tag name must not be empty.")
    debug1("args: tag name = %s; commit = %s\n" %
           (path_msg(tag_name), commit.decode('ascii')))

    if tag_name in tags and not opt.force:
        log("bup: error: tag '%s' already exists\n" % path_msg(tag_name))
        sys.exit(1)

    if tag_name.startswith(b'.'):
        o.fatal("'%s' is not a valid tag name." % path_msg(tag_name))

    try:
        hash = git.rev_parse(commit)
    except git.GitError as e:
        # Fix: this message previously lacked the trailing newline that
        # every other log() call in this command includes.
        log("bup: error: %s\n" % e)
        sys.exit(2)

    if not hash:
        log("bup: error: commit %s not found.\n" % commit.decode('ascii'))
        sys.exit(2)

    # Also verify the object is actually present in the pack indexes.
    with git.PackIdxList(git.repo(b'objects/pack')) as pL:
        if not pL.exists(hash):
            log("bup: error: commit %s not found.\n" % commit.decode('ascii'))
            sys.exit(2)

    git.update_ref(b'refs/tags/' + tag_name, hash, None, force=True)
Exemplo n.º 8
0
def check_prune_result(expected):
    """Assert that 'git log' commit timestamps match expected (sorted ints).

    On mismatch, print the expected times and a unified diff to stderr
    before failing via wvpass().
    """
    log_out = exo([b'git', b'log', b'--pretty=format:%at']).out
    actual = sorted(int(x) for x in log_out.splitlines())

    if expected != actual:
        for ts in expected:
            print('ex:', ts, strftime('%Y-%m-%d-%H%M%S', localtime(ts)),
                  file=stderr)
        diff = diff_bytes(unified_diff,
                          [result_diffline(x) for x in expected],
                          [result_diffline(x) for x in actual],
                          fromfile=b'expected', tofile=b'actual')
        for line in diff:
            sys.stderr.flush()
            byte_stream(sys.stderr).write(line)
    wvpass(expected == actual)
Exemplo n.º 9
0
def main(argv):
    """Report SHA-1 prefix margin statistics for the repository.

    Default mode finds the longest matching prefix between adjacent
    object ids and extrapolates collision headroom; --predict measures
    position-prediction error instead.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    if extra:
        o.fatal("no arguments expected")

    git.check_repo_or_die()

    with git.PackIdxList(git.repo(b'objects/pack'),
                         ignore_midx=opt.ignore_midx) as mi:

        def do_predict(ix, out):
            # Max deviation between each object's actual sorted position and
            # the position predicted from its leading 64 bits.
            total = len(ix)
            maxdiff = 0
            for count,i in enumerate(ix):
                prefix = struct.unpack('!Q', i[:8])[0]
                expected = prefix * total // (1 << 64)
                diff = count - expected
                maxdiff = max(maxdiff, abs(diff))
            out.write(b'%d of %d (%.3f%%) '
                      % (maxdiff, len(ix), maxdiff * 100.0 / len(ix)))
            out.flush()
            assert(count+1 == len(ix))

        sys.stdout.flush()
        out = byte_stream(sys.stdout)

        if opt.predict:
            if opt.ignore_midx:
                for pack in mi.packs:
                    do_predict(pack, out)
            else:
                do_predict(mi, out)
        else:
            # default mode: find longest matching prefix
            last = b'\0'*20
            longmatch = 0
            for i in mi:
                if i == last:
                    continue
                #assert(str(i) >= last)
                pm = _helpers.bitmatch(last, i)
                longmatch = max(longmatch, pm)
                last = i
            out.write(b'%d\n' % longmatch)
            log('%d matching prefix bits\n' % longmatch)
            doublings = math.log(len(mi), 2)
            # Fix: the stats below use longmatch/doublings, which are only
            # bound in this branch; previously they ran unconditionally and
            # raised NameError under --predict.
            bpd = longmatch / doublings
            log('%.2f bits per doubling\n' % bpd)
            remain = 160 - longmatch
            rdoublings = remain / bpd
            log('%d bits (%.2f doublings) remaining\n' % (remain, rdoublings))
            larger = 2**rdoublings
            log('%g times larger is possible\n' % larger)
            perperson = larger/POPULATION_OF_EARTH
            log('\nEveryone on earth could have %d data sets like yours, all in one\n'
                'repository, and we would expect 1 object collision.\n'
                % int(perperson))
Exemplo n.º 10
0
 def __init__(self, infd, outp):
     """Initialize the demuxed connection over file descriptor infd.

     Consumes bytes from infd until the b'BUPMUX' sync marker is seen;
     anything that arrives before the marker is forwarded to stderr as
     pre-mux log output.
     """
     BaseConn.__init__(self, outp)
     # Anything that comes through before the sync string was not
     # multiplexed and can be assumed to be debug/log before mux init.
     tail = b''
     while tail != b'BUPMUX':
         # Read enough to complete a 6-byte candidate marker, then one
         # byte at a time while sliding the window forward.
         b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
         if not b:
             raise IOError('demux: unexpected EOF during initialization')
         tail += b
         byte_stream(sys.stderr).write(tail[:-6])  # pre-mux log messages
         tail = tail[-6:]
     self.infd = infd
     self.reader = None  # per-packet payload reader; set by packet handling
     self.buf = None
     self.closed = False
Exemplo n.º 11
0
 def _next_packet(self, timeout):
     """Consume the next mux packet header from infd, if one is readable.

     Returns False when the connection is already closed or nothing is
     readable within timeout; returns True after handling a header.
     """
     if self.closed: return False
     rl, wl, xl = select.select([self.infd], [], [], timeout)
     if not rl: return False
     assert(rl[0] == self.infd)
     # Header is 5 bytes: 4-byte big-endian payload length + 1-byte stream id.
     ns = b''.join(checked_reader(self.infd, 5))
     n, fdw = struct.unpack('!IB', ns)
     assert(n <= MAX_PACKET)
     if fdw == 1:
         # Stream 1: leave a payload reader for the caller to consume.
         self.reader = checked_reader(self.infd, n)
     elif fdw == 2:
         # Stream 2: forward the payload straight to our stderr.
         for buf in checked_reader(self.infd, n):
             byte_stream(sys.stderr).write(buf)
     elif fdw == 3:
         # Stream 3: remote end signaled close; no payload follows.
         self.closed = True
         debug2("DemuxConn: marked closed\n")
     return True
Exemplo n.º 12
0
def main(argv):
    """Entry point for 'bup ls'; the option spec lives in lib/bup/ls.py."""
    git.check_repo_or_die()

    sys.stdout.flush()
    # Check out lib/bup/ls.py for the opt spec
    sys.exit(ls.via_cmdline(argv[1:], out=byte_stream(sys.stdout)))
Exemplo n.º 13
0
 def runtest(self):
     """Run the test script at self.fspath; fail on '! ... failed' lines
     or a nonzero exit status."""
     cmd = str(self.fspath)
     proc = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
     out = proc.communicate()[0]
     sys.stdout.flush()
     byte_stream(sys.stdout).write(out)
     failures = []
     for line in out.splitlines():
         if line.startswith(b'!') and line.lower().endswith(b' failed'):
             failures.append(line)
     if failures or proc.returncode != 0:
         raise BupSubprocFailure(
             '%s failed (exit %d, %d failures)'
             % (cmd, proc.returncode, len(failures)),
             cmd, proc.returncode, failures)
Exemplo n.º 14
0
def main(argv):
    """Benchmark object-existence lookups against the repository indexes."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    if extra:
        o.fatal('no arguments expected')

    git.check_repo_or_die()
    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    # Baseline reports: one before and one after priming random_sha().
    report(-1, out)
    _helpers.random_sha()
    report(0, out)

    with git.PackIdxList(git.repo(b'objects/pack'),
                         ignore_midx=opt.ignore_midx) as m:

        if opt.existing:
            def cycle_forever(mi):
                # Replay the index contents endlessly.
                while True:
                    for e in mi:
                        yield e
            objit = cycle_forever(m)

        for c in range(opt.cycles):
            for n in range(opt.number):
                if opt.existing:
                    bin = next(objit)
                    assert m.exists(bin)
                else:
                    bin = _helpers.random_sha()
                    # technically, a randomly generated object id might exist.
                    # but the likelihood of that is the likelihood of finding
                    # a collision in sha-1 by accident, which is so unlikely that
                    # we don't care.
                    assert not m.exists(bin)
            report((c + 1) * opt.number, out)

    if bloom._total_searches:
        out.write(
            b'bloom: %d objects searched in %d steps: avg %.3f steps/object\n'
            % (bloom._total_searches, bloom._total_steps,
               bloom._total_steps * 1.0 / bloom._total_searches))
    if midx._total_searches:
        out.write(
            b'midx: %d objects searched in %d steps: avg %.3f steps/object\n' %
            (midx._total_searches, midx._total_steps,
             midx._total_steps * 1.0 / midx._total_searches))
    if git._total_searches:
        out.write(
            b'idx: %d objects searched in %d steps: avg %.3f steps/object\n' %
            (git._total_searches, git._total_steps,
             git._total_steps * 1.0 / git._total_searches))
    out.write(b'Total time: %.3fs\n' % (time.time() - start))
Exemplo n.º 15
0
Arquivo: main.py Projeto: gdt/bup
def run_subproc_cmd(args):
    """Run args as a subprocess, filtering its output streams if needed.

    If neither stdout nor stderr needs fixing, exec the command in place
    (this call then never returns).  Otherwise run it under Popen, route
    the stream(s) that need fixing through filter_output(), and return
    the child's exit status.
    """
    # Optionally wrap the command in the Python profiler.
    c = (do_profile and [sys.executable, b'-m', b'cProfile'] or []) + args
    if not (fix_stdout or fix_stderr):
        os.execvp(c[0], c)

    sys.stdout.flush()
    sys.stderr.flush()
    out = byte_stream(sys.stdout)
    err = byte_stream(sys.stderr)
    p = None
    try:
        p = subprocess.Popen(c,
                             stdout=PIPE if fix_stdout else out,
                             stderr=PIPE if fix_stderr else err,
                             env=tty_env,
                             bufsize=4096,
                             close_fds=True)
        # Assume p will receive these signals and quit, which will
        # then cause us to quit.
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
            signal.signal(sig, signal.SIG_IGN)

        srcs = []
        dests = []
        if fix_stdout:
            srcs.append(p.stdout.fileno())
            dests.append(out.fileno())
        if fix_stderr:
            srcs.append(p.stderr.fileno())
            dests.append(err.fileno())
        filter_output(srcs, dests)
        return p.wait()
    except BaseException as ex:
        add_ex_tb(ex)
        try:
            # Best effort: terminate the child before re-raising, and keep
            # the original exception as context if the kill itself fails.
            if p and p.poll() == None:
                os.kill(p.pid, signal.SIGTERM)
                p.wait()
        except BaseException as kill_ex:
            raise add_ex_ctx(add_ex_tb(kill_ex), ex)
        raise ex
Exemplo n.º 16
0
def main(argv):
    """Write a VFS item's file data, .bupm contents, or metadata to stdout."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    git.check_repo_or_die()

    if not extra:
        o.fatal('must specify a target')
    if len(extra) > 1:
        o.fatal('only one target file allowed')
    if opt.bupm and opt.meta:
        o.fatal('--meta and --bupm are incompatible')

    target = argv_bytes(extra[0])

    # The path must contain at least /branch/revision.
    if not re.match(br'/*[^/]+/[^/]+', target):
        o.fatal("path %r doesn't include a branch and revision" % target)

    with LocalRepo() as repo:
        resolved = vfs.resolve(repo, target, follow=False)
        leaf_name, leaf_item = resolved[-1]
        if not leaf_item:
            log('error: cannot access %r in %r\n' %
                (b'/'.join(name for name, item in resolved), target))
            sys.exit(1)

        mode = vfs.item_mode(leaf_item)

        sys.stdout.flush()
        out = byte_stream(sys.stdout)

        if opt.bupm:
            # Dump the raw .bupm metadata blob for a directory, if any.
            if not stat.S_ISDIR(mode):
                o.fatal('%r is not a directory' % target)
            _, bupm_oid = vfs.tree_data_and_bupm(repo, leaf_item.oid)
            if bupm_oid:
                with vfs.tree_data_reader(repo, bupm_oid) as meta_stream:
                    out.write(meta_stream.read())
        elif opt.meta:
            # Dump the item's (size-augmented) metadata record.
            augmented = vfs.augment_item_meta(repo, leaf_item,
                                              include_size=True)
            out.write(augmented.meta.encode())
        elif stat.S_ISREG(mode):
            with vfs.fopen(repo, leaf_item) as f:
                for chunk in chunkyreader(f):
                    out.write(chunk)
        else:
            o.fatal('%r is not a plain file' % target)

    if saved_errors:
        log('warning: %d errors encountered\n' % len(saved_errors))
        sys.exit(1)
Exemplo n.º 17
0
 def runtest(self):
     """Run the test script at self.fspath; honor '! ... skip ok' lines,
     fail on '! ... failed' lines, AssertionError output, or nonzero exit."""
     cmd = str(self.fspath)
     proc = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
     out = proc.communicate()[0]
     sys.stdout.flush()
     byte_stream(sys.stdout).write(out)
     lines = out.splitlines()
     for line in lines:
         if line.startswith(b'!') and line.lower().endswith(b' skip ok'):
             pytest.skip(line.decode('ascii'))
             return
     failures = []
     for line in lines:
         if line.startswith(b'!') and line.lower().endswith(b' failed'):
             failures.append(line)
     if b'AssertionError' in out:
         raise BupSubprocFailure('AssertionError detected')
     if failures or proc.returncode != 0:
         raise BupSubprocFailure('%s failed (exit %d, %d failures)'
                                 % (cmd, proc.returncode, len(failures)),
                                 cmd, proc.returncode, failures)
Exemplo n.º 18
0
 def _next_packet(self, timeout):
     """Consume the next mux packet header from infd, if one is readable.

     Returns False when already closed or nothing is readable within
     timeout; returns True after handling a header.  Raises if the
     advertised payload length exceeds MAX_PACKET (corrupt stream).
     """
     if self.closed: return False
     rl, wl, xl = select.select([self.infd], [], [], timeout)
     if not rl: return False
     assert (rl[0] == self.infd)
     # Header is 5 bytes: 4-byte big-endian payload length + 1-byte stream id.
     ns = b''.join(checked_reader(self.infd, 5))
     n, fdw = struct.unpack('!IB', ns)
     if n > MAX_PACKET:
         # assume that something went wrong and print stuff
         ns += os.read(self.infd, 1024)
         stderr = byte_stream(sys.stderr)
         stderr.write(ns)
         stderr.flush()
         raise Exception("Connection broken")
     if fdw == 1:
         # Stream 1: leave a payload reader for the caller to consume.
         self.reader = checked_reader(self.infd, n)
     elif fdw == 2:
         # Stream 2: forward the payload straight to our stderr.
         for buf in checked_reader(self.infd, n):
             byte_stream(sys.stderr).write(buf)
     elif fdw == 3:
         # Stream 3: remote end signaled close; no payload follows.
         self.closed = True
         debug2("DemuxConn: marked closed\n")
     return True
Exemplo n.º 19
0
def main(argv):
    """Search .idx files for object ids matching a hex prefix (--find)."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    handle_ctrl_c()

    opt.find = argv_bytes(opt.find) if opt.find else b''

    if not extra:
        o.fatal('you must provide at least one filename')

    if len(opt.find) > 40:
        o.fatal('--find parameter must be <= 40 chars long')
    # Pad an odd-length prefix so it forms whole bytes for unhexlify.
    padded = opt.find + b'0' if len(opt.find) % 2 else opt.find
    try:
        bin = unhexlify(padded)
    except TypeError:
        o.fatal('--find parameter is not a valid hex string')

    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    find = opt.find.lower()
    count = 0
    for name in [argv_bytes(x) for x in extra]:
        try:
            ix = git.open_idx(name)
        except git.GitError as e:
            add_error('%r: %s' % (name, e))
            continue
        if len(opt.find) == 40:
            # Full-length id: a direct existence test is enough.
            if ix.exists(bin):
                out.write(b'%s %s\n' % (name, find))
        else:
            # slow, exhaustive search
            for _i in ix:
                i = hexlify(_i)
                if i.startswith(find):
                    out.write(b'%s %s\n' % (name, i))
                qprogress('Searching: %d\r' % count)
                count += 1

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' %
            len(saved_errors))
        sys.exit(1)
Exemplo n.º 20
0
def main(argv):
    """Implement the server side of 'bup on': receive the real argv over
    stdin (length-prefixed), protect the protocol fds, and exec
    'bup mux -- <argv>'."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    if extra:
        o.fatal('no arguments expected')

    # get the subcommand's argv.
    # Normally we could just pass this on the command line, but since we'll often
    # be getting called on the other end of an ssh pipe, which tends to mangle
    # argv (by sending it via the shell), this way is much safer.

    stdin = byte_stream(sys.stdin)
    # 4-byte big-endian length prefix, then the NUL-separated argv itself.
    buf = stdin.read(4)
    sz = struct.unpack('!I', buf)[0]
    assert(sz > 0)
    assert(sz < 1000000)
    buf = stdin.read(sz)
    assert(len(buf) == sz)
    argv = buf.split(b'\0')
    argv[0] = path.exe()
    argv = [argv[0], b'mux', b'--'] + argv


    # stdin/stdout are supposedly connected to 'bup server' that the caller
    # started for us (often on the other end of an ssh tunnel), so we don't want
    # to misuse them.  Move them out of the way, then replace stdout with
    # a pointer to stderr in case our subcommand wants to do something with it.
    #
    # It might be nice to do the same with stdin, but my experiments showed that
    # ssh seems to make its child's stderr a readable-but-never-reads-anything
    # socket.  They really should have used shutdown(SHUT_WR) on the other end
    # of it, but probably didn't.  Anyway, it's too messy, so let's just make sure
    # anyone reading from stdin is disappointed.
    #
    # (You can't just leave stdin/stdout "not open" by closing the file
    # descriptors.  Then the next file that opens is automatically assigned 0 or 1,
    # and people *trying* to read/write stdin/stdout get screwed.)
    os.dup2(0, 3)
    os.dup2(1, 4)
    os.dup2(2, 1)
    fd = os.open(os.devnull, os.O_RDONLY)
    os.dup2(fd, 0)
    os.close(fd)

    environ[b'BUP_SERVER_REVERSE'] = helpers.hostname()
    os.execvp(argv[0], argv)
    # Only reached if execvp itself fails to replace the process image.
    sys.exit(99)
Exemplo n.º 21
0
def main(argv):
    """Run a subcommand with its stdout/stderr multiplexed onto our stdout.

    Writes the b'BUPMUX' sync token first so the receiving end can find
    the start of the multiplexed stream, then pumps the child's output
    through mux() and exits with the child's status.
    """
    # Give the subcommand exclusive access to stdin.
    orig_stdin = os.dup(0)
    devnull = os.open(os.devnull, os.O_RDONLY)
    os.dup2(devnull, 0)
    os.close(devnull)

    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    if len(extra) < 1:
        o.fatal('command is required')

    subcmd = extra

    debug2('bup mux: starting %r\n' % (extra, ))

    # One pipe each for the child's stdout and stderr; mux() interleaves them.
    outr, outw = os.pipe()
    errr, errw = os.pipe()

    def close_fds():
        # Runs in the child (preexec_fn): drop our read ends there.
        os.close(outr)
        os.close(errr)

    p = subprocess.Popen(subcmd,
                         stdin=orig_stdin,
                         stdout=outw,
                         stderr=errw,
                         close_fds=False,
                         preexec_fn=close_fds)
    # Parent no longer needs the write ends once the child holds them.
    os.close(outw)
    os.close(errw)
    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    out.write(b'BUPMUX')
    out.flush()
    mux(p, out.fileno(), outr, errr)
    os.close(outr)
    os.close(errr)
    prv = p.wait()

    if prv:
        debug1('%s exited with code %d\n' % (extra[0], prv))

    debug1('bup mux: done\n')

    sys.exit(prv)
Exemplo n.º 22
0
def main(argv):
    """Print bup's version, or just its date/commit with --date/--commit."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    # --date and --commit are mutually exclusive.
    if (opt.date or 0) + (opt.commit or 0) > 1:
        o.fatal('at most one option expected')

    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    if opt.date:
        out.write(version.date.split(b' ')[0] + b'\n')
    elif opt.commit:
        out.write(version.commit + b'\n')
    else:
        out.write(version.version + b'\n')
Exemplo n.º 23
0
def main(argv):
    """Print bup's version and which optional features were built in."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    out.write(b'bup %s\n' % version.version)
    out.write(b'Source %s %s\n' % (version.commit, version.date))
    out.write(b'    Python: %s\n' % platform.python_version().encode('ascii'))

    # Each feature is present when the corresponding helper exists.
    show_support(out, getattr(_helpers, 'readline', None),
                 b'Command line editing (e.g. bup ftp)')
    show_support(out, getattr(_helpers, 'read_acl', None),
                 b'Saving and restoring POSIX ACLs')
    show_support(out, metadata.xattr,
                 b'Saving and restoring extended attributes (xattrs)')
Exemplo n.º 24
0
    def __init__(self, infd, outp):
        """Initialize the demuxed connection over file descriptor infd.

        Scans infd byte by byte for the b'BUPMUX' sync cookie; everything
        seen before the cookie is forwarded to stderr as pre-mux log
        output (with b'\\r' bytes dropped).
        """
        BaseConn.__init__(self, outp)
        # Anything that comes through before the sync string was not
        # multiplexed and can be assumed to be debug/log before mux init.
        stderr = byte_stream(sys.stderr)
        cookie = b'BUPMUX'
        pos = 0  # how many cookie bytes matched so far
        while True:
            b = os.read(infd, 1)
            # Make sure to write all pre-BUPMUX output to stderr
            if not b:
                ex = IOError('demux: unexpected EOF during initialization')
                # NOTE(review): pending_raise presumably raises ex when this
                # block exits, after flushing any partial cookie — confirm
                # against the helpers' pending_raise contract.
                with pending_raise(ex):
                    stderr.write(cookie[:pos])
                    stderr.flush()

            if b == bytes_from_byte(cookie[pos]):
                pos += 1
                if pos == len(cookie):
                    break
                continue

            # If we can't find a new char of 'BUPMUX' then we must have some
            # pre-mux log messages - output those as soon and as complete as
            # possible.
            #
            # \r\n interacts badly with print_clean_line() in the main bup
            # so remove all the \r so we see the full the lines. This assumes
            # that nothing at this point will intentionally delete lines, but
            # as we're just during SSH init that seems reasonable.
            if b == b'\r':
                continue

            stderr.write(cookie[:pos]) # could be we have "BU" in the logs or so
            pos = 0
            stderr.write(b)  # pre-mux log messages
            stderr.flush()
        self.infd = infd
        self.reader = None  # per-packet payload reader; set by packet handling
        self.buf = None
        self.closed = False
Exemplo n.º 25
0
 def __init__(self, infd, outp):
     """Initialize the demuxed connection over file descriptor infd.

     Consumes bytes from infd until the b'BUPMUX' sync marker is seen,
     forwarding anything before it to stderr as pre-mux log output.
     """
     BaseConn.__init__(self, outp)
     # Anything that comes through before the sync string was not
     # multiplexed and can be assumed to be debug/log before mux init.
     tail = b''
     stderr = byte_stream(sys.stderr)
     while tail != b'BUPMUX':
         # Make sure to write all pre-BUPMUX output to stderr
         # Read enough to complete a 6-byte candidate marker, then one
         # byte at a time while sliding the window forward.
         b = os.read(infd, (len(tail) < 6) and (6 - len(tail)) or 1)
         if not b:
             ex = IOError('demux: unexpected EOF during initialization')
             # NOTE(review): pending_raise presumably raises ex when this
             # block exits, after flushing the buffered tail — confirm.
             with pending_raise(ex):
                 stderr.write(tail)
                 stderr.flush()
         tail += b
         stderr.write(tail[:-6])
         tail = tail[-6:]
     stderr.flush()
     self.infd = infd
     self.reader = None  # per-packet payload reader; set by packet handling
     self.buf = None
     self.closed = False
Exemplo n.º 26
0
def main(argv):
    """Recursively list a directory tree, honoring exclude options."""
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    if len(extra) != 1:
        o.fatal("exactly one filename expected")

    top = argv_bytes(extra[0])
    excluded_paths = parse_excludes(flags, o.fatal)
    # A relative root means the excludes must be made relative too.
    if not top.startswith(b'/'):
        excluded_paths = [relpath(x) for x in excluded_paths]
    exclude_rxs = parse_rx_excludes(flags, o.fatal)
    it = drecurse.recursive_dirlist([top], opt.xdev,
                                    excluded_paths=excluded_paths,
                                    exclude_rxs=exclude_rxs)
    if opt.profile:
        import cProfile

        # cProfile.run() resolves 'do_it' by name, so keep this name.
        def do_it():
            for _ in it:
                pass

        cProfile.run('do_it()')
    elif opt.quiet:
        for _ in it:
            pass
    else:
        sys.stdout.flush()
        out = byte_stream(sys.stdout)
        for name, st in it:
            out.write(name + b'\n')

    if saved_errors:
        log('WARNING: %d errors encountered.\n' % len(saved_errors))
        sys.exit(1)
Exemplo n.º 27
0
Arquivo: split.py Projeto: fakegit/bup
def main(argv):
    """Hashsplit input (stdin, files, or git object ids) into a repository.

    Depending on the options, writes to the local repo, a remote repo, or
    nowhere (--noop/--copy), and optionally updates a branch ref (-n).
    """
    opt = opts_from_cmdline(argv)
    if opt.verbose >= 2:
        git.verbose = opt.verbose - 1
    if opt.fanout:
        hashsplit.fanout = opt.fanout
    if opt.blobs:
        # --blobs: emit raw blobs with no tree fanout at all.
        hashsplit.fanout = 0
    if opt.bwlimit:
        client.bwlimit = opt.bwlimit

    start_time = time.time()

    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    stdin = byte_stream(sys.stdin)

    if opt.git_ids:
        # the input is actually a series of git object ids that we should retrieve
        # and split.
        #
        # This is a bit messy, but basically it converts from a series of
        # CatPipe.get() iterators into a series of file-type objects.
        # It would be less ugly if either CatPipe.get() returned a file-like object
        # (not very efficient), or split_to_shalist() expected an iterator instead
        # of a file.
        cp = git.CatPipe()

        class IterToFile:
            # Minimal file-like wrapper: each read() returns the next chunk.
            def __init__(self, it):
                self.it = iter(it)

            def read(self, size):
                v = next(self.it, None)
                return v or b''

        def read_ids():
            # Yield one IterToFile per object id read from stdin.
            while 1:
                line = stdin.readline()
                if not line:
                    break
                if line:
                    line = line.strip()
                try:
                    it = cp.get(line.strip())
                    next(it, None)  # skip the file info
                except KeyError as e:
                    add_error('error: %s' % e)
                    continue
                yield IterToFile(it)

        files = read_ids()
    else:
        # the input either comes from a series of files or from stdin.
        if opt.sources:
            files = (open(argv_bytes(fn), 'rb') for fn in opt.sources)
        else:
            files = [stdin]

    writing = not (opt.noop or opt.copy)
    remote_dest = opt.remote or opt.is_reverse

    if writing:
        git.check_repo_or_die()

    if remote_dest and writing:
        cli = repo = client.Client(opt.remote)
    else:
        # Nothing to close in the local/no-write case.
        cli = nullcontext()
        repo = git

    # cli creation must be last nontrivial command in each if clause above
    with cli:
        if opt.name and writing:
            refname = opt.name and b'refs/heads/%s' % opt.name
            oldref = repo.read_ref(refname)
        else:
            refname = oldref = None

        if not writing:
            pack_writer = NoOpPackWriter()
        elif not remote_dest:
            pack_writer = git.PackWriter(compression_level=opt.compress,
                                         max_pack_size=opt.max_pack_size,
                                         max_pack_objects=opt.max_pack_objects)
        else:
            pack_writer = cli.new_packwriter(
                compression_level=opt.compress,
                max_pack_size=opt.max_pack_size,
                max_pack_objects=opt.max_pack_objects)

        # packwriter creation must be last command in each if clause above
        with pack_writer:
            commit = split(opt, files, oldref, out, pack_writer)

        # pack_writer must be closed before we can update the ref
        if refname:
            repo.update_ref(refname, commit, oldref)

    secs = time.time() - start_time
    size = hashsplit.total_split
    if opt.bench:
        log('bup: %.2f kbytes in %.2f secs = %.2f kbytes/sec\n' %
            (size / 1024, secs, size / 1024 / secs))

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' %
            len(saved_errors))
        sys.exit(1)
Exemplo n.º 28
0
    pct = total and count*100.0/total or 100
    progress('Saving: %.2f%% (%d/%dk, %d/%d files), done.    \n'
             % (pct, count/1024, total/1024, fcount, ftotal))

# Unwind the directory stack: _pop() all the parts above the root.
while len(parts) > 1: # _pop() all the parts above the root
    _pop(force_tree = None)
assert(len(shalists) == 1)
assert(len(metalists) == 1)

# Finish the root directory.
tree = _pop(force_tree = None,
            # When there's a collision, use empty metadata for the root.
            dir_metadata = metadata.Metadata() if root_collision else None)

# Flush text-mode stdout before switching to the binary byte stream.
sys.stdout.flush()
out = byte_stream(sys.stdout)

if opt.tree:
    out.write(hexlify(tree))
    out.write(b'\n')
if opt.commit or name:
    # Record the exact command line in the commit message.
    msg = (b'bup save\n\nGenerated by command:\n%r\n'
           % [argv_bytes(x) for x in sys.argv])
    userline = (b'%s <%s@%s>' % (userfullname(), username(), hostname()))
    commit = w.new_commit(tree, oldref, userline, date, None,
                          userline, date, None, msg)
    if opt.commit:
        out.write(hexlify(commit))
        out.write(b'\n')

msr.close()
Exemplo n.º 29
0
optspec = """
bup on--server
--
    This command is run automatically by 'bup on'
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if extra:
    o.fatal('no arguments expected')

# get the subcommand's argv.
# Normally we could just pass this on the command line, but since we'll often
# be getting called on the other end of an ssh pipe, which tends to mangle
# argv (by sending it via the shell), this way is much safer.

stdin = byte_stream(sys.stdin)
# 4-byte big-endian length prefix, then the NUL-separated argv itself.
buf = stdin.read(4)
sz = struct.unpack('!I', buf)[0]
assert(sz > 0)
assert(sz < 1000000)
buf = stdin.read(sz)
assert(len(buf) == sz)
argv = buf.split(b'\0')
argv[0] = path.exe()
# Re-exec ourselves under 'bup mux' so output streams get multiplexed.
argv = [argv[0], b'mux', b'--'] + argv


# stdin/stdout are supposedly connected to 'bup server' that the caller
# started for us (often on the other end of an ssh tunnel), so we don't want
# to misuse them.  Move them out of the way, then replace stdout with
# a pointer to stderr in case our subcommand wants to do something with it.
Exemplo n.º 30
0
def main(argv):
    """Print detailed metadata for each path named on the command line.

    For every remaining (non-flag) argument, read the path's filesystem
    metadata, optionally truncate the atime/mtime/ctime values to the
    requested resolution, and write a detailed description to stdout.
    Exits with status 1 if any per-path errors were accumulated in
    saved_errors, 0 otherwise.  Nonexistent paths are recorded as errors
    but do not abort the run; any other OSError/IOError propagates.
    """
    active_fields = metadata.all_fields

    o = options.Options(optspec)
    (opt, flags, remainder) = o.parse_bytes(argv[1:])

    # Resolutions are in the same (integer) units as the stored
    # timestamps; 1 means "keep full precision".
    atime_resolution = parse_timestamp_arg('atime', opt.atime_resolution)
    mtime_resolution = parse_timestamp_arg('mtime', opt.mtime_resolution)
    ctime_resolution = parse_timestamp_arg('ctime', opt.ctime_resolution)

    # Flags are order-sensitive: the first --include-fields replaces the
    # default "all fields" set; later include/exclude flags adjust it.
    treat_include_fields_as_definitive = True
    for flag, value in flags:
        if flag == '--exclude-fields':
            exclude_fields = frozenset(value.split(','))
            for f in exclude_fields:
                if not f in metadata.all_fields:
                    o.fatal(f + ' is not a valid field name')
            active_fields = active_fields - exclude_fields
            treat_include_fields_as_definitive = False
        elif flag == '--include-fields':
            include_fields = frozenset(value.split(','))
            for f in include_fields:
                if not f in metadata.all_fields:
                    o.fatal(f + ' is not a valid field name')
            if treat_include_fields_as_definitive:
                active_fields = include_fields
                treat_include_fields_as_definitive = False
            else:
                active_fields = active_fields | include_fields

    opt.verbose = opt.verbose or 0
    opt.quiet = opt.quiet or 0
    metadata.verbose = opt.verbose - opt.quiet

    # Switch to a byte-oriented stdout before writing metadata bytes.
    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    first_path = True
    for path in remainder:
        path = argv_bytes(path)
        try:
            m = metadata.from_path(path, archive_path=path)
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                add_error(e)
                continue
            else:
                raise
        if metadata.verbose >= 0:
            if not first_path:
                out.write(b'\n')
            # Truncate timestamps to the requested resolution.  This must
            # be floor division: with true division '/', (t / res) * res
            # would round-trip to (approximately) the original value and
            # silently convert the integer timestamps to floats.
            if atime_resolution != 1:
                m.atime = (m.atime // atime_resolution) * atime_resolution
            if mtime_resolution != 1:
                m.mtime = (m.mtime // mtime_resolution) * mtime_resolution
            if ctime_resolution != 1:
                m.ctime = (m.ctime // ctime_resolution) * ctime_resolution
            out.write(metadata.detailed_bytes(m, active_fields))
            out.write(b'\n')
            first_path = False

    if saved_errors:
        log('WARNING: %d errors encountered.\n' % len(saved_errors))
        sys.exit(1)
    else:
        sys.exit(0)