Ejemplo n.º 1
0
def test_multiple_suggestions():
    """Verify the client fetches every server-suggested .idx file.

    Two local packs are created first so the server has two indexes to
    suggest; the remote PackWriter must receive both before the earlier
    blobs show up in its object cache.

    Fix: the original waited with an unbounded ``while …: pass`` busy
    loop that pegs a CPU and hangs forever if the server misbehaves;
    the wait is now bounded (about one second) with short sleeps.
    """
    import time  # local import: this snippet has no visible import block
    with no_lingering_errors():
        with test_tempdir(b'bup-tclient-') as tmpdir:
            environ[b'BUP_DIR'] = bupdir = tmpdir
            git.init_repo(bupdir)

            # Seed the repo with two separate packs, one blob each.
            lw = git.PackWriter()
            lw.new_blob(s1)
            lw.close()
            lw = git.PackWriter()
            lw.new_blob(s2)
            lw.close()
            WVPASSEQ(len(glob.glob(git.repo(b'objects/pack' + IDX_PAT))), 2)

            c = client.Client(bupdir, create=True)
            WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 0)
            rw = c.new_packwriter()
            s1sha = rw.new_blob(s1)
            WVPASS(rw.exists(s1sha))
            s2sha = rw.new_blob(s2)
            # This is a little hacky, but ensures that we test the
            # code under test.  Bound the wait so a broken server
            # can't spin this loop forever.
            tries = 0
            while (len(glob.glob(c.cachedir + IDX_PAT)) < 2
                   and not c.conn.has_input()
                   and tries < 10):
                time.sleep(0.1)
                tries += 1
            rw.new_blob(s2)
            WVPASS(rw.objcache.exists(s1sha))
            WVPASS(rw.objcache.exists(s2sha))
            rw.new_blob(s3)
            WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 2)
            rw.close()
            WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 3)
Ejemplo n.º 2
0
Archivo: repo.py Proyecto: zzmjohn/bup
 def __init__(self, address):
     """Open a client connection to *address* and delegate repo ops to it."""
     self.address = address
     cli = client.Client(address)
     self.client = cli
     # Forward the pack/ref operations straight to the client.
     self.new_packwriter = cli.new_packwriter
     self.update_ref = cli.update_ref
     self.rev_list = cli.rev_list
     self._id = _repo_id(self.address)
Ejemplo n.º 3
0
def test_midx_refreshing(tmpdir):
    """Check that PackIdxList.refresh() honors skip_midx after 'bup midx -f'."""
    environ[b'BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)
    c = client.Client(bupdir, create=True)
    rw = c.new_packwriter()
    rw.new_blob(s1)
    p1base = rw.breakpoint()
    p1name = os.path.join(c.cachedir, p1base)
    s1sha = rw.new_blob(s1)  # already in p1, so this must not be rewritten
    s2sha = rw.new_blob(s2)
    p2base = rw.close()
    p2name = os.path.join(c.cachedir, p2base)
    del rw

    # Both packs should be visible, before and after a refresh.
    idx_list = git.PackIdxList(bupdir + b'/objects/pack')
    assert len(idx_list.packs) == 2
    idx_list.refresh()
    assert len(idx_list.packs) == 2
    basenames = [os.path.basename(p.name) for p in idx_list.packs]
    assert sorted(basenames) == sorted([p1base, p2base])

    # Each blob lives only in the pack that was open when it was written.
    idx1 = git.open_idx(p1name)
    assert idx1.exists(s1sha)
    idx2 = git.open_idx(p2name)
    assert not idx2.exists(s1sha)
    assert idx2.exists(s2sha)

    # After forcing a midx, refresh() collapses to one entry (the midx)
    # unless skip_midx is requested.
    subprocess.call([path.exe(), b'midx', b'-f'])
    idx_list.refresh()
    assert len(idx_list.packs) == 1
    idx_list.refresh(skip_midx=True)
    assert len(idx_list.packs) == 2
    idx_list.refresh(skip_midx=False)
    assert len(idx_list.packs) == 1
Ejemplo n.º 4
0
def test_multiple_suggestions():
    """Verify the client fetches every server-suggested .idx file.

    Seeds the repo with two packs so the server will suggest two
    indexes; the remote PackWriter must receive both before the earlier
    blobs show up in its object cache.

    Fixes: the original waited with an unbounded ``while …: pass`` busy
    loop that pegs a CPU and hangs forever if the server misbehaves —
    the wait is now bounded (about one second); cleanup also uses
    shutil.rmtree instead of shelling out to ``rm -rf``.
    """
    import shutil
    import time
    initial_failures = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tclient-')
    os.environ['BUP_MAIN_EXE'] = '../../../bup'
    os.environ['BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)

    # Two separate local packs, one blob each.
    lw = git.PackWriter()
    lw.new_blob(s1)
    lw.close()
    lw = git.PackWriter()
    lw.new_blob(s2)
    lw.close()
    WVPASSEQ(len(glob.glob(git.repo('objects/pack' + IDX_PAT))), 2)

    c = client.Client(bupdir, create=True)
    WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 0)
    rw = c.new_packwriter()
    s1sha = rw.new_blob(s1)
    WVPASS(rw.exists(s1sha))
    s2sha = rw.new_blob(s2)
    # This is a little hacky, but ensures that we test the code under
    # test.  Bound the wait so a broken server can't spin this loop
    # forever.
    tries = 0
    while (len(glob.glob(c.cachedir + IDX_PAT)) < 2
           and not c.conn.has_input()
           and tries < 10):
        time.sleep(0.1)
        tries += 1
    rw.new_blob(s2)
    WVPASS(rw.objcache.exists(s1sha))
    WVPASS(rw.objcache.exists(s2sha))
    rw.new_blob(s3)
    WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 2)
    rw.close()
    WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 3)
    if wvfailure_count() == initial_failures:
        # Keep the directory around on failure for debugging.
        shutil.rmtree(tmpdir, ignore_errors=True)
Ejemplo n.º 5
0
def test_multiple_suggestions(tmpdir):
    """Verify the client fetches every server-suggested .idx file.

    NOTE(review): the flush / cache-poll / has_input() sequence below is
    deliberately order-sensitive (see the inline comments), so the code
    is left untouched and only documented here.
    """
    environ[b'BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)

    # Seed the repo with two separate packs, one blob each, so the
    # server has two indexes to suggest.
    lw = git.PackWriter()
    lw.new_blob(s1)
    lw.close()
    lw = git.PackWriter()
    lw.new_blob(s2)
    lw.close()
    assert len(glob.glob(git.repo(b'objects/pack' + IDX_PAT,
                                  repo_dir=bupdir))) == 2

    c = client.Client(bupdir, create=True)
    assert len(glob.glob(c.cachedir + IDX_PAT)) == 0
    rw = c.new_packwriter()
    s1sha = rw.new_blob(s1)
    assert rw.exists(s1sha)
    s2sha = rw.new_blob(s2)

    # This is a little hacky, but ensures that we test the
    # code under test. First, flush to ensure that we've
    # actually sent all the command ('receive-objects-v2')
    # and their data to the server. This may be needed if
    # the output buffer size is bigger than the data (both
    # command and objects) we're writing. To see the need
    # for this, change the object sizes at the beginning
    # of this file to be very small (e.g. 10 instead of 10k)
    c.conn.outp.flush()

    # Then, check if we've already received the idx files.
    # This may happen if we're preempted just after writing
    # the data, then the server runs and suggests, and only
    # then we continue in PackWriter_Remote::_raw_write()
    # and check the has_input(), in that case we'll receive
    # the idx still in the rw.new_blob() calls above.
    #
    # In most cases though, that doesn't happen, and we'll
    # get past the has_input() check before the server has
    # a chance to respond - it has to actually hash the new
    # object here, so it takes some time. So also break out
    # of the loop if the server has sent something on the
    # connection.
    #
    # Finally, abort this after a little while (about one
    # second) just in case something's actually broken.
    n = 0
    while (len(glob.glob(c.cachedir + IDX_PAT)) < 2 and not c.conn.has_input()
           and n < 10):
        time.sleep(0.1)
        n += 1
    assert len(glob.glob(c.cachedir + IDX_PAT)) == 2 or c.conn.has_input()
    rw.new_blob(s2)
    assert rw.objcache.exists(s1sha)
    assert rw.objcache.exists(s2sha)
    rw.new_blob(s3)
    assert len(glob.glob(c.cachedir + IDX_PAT)) == 2
    rw.close()
    assert len(glob.glob(c.cachedir + IDX_PAT)) == 3
Ejemplo n.º 6
0
def test_server_split_with_indexes(tmpdir):
    """Remote writer must keep working across a breakpoint() pack split."""
    bupdir = tmpdir
    environ[b'BUP_DIR'] = bupdir
    git.init_repo(bupdir)
    # Store s1 locally so the server can suggest its index later.
    with git.PackWriter() as local_writer:
        local_writer.new_blob(s1)
    with client.Client(bupdir, create=True) as conn, \
         conn.new_packwriter() as remote_writer:
        remote_writer.new_blob(s2)
        remote_writer.breakpoint()  # force a pack split mid-stream
        remote_writer.new_blob(s1)
Ejemplo n.º 7
0
def test_server_split_with_indexes(tmpdir):
    """Remote writer must keep working across a breakpoint() pack split.

    Fix: the original never closed the Client and left both writers
    open on any assertion failure; context managers now guarantee
    cleanup (same structure as the sibling variant of this test).
    """
    environ[b'BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)
    # Store s1 locally first so the server can suggest its index.
    with git.PackWriter() as lw:
        lw.new_blob(s1)
    with client.Client(bupdir, create=True) as c, \
         c.new_packwriter() as rw:
        rw.new_blob(s2)
        rw.breakpoint()  # force a pack split mid-stream
        rw.new_blob(s1)
Ejemplo n.º 8
0
def test_server_split_with_indexes():
    """Remote writer must keep working across a breakpoint() pack split.

    Fix: the remote packwriter and the client connection were never
    closed, leaking the server connection; close them explicitly.
    """
    os.environ['BUP_MAIN_EXE'] = '../../../bup'
    os.environ['BUP_DIR'] = bupdir = 'buptest_tclient.tmp'
    subprocess.call(['rm', '-rf', bupdir])
    git.init_repo(bupdir)
    lw = git.PackWriter()
    c = client.Client(bupdir, create=True)
    rw = c.new_packwriter()

    # Store s1 locally so the server can suggest its index later.
    lw.new_blob(s1)
    lw.close()

    rw.new_blob(s2)
    rw.breakpoint()  # force a pack split mid-stream
    rw.new_blob(s1)
    rw.close()
    c.close()
Ejemplo n.º 9
0
def test_dumb_client_server(tmpdir):
    """In dumb-server mode every local index is sent to the client up front."""
    bupdir = tmpdir
    environ[b'BUP_DIR'] = bupdir
    git.init_repo(bupdir)
    # The marker file switches the server into dumb mode.
    with open(git.repo(b'bup-dumb-server'), 'w'):
        pass

    with git.PackWriter() as local_writer:
        local_writer.new_blob(s1)

    def cached_idx_count(conn):
        return len(glob.glob(conn.cachedir + IDX_PAT))

    with client.Client(bupdir, create=True) as c, \
         c.new_packwriter() as remote_writer:
        assert cached_idx_count(c) == 1
        remote_writer.new_blob(s1)  # known blob: no new index expected
        assert cached_idx_count(c) == 1
        remote_writer.new_blob(s2)
    assert cached_idx_count(c) == 2
Ejemplo n.º 10
0
def test_server_split_with_indexes():
    """Remote writer must keep working across a breakpoint() pack split.

    Fix: the Client connection was never closed; close it after the
    remote packwriter so the server side shuts down cleanly.
    """
    with no_lingering_errors():
        with test_tempdir(b'bup-tclient-') as tmpdir:
            environ[b'BUP_DIR'] = bupdir = tmpdir
            git.init_repo(bupdir)
            lw = git.PackWriter()
            c = client.Client(bupdir, create=True)
            rw = c.new_packwriter()

            # Store s1 locally so the server can suggest its index later.
            lw.new_blob(s1)
            lw.close()

            rw.new_blob(s2)
            rw.breakpoint()  # force a pack split mid-stream
            rw.new_blob(s1)
            rw.close()
            c.close()
Ejemplo n.º 11
0
def main(argv):
    """Initialize the local repository and, with -r, a remote one too."""
    parser = options.Options(optspec)
    opt, flags, extra = parser.parse_bytes(argv[1:])
    if extra:
        parser.fatal("no arguments expected")

    # Always create the local repo first.
    try:
        git.init_repo()
    except git.GitError as e:
        log("bup: error: could not init repository: %s" % e)
        sys.exit(1)

    if not opt.remote:
        return
    # Creating a client with create=True initializes the remote repo.
    git.check_repo_or_die()
    cli = client.Client(argv_bytes(opt.remote), create=True)
    cli.close()
Ejemplo n.º 12
0
def test_server_split_with_indexes():
    """Remote writer must keep working across a breakpoint() pack split.

    Fix: close the remote packwriter and the client before removing the
    temp dir; the original left both open, leaking the connection.
    """
    initial_failures = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tclient-')
    os.environ['BUP_MAIN_EXE'] = '../../../bup'
    os.environ['BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)
    lw = git.PackWriter()
    c = client.Client(bupdir, create=True)
    rw = c.new_packwriter()

    # Store s1 locally so the server can suggest its index later.
    lw.new_blob(s1)
    lw.close()

    rw.new_blob(s2)
    rw.breakpoint()  # force a pack split mid-stream
    rw.new_blob(s1)
    rw.close()
    c.close()
    if wvfailure_count() == initial_failures:
        subprocess.call(['rm', '-rf', tmpdir])
Ejemplo n.º 13
0
def test_dumb_client_server():
    """In dumb-server mode every local index is sent to the client up front."""
    with no_lingering_errors():
        with test_tempdir(b'bup-tclient-') as tmpdir:
            environ[b'BUP_DIR'] = bupdir = tmpdir
            git.init_repo(bupdir)
            # The marker file switches the server into dumb mode.
            open(git.repo(b'bup-dumb-server'), 'w').close()

            local_writer = git.PackWriter()
            local_writer.new_blob(s1)
            local_writer.close()

            c = client.Client(bupdir, create=True)
            remote_writer = c.new_packwriter()
            WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 1)
            remote_writer.new_blob(s1)  # known blob: no new index expected
            WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 1)
            remote_writer.new_blob(s2)
            remote_writer.close()
            WVPASSEQ(len(glob.glob(c.cachedir + IDX_PAT)), 2)
Ejemplo n.º 14
0
def test_dumb_client_server():
    """In dumb-server mode every local index is sent to the client up front."""
    os.environ['BUP_MAIN_EXE'] = '../../../bup'
    os.environ['BUP_DIR'] = bupdir = 'buptest_tclient.tmp'
    subprocess.call(['rm', '-rf', bupdir])
    git.init_repo(bupdir)
    # The marker file switches the server into dumb mode.
    open(git.repo('bup-dumb-server'), 'w').close()

    local_writer = git.PackWriter()
    local_writer.new_blob(s1)
    local_writer.close()

    c = client.Client(bupdir, create=True)
    remote_writer = c.new_packwriter()

    def cached_idx_count():
        return len(glob.glob(c.cachedir + IDX_PAT))

    WVPASSEQ(cached_idx_count(), 1)
    remote_writer.new_blob(s1)  # known blob: no new index expected
    WVPASSEQ(cached_idx_count(), 1)
    remote_writer.new_blob(s2)
    remote_writer.close()
    WVPASSEQ(cached_idx_count(), 2)
Ejemplo n.º 15
0
 def __init__(self, address, create=False, compression_level=None,
              max_pack_size=None, max_pack_objects=None):
     """Open a remote repository via a client.Client connected to *address*.

     Pack-writing limits and compression level are forwarded to the
     superclass.  NOTE(review): *create* is accepted but not used in
     this body — confirm whether the client/superclass honors it.
     """
     # if client.Client() raises an exception, have a client
     # anyway to avoid follow-up exceptions from __del__
     self.client = None
     self.client = client.Client(address)
     self.config = self.client.config
     # init the superclass only afterwards so it can access self.config()
     super(RemoteRepo, self).__init__(address,
                                      compression_level=compression_level,
                                      max_pack_size=max_pack_size,
                                      max_pack_objects=max_pack_objects)
     # Delegate the remaining repo operations straight to the client.
     self.rev_list = self.client.rev_list
     self.list_indexes = self.client.list_indexes
     self.read_ref = self.client.read_ref
     self.send_index = self.client.send_index
     self.join = self.client.join
     self.refs = self.client.refs
     self.resolve = self.client.resolve
     # No pack writer until one is requested.
     self._packwriter = None
Ejemplo n.º 16
0
def test_dumb_client_server():
    """In dumb-server mode every local index is sent to the client up front."""
    initial_failures = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tclient-')
    os.environ['BUP_MAIN_EXE'] = '../../../bup'
    os.environ['BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)
    # The marker file switches the server into dumb mode.
    open(git.repo('bup-dumb-server'), 'w').close()

    local_writer = git.PackWriter()
    local_writer.new_blob(s1)
    local_writer.close()

    c = client.Client(bupdir, create=True)
    remote_writer = c.new_packwriter()

    def cached_idx_count():
        return len(glob.glob(c.cachedir + IDX_PAT))

    WVPASSEQ(cached_idx_count(), 1)
    remote_writer.new_blob(s1)  # known blob: no new index expected
    WVPASSEQ(cached_idx_count(), 1)
    remote_writer.new_blob(s2)
    remote_writer.close()
    WVPASSEQ(cached_idx_count(), 2)
    if wvfailure_count() == initial_failures:
        subprocess.call(['rm', '-rf', tmpdir])
Ejemplo n.º 17
0
def test_dumb_client_server(dumb_mode, tmpdir):
    """Dumb-server mode (marker file or config) sends all indexes up front."""
    bupdir = tmpdir
    environ[b'BUP_DIR'] = bupdir
    git.init_repo(bupdir)
    # Switch the server into dumb mode in whichever way was requested.
    if dumb_mode == 'file':
        with open(git.repo(b'bup-dumb-server', repo_dir=bupdir), 'w'):
            pass
    elif dumb_mode == 'config':
        git.git_config_write(b'bup.dumb-server', b'true', repo_dir=bupdir)
    else:
        assert False

    local_writer = git.PackWriter()
    local_writer.new_blob(s1)
    local_writer.close()

    c = client.Client(bupdir, create=True)
    remote_writer = c.new_packwriter()

    def cached_idx_count():
        return len(glob.glob(c.cachedir + IDX_PAT))

    assert cached_idx_count() == 1
    remote_writer.new_blob(s1)  # known blob: no new index expected
    assert cached_idx_count() == 1
    remote_writer.new_blob(s2)
    remote_writer.close()
    assert cached_idx_count() == 2
Ejemplo n.º 18
0
def test_midx_refreshing():
    """Check that PackIdxList.refresh() honors skip_midx after 'bup midx -f'."""
    initial_failures = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tclient-')
    os.environ['BUP_MAIN_EXE'] = bupmain = '../../../bup'
    os.environ['BUP_DIR'] = bupdir = tmpdir
    git.init_repo(bupdir)
    c = client.Client(bupdir, create=True)
    rw = c.new_packwriter()
    rw.new_blob(s1)
    p1base = rw.breakpoint()
    p1name = os.path.join(c.cachedir, p1base)
    s1sha = rw.new_blob(s1)  # already in p1, so this must not be rewritten
    s2sha = rw.new_blob(s2)
    p2base = rw.close()
    p2name = os.path.join(c.cachedir, p2base)
    del rw

    # Both packs should be visible, before and after a refresh.
    idx_list = git.PackIdxList(bupdir + '/objects/pack')
    WVPASSEQ(len(idx_list.packs), 2)
    idx_list.refresh()
    WVPASSEQ(len(idx_list.packs), 2)
    basenames = sorted(os.path.basename(p.name) for p in idx_list.packs)
    WVPASSEQ(basenames, sorted([p1base, p2base]))

    # Each blob lives only in the pack that was open when it was written.
    idx1 = git.open_idx(p1name)
    WVPASS(idx1.exists(s1sha))
    idx2 = git.open_idx(p2name)
    WVFAIL(idx2.exists(s1sha))
    WVPASS(idx2.exists(s2sha))

    # After forcing a midx, refresh() collapses to one entry (the midx)
    # unless skip_midx is requested.
    subprocess.call([bupmain, 'midx', '-f'])
    idx_list.refresh()
    WVPASSEQ(len(idx_list.packs), 1)
    idx_list.refresh(skip_midx=True)
    WVPASSEQ(len(idx_list.packs), 2)
    idx_list.refresh(skip_midx=False)
    WVPASSEQ(len(idx_list.packs), 1)
    if wvfailure_count() == initial_failures:
        subprocess.call(['rm', '-rf', tmpdir])
Ejemplo n.º 19
0
Archivo: tclient.py Proyecto: yafey/bup
def test_midx_refreshing():
    """Check that PackIdxList.refresh() honors skip_midx after 'bup midx -f'.

    Legacy wvtest version; the breakpoint()/close()/midx sequencing is
    load-bearing, so the code is left untouched and only documented.
    """
    with no_lingering_errors():
        with test_tempdir('bup-tclient-') as tmpdir:
            os.environ['BUP_MAIN_EXE'] = bupmain = '../../../bup'
            os.environ['BUP_DIR'] = bupdir = tmpdir
            git.init_repo(bupdir)
            c = client.Client(bupdir, create=True)
            rw = c.new_packwriter()
            rw.new_blob(s1)
            # breakpoint() closes the current remote pack and starts a
            # new one, returning the finished pack's base name.
            p1base = rw.breakpoint()
            p1name = os.path.join(c.cachedir, p1base)
            s1sha = rw.new_blob(
                s1)  # should not be written; it's already in p1
            s2sha = rw.new_blob(s2)
            p2base = rw.close()
            p2name = os.path.join(c.cachedir, p2base)
            del rw

            # Both packs should be visible, before and after a refresh.
            pi = git.PackIdxList(bupdir + '/objects/pack')
            WVPASSEQ(len(pi.packs), 2)
            pi.refresh()
            WVPASSEQ(len(pi.packs), 2)
            WVPASSEQ(sorted([os.path.basename(i.name) for i in pi.packs]),
                     sorted([p1base, p2base]))

            # Each blob lives only in the pack open when it was written.
            p1 = git.open_idx(p1name)
            WVPASS(p1.exists(s1sha))
            p2 = git.open_idx(p2name)
            WVFAIL(p2.exists(s1sha))
            WVPASS(p2.exists(s2sha))

            # After forcing a midx, refresh() collapses to one entry
            # (the midx) unless skip_midx is requested.
            subprocess.call([bupmain, 'midx', '-f'])
            pi.refresh()
            WVPASSEQ(len(pi.packs), 1)
            pi.refresh(skip_midx=True)
            WVPASSEQ(len(pi.packs), 2)
            pi.refresh(skip_midx=False)
            WVPASSEQ(len(pi.packs), 1)
Ejemplo n.º 20
0
Archivo: split.py Proyecto: fakegit/bup
def main(argv):
    """bup split: hashsplit the input into a tree of blobs.

    Input comes from stdin, from files (opt.sources), or — with
    --git-ids — from git object ids read off stdin.  The result is
    written locally or to a remote repository, optionally updating a
    branch ref afterwards.  The cli/pack_writer creation ordering is
    load-bearing (see inline comments), so the code is untouched.
    """
    opt = opts_from_cmdline(argv)
    if opt.verbose >= 2:
        git.verbose = opt.verbose - 1
    if opt.fanout:
        hashsplit.fanout = opt.fanout
    if opt.blobs:
        hashsplit.fanout = 0
    if opt.bwlimit:
        client.bwlimit = opt.bwlimit

    start_time = time.time()

    sys.stdout.flush()
    out = byte_stream(sys.stdout)
    stdin = byte_stream(sys.stdin)

    if opt.git_ids:
        # the input is actually a series of git object ids that we should retrieve
        # and split.
        #
        # This is a bit messy, but basically it converts from a series of
        # CatPipe.get() iterators into a series of file-type objects.
        # It would be less ugly if either CatPipe.get() returned a file-like object
        # (not very efficient), or split_to_shalist() expected an iterator instead
        # of a file.
        cp = git.CatPipe()

        class IterToFile:
            # Minimal file-like wrapper: read() hands back the next
            # chunk from the iterator, or b'' at exhaustion (EOF).
            def __init__(self, it):
                self.it = iter(it)

            def read(self, size):
                # NOTE(review): *size* is ignored; chunking is whatever
                # the underlying iterator yields — confirm callers cope.
                v = next(self.it, None)
                return v or b''

        def read_ids():
            # Yield one IterToFile per object id read from stdin;
            # unknown ids are recorded via add_error() and skipped.
            while 1:
                line = stdin.readline()
                if not line:
                    break
                if line:
                    line = line.strip()
                try:
                    it = cp.get(line.strip())
                    next(it, None)  # skip the file info
                except KeyError as e:
                    add_error('error: %s' % e)
                    continue
                yield IterToFile(it)

        files = read_ids()
    else:
        # the input either comes from a series of files or from stdin.
        if opt.sources:
            files = (open(argv_bytes(fn), 'rb') for fn in opt.sources)
        else:
            files = [stdin]

    writing = not (opt.noop or opt.copy)
    remote_dest = opt.remote or opt.is_reverse

    if writing:
        git.check_repo_or_die()

    if remote_dest and writing:
        cli = repo = client.Client(opt.remote)
    else:
        cli = nullcontext()
        repo = git

    # cli creation must be last nontrivial command in each if clause above
    with cli:
        if opt.name and writing:
            refname = opt.name and b'refs/heads/%s' % opt.name
            oldref = repo.read_ref(refname)
        else:
            refname = oldref = None

        if not writing:
            pack_writer = NoOpPackWriter()
        elif not remote_dest:
            pack_writer = git.PackWriter(compression_level=opt.compress,
                                         max_pack_size=opt.max_pack_size,
                                         max_pack_objects=opt.max_pack_objects)
        else:
            pack_writer = cli.new_packwriter(
                compression_level=opt.compress,
                max_pack_size=opt.max_pack_size,
                max_pack_objects=opt.max_pack_objects)

        # packwriter creation must be last command in each if clause above
        with pack_writer:
            commit = split(opt, files, oldref, out, pack_writer)

        # pack_writer must be closed before we can update the ref
        if refname:
            repo.update_ref(refname, commit, oldref)

    secs = time.time() - start_time
    size = hashsplit.total_split
    if opt.bench:
        log('bup: %.2f kbytes in %.2f secs = %.2f kbytes/sec\n' %
            (size / 1024, secs, size / 1024 / secs))

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' %
            len(saved_errors))
        sys.exit(1)
Ejemplo n.º 21
0
            if not (old_path and new_path):
                o.fatal("a graft point cannot be empty")
            graft_points.append((resolve_parent(old_path),
                                 resolve_parent(new_path)))

# Reverse mode is signalled by the server via the environment; an
# explicit -r on top of that would be redundant/conflicting.
is_reverse = environ.get(b'BUP_SERVER_REVERSE')
if is_reverse and opt.remote:
    o.fatal("don't use -r in reverse mode; it's automatic")

name = opt.name
if name and not valid_save_name(name):
    o.fatal("'%s' is not a valid branch name" % path_msg(name))
refname = name and b'refs/heads/%s' % name or None
# Pick the destination: a remote client (via -r or reverse mode) or
# the local repository; either way, grab the old ref value and open a
# pack writer against that destination.
if opt.remote or is_reverse:
    try:
        cli = client.Client(opt.remote)
    except client.ClientError as e:
        log('error: %s' % e)
        sys.exit(1)
    oldref = refname and cli.read_ref(refname) or None
    w = cli.new_packwriter(compression_level=opt.compress)
else:
    cli = None
    oldref = refname and git.read_ref(refname) or None
    w = git.PackWriter(compression_level=opt.compress)

handle_ctrl_c()


# Metadata is stored in a file named .bupm in each directory.  The
# first metadata entry will be the metadata for the current directory.
Ejemplo n.º 22
0
#!/usr/bin/env python
"""Initialize a bup repository, and with -r a remote one as well."""
import sys

from bup import git, options, client
from bup.helpers import *

optspec = """
[BUP_DIR=...] bup init [-r host:path]
--
r,remote=  remote repository path
"""
o = options.Options(optspec)
opt, flags, extra = o.parse(sys.argv[1:])

if extra:
    o.fatal("no arguments expected")

# Always create the local repo first.
try:
    git.init_repo()  # local repo
except git.GitError as e:  # fix: 'except X, e' is Python-2-only syntax
    log("bup: error: could not init repository: %s" % e)
    sys.exit(1)

if opt.remote:
    # Creating a client with create=True initializes the remote repo.
    git.check_repo_or_die()
    cli = client.Client(opt.remote, create=True)
    cli.close()
Ejemplo n.º 23
0
Archivo: save.py Proyecto: fakegit/bup
def main(argv):
    """bup save: store the indexed working set as a commit tree.

    Writes to the local repo, or via client.Client when a remote
    destination (or reverse mode) is requested, and optionally updates
    the named branch ref after the packwriter has been closed.  The
    cli/packwriter/ref ordering is load-bearing (see inline comments),
    so the code is untouched.
    """
    handle_ctrl_c()
    opt = opts_from_cmdline(argv)
    client.bwlimit = opt.bwlimit
    git.check_repo_or_die()

    remote_dest = opt.remote or opt.is_reverse
    if not remote_dest:
        repo = git
        cli = nullcontext()
    else:
        try:
            cli = repo = client.Client(opt.remote)
        except client.ClientError as e:
            log('error: %s' % e)
            sys.exit(1)

    # cli creation must be last nontrivial command in each if clause above
    with cli:
        if not remote_dest:
            w = git.PackWriter(compression_level=opt.compress)
        else:
            w = cli.new_packwriter(compression_level=opt.compress)

        with w:
            sys.stdout.flush()
            out = byte_stream(sys.stdout)

            if opt.name:
                refname = b'refs/heads/%s' % opt.name
                parent = repo.read_ref(refname)
            else:
                refname = parent = None

            # Read the index produced by 'bup index'; a missing index
            # file is fatal, any other IOError propagates.
            indexfile = opt.indexfile or git.repo(b'bupindex')
            try:
                msr = index.MetaStoreReader(indexfile + b'.meta')
            except IOError as ex:
                if ex.errno != ENOENT:
                    raise
                log('error: cannot access %r; have you run bup index?'
                    % path_msg(indexfile))
                sys.exit(1)
            with msr, \
                 hlinkdb.HLinkDB(indexfile + b'.hlink') as hlink_db, \
                 index.Reader(indexfile) as reader:
                tree = save_tree(opt, reader, hlink_db, msr, w)
            if opt.tree:
                out.write(hexlify(tree))
                out.write(b'\n')
            if opt.commit or opt.name:
                commit = commit_tree(tree, parent, opt.date, argv, w)
                if opt.commit:
                    out.write(hexlify(commit))
                    out.write(b'\n')

        # packwriter must be closed before we can update the ref
        if opt.name:
            repo.update_ref(refname, commit, parent)

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
        sys.exit(1)
Ejemplo n.º 24
0
Archivo: split.py Proyecto: gdt/bup
def main(argv):
    """bup split: hashsplit input into blobs/trees/commits.

    Handles the -b/-t/-c/-n output modes plus --noop/--copy, reading
    from stdin, from files given as arguments, or (with --git-ids)
    from git object ids on stdin, and writing either locally or via a
    remote client depending on -r / reverse mode.  The pack-writer /
    ref-update ordering near the end is load-bearing, so the code is
    left untouched and only documented.
    """
    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])
    if opt.name: opt.name = argv_bytes(opt.name)
    if opt.remote: opt.remote = argv_bytes(opt.remote)
    if opt.verbose is None: opt.verbose = 0

    # Option sanity checks: the output modes are mutually constrained.
    if not (opt.blobs or opt.tree or opt.commit or opt.name or opt.noop
            or opt.copy):
        o.fatal("use one or more of -b, -t, -c, -n, --noop, --copy")
    if opt.copy and (opt.blobs or opt.tree):
        o.fatal('--copy is incompatible with -b, -t')
    if (opt.noop or opt.copy) and (opt.commit or opt.name):
        o.fatal('--noop and --copy are incompatible with -c, -n')
    if opt.blobs and (opt.tree or opt.commit or opt.name):
        o.fatal('-b is incompatible with -t, -c, -n')
    if extra and opt.git_ids:
        o.fatal("don't provide filenames when using --git-ids")

    if opt.verbose >= 2:
        git.verbose = opt.verbose - 1
        opt.bench = 1

    max_pack_size = None
    if opt.max_pack_size:
        max_pack_size = parse_num(opt.max_pack_size)
    max_pack_objects = None
    if opt.max_pack_objects:
        max_pack_objects = parse_num(opt.max_pack_objects)

    if opt.fanout:
        hashsplit.fanout = parse_num(opt.fanout)
    if opt.blobs:
        hashsplit.fanout = 0
    if opt.bwlimit:
        client.bwlimit = parse_num(opt.bwlimit)
    if opt.date:
        date = parse_date_or_fatal(opt.date, o.fatal)
    else:
        date = time.time()

    # Hack around lack of nonlocal vars in python 2
    total_bytes = [0]

    def prog(filenum, nbytes):
        # Progress callback handed down into the hashsplit calls below.
        total_bytes[0] += nbytes
        if filenum > 0:
            qprogress('Splitting: file #%d, %d kbytes\r' %
                      (filenum + 1, total_bytes[0] // 1024))
        else:
            qprogress('Splitting: %d kbytes\r' % (total_bytes[0] // 1024))

    is_reverse = environ.get(b'BUP_SERVER_REVERSE')
    if is_reverse and opt.remote:
        o.fatal("don't use -r in reverse mode; it's automatic")
    start_time = time.time()

    if opt.name and not valid_save_name(opt.name):
        o.fatal("'%r' is not a valid branch name." % opt.name)
    refname = opt.name and b'refs/heads/%s' % opt.name or None

    # Pick the destination: none (--noop/--copy), remote, or local git.
    if opt.noop or opt.copy:
        cli = pack_writer = oldref = None
    elif opt.remote or is_reverse:
        git.check_repo_or_die()
        cli = client.Client(opt.remote)
        oldref = refname and cli.read_ref(refname) or None
        pack_writer = cli.new_packwriter(compression_level=opt.compress,
                                         max_pack_size=max_pack_size,
                                         max_pack_objects=max_pack_objects)
    else:
        git.check_repo_or_die()
        cli = None
        oldref = refname and git.read_ref(refname) or None
        pack_writer = git.PackWriter(compression_level=opt.compress,
                                     max_pack_size=max_pack_size,
                                     max_pack_objects=max_pack_objects)

    input = byte_stream(sys.stdin)

    if opt.git_ids:
        # the input is actually a series of git object ids that we should retrieve
        # and split.
        #
        # This is a bit messy, but basically it converts from a series of
        # CatPipe.get() iterators into a series of file-type objects.
        # It would be less ugly if either CatPipe.get() returned a file-like object
        # (not very efficient), or split_to_shalist() expected an iterator instead
        # of a file.
        cp = git.CatPipe()

        class IterToFile:
            # Minimal file-like wrapper around a chunk iterator.
            def __init__(self, it):
                self.it = iter(it)

            def read(self, size):
                # NOTE(review): *size* is ignored; chunking is whatever
                # the underlying iterator yields.
                v = next(self.it, None)
                return v or b''

        def read_ids():
            # Yield one IterToFile per object id read from stdin;
            # unknown ids are recorded via add_error() and skipped.
            while 1:
                line = input.readline()
                if not line:
                    break
                if line:
                    line = line.strip()
                try:
                    it = cp.get(line.strip())
                    next(it, None)  # skip the file info
                except KeyError as e:
                    add_error('error: %s' % e)
                    continue
                yield IterToFile(it)

        files = read_ids()
    else:
        # the input either comes from a series of files or from stdin.
        files = extra and (open(argv_bytes(fn), 'rb')
                           for fn in extra) or [input]

    if pack_writer:
        new_blob = pack_writer.new_blob
        new_tree = pack_writer.new_tree
    elif opt.blobs or opt.tree:
        # --noop mode
        new_blob = lambda content: git.calc_hash(b'blob', content)
        new_tree = lambda shalist: git.calc_hash(b'tree',
                                                 git.tree_encode(shalist))

    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    if opt.blobs:
        shalist = hashsplit.split_to_blobs(new_blob,
                                           files,
                                           keep_boundaries=opt.keep_boundaries,
                                           progress=prog)
        for (sha, size, level) in shalist:
            out.write(hexlify(sha) + b'\n')
            reprogress()
    elif opt.tree or opt.commit or opt.name:
        if opt.name:  # insert dummy_name which may be used as a restore target
            mode, sha = \
                hashsplit.split_to_blob_or_tree(new_blob, new_tree, files,
                                                keep_boundaries=opt.keep_boundaries,
                                                progress=prog)
            splitfile_name = git.mangle_name(b'data', hashsplit.GIT_MODE_FILE,
                                             mode)
            shalist = [(mode, splitfile_name, sha)]
        else:
            shalist = hashsplit.split_to_shalist(
                new_blob,
                new_tree,
                files,
                keep_boundaries=opt.keep_boundaries,
                progress=prog)
        tree = new_tree(shalist)
    else:
        last = 0
        it = hashsplit.hashsplit_iter(files,
                                      keep_boundaries=opt.keep_boundaries,
                                      progress=prog)
        for (blob, level) in it:
            hashsplit.total_split += len(blob)
            if opt.copy:
                # NOTE(review): str(blob) on bytes would emit "b'...'"
                # under Python 3 — confirm this path is Python-2-only.
                sys.stdout.write(str(blob))
            megs = hashsplit.total_split // 1024 // 1024
            if not opt.quiet and last != megs:
                last = megs

    if opt.verbose:
        log('\n')
    if opt.tree:
        out.write(hexlify(tree) + b'\n')
    if opt.commit or opt.name:
        msg = b'bup split\n\nGenerated by command:\n%r\n' % compat.get_argvb()
        ref = opt.name and (b'refs/heads/%s' % opt.name) or None
        userline = b'%s <%s@%s>' % (userfullname(), username(), hostname())
        commit = pack_writer.new_commit(tree, oldref, userline, date, None,
                                        userline, date, None, msg)
        if opt.commit:
            out.write(hexlify(commit) + b'\n')

    if pack_writer:
        pack_writer.close()  # must close before we can update the ref

    if opt.name:
        if cli:
            cli.update_ref(refname, commit, oldref)
        else:
            git.update_ref(refname, commit, oldref)

    if cli:
        cli.close()

    secs = time.time() - start_time
    size = hashsplit.total_split
    if opt.bench:
        log('bup: %.2f kbytes in %.2f secs = %.2f kbytes/sec\n' %
            (size / 1024, secs, size / 1024 / secs))

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' %
            len(saved_errors))
        sys.exit(1)
Ejemplo n.º 25
0
def main(argv):
    """Entry point for 'bup save': index-driven backup of the given paths.

    Reads the bup index, hashsplits new/changed regular files, assembles
    git trees mirroring the archive layout (after --strip/--graft
    mapping), stores per-directory metadata in .bupm files, and
    optionally creates a commit and updates the named branch ref,
    either locally or on a remote server.
    """

    # Hack around lack of nonlocal vars in python 2
    _nonlocal = {}

    o = options.Options(optspec)
    opt, flags, extra = o.parse_bytes(argv[1:])

    # Normalize path/name option values to bytes for consistent
    # filesystem handling.
    if opt.indexfile:
        opt.indexfile = argv_bytes(opt.indexfile)
    if opt.name:
        opt.name = argv_bytes(opt.name)
    if opt.remote:
        opt.remote = argv_bytes(opt.remote)
    if opt.strip_path:
        opt.strip_path = argv_bytes(opt.strip_path)

    git.check_repo_or_die()
    if not (opt.tree or opt.commit or opt.name):
        o.fatal("use one or more of -t, -c, -n")
    if not extra:
        o.fatal("no filenames given")

    extra = [argv_bytes(x) for x in extra]

    opt.progress = (istty2 and not opt.quiet)
    opt.smaller = parse_num(opt.smaller or 0)
    if opt.bwlimit:
        client.bwlimit = parse_num(opt.bwlimit)

    if opt.date:
        date = parse_date_or_fatal(opt.date, o.fatal)
    else:
        date = time.time()

    if opt.strip and opt.strip_path:
        o.fatal("--strip is incompatible with --strip-path")

    # Collect --graft mappings as (old_path, new_path) pairs; grafting is
    # mutually exclusive with --strip/--strip-path.
    graft_points = []
    if opt.graft:
        if opt.strip:
            o.fatal("--strip is incompatible with --graft")

        if opt.strip_path:
            o.fatal("--strip-path is incompatible with --graft")

        for (option, parameter) in flags:
            if option == "--graft":
                parameter = argv_bytes(parameter)
                splitted_parameter = parameter.split(b'=')
                if len(splitted_parameter) != 2:
                    o.fatal(
                        "a graft point must be of the form old_path=new_path")
                old_path, new_path = splitted_parameter
                if not (old_path and new_path):
                    o.fatal("a graft point cannot be empty")
                graft_points.append(
                    (resolve_parent(old_path), resolve_parent(new_path)))

    is_reverse = environ.get(b'BUP_SERVER_REVERSE')
    if is_reverse and opt.remote:
        o.fatal("don't use -r in reverse mode; it's automatic")

    name = opt.name
    if name and not valid_save_name(name):
        o.fatal("'%s' is not a valid branch name" % path_msg(name))
    refname = name and b'refs/heads/%s' % name or None
    # Choose the destination (remote client or local repo) and open the
    # pack writer used for every object written below.
    if opt.remote or is_reverse:
        try:
            cli = client.Client(opt.remote)
        except client.ClientError as e:
            log('error: %s' % e)
            sys.exit(1)
        oldref = refname and cli.read_ref(refname) or None
        w = cli.new_packwriter(compression_level=opt.compress)
    else:
        cli = None
        oldref = refname and git.read_ref(refname) or None
        w = git.PackWriter(compression_level=opt.compress)

    handle_ctrl_c()

    # Metadata is stored in a file named .bupm in each directory.  The
    # first metadata entry will be the metadata for the current directory.
    # The remaining entries will be for each of the other directory
    # elements, in the order they're listed in the index.
    #
    # Since the git tree elements are sorted according to
    # git.shalist_item_sort_key, the metalist items are accumulated as
    # (sort_key, metadata) tuples, and then sorted when the .bupm file is
    # created.  The sort_key should have been computed using the element's
    # mangled name and git mode (after hashsplitting), but the code isn't
    # actually doing that but rather uses the element's real name and mode.
    # This makes things a bit more difficult when reading it back, see
    # vfs.ordered_tree_entries().

    # Maintain a stack of information representing the current location in
    # the archive being constructed.  The current path is recorded in
    # parts, which will be something like ['', 'home', 'someuser'], and
    # the accumulated content and metadata for of the dirs in parts is
    # stored in parallel stacks in shalists and metalists.

    parts = []  # Current archive position (stack of dir names).
    shalists = []  # Hashes for each dir in paths.
    metalists = []  # Metadata for each dir in paths.

    def _push(part, metadata):
        # Enter a new archive directory -- make it the current directory.
        parts.append(part)
        shalists.append([])
        metalists.append([(b'', metadata)])  # This dir's metadata (no name).

    def _pop(force_tree, dir_metadata=None):
        # Leave the current archive directory and add its tree to its parent.
        assert (len(parts) >= 1)
        part = parts.pop()
        shalist = shalists.pop()
        metalist = metalists.pop()
        # FIXME: only test if collision is possible (i.e. given --strip, etc.)?
        if force_tree:
            tree = force_tree
        else:
            # Drop duplicate names (possible via strip/graft remapping),
            # keeping the metalist index in sync with the surviving
            # non-directory entries.
            names_seen = set()
            clean_list = []
            metaidx = 1  # entry at 0 is for the dir
            for x in shalist:
                name = x[1]
                if name in names_seen:
                    parent_path = b'/'.join(parts) + b'/'
                    add_error('error: ignoring duplicate path %s in %s' %
                              (path_msg(name), path_msg(parent_path)))
                    if not stat.S_ISDIR(x[0]):
                        del metalist[metaidx]
                else:
                    names_seen.add(name)
                    clean_list.append(x)
                    if not stat.S_ISDIR(x[0]):
                        metaidx += 1

            if dir_metadata:  # Override the original metadata pushed for this dir.
                metalist = [(b'', dir_metadata)] + metalist[1:]
            sorted_metalist = sorted(metalist, key=lambda x: x[0])
            metadata = b''.join([m[1].encode() for m in sorted_metalist])
            metadata_f = BytesIO(metadata)
            # Store the directory's .bupm (hashsplit into a blob or tree).
            mode, id = hashsplit.split_to_blob_or_tree(w.new_blob,
                                                       w.new_tree,
                                                       [metadata_f],
                                                       keep_boundaries=False)
            clean_list.append((mode, b'.bupm', id))

            tree = w.new_tree(clean_list)
        if shalists:
            shalists[-1].append((GIT_MODE_TREE,
                                 git.mangle_name(part, GIT_MODE_TREE,
                                                 GIT_MODE_TREE), tree))
        return tree

    _nonlocal['count'] = 0
    _nonlocal['subcount'] = 0
    _nonlocal['lastremain'] = None

    def progress_report(n):
        # Periodic progress line: percent done, throughput, and a smoothed
        # remaining-time estimate.
        _nonlocal['subcount'] += n
        cc = _nonlocal['count'] + _nonlocal['subcount']
        pct = total and (cc * 100.0 / total) or 0
        now = time.time()
        elapsed = now - tstart
        kps = elapsed and int(cc / 1024. / elapsed)
        kps_frac = 10**int(math.log(kps + 1, 10) - 1)
        kps = int(kps / kps_frac) * kps_frac
        if cc:
            remain = elapsed * 1.0 / cc * (total - cc)
        else:
            remain = 0.0
        # Dampen jitter: ignore small (<5%) upward jumps in the estimate.
        if (_nonlocal['lastremain'] and (remain > _nonlocal['lastremain']) and
            ((remain - _nonlocal['lastremain']) / _nonlocal['lastremain'] <
             0.05)):
            remain = _nonlocal['lastremain']
        else:
            _nonlocal['lastremain'] = remain
        hours = int(remain / 60 / 60)
        mins = int(remain / 60 - hours * 60)
        secs = int(remain - hours * 60 * 60 - mins * 60)
        if elapsed < 30:
            remainstr = ''
            kpsstr = ''
        else:
            kpsstr = '%dk/s' % kps
            if hours:
                remainstr = '%dh%dm' % (hours, mins)
            elif mins:
                remainstr = '%dm%d' % (mins, secs)
            else:
                remainstr = '%ds' % secs
        qprogress(
            'Saving: %.2f%% (%d/%dk, %d/%d files) %s %s\r' %
            (pct, cc / 1024, total / 1024, fcount, ftotal, remainstr, kpsstr))

    indexfile = opt.indexfile or git.repo(b'bupindex')
    r = index.Reader(indexfile)
    try:
        msr = index.MetaStoreReader(indexfile + b'.meta')
    except IOError as ex:
        if ex.errno != EACCES:
            raise
        log('error: cannot access %r; have you run bup index?' %
            path_msg(indexfile))
        sys.exit(1)
    hlink_db = hlinkdb.HLinkDB(indexfile + b'.hlink')

    def already_saved(ent):
        # Return the entry's sha if it's valid and present in the repo,
        # otherwise a falsy value.
        return ent.is_valid() and w.exists(ent.sha) and ent.sha

    def wantrecurse_pre(ent):
        return not already_saved(ent)

    def wantrecurse_during(ent):
        return not already_saved(ent) or ent.sha_missing()

    def find_hardlink_target(hlink_db, ent):
        # First recorded path for this (dev, ino) when the entry is a
        # multiply-linked non-directory; otherwise None.
        if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1:
            link_paths = hlink_db.node_paths(ent.dev, ent.ino)
            if link_paths:
                return link_paths[0]

    # Pre-scan the index to compute the total byte count for progress.
    total = ftotal = 0
    if opt.progress:
        for (transname, ent) in r.filter(extra, wantrecurse=wantrecurse_pre):
            if not (ftotal % 10024):
                qprogress('Reading index: %d\r' % ftotal)
            exists = ent.exists()
            hashvalid = already_saved(ent)
            ent.set_sha_missing(not hashvalid)
            if not opt.smaller or ent.size < opt.smaller:
                if exists and not hashvalid:
                    total += ent.size
            ftotal += 1
        progress('Reading index: %d, done.\n' % ftotal)
        hashsplit.progress_callback = progress_report

    # Root collisions occur when strip or graft options map more than one
    # path to the same directory (paths which originally had separate
    # parents).  When that situation is detected, use empty metadata for
    # the parent.  Otherwise, use the metadata for the common parent.
    # Collision example: "bup save ... --strip /foo /foo/bar /bar".

    # FIXME: Add collision tests, or handle collisions some other way.

    # FIXME: Detect/handle strip/graft name collisions (other than root),
    # i.e. if '/foo/bar' and '/bar' both map to '/'.

    first_root = None
    root_collision = None
    tstart = time.time()
    fcount = 0
    lastskip_name = None
    lastdir = b''
    for (transname, ent) in r.filter(extra, wantrecurse=wantrecurse_during):
        (dir, file) = os.path.split(ent.name)
        exists = (ent.flags & index.IX_EXISTS)
        hashvalid = already_saved(ent)
        wasmissing = ent.sha_missing()
        oldsize = ent.size
        if opt.verbose:
            # Status letter: D=deleted, A=added, M=modified, ' '=unchanged.
            if not exists:
                status = 'D'
            elif not hashvalid:
                if ent.sha == index.EMPTY_SHA:
                    status = 'A'
                else:
                    status = 'M'
            else:
                status = ' '
            if opt.verbose >= 2:
                log('%s %-70s\n' % (status, path_msg(ent.name)))
            elif not stat.S_ISDIR(ent.mode) and lastdir != dir:
                if not lastdir.startswith(dir):
                    log('%s %-70s\n' %
                        (status, path_msg(os.path.join(dir, b''))))
                lastdir = dir

        if opt.progress:
            progress_report(0)
        fcount += 1

        if not exists:
            continue
        if opt.smaller and ent.size >= opt.smaller:
            if exists and not hashvalid:
                if opt.verbose:
                    log('skipping large file "%s"\n' % path_msg(ent.name))
                lastskip_name = ent.name
            continue

        assert (dir.startswith(b'/'))
        # Map the filesystem dir to its archive path components.
        if opt.strip:
            dirp = stripped_path_components(dir, extra)
        elif opt.strip_path:
            dirp = stripped_path_components(dir, [opt.strip_path])
        elif graft_points:
            dirp = grafted_path_components(graft_points, dir)
        else:
            dirp = path_components(dir)

        # At this point, dirp contains a representation of the archive
        # path that looks like [(archive_dir_name, real_fs_path), ...].
        # So given "bup save ... --strip /foo/bar /foo/bar/baz", dirp
        # might look like this at some point:
        #   [('', '/foo/bar'), ('baz', '/foo/bar/baz'), ...].

        # This dual representation supports stripping/grafting, where the
        # archive path may not have a direct correspondence with the
        # filesystem.  The root directory is represented by an initial
        # component named '', and any component that doesn't have a
        # corresponding filesystem directory (due to grafting, for
        # example) will have a real_fs_path of None, i.e. [('', None),
        # ...].

        if first_root == None:
            first_root = dirp[0]
        elif first_root != dirp[0]:
            root_collision = True

        # If switching to a new sub-tree, finish the current sub-tree.
        while parts > [x[0] for x in dirp]:
            _pop(force_tree=None)

        # If switching to a new sub-tree, start a new sub-tree.
        for path_component in dirp[len(parts):]:
            dir_name, fs_path = path_component
            # Not indexed, so just grab the FS metadata or use empty metadata.
            try:
                meta = metadata.from_path(fs_path, normalized=True) \
                    if fs_path else metadata.Metadata()
            except (OSError, IOError) as e:
                add_error(e)
                lastskip_name = dir_name
                meta = metadata.Metadata()
            _push(dir_name, meta)

        if not file:
            if len(parts) == 1:
                continue  # We're at the top level -- keep the current root dir
            # Since there's no filename, this is a subdir -- finish it.
            oldtree = already_saved(ent)  # may be None
            newtree = _pop(force_tree=oldtree)
            if not oldtree:
                if lastskip_name and lastskip_name.startswith(ent.name):
                    ent.invalidate()
                else:
                    ent.validate(GIT_MODE_TREE, newtree)
                ent.repack()
            if exists and wasmissing:
                _nonlocal['count'] += oldsize
            continue

        # it's not a directory
        if hashvalid:
            # Content already in the repo: reuse the stored sha but refresh
            # the metadata from the metastore.
            id = ent.sha
            git_name = git.mangle_name(file, ent.mode, ent.gitmode)
            git_info = (ent.gitmode, git_name, id)
            shalists[-1].append(git_info)
            sort_key = git.shalist_item_sort_key((ent.mode, file, id))
            meta = msr.metadata_at(ent.meta_ofs)
            meta.hardlink_target = find_hardlink_target(hlink_db, ent)
            # Restore the times that were cleared to 0 in the metastore.
            (meta.atime, meta.mtime, meta.ctime) = (ent.atime, ent.mtime,
                                                    ent.ctime)
            metalists[-1].append((sort_key, meta))
        else:
            id = None
            hlink = find_hardlink_target(hlink_db, ent)
            try:
                meta = metadata.from_path(
                    ent.name,
                    hardlink_target=hlink,
                    normalized=True,
                    after_stat=after_nondir_metadata_stat)
            except (OSError, IOError) as e:
                add_error(e)
                lastskip_name = ent.name
                continue
            if stat.S_IFMT(ent.mode) != stat.S_IFMT(meta.mode):
                # The mode changed since we indexed the file, this is bad.
                # This can cause two issues:
                # 1) We e.g. think the file is a regular file, but now it's
                #    something else (a device, socket, FIFO or symlink, etc.)
                #    and _read_ from it when we shouldn't.
                # 2) We then record it as valid, but don't update the index
                #    metadata, and on a subsequent save it has 'hashvalid'
                #    but is recorded as the file type from the index, when
                #    the content is something else ...
                # Avoid all of these consistency issues by just skipping such
                # things - it really ought to not happen anyway.
                add_error("%s: mode changed since indexing, skipping." %
                          path_msg(ent.name))
                lastskip_name = ent.name
                continue
            if stat.S_ISREG(ent.mode):
                try:
                    # If the file changes while we're reading it, then our reading
                    # may stop at some point, but the stat() above may have gotten
                    # a different size already. Recalculate the meta size so that
                    # the repository records the accurate size in the metadata, even
                    # if the other stat() data might be slightly older than the file
                    # content (which we can't fix, this is inherently racy, but we
                    # can prevent the size mismatch.)
                    meta.size = 0

                    def new_blob(data):
                        # Wrap w.new_blob to tally bytes actually read.
                        meta.size += len(data)
                        return w.new_blob(data)

                    before_saving_regular_file(ent.name)
                    with hashsplit.open_noatime(ent.name) as f:
                        (mode, id) = hashsplit.split_to_blob_or_tree(
                            new_blob, w.new_tree, [f], keep_boundaries=False)
                except (IOError, OSError) as e:
                    add_error('%s: %s' % (ent.name, e))
                    lastskip_name = ent.name
            elif stat.S_ISDIR(ent.mode):
                assert (0)  # handled above
            elif stat.S_ISLNK(ent.mode):
                mode, id = (GIT_MODE_SYMLINK, w.new_blob(meta.symlink_target))
            else:
                # Everything else should be fully described by its
                # metadata, so just record an empty blob, so the paths
                # in the tree and .bupm will match up.
                (mode, id) = (GIT_MODE_FILE, w.new_blob(b''))

            if id:
                ent.validate(mode, id)
                ent.repack()
                git_name = git.mangle_name(file, ent.mode, ent.gitmode)
                git_info = (mode, git_name, id)
                shalists[-1].append(git_info)
                sort_key = git.shalist_item_sort_key((ent.mode, file, id))
                metalists[-1].append((sort_key, meta))

        if exists and wasmissing:
            _nonlocal['count'] += oldsize
            _nonlocal['subcount'] = 0

    if opt.progress:
        pct = total and _nonlocal['count'] * 100.0 / total or 100
        progress(
            'Saving: %.2f%% (%d/%dk, %d/%d files), done.    \n' %
            (pct, _nonlocal['count'] / 1024, total / 1024, fcount, ftotal))

    while len(parts) > 1:  # _pop() all the parts above the root
        _pop(force_tree=None)
    assert (len(shalists) == 1)
    assert (len(metalists) == 1)

    # Finish the root directory.
    tree = _pop(
        force_tree=None,
        # When there's a collision, use empty metadata for the root.
        dir_metadata=metadata.Metadata() if root_collision else None)

    sys.stdout.flush()
    out = byte_stream(sys.stdout)

    if opt.tree:
        out.write(hexlify(tree))
        out.write(b'\n')
    if opt.commit or name:
        if compat.py_maj > 2:
            # Strip b prefix from python 3 bytes reprs to preserve previous format
            msgcmd = b'[%s]' % b', '.join(
                [repr(argv_bytes(x))[1:].encode('ascii') for x in argv])
        else:
            msgcmd = repr(argv)
        msg = b'bup save\n\nGenerated by command:\n%s\n' % msgcmd
        userline = (b'%s <%s@%s>' % (userfullname(), username(), hostname()))
        commit = w.new_commit(tree, oldref, userline, date, None, userline,
                              date, None, msg)
        if opt.commit:
            out.write(hexlify(commit))
            out.write(b'\n')

    msr.close()
    w.close()  # must close before we can update the ref

    if opt.name:
        if cli:
            cli.update_ref(refname, commit, oldref)
        else:
            git.update_ref(refname, commit, oldref)

    if cli:
        cli.close()

    if saved_errors:
        log('WARNING: %d errors encountered while saving.\n' %
            len(saved_errors))
        sys.exit(1)
Ejemplo n.º 26
0
 def __init__(self, address):
     """Open a client connection to the repository at *address*.

     Keeps the address and client handle on the instance and exposes
     the client's rev_list directly.
     """
     conn = client.Client(address)
     self.address = address
     self.client = conn
     self.rev_list = conn.rev_list
Ejemplo n.º 27
0
sys.path[:0] = [os.path.dirname(os.path.realpath(__file__)) + '/..']

from bup import compat, git, options, client
from bup.helpers import log, saved_errors
from bup.compat import argv_bytes


optspec = """
[BUP_DIR=...] bup init [-r host:path]
--
r,remote=  remote repository path
"""
o = options.Options(optspec)
opt, flags, extra = o.parse(compat.argv[1:])

# 'bup init' takes no positional arguments.
if extra:
    o.fatal("no arguments expected")


# Always initialize (or verify) the local repository first.
try:
    git.init_repo()  # local repo
except git.GitError as e:
    log("bup: error: could not init repository: %s" % e)
    sys.exit(1)

# With -r, also set up the remote repository: connecting with
# create=True asks the server to initialize it.
if opt.remote:
    git.check_repo_or_die()
    cli = client.Client(argv_bytes(opt.remote), create=True)
    cli.close()
Ejemplo n.º 28
0
    else:
        qprogress('Splitting: %d kbytes\r' % (total_bytes / 1024))


is_reverse = os.environ.get('BUP_SERVER_REVERSE')
if is_reverse and opt.remote:
    o.fatal("don't use -r in reverse mode; it's automatic")
start_time = time.time()

if opt.name and opt.name.startswith('.'):
    o.fatal("'%s' is not a valid branch name." % opt.name)
refname = opt.name and 'refs/heads/%s' % opt.name or None
# With --noop/--copy nothing is written to any repository at all.
if opt.noop or opt.copy:
    cli = pack_writer = oldref = None
elif opt.remote or is_reverse:
    # Write to a remote repository through a bup client connection.
    cli = client.Client(opt.remote, compression_level=opt.compress)
    oldref = refname and cli.read_ref(refname) or None
    pack_writer = cli.new_packwriter()
else:
    # Write directly to the local repository.
    cli = None
    oldref = refname and git.read_ref(refname) or None
    pack_writer = git.PackWriter(compression_level=opt.compress)

if opt.git_ids:
    # the input is actually a series of git object ids that we should retrieve
    # and split.
    #
    # This is a bit messy, but basically it converts from a series of
    # CatPipe.get() iterators into a series of file-type objects.
    # It would be less ugly if either CatPipe.get() returned a file-like object
    # (not very efficient), or split_to_shalist() expected an iterator instead