Example #1
def test_atomically_replaced_file():
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-thelper-')
    target_file = os.path.join(tmpdir, 'test-atomic-write')
    initial_failures = wvfailure_count()

    with atomically_replaced_file(target_file, mode='w') as f:
        f.write('asdf')
        WVPASSEQ(f.mode, 'w')
    with open(target_file) as f:
        WVPASSEQ(f.read(), 'asdf')

    try:
        with atomically_replaced_file(target_file, mode='w') as f:
            f.write('wxyz')
            raise Exception()
    except Exception:
        pass
    with open(target_file) as f:
        WVPASSEQ(f.read(), 'asdf')

    with atomically_replaced_file(target_file, mode='wb') as f:
        f.write(os.urandom(20))
        WVPASSEQ(f.mode, 'wb')

    if wvfailure_count() == initial_failures:
        subprocess.call(['rm', '-rf', tmpdir])
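All of these snippets use bup's atomically_replaced_file helper from bup.helpers. For orientation, here is a minimal sketch of the write-to-temp-then-rename pattern it implements; the name atomic_replace_sketch is hypothetical and the real helper handles more (bytes paths, buffering, an f.mode attribute), so treat this as an illustration, not bup's actual code:

import os, tempfile
from contextlib import contextmanager

@contextmanager
def atomic_replace_sketch(path, mode='w'):
    # Hypothetical stand-in for bup's helper.  Write to a temp file in
    # the same directory so os.rename() stays on one filesystem and is
    # atomic; only rename over the target on clean exit.
    dirname = os.path.dirname(path) or '.'
    fd, tmp = tempfile.mkstemp(dir=dirname, prefix='.tmp-')
    f = os.fdopen(fd, mode)
    try:
        yield f
        f.close()
        os.rename(tmp, path)  # atomic replace on POSIX
    except BaseException:
        f.close()
        os.unlink(tmp)        # leave the target untouched on failure
        raise

This is why the second with-block in the test above can raise mid-write and the target still reads back 'asdf': the partial 'wxyz' write never reaches the target path.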
Example #2
def sync_index(self, name):
    mkdirp(self.cachedir)
    fn = os.path.join(self.cachedir, name)
    if os.path.exists(fn):
        msg = ("won't request existing .idx, try `bup bloom --check %s`"
               % path_msg(fn))
        raise ClientError(msg)
    with atomically_replaced_file(fn, 'wb') as f:
        self.send_index(name, f, lambda size: None)
Example #3
def test_atomically_replaced_file(tmpdir):
    target_file = os.path.join(tmpdir, b'test-atomic-write')

    with atomically_replaced_file(target_file, mode='w') as f:
        f.write('asdf')
        WVPASSEQ(f.mode, 'w')
    with open(target_file) as f:
        WVPASSEQ(f.read(), 'asdf')

    try:
        with atomically_replaced_file(target_file, mode='w') as f:
            f.write('wxyz')
            raise Exception()
    except Exception:
        pass
    with open(target_file) as f:
        WVPASSEQ(f.read(), 'asdf')

    with atomically_replaced_file(target_file, mode='wb') as f:
        f.write(os.urandom(20))
        WVPASSEQ(f.mode, 'wb')
Example #4
def test_atomically_replaced_file():
    with no_lingering_errors(), test_tempdir('bup-thelper-') as tmpdir:
        target_file = os.path.join(tmpdir, 'test-atomic-write')

        with atomically_replaced_file(target_file, mode='w') as f:
            f.write('asdf')
            WVPASSEQ(f.mode, 'w')
        with open(target_file) as f:
            WVPASSEQ(f.read(), 'asdf')

        try:
            with atomically_replaced_file(target_file, mode='w') as f:
                f.write('wxyz')
                raise Exception()
        except Exception:
            pass
        with open(target_file) as f:
            WVPASSEQ(f.read(), 'asdf')

        with atomically_replaced_file(target_file, mode='wb') as f:
            f.write(os.urandom(20))
            WVPASSEQ(f.mode, 'wb')
Example #5
def prepare_save(self):
    """Commit all of the relevant data to disk.  Do as much work
    as possible without actually making the changes visible."""
    if self._pending_save:
        raise Error('save of %r already in progress' % self._filename)
    with self._cleanup:
        if self._node_paths:
            dir, name = os.path.split(self._filename)
            self._pending_save = atomically_replaced_file(self._filename,
                                                          mode='wb',
                                                          buffering=65536)
            with self._cleanup.enter_context(self._pending_save) as f:
                pickle.dump(self._node_paths, f, 2)
        else:  # No data
            self._cleanup.callback(lambda: unlink(self._filename))
        self._cleanup = self._cleanup.pop_all()
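The ExitStack dance above (enter_context plus pop_all) is what defers the final rename: the atomically_replaced_file context is entered now, but it is not exited until the caller later closes the transferred stack. A self-contained demo of that deferral pattern, using only the standard library (the names noisy and prepare are illustrative, not bup's):

from contextlib import ExitStack, contextmanager

@contextmanager
def noisy(name):
    print('enter', name)
    yield name
    print('exit', name)   # runs only when the owning stack closes

def prepare():
    with ExitStack() as stack:
        stack.enter_context(noisy('pending-save'))
        # pop_all() transfers ownership of the pending exits to a new
        # stack, so nothing is exited when this with-block ends.
        return stack.pop_all()

pending = prepare()       # prints: enter pending-save
pending.close()           # prints: exit pending-save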
Example #6
def sync_index(self, name):
    #debug1('requesting %r\n' % name)
    self.check_busy()
    mkdirp(self.cachedir)
    fn = os.path.join(self.cachedir, name)
    if os.path.exists(fn):
        msg = "won't request existing .idx, try `bup bloom --check %s`" % fn
        raise ClientError(msg)
    self.conn.write('send-index %s\n' % name)
    n = struct.unpack('!I', self.conn.read(4))[0]
    assert(n)
    with atomically_replaced_file(fn, 'w') as f:
        count = 0
        for b in chunkyreader(self.conn, n):
            f.write(b)
            count += len(b)
            qprogress('Receiving index from server: %d/%d\r' % (count, n))
        self.check_ok()
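chunkyreader streams exactly n bytes off the connection in blocks, which is what lets the loop report progress as it writes. bup's real implementation lives in bup.helpers; a rough stand-in (chunky_reader_sketch is a hypothetical name, and the exact signature is an assumption) might look like:

def chunky_reader_sketch(f, count, blocksize=65536):
    # Yield successive blocks until exactly `count` bytes have been read;
    # a short read before that means the peer hung up.
    while count > 0:
        b = f.read(min(blocksize, count))
        if not b:
            raise IOError('EOF with %d bytes remaining' % count)
        count -= len(b)
        yield b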
Example #7
def __init__(self, filename, metastore, tmax):
    self.closed = False
    self.rootlevel = self.level = Level([], None)
    self.pending_index = None
    self.f = None
    self.count = 0
    self.lastfile = None
    self.filename = None
    self.filename = filename = resolve_parent(filename)
    self.metastore = metastore
    self.tmax = tmax
    (dir, name) = os.path.split(filename)
    with ExitStack() as self.cleanup:
        self.pending_index = atomically_replaced_file(self.filename,
                                                      mode='wb',
                                                      buffering=65536)
        self.f = self.cleanup.enter_context(self.pending_index)
        self.cleanup.enter_context(self.f)
        self.f.write(INDEX_HDR)
        self.cleanup = self.cleanup.pop_all()
Example #8
def sync_index(self, name):
    #debug1('requesting %r\n' % name)
    self.check_busy()
    mkdirp(self.cachedir)
    fn = os.path.join(self.cachedir, name)
    if os.path.exists(fn):
        msg = "won't request existing .idx, try `bup bloom --check %s`" % fn
        raise ClientError(msg)
    self.conn.write('send-index %s\n' % name)
    n = struct.unpack('!I', self.conn.read(4))[0]
    assert(n)
    with atomically_replaced_file(fn, 'w') as f:
        count = 0
        progress('Receiving index from server: %d/%d\r' % (count, n))
        for b in chunkyreader(self.conn, n):
            f.write(b)
            count += len(b)
            qprogress('Receiving index from server: %d/%d\r' % (count, n))
        progress('Receiving index from server: %d/%d, done.\n' %
                 (count, n))
        self.check_ok()
Example #9
File: midx.py Project: bup/bup
def _do_midx(outdir, outfilename, infilenames, prefixstr,
             auto=False, force=False):
    global _first
    if not outfilename:
        assert(outdir)
        sum = hexlify(Sha1(b'\0'.join(infilenames)).digest())
        outfilename = b'%s/midx-%s.midx' % (outdir, sum)
    
    inp = []
    total = 0
    allfilenames = []
    midxs = []
    try:
        for name in infilenames:
            ix = git.open_idx(name)
            midxs.append(ix)
            inp.append((
                ix.map,
                len(ix),
                ix.sha_ofs,
                isinstance(ix, midx.PackMidx) and ix.which_ofs or 0,
                len(allfilenames),
            ))
            for n in ix.idxnames:
                allfilenames.append(os.path.basename(n))
            total += len(ix)
        inp.sort(reverse=True, key=lambda x: x[0][x[2] : x[2] + 20])

        if not _first: _first = outdir
        dirprefix = (_first != outdir) and git.repo_rel(outdir) + b': ' or b''
        debug1('midx: %s%screating from %d files (%d objects).\n'
               % (dirprefix, prefixstr, len(infilenames), total))
        if (auto and (total < 1024 and len(infilenames) < 3)) \
           or ((auto or force) and len(infilenames) < 2) \
           or (force and not total):
            debug1('midx: nothing to do.\n')
            return

        pages = int(total/SHA_PER_PAGE) or 1
        bits = int(math.ceil(math.log(pages, 2)))
        entries = 2**bits
        debug1('midx: table size: %d (%d bits)\n' % (entries*4, bits))

        unlink(outfilename)
        with atomically_replaced_file(outfilename, 'wb') as f:
            f.write(b'MIDX')
            f.write(struct.pack('!II', midx.MIDX_VERSION, bits))
            assert(f.tell() == 12)

            f.truncate(12 + 4*entries + 20*total + 4*total)
            f.flush()
            fdatasync(f.fileno())

            fmap = mmap_readwrite(f, close=False)
            count = merge_into(fmap, bits, total, inp)
            del fmap # Assume this calls msync() now.
            f.seek(0, os.SEEK_END)
            f.write(b'\0'.join(allfilenames))
    finally:
        for ix in midxs:
            if isinstance(ix, midx.PackMidx):
                ix.close()
        midxs = None
        inp = None


    # This is just for testing (if you enable this, don't clear inp above)
    if 0:
        p = midx.PackMidx(outfilename)
        assert(len(p.idxnames) == len(infilenames))
        log(repr(p.idxnames) + '\n')
        assert(len(p) == total)
        pi = iter(p)
        for i in git.idxmerge(inp, final_progress=False):
            pin = next(pi)
            assert(i == pin)
            assert(p.exists(i))

    return total, outfilename
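The midx writer above preallocates the output with truncate() and then fills the hash table through a writable mmap instead of sequential writes, appending the filename list afterwards. A minimal standalone demo of that preallocate-then-mmap pattern, using plain mmap/os (bup wraps these in its own mmap_readwrite and fdatasync helpers; the file name here is made up):

import mmap, os

with open('demo.midx', 'w+b') as f:
    f.truncate(4096)                 # size the file up front
    f.flush()
    os.fsync(f.fileno())             # make the size change durable
    m = mmap.mmap(f.fileno(), 4096)  # shared, writable mapping
    m[0:4] = b'MIDX'                 # random-access writes land in the file
    m.flush()                        # msync the dirty pages
    m.close()
    f.seek(0, os.SEEK_END)
    f.write(b'\0trailer')            # ordinary appends still work afterwards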
Example #10
def _do_midx(outdir, outfilename, infilenames, prefixstr):
    global _first
    if not outfilename:
        assert(outdir)
        sum = Sha1('\0'.join(infilenames)).hexdigest()
        outfilename = '%s/midx-%s.midx' % (outdir, sum)
    
    inp = []
    total = 0
    allfilenames = []
    midxs = []
    try:
        for name in infilenames:
            ix = git.open_idx(name)
            midxs.append(ix)
            inp.append((
                ix.map,
                len(ix),
                ix.sha_ofs,
                isinstance(ix, midx.PackMidx) and ix.which_ofs or 0,
                len(allfilenames),
            ))
            for n in ix.idxnames:
                allfilenames.append(os.path.basename(n))
            total += len(ix)
        inp.sort(lambda x,y: cmp(str(y[0][y[2]:y[2]+20]),str(x[0][x[2]:x[2]+20])))

        if not _first: _first = outdir
        dirprefix = (_first != outdir) and git.repo_rel(outdir)+': ' or ''
        debug1('midx: %s%screating from %d files (%d objects).\n'
               % (dirprefix, prefixstr, len(infilenames), total))
        if (opt.auto and (total < 1024 and len(infilenames) < 3)) \
           or ((opt.auto or opt.force) and len(infilenames) < 2) \
           or (opt.force and not total):
            debug1('midx: nothing to do.\n')
            return

        pages = int(total/SHA_PER_PAGE) or 1
        bits = int(math.ceil(math.log(pages, 2)))
        entries = 2**bits
        debug1('midx: table size: %d (%d bits)\n' % (entries*4, bits))

        unlink(outfilename)
        with atomically_replaced_file(outfilename, 'wb') as f:
            f.write('MIDX')
            f.write(struct.pack('!II', midx.MIDX_VERSION, bits))
            assert(f.tell() == 12)

            f.truncate(12 + 4*entries + 20*total + 4*total)
            f.flush()
            fdatasync(f.fileno())

            fmap = mmap_readwrite(f, close=False)

            count = merge_into(fmap, bits, total, inp)
            del fmap # Assume this calls msync() now.
            f.seek(0, os.SEEK_END)
            f.write('\0'.join(allfilenames))
    finally:
        for ix in midxs:
            if isinstance(ix, midx.PackMidx):
                ix.close()
        midxs = None
        inp = None


    # This is just for testing (if you enable this, don't clear inp above)
    if 0:
        p = midx.PackMidx(outfilename)
        assert(len(p.idxnames) == len(infilenames))
        print p.idxnames
        assert(len(p) == total)
        pi = iter(p)
        for i in git.idxmerge(inp, final_progress=False):
            pin = pi.next()
            assert(i == pin)
            assert(p.exists(i))

    return total, outfilename