def index_negative_timestamps():
    """Verify that index entries pack cleanly for pre-epoch timestamps.

    Exercises both a mildly negative mtime (Dec 31, 1969) and the most
    negative 32-bit value (Jun 10, 1893).  The tmpdir is only removed when
    no new wvtest failures were recorded, so failures leave evidence behind.
    """
    initial_failures = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tindex-')
    # Makes 'foo' exist
    foopath = tmpdir + '/foo'
    # open() instead of the deprecated file() builtin (removed in Python 3)
    f = open(foopath, 'wb')
    f.close()

    # Dec 31, 1969
    os.utime(foopath, (-86400, -86400))
    ns_per_sec = 10**9
    tstart = time.time() * ns_per_sec
    tmax = tstart - ns_per_sec
    e = index.BlankNewEntry(foopath, 0, tmax)
    e.from_stat(xstat.stat(foopath), 0, tstart)
    assert len(e.packed())
    WVPASS()

    # Jun 10, 1893
    os.utime(foopath, (-0x80000000, -0x80000000))
    e = index.BlankNewEntry(foopath, 0, tmax)
    e.from_stat(xstat.stat(foopath), 0, tstart)
    assert len(e.packed())
    WVPASS()

    if wvfailure_count() == initial_failures:
        subprocess.call(['rm', '-rf', tmpdir])
def index_dirty():
    # Merge three overlapping indexes and check the combined entry order.
    # Start from a clean slate: remove stores left over from a previous run.
    unlink('index.meta.tmp')
    unlink('index2.meta.tmp')
    unlink('index3.meta.tmp')
    default_meta = metadata.Metadata()
    # One metadata store per index file; each writer below uses its own
    # stored offset for every entry it adds.
    ms1 = index.MetaStoreWriter('index.meta.tmp')
    ms2 = index.MetaStoreWriter('index2.meta.tmp')
    ms3 = index.MetaStoreWriter('index3.meta.tmp')
    meta_ofs1 = ms1.store(default_meta)
    meta_ofs2 = ms2.store(default_meta)
    meta_ofs3 = ms3.store(default_meta)
    unlink('index.tmp')
    unlink('index2.tmp')
    unlink('index3.tmp')
    ds = xstat.stat('.')
    fs = xstat.stat('tindex.py')
    # tmax is in nanoseconds here (seconds * 10**9) -- presumably what
    # index.Writer expects; TODO confirm against index.Writer's signature.
    tmax = (time.time() - 1) * 10**9
    w1 = index.Writer('index.tmp', ms1, tmax)
    w1.add('/a/b/x', fs, meta_ofs1)
    w1.add('/a/b/c', fs, meta_ofs1)
    w1.add('/a/b/', ds, meta_ofs1)
    w1.add('/a/', ds, meta_ofs1)
    #w1.close()
    WVPASS()

    w2 = index.Writer('index2.tmp', ms2, tmax)
    w2.add('/a/b/n/2', fs, meta_ofs2)
    #w2.close()
    WVPASS()

    w3 = index.Writer('index3.tmp', ms3, tmax)
    w3.add('/a/c/n/3', fs, meta_ofs3)
    #w3.close()
    WVPASS()

    r1 = w1.new_reader()
    r2 = w2.new_reader()
    r3 = w3.new_reader()
    WVPASS()

    # The expected lists include parent directories ('/a/b/n/', '/', ...)
    # that were never added explicitly, per the assertions below.
    r1all = [e.name for e in r1]
    WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
    r2all = [e.name for e in r2]
    WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
    r3all = [e.name for e in r3]
    WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
    all = [e.name for e in index.merge(r2, r1, r3)]
    WVPASSEQ(all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x',
                   '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/',
                   '/a/', '/'])

    fake_validate(r1)
    dump(r1)
    print [hex(e.flags) for e in r1]
def index_dirty(): initial_failures = wvfailure_count() orig_cwd = os.getcwd() tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tindex-') try: os.chdir(tmpdir) default_meta = metadata.Metadata() ms1 = index.MetaStoreWriter('index.meta.tmp') ms2 = index.MetaStoreWriter('index2.meta.tmp') ms3 = index.MetaStoreWriter('index3.meta.tmp') meta_ofs1 = ms1.store(default_meta) meta_ofs2 = ms2.store(default_meta) meta_ofs3 = ms3.store(default_meta) ds = xstat.stat(lib_t_dir) fs = xstat.stat(lib_t_dir + '/tindex.py') tmax = (time.time() - 1) * 10**9 w1 = index.Writer('index.tmp', ms1, tmax) w1.add('/a/b/x', fs, meta_ofs1) w1.add('/a/b/c', fs, meta_ofs1) w1.add('/a/b/', ds, meta_ofs1) w1.add('/a/', ds, meta_ofs1) #w1.close() WVPASS() w2 = index.Writer('index2.tmp', ms2, tmax) w2.add('/a/b/n/2', fs, meta_ofs2) #w2.close() WVPASS() w3 = index.Writer('index3.tmp', ms3, tmax) w3.add('/a/c/n/3', fs, meta_ofs3) #w3.close() WVPASS() r1 = w1.new_reader() r2 = w2.new_reader() r3 = w3.new_reader() WVPASS() r1all = [e.name for e in r1] WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/']) r2all = [e.name for e in r2] WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/']) r3all = [e.name for e in r3] WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/']) all = [e.name for e in index.merge(r2, r1, r3)] WVPASSEQ(all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/', '/a/', '/']) fake_validate(r1) dump(r1) print [hex(e.flags) for e in r1]
def index_writer():
    """Add a few files and directories to a fresh index, then close it."""
    unlink('index.tmp')
    dir_stat = xstat.stat('.')
    file_stat = xstat.stat('tindex.py')
    writer = index.Writer('index.tmp', time.time() - 1)
    # Same adds, same order as before -- driven from a table.
    for name, st in (('/var/tmp/sporky', file_stat),
                     ('/etc/passwd', file_stat),
                     ('/etc/', dir_stat),
                     ('/', dir_stat)):
        writer.add(name, st)
    writer.close()
def index_writer():
    """Write a handful of entries into a brand-new index and close it."""
    unlink('index.tmp')
    dir_stat = xstat.stat('.')
    file_stat = xstat.stat('tindex.py')
    writer = index.Writer('index.tmp')
    # Identical paths in the identical order, just table-driven.
    for name, st in (('/var/tmp/sporky', file_stat),
                     ('/etc/passwd', file_stat),
                     ('/etc/', dir_stat),
                     ('/', dir_stat)):
        writer.add(name, st)
    writer.close()
def index_writer():
    """Write a few entries (with metadata offsets) into a fresh index.

    Removes stale index/metadata files first, then adds two files and two
    directories with metadata offset 0 and closes both stores.
    """
    unlink('index.tmp')
    ds = xstat.stat('.')
    fs = xstat.stat('tindex.py')
    unlink('index.meta.tmp')
    # Dropped the stray trailing semicolon that followed this call (PEP 8).
    ms = index.MetaStoreWriter('index.meta.tmp')
    tmax = (time.time() - 1) * 10**9  # seconds scaled to nanoseconds
    w = index.Writer('index.tmp', ms, tmax)
    w.add('/var/tmp/sporky', fs, 0)
    w.add('/etc/passwd', fs, 0)
    w.add('/etc/', ds, 0)
    w.add('/', ds, 0)
    ms.close()
    w.close()
def index_writer():
    """Build a tiny index backed by a metadata store, then close both."""
    unlink('index.tmp')
    unlink('index.meta.tmp')
    dir_stat = xstat.stat('.')
    file_stat = xstat.stat('tindex.py')
    meta_store = index.MetaStoreWriter('index.meta.tmp')
    max_time_ns = (time.time() - 1) * 10**9
    writer = index.Writer('index.tmp', meta_store, max_time_ns)
    for name, st in (('/var/tmp/sporky', file_stat),
                     ('/etc/passwd', file_stat),
                     ('/etc/', dir_stat),
                     ('/', dir_stat)):
        writer.add(name, st, 0)
    meta_store.close()
    writer.close()
def index_dirty(): unlink('index.tmp') unlink('index2.tmp') ds = xstat.stat('.') fs = xstat.stat('tindex.py') tmax = time.time() - 1 w1 = index.Writer('index.tmp', tmax) w1.add('/a/b/x', fs) w1.add('/a/b/c', fs) w1.add('/a/b/', ds) w1.add('/a/', ds) #w1.close() WVPASS() w2 = index.Writer('index2.tmp', tmax) w2.add('/a/b/n/2', fs) #w2.close() WVPASS() w3 = index.Writer('index3.tmp', tmax) w3.add('/a/c/n/3', fs) #w3.close() WVPASS() r1 = w1.new_reader() r2 = w2.new_reader() r3 = w3.new_reader() WVPASS() r1all = [e.name for e in r1] WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/']) r2all = [e.name for e in r2] WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/']) r3all = [e.name for e in r3] WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/']) all = [e.name for e in index.merge(r2, r1, r3)] WVPASSEQ(all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/', '/a/', '/']) fake_validate(r1) dump(r1) print [hex(e.flags) for e in r1]
def test_index_writer(tmpdir):
    """Add a few paths to a fresh index inside tmpdir and close it."""
    saved_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        dir_stat = xstat.stat(b'.')
        file_stat = xstat.stat(lib_t_dir + b'/test_index.py')
        max_time_ns = (time.time() - 1) * 10**9
        with index.MetaStoreWriter(b'index.meta.tmp') as ms, \
             index.Writer(b'index.tmp', ms, max_time_ns) as w:
            for name, st in ((b'/var/tmp/sporky', file_stat),
                             (b'/etc/passwd', file_stat),
                             (b'/etc/', dir_stat),
                             (b'/', dir_stat)):
                w.add(name, st, 0)
            w.close()
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(saved_cwd)
def do_midx_dir(path): already = {} sizes = {} if opt.force and not opt.auto: midxs = [] # don't use existing midx files else: midxs = glob.glob('%s/*.midx' % path) contents = {} for mname in midxs: m = git.open_idx(mname) contents[mname] = [('%s/%s' % (path, i)) for i in m.idxnames] sizes[mname] = len(m) # sort the biggest+newest midxes first, so that we can eliminate # smaller (or older) redundant ones that come later in the list midxs.sort(key=lambda ix: (-sizes[ix], -xstat.stat(ix).st_mtime)) for mname in midxs: any = 0 for iname in contents[mname]: if not already.get(iname): already[iname] = 1 any = 1 if not any: debug1('%r is redundant\n' % mname) unlink(mname) already[mname] = 1 midxs = [k for k in midxs if not already.get(k)] idxs = [k for k in glob.glob('%s/*.idx' % path) if not already.get(k)] for iname in idxs: i = git.open_idx(iname) sizes[iname] = len(i) all = [(sizes[n], n) for n in (midxs + idxs)] # FIXME: what are the optimal values? Does this make sense? DESIRED_HWM = opt.force and 1 or 5 DESIRED_LWM = opt.force and 1 or 2 existed = dict((name, 1) for sz, name in all) debug1('midx: %d indexes; want no more than %d.\n' % (len(all), DESIRED_HWM)) if len(all) <= DESIRED_HWM: debug1('midx: nothing to do.\n') while len(all) > DESIRED_HWM: all.sort() part1 = [name for sz, name in all[:len(all) - DESIRED_LWM + 1]] part2 = all[len(all) - DESIRED_LWM + 1:] all = list(do_midx_group(path, part1)) + part2 if len(all) > DESIRED_HWM: debug1('\nStill too many indexes (%d > %d). Merging again.\n' % (len(all), DESIRED_HWM)) if opt['print']: for sz, name in all: if not existed.get(name): print name
def do_midx_dir(path):
    # Consolidate the .midx/.idx files under path: delete midxes whose idx
    # contents are fully covered by a bigger/newer midx, then merge what is
    # left until no more than DESIRED_HWM indexes remain.
    already = {}
    sizes = {}
    if opt.force and not opt.auto:
        midxs = []  # don't use existing midx files
    else:
        midxs = glob.glob('%s/*.midx' % path)
        contents = {}
        for mname in midxs:
            m = git.open_idx(mname)
            contents[mname] = [('%s/%s' % (path,i)) for i in m.idxnames]
            sizes[mname] = len(m)

        # sort the biggest+newest midxes first, so that we can eliminate
        # smaller (or older) redundant ones that come later in the list
        midxs.sort(key=lambda ix: (-sizes[ix], -xstat.stat(ix).st_mtime))

        for mname in midxs:
            # NOTE(review): 'any' shadows the builtin here; harmless within
            # this loop but worth renaming eventually.
            any = 0
            for iname in contents[mname]:
                if not already.get(iname):
                    already[iname] = 1
                    any = 1
            if not any:
                debug1('%r is redundant\n' % mname)
                unlink(mname)
                already[mname] = 1

    # Keep only indexes that aren't already covered (or just deleted).
    midxs = [k for k in midxs if not already.get(k)]
    idxs = [k for k in glob.glob('%s/*.idx' % path) if not already.get(k)]

    for iname in idxs:
        i = git.open_idx(iname)
        sizes[iname] = len(i)

    # (size, name) pairs; 'all' also shadows a builtin -- see note above.
    all = [(sizes[n],n) for n in (midxs + idxs)]

    # FIXME: what are the optimal values?  Does this make sense?
    DESIRED_HWM = opt.force and 1 or 5
    DESIRED_LWM = opt.force and 1 or 2
    existed = dict((name,1) for sz,name in all)
    debug1('midx: %d indexes; want no more than %d.\n'
           % (len(all), DESIRED_HWM))
    if len(all) <= DESIRED_HWM:
        debug1('midx: nothing to do.\n')
    while len(all) > DESIRED_HWM:
        all.sort()
        # Merge the smallest ones; keep the DESIRED_LWM-1 biggest as-is.
        part1 = [name for sz,name in all[:len(all)-DESIRED_LWM+1]]
        part2 = all[len(all)-DESIRED_LWM+1:]
        all = list(do_midx_group(path, part1)) + part2
        if len(all) > DESIRED_HWM:
            debug1('\nStill too many indexes (%d > %d).  Merging again.\n'
                   % (len(all), DESIRED_HWM))

    if opt['print']:
        # Print only the midx files created by this run.
        for sz,name in all:
            if not existed.get(name):
                print name
def index_writer():
    """Write a small index under a scratch directory, then close it."""
    with no_lingering_errors(), test_tempdir('bup-tindex-') as tmpdir:
        saved_cwd = os.getcwd()
        try:
            os.chdir(tmpdir)
            dir_stat = xstat.stat('.')
            file_stat = xstat.stat(lib_t_dir + '/tindex.py')
            meta_store = index.MetaStoreWriter('index.meta.tmp')
            max_time_ns = (time.time() - 1) * 10**9
            writer = index.Writer('index.tmp', meta_store, max_time_ns)
            for name, st in (('/var/tmp/sporky', file_stat),
                             ('/etc/passwd', file_stat),
                             ('/etc/', dir_stat),
                             ('/', dir_stat)):
                writer.add(name, st, 0)
            meta_store.close()
            writer.close()
        finally:
            os.chdir(saved_cwd)
def index_writer():
    """Create a small index plus metadata store in a tempdir, then close both.

    Fix: removed the stray trailing semicolon after the MetaStoreWriter
    call (PEP 8: no compound/terminated statements).
    """
    with no_lingering_errors(), test_tempdir('bup-tindex-') as tmpdir:
        orig_cwd = os.getcwd()
        try:
            os.chdir(tmpdir)
            ds = xstat.stat('.')
            fs = xstat.stat(lib_t_dir + '/tindex.py')
            ms = index.MetaStoreWriter('index.meta.tmp')
            tmax = (time.time() - 1) * 10**9  # seconds -> nanoseconds
            w = index.Writer('index.tmp', ms, tmax)
            w.add('/var/tmp/sporky', fs, 0)
            w.add('/etc/passwd', fs, 0)
            w.add('/etc/', ds, 0)
            w.add('/', ds, 0)
            ms.close()
            w.close()
        finally:
            os.chdir(orig_cwd)
def test_index_negative_timestamps(tmpdir):
    """Entries with pre-epoch mtimes must still pack successfully.

    Covers Dec 31, 1969 (-86400) and the minimum 32-bit time value
    (Jun 10, 1893).
    """
    # Makes 'foo' exist -- with-statement instead of the manual
    # open()/close() pair so the handle can't leak on error.
    foopath = tmpdir + b'/foo'
    with open(foopath, 'wb'):
        pass

    # Dec 31, 1969
    os.utime(foopath, (-86400, -86400))
    ns_per_sec = 10**9
    tmax = (time.time() - 1) * ns_per_sec
    e = index.BlankNewEntry(foopath, 0, tmax)
    e.update_from_stat(xstat.stat(foopath), 0)
    WVPASS(e.packed())

    # Jun 10, 1893
    os.utime(foopath, (-0x80000000, -0x80000000))
    e = index.BlankNewEntry(foopath, 0, tmax)
    e.update_from_stat(xstat.stat(foopath), 0)
    WVPASS(e.packed())
def index_negative_timestamps():
    """Check that entries with pre-epoch mtimes still pack (old API)."""
    # Makes 'foo' exist -- open() rather than the deprecated file() builtin
    f = open('foo', 'wb')
    f.close()

    # Dec 31, 1969
    os.utime("foo", (-86400, -86400))
    e = index.BlankNewEntry("foo")
    e.from_stat(xstat.stat("foo"), time.time())
    assert len(e.packed())
    WVPASS()

    # Jun 10, 1893
    os.utime("foo", (-0x80000000, -0x80000000))
    e = index.BlankNewEntry("foo")
    e.from_stat(xstat.stat("foo"), time.time())
    assert len(e.packed())
    WVPASS()

    unlink('foo')
def index_writer():
    """Write a small index in a tempdir; keep the tempdir on failure.

    Fix: removed the stray trailing semicolon after the MetaStoreWriter
    call (PEP 8).
    """
    initial_failures = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tindex-')
    orig_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        ds = xstat.stat('.')
        fs = xstat.stat(lib_t_dir + '/tindex.py')
        ms = index.MetaStoreWriter('index.meta.tmp')
        tmax = (time.time() - 1) * 10**9  # seconds -> nanoseconds
        w = index.Writer('index.tmp', ms, tmax)
        w.add('/var/tmp/sporky', fs, 0)
        w.add('/etc/passwd', fs, 0)
        w.add('/etc/', ds, 0)
        w.add('/', ds, 0)
        ms.close()
        w.close()
    finally:
        os.chdir(orig_cwd)
    # Only clean up when this test added no new wvtest failures.
    if wvfailure_count() == initial_failures:
        subprocess.call(['rm', '-rf', tmpdir])
def index_writer():
    """Build a small index in a scratch dir; keep the dir if anything fails."""
    failures_before = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tindex-')
    saved_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        dir_stat = xstat.stat('.')
        file_stat = xstat.stat(lib_t_dir + '/tindex.py')
        meta_store = index.MetaStoreWriter('index.meta.tmp')
        max_time_ns = (time.time() - 1) * 10**9
        writer = index.Writer('index.tmp', meta_store, max_time_ns)
        for name, st in (('/var/tmp/sporky', file_stat),
                         ('/etc/passwd', file_stat),
                         ('/etc/', dir_stat),
                         ('/', dir_stat)):
            writer.add(name, st, 0)
        meta_store.close()
        writer.close()
    finally:
        os.chdir(saved_cwd)
    # Remove the scratch dir only on a clean run, leaving evidence otherwise.
    if wvfailure_count() == failures_before:
        subprocess.call(['rm', '-rf', tmpdir])
def index_negative_timestamps():
    """Pre-epoch mtimes (1969 and 1893) must still produce packable entries."""
    with no_lingering_errors():
        with test_tempdir('bup-tindex-') as tmpdir:
            # Makes 'foo' exist -- open() instead of the deprecated file()
            # builtin (removed in Python 3)
            foopath = tmpdir + '/foo'
            f = open(foopath, 'wb')
            f.close()

            # Dec 31, 1969
            os.utime(foopath, (-86400, -86400))
            ns_per_sec = 10**9
            tmax = (time.time() - 1) * ns_per_sec
            e = index.BlankNewEntry(foopath, 0, tmax)
            e.update_from_stat(xstat.stat(foopath), 0)
            WVPASS(e.packed())

            # Jun 10, 1893
            os.utime(foopath, (-0x80000000, -0x80000000))
            e = index.BlankNewEntry(foopath, 0, tmax)
            e.update_from_stat(xstat.stat(foopath), 0)
            WVPASS(e.packed())
def index_writer():
    """Write a few index entries in a temp dir; keep the dir on failure."""
    failures_at_start = wvfailure_count()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix="bup-tindex-")
    previous_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        dir_stat = xstat.stat(".")
        file_stat = xstat.stat(lib_t_dir + "/tindex.py")
        meta_store = index.MetaStoreWriter("index.meta.tmp")
        max_time_ns = (time.time() - 1) * 10 ** 9
        writer = index.Writer("index.tmp", meta_store, max_time_ns)
        for name, st in (("/var/tmp/sporky", file_stat),
                         ("/etc/passwd", file_stat),
                         ("/etc/", dir_stat),
                         ("/", dir_stat)):
            writer.add(name, st, 0)
        meta_store.close()
        writer.close()
    finally:
        os.chdir(previous_cwd)
    # Clean up only if the failure count did not grow during this test.
    if wvfailure_count() == failures_at_start:
        subprocess.call(["rm", "-rf", tmpdir])
def index_negative_timestamps():
    """Entries with pre-epoch mtimes must pack (now-based tmax variant)."""
    # Makes 'foo' exist -- open() rather than the deprecated file() builtin
    f = open('foo', 'wb')
    f.close()

    # Dec 31, 1969
    os.utime("foo", (-86400, -86400))
    now = time.time()
    e = index.BlankNewEntry("foo", now - 1)
    e.from_stat(xstat.stat("foo"), now)
    assert len(e.packed())
    WVPASS()

    # Jun 10, 1893
    os.utime("foo", (-0x80000000, -0x80000000))
    e = index.BlankNewEntry("foo", now - 1)
    e.from_stat(xstat.stat("foo"), now)
    assert len(e.packed())
    WVPASS()

    unlink('foo')
def index_negative_timestamps():
    """Entries with pre-epoch mtimes must pack (tstart/tmax variant)."""
    # Makes 'foo' exist -- open() rather than the deprecated file() builtin
    f = open('foo', 'wb')
    f.close()

    # Dec 31, 1969
    os.utime("foo", (-86400, -86400))
    ns_per_sec = 10**9
    tstart = time.time() * ns_per_sec
    tmax = tstart - ns_per_sec
    e = index.BlankNewEntry("foo", 0, tmax)
    e.from_stat(xstat.stat("foo"), 0, tstart)
    assert len(e.packed())
    WVPASS()

    # Jun 10, 1893
    os.utime("foo", (-0x80000000, -0x80000000))
    e = index.BlankNewEntry("foo", 0, tmax)
    e.from_stat(xstat.stat("foo"), 0, tstart)
    assert len(e.packed())
    WVPASS()

    unlink('foo')
def refresh(self, skip_midx=False):
    """Refresh the index list.
    This method verifies if .midx files were superseded (e.g. all of
    its contents are in another, bigger .midx file) and removes the
    superseded files.

    If skip_midx is True, all work on .midx files will be skipped and
    .midx files will be removed from the list.

    The instance variable 'ignore_midx' can force this function to
    always act as if skip_midx was True.
    """
    if self.bloom is not None:
        self.bloom.close()
    self.bloom = None  # Always reopen the bloom as it may have been replaced
    self.do_bloom = False
    skip_midx = skip_midx or self.ignore_midx
    # d maps on-disk index path -> index object; when skipping midxes we
    # drop them from consideration entirely.
    d = dict((p.name, p) for p in self.packs
             if not skip_midx or not isinstance(p, midx.PackMidx))
    if os.path.exists(self.dir):
        if not skip_midx:
            midxl = []
            midxes = set(glob.glob(os.path.join(self.dir, b'*.midx')))
            # remove any *.midx files from our list that no longer exist
            for ix in list(d.values()):
                if not isinstance(ix, midx.PackMidx):
                    continue
                if ix.name in midxes:
                    continue
                # remove the midx
                del d[ix.name]
                ix.close()
                self.packs.remove(ix)
            # Register every idx already covered by a known midx.
            for ix in self.packs:
                if isinstance(ix, midx.PackMidx):
                    for name in ix.idxnames:
                        d[os.path.join(self.dir, name)] = ix
            # Open any midx on disk we don't know about yet; drop broken
            # ones (referencing missing idx files).
            for full in midxes:
                if not d.get(full):
                    mx = midx.PackMidx(full)
                    (mxd, mxf) = os.path.split(mx.name)
                    broken = False
                    for n in mx.idxnames:
                        if not os.path.exists(os.path.join(mxd, n)):
                            log(('warning: index %s missing\n'
                                 '  used by %s\n')
                                % (path_msg(n), path_msg(mxf)))
                            broken = True
                    if broken:
                        mx.close()
                        del mx
                        unlink(full)
                    else:
                        midxl.append(mx)
            # Biggest+newest midxes first so smaller/older redundant ones
            # lose below.
            midxl.sort(key=lambda ix:
                       (-len(ix), -xstat.stat(ix.name).st_mtime))
            for ix in midxl:
                any_needed = False
                for sub in ix.idxnames:
                    found = d.get(os.path.join(self.dir, sub))
                    if not found or isinstance(found, PackIdx):
                        # doesn't exist, or exists but not in a midx
                        any_needed = True
                        break
                if any_needed:
                    d[ix.name] = ix
                    for name in ix.idxnames:
                        d[os.path.join(self.dir, name)] = ix
                elif not ix.force_keep:
                    debug1('midx: removing redundant: %s\n'
                           % path_msg(os.path.basename(ix.name)))
                    ix.close()
                    unlink(ix.name)
        # Open any plain .idx files not already covered by a midx.
        for full in glob.glob(os.path.join(self.dir, b'*.idx')):
            if not d.get(full):
                try:
                    ix = open_idx(full)
                except GitError as e:
                    add_error(e)
                    continue
                d[full] = ix
    bfull = os.path.join(self.dir, b'bup.bloom')
    if self.bloom is None and os.path.exists(bfull):
        self.bloom = bloom.ShaBloom(bfull)
    self.packs = list(set(d.values()))
    self.packs.sort(reverse=True, key=lambda x: len(x))
    # Only trust the bloom filter if it is valid and covers everything.
    if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self):
        self.do_bloom = True
    else:
        self.bloom = None
    debug1('PackIdxList: using %d index%s.\n'
           % (len(self.packs), len(self.packs) != 1 and 'es' or ''))
def refresh(self, skip_midx = False):
    """Refresh the index list.
    This method verifies if .midx files were superseded (e.g. all of
    its contents are in another, bigger .midx file) and removes the
    superseded files.

    If skip_midx is True, all work on .midx files will be skipped and
    .midx files will be removed from the list.

    The module-global variable 'ignore_midx' can force this function to
    always act as if skip_midx was True.
    """
    self.bloom = None  # Always reopen the bloom as it may have been replaced
    self.do_bloom = False
    skip_midx = skip_midx or ignore_midx
    # d maps on-disk index path -> index object; skip midxes if requested.
    d = dict((p.name, p) for p in self.packs
             if not skip_midx or not isinstance(p, midx.PackMidx))
    if os.path.exists(self.dir):
        if not skip_midx:
            midxl = []
            # Register every idx already covered by a known midx.
            for ix in self.packs:
                if isinstance(ix, midx.PackMidx):
                    for name in ix.idxnames:
                        d[os.path.join(self.dir, name)] = ix
            # Open midxes we don't know about yet; delete broken ones
            # (those referencing idx files that no longer exist).
            for full in glob.glob(os.path.join(self.dir,'*.midx')):
                if not d.get(full):
                    mx = midx.PackMidx(full)
                    (mxd, mxf) = os.path.split(mx.name)
                    broken = False
                    for n in mx.idxnames:
                        if not os.path.exists(os.path.join(mxd, n)):
                            log(('warning: index %s missing\n' +
                                 '  used by %s\n') % (n, mxf))
                            broken = True
                    if broken:
                        del mx
                        unlink(full)
                    else:
                        midxl.append(mx)
            # Biggest+newest first, so smaller/older redundant midxes are
            # dropped by the loop below.
            midxl.sort(key=lambda ix:
                       (-len(ix), -xstat.stat(ix.name).st_mtime))
            for ix in midxl:
                any_needed = False
                for sub in ix.idxnames:
                    found = d.get(os.path.join(self.dir, sub))
                    if not found or isinstance(found, PackIdx):
                        # doesn't exist, or exists but not in a midx
                        any_needed = True
                        break
                if any_needed:
                    d[ix.name] = ix
                    for name in ix.idxnames:
                        d[os.path.join(self.dir, name)] = ix
                elif not ix.force_keep:
                    debug1('midx: removing redundant: %s\n'
                           % os.path.basename(ix.name))
                    unlink(ix.name)
        # Open any plain .idx files not already covered by a midx.
        for full in glob.glob(os.path.join(self.dir,'*.idx')):
            if not d.get(full):
                try:
                    ix = open_idx(full)
                except GitError, e:
                    add_error(e)
                    continue
                d[full] = ix
    bfull = os.path.join(self.dir, 'bup.bloom')
    if self.bloom is None and os.path.exists(bfull):
        self.bloom = bloom.ShaBloom(bfull)
    self.packs = list(set(d.values()))
    # Largest packs first.
    self.packs.sort(lambda x,y: -cmp(len(x),len(y)))
    # Only trust the bloom filter if it is valid and covers everything.
    if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self):
        self.do_bloom = True
    else:
        self.bloom = None
def index_dirty():
    # Build three overlapping indexes, then verify merge ordering, the
    # is_valid()/is_real() flags, and invalidate()/repack() behavior.
    unlink('index.tmp')
    unlink('index2.tmp')
    # NOTE(review): 'index3.tmp' is written below but never unlinked here;
    # a leftover from a previous run could presumably interfere -- confirm
    # whether index.Writer truncates an existing file.
    ds = xstat.stat('.')
    fs = xstat.stat('tindex.py')
    tmax = time.time() - 1
    w1 = index.Writer('index.tmp', tmax)
    w1.add('/a/b/x', fs)
    w1.add('/a/b/c', fs)
    w1.add('/a/b/', ds)
    w1.add('/a/', ds)
    #w1.close()
    WVPASS()

    w2 = index.Writer('index2.tmp', tmax)
    w2.add('/a/b/n/2', fs)
    #w2.close()
    WVPASS()

    w3 = index.Writer('index3.tmp', tmax)
    w3.add('/a/c/n/3', fs)
    #w3.close()
    WVPASS()

    r1 = w1.new_reader()
    r2 = w2.new_reader()
    r3 = w3.new_reader()
    WVPASS()

    # Each reader yields its explicit entries plus parent directories and
    # '/', per the expected lists below.
    r1all = [e.name for e in r1]
    WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
    r2all = [e.name for e in r2]
    WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
    r3all = [e.name for e in r3]
    WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
    all = [e.name for e in index.merge(r2, r1, r3)]
    WVPASSEQ(all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x',
                   '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/',
                   '/a/', '/'])

    # After fake_validate, every entry in r1 should report is_valid().
    fake_validate(r1)
    dump(r1)
    print [hex(e.flags) for e in r1]
    WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
    WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
    WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
             ['/a/c/n/3', '/a/c/n/', '/a/c/',
              '/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])

    # Entries coming from the unvalidated r2/r3 (plus '/') are expected to
    # be invalid in the merge; the "real" set is what only r1 contributed
    # plus the two leaf files from r2/r3.
    expect_invalid = ['/'] + r2all + r3all
    expect_real = (set(r1all) - set(r2all) - set(r3all)) \
        | set(['/a/b/n/2', '/a/c/n/3'])
    dump(index.merge(r2, r1, r3))
    for e in index.merge(r2, r1, r3):
        print e.name, hex(e.flags), e.ctime
        eiv = e.name in expect_invalid
        er = e.name in expect_real
        WVPASSEQ(eiv, not e.is_valid())
        WVPASSEQ(er, e.is_real())

    # Once all readers are validated, nothing should remain invalid.
    fake_validate(r2, r3)
    dump(index.merge(r2, r1, r3))
    WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
             [])

    # Invalidating one leaf entry also invalidates all of its ancestors,
    # per the final assertion.
    e = eget(index.merge(r2, r1, r3), '/a/b/c')
    e.invalidate()
    e.repack()
    dump(index.merge(r2, r1, r3))
    WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
             ['/a/b/c', '/a/b/', '/a/', '/'])
def test_index_dirty(tmpdir):
    """Merge three overlapping indexes; check ordering, validity flags,
    and invalidate()/repack() propagation."""
    orig_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        default_meta = metadata.Metadata()
        # One metadata store per index; each writer uses its own offset.
        with index.MetaStoreWriter(b'index.meta.tmp') as ms1, \
             index.MetaStoreWriter(b'index2.meta.tmp') as ms2, \
             index.MetaStoreWriter(b'index3.meta.tmp') as ms3:
            meta_ofs1 = ms1.store(default_meta)
            meta_ofs2 = ms2.store(default_meta)
            meta_ofs3 = ms3.store(default_meta)

            ds = xstat.stat(lib_t_dir)
            fs = xstat.stat(lib_t_dir + b'/test_index.py')
            tmax = (time.time() - 1) * 10**9  # seconds scaled by 10**9

            with index.Writer(b'index.tmp', ms1, tmax) as w1, \
                 index.Writer(b'index2.tmp', ms2, tmax) as w2, \
                 index.Writer(b'index3.tmp', ms3, tmax) as w3:
                w1.add(b'/a/b/x', fs, meta_ofs1)
                w1.add(b'/a/b/c', fs, meta_ofs1)
                w1.add(b'/a/b/', ds, meta_ofs1)
                w1.add(b'/a/', ds, meta_ofs1)
                #w1.close()
                WVPASS()
                w2.add(b'/a/b/n/2', fs, meta_ofs2)
                #w2.close()
                WVPASS()
                w3.add(b'/a/c/n/3', fs, meta_ofs3)
                #w3.close()
                WVPASS()
                with w1.new_reader() as r1, \
                     w2.new_reader() as r2, \
                     w3.new_reader() as r3:
                    WVPASS()
                    # Expected lists include parent dirs and b'/' that were
                    # never added explicitly.
                    r1all = [e.name for e in r1]
                    WVPASSEQ(r1all, [b'/a/b/x', b'/a/b/c', b'/a/b/',
                                     b'/a/', b'/'])
                    r2all = [e.name for e in r2]
                    WVPASSEQ(r2all, [b'/a/b/n/2', b'/a/b/n/', b'/a/b/',
                                     b'/a/', b'/'])
                    r3all = [e.name for e in r3]
                    WVPASSEQ(r3all, [b'/a/c/n/3', b'/a/c/n/', b'/a/c/',
                                     b'/a/', b'/'])
                    all = [e.name for e in index.merge(r2, r1, r3)]
                    WVPASSEQ(all, [
                        b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/b/x',
                        b'/a/b/n/2', b'/a/b/n/', b'/a/b/c', b'/a/b/',
                        b'/a/', b'/'
                    ])
                    # Validate only r1; its entries become is_valid().
                    fake_validate(r1)
                    dump(r1)
                    print([hex(e.flags) for e in r1])
                    WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
                    WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
                    WVPASSEQ([
                        e.name for e in index.merge(r2, r1, r3)
                        if not e.is_valid()
                    ], [
                        b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/b/n/2',
                        b'/a/b/n/', b'/a/b/', b'/a/', b'/'
                    ])
                    # Unvalidated r2/r3 entries (plus b'/') are expected
                    # invalid; "real" = r1-only entries plus the two leaves
                    # from r2/r3.
                    expect_invalid = [b'/'] + r2all + r3all
                    expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                        | set([b'/a/b/n/2', b'/a/c/n/3'])
                    dump(index.merge(r2, r1, r3))
                    for e in index.merge(r2, r1, r3):
                        print(e.name, hex(e.flags), e.ctime)
                        eiv = e.name in expect_invalid
                        er = e.name in expect_real
                        WVPASSEQ(eiv, not e.is_valid())
                        WVPASSEQ(er, e.is_real())
                    # After validating everything, no invalid entries left.
                    fake_validate(r2, r3)
                    dump(index.merge(r2, r1, r3))
                    WVPASSEQ([
                        e.name for e in index.merge(r2, r1, r3)
                        if not e.is_valid()
                    ], [])
                    # Invalidating a leaf also invalidates its ancestors,
                    # per the final assertion.
                    e = eget(index.merge(r2, r1, r3), b'/a/b/c')
                    e.invalidate()
                    e.repack()
                    dump(index.merge(r2, r1, r3))
                    WVPASSEQ([
                        e.name for e in index.merge(r2, r1, r3)
                        if not e.is_valid()
                    ], [b'/a/b/c', b'/a/b/', b'/a/', b'/'])
    finally:
        os.chdir(orig_cwd)
def index_dirty():
    # Merge three overlapping indexes inside a scratch dir; verify merge
    # ordering, validity flags, and invalidate()/repack() propagation.
    with no_lingering_errors():
        with test_tempdir('bup-tindex-') as tmpdir:
            orig_cwd = os.getcwd()
            try:
                os.chdir(tmpdir)
                default_meta = metadata.Metadata()
                # One metadata store per index file.
                ms1 = index.MetaStoreWriter('index.meta.tmp')
                ms2 = index.MetaStoreWriter('index2.meta.tmp')
                ms3 = index.MetaStoreWriter('index3.meta.tmp')
                meta_ofs1 = ms1.store(default_meta)
                meta_ofs2 = ms2.store(default_meta)
                meta_ofs3 = ms3.store(default_meta)
                ds = xstat.stat(lib_t_dir)
                fs = xstat.stat(lib_t_dir + '/tindex.py')
                tmax = (time.time() - 1) * 10**9  # seconds scaled by 10**9
                w1 = index.Writer('index.tmp', ms1, tmax)
                w1.add('/a/b/x', fs, meta_ofs1)
                w1.add('/a/b/c', fs, meta_ofs1)
                w1.add('/a/b/', ds, meta_ofs1)
                w1.add('/a/', ds, meta_ofs1)
                #w1.close()
                WVPASS()

                w2 = index.Writer('index2.tmp', ms2, tmax)
                w2.add('/a/b/n/2', fs, meta_ofs2)
                #w2.close()
                WVPASS()

                w3 = index.Writer('index3.tmp', ms3, tmax)
                w3.add('/a/c/n/3', fs, meta_ofs3)
                #w3.close()
                WVPASS()

                r1 = w1.new_reader()
                r2 = w2.new_reader()
                r3 = w3.new_reader()
                WVPASS()

                # Expected lists include parent directories and '/' that
                # were never added explicitly.
                r1all = [e.name for e in r1]
                WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
                r2all = [e.name for e in r2]
                WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/',
                                 '/a/', '/'])
                r3all = [e.name for e in r3]
                WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/',
                                 '/a/', '/'])
                all = [e.name for e in index.merge(r2, r1, r3)]
                WVPASSEQ(all, [
                    '/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x',
                    '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/',
                    '/a/', '/'
                ])
                # Validate only r1; its entries become is_valid().
                fake_validate(r1)
                dump(r1)
                print [hex(e.flags) for e in r1]
                WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
                WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
                WVPASSEQ([
                    e.name for e in index.merge(r2, r1, r3)
                    if not e.is_valid()
                ], [
                    '/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/n/2',
                    '/a/b/n/', '/a/b/', '/a/', '/'
                ])
                # Unvalidated r2/r3 entries (plus '/') are expected to be
                # invalid; "real" = r1-only names plus the two leaves from
                # r2/r3.
                expect_invalid = ['/'] + r2all + r3all
                expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                    | set(['/a/b/n/2', '/a/c/n/3'])
                dump(index.merge(r2, r1, r3))
                for e in index.merge(r2, r1, r3):
                    print e.name, hex(e.flags), e.ctime
                    eiv = e.name in expect_invalid
                    er = e.name in expect_real
                    WVPASSEQ(eiv, not e.is_valid())
                    WVPASSEQ(er, e.is_real())
                # After validating everything, nothing remains invalid.
                fake_validate(r2, r3)
                dump(index.merge(r2, r1, r3))
                WVPASSEQ([
                    e.name for e in index.merge(r2, r1, r3)
                    if not e.is_valid()
                ], [])
                # Invalidating a leaf also invalidates its ancestors, per
                # the final assertion.
                e = eget(index.merge(r2, r1, r3), '/a/b/c')
                e.invalidate()
                e.repack()
                dump(index.merge(r2, r1, r3))
                WVPASSEQ([
                    e.name for e in index.merge(r2, r1, r3)
                    if not e.is_valid()
                ], ['/a/b/c', '/a/b/', '/a/', '/'])
                w1.close()
                w2.close()
                w3.close()
            finally:
                os.chdir(orig_cwd)
def index_dirty(): with no_lingering_errors(): with test_tempdir('bup-tindex-') as tmpdir: orig_cwd = os.getcwd() try: os.chdir(tmpdir) default_meta = metadata.Metadata() ms1 = index.MetaStoreWriter('index.meta.tmp') ms2 = index.MetaStoreWriter('index2.meta.tmp') ms3 = index.MetaStoreWriter('index3.meta.tmp') meta_ofs1 = ms1.store(default_meta) meta_ofs2 = ms2.store(default_meta) meta_ofs3 = ms3.store(default_meta) ds = xstat.stat(lib_t_dir) fs = xstat.stat(lib_t_dir + '/tindex.py') tmax = (time.time() - 1) * 10**9 w1 = index.Writer('index.tmp', ms1, tmax) w1.add('/a/b/x', fs, meta_ofs1) w1.add('/a/b/c', fs, meta_ofs1) w1.add('/a/b/', ds, meta_ofs1) w1.add('/a/', ds, meta_ofs1) #w1.close() WVPASS() w2 = index.Writer('index2.tmp', ms2, tmax) w2.add('/a/b/n/2', fs, meta_ofs2) #w2.close() WVPASS() w3 = index.Writer('index3.tmp', ms3, tmax) w3.add('/a/c/n/3', fs, meta_ofs3) #w3.close() WVPASS() r1 = w1.new_reader() r2 = w2.new_reader() r3 = w3.new_reader() WVPASS() r1all = [e.name for e in r1] WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/']) r2all = [e.name for e in r2] WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/']) r3all = [e.name for e in r3] WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/']) all = [e.name for e in index.merge(r2, r1, r3)] WVPASSEQ(all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c', '/a/b/', '/a/', '/']) fake_validate(r1) dump(r1) print [hex(e.flags) for e in r1] WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all) WVPASSEQ([e.name for e in r1 if not e.is_valid()], []) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/']) expect_invalid = ['/'] + r2all + r3all expect_real = (set(r1all) - set(r2all) - set(r3all)) \ | set(['/a/b/n/2', '/a/c/n/3']) dump(index.merge(r2, r1, r3)) for e in index.merge(r2, r1, r3): print e.name, hex(e.flags), e.ctime eiv = e.name in expect_invalid er = e.name in 
expect_real WVPASSEQ(eiv, not e.is_valid()) WVPASSEQ(er, e.is_real()) fake_validate(r2, r3) dump(index.merge(r2, r1, r3)) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], []) e = eget(index.merge(r2, r1, r3), '/a/b/c') e.invalidate() e.repack() dump(index.merge(r2, r1, r3)) WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], ['/a/b/c', '/a/b/', '/a/', '/']) w1.close() w2.close() w3.close() finally: