Example #1
def save_index(tmax):
    print('saving index')

    _hlinks.prepare_save()

    if _ri.exists():
        _ri.save()
        _wi.flush()
        if _wi.count:
            wr = _wi.new_reader()
            mi = index.Writer(indexfile, _msw, tmax)

            for e in index.merge(_ri, wr):
                # FIXME: shouldn't we remove deleted entries eventually?  When?
                mi.add_ixentry(e)

            _ri.close()
            mi.close()
            wr.close()
        _wi.abort()
    else:
        _wi.close()

    _msw.close()
    _hlinks.commit_save()
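
Example #1 is a fragment of the index save/merge epilogue: _ri, _wi, _msw and _hlinks are module-level handles created elsewhere. Judging from the fuller update_index() in Example #5 below, their setup presumably looks like this (a sketch reconstructed from that example, not this project's verbatim code):

# Presumed setup for the handles used in save_index() above -- an assumption
# adapted from the update_index() in Example #5; indexfile and tmax come from
# the surrounding module and caller.
_ri = index.Reader(indexfile)                      # existing on-disk index
_msw = index.MetaStoreWriter(indexfile + '.meta')  # metadata side-store
_wi = index.Writer(indexfile, _msw, tmax)          # collects new/updated entries
_hlinks = hlinkdb.HLinkDB(indexfile + '.hlink')    # hard-link bookkeeping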
Example #2
File: tindex.py Project: Kelimion/bup
def index_dirty():
    unlink('index.meta.tmp')
    unlink('index2.meta.tmp')
    unlink('index3.meta.tmp')
    default_meta = metadata.Metadata()
    ms1 = index.MetaStoreWriter('index.meta.tmp')
    ms2 = index.MetaStoreWriter('index2.meta.tmp')
    ms3 = index.MetaStoreWriter('index3.meta.tmp')
    meta_ofs1 = ms1.store(default_meta)
    meta_ofs2 = ms2.store(default_meta)
    meta_ofs3 = ms3.store(default_meta)
    unlink('index.tmp')
    unlink('index2.tmp')
    unlink('index3.tmp')

    ds = xstat.stat('.')
    fs = xstat.stat('tindex.py')
    tmax = (time.time() - 1) * 10**9
    
    w1 = index.Writer('index.tmp', ms1, tmax)
    w1.add('/a/b/x', fs, meta_ofs1)
    w1.add('/a/b/c', fs, meta_ofs1)
    w1.add('/a/b/', ds, meta_ofs1)
    w1.add('/a/', ds, meta_ofs1)
    #w1.close()
    WVPASS()

    w2 = index.Writer('index2.tmp', ms2, tmax)
    w2.add('/a/b/n/2', fs, meta_ofs2)
    #w2.close()
    WVPASS()

    w3 = index.Writer('index3.tmp', ms3, tmax)
    w3.add('/a/c/n/3', fs, meta_ofs3)
    #w3.close()
    WVPASS()

    r1 = w1.new_reader()
    r2 = w2.new_reader()
    r3 = w3.new_reader()
    WVPASS()

    r1all = [e.name for e in r1]
    WVPASSEQ(r1all,
             ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
    r2all = [e.name for e in r2]
    WVPASSEQ(r2all,
             ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
    r3all = [e.name for e in r3]
    WVPASSEQ(r3all,
             ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
    all = [e.name for e in index.merge(r2, r1, r3)]
    WVPASSEQ(all,
             ['/a/c/n/3', '/a/c/n/', '/a/c/',
              '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c',
              '/a/b/', '/a/', '/'])
    fake_validate(r1)
    dump(r1)

    print [hex(e.flags) for e in r1]
Example #3
File: tindex.py Project: abael/bup
def index_dirty():
    initial_failures = wvfailure_count()
    orig_cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp(dir=bup_tmp, prefix='bup-tindex-')
    try:
        os.chdir(tmpdir)
        default_meta = metadata.Metadata()
        ms1 = index.MetaStoreWriter('index.meta.tmp')
        ms2 = index.MetaStoreWriter('index2.meta.tmp')
        ms3 = index.MetaStoreWriter('index3.meta.tmp')
        meta_ofs1 = ms1.store(default_meta)
        meta_ofs2 = ms2.store(default_meta)
        meta_ofs3 = ms3.store(default_meta)

        ds = xstat.stat(lib_t_dir)
        fs = xstat.stat(lib_t_dir + '/tindex.py')
        tmax = (time.time() - 1) * 10**9

        w1 = index.Writer('index.tmp', ms1, tmax)
        w1.add('/a/b/x', fs, meta_ofs1)
        w1.add('/a/b/c', fs, meta_ofs1)
        w1.add('/a/b/', ds, meta_ofs1)
        w1.add('/a/', ds, meta_ofs1)
        #w1.close()
        WVPASS()

        w2 = index.Writer('index2.tmp', ms2, tmax)
        w2.add('/a/b/n/2', fs, meta_ofs2)
        #w2.close()
        WVPASS()

        w3 = index.Writer('index3.tmp', ms3, tmax)
        w3.add('/a/c/n/3', fs, meta_ofs3)
        #w3.close()
        WVPASS()

        r1 = w1.new_reader()
        r2 = w2.new_reader()
        r3 = w3.new_reader()
        WVPASS()

        r1all = [e.name for e in r1]
        WVPASSEQ(r1all,
                 ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
        r2all = [e.name for e in r2]
        WVPASSEQ(r2all,
                 ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
        r3all = [e.name for e in r3]
        WVPASSEQ(r3all,
                 ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
        all = [e.name for e in index.merge(r2, r1, r3)]
        WVPASSEQ(all,
                 ['/a/c/n/3', '/a/c/n/', '/a/c/',
                  '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c',
                  '/a/b/', '/a/', '/'])
        fake_validate(r1)
        dump(r1)

        print [hex(e.flags) for e in r1]
Example #4
def index_dirty():
    unlink('index.tmp')
    unlink('index2.tmp')
    ds = xstat.stat('.')
    fs = xstat.stat('tindex.py')
    tmax = time.time() - 1
    
    w1 = index.Writer('index.tmp', tmax)
    w1.add('/a/b/x', fs)
    w1.add('/a/b/c', fs)
    w1.add('/a/b/', ds)
    w1.add('/a/', ds)
    #w1.close()
    WVPASS()

    w2 = index.Writer('index2.tmp', tmax)
    w2.add('/a/b/n/2', fs)
    #w2.close()
    WVPASS()

    w3 = index.Writer('index3.tmp', tmax)
    w3.add('/a/c/n/3', fs)
    #w3.close()
    WVPASS()

    r1 = w1.new_reader()
    r2 = w2.new_reader()
    r3 = w3.new_reader()
    WVPASS()

    r1all = [e.name for e in r1]
    WVPASSEQ(r1all,
             ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
    r2all = [e.name for e in r2]
    WVPASSEQ(r2all,
             ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
    r3all = [e.name for e in r3]
    WVPASSEQ(r3all,
             ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
    all = [e.name for e in index.merge(r2, r1, r3)]
    WVPASSEQ(all,
             ['/a/c/n/3', '/a/c/n/', '/a/c/',
              '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c',
              '/a/b/', '/a/', '/'])
    fake_validate(r1)
    dump(r1)

    print [hex(e.flags) for e in r1]
Example #5
def update_index(top, excluded_paths, exclude_rxs, xdev_exceptions):
    # tmax and start must be epoch nanoseconds.
    tmax = (time.time() - 1) * 10**9
    ri = index.Reader(indexfile)
    msw = index.MetaStoreWriter(indexfile + '.meta')
    wi = index.Writer(indexfile, msw, tmax)
    rig = IterHelper(ri.iter(name=top))
    tstart = int(time.time()) * 10**9

    hlinks = hlinkdb.HLinkDB(indexfile + '.hlink')

    fake_hash = None
    if opt.fake_valid:

        def fake_hash(name):
            return (GIT_MODE_FILE, index.FAKE_SHA)

    total = 0
    bup_dir = os.path.abspath(git.repo())
    index_start = time.time()
    for path, pst in recursive_dirlist([top],
                                       xdev=opt.xdev,
                                       bup_dir=bup_dir,
                                       excluded_paths=excluded_paths,
                                       exclude_rxs=exclude_rxs,
                                       xdev_exceptions=xdev_exceptions):
        if opt.verbose >= 2 or (opt.verbose == 1
                                and stat.S_ISDIR(pst.st_mode)):
            sys.stdout.write('%s\n' % path)
            sys.stdout.flush()
            elapsed = time.time() - index_start
            paths_per_sec = total / elapsed if elapsed else 0
            qprogress('Indexing: %d (%d paths/s)\r' % (total, paths_per_sec))
        elif not (total % 128):
            elapsed = time.time() - index_start
            paths_per_sec = total / elapsed if elapsed else 0
            qprogress('Indexing: %d (%d paths/s)\r' % (total, paths_per_sec))
        total += 1

        while rig.cur and rig.cur.name > path:  # deleted paths
            if rig.cur.exists():
                rig.cur.set_deleted()
                rig.cur.repack()
                if rig.cur.nlink > 1 and not stat.S_ISDIR(rig.cur.mode):
                    hlinks.del_path(rig.cur.name)
            rig.next()

        if rig.cur and rig.cur.name == path:  # paths that already existed
            need_repack = False
            if (rig.cur.stale(pst, tstart, check_device=opt.check_device)):
                try:
                    meta = metadata.from_path(path, statinfo=pst)
                except (OSError, IOError) as e:
                    add_error(e)
                    rig.next()
                    continue
                if not stat.S_ISDIR(rig.cur.mode) and rig.cur.nlink > 1:
                    hlinks.del_path(rig.cur.name)
                if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
                    hlinks.add_path(path, pst.st_dev, pst.st_ino)
                # Clear these so they don't bloat the store -- they're
                # already in the index (since they vary a lot and they're
                # fixed length).  If you've noticed "tmax", you might
                # wonder why it's OK to do this, since that code may
                # adjust (mangle) the index mtime and ctime -- producing
                # fake values which must not end up in a .bupm.  However,
                # it looks like that shouldn't be possible:  (1) When
                # "save" validates the index entry, it always reads the
                # metadata from the filesystem. (2) Metadata is only
                # read/used from the index if hashvalid is true. (3)
                # "faked" entries will be stale(), and so we'll invalidate
                # them below.
                meta.ctime = meta.mtime = meta.atime = 0
                meta_ofs = msw.store(meta)
                rig.cur.update_from_stat(pst, meta_ofs)
                rig.cur.invalidate()
                need_repack = True
            if not (rig.cur.flags & index.IX_HASHVALID):
                if fake_hash:
                    rig.cur.gitmode, rig.cur.sha = fake_hash(path)
                    rig.cur.flags |= index.IX_HASHVALID
                    need_repack = True
            if opt.fake_invalid:
                rig.cur.invalidate()
                need_repack = True
            if need_repack:
                rig.cur.repack()
            rig.next()
        else:  # new paths
            try:
                meta = metadata.from_path(path, statinfo=pst)
            except (OSError, IOError) as e:
                add_error(e)
                continue
            # See same assignment to 0, above, for rationale.
            meta.atime = meta.mtime = meta.ctime = 0
            meta_ofs = msw.store(meta)
            wi.add(path, pst, meta_ofs, hashgen=fake_hash)
            if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
                hlinks.add_path(path, pst.st_dev, pst.st_ino)

    elapsed = time.time() - index_start
    paths_per_sec = total / elapsed if elapsed else 0
    progress('Indexing: %d, done (%d paths/s).\n' % (total, paths_per_sec))

    hlinks.prepare_save()

    if ri.exists():
        ri.save()
        wi.flush()
        if wi.count:
            wr = wi.new_reader()
            if opt.check:
                log('check: before merging: oldfile\n')
                check_index(ri)
                log('check: before merging: newfile\n')
                check_index(wr)
            mi = index.Writer(indexfile, msw, tmax)

            for e in index.merge(ri, wr):
                # FIXME: shouldn't we remove deleted entries eventually?  When?
                mi.add_ixentry(e)

            ri.close()
            mi.close()
            wr.close()
        wi.abort()
    else:
        wi.close()

    msw.close()
    hlinks.commit_save()
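
Note the tmax units across these examples: the MetaStore-era index.Writer(path, metastore, tmax) used here expects epoch nanoseconds (per the comment at the top of this update_index), while the older index.Writer(path, tmax) in Examples #4, #8, #11 and #12 takes plain epoch seconds. A minimal side-by-side sketch:

import time

# Newer API (this example and the other MetaStore-based ones): nanoseconds.
tmax_ns = (time.time() - 1) * 10**9
# Older API (Examples #4, #8, #11, #12): seconds.
tmax_s = time.time() - 1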
Example #6
File: index-cmd.py Project: Samurais/bup
    
    hlinks.prepare_save()

    if ri.exists():
        ri.save()
        wi.flush()
        if wi.count:
            wr = wi.new_reader()
            if opt.check:
                log('check: before merging: oldfile\n')
                check_index(ri)
                log('check: before merging: newfile\n')
                check_index(wr)
            mi = index.Writer(indexfile, msw, tmax)

            for e in index.merge(ri, wr):
                # FIXME: shouldn't we remove deleted entries eventually?  When?
                mi.add_ixentry(e)

            ri.close()
            mi.close()
            wr.close()
        wi.abort()
    else:
        wi.close()

    msw.close()
    hlinks.commit_save()


Example #7
File: index-cmd.py Project: Wiesel97/bup
def update_index(top, excluded_paths):
    # tmax and start must be epoch nanoseconds.
    tmax = (time.time() - 1) * 10**9
    ri = index.Reader(indexfile)
    msw = index.MetaStoreWriter(indexfile + '.meta')
    wi = index.Writer(indexfile, msw, tmax)
    rig = IterHelper(ri.iter(name=top))
    tstart = int(time.time()) * 10**9

    hlinks = hlinkdb.HLinkDB(indexfile + '.hlink')

    hashgen = None
    if opt.fake_valid:
        def hashgen(name):
            return (GIT_MODE_FILE, index.FAKE_SHA)

    total = 0
    bup_dir = os.path.abspath(git.repo())
    for (path,pst) in drecurse.recursive_dirlist([top], xdev=opt.xdev,
                                                 bup_dir=bup_dir,
                                                 excluded_paths=excluded_paths):
        if opt.verbose>=2 or (opt.verbose==1 and stat.S_ISDIR(pst.st_mode)):
            sys.stdout.write('%s\n' % path)
            sys.stdout.flush()
            qprogress('Indexing: %d\r' % total)
        elif not (total % 128):
            qprogress('Indexing: %d\r' % total)
        total += 1
        while rig.cur and rig.cur.name > path:  # deleted paths
            if rig.cur.exists():
                rig.cur.set_deleted()
                rig.cur.repack()
                if rig.cur.nlink > 1 and not stat.S_ISDIR(rig.cur.mode):
                    hlinks.del_path(rig.cur.name)
            rig.next()
        if rig.cur and rig.cur.name == path:    # paths that already existed
            if not stat.S_ISDIR(rig.cur.mode) and rig.cur.nlink > 1:
                hlinks.del_path(rig.cur.name)
            if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
                hlinks.add_path(path, pst.st_dev, pst.st_ino)
            meta = metadata.from_path(path, statinfo=pst)
            # Clear these so they don't bloat the store -- they're
            # already in the index (since they vary a lot and they're
            # fixed length).  If you've noticed "tmax", you might
            # wonder why it's OK to do this, since that code may
            # adjust (mangle) the index mtime and ctime -- producing
            # fake values which must not end up in a .bupm.  However,
            # it looks like that shouldn't be possible:  (1) When
            # "save" validates the index entry, it always reads the
            # metadata from the filesystem. (2) Metadata is only
            # read/used from the index if hashvalid is true. (3) index
            # always invalidates "faked" entries, because "old != new"
            # in from_stat().
            meta.ctime = meta.mtime = meta.atime = 0
            meta_ofs = msw.store(meta)
            rig.cur.from_stat(pst, meta_ofs, tstart)
            if not (rig.cur.flags & index.IX_HASHVALID):
                if hashgen:
                    (rig.cur.gitmode, rig.cur.sha) = hashgen(path)
                    rig.cur.flags |= index.IX_HASHVALID
            if opt.fake_invalid:
                rig.cur.invalidate()
            rig.cur.repack()
            rig.next()
        else:  # new paths
            meta = metadata.from_path(path, statinfo=pst)
            # See same assignment to 0, above, for rationale.
            meta.atime = meta.mtime = meta.ctime = 0
            meta_ofs = msw.store(meta)
            wi.add(path, pst, meta_ofs, hashgen = hashgen)
            if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
                hlinks.add_path(path, pst.st_dev, pst.st_ino)

    progress('Indexing: %d, done.\n' % total)
    
    hlinks.prepare_save()

    if ri.exists():
        ri.save()
        wi.flush()
        if wi.count:
            wr = wi.new_reader()
            if opt.check:
                log('check: before merging: oldfile\n')
                check_index(ri)
                log('check: before merging: newfile\n')
                check_index(wr)
            mi = index.Writer(indexfile, msw, tmax)

            for e in index.merge(ri, wr):
                # FIXME: shouldn't we remove deleted entries eventually?  When?
                mi.add_ixentry(e)

            ri.close()
            mi.close()
            wr.close()
        wi.abort()
    else:
        wi.close()

    msw.close()
    hlinks.commit_save()
Example #8
def index_dirty():
    unlink('index.tmp')
    unlink('index2.tmp')
    ds = xstat.stat('.')
    fs = xstat.stat('tindex.py')
    tmax = time.time() - 1
    
    w1 = index.Writer('index.tmp', tmax)
    w1.add('/a/b/x', fs)
    w1.add('/a/b/c', fs)
    w1.add('/a/b/', ds)
    w1.add('/a/', ds)
    #w1.close()
    WVPASS()

    w2 = index.Writer('index2.tmp', tmax)
    w2.add('/a/b/n/2', fs)
    #w2.close()
    WVPASS()

    w3 = index.Writer('index3.tmp', tmax)
    w3.add('/a/c/n/3', fs)
    #w3.close()
    WVPASS()

    r1 = w1.new_reader()
    r2 = w2.new_reader()
    r3 = w3.new_reader()
    WVPASS()

    r1all = [e.name for e in r1]
    WVPASSEQ(r1all,
             ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
    r2all = [e.name for e in r2]
    WVPASSEQ(r2all,
             ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
    r3all = [e.name for e in r3]
    WVPASSEQ(r3all,
             ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
    all = [e.name for e in index.merge(r2, r1, r3)]
    WVPASSEQ(all,
             ['/a/c/n/3', '/a/c/n/', '/a/c/',
              '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c',
              '/a/b/', '/a/', '/'])
    fake_validate(r1)
    dump(r1)

    print [hex(e.flags) for e in r1]
    WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
    WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
    WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
             ['/a/c/n/3', '/a/c/n/', '/a/c/',
              '/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])

    expect_invalid = ['/'] + r2all + r3all
    expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                    | set(['/a/b/n/2', '/a/c/n/3'])
    dump(index.merge(r2, r1, r3))
    for e in index.merge(r2, r1, r3):
        print e.name, hex(e.flags), e.ctime
        eiv = e.name in expect_invalid
        er  = e.name in expect_real
        WVPASSEQ(eiv, not e.is_valid())
        WVPASSEQ(er, e.is_real())
    fake_validate(r2, r3)
    dump(index.merge(r2, r1, r3))
    WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], [])
    
    e = eget(index.merge(r2, r1, r3), '/a/b/c')
    e.invalidate()
    e.repack()
    dump(index.merge(r2, r1, r3))
    WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
             ['/a/b/c', '/a/b/', '/a/', '/'])
Example #9
File: tindex.py Project: vipup/bup
def index_dirty():
    with no_lingering_errors():
        with test_tempdir('bup-tindex-') as tmpdir:
            orig_cwd = os.getcwd()
            try:
                os.chdir(tmpdir)
                default_meta = metadata.Metadata()
                ms1 = index.MetaStoreWriter('index.meta.tmp')
                ms2 = index.MetaStoreWriter('index2.meta.tmp')
                ms3 = index.MetaStoreWriter('index3.meta.tmp')
                meta_ofs1 = ms1.store(default_meta)
                meta_ofs2 = ms2.store(default_meta)
                meta_ofs3 = ms3.store(default_meta)

                ds = xstat.stat(lib_t_dir)
                fs = xstat.stat(lib_t_dir + '/tindex.py')
                tmax = (time.time() - 1) * 10**9

                w1 = index.Writer('index.tmp', ms1, tmax)
                w1.add('/a/b/x', fs, meta_ofs1)
                w1.add('/a/b/c', fs, meta_ofs1)
                w1.add('/a/b/', ds, meta_ofs1)
                w1.add('/a/', ds, meta_ofs1)
                #w1.close()
                WVPASS()

                w2 = index.Writer('index2.tmp', ms2, tmax)
                w2.add('/a/b/n/2', fs, meta_ofs2)
                #w2.close()
                WVPASS()

                w3 = index.Writer('index3.tmp', ms3, tmax)
                w3.add('/a/c/n/3', fs, meta_ofs3)
                #w3.close()
                WVPASS()

                r1 = w1.new_reader()
                r2 = w2.new_reader()
                r3 = w3.new_reader()
                WVPASS()

                r1all = [e.name for e in r1]
                WVPASSEQ(r1all, ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
                r2all = [e.name for e in r2]
                WVPASSEQ(r2all, ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
                r3all = [e.name for e in r3]
                WVPASSEQ(r3all, ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
                all = [e.name for e in index.merge(r2, r1, r3)]
                WVPASSEQ(all, [
                    '/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/x', '/a/b/n/2',
                    '/a/b/n/', '/a/b/c', '/a/b/', '/a/', '/'
                ])
                fake_validate(r1)
                dump(r1)

                print [hex(e.flags) for e in r1]
                WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
                WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
                WVPASSEQ([
                    e.name
                    for e in index.merge(r2, r1, r3) if not e.is_valid()
                ], [
                    '/a/c/n/3', '/a/c/n/', '/a/c/', '/a/b/n/2', '/a/b/n/',
                    '/a/b/', '/a/', '/'
                ])

                expect_invalid = ['/'] + r2all + r3all
                expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                                | set(['/a/b/n/2', '/a/c/n/3'])
                dump(index.merge(r2, r1, r3))
                for e in index.merge(r2, r1, r3):
                    print e.name, hex(e.flags), e.ctime
                    eiv = e.name in expect_invalid
                    er = e.name in expect_real
                    WVPASSEQ(eiv, not e.is_valid())
                    WVPASSEQ(er, e.is_real())
                fake_validate(r2, r3)
                dump(index.merge(r2, r1, r3))
                WVPASSEQ([
                    e.name
                    for e in index.merge(r2, r1, r3) if not e.is_valid()
                ], [])

                e = eget(index.merge(r2, r1, r3), '/a/b/c')
                e.invalidate()
                e.repack()
                dump(index.merge(r2, r1, r3))
                WVPASSEQ([
                    e.name
                    for e in index.merge(r2, r1, r3) if not e.is_valid()
                ], ['/a/b/c', '/a/b/', '/a/', '/'])
                w1.close()
                w2.close()
                w3.close()
            finally:
                os.chdir(orig_cwd)
Example #10
File: tindex.py Project: abael/bup
                 ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
        r3all = [e.name for e in r3]
        WVPASSEQ(r3all,
                 ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
        all = [e.name for e in index.merge(r2, r1, r3)]
        WVPASSEQ(all,
                 ['/a/c/n/3', '/a/c/n/', '/a/c/',
                  '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c',
                  '/a/b/', '/a/', '/'])
        fake_validate(r1)
        dump(r1)

        print [hex(e.flags) for e in r1]
        WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
        WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
        WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
                 ['/a/c/n/3', '/a/c/n/', '/a/c/',
                  '/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])

        expect_invalid = ['/'] + r2all + r3all
        expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                        | set(['/a/b/n/2', '/a/c/n/3'])
        dump(index.merge(r2, r1, r3))
        for e in index.merge(r2, r1, r3):
            print e.name, hex(e.flags), e.ctime
            eiv = e.name in expect_invalid
            er  = e.name in expect_real
            WVPASSEQ(eiv, not e.is_valid())
            WVPASSEQ(er, e.is_real())
        fake_validate(r2, r3)
        dump(index.merge(r2, r1, r3))
Example #11
def update_index(top, excluded_paths):
    tmax = time.time() - 1
    ri = index.Reader(indexfile)
    wi = index.Writer(indexfile, tmax)
    rig = IterHelper(ri.iter(name=top))
    tstart = int(time.time())

    hashgen = None
    if opt.fake_valid:

        def hashgen(name):
            return (GIT_MODE_FILE, index.FAKE_SHA)

    total = 0
    bup_dir = os.path.abspath(git.repo())
    for (path,
         pst) in drecurse.recursive_dirlist([top],
                                            xdev=opt.xdev,
                                            bup_dir=bup_dir,
                                            excluded_paths=excluded_paths):
        if opt.verbose >= 2 or (opt.verbose == 1
                                and stat.S_ISDIR(pst.st_mode)):
            sys.stdout.write('%s\n' % path)
            sys.stdout.flush()
            qprogress('Indexing: %d\r' % total)
        elif not (total % 128):
            qprogress('Indexing: %d\r' % total)
        total += 1
        while rig.cur and rig.cur.name > path:  # deleted paths
            if rig.cur.exists():
                rig.cur.set_deleted()
                rig.cur.repack()
            rig.next()
        if rig.cur and rig.cur.name == path:  # paths that already existed
            if pst:
                rig.cur.from_stat(pst, tstart)
            if not (rig.cur.flags & index.IX_HASHVALID):
                if hashgen:
                    (rig.cur.gitmode, rig.cur.sha) = hashgen(path)
                    rig.cur.flags |= index.IX_HASHVALID
            if opt.fake_invalid:
                rig.cur.invalidate()
            rig.cur.repack()
            rig.next()
        else:  # new paths
            wi.add(path, pst, hashgen=hashgen)
    progress('Indexing: %d, done.\n' % total)

    if ri.exists():
        ri.save()
        wi.flush()
        if wi.count:
            wr = wi.new_reader()
            if opt.check:
                log('check: before merging: oldfile\n')
                check_index(ri)
                log('check: before merging: newfile\n')
                check_index(wr)
            mi = index.Writer(indexfile, tmax)

            for e in index.merge(ri, wr):
                # FIXME: shouldn't we remove deleted entries eventually?  When?
                mi.add_ixentry(e)

            ri.close()
            mi.close()
            wr.close()
        wi.abort()
    else:
        wi.close()
Example #12
def update_index(top, excluded_paths):
    tmax = time.time() - 1
    ri = index.Reader(indexfile)
    wi = index.Writer(indexfile, tmax)
    rig = IterHelper(ri.iter(name=top))
    tstart = int(time.time())

    hashgen = None
    if opt.fake_valid:
        def hashgen(name):
            return (GIT_MODE_FILE, index.FAKE_SHA)

    total = 0
    bup_dir = os.path.abspath(git.repo())
    for (path,pst) in drecurse.recursive_dirlist([top], xdev=opt.xdev,
                                                 bup_dir=bup_dir,
                                                 excluded_paths=excluded_paths):
        if opt.verbose>=2 or (opt.verbose==1 and stat.S_ISDIR(pst.st_mode)):
            sys.stdout.write('%s\n' % path)
            sys.stdout.flush()
            qprogress('Indexing: %d\r' % total)
        elif not (total % 128):
            qprogress('Indexing: %d\r' % total)
        total += 1
        while rig.cur and rig.cur.name > path:  # deleted paths
            if rig.cur.exists():
                rig.cur.set_deleted()
                rig.cur.repack()
            rig.next()
        if rig.cur and rig.cur.name == path:    # paths that already existed
            if pst:
                rig.cur.from_stat(pst, tstart)
            if not (rig.cur.flags & index.IX_HASHVALID):
                if hashgen:
                    (rig.cur.gitmode, rig.cur.sha) = hashgen(path)
                    rig.cur.flags |= index.IX_HASHVALID
            if opt.fake_invalid:
                rig.cur.invalidate()
            rig.cur.repack()
            rig.next()
        else:  # new paths
            wi.add(path, pst, hashgen = hashgen)
    progress('Indexing: %d, done.\n' % total)
    
    if ri.exists():
        ri.save()
        wi.flush()
        if wi.count:
            wr = wi.new_reader()
            if opt.check:
                log('check: before merging: oldfile\n')
                check_index(ri)
                log('check: before merging: newfile\n')
                check_index(wr)
            mi = index.Writer(indexfile, tmax)

            for e in index.merge(ri, wr):
                # FIXME: shouldn't we remove deleted entries eventually?  When?
                mi.add_ixentry(e)

            ri.close()
            mi.close()
            wr.close()
        wi.abort()
    else:
        wi.close()
Example #13
File: tindex.py Project: xx4h/bup
def index_dirty():
    with no_lingering_errors():
        with test_tempdir('bup-tindex-') as tmpdir:
            orig_cwd = os.getcwd()
            try:
                os.chdir(tmpdir)
                default_meta = metadata.Metadata()
                ms1 = index.MetaStoreWriter('index.meta.tmp')
                ms2 = index.MetaStoreWriter('index2.meta.tmp')
                ms3 = index.MetaStoreWriter('index3.meta.tmp')
                meta_ofs1 = ms1.store(default_meta)
                meta_ofs2 = ms2.store(default_meta)
                meta_ofs3 = ms3.store(default_meta)

                ds = xstat.stat(lib_t_dir)
                fs = xstat.stat(lib_t_dir + '/tindex.py')
                tmax = (time.time() - 1) * 10**9

                w1 = index.Writer('index.tmp', ms1, tmax)
                w1.add('/a/b/x', fs, meta_ofs1)
                w1.add('/a/b/c', fs, meta_ofs1)
                w1.add('/a/b/', ds, meta_ofs1)
                w1.add('/a/', ds, meta_ofs1)
                #w1.close()
                WVPASS()

                w2 = index.Writer('index2.tmp', ms2, tmax)
                w2.add('/a/b/n/2', fs, meta_ofs2)
                #w2.close()
                WVPASS()

                w3 = index.Writer('index3.tmp', ms3, tmax)
                w3.add('/a/c/n/3', fs, meta_ofs3)
                #w3.close()
                WVPASS()

                r1 = w1.new_reader()
                r2 = w2.new_reader()
                r3 = w3.new_reader()
                WVPASS()

                r1all = [e.name for e in r1]
                WVPASSEQ(r1all,
                         ['/a/b/x', '/a/b/c', '/a/b/', '/a/', '/'])
                r2all = [e.name for e in r2]
                WVPASSEQ(r2all,
                         ['/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])
                r3all = [e.name for e in r3]
                WVPASSEQ(r3all,
                         ['/a/c/n/3', '/a/c/n/', '/a/c/', '/a/', '/'])
                all = [e.name for e in index.merge(r2, r1, r3)]
                WVPASSEQ(all,
                         ['/a/c/n/3', '/a/c/n/', '/a/c/',
                          '/a/b/x', '/a/b/n/2', '/a/b/n/', '/a/b/c',
                          '/a/b/', '/a/', '/'])
                fake_validate(r1)
                dump(r1)

                print [hex(e.flags) for e in r1]
                WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
                WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
                WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
                         ['/a/c/n/3', '/a/c/n/', '/a/c/',
                          '/a/b/n/2', '/a/b/n/', '/a/b/', '/a/', '/'])

                expect_invalid = ['/'] + r2all + r3all
                expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                                | set(['/a/b/n/2', '/a/c/n/3'])
                dump(index.merge(r2, r1, r3))
                for e in index.merge(r2, r1, r3):
                    print e.name, hex(e.flags), e.ctime
                    eiv = e.name in expect_invalid
                    er  = e.name in expect_real
                    WVPASSEQ(eiv, not e.is_valid())
                    WVPASSEQ(er, e.is_real())
                fake_validate(r2, r3)
                dump(index.merge(r2, r1, r3))
                WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()], [])

                e = eget(index.merge(r2, r1, r3), '/a/b/c')
                e.invalidate()
                e.repack()
                dump(index.merge(r2, r1, r3))
                WVPASSEQ([e.name for e in index.merge(r2, r1, r3) if not e.is_valid()],
                         ['/a/b/c', '/a/b/', '/a/', '/'])
                w1.close()
                w2.close()
                w3.close()
            finally:
                os.chdir(orig_cwd)
Example #14
File: test_index.py Project: fakegit/bup
def test_index_dirty(tmpdir):
    orig_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        default_meta = metadata.Metadata()

        with index.MetaStoreWriter(b'index.meta.tmp') as ms1, \
             index.MetaStoreWriter(b'index2.meta.tmp') as ms2, \
             index.MetaStoreWriter(b'index3.meta.tmp') as ms3:

            meta_ofs1 = ms1.store(default_meta)
            meta_ofs2 = ms2.store(default_meta)
            meta_ofs3 = ms3.store(default_meta)

            ds = xstat.stat(lib_t_dir)
            fs = xstat.stat(lib_t_dir + b'/test_index.py')
            tmax = (time.time() - 1) * 10**9

            with index.Writer(b'index.tmp', ms1, tmax) as w1, \
                 index.Writer(b'index2.tmp', ms2, tmax) as w2, \
                 index.Writer(b'index3.tmp', ms3, tmax) as w3:

                w1.add(b'/a/b/x', fs, meta_ofs1)
                w1.add(b'/a/b/c', fs, meta_ofs1)
                w1.add(b'/a/b/', ds, meta_ofs1)
                w1.add(b'/a/', ds, meta_ofs1)
                #w1.close()
                WVPASS()

                w2.add(b'/a/b/n/2', fs, meta_ofs2)
                #w2.close()
                WVPASS()

                w3.add(b'/a/c/n/3', fs, meta_ofs3)
                #w3.close()
                WVPASS()

                with w1.new_reader() as r1, \
                     w2.new_reader() as r2, \
                     w3.new_reader() as r3:
                    WVPASS()

                    r1all = [e.name for e in r1]
                    WVPASSEQ(r1all,
                             [b'/a/b/x', b'/a/b/c', b'/a/b/', b'/a/', b'/'])
                    r2all = [e.name for e in r2]
                    WVPASSEQ(r2all,
                             [b'/a/b/n/2', b'/a/b/n/', b'/a/b/', b'/a/', b'/'])
                    r3all = [e.name for e in r3]
                    WVPASSEQ(r3all,
                             [b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/', b'/'])
                    all = [e.name for e in index.merge(r2, r1, r3)]
                    WVPASSEQ(all, [
                        b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/b/x',
                        b'/a/b/n/2', b'/a/b/n/', b'/a/b/c', b'/a/b/', b'/a/',
                        b'/'
                    ])
                    fake_validate(r1)
                    dump(r1)

                    print([hex(e.flags) for e in r1])
                    WVPASSEQ([e.name for e in r1 if e.is_valid()], r1all)
                    WVPASSEQ([e.name for e in r1 if not e.is_valid()], [])
                    WVPASSEQ([
                        e.name
                        for e in index.merge(r2, r1, r3) if not e.is_valid()
                    ], [
                        b'/a/c/n/3', b'/a/c/n/', b'/a/c/', b'/a/b/n/2',
                        b'/a/b/n/', b'/a/b/', b'/a/', b'/'
                    ])

                    expect_invalid = [b'/'] + r2all + r3all
                    expect_real = (set(r1all) - set(r2all) - set(r3all)) \
                                    | set([b'/a/b/n/2', b'/a/c/n/3'])
                    dump(index.merge(r2, r1, r3))
                    for e in index.merge(r2, r1, r3):
                        print(e.name, hex(e.flags), e.ctime)
                        eiv = e.name in expect_invalid
                        er = e.name in expect_real
                        WVPASSEQ(eiv, not e.is_valid())
                        WVPASSEQ(er, e.is_real())
                    fake_validate(r2, r3)
                    dump(index.merge(r2, r1, r3))
                    WVPASSEQ([
                        e.name
                        for e in index.merge(r2, r1, r3) if not e.is_valid()
                    ], [])

                    e = eget(index.merge(r2, r1, r3), b'/a/b/c')
                    e.invalidate()
                    e.repack()
                    dump(index.merge(r2, r1, r3))
                    WVPASSEQ([
                        e.name
                        for e in index.merge(r2, r1, r3) if not e.is_valid()
                    ], [b'/a/b/c', b'/a/b/', b'/a/', b'/'])
    finally:
        os.chdir(orig_cwd)
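
Taken together, the test examples above all exercise the same Writer -> Reader -> merge round trip. A minimal sketch of that pattern, assuming the Python 3 bytes API of the final example (the file names here are illustrative, not from any of the projects above):

import time
from bup import index, metadata, xstat

# Metadata is stored out-of-line; entries keep only an offset into the store.
ms = index.MetaStoreWriter(b'example.meta.tmp')
meta_ofs = ms.store(metadata.Metadata())
tmax = (time.time() - 1) * 10**9             # epoch nanoseconds

w = index.Writer(b'example.tmp', ms, tmax)
w.add(b'/a/b/', xstat.stat(b'.'), meta_ofs)  # parent dirs /a/ and / are implied

with w.new_reader() as r:                    # readable before the writer commits
    print([e.name for e in r])               # deepest-first: [b'/a/b/', b'/a/', b'/']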