Example #1
def simplify_precursors(co, handle, changedbs, pres, txn):
    # map big DAG precursors to little DAG
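    # each precursor is translated to the last point at which this handle
    # was modified; any translated precursor which is an ancestor of the
    # others is then dropped. returns the surviving (point, index) pairs
    # and the original precursor count.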
    dagdb = changedbs.dagdb
    precursors, indices = [], []
    for i in xrange(len(pres)):
        last = handle_last_modified(co, changedbs, handle, pres[i], txn)
        if last is None:
            continue

        # XXX: this is correct, but breaks old history
        if 0:
            pinfo = bdecode(dagdb.get(handle + last, txn=txn))
            if not pinfo.has_key('handle') and len(pinfo['precursors']) == 1:
                last = handle_last_modified(co, changedbs, handle,
                                            pinfo['precursors'][0][0], txn)
                if last is None:
                    continue

        precursors.append(last)
        indices.append(i)

    # second pass to eliminate precursors which are ancestors of others
    retval = []
    for i in xrange(len(precursors)):
        pre = precursors.pop(0)
        index = indices.pop(0)
        if _is_ancestor(co, pre, precursors, txn):
            continue
        precursors.append(pre)
        retval.append((pre, index))

    return retval, len(pres)
Example #2
def sync_history(co, point, txn, cache=dict()):
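    # process all unsynced precursors of a point before the point itself,
    # using changesdb to record (and skip) changesets which have already
    # been synced. note that the mutable default argument makes the
    # decoded-changeset cache persist across calls.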
    named, modified = [], []

    points = [point]
    while len(points):
        npoint = points[-1]

        if cache.has_key(npoint):
            pinfo = cache[npoint]
        else:
            pinfo = bdecode(co.lcrepo.get(npoint, txn=txn))
            cache[npoint] = pinfo

        pres = []
        for pre in pinfo['precursors']:
            if co.changesdb.has_key(binascii.hexlify(pre), txn):
                continue
            pres.append(pre)
        pres.reverse()

        points.extend(pres)
        if pres != []:
            continue

        npoint = points.pop()

        if co.changesdb.has_key(binascii.hexlify(npoint), txn):
            continue

        named, modified = _sync_history(co, npoint, txn, cache=cache)
        co.changesdb.put(binascii.hexlify(npoint), '', txn=txn)

    return named, modified
Example #3
def pretty_print_dag(co, handle, heads):
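    # emit a Graphviz digraph of this handle's mini-DAG, starting from the
    # last point at which the handle was modified under each head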
    print 'digraph {'

    points = []
    for point in heads:
        head = handle_last_modified(co, co.contents, handle, point, None)
        if head is not None:
            points.append(head)

    cdagdb = co.contents.dagdb
    cache = {}
    while len(points):
        point = points.pop()
        if cache.has_key(point):
            continue
        cache[point] = 1

        sid = short_id(co, point)
        print 'c_%s [label="%s"]' % (sid, sid)

        info = bdecode(cdagdb.get(handle + point))
        for pre, foo in info['precursors']:
            print 'c_%s -> c_%s' % (sid, short_id(co, pre))

        points.extend([pre for pre, foo in info['precursors']])

    print '}'
Example #4
def upgrade(old_repo, new_repo, changes, txn):
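    # convert a whole repository: collect the file handles, topologically
    # sort the history (twice, with a reverse in between, for better DAG
    # construction), then convert every changeset from the root forward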
    UR = UpgradeRepository(old_repo, new_repo, txn)

    for old_handle in old_repo.staticdb.keys():
        hinfo = bdecode(old_repo.staticdb.get(old_handle))
        if hinfo['type'] == 'file':
            UR.all_old_handles[old_handle] = hinfo

    # sort the history
    ordering = UR.sort_history(changes)

    # sort again for better dag construction
    ordering.reverse()
    ordering = UR.sort_history(ordering)

    assert rootnode == ordering[0]

    print "%d changesets to convert" % (len(ordering), )

    for point in ordering:
        new_point = convert_cset(UR, point)

        stdout.write('.')
        stdout.flush()

    return UR
Example #5
def pretty_print_big_dag(co, heads):
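    # emit a Graphviz digraph of the changeset history; clean merge points
    # and the root are filled lightblue, everything else orange, and the
    # edge to the first precursor of a clean merge is drawn solid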
    print 'digraph {'

    points = heads[:]
    cache = {}
    while len(points):
        point = points.pop()
        if cache.has_key(point):
            continue
        cache[point] = 1

        info = bdecode(co.lcrepo.get(point))
        sid = short_id(co, point)

        color = "orange"
        style = "dashed"
        if clean_merge_point(info) or point == rootnode:
            color = "lightblue"
            style = "solid"

        label = sid
        if point == rootnode:
            label = "root"

        print 'c_%s [label="%s",style=filled,color=%s]' % (sid, label, color)

        for pre in info['precursors']:
            print 'c_%s -> c_%s [style=%s]' % (short_id(co, pre), sid, style)
            style = "dashed"

        points.extend(info['precursors'])

    print '}'
Example #6
    def _history_deps(node, args):
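        # dependency callback, presumably for a generic history walk: a
        # changeset depends on its precursors, returned in reverse order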
        co = args[0]

        cset = bdecode(co.lcrepo.get(node))
        cset['precursors'].reverse()

        return cset['precursors']
Example #7
def write_index(co, point, handle, index, txn):
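    # record where the diff for this handle at this point lives, merging
    # the index into any existing record for handle + point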
    cdagdb = co.contents.dagdb
    try:
        old_index = bdecode(cdagdb.get(handle + point, txn=txn))
        old_index['handle'] = index
    except (db.DBNotFoundError, TypeError):
        old_index = {'handle': index}
    cdagdb.put(handle + point, bencode(old_index), txn=txn)
Example #8
def handles_in_branch(co, lpoints, bpoints, txn, cache=None, deleted_modified=False):
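    # walk every changeset reachable from bpoints which is not an ancestor
    # of lpoints, collecting which handles were named, modified or deleted
    # along the way; deleted handles are culled from the modified list
    # unless deleted_modified is set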
    points = bpoints[:]
    seen, named, modified, deleted = {}, {}, {}, {}
    while len(points):
        pnext = points.pop()
        if seen.has_key(pnext):
            continue
        seen[pnext] = 1
        if _is_ancestor(co, pnext, lpoints, txn):
            continue
        if cache:
            if cache.has_key(pnext):
                pinfo = cache[pnext]
            else:
                pinfo = bdecode(co.lcrepo.get(pnext, txn=txn))
                cache[pnext] = pinfo
        else:
            pinfo = bdecode(co.lcrepo.get(pnext, txn=txn))
        for handle, hinfo in pinfo['handles'].items():
            if hinfo.has_key('name'):
                named[handle] = 1
            if hinfo.has_key('hash'):
                modified[handle] = 1
            if hinfo.has_key('delete'):
                named[handle] = 1
                if not deleted_modified:
                    deleted[handle] = 1

        # XXX: afaik, this is only an issue for ambiguous clean merges, which
        # don't happen with name operations. requires more thought.
        if co.contents.mergedb.has_key(pnext, txn):
            clean_merges = bdecode(co.contents.mergedb.get(pnext, txn=txn))
            for handle in clean_merges:
                modified[handle] = 1
            # XXX: check for deletes?

        points.extend(pinfo['precursors'])
    for handle in deleted.keys():
        if modified.has_key(handle):
            del modified[handle]
    return (named.keys(), modified.keys())
Example #9
def db_get(co, cdb, key, txn):
    # cache bdecoded records, keyed per database
    try:
        cache = co.db_cache[cdb]
    except KeyError:
        cache = co.db_cache[cdb] = {}
    if cache.has_key(key):
        return cache[key]
    cache[key] = bdecode(cdb.get(key, txn=txn))
    return cache[key]
Example #10
def dump_changeinfo(co, change, repo=None):
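    # summarize a clean merge point as a small XML document, including the
    # non-merge changes being merged; raises ValueError if the change is
    # not a clean merge point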
    output = []

    changeset = bdecode(co.lcrepo.get(change))
    if not clean_merge_point(changeset):
        raise ValueError

    output.append("<root>")
    if repo:
        output.append("<repository>" + saxutils.escape(repo) + "</repository>")
    output.append("<changenumber>" + binascii.hexlify(change) + "</changenumber>")
    output.append("<short-changenumber>" + short_id(co, change) + "</short-changenumber>")
    output.append("<committer>" + saxutils.escape(changeset['user']) + "</committer>")
    output.append("<date>" + str(changeset['time']) + "</date>")

    for change in changes_in_branch(co, [changeset['precursors'][0]],
                                    changeset['precursors'][1:], None):
        changeset = bdecode(co.lcrepo.get(change))
        if not clean_merge_point(changeset):
            output.extend(print_change(co, change, changeset))

    output.append("</root>\n")
    return '\n'.join(output)
Example #11
def changes_in_branch(co, lpoints, bpoints, txn, cache=None):
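    # list the changesets reachable from bpoints which are not ancestors
    # of lpoints (the same walk as handles_in_branch, but returning the
    # points themselves)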
    points = bpoints[:]
    seen, changes = {}, []
    while len(points):
        pnext = points.pop()
        if seen.has_key(pnext):
            continue
        seen[pnext] = 1
        if _is_ancestor(co, pnext, lpoints, txn):
            continue
        if cache:
            if cache.has_key(pnext):
                pinfo = cache[pnext]
            else:
                pinfo = bdecode(co.lcrepo.get(pnext, txn=txn))
                cache[pnext] = pinfo
        else:
            pinfo = bdecode(co.lcrepo.get(pnext, txn=txn))

        points.extend(pinfo['precursors'])

        changes.append(pnext)

    return changes
Example #12
def _update_helper_content(co, handle, point, hinfo, txn):
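    # build the mini-DAG node record for a content change: carry over the
    # stored diff offset and length when the diff has already been written
    # (-1 marks a missing entry), and flag adds and deletes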
    oinfo = {}
    if hinfo.has_key('hash'):
        if co.contents.dagdb.has_key(handle + point, txn):
            dinfo = bdecode(co.contents.dagdb.get(handle + point, txn=txn))
            oinfo['offset'] = dinfo['handle']['offset']
            oinfo['length'] = dinfo['handle']['length']
        else:
            # XXX: ugly in general
            oinfo['offset'] = -1
    if hinfo.has_key('add'):
        oinfo['add'] = 1
    elif hinfo.has_key('delete'):
        oinfo['delete'] = 1
    return oinfo
Example #13
    def clean_merges(self, UR, dagdb, point):
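        # find handles which were implicitly clean-merged at this point:
        # their mini-DAG node has multiple precursors but no explicit
        # change (no 'handle' key) of its own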
        clean_merges = {}
        handles      = []
        for handle in UR.all_old_handles.keys():
            if not dagdb.has_key(handle + point):
                continue

            hinfo = bdecode(dagdb.get(handle + point))
            if hinfo.has_key('handle'):
                continue

            if len(hinfo['precursors']) <= 1:
                continue

            clean_merges[handle] = 1
            handles.append(handle)

        return clean_merges, handles
Example #14
def _mini_dag_refcount(co, handle, point, txn, cache=None, info_cache=None):
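    # count how many times each node in the handle's mini-DAG is reached
    # when walking back from point, caching the decoded node records in
    # info_cache for the caller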
    assert info_cache is not None
    if cache is None:
        cache = {}
    points = [point]
    while len(points):
        point = points.pop()
        if cache.has_key(point):
            cache[point]['refcount'] += 1
            continue
        cache[point] = {'refcount': 1}
        
        pinfo = bdecode(co.contents.dagdb.get(handle + point, txn=txn))
        info_cache[point] = pinfo

        for p, i in pinfo['precursors']:
            points.append(p)
    return cache
Example #15
def rebuild_from_points(co, points, txn):
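    # wipe the derived indices and rebuild them by re-syncing history from
    # the given points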
    co.changesdb.truncate(txn)
    co.branchdb.truncate(txn)
    co.branchmapdb.truncate(txn)
    co.names.indexdb.truncate(txn)
    co.names.dagdb.truncate(txn)
    co.names.mergedb.truncate(txn)
    co.contents.indexdb.truncate(txn)
    co.contents.mergedb.truncate(txn)
    # we don't truncate the cdagdb because it contains the offsets and lengths
    # for the diffs in the files, which we can't recreate. the sync below will
    # read those parameters out and rewrite the cdagdb, anyway.
    co.linforepo.put('branchmax', bencode(0), txn=txn)

    cdagdb = co.contents.dagdb
    for key, value in cdagdb.items(txn):
        if len(key) != 40:
            continue
        if not bdecode(value).has_key('handle'):
            cdagdb.delete(key, txn=txn)

    for point in points:
        sync_history(co, point, txn)
Example #16
def convert_cset(UR, point):
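    # convert one changeset to the new repository format: remap precursors
    # and handles, make implicit clean merges explicit, regenerate diffs
    # against the new repository, and write out the new changeset and its
    # diff indices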
    indices = {}

    old_cset = bdecode(UR.old_repo.lcrepo.get(point))

    new_cset = {}
    new_cset['precursors'] = [
        UR.point_map[pre] for pre in old_cset['precursors']
    ]

    if old_cset.has_key('time'):
        new_cset['time'] = old_cset['time']

    if old_cset.has_key('user'):
        new_cset['user'] = old_cset['user']

    # some heuristics for comments and whether this was a server change
    clean_merge = True
    force_new_cset = False

    if old_cset.has_key('comment'):
        clean_merge = False
        new_cset['comment'] = old_cset['comment'].rstrip()
        if len(new_cset['comment']):
            new_cset['comment'] = new_cset['comment'] + '\n'

    elif point == rootnode:
        pass

    elif old_cset['handles'] != {} or len(old_cset['precursors']) != 2:
        clean_merge = False
        new_cset['comment'] = '--- comment inserted by cdvupgrade ---\n'

    # sort the handles
    handle_list = UR.sort_names(old_cset['handles'])

    # find implicit clean content merges
    clean_merges, hl = UR.clean_merges(UR, UR.old_repo.contents.dagdb, point)
    handle_list.extend(hl)

    # find implicit clean name merges
    clean_nmerges, hl = UR.clean_merges(UR, UR.old_repo.names.dagdb, point)
    handle_list.extend(hl)

    new_cset['handles'] = handles = {}
    for old_handle in handle_list:
        old_hinfo = None
        try:
            old_hinfo = old_cset['handles'][old_handle]
        except KeyError:
            old_hinfo = {}

        # not much has changed
        new_hinfo = copy.copy(old_hinfo)

        new_handle = None
        if UR.handle_map.has_key(old_handle):
            new_handle = UR.handle_map[old_handle]

        # make name changes explicit
        if clean_nmerges.has_key(old_handle):
            name = old_handle_name_at_point(UR.old_repo, old_handle, point,
                                            None)
            new_hinfo['parent'] = name['parent']
            new_hinfo['name'] = name['name']

        # fixup the parent pointers
        if old_hinfo.has_key('parent'):
            new_hinfo['parent'] = UR.handle_map[old_hinfo['parent']]

        if old_hinfo.has_key('hash') or clean_merges.has_key(old_handle):
            # figure out what the file is supposed to look like now
            lines = old_handle_contents_at_point(UR.old_repo, old_handle,
                                                 point, None)['lines']

            # if the file is being added, there are no precursors
            precursors = []
            if new_handle is not None and not old_hinfo.has_key('add'):
                precursors = new_cset['precursors']

            # generate the diff against the new repo
            dinfo = gen_diff(UR.new_repo, new_handle, precursors, lines,
                             UR.txn)
            if old_hinfo.has_key('add'):
                dinfo['add'] = 1
                assert dinfo['matches'] == []

            if dinfo is not None:
                diff = bencode(dinfo)
                new_hinfo['hash'] = sha.new(diff).digest()

                # if this used to be a clean merge, we have to replace it
                if not old_cset['handles'].has_key(old_handle) or \
                   not old_cset['handles'][old_handle].has_key('hash'):
                    force_new_cset = True

            elif new_hinfo.has_key('hash'):
                del new_hinfo['hash']

            # sanity check
            if new_handle is None:
                assert old_hinfo.has_key('add')
                assert old_hinfo['add']['type'] == 'file'

            # if the file is new, we have to create the handle before writing
            # the diff
            if old_hinfo.has_key('add'):
                nhandle = create_handle(new_cset['precursors'], new_hinfo)
                assert new_handle is None or new_handle == nhandle
                new_handle = nhandle
                UR.handle_map[old_handle] = new_handle

            # write out the new diff
            if new_hinfo.has_key('hash'):
                zdiff = zlib.compress(diff, 6)
                indices[new_handle] = write_diff(UR.new_repo, new_handle,
                                                 zdiff, UR.txn)

        elif old_hinfo.has_key('add'):
            assert old_hinfo['add']['type'] == 'dir'

            nhandle = create_handle(new_cset['precursors'], new_hinfo)
            assert new_handle is None or new_handle == nhandle
            new_handle = nhandle
            UR.handle_map[old_handle] = new_handle

        if new_hinfo != {}:
            handles[new_handle] = new_hinfo

    # if it used to be a clean merge, preserve the line of clean merge heads
    index_point = None
    if clean_merge and force_new_cset:
        forced_cset = new_cset

        forced_cset['comment'] = '--- change created by cdvupgrade ---\n'

        bforced_cset = bencode(forced_cset)
        forced_point = sha.new(bforced_cset).digest()
        UR.new_repo.lcrepo.put(forced_point, bforced_cset, txn=UR.txn)

        index_point = forced_point

        new_cset = {
            'precursors': [forced_cset['precursors'][0], forced_point],
            'user': forced_cset['user'],
            'time': forced_cset['time'],
            'handles': {}
        }

    # calculate the new point name and write it out
    bnew_cset = bencode(new_cset)
    new_point = sha.new(bnew_cset).digest()
    UR.new_repo.lcrepo.put(new_point, bnew_cset, txn=UR.txn)

    UR.point_map[point] = new_point

    if index_point is None:
        index_point = new_point

    # now that we know the new point name, write out the indices
    for new_handle, index in indices.items():
        write_index(UR.new_repo, index_point, new_handle, index, UR.txn)

    # diff generation depends on history syncing
    named, modified = sync_history(UR.new_repo, new_point, UR.txn)

    for new_handle in modified:
        handle_contents_at_point(UR.new_repo, new_handle, new_point, UR.txn)

    return new_point
Example #17
def handle_contents_at_point(co, handle, point, txn, dcache=None):
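    # reconstruct the file contents for handle at point: walk its mini-DAG
    # depth-first so that a node's diff is applied only after all of its
    # precursors have been built, using refcounts to drop intermediate
    # results once they are no longer needed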
    if dcache is None:
        dcache = {}
    staticinfo = db_get(co, co.staticdb, handle, txn)
    if staticinfo['type'] != 'file':
        raise ValueError, 'no contents for non-file'

    change = handle_last_modified(co, co.contents, handle, point, txn)
    if change is None:
        return None

    hcache = {}
    cache = _mini_dag_refcount(co, handle, change, txn, info_cache=hcache)
    hfile = open(path.join(co.cpath, binascii.hexlify(handle)), 'rb')

    points = [change]
    while len(points):
        point = points[-1]

        # we may have already done this one
        if cache.has_key(point) and cache[point].has_key('info'):
            points.pop()
            continue

        # cache this, since we visit most nodes twice
        if hcache.has_key(point):
            hinfo = hcache[point]
        else:
            hinfo = bdecode(co.contents.dagdb.get(handle + point, txn=txn))
            hcache[point] = hinfo

        # check if we've got the precursors
        dirty = False
        for pre, foo in hinfo['precursors']:
            if not cache[pre].has_key('info'):
                dirty = True
                points.append(pre)
        if dirty:
            continue
        points.pop()

        # read the diff
        if dcache.has_key(point):
            diff = dcache[point]
        else:
            diff = _read_diff(hinfo, hfile)
        if diff is not None:
            diff = bdecode(zlib.decompress(diff))

        # put together the precursor list and decrement refcounts
        precursors = []
        for pre, foo in hinfo['precursors']:
            precursors.append(cache[pre]['info'])

            cache[pre]['refcount'] -= 1
            if cache[pre]['refcount'] == 0:
                del cache[pre]

        # finally, get the contents
        cache[point]['info'] = _handle_contents_at_point(point, hinfo,
                                                         precursors, diff)

    hfile.close()

    cache[change]['info']['type'] = staticinfo['type']
    return cache[change]['info']
Example #18
def _handle_name_at_point(co, handle, point, txn, dochecks=0):
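    # compute the name (and parent) of handle at point from its naming
    # mini-DAG; when dochecks is set, also validate the result: illegal
    # names, deleted or non-directory parents, name collisions and parent
    # loops all raise HistoryError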
    def walk_precursors(cset, dochecks):
        precursors, points = [], [point]
        for pre, index in cset['precursors']:
            foo = _handle_name_at_point(co, handle, pre, txn, dochecks=dochecks)
            if foo is None:
                continue
            points = dmerge(points, foo['points'])
            precursors.append(foo)
        return precursors, points

    cset = bdecode(co.names.dagdb.get(handle + point, txn=txn))
    if not cset.has_key('handle'):
        precursors, points = walk_precursors(cset, dochecks)
        state = _handle_name_from_precursors(precursors, 0)
    elif cset['handle'].has_key('delete'):
        precursors, points = walk_precursors(cset, dochecks)
        state = _handle_name_from_precursors(precursors, 1)
        state['delete'] = 1
    else:
        precursors, points = walk_precursors(cset, dochecks)
        state = {}
        state['name'] = cset['handle']['name']
        try:
            state['parent'] = cset['handle']['parent']
        except KeyError:
            assert handle == roothandle
            assert cset['handle'].has_key('add')
        state['rename point'] = [point]

    state['points'] = points

    if dochecks == 0:
        return state

    co.name_cache[handle + point] = state

    if state['name'] == '' and handle != roothandle:
        raise HistoryError, 'illegal name'
    if state['name'] == '.' or state['name'] == '..':
        raise HistoryError, 'illegal name'

    if state.has_key('delete'):
        if len(children_count(co, handle, point, txn)):
            raise HistoryError, 'non-empty directory can\'t be deleted'
        return state

    staticinfo = db_get(co, co.staticdb, handle, txn)
    if staticinfo['type'] == 'dir':
        try:
            if parent_loop_check(co, state['parent'], point, txn):
                raise HistoryError, 'parent loop'
        except KeyError:
            pass

    try:
        parentinfo = db_get(co, co.staticdb, state['parent'], txn)
        if parentinfo['type'] != 'dir':
            raise HistoryError, 'parent not a directory'

        parentstate = __handle_name_at_point(co, state['parent'], point, txn)
        if parentstate is None:
            raise HistoryError, 'parent not in repository'
        if parentstate.has_key('delete'):
            raise HistoryError, 'file committed with deleted parent'
        if len(name_use_count(co, state, point, txn)) != 1:
            raise HistoryError, 'name already in use'
        if state['name'] == 'CVILLE':
            raise HistoryError, 'illegal name'
    except KeyError:
        assert handle == roothandle

    return state
Example #19
def read_diff(co, handle, point, txn):
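    # fetch the stored diff (still zlib-compressed) for handle at point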
    hinfo = bdecode(co.contents.dagdb.get(handle + point, txn=txn))
    hfile = open(path.join(co.cpath, binascii.hexlify(handle)), 'rb')
    diff = _read_diff(hinfo, hfile)
    hfile.close()
    return diff
Example #20
def _sync_history(co, point, txn, cache=dict()):
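    # index a single changeset: assign it a branch, branch number and
    # generation in branchmapdb (extending an existing branch when a
    # precursor was that branch's tip), register newly added handles in
    # staticdb, then update the per-handle mini-DAGs for everything named
    # or modified here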
    pinfo = cache[point]

    # see if we can extend an existing branch
    pre, prebminfo = None, None
    generations, pre_important = [], []
    bminfo = {'precursors': pinfo['precursors']}
    for pre in bminfo['precursors']:
        prebminfo = db_get(co, co.branchmapdb, pre, txn)
        generations.append(prebminfo['generation'])

        if bminfo.has_key('branch'):
            continue

        binfo = db_get(co, co.branchdb, prebminfo['branch'], txn)
        if prebminfo['branchnum'] == binfo['last']:
            bminfo['branch'] = prebminfo['branch']
            bminfo['branchnum'] = prebminfo['branchnum'] + 1
            binfo['last'] += 1
            db_put(co, co.branchdb, bminfo['branch'], binfo, txn)

            pre_important.append(pre)

    # generation == distance to the root node; the ValueError case is the
    # root itself, which has no precursors
    try:
        bminfo['generation'] = max(generations) + 1
    except ValueError:
        bminfo['generation'] = 0

    # if we couldn't extend a branch, start a new one
    if not bminfo.has_key('branch'):
        bminfo['branch'] = bdecode(co.linforepo.get('branchmax', txn=txn)) + 1
        co.linforepo.put('branchmax', bencode(bminfo['branch']), txn=txn)
        bminfo['branchnum'] = 0

        try:
            # using the last precursor for this did the best empirically,
            # beating out both first precursor and largest branch number.
            binfo = {'last': 0,
                     'parent': prebminfo['branch'],
                     'parentnum': prebminfo['branchnum']}
            pre_important.append(pre)

        except TypeError:
            # special stuff for the rootnode
            assert bminfo['branch'] == 1
            binfo = {'last': 0}

        db_put(co, co.branchdb, bminfo['branch'], binfo, txn)

    db_put(co, co.branchmapdb, point, bminfo, txn)

    # put new files into staticdb
    for (handle, value) in pinfo['handles'].items():
        if value.has_key('add'):
            validate_handle(handle, pinfo['precursors'], value)
            db_put(co, co.staticdb, handle, {'type': value['add']['type']}, txn)

    # figure out which files were modified here and hand off to helpers
    named, modified = handles_in_branch(co, pre_important, [point], txn, cache=cache)

    pinfo['point'] = point
    _update_mini_dag(co, co.names, _update_helper_name,
                     named, pinfo, txn)
    _update_mini_dag(co, co.contents, _update_helper_content,
                     modified, pinfo, txn)

    return (named, modified)