def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = compare(repo, remotemarks, localmarks, dsthex=hex)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, bin(scid), status,
                            _("adding remote bookmark %s\n") % (b)))
        elif b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, scid[:12]))

    for b, scid, dcid in advsrc:
        changed.append((b, bin(scid), status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))
        else:
            snode = bin(scid)
            db = _diverge(ui, b, path, localmarks, snode)
            if db:
                changed.append((db, snode, warn,
                                _("divergent bookmark %s stored as %s\n") %
                                (b, db)))
            else:
                warn(_("warning: failed to assign numbered name "
                       "to divergent bookmark %s\n") % (b))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))
    for b, scid, dcid in differ:
        if b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, scid[:12]))

    if changed:
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)

def load(cls, repo):
    fp = repo.vfs(cls._filename)
    try:
        version = int(fp.readline().strip())

        if version != cls._version:
            raise error.Abort(_('this version of shelve is incompatible '
                                'with the version used in this repo'))
        name = fp.readline().strip()
        wctx = fp.readline().strip()
        pendingctx = fp.readline().strip()
        parents = [nodemod.bin(h) for h in fp.readline().split()]
        stripnodes = [nodemod.bin(h) for h in fp.readline().split()]
        branchtorestore = fp.readline().strip()
    finally:
        fp.close()

    obj = cls()
    obj.name = name
    obj.wctx = repo[nodemod.bin(wctx)]
    obj.pendingctx = repo[nodemod.bin(pendingctx)]
    obj.parents = parents
    obj.stripnodes = stripnodes
    obj.branchtorestore = branchtorestore

    return obj

def load(cls, repo):
    fp = repo.vfs(cls._filename)
    try:
        version = int(fp.readline().strip())

        if version != cls._version:
            raise error.Abort(_('this version of shelve is incompatible '
                                'with the version used in this repo'))
        name = fp.readline().strip()
        wctx = nodemod.bin(fp.readline().strip())
        pendingctx = nodemod.bin(fp.readline().strip())
        parents = [nodemod.bin(h) for h in fp.readline().split()]
        stripnodes = [nodemod.bin(h) for h in fp.readline().split()]
        branchtorestore = fp.readline().strip()
    except (ValueError, TypeError) as err:
        raise error.CorruptedState(str(err))
    finally:
        fp.close()

    try:
        obj = cls()
        obj.name = name
        obj.wctx = repo[wctx]
        obj.pendingctx = repo[pendingctx]
        obj.parents = parents
        obj.stripnodes = stripnodes
        obj.branchtorestore = branchtorestore
    except error.RepoLookupError as err:
        raise error.CorruptedState(str(err))

    return obj

def load(cls, repo):
    fp = repo.opener(cls._filename)
    try:
        version = int(fp.readline().strip())

        if version != cls._version:
            raise util.Abort(_('this version of shelve is incompatible '
                               'with the version used in this repo'))
        name = fp.readline().strip()
        wctx = fp.readline().strip()
        pendingctx = fp.readline().strip()
        parents = [bin(h) for h in fp.readline().split()]
        stripnodes = [bin(h) for h in fp.readline().split()]
        unknownfiles = fp.readline()[:-1].split('\0')
    finally:
        fp.close()

    obj = cls()
    obj.name = name
    obj.wctx = repo[bin(wctx)]
    obj.pendingctx = repo[bin(pendingctx)]
    obj.parents = parents
    obj.stripnodes = stripnodes
    obj.unknownfiles = unknownfiles

    return obj

def get_refs(self, remote):
    self.export_commits()
    client, path = self.get_transport_and_path(remote)
    old_refs = {}
    new_refs = {}

    def changed(refs):
        old_refs.update(refs)
        to_push = set(self.local_heads().values() + self.tags.values())
        new_refs.update(self.get_changed_refs(refs, to_push, True))
        # don't push anything
        return {}

    try:
        client.send_pack(path, changed, lambda have, want: [])

        changed_refs = [ref for ref, sha in new_refs.iteritems()
                        if sha != old_refs.get(ref)]
        new = [bin(self.map_hg_get(new_refs[ref]))
               for ref in changed_refs]
        old = {}
        for r in old_refs:
            old_ref = self.map_hg_get(old_refs[r])
            if old_ref:
                old[bin(old_ref)] = 1

        return old, new
    except (HangupException, GitProtocolError) as e:
        raise hgutil.Abort(_("git remote error: ") + str(e))

def parse_manifest(mfdict, fdict, lines):
    for l in lines.splitlines():
        f, n = l.split('\0')
        if len(n) > 40:
            fdict[f] = n[40:]
            mfdict[f] = bin(n[:40])
        else:
            mfdict[f] = bin(n)

def fromstorage(cls, line):
    (time, user, command, namespace, name,
     oldhashes, newhashes) = line.split('\n')
    timestamp, tz = time.split()
    timestamp, tz = float(timestamp), int(tz)
    oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
    newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
    return cls(
        (timestamp, tz), user, command, namespace, name,
        oldhashes, newhashes)

def __init__(self, repo):
    self._repo = repo
    self._vfs = scmutil.vfs(repo.vfs.join('reviewboard'), audit=False)

    # Maps review identifiers to identifierrecord instances.
    self._identifiers = {}
    # Maps parent review id to identifierrecord instances. Shares the same
    # object instances as _identifiers.
    self._prids = {}

    # Maps nodes to noderecord instances.
    self._nodes = {}

    self.baseurl = None
    self.remoteurl = None

    try:
        for line in repo.vfs('reviews'):
            line = line.strip()
            if not line:
                continue

            fields = line.split(' ', 1)
            if len(fields) != 2:
                repo.ui.warn(_('malformed line in reviews file: %r\n') %
                             line)
                continue

            t, d = fields

            # Identifier to parent review ID.
            if t == 'p':
                ident, rrid = d.split(' ', 1)
                r = identifierrecord(parentrrid=rrid)
                self._identifiers[ident] = r
                self._prids[rrid] = r
            # Node to review id.
            elif t == 'c':
                node, rid = d.split(' ', 1)
                assert len(node) == 40
                r = self._nodes.setdefault(bin(node), noderecord())
                r.rrids.add(rid)
            # Node to parent id.
            elif t == 'pc':
                node, pid = d.split(' ', 1)
                assert len(node) == 40
                self._nodes[bin(node)].parentrrids.add(pid)
            elif t == 'u':
                self.baseurl = d
            elif t == 'r':
                self.remoteurl = d
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

def _prunedetectingmarkers(self, markers):
    for m in markers:
        if not m[1]:  # no successors
            meta = obsolete.decodemeta(m[3])
            if 'p1' in meta:
                p1 = node.bin(meta['p1'])
                self.prunedchildren.setdefault(p1, set()).add(m)
            if 'p2' in meta:
                p2 = node.bin(meta['p2'])
                self.prunedchildren.setdefault(p2, set()).add(m)
        yield m

def _verifyandtransform(cls, d):
    """Some basic shelvestate syntactic verification and transformation"""
    try:
        d['originalwctx'] = nodemod.bin(d['originalwctx'])
        d['pendingctx'] = nodemod.bin(d['pendingctx'])
        d['parents'] = [nodemod.bin(h)
                        for h in d['parents'].split(' ')]
        d['nodestoremove'] = [nodemod.bin(h)
                              for h in d['nodestoremove'].split(' ')]
    except (ValueError, TypeError, KeyError) as err:
        raise error.CorruptedState(str(err))

def putcommit(self, files, copies, parents, commit, source, revmap):
    files = dict(files)

    def getfilectx(repo, memctx, f):
        v = files[f]
        data, mode = source.getfile(f, v)
        if f == '.hgtags':
            data = self._rewritetags(source, revmap, data)
        return context.memfilectx(f, data, 'l' in mode, 'x' in mode,
                                  copies.get(f))

    pl = []
    for p in parents:
        if p not in pl:
            pl.append(p)
    parents = pl
    nparents = len(parents)
    if self.filemapmode and nparents == 1:
        m1node = self.repo.changelog.read(bin(parents[0]))[0]
        parent = parents[0]

    # pad the parent list to two entries; appending twice covers the
    # zero-parent case
    if len(parents) < 2:
        parents.append(nullid)
    if len(parents) < 2:
        parents.append(nullid)
    p2 = parents.pop(0)

    text = commit.desc
    extra = commit.extra.copy()
    if self.branchnames and commit.branch:
        extra['branch'] = commit.branch
    if commit.rev:
        extra['convert_revision'] = commit.rev

    while parents:
        p1 = p2
        p2 = parents.pop(0)
        ctx = context.memctx(self.repo, (p1, p2), text, files.keys(),
                             getfilectx, commit.author, commit.date, extra)
        self.repo.commitctx(ctx)
        text = "(octopus merge fixup)\n"
        p2 = hex(self.repo.changelog.tip())

    if self.filemapmode and nparents == 1:
        man = self.repo.manifest
        mnode = self.repo.changelog.read(bin(p2))[0]
        closed = 'close' in commit.extra
        if not closed and not man.cmp(m1node, man.revision(mnode)):
            self.ui.status(_("filtering out empty revision\n"))
            self.repo.rollback()
            return parent
    return p2

def _obsdeserialise(flike):
    """read a file like object serialised with _obsserialise

    this deserialises into a {subject -> objects} mapping

    this was the very first format ever."""
    rels = {}
    for line in flike:
        subhex, objhex = line.split()
        subnode = bin(subhex)
        if subnode == nullid:
            subnode = None
        rels.setdefault(subnode, set()).add(bin(objhex))
    return rels

def cmd_filelist(self, n):
    n = node.bin(n)
    files = self.hgrepo.manifest.read(
        self.hgrepo.changelog.read(n)[0]).keys()
    for f in files:
        self.outs.write("%s\0" % f)
    self.outs.write("\0\n")
    self.outs.flush()

def prefetch(self, fileids, force=False, fetchdata=True,
             fetchhistory=False):
    """downloads the given file versions to the cache"""
    repo = self.repo
    idstocheck = []
    for file, id in fileids:
        # hack
        # - we don't use .hgtags
        # - workingctx produces ids with length 42,
        #   which we skip since they aren't in any cache
        if (file == '.hgtags' or len(id) == 42
            or not repo.shallowmatch(file)):
            continue

        idstocheck.append((file, bin(id)))

    datastore = self.datastore
    historystore = self.historystore
    if force:
        datastore = unioncontentstore(*repo.shareddatastores)
        historystore = unionmetadatastore(*repo.sharedhistorystores)

    missingids = set()
    if fetchdata:
        missingids.update(datastore.getmissing(idstocheck))
    if fetchhistory:
        missingids.update(historystore.getmissing(idstocheck))

    # partition missing nodes into nullid and not-nullid so we can
    # warn about this filtering potentially shadowing bugs.
    nullids = len([None for unused, id in missingids if id == nullid])
    if nullids:
        missingids = [(f, id) for f, id in missingids if id != nullid]
        repo.ui.develwarn(
            ('remotefilelog not fetching %d null revs'
             ' - this is likely hiding bugs' % nullids),
            config='remotefilelog-ext')
    if missingids:
        global fetches, fetched, fetchcost
        fetches += 1

        # We want to be able to detect excess individual file downloads, so
        # let's log that information for debugging.
        if fetches >= 15 and fetches < 18:
            if fetches == 15:
                fetchwarning = self.ui.config('remotefilelog',
                                              'fetchwarning')
                if fetchwarning:
                    self.ui.warn(fetchwarning + '\n')
            self.logstacktrace()
        missingids = [(file, hex(id)) for file, id in missingids]
        fetched += len(missingids)
        start = time.time()
        missingids = self.request(missingids)
        if missingids:
            raise error.Abort(_("unable to download %d files") %
                              len(missingids))
        fetchcost += time.time() - start
    self._lfsprefetch(fileids)

def bmrevset(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.
    """
    # i18n: "bookmark" is a keyword
    args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = revset.getstring(args[0],
                              # i18n: "bookmark" is a keyword
                              _('the argument to bookmark must be a string'))
        bmrev = listbookmarks(repo).get(bm, None)
        if bmrev:
            bmrev = repo.changelog.rev(bin(bmrev))
        return [r for r in subset if r == bmrev]
    bms = set([repo.changelog.rev(bin(r))
               for r in listbookmarks(repo).values()])
    return [r for r in subset if r in bms]

def __init__(self, repo, sha):
    self.repo = repo
    if not isinstance(sha, basestring):
        sha = sha.hex()
    self.commit = repo.handler.git.get_object(sha)
    self._overlay = getattr(repo, 'gitoverlay', repo)
    self._rev = self._overlay.rev(bin(self.commit.id))

def forbidcrlf(ui, repo, hooktype, node, **kwargs):
    halt = False
    for rev in xrange(repo.changelog.rev(bin(node)),
                      repo.changelog.count()):
        c = repo.changectx(rev)
        for f in c.files():
            if f not in c:
                continue
            data = c[f].data()
            if '\0' not in data and '\r\n' in data:
                if not halt:
                    ui.warn(_('Attempt to commit or push text file(s) '
                              'using CRLF line endings\n'))
                ui.warn(_('in %s: %s\n') % (short(c.node()), f))
                halt = True
    if halt and hooktype == 'pretxnchangegroup':
        ui.warn(_('\nTo prevent this mistake in your local repository,\n'
                  'add to Mercurial.ini or .hg/hgrc:\n'
                  '\n'
                  '[hooks]\n'
                  'pretxncommit.crlf = python:hgext.win32text.forbidcrlf\n'
                  '\n'
                  'and also consider adding:\n'
                  '\n'
                  '[extensions]\n'
                  'hgext.win32text =\n'
                  '[encode]\n'
                  '** = cleverencode:\n'
                  '[decode]\n'
                  '** = cleverdecode:\n'))
    return halt

def undump(self, xr):
    self._valid = True
    a = xr.attributes()
    self._root = hglib.fromunicode(a.value('', 'root').toString())
    self._shortname = unicode(a.value('', 'shortname').toString())
    self._basenode = node.bin(str(a.value('', 'basenode').toString()))
    RepoTreeItem.undump(self, xr)

def hook(ui, repo, hooktype, node=None, url=None, **kwargs):
    """ send CIA notification """
    def sendmsg(cia, ctx):
        msg = ciamsg(cia, ctx).xml()
        if cia.dryrun:
            ui.write(msg)
        elif cia.ciaurl.startswith('mailto:'):
            if not cia.emailfrom:
                raise util.Abort(_('email.from must be defined when '
                                   'sending by email'))
            cia.sendemail(cia.ciaurl[7:], msg)
        else:
            cia.sendrpc(msg)

    n = bin(node)
    cia = hgcia(ui, repo)
    if not cia.user:
        ui.debug('cia: no user specified')
        return
    if not cia.project:
        ui.debug('cia: no project specified')
        return
    if hooktype == 'changegroup':
        start = repo.changelog.rev(n)
        end = len(repo.changelog)
        for rev in xrange(start, end):
            n = repo.changelog.node(rev)
            ctx = repo.changectx(n)
            sendmsg(cia, ctx)
    else:
        ctx = repo.changectx(n)
        sendmsg(cia, ctx)

def sync_repository(self, repository):
    """
    Synchronize the whole Mercurial repository changelog into Trac's DB
    revision table.
    """
    self.initialize_repository(repository)

    repo_nodes = set([hex(self.repository.changelog.node(rev))
                      for rev in self.repository.changelog])
    self.cursor.execute("SELECT rev FROM revision WHERE repos = %s",
                        (self.repository_id, ))
    sql_nodes = set([i[0] for i in self.cursor.fetchall()])

    add_nodes = [self._get_ctx_from_repo(bin(i))
                 for i in repo_nodes - sql_nodes]
    del_nodes = [(self.repository_id, i) for i in sql_nodes - repo_nodes]

    sql_string = """
        INSERT INTO revision (repos, rev, time, author, message)
        VALUES (%s, %s, %s, %s, %s)
    """
    # XXX trac.db.utils executemany needs a fix: its args should be an
    # iterator, not a list; materializing a list here is slow.
    self.cursor.executemany(sql_string, list(add_nodes))
    self.db.commit()
    # XXX same executemany list issue as above.
    self.cursor.executemany(
        "DELETE FROM revision WHERE repos = %s AND rev = %s",
        list(del_nodes))
    self.db.commit()

def getkeys(ui, repo, mygpg, sigdata, context):
    """get the keys that signed the data"""
    fn, ln = context
    node, version, sig = sigdata
    prefix = "%s:%d" % (fn, ln)
    node = hgnode.bin(node)
    data = node2txt(repo, node, version)
    sig = binascii.a2b_base64(sig)
    keys = mygpg.verify(data, sig)

    validkeys = []
    # warn for expired key and/or sigs
    for key in keys:
        if key[0] == "ERRSIG":
            ui.write(_("%s Unknown key ID \"%s\"\n")
                     % (prefix, shortkey(ui, key[1][:15])))
            continue
        if key[0] == "BADSIG":
            ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
            continue
        if key[0] == "EXPSIG":
            ui.write(_("%s Note: Signature has expired"
                       " (signed by: \"%s\")\n") % (prefix, key[2]))
        elif key[0] == "EXPKEYSIG":
            ui.write(_("%s Note: This key has expired"
                       " (signed by: \"%s\")\n") % (prefix, key[2]))
        validkeys.append((key[1], key[2], key[3]))
    return validkeys

def getkeys(ui, repo, mygpg, sigdata, context):
    """get the keys that signed the data"""
    fn, ln = context
    node, version, sig = sigdata
    prefix = "%s:%d" % (fn, ln)
    node = hgnode.bin(node)
    data = node2txt(repo, node, version)
    sig = binascii.a2b_base64(sig)
    err, keys = mygpg.verify(data, sig)
    if err:
        ui.warn("%s:%d %s\n" % (fn, ln, err))
        return None

    validkeys = []
    # warn for expired key and/or sigs
    for key in keys:
        if key[0] == "BADSIG":
            ui.write(_('%s Bad signature from "%s"\n') % (prefix, key[2]))
            continue
        if key[0] == "EXPSIG":
            ui.write(_("%s Note: Signature has expired"
                       ' (signed by: "%s")\n') % (prefix, key[2]))
        elif key[0] == "EXPKEYSIG":
            ui.write(_("%s Note: This key has expired"
                       ' (signed by: "%s")\n') % (prefix, key[2]))
        validkeys.append((key[1], key[2], key[3]))
    return validkeys

def firefoxtrees(self):
    trees = {}

    try:
        with open(self._firefoxtreespath, 'rb') as fh:
            data = fh.read()
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        data = None

    if not data:
        return trees

    for line in data.splitlines():
        line = line.strip()
        if not line:
            continue

        tree, hexnode = line.split()
        # Filter out try repos because they are special.
        if tree in TRY_TREES:
            continue

        trees[tree] = bin(hexnode)

    return trees

def exchangepullpushlog(orig, pullop):
    """This is called during pull to fetch pushlog data."""
    # check stepsdone for future compatibility with bundle2 pushlog exchange.
    res = orig(pullop)

    if 'pushlog' in pullop.stepsdone or not pullop.remote.capable('pushlog'):
        return res

    repo = pullop.repo
    fetchfrom = repo.pushlog.lastpushid() + 1
    lines = pullop.remote._call('pushlog', firstpush=str(fetchfrom))
    lines = iter(lines.splitlines())

    statusline = lines.next()
    if statusline[0] == '0':
        raise Abort('remote error fetching pushlog: %s' % lines.next())
    elif statusline != '1':
        raise Abort('error fetching pushlog: unexpected response: %s\n' %
                    statusline)

    pushes = []
    for line in lines:
        pushid, who, when, nodes = line.split(' ', 3)
        nodes = [bin(n) for n in nodes.split()]
        pushes.append((int(pushid), who, int(when), nodes))

    repo.pushlog.recordpushes(pushes)
    repo.ui.status('added %d pushes\n' % len(pushes))

    return res

def buildtemprevlog(repo, file):
    # get filename key
    filekey = util.sha1(file).hexdigest()
    filedir = os.path.join(repo.path, 'store/data', filekey)

    # sort all entries based on linkrev
    fctxs = []
    for filenode in os.listdir(filedir):
        fctxs.append(repo.filectx(file, fileid=bin(filenode)))

    fctxs = sorted(fctxs, key=lambda x: x.linkrev())

    # add to revlog
    temppath = repo.sjoin('data/temprevlog.i')
    if os.path.exists(temppath):
        os.remove(temppath)
    r = filelog.filelog(repo.sopener, 'temprevlog')

    class faket(object):
        def add(self, a, b, c):
            pass
    t = faket()
    for fctx in fctxs:
        if fctx.node() not in repo:
            continue

        p = fctx.filelog().parents(fctx.filenode())
        meta = {}
        if fctx.renamed():
            meta['copy'] = fctx.renamed()[0]
            meta['copyrev'] = hex(fctx.renamed()[1])

        r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])

    return r

def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
    '''send email notifications to interested subscribers.

    if used as changegroup hook, send one email for all changesets in
    changegroup. else send one email per changeset.'''
    n = notifier(ui, repo, hooktype)
    if not n.subs:
        ui.debug(_('notify: no subscribers to repo %s\n') % n.root)
        return
    if n.skipsource(source):
        ui.debug(_('notify: changes have source "%s" - skipping\n') %
                 source)
        return
    node = bin(node)
    ui.pushbuffer()
    if hooktype == 'changegroup':
        start = repo.changelog.rev(node)
        end = repo.changelog.count()
        count = end - start
        for rev in xrange(start, end):
            n.node(repo.changelog.node(rev))
        n.diff(node, repo.changelog.tip())
    else:
        count = 1
        n.node(node)
        n.diff(node, node)
    data = ui.popbuffer()
    n.send(node, count, data)

def revset_svnrev(repo, subset, x):
    '''``svnrev(number)``
    Select changesets that originate in the given Subversion revision.
    '''
    args = revset.getargs(x, 1, 1, "svnrev takes one argument")

    rev = revset.getstring(args[0],
                           "the argument to svnrev() must be a number")
    try:
        revnum = int(rev)
    except ValueError:
        raise error.ParseError("the argument to svnrev() must be a number")

    rev = rev + ' '
    revs = []
    meta = repo.svnmeta(skiperrorcheck=True)
    try:
        for l in maps.RevMap.readmapfile(meta.revmap_file, missingok=False):
            if l.startswith(rev):
                n = l.split(' ', 2)[1]
                r = repo[node.bin(n)].rev()
                if r in subset:
                    revs.append(r)
        return revs
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    raise hgutil.Abort("svn metadata is missing - "
                       "run 'hg svn rebuildmeta' to reconstruct it")

def bundle2pushkey(orig, op, part):
    replacements = dict(sum([record.items()
                             for record in op.records[rebaseparttype]],
                            []))

    namespace = pushkey.decode(part.params['namespace'])
    if namespace == 'phases':
        key = pushkey.decode(part.params['key'])
        part.params['key'] = pushkey.encode(replacements.get(key, key))
    if namespace == 'bookmarks':
        new = pushkey.decode(part.params['new'])
        part.params['new'] = pushkey.encode(replacements.get(new, new))
        serverbin = op.repo._bookmarks.get(part.params['key'])
        clienthex = pushkey.decode(part.params['old'])

        if serverbin and clienthex:
            cl = op.repo.changelog
            revserver = cl.rev(serverbin)
            revclient = cl.rev(bin(clienthex))
            if revclient in cl.ancestors([revserver]):
                # if the client's bookmark origin is lagging behind the
                # server's location for that bookmark (usual for pushrebase),
                # then update the old location to match the real location
                #
                # TODO: We would prefer to only do this for pushrebase pushes
                # but that isn't straightforward so we just do it always here.
                # This forbids moving bookmarks backwards from clients.
                part.params['old'] = pushkey.encode(hex(serverbin))

    return orig(op, part)

def _getcommonheads(repo):
    commonheads = []
    f = _getfile(repo, hgheadsfile)
    if f:
        commonheads = f.readlines()
        commonheads = [bin(x.strip()) for x in commonheads]
    return commonheads

def update_hg_bookmarks(self, refs):
    try:
        oldbm = getattr(bookmarks, 'parse', None)
        if oldbm:
            bms = bookmarks.parse(self.repo)
        else:
            bms = self.repo._bookmarks

        heads = dict([(ref[11:], refs[ref]) for ref in refs
                      if ref.startswith('refs/heads/')])

        for head, sha in heads.iteritems():
            # refs contains all the refs in the server, not just
            # the ones we are pulling
            if sha not in self.git.object_store:
                continue
            hgsha = bin(self.map_hg_get(sha))
            if head not in bms:
                # new branch
                bms[head] = hgsha
            else:
                bm = self.repo[bms[head]]
                if bm.ancestor(self.repo[hgsha]) == bm:
                    # fast forward
                    bms[head] = hgsha
        if heads:
            if oldbm:
                bookmarks.write(self.repo, bms)
            else:
                self.repo._bookmarks = bms
                bookmarks.write(self.repo)
    except AttributeError:
        self.ui.warn(_('creating bookmarks failed, do you have'
                       ' bookmarks enabled?\n'))

def gitrefs(self):
    tagfile = self.join('git-remote-refs')
    if os.path.exists(tagfile):
        tf = open(tagfile, 'rb')
        tagdata = tf.read().split('\n')
        td = [line.split(' ', 1) for line in tagdata if line]
        return dict([(name, bin(sha)) for sha, name in td])
    return {}

def generatefiles(self, changedfiles, linknodes, commonrevs, source):
    if requirement in self._repo.requirements:
        repo = self._repo
        if isinstance(repo, bundlerepo.bundlerepository):
            # If the bundle contains filelogs, we can't pull from it, since
            # bundlerepo is heavily tied to revlogs. Instead, require that
            # the user use unbundle.
            # Force load the filelog data.
            bundlerepo.bundlerepository.file(repo, 'foo')
            if repo.bundlefilespos:
                raise util.Abort("cannot pull from full bundles",
                                 hint="use `hg unbundle` instead")
            return []
        filestosend = self.shouldaddfilegroups(source)
        if filestosend == NoFiles:
            changedfiles = list(
                [f for f in changedfiles if not repo.shallowmatch(f)])
        else:
            files = []
            # Prefetch the revisions being bundled
            for i, fname in enumerate(sorted(changedfiles)):
                filerevlog = repo.file(fname)
                linkrevnodes = linknodes(filerevlog, fname)
                # Normally we'd prune the linkrevnodes first,
                # but that would perform the server fetches one by one.
                for fnode, cnode in list(linkrevnodes.iteritems()):
                    # Adjust linknodes so remote file revisions aren't sent
                    if filestosend == LocalFiles:
                        localkey = fileserverclient.getlocalkey(fname,
                                                                hex(fnode))
                        localpath = repo.sjoin(os.path.join("data",
                                                            localkey))
                        if (not os.path.exists(localpath)
                            and repo.shallowmatch(fname)):
                            del linkrevnodes[fnode]
                        else:
                            files.append((fname, hex(fnode)))
                    else:
                        files.append((fname, hex(fnode)))

            repo.fileservice.prefetch(files)

            # Prefetch the revisions that are going to be diffed against
            prevfiles = []
            for fname, fnode in files:
                if repo.shallowmatch(fname):
                    fnode = bin(fnode)
                    filerevlog = repo.file(fname)
                    ancestormap = filerevlog.ancestormap(fnode)
                    p1, p2, linknode, copyfrom = ancestormap[fnode]
                    if p1 != nullid:
                        prevfiles.append((copyfrom or fname, hex(p1)))

            repo.fileservice.prefetch(prevfiles)

    return super(shallowcg1packer, self).generatefiles(changedfiles,
                                                       linknodes,
                                                       commonrevs, source)

def exchangepullpushlog(orig, pullop):
    """This is called during pull to fetch pushlog data.

    The goal of this function is to replicate the entire pushlog. This is
    in contrast to replicating only the pushlog data for changesets the
    client has pulled. Put another way, this attempts complete replication
    as opposed to partial, hole-y replication.
    """
    # check stepsdone for future compatibility with bundle2 pushlog exchange.
    res = orig(pullop)

    if 'pushlog' in pullop.stepsdone or not pullop.remote.capable('pushlog'):
        return res

    repo = pullop.repo
    fetchfrom = repo.pushlog.lastpushid() + 1
    lines = pullop.remote._call('pushlog', firstpush=str(fetchfrom))
    lines = iter(lines.splitlines())

    statusline = lines.next()
    if statusline[0] == '0':
        raise Abort('remote error fetching pushlog: %s' % lines.next())
    elif statusline != '1':
        raise Abort('error fetching pushlog: unexpected response: %s\n' %
                    statusline)

    pushes = []
    for line in lines:
        pushid, who, when, nodes = line.split(' ', 3)
        nodes = [bin(n) for n in nodes.split()]

        # We stop processing if there is a reference to an unknown changeset.
        # This can happen in a few scenarios.
        #
        # Since the server streams *all* pushlog entries (from a starting
        # number), it could send pushlog entries for changesets the client
        # didn't request or were pushed since the client started pulling.
        #
        # If the remote repo contains obsolete changesets, we may see a
        # reference to a hidden changeset.
        #
        # This is arguably not the desirable behavior: pushlog replication
        # should be robust. However, doing things this way helps defend
        # against pushlog "corruption" since inserting references to unknown
        # changesets into the database is dangerous.
        try:
            [repo[n] for n in nodes]
        except error.RepoLookupError:
            repo.ui.warn(
                'received pushlog entry for unknown changeset; ignoring\n')
            break

        pushes.append((int(pushid), who, int(when), nodes))

    repo.pushlog.recordpushes(pushes, tr=pullop.trmanager.transaction())
    repo.ui.status('added %d pushes\n' % len(pushes))

    return res

def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = compare(repo, remotemarks, localmarks, dsthex=hex)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, bin(scid), status,
                            _("adding remote bookmark %s\n") % (b)))
    for b, scid, dcid in advsrc:
        changed.append((b, bin(scid), status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))
        else:
            db = _diverge(ui, b, path, localmarks)
            changed.append((db, bin(scid), warn,
                            _("divergent bookmark %s stored as %s\n")
                            % (b, db)))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))
    if changed:
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)

def __init__(self, repo, sha, maybe_filtered=True):
    # Can't store this in self._repo because the base class uses that field
    self._hgrepo = repo
    if not isinstance(sha, basestring):
        sha = sha.hex()
    self.commit = repo.handler.git.get_object(_maybehex(sha))
    self._overlay = getattr(repo, 'gitoverlay', repo)
    self._rev = self._overlay.rev(bin(self.commit.id))
    self._maybe_filtered = maybe_filtered

def _findtags(self):
    (tags, tagtypes) = super(hgrepo, self)._findtags()

    git = GitHandler(self, self.ui)
    for tag, rev in git.tags.iteritems():
        tags[tag] = bin(rev)
        tagtypes[tag] = 'git'

    return (tags, tagtypes)

def _makemaps(self, commits, refs):
    baserev = self.handler.repo[b'tip'].rev()
    self.revmap = {}
    self.nodemap = {}
    for i, n in enumerate(commits):
        rev = baserev + i + 1
        self.revmap[n] = rev
        self.nodemap[rev] = n

    self.refmap = {}
    self.tagmap = {}
    for ref in refs:
        if ref.startswith(LOCAL_BRANCH_PREFIX):
            refname = ref[len(LOCAL_BRANCH_PREFIX) :]
            self.refmap.setdefault(bin(refs[ref]), []).append(refname)
        elif ref.startswith(LOCAL_TAG_PREFIX):
            tagname = ref[len(LOCAL_TAG_PREFIX) :]
            self.tagmap.setdefault(bin(refs[ref]), []).append(tagname)

def __init__(self, repo, path, changeid=None, fileid=None,
             filelog=None, changectx=None, ancestormap=None):
    if fileid == nullrev:
        fileid = nullid
    if fileid and len(fileid) == 40:
        fileid = bin(fileid)
    super(remotefilectx, self).__init__(repo, path, changeid,
                                        fileid, filelog, changectx)
    self._ancestormap = ancestormap

def putbookmarks(self, updatedbookmark):
    if not len(updatedbookmark):
        return

    self.ui.status(_("updating bookmarks\n"))
    destmarks = self.repo._bookmarks
    for bookmark in updatedbookmark:
        destmarks[bookmark] = bin(updatedbookmark[bookmark])
    destmarks.write()

def node(self, r):
    if r == nullrev:
        return nullid
    t = self._db.execute(
        'SELECT node FROM changelog WHERE rev = ?', (r,)
    ).fetchone()
    if t is None:
        raise error.LookupError(r, b'00changelog.i', _(b'no node'))
    return bin(t[0])

def _makemaps(self, commits, refs):
    baserev = self.handler.repo['tip'].rev()
    self.revmap = {}
    self.nodemap = {}
    for i, n in enumerate(commits):
        rev = baserev + i + 1
        self.revmap[n] = rev
        self.nodemap[rev] = n

    self.refmap = {}
    self.tagmap = {}
    for ref in refs:
        if ref.startswith('refs/heads/'):
            refname = ref[11:]
            self.refmap.setdefault(bin(refs[ref]), []).append(refname)
        elif ref.startswith('refs/tags/'):
            tagname = ref[10:]
            self.tagmap.setdefault(bin(refs[ref]), []).append(tagname)

def lookup(self, node):
    if len(node) == 40:
        node = bin(node)
    if len(node) != 20:
        raise error.LookupError(
            node, self.filename, _(b'invalid lookup input')
        )

    return node

def _findtags(self):
    (tags, tagtypes) = super(hgrepo, self)._findtags()

    for tag, rev in self.githandler.tags.iteritems():
        tags[tag] = bin(rev)
        tagtypes[tag] = 'git'
    tags.update(self.githandler.remote_refs)
    return (tags, tagtypes)

def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
    # read config parameters
    master = ui.config('hgbuildbot', 'master')
    if master:
        branchtype = ui.config('hgbuildbot', 'branchtype')
        branch = ui.config('hgbuildbot', 'branch')
    else:
        ui.write("* You must add a [hgbuildbot] section to .hg/hgrc in "
                 "order to use the buildbot hook\n")
        return

    if branch is None:
        if branchtype is not None:
            if branchtype == 'dirname':
                branch = os.path.basename(os.getcwd())
            if branchtype == 'inrepo':
                branch = repo.workingctx().branch()

    if hooktype == 'changegroup':
        s = sendchange.Sender(master, None)
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)

        # process changesets
        def _send(res, c):
            ui.status("rev %s sent\n" % c['revision'])
            return s.send(c['branch'], c['revision'], c['comments'],
                          c['files'], c['username'])

        node = bin(node)
        start = repo.changelog.rev(node)
        end = repo.changelog.count()
        for rev in xrange(start, end):
            # send changeset
            n = repo.changelog.node(rev)
            changeset = repo.changelog.extract(repo.changelog.revision(n))
            change = {
                'master': master,
                # note: this is more likely to be a full email address,
                # which would make the left-hand "Changes" column kind of
                # wide. The buildmaster should probably be improved to
                # display an abbreviation of the username.
                'username': changeset[1],
                'revision': hex(n),
                'comments': changeset[4],
                'files': changeset[3],
                'branch': branch,
            }
            d.addCallback(_send, change)

        d.addCallbacks(s.printSuccess, s.printFailure)
        d.addBoth(s.stop)
        s.run()
    else:
        ui.status(_('hgbuildbot: hook %s not supported\n') % hooktype)
        return

def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
    # text passed to "_createfileblob" does not include filelog metadata
    header = shallowutil.buildfileblobheader(len(text), flags)
    data = b"%s\0%s" % (header, text)

    realp1 = p1
    copyfrom = b""
    if meta and b'copy' in meta:
        copyfrom = meta[b'copy']
        realp1 = bin(meta[b'copyrev'])

    data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

    visited = set()

    pancestors = {}
    queue = []
    if realp1 != nullid:
        p1flog = self
        if copyfrom:
            p1flog = remotefilelog(self.opener, copyfrom, self.repo)

        pancestors.update(p1flog.ancestormap(realp1))
        queue.append(realp1)
        visited.add(realp1)
    if p2 != nullid:
        pancestors.update(self.ancestormap(p2))
        queue.append(p2)
        visited.add(p2)

    ancestortext = b""

    # add the ancestors in topological order
    while queue:
        c = queue.pop(0)
        pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

        pacopyfrom = pacopyfrom or b''
        ancestortext += b"%s%s%s%s%s\0" % (
            c, pa1, pa2, ancestorlinknode, pacopyfrom,
        )

        if pa1 != nullid and pa1 not in visited:
            queue.append(pa1)
            visited.add(pa1)

        if pa2 != nullid and pa2 not in visited:
            queue.append(pa2)
            visited.add(pa2)

    data += ancestortext

    return data

def _getUpstreamChangesets(self, sincerev):
    """Fetch new changesets from the source"""
    repo = self._getRepo()

    self._hgCommand('pull', 'default')

    from mercurial.node import bin
    for rev in xrange(repo.changelog.rev(bin(sincerev)) + 1,
                      repo.changelog.count()):
        yield self._changesetForRevision(repo, str(rev))

def changegroupsubset(repo, req):
    req.respond(HTTP_OK, HGTYPE)
    bases = []
    heads = []
    if 'bases' in req.form:
        bases = [bin(x) for x in req.form['bases'][0].split(' ')]
    if 'heads' in req.form:
        heads = [bin(x) for x in req.form['heads'][0].split(' ')]
    z = zlib.compressobj()
    f = repo.changegroupsubset(bases, heads, 'serve')
    while 1:
        chunk = f.read(4096)
        if not chunk:
            break
        yield z.compress(chunk)
    yield z.flush()

def addtree(tree, dirname):
    for entry in tree.iteritems():
        if entry.mode & 040000:
            # expand directory
            subtree = self.repo.handler.git.get_object(entry.sha)
            addtree(subtree, dirname + entry.path + '/')
        else:
            path = dirname + entry.path
            self._map[path] = bin(entry.sha)
            self._flagmap[path] = entry.mode

def addtree(tree, dirname):
    for entry in tree.entries():
        if entry[0] & 040000:
            # expand directory
            subtree = self.repo.handler.git.get_object(entry[2])
            addtree(subtree, dirname + entry[1] + '/')
        else:
            path = dirname + entry[1]
            self._map[path] = bin(entry[2])
            self._flagmap[path] = entry[0]

def node(self, rev):
    maybe = self._db.execute(
        '''SELECT filenode FROM changedfiles
           INNER JOIN changelog ON changelog.node = changedfiles.node
           WHERE changelog.rev = ? AND filename = ?
        ''',
        (rev, pycompat.fsdecode(self.path)),
    ).fetchone()
    if maybe is None:
        raise IndexError('gitlog %r out of range %d' % (self.path, rev))
    return bin(maybe[0])

def __call__(self, *args, **kwargs):
    for entry in kwargs.get('entries', []):
        push = web.repo.pushlog.pushfromnode(bin(entry[b'node']))
        if push:
            entry[b'pushid'] = push.pushid
            entry[b'pushdate'] = dateutil.makedate(push.when)
        else:
            entry[b'pushid'] = None
            entry[b'pushdate'] = None

    return super(tmplwrapper, self).__call__(*args, **kwargs)

def readseries(self):
    nodes = []
    merges = []
    cur = nodes
    for line in self.opener.read(b'series').splitlines():
        if line.startswith(b'# Merges'):
            cur = merges
            continue
        cur.append(bin(line))
    return (nodes, merges)

def filelogrenamed(orig, self, node):
    if _islfs(self, node):
        rawtext = self._revlog.rawdata(node)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
            return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

def tags(self):
    # TODO consider using self._tagscache
    tagscache = super(hgrepo, self).tags()
    tagscache.update(self.githandler.remote_refs)
    for tag, rev in self.githandler.tags.iteritems():
        if tag in tagscache:
            continue
        tagscache[tag] = bin(rev)
    return tagscache

def unbundle(orig, repo, cg, heads, source, url):
    # Preload the manifests that the client says we'll need. This happens
    # outside the lock, thus cutting down on our lock time and increasing
    # commit throughput.
    if util.safehasattr(cg, 'params'):
        preloadmfs = cg.params.get('preloadmanifests')
        if preloadmfs:
            for mfnode in preloadmfs.split(','):
                repo.manifest.read(bin(mfnode))

    return orig(repo, cg, heads, source, url)

def lookup(self, node):
    if len(node) not in (20, 40):
        node = int(node)
    if isinstance(node, int):
        assert False, b'todo revnums for nodes'
    if len(node) == 40:
        node = bin(node)
    hnode = gitutil.togitnode(node)
    if hnode in self.gitrepo:
        return node
    raise error.LookupError(self.path, node, _(b'no match found'))

def streamer():
    fin = proto.fin
    opener = repo.sopener

    cachepath = repo.ui.config("remotefilelog", "servercachepath")
    if not cachepath:
        cachepath = os.path.join(repo.path, "remotefilelogcache")

    # everything should be user & group read/writable
    oldumask = os.umask(0o002)
    try:
        while True:
            request = fin.readline()[:-1]
            if not request:
                break

            node = bin(request[:40])
            if node == nullid:
                yield '0\n'
                continue

            path = request[40:]

            filecachepath = os.path.join(cachepath, path, hex(node))
            if not os.path.exists(filecachepath):
                filectx = repo.filectx(path, fileid=node)
                if filectx.node() == nullid:
                    repo.changelog = changelog.changelog(repo.sopener)
                    filectx = repo.filectx(path, fileid=node)

                text = createfileblob(filectx)
                text = lz4.compressHC(text)

                dirname = os.path.dirname(filecachepath)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                try:
                    with open(filecachepath, "w") as f:
                        f.write(text)
                except IOError:
                    # Don't abort if the user only has permission to read,
                    # and not write.
                    pass
            else:
                with open(filecachepath, "r") as f:
                    text = f.read()

            yield '%d\n%s' % (len(text), text)

            # it would be better to only flush after processing a whole batch
            # but currently we don't know if there are more requests coming
            proto.fout.flush()
    finally:
        os.umask(oldumask)

def filelogrenamed(orig, self, node):
    if _islfs(self, node):
        rawtext = self.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

def changegroupsubset(web, req):
    req.respond(HTTP_OK, HGTYPE)
    bases = []
    heads = []
    if not web.allowpull:
        return
    if 'bases' in req.form:
        bases = [bin(x) for x in req.form['bases'][0].split(' ')]
    if 'heads' in req.form:
        heads = [bin(x) for x in req.form['heads'][0].split(' ')]
    z = zlib.compressobj()
    f = web.repo.changegroupsubset(bases, heads, 'serve')
    while 1:
        chunk = f.read(4096)
        if not chunk:
            break
        req.write(z.compress(chunk))
    req.write(z.flush())

def parents(self, n):
    gitrev = self.repo.revmap.get(n)
    if not gitrev:
        # we've reached a revision we have
        return self.base.parents(n)
    commit = self.repo.handler.git.get_object(n)

    def gitorhg(n):
        hn = self.repo.handler.map_hg_get(hex(n))
        if hn is not None:
            return bin(hn)
        return n

    # currently ignores the octopus
    p1 = gitorhg(bin(commit.parents[0]))
    if len(commit.parents) > 1:
        p2 = gitorhg(bin(commit.parents[1]))
    else:
        p2 = nullid
    return [p1, p2]