def verify(self, data, sig):
    """returns the good and bad signatures"""
    sigfile = datafile = None
    try:
        # create temporary files
        fd, sigfile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".sig")
        fp = os.fdopen(fd, 'wb')
        fp.write(sig)
        fp.close()
        fd, datafile = pycompat.mkstemp(prefix=b"hg-gpg-", suffix=b".txt")
        fp = os.fdopen(fd, 'wb')
        fp.write(data)
        fp.close()
        gpgcmd = b"%s --logger-fd 1 --status-fd 1 --verify \"%s\" \"%s\"" % (
            self.path,
            sigfile,
            datafile,
        )
        ret = procutil.filter(b"", gpgcmd)
    finally:
        for f in (sigfile, datafile):
            try:
                if f:
                    os.unlink(f)
            except OSError:
                pass
    keys = []
    key, fingerprint = None, None
    for l in ret.splitlines():
        # see DETAILS in the gnupg documentation
        # filter the logger output
        if not l.startswith(b"[GNUPG:]"):
            continue
        l = l[9:]
        if l.startswith(b"VALIDSIG"):
            # fingerprint of the primary key
            fingerprint = l.split()[10]
        elif l.startswith(b"ERRSIG"):
            key = l.split(b" ", 3)[:2]
            key.append(b"")
            fingerprint = None
        elif (
            l.startswith(b"GOODSIG")
            or l.startswith(b"EXPSIG")
            or l.startswith(b"EXPKEYSIG")
            or l.startswith(b"BADSIG")
        ):
            if key is not None:
                keys.append(key + [fingerprint])
            key = l.split(b" ", 2)
            fingerprint = None
    if key is not None:
        keys.append(key + [fingerprint])
    return keys
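
# For context, verify() parses GnuPG's machine-readable --status-fd output
# (see DETAILS in the gnupg documentation). Below is a minimal standalone
# sketch of the same line format; the status lines and the helper name
# _demo_parse_gpg_status are fabricated samples for illustration only.
def _demo_parse_gpg_status():
    sample = b"\n".join([
        b"[GNUPG:] GOODSIG 0123456789ABCDEF Jane Doe <jane@example.com>",
        b"[GNUPG:] VALIDSIG AAAA1111BBBB2222CCCC3333DDDD4444EEEE5555"
        b" 2020-01-01 1577836800 0 4 0 1 8 00"
        b" FFFF6666AAAA7777BBBB8888CCCC9999DDDD0000",
    ])
    for line in sample.splitlines():
        if not line.startswith(b"[GNUPG:]"):
            continue
        line = line[9:]  # drop the b"[GNUPG:] " prefix, as verify() does
        fields = line.split()
        if fields[0] == b"GOODSIG":
            # GOODSIG <long key id> <user id>
            print("good signature from key", fields[1].decode())
        elif fields[0] == b"VALIDSIG":
            # field 10 is the primary key fingerprint, which is what
            # verify() records alongside each key
            print("primary key fingerprint:", fields[10].decode())
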
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''
    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get(b'cgversion', b'01')
    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
    cgpart.addparam(b'version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = pycompat.mkstemp()
    fp = None
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            if fp is not None:
                fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1
def filter(self, filter, node, changelog, patchfile):
    '''arbitrarily rewrite changeset before applying it'''

    self.ui.status(_(b'filtering %s\n') % patchfile)
    user, date, msg = (changelog[1], changelog[2], changelog[4])
    fd, headerfile = pycompat.mkstemp(prefix=b'hg-transplant-')
    fp = os.fdopen(fd, 'wb')
    fp.write(b"# HG changeset patch\n")
    fp.write(b"# User %s\n" % user)
    fp.write(b"# Date %d %d\n" % date)
    fp.write(msg + b'\n')
    fp.close()

    try:
        self.ui.system(
            b'%s %s %s'
            % (
                filter,
                procutil.shellquote(headerfile),
                procutil.shellquote(patchfile),
            ),
            environ={
                b'HGUSER': changelog[1],
                b'HGREVISION': hex(node),
            },
            onerr=error.Abort,
            errprefix=_(b'filter failed'),
            blockedtag=b'transplant_filter',
        )
        user, date, msg = self.parselog(open(headerfile, 'rb'))[1:4]
    finally:
        os.unlink(headerfile)

    return (user, date, msg)
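
# The filter program above is invoked as `<filter> <headerfile> <patchfile>`
# and the rewritten header file is read back afterwards via parselog(). A
# hypothetical standalone filter script (the file name and the particular
# rewrite are illustrative, not part of Mercurial) that uppercases the
# commit message while keeping the metadata lines intact:
#
#   #!/usr/bin/env python3
#   import sys
#
#   headerfile = sys.argv[1]  # "# HG changeset patch" header plus message
#   with open(headerfile, 'rb') as f:
#       lines = f.read().splitlines(True)
#   with open(headerfile, 'wb') as f:
#       for line in lines:
#           # keep "# User"/"# Date" metadata, uppercase the message body
#           f.write(line if line.startswith(b'#') else line.upper())
#
# used, for example, as: hg transplant --filter ./shout.py <rev>
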
def storetobundlestore(orig, repo, op, unbundler):
    """store the incoming bundle from a push command in the bundlestore
    instead of applying it on the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in pycompat.iteritems(part.params):
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

            op.records.add(
                part.type,
                {
                    b'return': 1,
                },
            )
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    fp = None
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            if fp is not None:
                fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
def _makebundlefromraw(data):
    fp = None
    fd, bundlefile = pycompat.mkstemp()
    try:  # guards bundlefile
        try:  # guards fp
            fp = os.fdopen(fd, 'wb')
            fp.write(data)
        finally:
            if fp is not None:
                fp.close()
    except Exception:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
        raise

    return bundlefile
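
# The mkstemp/fdopen/unlink lifecycle above also appears, nearly verbatim,
# in the bundle handlers before it. A minimal sketch of the same pattern as
# a reusable context manager using only the standard library; the helper
# name tempbundlefile is hypothetical, not part of Mercurial's API.
import contextlib
import os
import tempfile


@contextlib.contextmanager
def tempbundlefile(data):
    # write bytes to a temp file, yield its path, always unlink afterwards
    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'wb') as fp:
            fp.write(data)
        yield path
    finally:
        try:
            os.unlink(path)
        except OSError:
            pass


# usage sketch:
#   with tempbundlefile(buf.read()) as bundlefile:
#       storebundle(op, params, bundlefile)
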
def processparts(orig, repo, op, unbundler):

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == b'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get(b'infinitepush') != b'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get(b'cgversion', b'01')
                bundlepart = bundle2.bundlepart(
                    b'changegroup', data=part.read()
                )
                bundlepart.addparam(b'version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(
                        scratchbranchparttype + b'_skippushkey', True
                    )
                    op.records.add(
                        scratchbranchparttype + b'_skipphaseheads', True
                    )
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(
                        part.type, data=part.read()
                    )
                    for key, value in pycompat.iteritems(part.params):
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == b'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart(b'reply:pushkey')
                            rpart.addparam(
                                b'in-reply-to',
                                b'%d' % part.id,
                                mandatory=False,
                            )
                            rpart.addparam(b'return', b'1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(
                    part.type,
                    {
                        b'return': 1,
                    },
                )
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = pycompat.mkstemp()
        fp = None
        try:
            try:
                fp = os.fdopen(fd, 'wb')
                fp.write(buf.read())
            finally:
                if fp is not None:
                    fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass
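
# The routing above is driven by server configuration and bundle parameters.
# An illustrative hgrc snippet enabling the two server-side switches read by
# processparts (the section and key names are the ones queried above; the
# values are examples, not defaults):
#
#   [infinitepush]
#   # route every incoming push into the bundle store
#   pushtobundlestore = true
#   # store all bundle2 parts, not just the changegroup-bearing ones
#   storeallparts = true
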
def apply(self, repo, source, revmap, merges, opts=None):
    '''apply the revisions in revmap one by one in revision order'''
    if opts is None:
        opts = {}
    revs = sorted(revmap)
    p1 = repo.dirstate.p1()
    pulls = []
    diffopts = patch.difffeatureopts(self.ui, opts)
    diffopts.git = True

    lock = tr = None
    try:
        lock = repo.lock()
        tr = repo.transaction(b'transplant')
        for rev in revs:
            node = revmap[rev]
            revstr = b'%d:%s' % (rev, short(node))

            if self.applied(repo, node, p1):
                self.ui.warn(
                    _(b'skipping already applied revision %s\n') % revstr
                )
                continue

            parents = source.changelog.parents(node)
            if not (opts.get(b'filter') or opts.get(b'log')):
                # If the changeset parent is the same as the
                # wdir's parent, just pull it.
                if parents[0] == p1:
                    pulls.append(node)
                    p1 = node
                    continue
                if pulls:
                    if source != repo:
                        exchange.pull(repo, source.peer(), heads=pulls)
                    merge.update(repo[pulls[-1]])
                    p1 = repo.dirstate.p1()
                    pulls = []

            domerge = False
            if node in merges:
                # pulling all the merge revs at once would mean we
                # couldn't transplant after the latest even if
                # transplants before them fail.
                domerge = True
                if not hasnode(repo, node):
                    exchange.pull(repo, source.peer(), heads=[node])

            skipmerge = False
            if parents[1] != nullid:
                if not opts.get(b'parent'):
                    self.ui.note(
                        _(b'skipping merge changeset %d:%s\n')
                        % (rev, short(node))
                    )
                    skipmerge = True
                else:
                    parent = source.lookup(opts[b'parent'])
                    if parent not in parents:
                        raise error.Abort(
                            _(b'%s is not a parent of %s')
                            % (short(parent), short(node))
                        )
            else:
                parent = parents[0]

            if skipmerge:
                patchfile = None
            else:
                fd, patchfile = pycompat.mkstemp(prefix=b'hg-transplant-')
                fp = os.fdopen(fd, 'wb')
                gen = patch.diff(source, parent, node, opts=diffopts)
                for chunk in gen:
                    fp.write(chunk)
                fp.close()

            del revmap[rev]
            if patchfile or domerge:
                try:
                    try:
                        n = self.applyone(
                            repo,
                            node,
                            source.changelog.read(node),
                            patchfile,
                            merge=domerge,
                            log=opts.get(b'log'),
                            filter=opts.get(b'filter'),
                        )
                    except TransplantError:
                        # Do not rollback, it is up to the user to
                        # fix the merge or cancel everything
                        tr.close()
                        raise
                    if n and domerge:
                        self.ui.status(
                            _(b'%s merged at %s\n') % (revstr, short(n))
                        )
                    elif n:
                        self.ui.status(
                            _(b'%s transplanted to %s\n')
                            % (short(node), short(n))
                        )
                finally:
                    if patchfile:
                        os.unlink(patchfile)
        tr.close()
        if pulls:
            exchange.pull(repo, source.peer(), heads=pulls)
            merge.update(repo[pulls[-1]])
    finally:
        self.saveseries(revmap, merges)
        self.transplants.write()
        if tr:
            tr.release()
        if lock:
            lock.release()
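
# The `pulls` list in apply() batches consecutive changesets whose first
# parent is the current working-directory parent, so linear runs are pulled
# in one round trip instead of being patched one at a time. A minimal
# standalone sketch of that batching decision (the function name and data
# shapes are illustrative, not Mercurial's):
def batch_linear_runs(revs, parent_of, start_parent):
    # group an ordered revision list into runs where each revision's
    # (first) parent is the previous tip, mirroring how apply()
    # accumulates `pulls` before falling back to patching
    batches = []
    current = []
    tip = start_parent
    for rev in revs:
        if parent_of[rev] == tip:
            current.append(rev)  # extend the linear run: pull, don't patch
        else:
            if current:
                batches.append(current)
                current = []
            # this revision would be patched; it then becomes the new tip
        tip = rev
    if current:
        batches.append(current)
    return batches


# usage sketch: revs 1..3 form a linear run on top of 0; 4 branches off 2
# parents = {1: 0, 2: 1, 3: 2, 4: 2}
# batch_linear_runs([1, 2, 3, 4], parents, start_parent=0)  # -> [[1, 2, 3]]
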