def diff(self, tmpl, node1, node2, files):
    def filterfiles(filters, files):
        l = [x for x in files if x in filters]

        for t in filters:
            if t and t[-1] != os.sep:
                t += os.sep
            l += [x for x in files if x.startswith(t)]
        return l

    parity = paritygen(self.stripecount)

    def diffblock(diff, f, fn):
        yield tmpl("diffblock",
                   lines=prettyprintlines(diff),
                   parity=parity.next(),
                   file=f,
                   filenode=hex(fn or nullid))

    blockcount = webutil.countgen()

    def prettyprintlines(diff):
        blockno = blockcount.next()
        for lineno, l in enumerate(diff.splitlines(1)):
            if blockno == 0:
                lineno = lineno + 1
            else:
                lineno = "%d.%d" % (blockno, lineno + 1)
            if l.startswith("+"):
                ltype = "difflineplus"
            elif l.startswith("-"):
                ltype = "difflineminus"
            elif l.startswith("@"):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineid="l%s" % lineno,
                       linenumber="% 8s" % lineno)

    r = self.repo
    c1 = r[node1]
    c2 = r[node2]
    date1 = util.datestr(c1.date())
    date2 = util.datestr(c2.date())

    modified, added, removed, deleted, unknown = r.status(node1, node2)[:5]
    if files:
        modified, added, removed = map(lambda x: filterfiles(files, x),
                                       (modified, added, removed))

    for f in modified:
        to = c1.filectx(f).data()
        tn = c2.filectx(f).data()
        yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f), f, tn)
    for f in added:
        to = None
        tn = c2.filectx(f).data()
        yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f), f, tn)
    for f in removed:
        to = c1.filectx(f).data()
        tn = None
        yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f), f, tn)
def _parse(self): if self.changeset is not None: return self.changeset = {} maxrev = 0 if self.revs: if len(self.revs) > 1: raise util.Abort(_('cvs source does not support specifying ' 'multiple revs')) # TODO: handle tags try: # patchset number? maxrev = int(self.revs[0]) except ValueError: raise util.Abort(_('revision %s is not a patchset number') % self.revs[0]) d = os.getcwd() try: os.chdir(self.path) id = None cache = 'update' if not self.ui.configbool('convert', 'cvsps.cache', True): cache = None db = cvsps.createlog(self.ui, cache=cache) db = cvsps.createchangeset(self.ui, db, fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)), mergeto=self.ui.config('convert', 'cvsps.mergeto', None), mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None)) for cs in db: if maxrev and cs.id > maxrev: break id = str(cs.id) cs.author = self.recode(cs.author) self.lastbranch[cs.branch] = id cs.comment = self.recode(cs.comment) if self.ui.configbool('convert', 'localtimezone'): cs.date = makedatetimestamp(cs.date[0]) date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2') self.tags.update(dict.fromkeys(cs.tags, id)) files = {} for f in cs.entries: files[f.file] = "%s%s" % ('.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]) # add current commit to set c = commit(author=cs.author, date=date, parents=[str(p.id) for p in cs.parents], desc=cs.comment, branch=cs.branch or '') self.changeset[id] = c self.files[id] = files self.heads = self.lastbranch.values() finally: os.chdir(d)
def log_api(self, branch=None):
    def enc(string):
        # try each candidate encoding in turn before falling back to ascii
        for e in ('utf8', 'latin1', 'windows-1250', 'windows-1252'):
            try:
                return string.decode(e)
            except UnicodeError:
                continue
        return string.decode('ascii', 'ignore')

    repo = hg.repository(ui.ui(), self.repo_path)
    as_list, as_dict = [], defaultdict(list)
    for rev in repo:
        rev_obj = repo[rev]
        branch_ = rev_obj.branch()
        if branch and branch != branch_:
            continue
        node = rev_obj.hex()
        date = self._parse_date(datestr(rev_obj.date()))
        one = dict(branch=branch_,
                   mess=rev_obj.description(),
                   author=rev_obj.user(),
                   date=date,
                   files=map(enc, rev_obj.files()),
                   tags=rev_obj.tags(),
                   rev=rev,
                   node=node,
                   short=node[:12])
        as_list.insert(0, one)
        as_dict[branch_].insert(0, one)
    return as_list, dict(as_dict)
def log(self, event, *msg, **opts):
    global lastblackbox
    super(blackboxui, self).log(event, *msg, **opts)

    if not '*' in self.track and not event in self.track:
        return

    if util.safehasattr(self, '_blackbox'):
        blackbox = self._blackbox
    elif util.safehasattr(self, '_bbopener'):
        try:
            self._blackbox = self._openlogfile()
        except (IOError, OSError) as err:
            self.debug('warning: cannot write to blackbox.log: %s\n' %
                       err.strerror)
            del self._bbopener
            self._blackbox = None
        blackbox = self._blackbox
    else:
        # certain ui instances exist outside the context of
        # a repo, so just default to the last blackbox that
        # was seen.
        blackbox = lastblackbox

    if blackbox:
        date = util.datestr(None, '%Y/%m/%d %H:%M:%S')
        user = util.getuser()
        formattedmsg = msg[0] % msg[1:]
        try:
            blackbox.write('%s %s> %s' % (date, user, formattedmsg))
        except IOError as err:
            self.debug('warning: cannot write to blackbox.log: %s\n' %
                       err.strerror)
        lastblackbox = blackbox
def send(self, ctx, count, data):
    '''send message.'''

    p = email.Parser.Parser()
    msg = p.parsestr(data)

    # store sender and subject
    sender, subject = msg['From'], msg['Subject']
    del msg['From'], msg['Subject']

    # store remaining headers
    headers = msg.items()

    # create fresh mime message from msg body
    text = msg.get_payload()
    # for notification prefer readability over data precision
    msg = mail.mimeencode(self.ui, text, self.charsets, self.test)

    # reinstate custom headers
    for k, v in headers:
        msg[k] = v

    msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")

    # try to make subject line exist and be useful
    if not subject:
        if count > 1:
            subject = _('%s: %d new changesets') % (self.root, count)
        else:
            s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
            subject = '%s: %s' % (self.root, s)
    maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
    if maxsubject and len(subject) > maxsubject:
        subject = subject[:maxsubject-3] + '...'
    msg['Subject'] = mail.headencode(self.ui, subject,
                                     self.charsets, self.test)

    # try to make message have proper sender
    if not sender:
        sender = self.ui.config('email', 'from') or self.ui.username()
    if '@' not in sender or '@localhost' in sender:
        sender = self.fixmail(sender)
    msg['From'] = mail.addressencode(self.ui, sender,
                                     self.charsets, self.test)

    msg['X-Hg-Notification'] = 'changeset %s' % ctx
    if not msg['Message-Id']:
        msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
                             (ctx, int(time.time()),
                              hash(self.repo.root), socket.getfqdn()))
    msg['To'] = ', '.join(self.subs)

    msgtext = msg.as_string(0)
    if self.test:
        self.ui.write(msgtext)
        if not msgtext.endswith('\n'):
            self.ui.write('\n')
    else:
        self.ui.status(_('notify: sending %d subscribers %d changes\n') %
                       (len(self.subs), count))
        mail.sendmail(self.ui, util.email(msg['From']),
                      self.subs, msgtext)
def getcommit(self, rev):
    certs = self.mtngetcerts(rev)
    return commit(
        author=certs["author"],
        date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
        desc=certs["changelog"],
        rev=rev,
        parents=self.mtnrun("parents", rev).splitlines(),
        branch=certs["branch"])
def getcommit(self, rev):
    ctx = self.changectx(rev)
    parents = [hex(p.node()) for p in ctx.parents() if p.node() != nullid]
    if self.saverev:
        crev = rev
    else:
        crev = None
    return commit(author=ctx.user(),
                  date=util.datestr(ctx.date()),
                  desc=ctx.description(),
                  rev=crev,
                  parents=parents,
                  branch=ctx.branch(),
                  extra=ctx.extra())
def getcommit(self, rev):
    elt = self.changes[rev]
    date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
    desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
    # etree can return unicode objects for name, comment, and author,
    # so recode() is used to ensure str objects are emitted.
    return common.commit(author=self.recode(elt.get('author')),
                         date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
                         desc=self.recode(desc).strip(),
                         parents=self.parents[rev])
def send(self, node, count, data):
    '''send message.'''

    p = email.Parser.Parser()
    msg = p.parsestr(data)

    def fix_subject():
        '''try to make subject line exist and be useful.'''
        subject = msg['Subject']
        if not subject:
            if count > 1:
                subject = _('%s: %d new changesets') % (self.root, count)
            else:
                changes = self.repo.changelog.read(node)
                s = changes[4].lstrip().split('\n', 1)[0].rstrip()
                subject = '%s: %s' % (self.root, s)
        maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
        if maxsubject and len(subject) > maxsubject:
            subject = subject[:maxsubject-3] + '...'
        del msg['Subject']
        msg['Subject'] = subject

    def fix_sender():
        '''try to make message have proper sender.'''
        sender = msg['From']
        if not sender:
            sender = self.ui.config('email', 'from') or self.ui.username()
        if '@' not in sender or '@localhost' in sender:
            sender = self.fixmail(sender)
        del msg['From']
        msg['From'] = sender

    msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
    fix_subject()
    fix_sender()

    msg['X-Hg-Notification'] = 'changeset ' + short(node)
    if not msg['Message-Id']:
        msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
                             (short(node), int(time.time()),
                              hash(self.repo.root), socket.getfqdn()))
    msg['To'] = ', '.join(self.subs)

    msgtext = msg.as_string(0)
    if self.ui.configbool('notify', 'test', True):
        self.ui.write(msgtext)
        if not msgtext.endswith('\n'):
            self.ui.write('\n')
    else:
        self.ui.status(_('notify: sending %d subscribers %d changes\n') %
                       (len(self.subs), count))
        mail.sendmail(self.ui, util.email(msg['From']),
                      self.subs, msgtext)
def getcommit(self, rev):
    ctx = self.changectx(rev)
    parents = [p.hex() for p in self.parents(ctx)]
    crev = rev
    return commit(author=ctx.user(),
                  date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
                  desc=ctx.description(),
                  rev=crev,
                  parents=parents,
                  branch=ctx.branch(),
                  extra=ctx.extra(),
                  sortkey=ctx.rev(),
                  saverev=self.saverev,
                  phase=ctx.phase())
def getcommit(self, rev):
    ctx = self.changectx(rev)
    parents = [p.hex() for p in self.parents(ctx)]
    if self.saverev:
        crev = rev
    else:
        crev = None
    return commit(author=ctx.user(),
                  date=util.datestr(ctx.date()),
                  desc=ctx.description(),
                  rev=crev,
                  parents=parents,
                  branch=ctx.branch(),
                  extra=ctx.extra(),
                  sortkey=ctx.rev())
def info(ui, repo, **opts):
    """show Subversion details similar to `svn info'
    """
    if repo is None:
        raise error.RepoError("There is no Mercurial repository"
                              " here (.hg not found)")

    meta = repo.svnmeta()
    hashes = meta.revmap.hashes()

    if opts.get("rev"):
        parent = repo[opts["rev"]]
    else:
        parent = util.parentrev(ui, repo, meta, hashes)

    pn = parent.node()
    if pn not in hashes:
        ui.status("Not a child of an svn revision.\n")
        return 0
    r, br = hashes[pn]
    subdir = parent.extra()["convert_revision"][40:].split("@")[0]
    if meta.layout == "single":
        branchpath = ""
    elif br is None:
        branchpath = "/trunk"
    elif br.startswith("../"):
        branchpath = "/%s" % br[3:]
        subdir = subdir.replace("branches/../", "")
    else:
        branchpath = "/branches/%s" % br
    remoterepo = svnrepo.svnremoterepo(repo.ui)
    url = "%s%s" % (remoterepo.svnurl, branchpath)
    author = meta.authors.reverselookup(parent.user())
    # cleverly figure out repo root w/o actually contacting the server
    reporoot = url[:len(url) - len(subdir)]
    ui.write(
        """URL: %(url)s
Repository Root: %(reporoot)s
Repository UUID: %(uuid)s
Revision: %(revision)s
Node Kind: directory
Last Changed Author: %(author)s
Last Changed Rev: %(revision)s
Last Changed Date: %(date)s\n"""
        % {
            "reporoot": reporoot,
            "uuid": meta.uuid,
            "url": url,
            "author": author,
            "revision": r,
            # TODO I'd like to format this to the user's local TZ if possible
            "date": hgutil.datestr(parent.date(),
                                   "%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)"),
        }
    )
def get_diffs(self):
    from mercurial import mdiff, util, patch
    from repo_browser.integration import Diff

    ctx = self.ctx
    parent = ctx.parents()[0]
    parent_date = util.datestr(parent.date())
    this_date = util.datestr(ctx.date())
    diffopts = patch.diffopts(self.repo.repo.ui, untrusted=True)

    # Returns a tuple of modified, added, removed, deleted, unknown
    # TODO: look up in the api what FIXME* are
    modified, added, removed, deleted, unknown, FIXME, FIXME2 = \
        self.repo.repo.status(parent.node(), ctx.node())

    for modified_file in modified:
        filectx = ctx.filectx(modified_file)
        parent_filectx = parent.filectx(modified_file)
        this_data = filectx.data()
        parent_data = parent_filectx.data()
        yield Diff(mdiff.unidiff(parent_data, parent_date,
                                 this_data, this_date,
                                 modified_file, modified_file,
                                 opts=diffopts))

    for added_file in added:
        filectx = ctx.filectx(added_file)
        this_data = filectx.data()
        yield Diff(mdiff.unidiff(None, parent_date,
                                 this_data, this_date,
                                 added_file, added_file,
                                 opts=diffopts))

    for removed_file in removed:
        parent_filectx = parent.filectx(removed_file)
        parent_data = parent_filectx.data()
        yield Diff(mdiff.unidiff(parent_data, parent_date,
                                 None, this_date,
                                 removed_file, removed_file,
                                 opts=diffopts))
def info(ui, repo, **opts):
    """show Subversion details similar to `svn info'
    """
    if repo is None:
        raise error.RepoError("There is no Mercurial repository"
                              " here (.hg not found)")

    meta = repo.svnmeta()
    hashes = meta.revmap.hashes()

    if opts.get('rev'):
        parent = repo[opts['rev']]
    else:
        parent = util.parentrev(ui, repo, meta, hashes)

    pn = parent.node()
    if pn not in hashes:
        ui.status('Not a child of an svn revision.\n')
        return 0
    r, br = hashes[pn]
    subdir = parent.extra()['convert_revision'][40:].split('@')[0]
    if meta.layout == 'single':
        branchpath = ''
    elif br is None:
        branchpath = '/trunk'
    elif br.startswith('../'):
        branchpath = '/%s' % br[3:]
        subdir = subdir.replace('branches/../', '')
    else:
        branchpath = '/branches/%s' % br
    remoterepo = svnrepo.svnremoterepo(repo.ui)
    url = '%s%s' % (remoterepo.svnurl, branchpath)
    author = meta.authors.reverselookup(parent.user())
    # cleverly figure out repo root w/o actually contacting the server
    reporoot = url[:len(url) - len(subdir)]
    ui.write('''URL: %(url)s
Repository Root: %(reporoot)s
Repository UUID: %(uuid)s
Revision: %(revision)s
Node Kind: directory
Last Changed Author: %(author)s
Last Changed Rev: %(revision)s
Last Changed Date: %(date)s\n''' %
             {'reporoot': reporoot,
              'uuid': meta.uuid,
              'url': url,
              'author': author,
              'revision': r,
              # TODO I'd like to format this to the user's local TZ if possible
              'date': hgutil.datestr(parent.date(),
                                     '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
             })
def sendemail(self, address, data):
    p = email.Parser.Parser()
    msg = p.parsestr(data)
    msg["Date"] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
    msg["To"] = address
    msg["From"] = self.emailfrom
    msg["Subject"] = "DeliverXML"
    msg["Content-type"] = "text/xml"
    msgtext = msg.as_string()

    self.ui.status(_("hgcia: sending update to %s\n") % address)
    mail.sendmail(self.ui, util.email(self.emailfrom), [address], msgtext)
def getcommit(self, rev):
    elt = self.changes[rev]
    date = util.strdate(elt.get("local_date"), "%a %b %d %H:%M:%S %Z %Y")
    desc = elt.findtext("name") + "\n" + elt.findtext("comment", "")
    # etree can return unicode objects for name, comment, and author,
    # so recode() is used to ensure str objects are emitted.
    return commit(
        author=self.recode(elt.get("author")),
        date=util.datestr(date),
        desc=self.recode(desc).strip(),
        parents=self.parents[rev],
    )
def getcommit(self, rev):
    extra = {}
    certs = self.mtngetcerts(rev)
    if certs.get('suspend') == certs["branch"]:
        extra['close'] = 1
    return commit(
        author=certs["author"],
        date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
        desc=certs["changelog"],
        rev=rev,
        parents=self.mtnrun("parents", rev).splitlines(),
        branch=certs["branch"],
        extra=extra)
def sendemail(self, address, data):
    p = email.Parser.Parser()
    msg = p.parsestr(data)
    msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
    msg['To'] = address
    msg['From'] = self.emailfrom
    msg['Subject'] = 'DeliverXML'
    msg['Content-type'] = 'text/xml'
    msgtext = msg.as_string()

    self.ui.status(_('hgcia: sending update to %s\n') % address)
    mail.sendmail(self.ui, util.email(self.emailfrom), [address], msgtext)
def _format_changelog(ctx):
    """
    Format the log message for a changeset.

    :param ctx: The Mercurial change context containing the tags.
    :return: The formatted change log.
    """
    from mercurial.util import datestr

    log = "changeset: %s\ndate: %s\n%s" % (str(ctx), datestr(ctx.date()),
                                           ctx.description())
    return log
def get_property_pages(self, vfs_files):
    if len(vfs_files) != 1:
        return
    file = vfs_files[0]
    path = self.get_path_for_vfs_file(file)
    if path is None or file.is_directory():
        return
    repo = self.get_repo_for_path(path)
    if repo is None:
        return
    localpath = path[len(repo.root)+1:]
    emblem, status = self._get_file_status(path, repo)

    # Get the information from Mercurial
    ctx = repo['.']
    try:
        fctx = ctx.filectx(localpath)
        rev = fctx.filelog().linkrev(fctx.filerev())
    except Exception:
        rev = ctx.rev()
    ctx = repo.changectx(rev)
    node = short(ctx.node())
    date = util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2')
    parents = '\n'.join([short(p.node()) for p in ctx.parents()])
    description = ctx.description()
    user = ctx.user()
    user = markup_escape_text(user)
    tags = ', '.join(ctx.tags())
    branch = ctx.branch()

    self.property_label = gtk.Label('Mercurial')

    self.table = gtk.Table(7, 2, False)
    self.table.set_border_width(5)
    self.table.set_row_spacings(5)
    self.table.set_col_spacings(5)

    self.__add_row(0, '<b>Status</b>:', status)
    self.__add_row(1, '<b>Last-Commit-Revision</b>:', str(rev))
    self.__add_row(2, '<b>Last-Commit-Description</b>:', description)
    self.__add_row(3, '<b>Last-Commit-Date</b>:', date)
    self.__add_row(4, '<b>Last-Commit-User</b>:', user)
    if tags:
        self.__add_row(5, '<b>Tags</b>:', tags)
    if branch != 'default':
        self.__add_row(6, '<b>Branch</b>:', branch)

    self.table.show()

    return caja.PropertyPage("MercurialPropertyPage::status",
                             self.property_label, self.table),
def _parsecatlog(self, data, rev):
    summary = []
    for l in data:
        l = l.strip()
        if summary:
            summary.append(l)
        elif l.startswith('Summary:'):
            summary.append(l[len('Summary: '):])
        elif l.startswith('Standard-date:'):
            date = l[len('Standard-date: '):]
            strdate = util.strdate(date, '%Y-%m-%d %H:%M:%S')
            self.changes[rev].date = util.datestr(strdate)
        elif l.startswith('Creator:'):
            self.changes[rev].author = l[len('Creator: '):]
    self.changes[rev].summary = '\n'.join(summary)
def get_changeset_info(repo, changeset):
    """
    TODO: Get info in a dictionary about a specific changeset
    """
    repo = hg.repository(setup_ui(), repo)
    ctx = repo[changeset]
    return {
        'changeset': display_rev(ctx),
        'branch': ctx.branch(),
        'tags': ctx.tags(),
        'parents': [display_rev(p) for p in ctx.parents()],
        'user': ctx.user(),
        'date': datestr(ctx.date()),
        'summary': ctx.description().splitlines()[0],
    }
def template_dates(context, mapping, args):
    """:dates(VALUES, [fmt, [sep]]): Format a list of dates."""
    if not (1 <= len(args) <= 3):
        raise ParseError(_("dates expects one, two, or three arguments"))

    fmt = '%Y-%m-%d'
    sep = ','

    if len(args) > 1:
        fmt = templatefilters.stringify(args[1][0](context, mapping,
                                                   args[1][1]))
    if len(args) > 2:
        sep = templatefilters.stringify(args[2][0](context, mapping,
                                                   args[2][1]))

    return sep.join(util.datestr(d, fmt)
                    for d in args[0][0](context, mapping, args[0][1]))
def _debugundoindex(ui, repo, reverseindex):
    try:
        nodedict = _readindex(repo, reverseindex)
    except IndexError:
        raise error.Abort(_("index out of bounds"))

    template = "{tabindent(sub('\0', ' ', content))}\n"
    fm = ui.formatter('debugundohistory', {'template': template})
    cabinet = ('command.i', 'bookmarks.i', 'date.i',
               'draftheads.i', 'draftobsolete.i', 'workingparent.i')
    for filename in cabinet:
        header = filename[:-2] + ":\n"
        rawcontent = _readnode(repo, filename, nodedict[filename[:-2]])
        if "date.i" == filename:
            splitdate = rawcontent.split(" ")
            datetuple = (float(splitdate[0]), int(splitdate[1]))
            content = util.datestr(datetuple)
        elif "draftheads.i" == filename:
            try:
                oldnodes = _readindex(repo, reverseindex + 1)
                oldheads = _readnode(repo, filename, oldnodes[filename[:-2]])
            except IndexError:  # index is oldest log
                content = rawcontent
            else:
                content = "ADDED:\n\t" + "\n\t".join(sorted(
                    set(rawcontent.split("\n"))
                    - set(oldheads.split("\n"))
                ))
                content += "\nREMOVED:\n\t" + "\n\t".join(sorted(
                    set(oldheads.split("\n"))
                    - set(rawcontent.split("\n"))
                ))
        elif "command.i" == filename:
            if "" == rawcontent:
                content = "unknown command(s) run, gap in log"
            else:
                content = rawcontent.split("\0", 1)[1]
        else:
            content = rawcontent
        fm.startitem()
        fm.write('content', '%s', header + content)
    fm.write('content', '%s', "unfinished:\t" + nodedict['unfinished'])
    fm.end()
def _parsecatlog(self, data, rev):
    try:
        catlog = self.catlogparser.parsestr(data)

        # Commit date
        self.changes[rev].date = util.datestr(
            util.strdate(catlog['Standard-date'],
                         '%Y-%m-%d %H:%M:%S'))

        # Commit author
        self.changes[rev].author = self.recode(catlog['Creator'])

        # Commit description
        self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
                                                 catlog.get_payload()))
        self.changes[rev].summary = self.recode(self.changes[rev].summary)

        # Commit revision origin when dealing with a branch or tag
        if catlog.has_key('Continuation-of'):
            self.changes[rev].continuationof = self.recode(
                catlog['Continuation-of'])
    except Exception:
        raise util.Abort(_('could not parse cat-log of %s') % rev)
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format:
    "2009/08/18 11:00:13".
    '''
    return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
def _parse(self, ui, path): "Prepare list of P4 filenames and revisions to import" ui.status(_('reading p4 views\n')) # read client spec or view if "/" in path: self._parse_view(path) if path.startswith("//") and path.endswith("/..."): views = {path[:-3]:""} else: views = {"//": ""} else: cmd = 'p4 -G client -o %s' % util.shellquote(path) clientspec = marshal.load(util.popen(cmd, mode='rb')) views = {} for client in clientspec: if client.startswith("View"): sview, cview = clientspec[client].split() self._parse_view(sview) if sview.endswith("...") and cview.endswith("..."): sview = sview[:-3] cview = cview[:-3] cview = cview[2:] cview = cview[cview.find("/") + 1:] views[sview] = cview # list of changes that affect our source files self.p4changes = self.p4changes.keys() self.p4changes.sort(key=int) # list with depot pathnames, longest first vieworder = views.keys() vieworder.sort(key=len, reverse=True) # handle revision limiting startrev = self.ui.config('convert', 'p4.startrev', default=0) self.p4changes = [x for x in self.p4changes if ((not startrev or int(x) >= int(startrev)) and (not self.rev or int(x) <= int(self.rev)))] # now read the full changelists to get the list of file revisions ui.status(_('collecting p4 changelists\n')) lastid = None for change in self.p4changes: cmd = "p4 -G describe -s %s" % change stdout = util.popen(cmd, mode='rb') d = marshal.load(stdout) desc = self.recode(d["desc"]) shortdesc = desc.split("\n", 1)[0] t = '%s %s' % (d["change"], repr(shortdesc)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') if lastid: parents = [lastid] else: parents = [] date = (int(d["time"]), 0) # timezone not set c = commit(author=self.recode(d["user"]), date=util.datestr(date), parents=parents, desc=desc, branch='', extra={"p4": change}) files = [] i = 0 while ("depotFile%d" % i) in d and ("rev%d" % i) in d: oldname = d["depotFile%d" % i] filename = None for v in vieworder: if oldname.startswith(v): filename = views[v] + oldname[len(v):] break if filename: files.append((filename, d["rev%d" % i])) self.depotname[filename] = oldname i += 1 self.changeset[change] = c self.files[change] = files lastid = change if lastid: self.heads = [lastid]
def _parse(self): if self.changeset is not None: return self.changeset = {} maxrev = 0 if self.revs: if len(self.revs) > 1: raise error.Abort( _('cvs source does not support specifying ' 'multiple revs')) # TODO: handle tags try: # patchset number? maxrev = int(self.revs[0]) except ValueError: raise error.Abort( _('revision %s is not a patchset number') % self.revs[0]) d = os.getcwd() try: os.chdir(self.path) id = None cache = 'update' if not self.ui.configbool('convert', 'cvsps.cache', True): cache = None db = cvsps.createlog(self.ui, cache=cache) db = cvsps.createchangeset( self.ui, db, fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)), mergeto=self.ui.config('convert', 'cvsps.mergeto', None), mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None)) for cs in db: if maxrev and cs.id > maxrev: break id = str(cs.id) cs.author = self.recode(cs.author) self.lastbranch[cs.branch] = id cs.comment = self.recode(cs.comment) if self.ui.configbool('convert', 'localtimezone'): cs.date = makedatetimestamp(cs.date[0]) date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2') self.tags.update(dict.fromkeys(cs.tags, id)) files = {} for f in cs.entries: files[f.file] = "%s%s" % ('.'.join( [str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]) # add current commit to set c = commit(author=cs.author, date=date, parents=[str(p.id) for p in cs.parents], desc=cs.comment, branch=cs.branch or '') self.changeset[id] = c self.files[id] = files self.heads = self.lastbranch.values() finally: os.chdir(d)
if cs.branch not in branches and cs.parents and cs.parents[0].id:
    ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                            cs.parents[0].id)
branches[cs.branch] = cs.id

# limit by branches
if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
    continue

if not off:
    # Note: trailing spaces on several lines here are needed to have
    #       bug-for-bug compatibility with cvsps.
    ui.write('---------------------\n')
    ui.write('PatchSet %d \n' % cs.id)
    ui.write('Date: %s\n' % util.datestr(cs.date,
                                         '%Y/%m/%d %H:%M:%S %1%2'))
    ui.write('Author: %s\n' % cs.author)
    ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
    ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                               ','.join(cs.tags) or '(none)'))
    branchpoints = getattr(cs, 'branchpoints', None)
    if branchpoints:
        ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
    if opts["parents"] and cs.parents:
        if len(cs.parents) > 1:
            ui.write('Parents: %s\n' %
                     (','.join([str(p.id) for p in cs.parents])))
        else:
            ui.write('Parent: %d\n' % cs.parents[0].id)
if opts["ancestors"]:
    if cs.branch not in branches and cs.parents and cs.parents[0].id:
        ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                cs.parents[0].id)
    branches[cs.branch] = cs.id

# limit by branches
if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
    continue

if not off:
    # Note: trailing spaces on several lines here are needed to have
    #       bug-for-bug compatibility with cvsps.
    ui.write('---------------------\n')
    ui.write('PatchSet %d \n' % cs.id)
    ui.write('Date: %s\n' % util.datestr(cs.date,
                                         '%Y/%m/%d %H:%M:%S %1%2'))
    ui.write('Author: %s\n' % cs.author)
    ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
    ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                               ','.join(cs.tags) or '(none)'))
    branchpoints = getattr(cs, 'branchpoints', None)
    if branchpoints:
        ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
    if opts["parents"] and cs.parents:
        if len(cs.parents) > 1:
            ui.write('Parents: %s\n' %
                     (','.join([str(p.id) for p in cs.parents])))
        else:
            ui.write('Parent: %d\n' % cs.parents[0].id)

if opts["ancestors"]:
def getcommit(self, rev):
    elt = self.changes[rev]
    date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
    desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
    return commit(author=elt.get('author'),
                  date=util.datestr(date),
                  desc=desc.strip(),
                  parents=self.parents[rev])
def createlog(ui, directory=None, root="", rlog=True, cache=None): '''Collect the CVS rlog''' # Because we store many duplicate commit log messages, reusing strings # saves a lot of memory and pickle storage space. _scache = {} def scache(s): "return a shared version of a string" return _scache.setdefault(s, s) ui.status(_('collecting CVS rlog\n')) log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use re_00 = re.compile('RCS file: (.+)$') re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') re_02 = re.compile('cvs (r?log|server): (.+)\n$') re_03 = re.compile("(Cannot access.+CVSROOT)|" "(can't create temporary directory.+)$") re_10 = re.compile('Working file: (.+)$') re_20 = re.compile('symbolic names:') re_30 = re.compile('\t(.+): ([\\d.]+)$') re_31 = re.compile('----------------------------$') re_32 = re.compile('=======================================' '======================================$') re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' r'(.*mergepoint:\s+([^;]+);)?') re_70 = re.compile('branches: (.+);$') file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS if directory is None: # Current working directory # Get the real directory in the repository try: prefix = open(os.path.join('CVS', 'Repository')).read().strip() if prefix == ".": prefix = "" directory = prefix except IOError: raise logerror('Not a CVS sandbox') if prefix and not prefix.endswith(os.sep): prefix += os.sep # Use the Root file in the sandbox, if it exists try: root = open(os.path.join('CVS', 'Root')).read().strip() except IOError: pass if not root: root = os.environ.get('CVSROOT', '') # read log cache if one exists oldlog = [] date = None if cache: cachedir = os.path.expanduser('~/.hg.cvsps') if not os.path.exists(cachedir): os.mkdir(cachedir) # The cvsps cache pickle needs a uniquified name, based on the # repository location. The address may have all sort of nasties # in it, slashes, colons and such. So here we take just the # alphanumerics, concatenated in a way that does not mix up the # various components, so that # :pserver:user@server:/path # and # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) oldlog = pickle.load(open(cachefile)) ui.note(_('cache has %d log entries\n') % len(oldlog)) except Exception, e: ui.note(_('error reading cache: %r\n') % e) if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
def displaytime(date):
    return util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2')
def _parse(self, ui, path): "Prepare list of P4 filenames and revisions to import" ui.status(_('reading p4 views\n')) # read client spec or view if "/" in path: self._parse_view(path) if path.startswith("//") and path.endswith("/..."): views = {path[:-3]: ""} else: views = {"//": ""} else: cmd = 'p4 -G client -o %s' % util.shellquote(path) clientspec = marshal.load(util.popen(cmd, mode='rb')) views = {} for client in clientspec: if client.startswith("View"): sview, cview = clientspec[client].split() self._parse_view(sview) if sview.endswith("...") and cview.endswith("..."): sview = sview[:-3] cview = cview[:-3] cview = cview[2:] cview = cview[cview.find("/") + 1:] views[sview] = cview # list of changes that affect our source files self.p4changes = self.p4changes.keys() self.p4changes.sort(key=int) # list with depot pathnames, longest first vieworder = views.keys() vieworder.sort(key=len, reverse=True) # handle revision limiting startrev = self.ui.config('convert', 'p4.startrev', default=0) self.p4changes = [ x for x in self.p4changes if ((not startrev or int(x) >= int(startrev)) and ( not self.revs or int(x) <= int(self.revs[0]))) ] # now read the full changelists to get the list of file revisions ui.status(_('collecting p4 changelists\n')) lastid = None for change in self.p4changes: cmd = "p4 -G describe -s %s" % change stdout = util.popen(cmd, mode='rb') d = marshal.load(stdout) desc = self.recode(d.get("desc", "")) shortdesc = desc.split("\n", 1)[0] t = '%s %s' % (d["change"], repr(shortdesc)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') if lastid: parents = [lastid] else: parents = [] date = (int(d["time"]), 0) # timezone not set c = commit(author=self.recode(d["user"]), date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), parents=parents, desc=desc, branch=None, extra={"p4": change}) files = [] copies = {} copiedfiles = [] i = 0 while ("depotFile%d" % i) in d and ("rev%d" % i) in d: oldname = d["depotFile%d" % i] filename = None for v in vieworder: if oldname.lower().startswith(v.lower()): filename = decodefilename(views[v] + oldname[len(v):]) break if filename: files.append((filename, d["rev%d" % i])) self.depotname[filename] = oldname if (d.get("action%d" % i) == "move/add"): copiedfiles.append(filename) self.localname[oldname] = filename i += 1 # Collect information about copied files for filename in copiedfiles: oldname = self.depotname[filename] flcmd = 'p4 -G filelog %s' \ % util.shellquote(oldname) flstdout = util.popen(flcmd, mode='rb') copiedfilename = None for d in loaditer(flstdout): copiedoldname = None i = 0 while ("change%d" % i) in d: if (d["change%d" % i] == change and d["action%d" % i] == "move/add"): j = 0 while ("file%d,%d" % (i, j)) in d: if d["how%d,%d" % (i, j)] == "moved from": copiedoldname = d["file%d,%d" % (i, j)] break j += 1 i += 1 if copiedoldname and copiedoldname in self.localname: copiedfilename = self.localname[copiedoldname] break if copiedfilename: copies[filename] = copiedfilename else: ui.warn( _("cannot find source for copied file: %s@%s\n") % (filename, change)) self.changeset[change] = c self.files[change] = files self.copies[change] = copies lastid = change if lastid: self.heads = [lastid]
from mercurial.i18n import _
import re, shutil, tempfile

commands.optionalrepo += ' kwdemo'

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
                ' log outgoing push rename rollback tip verify'
                ' convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge record resolve qfold qimport qnew qpush qrefresh qrecord'

# provide cvs-like UTC date filter
utcdate = lambda x: util.datestr(x, '%Y/%m/%d %H:%M:%S')

# make keyword tools accessible
kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}


class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''
    templates = {
        'Revision': '{node|short}',
        'Author': '{author|user}',
        'Date': '{date|utcdate}',
        'RCSFile': '{file|basename},v',
if opts["ancestors"]:
    if cs.branch not in branches and cs.parents and cs.parents[0].id:
        ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
                                cs.parents[0].id)
    branches[cs.branch] = cs.id

# limit by branches
if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
    continue

if not off:
    # Note: trailing spaces on several lines here are needed to have
    #       bug-for-bug compatibility with cvsps.
    ui.write('---------------------\n')
    ui.write(('PatchSet %d \n' % cs.id))
    ui.write(('Date: %s\n' % util.datestr(cs.date,
                                          '%Y/%m/%d %H:%M:%S %1%2')))
    ui.write(('Author: %s\n' % cs.author))
    ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
    ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
                                ','.join(cs.tags) or '(none)')))
    if cs.branchpoints:
        ui.write(('Branchpoints: %s \n') %
                 ', '.join(sorted(cs.branchpoints)))
    if opts["parents"] and cs.parents:
        if len(cs.parents) > 1:
            ui.write(('Parents: %s\n' %
                      (','.join([str(p.id) for p in cs.parents]))))
        else:
            ui.write(('Parent: %d\n' % cs.parents[0].id))

if opts["ancestors"]:
def debugcvsps(ui, *args, **opts): '''Read CVS rlog for current directory or named path in repository, and convert the log to changesets based on matching commit log entries and dates. ''' if opts["new_cache"]: cache = "write" elif opts["update_cache"]: cache = "update" else: cache = None revisions = opts["revisions"] try: if args: log = [] for d in args: log += createlog(ui, d, root=opts["root"], cache=cache) else: log = createlog(ui, root=opts["root"], cache=cache) except logerror as e: ui.write("%r\n" % e) return changesets = createchangeset(ui, log, opts["fuzz"]) del log # Print changesets (optionally filtered) off = len(revisions) branches = {} # latest version number in each branch ancestors = {} # parent branch for cs in changesets: if opts["ancestors"]: if cs.branch not in branches and cs.parents and cs.parents[0].id: ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch, cs.parents[0].id) branches[cs.branch] = cs.id # limit by branches if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]: continue if not off: # Note: trailing spaces on several lines here are needed to have # bug-for-bug compatibility with cvsps. ui.write('---------------------\n') ui.write(('PatchSet %d \n' % cs.id)) ui.write(('Date: %s\n' % util.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))) ui.write(('Author: %s\n' % cs.author)) ui.write(('Branch: %s\n' % (cs.branch or 'HEAD'))) ui.write( ('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1], ','.join(cs.tags) or '(none)'))) if cs.branchpoints: ui.write(('Branchpoints: %s \n') % ', '.join(sorted(cs.branchpoints))) if opts["parents"] and cs.parents: if len(cs.parents) > 1: ui.write(('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))) else: ui.write(('Parent: %d\n' % cs.parents[0].id)) if opts["ancestors"]: b = cs.branch r = [] while b: b, c = ancestors[b] r.append('%s:%d:%d' % (b or "HEAD", c, branches[b])) if r: ui.write(('Ancestors: %s\n' % (','.join(r)))) ui.write(('Log:\n')) ui.write('%s\n\n' % cs.comment) ui.write(('Members: \n')) for f in cs.entries: fn = f.file if fn.startswith(opts["prefix"]): fn = fn[len(opts["prefix"]):] ui.write( '\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL', '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead])) ui.write('\n') # have we seen the start tag? if revisions and off: if revisions[0] == str(cs.id) or \ revisions[0] in cs.tags: off = False # see if we reached the end tag if len(revisions) > 1 and not off: if revisions[1] == str(cs.id) or \ revisions[1] in cs.tags: break
def _parse(self, ui, path): "Prepare list of P4 filenames and revisions to import" ui.status(_('reading p4 views\n')) # read client spec or view if "/" in path: self._parse_view(path) if path.startswith("//") and path.endswith("/..."): views = {path[:-3]: ""} else: views = {"//": ""} else: cmd = 'p4 -G client -o "%s"' % path clientspec = marshal.load(util.popen(cmd, mode='rb')) views = {} for client in clientspec: if client.startswith("View"): sview, cview = clientspec[client].split() self._parse_view(sview) if sview.endswith("...") and cview.endswith("..."): sview = sview[:-3] cview = cview[:-3] cview = cview[2:] cview = cview[cview.find("/") + 1:] views[sview] = cview # list of changes that affect our source files self.p4changes = self.p4changes.keys() self.p4changes.sort(key=int) # list with depot pathnames, longest first vieworder = views.keys() vieworder.sort(key=len, reverse=True) # handle revision limiting startrev = self.ui.config('convert', 'p4.startrev', default=0) self.p4changes = [ x for x in self.p4changes if ((not startrev or int(x) >= int(startrev)) and ( not self.rev or int(x) <= int(self.rev))) ] # now read the full changelists to get the list of file revisions ui.status(_('collecting p4 changelists\n')) lastid = None for change in self.p4changes: cmd = "p4 -G describe %s" % change stdout = util.popen(cmd, mode='rb') d = marshal.load(stdout) desc = self.recode(d["desc"]) shortdesc = desc.split("\n", 1)[0] t = '%s %s' % (d["change"], repr(shortdesc)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') if lastid: parents = [lastid] else: parents = [] date = (int(d["time"]), 0) # timezone not set c = commit(author=self.recode(d["user"]), date=util.datestr(date), parents=parents, desc=desc, branch='', extra={"p4": change}) files = [] i = 0 while ("depotFile%d" % i) in d and ("rev%d" % i) in d: oldname = d["depotFile%d" % i] filename = None for v in vieworder: if oldname.startswith(v): filename = views[v] + oldname[len(v):] break if filename: files.append((filename, d["rev%d" % i])) self.depotname[filename] = oldname i += 1 self.changeset[change] = c self.files[change] = files lastid = change if lastid: self.heads = [lastid]
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format:
    "2009-08-18 13:00:13 +0200 (Tue, 18 Aug 2009)".
    '''
    return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
def patchbomb(ui, repo, *revs, **opts): '''send changesets by email By default, diffs are sent in the format generated by hg export, one per message. The series starts with a "[PATCH 0 of N]" introduction, which describes the series as a whole. Each patch email has a Subject line of "[PATCH M of N] ...", using the first line of the changeset description as the subject text. The message contains two or three body parts. First, the rest of the changeset description. Next, (optionally) if the diffstat program is installed, the result of running diffstat on the patch. Finally, the patch itself, as generated by "hg export". With --outgoing, emails will be generated for patches not found in the destination repository (or only those which are ancestors of the specified revisions if any are provided) With --bundle, changesets are selected as for --outgoing, but a single email containing a binary Mercurial bundle as an attachment will be sent. Examples: hg email -r 3000 # send patch 3000 only hg email -r 3000 -r 3001 # send patches 3000 and 3001 hg email -r 3000:3005 # send patches 3000 through 3005 hg email 3000 # send patch 3000 (deprecated) hg email -o # send all patches not in default hg email -o DEST # send all patches not in DEST hg email -o -r 3000 # send all ancestors of 3000 not in default hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST hg email -b # send bundle of all patches not in default hg email -b DEST # send bundle of all patches not in DEST hg email -b -r 3000 # bundle of all ancestors of 3000 not in default hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST Before using this command, you will need to enable email in your hgrc. See the [email] section in hgrc(5) for details. ''' def prompt(prompt, default=None, rest=': ', empty_ok=False): if not ui.interactive: return default if default: prompt += ' [%s]' % default prompt += rest while True: r = ui.prompt(prompt, default=default) if r: return r if default is not None: return default if empty_ok: return r ui.warn(_('Please enter a valid value.\n')) def confirm(s, denial): if not prompt(s, default='y', rest='? 
').lower().startswith('y'): raise util.Abort(denial) def cdiffstat(summary, patchlines): s = patch.diffstat(patchlines) if s: if summary: ui.write(summary, '\n') ui.write(s, '\n') confirm(_('Does the diffstat above look okay'), _('diffstat rejected')) elif s is None: ui.warn(_('No diffstat information available.\n')) s = '' return s def makepatch(patch, idx, total): desc = [] node = None body = '' for line in patch: if line.startswith('#'): if line.startswith('# Node ID'): node = line.split()[-1] continue if line.startswith('diff -r') or line.startswith('diff --git'): break desc.append(line) if not node: raise ValueError if opts['attach']: body = ('\n'.join(desc[1:]).strip() or 'Patch subject is complete summary.') body += '\n\n\n' if opts.get('plain'): while patch and patch[0].startswith('# '): patch.pop(0) if patch: patch.pop(0) while patch and not patch[0].strip(): patch.pop(0) if opts.get('diffstat'): body += cdiffstat('\n'.join(desc), patch) + '\n\n' if opts.get('attach') or opts.get('inline'): msg = email.MIMEMultipart.MIMEMultipart() if body: msg.attach(email.MIMEText.MIMEText(body, 'plain')) p = email.MIMEText.MIMEText('\n'.join(patch), 'x-patch') binnode = bin(node) # if node is mq patch, it will have patch file name as tag patchname = [ t for t in repo.nodetags(binnode) if t.endswith('.patch') or t.endswith('.diff') ] if patchname: patchname = patchname[0] elif total > 1: patchname = cmdutil.make_filename(repo, '%b-%n.patch', binnode, idx, total) else: patchname = cmdutil.make_filename(repo, '%b.patch', binnode) disposition = 'inline' if opts['attach']: disposition = 'attachment' p['Content-Disposition'] = disposition + '; filename=' + patchname msg.attach(p) else: body += '\n'.join(patch) msg = email.MIMEText.MIMEText(body) subj = desc[0].strip().rstrip('. 
') if total == 1: subj = '[PATCH] ' + (opts.get('subject') or subj) else: tlen = len(str(total)) subj = '[PATCH %0*d of %d] %s' % (tlen, idx, total, subj) msg['Subject'] = subj msg['X-Mercurial-Node'] = node return msg def outgoing(dest, revs): '''Return the revisions present locally but not in dest''' dest = ui.expandpath(dest or 'default-push', dest or 'default') revs = [repo.lookup(rev) for rev in revs] other = hg.repository(ui, dest) ui.status(_('comparing with %s\n') % dest) o = repo.findoutgoing(other) if not o: ui.status(_("no changes found\n")) return [] o = repo.changelog.nodesbetween(o, revs or None)[0] return [str(repo.changelog.rev(r)) for r in o] def getbundle(dest): tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-') tmpfn = os.path.join(tmpdir, 'bundle') try: commands.bundle(ui, repo, tmpfn, dest, **opts) return open(tmpfn, 'rb').read() finally: try: os.unlink(tmpfn) except: pass os.rmdir(tmpdir) if not (opts.get('test') or opts.get('mbox')): # really sending mail.validateconfig(ui) if not (revs or opts.get('rev') or opts.get('outgoing') or opts.get('bundle')): raise util.Abort(_('specify at least one changeset with -r or -o')) cmdutil.setremoteconfig(ui, opts) if opts.get('outgoing') and opts.get('bundle'): raise util.Abort( _("--outgoing mode always on with --bundle;" " do not re-specify --outgoing")) if opts.get('outgoing') or opts.get('bundle'): if len(revs) > 1: raise util.Abort(_("too many destinations")) dest = revs and revs[0] or None revs = [] if opts.get('rev'): if revs: raise util.Abort(_('use only one form to specify the revision')) revs = opts.get('rev') if opts.get('outgoing'): revs = outgoing(dest, opts.get('rev')) if opts.get('bundle'): opts['revs'] = revs # start if opts.get('date'): start_time = util.parsedate(opts.get('date')) else: start_time = util.makedate() def genmsgid(id): return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn()) def getdescription(body, sender): if opts.get('desc'): body = open(opts.get('desc')).read() else: ui.write( _('\nWrite the introductory message for the ' 'patch series.\n\n')) body = ui.edit(body, sender) return body def getexportmsgs(): patches = [] class exportee: def __init__(self, container): self.lines = [] self.container = container self.name = 'email' def write(self, data): self.lines.append(data) def close(self): self.container.append(''.join(self.lines).split('\n')) self.lines = [] commands.export( ui, repo, *revs, **{ 'output': exportee(patches), 'switch_parent': False, 'text': None, 'git': opts.get('git') }) jumbo = [] msgs = [] ui.write( _('This patch series consists of %d patches.\n\n') % len(patches)) for p, i in zip(patches, xrange(len(patches))): jumbo.extend(p) msgs.append(makepatch(p, i + 1, len(patches))) if len(patches) > 1: tlen = len(str(len(patches))) subj = '[PATCH %0*d of %d] %s' % (tlen, 0, len(patches), opts.get('subject') or prompt( 'Subject:', rest=' [PATCH %0*d of %d] ' % (tlen, 0, len(patches)))) body = '' if opts.get('diffstat'): d = cdiffstat(_('Final summary:\n'), jumbo) if d: body = '\n' + d body = getdescription(body, sender) msg = email.MIMEText.MIMEText(body) msg['Subject'] = subj msgs.insert(0, msg) return msgs def getbundlemsgs(bundle): subj = (opts.get('subject') or prompt('Subject:', default='A bundle for your repository')) body = getdescription('', sender) msg = email.MIMEMultipart.MIMEMultipart() if body: msg.attach(email.MIMEText.MIMEText(body, 'plain')) datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle') datapart.set_payload(bundle) 
datapart.add_header('Content-Disposition', 'attachment', filename='bundle.hg') email.Encoders.encode_base64(datapart) msg.attach(datapart) msg['Subject'] = subj return [msg] sender = (opts.get('from') or ui.config('email', 'from') or ui.config('patchbomb', 'from') or prompt('From', ui.username())) if opts.get('bundle'): msgs = getbundlemsgs(getbundle(dest)) else: msgs = getexportmsgs() def getaddrs(opt, prpt, default=None): addrs = opts.get(opt) or (ui.config('email', opt) or ui.config( 'patchbomb', opt) or prompt(prpt, default=default)).split(',') return [a.strip() for a in addrs if a.strip()] to = getaddrs('to', 'To') cc = getaddrs('cc', 'Cc', '') bcc = opts.get('bcc') or (ui.config('email', 'bcc') or ui.config( 'patchbomb', 'bcc') or '').split(',') bcc = [a.strip() for a in bcc if a.strip()] ui.write('\n') parent = None sender_addr = email.Utils.parseaddr(sender)[1] sendmail = None for m in msgs: try: m['Message-Id'] = genmsgid(m['X-Mercurial-Node']) except TypeError: m['Message-Id'] = genmsgid('patchbomb') if parent: m['In-Reply-To'] = parent else: parent = m['Message-Id'] m['Date'] = util.datestr(start_time, "%a, %d %b %Y %H:%M:%S %1%2") start_time = (start_time[0] + 1, start_time[1]) m['From'] = sender m['To'] = ', '.join(to) if cc: m['Cc'] = ', '.join(cc) if bcc: m['Bcc'] = ', '.join(bcc) if opts.get('test'): ui.status('Displaying ', m['Subject'], ' ...\n') ui.flush() if 'PAGER' in os.environ: fp = os.popen(os.environ['PAGER'], 'w') else: fp = ui try: fp.write(m.as_string(0)) fp.write('\n') except IOError, inst: if inst.errno != errno.EPIPE: raise if fp is not ui: fp.close() elif opts.get('mbox'): ui.status('Writing ', m['Subject'], ' ...\n') fp = open(opts.get('mbox'), 'In-Reply-To' in m and 'ab+' or 'wb+') date = util.datestr(start_time, '%a %b %d %H:%M:%S %Y') fp.write('From %s %s\n' % (sender_addr, date)) fp.write(m.as_string(0)) fp.write('\n\n') fp.close()
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format:
    "2009-08-18 11:00:13Z".
    '''
    return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
def parselogentry(orig_paths, revnum, author, date, message): """Return the parsed commit object or None, and True if the revision is a branch root. """ self.ui.debug("parsing revision %d (%d changes)\n" % (revnum, len(orig_paths))) branched = False rev = self.revid(revnum) # branch log might return entries for a parent we already have if rev in self.commits or revnum < to_revnum: return None, branched parents = [] # check whether this revision is the start of a branch or part # of a branch renaming orig_paths = sorted(orig_paths.iteritems()) root_paths = [(p, e) for p, e in orig_paths if self.module.startswith(p)] if root_paths: path, ent = root_paths[-1] if ent.copyfrom_path: branched = True newpath = ent.copyfrom_path + self.module[len(path):] # ent.copyfrom_rev may not be the actual last revision previd = self.latest(newpath, ent.copyfrom_rev) if previd is not None: prevmodule, prevnum = revsplit(previd)[1:] if prevnum >= self.startrev: parents = [previd] self.ui.note( _('found parent of branch %s at %d: %s\n') % (self.module, prevnum, prevmodule)) else: self.ui.debug("no copyfrom path, don't know what to do.\n") paths = [] # filter out unrelated paths for path, ent in orig_paths: if self.getrelpath(path) is None: continue paths.append((path, ent)) # Example SVN datetime. Includes microseconds. # ISO-8601 conformant # '2007-01-04T17:35:00.902377Z' date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) log = message and self.recode(message) or '' author = author and self.recode(author) or '' try: branch = self.module.split("/")[-1] if branch == self.trunkname: branch = None except IndexError: branch = None cset = commit(author=author, date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), desc=log, parents=parents, branch=branch, rev=rev) self.commits[rev] = cset # The parents list is *shared* among self.paths and the # commit object. Both will be updated below. self.paths[rev] = (paths, cset.parents) if self.child_cset and not self.child_cset.parents: self.child_cset.parents[:] = [rev] self.child_cset = cset return cset, branched
import re, shutil, tempfile

commands.optionalrepo += ' kwdemo'

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
                ' log outgoing push rename rollback tip verify'
                ' convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = ('merge record resolve qfold qimport qnew qpush qrefresh qrecord'
              ' transplant')

# provide cvs-like UTC date filter
utcdate = lambda x: util.datestr((x[0], 0), '%Y/%m/%d %H:%M:%S')

# make keyword tools accessible
kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}


class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''
    templates = {
        'Revision': '{node|short}',
        'Author': '{author|user}',
        'Date': '{date|utcdate}',
        'RCSfile': '{file|basename},v',
def _parse(self): if self.changeset is not None: return self.changeset = {} maxrev = 0 cmd = self.cmd if self.rev: # TODO: handle tags try: # patchset number? maxrev = int(self.rev) except ValueError: try: # date util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S']) cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev) except util.Abort: raise util.Abort( _('revision %s is not a patchset number or date') % self.rev) d = os.getcwd() try: os.chdir(self.path) id = None state = 0 filerevids = {} if self.builtin: # builtin cvsps code self.ui.status(_('using builtin cvsps\n')) cache = 'update' if not self.ui.configbool('convert', 'cvsps.cache', True): cache = None db = cvsps.createlog(self.ui, cache=cache) db = cvsps.createchangeset( self.ui, db, fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)), mergeto=self.ui.config('convert', 'cvsps.mergeto', None), mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None)) for cs in db: if maxrev and cs.id > maxrev: break id = str(cs.id) cs.author = self.recode(cs.author) self.lastbranch[cs.branch] = id cs.comment = self.recode(cs.comment) date = util.datestr(cs.date) self.tags.update(dict.fromkeys(cs.tags, id)) files = {} for f in cs.entries: files[f.file] = "%s%s" % ('.'.join( [str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]) # add current commit to set c = commit(author=cs.author, date=date, parents=[str(p.id) for p in cs.parents], desc=cs.comment, branch=cs.branch or '') self.changeset[id] = c self.files[id] = files else: # external cvsps for l in util.popen(cmd): if state == 0: # header if l.startswith("PatchSet"): id = l[9:-2] if maxrev and int(id) > maxrev: # ignore everything state = 3 elif l.startswith("Date:"): date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"]) date = util.datestr(date) elif l.startswith("Branch:"): branch = l[8:-1] self.parent[id] = self.lastbranch.get( branch, 'bad') self.lastbranch[branch] = id elif l.startswith("Ancestor branch:"): ancestor = l[17:-1] # figure out the parent later self.parent[id] = self.lastbranch[ancestor] elif l.startswith("Author:"): author = self.recode(l[8:-1]) elif l.startswith("Tag:") or l.startswith("Tags:"): t = l[l.index(':') + 1:] t = [ut.strip() for ut in t.split(',')] if (len(t) > 1) or (t[0] and (t[0] != "(none)")): self.tags.update(dict.fromkeys(t, id)) elif l.startswith("Log:"): # switch to gathering log state = 1 log = "" elif state == 1: # log if l == "Members: \n": # switch to gathering members files = {} oldrevs = [] log = self.recode(log[:-1]) state = 2 else: # gather log log += l elif state == 2: # members if l == "\n": # start of next entry state = 0 p = [self.parent[id]] if id == "1": p = [] if branch == "HEAD": branch = "" if branch: latest = 0 # the last changeset that contains a base # file is our parent for r in oldrevs: latest = max(filerevids.get(r, 0), latest) if latest: p = [latest] # add current commit to set c = commit(author=author, date=date, parents=p, desc=log, branch=branch) self.changeset[id] = c self.files[id] = files else: colon = l.rfind(':') file = l[1:colon] rev = l[colon + 1:-2] oldrev, rev = rev.split("->") files[file] = rev # save some information for identifying branch points oldrevs.append("%s:%s" % (oldrev, file)) filerevids["%s:%s" % (rev, file)] = id elif state == 3: # swallow all input continue self.heads = self.lastbranch.values() finally: os.chdir(d)
def parselogentry(orig_paths, revnum, author, date, message): """Return the parsed commit object or None, and True if the revision is a branch root. """ self.ui.debug("parsing revision %d (%d changes)\n" % (revnum, len(orig_paths))) branched = False rev = self.revid(revnum) # branch log might return entries for a parent we already have if (rev in self.commits or revnum < to_revnum): return None, branched parents = [] # check whether this revision is the start of a branch or part # of a branch renaming orig_paths = orig_paths.items() orig_paths.sort() root_paths = [(p, e) for p, e in orig_paths if self.module.startswith(p)] if root_paths: path, ent = root_paths[-1] if ent.copyfrom_path: # If dir was moved while one of its file was removed # the log may look like: # A /dir (from /dir:x) # A /dir/a (from /dir/a:y) # A /dir/b (from /dir/b:z) # ... # for all remaining children. # Let's take the highest child element from rev as source. copies = [ (p, e) for p, e in orig_paths[:-1] if isdescendantof(ent.copyfrom_path, e.copyfrom_path) ] fromrev = max([e.copyfrom_rev for p, e in copies] + [ent.copyfrom_rev]) branched = True newpath = ent.copyfrom_path + self.module[len(path):] # ent.copyfrom_rev may not be the actual last revision previd = self.latest(newpath, fromrev) if previd is not None: prevmodule, prevnum = self.revsplit(previd)[1:] if prevnum >= self.startrev: parents = [previd] self.ui.note( 'found parent of branch %s at %d: %s\n' % (self.module, prevnum, prevmodule)) else: self.ui.debug("No copyfrom path, don't know what to do.\n") paths = [] # filter out unrelated paths for path, ent in orig_paths: if self.getrelpath(path) is None: continue paths.append((path, ent)) # Example SVN datetime. Includes microseconds. # ISO-8601 conformant # '2007-01-04T17:35:00.902377Z' date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) log = message and self.recode(message) or '' author = author and self.recode(author) or '' try: branch = self.module.split("/")[-1] if branch == 'trunk': branch = '' except IndexError: branch = None cset = commit(author=author, date=util.datestr(date), desc=log, parents=parents, branch=branch, rev=rev.encode('utf-8')) self.commits[rev] = cset # The parents list is *shared* among self.paths and the # commit object. Both will be updated below. self.paths[rev] = (paths, cset.parents) if self.child_cset and not self.child_cset.parents: self.child_cset.parents[:] = [rev] self.child_cset = cset return cset, branched
def createlog(ui, directory=None, root="", rlog=True, cache=None): '''Collect the CVS rlog''' # Because we store many duplicate commit log messages, reusing strings # saves a lot of memory and pickle storage space. _scache = {} def scache(s): "return a shared version of a string" return _scache.setdefault(s, s) ui.status(_('collecting CVS rlog\n')) log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use re_00 = re.compile('RCS file: (.+)$') re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') re_02 = re.compile('cvs (r?log|server): (.+)\n$') re_03 = re.compile("(Cannot access.+CVSROOT)|" "(can't create temporary directory.+)$") re_10 = re.compile('Working file: (.+)$') re_20 = re.compile('symbolic names:') re_30 = re.compile('\t(.+): ([\\d.]+)$') re_31 = re.compile('----------------------------$') re_32 = re.compile('=======================================' '======================================$') re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' r'(\s+commitid:\s+([^;]+);)?' r'(.*mergepoint:\s+([^;]+);)?') re_70 = re.compile('branches: (.+);$') file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS if directory is None: # Current working directory # Get the real directory in the repository try: prefix = open(os.path.join('CVS', 'Repository')).read().strip() directory = prefix if prefix == ".": prefix = "" except IOError: raise logerror(_('not a CVS sandbox')) if prefix and not prefix.endswith(pycompat.ossep): prefix += pycompat.ossep # Use the Root file in the sandbox, if it exists try: root = open(os.path.join('CVS', 'Root')).read().strip() except IOError: pass if not root: root = encoding.environ.get('CVSROOT', '') # read log cache if one exists oldlog = [] date = None if cache: cachedir = os.path.expanduser('~/.hg.cvsps') if not os.path.exists(cachedir): os.mkdir(cachedir) # The cvsps cache pickle needs a uniquified name, based on the # repository location. The address may have all sort of nasties # in it, slashes, colons and such. So here we take just the # alphanumeric characters, concatenated in a way that does not # mix up the various components, so that # :pserver:user@server:/path # and # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) oldlog = pickle.load(open(cachefile)) for e in oldlog: if not (util.safehasattr(e, 'branchpoints') and util.safehasattr(e, 'commitid') and util.safehasattr(e, 'mergepoint')): ui.status(_('ignoring old cache\n')) oldlog = [] break ui.note(_('cache has %d log entries\n') % len(oldlog)) except Exception as e: ui.note(_('error reading cache: %r\n') % e) if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2') # build the CVS commandline cmd = ['cvs', '-q'] if root: cmd.append('-d%s' % root) p = util.normpath(getrepopath(root)) if not p.endswith('/'): p += '/' if prefix: # looks like normpath replaces "" by "." 
prefix = p + util.normpath(prefix) else: prefix = p cmd.append(['log', 'rlog'][rlog]) if date: # no space between option and date string cmd.append('-d>%s' % date) cmd.append(directory) # state machine begins here tags = {} # dictionary of revisions on current file with their tags branchmap = {} # mapping between branch names and revision numbers rcsmap = {} state = 0 store = False # set when a new record can be appended cmd = [util.shellquote(arg) for arg in cmd] ui.note(_("running %s\n") % (' '.join(cmd))) ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root)) pfp = util.popen(' '.join(cmd)) peek = pfp.readline() while True: line = peek if line == '': break peek = pfp.readline() if line.endswith('\n'): line = line[:-1] #ui.debug('state=%d line=%r\n' % (state, line)) if state == 0: # initial state, consume input until we see 'RCS file' match = re_00.match(line) if match: rcs = match.group(1) tags = {} if rlog: filename = util.normpath(rcs[:-2]) if filename.startswith(prefix): filename = filename[len(prefix):] if filename.startswith('/'): filename = filename[1:] if filename.startswith('Attic/'): filename = filename[6:] else: filename = filename.replace('/Attic/', '/') state = 2 continue state = 1 continue match = re_01.match(line) if match: raise logerror(match.group(1)) match = re_02.match(line) if match: raise logerror(match.group(2)) if re_03.match(line): raise logerror(line) elif state == 1: # expect 'Working file' (only when using log instead of rlog) match = re_10.match(line) assert match, _('RCS file must be followed by working file') filename = util.normpath(match.group(1)) state = 2 elif state == 2: # expect 'symbolic names' if re_20.match(line): branchmap = {} state = 3 elif state == 3: # read the symbolic names and store as tags match = re_30.match(line) if match: rev = [int(x) for x in match.group(2).split('.')] # Convert magic branch number to an odd-numbered one revn = len(rev) if revn > 3 and (revn % 2) == 0 and rev[-2] == 0: rev = rev[:-2] + rev[-1:] rev = tuple(rev) if rev not in tags: tags[rev] = [] tags[rev].append(match.group(1)) branchmap[match.group(1)] = match.group(2) elif re_31.match(line): state = 5 elif re_32.match(line): state = 0 elif state == 4: # expecting '------' separator before first revision if re_31.match(line): state = 5 else: assert not re_32.match(line), _('must have at least ' 'some revisions') elif state == 5: # expecting revision number and possibly (ignored) lock indication # we create the logentry here from values stored in states 0 to 4, # as this state is re-entered for subsequent revisions of a file. 
match = re_50.match(line) assert match, _('expected revision number') e = logentry(rcs=scache(rcs), file=scache(filename), revision=tuple( [int(x) for x in match.group(1).split('.')]), branches=[], parent=None, commitid=None, mergepoint=None, branchpoints=set()) state = 6 elif state == 6: # expecting date, author, state, lines changed match = re_60.match(line) assert match, _('revision must be followed by date line') d = match.group(1) if d[2] == '/': # Y2K d = '19' + d if len(d.split()) != 3: # cvs log dates always in GMT d = d + ' UTC' e.date = util.parsedate(d, [ '%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S' ]) e.author = scache(match.group(2)) e.dead = match.group(3).lower() == 'dead' if match.group(5): if match.group(6): e.lines = (int(match.group(5)), int(match.group(6))) else: e.lines = (int(match.group(5)), 0) elif match.group(6): e.lines = (0, int(match.group(6))) else: e.lines = None if match.group(7): # cvs 1.12 commitid e.commitid = match.group(8) if match.group(9): # cvsnt mergepoint myrev = match.group(10).split('.') if len(myrev) == 2: # head e.mergepoint = 'HEAD' else: myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]]) branches = [b for b in branchmap if branchmap[b] == myrev] assert len(branches) == 1, ('unknown branch: %s' % e.mergepoint) e.mergepoint = branches[0] e.comment = [] state = 7 elif state == 7: # read the revision numbers of branches that start at this revision # or store the commit log message otherwise m = re_70.match(line) if m: e.branches = [ tuple([int(y) for y in x.strip().split('.')]) for x in m.group(1).split(';') ] state = 8 elif re_31.match(line) and re_50.match(peek): state = 5 store = True elif re_32.match(line): state = 0 store = True else: e.comment.append(line) elif state == 8: # store commit log message if re_31.match(line): cpeek = peek if cpeek.endswith('\n'): cpeek = cpeek[:-1] if re_50.match(cpeek): state = 5 store = True else: e.comment.append(line) elif re_32.match(line): state = 0 store = True else: e.comment.append(line) # When a file is added on a branch B1, CVS creates a synthetic # dead trunk revision 1.1 so that the branch has a root. # Likewise, if you merge such a file to a later branch B2 (one # that already existed when the file was added on B1), CVS # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop # these revisions now, but mark them synthetic so # createchangeset() can take care of them. if (store and e.dead and e.revision[-1] == 1 and # 1.1 or 1.1.x.1 len(e.comment) == 1 and file_added_re.match(e.comment[0])): ui.debug('found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])) e.synthetic = True if store: # clean up the results and save in the log. 
store = False e.tags = sorted([scache(x) for x in tags.get(e.revision, [])]) e.comment = scache('\n'.join(e.comment)) revn = len(e.revision) if revn > 3 and (revn % 2) == 0: e.branch = tags.get(e.revision[:-1], [None])[0] else: e.branch = None # find the branches starting from this revision branchpoints = set() for branch, revision in branchmap.iteritems(): revparts = tuple([int(i) for i in revision.split('.')]) if len(revparts) < 2: # bad tags continue if revparts[-2] == 0 and revparts[-1] % 2 == 0: # normal branch if revparts[:-2] == e.revision: branchpoints.add(branch) elif revparts == (1, 1, 1): # vendor branch if revparts in e.branches: branchpoints.add(branch) e.branchpoints = branchpoints log.append(e) rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs if len(log) % 100 == 0: ui.status( util.ellipsis('%d %s' % (len(log), e.file), 80) + '\n') log.sort(key=lambda x: (x.rcs, x.revision)) # find parent revisions of individual files versions = {} for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)): rcs = e.rcs.replace('/Attic/', '/') if rcs in rcsmap: e.rcs = rcsmap[rcs] branch = e.revision[:-1] versions[(e.rcs, branch)] = e.revision for e in log: branch = e.revision[:-1] p = versions.get((e.rcs, branch), None) if p is None: p = e.revision[:-2] e.parent = p versions[(e.rcs, branch)] = e.revision # update the log cache if cache: if log: # join up the old and new logs log.sort(key=lambda x: x.date) if oldlog and oldlog[-1].date >= log[0].date: raise logerror( _('log cache overlaps with new log entries,' ' re-run without cache.')) log = oldlog + log # write the new cachefile ui.note(_('writing cvs log cache %s\n') % cachefile) pickle.dump(log, open(cachefile, 'w')) else: log = oldlog ui.status(_('%d log entries\n') % len(log)) hook.hook(ui, None, "cvslog", True, log=log) return log
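createlog's cache file naming, described in the comment above (keep only alphanumeric runs so ':pserver:user@server:/path' and '/pserver/user/server/path' cannot collide), is easy to exercise in isolation. cache_file_name below is an invented name that mirrors that logic; it is not a function Mercurial exposes.

# Standalone sketch of the cvsps cache-file naming above: split the CVS
# root on ':', keep only alphanumeric runs in each component, and join
# the components with '.' so different roots map to different files.
# cache_file_name is an illustrative name, not part of Mercurial's API.
import os
import re

def cache_file_name(root, directory, cachedir="~/.hg.cvsps"):
    parts = root.split(":") + [directory, "cache"]
    parts = ["-".join(re.findall(r"\w+", s)) for s in parts if s]
    return os.path.join(os.path.expanduser(cachedir),
                        ".".join(s for s in parts if s))

# The two roots from the comment map to distinct cache files:
print(cache_file_name(":pserver:user@server:/path", "module"))
print(cache_file_name("/pserver/user/server/path", "module"))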
try: self._blackbox = self._openlogfile() except (IOError, OSError), err: self.debug('warning: cannot write to blackbox.log: %s\n' % err.strerror) del self._bbopener self._blackbox = None blackbox = self._blackbox else: # certain ui instances exist outside the context of # a repo, so just default to the last blackbox that # was seen. blackbox = lastblackbox if blackbox: date = util.datestr(None, '%Y/%m/%d %H:%M:%S') user = util.getuser() formattedmsg = msg[0] % msg[1:] try: blackbox.write('%s %s> %s' % (date, user, formattedmsg)) except IOError, err: self.debug('warning: cannot write to blackbox.log: %s\n' % err.strerror) lastblackbox = blackbox def setrepo(self, repo): self._bbopener = repo.opener ui.__class__ = blackboxui
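The blackbox fragment above stamps each entry as 'date user> message' via util.datestr(None, '%Y/%m/%d %H:%M:%S') and util.getuser(). A tiny standard-library stand-in, purely for illustration:

# Minimal stand-in for the blackbox log-line format above, using
# time.strftime/getpass instead of util.datestr()/util.getuser().
import getpass
import time

def blackboxline(msg):
    date = time.strftime("%Y/%m/%d %H:%M:%S")     # local time
    return "%s %s> %s" % (date, getpass.getuser(), msg)

print(blackboxline("incoming changes"))
# e.g. "2014/06/01 12:00:00 alice> incoming changes"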
def send(self, ctx, count, data): '''send message.''' # Select subscribers by revset subs = set() for sub, spec in self.subs: if spec is None: subs.add(sub) continue revs = self.repo.revs('%r and %d:', spec, ctx.rev()) if len(revs): subs.add(sub) continue if len(subs) == 0: self.ui.debug('notify: no subscribers to selected repo ' 'and revset\n') return p = email.Parser.Parser() try: msg = p.parsestr(data) except email.Errors.MessageParseError as inst: raise error.Abort(inst) # store sender and subject sender, subject = msg['From'], msg['Subject'] del msg['From'], msg['Subject'] if not msg.is_multipart(): # create fresh mime message from scratch # (multipart templates must take care of this themselves) headers = msg.items() payload = msg.get_payload() # for notification prefer readability over data precision msg = mail.mimeencode(self.ui, payload, self.charsets, self.test) # reinstate custom headers for k, v in headers: msg[k] = v msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") # try to make subject line exist and be useful if not subject: if count > 1: subject = _('%s: %d new changesets') % (self.root, count) else: s = ctx.description().lstrip().split('\n', 1)[0].rstrip() subject = '%s: %s' % (self.root, s) maxsubject = int(self.ui.config('notify', 'maxsubject', 67)) if maxsubject: subject = util.ellipsis(subject, maxsubject) msg['Subject'] = mail.headencode(self.ui, subject, self.charsets, self.test) # try to make message have proper sender if not sender: sender = self.ui.config('email', 'from') or self.ui.username() if '@' not in sender or '@localhost' in sender: sender = self.fixmail(sender) msg['From'] = mail.addressencode(self.ui, sender, self.charsets, self.test) msg['X-Hg-Notification'] = 'changeset %s' % ctx if not msg['Message-Id']: msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' % (ctx, int(time.time()), hash(self.repo.root), socket.getfqdn())) msg['To'] = ', '.join(sorted(subs)) msgtext = msg.as_string() if self.test: self.ui.write(msgtext) if not msgtext.endswith('\n'): self.ui.write('\n') else: self.ui.status(_('notify: sending %d subscribers %d changes\n') % (len(subs), count)) mail.sendmail(self.ui, util.email(msg['From']), subs, msgtext, mbox=self.mbox)
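send() above assembles the notification headers by hand: an RFC 2822 Date (util.datestr with '%a, %d %b %Y %H:%M:%S %1%2'), an X-Hg-Notification marker, and a synthetic Message-Id. A standalone sketch using the stdlib email utilities; notification_headers and all values are invented for illustration.

# Sketch of the notification headers assembled in send() above, using
# email.utils instead of util.datestr(); the function name and all
# values are invented for illustration.
import email.utils
import socket
import time

def notification_headers(node, reporoot, subject, sender, recipients):
    return {
        "Date": email.utils.formatdate(localtime=True),   # RFC 2822 date
        "Subject": subject,
        "From": sender,
        "To": ", ".join(sorted(recipients)),
        "X-Hg-Notification": "changeset %s" % node,
        "Message-Id": "<hg.%s.%d.%d@%s>" % (node, int(time.time()),
                                            hash(reporoot),
                                            socket.getfqdn()),
    }

hdrs = notification_headers("a1b2c3d4e5f6", "/srv/repo", "repo: fix dates",
                            "hg@example.com", {"dev@example.com"})
for k, v in sorted(hdrs.items()):
    print("%s: %s" % (k, v))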
def parselogentry(orig_paths, revnum, author, date, message): """Return the parsed commit object or None, and True if the revision is a branch root. """ self.ui.debug("parsing revision %d (%d changes)\n" % (revnum, len(orig_paths))) branched = False rev = self.revid(revnum) # branch log might return entries for a parent we already have if rev in self.commits or revnum < to_revnum: return None, branched parents = [] # check whether this revision is the start of a branch or part # of a branch renaming orig_paths = sorted(orig_paths.iteritems()) root_paths = [(p, e) for p, e in orig_paths if self.module.startswith(p)] if root_paths: path, ent = root_paths[-1] if ent.copyfrom_path: branched = True newpath = ent.copyfrom_path + self.module[len(path):] # ent.copyfrom_rev may not be the actual last revision previd = self.latest(newpath, ent.copyfrom_rev) if previd is not None: prevmodule, prevnum = self.revsplit(previd)[1:] if prevnum >= self.startrev: parents = [previd] self.ui.note( _('found parent of branch %s at %d: %s\n') % (self.module, prevnum, prevmodule)) else: self.ui.debug("no copyfrom path, don't know what to do.\n") paths = [] # filter out unrelated paths for path, ent in orig_paths: if self.getrelpath(path) is None: continue paths.append((path, ent)) # Example SVN datetime. Includes microseconds. # ISO-8601 conformant # '2007-01-04T17:35:00.902377Z' date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) log = message and self.recode(message) or '' author = author and self.recode(author) or '' try: branch = self.module.split("/")[-1] if branch == 'trunk': branch = '' except IndexError: branch = None cset = commit(author=author, date=util.datestr(date), desc=log, parents=parents, branch=branch, rev=rev) self.commits[rev] = cset # The parents list is *shared* among self.paths and the # commit object. Both will be updated below. self.paths[rev] = (paths, cset.parents) if self.child_cset and not self.child_cset.parents: self.child_cset.parents[:] = [rev] self.child_cset = cset return cset, branched
class notifier(object): '''email notification class.''' def __init__(self, ui, repo, hooktype): self.ui = ui cfg = self.ui.config('notify', 'config') if cfg: self.ui.readconfig(cfg, sections=['usersubs', 'reposubs']) self.repo = repo self.stripcount = int(self.ui.config('notify', 'strip', 0)) self.root = self.strip(self.repo.root) self.domain = self.ui.config('notify', 'domain') self.mbox = self.ui.config('notify', 'mbox') self.test = self.ui.configbool('notify', 'test', True) self.charsets = mail._charsets(self.ui) self.subs = self.subscribers() self.merge = self.ui.configbool('notify', 'merge', True) mapfile = self.ui.config('notify', 'style') template = (self.ui.config('notify', hooktype) or self.ui.config('notify', 'template')) if not mapfile and not template: template = deftemplates.get(hooktype) or single_template if template: template = templater.parsestring(template, quoted=False) self.t = cmdutil.changeset_templater(self.ui, self.repo, False, None, template, mapfile, False) def strip(self, path): '''strip leading slashes from local path, turn into web-safe path.''' path = util.pconvert(path) count = self.stripcount while count > 0: c = path.find('/') if c == -1: break path = path[c + 1:] count -= 1 return path def fixmail(self, addr): '''try to clean up email addresses.''' addr = util.email(addr.strip()) if self.domain: a = addr.find('@localhost') if a != -1: addr = addr[:a] if '@' not in addr: return addr + '@' + self.domain return addr def subscribers(self): '''return list of email addresses of subscribers to this repo.''' subs = set() for user, pats in self.ui.configitems('usersubs'): for pat in pats.split(','): if '#' in pat: pat, revs = pat.split('#', 1) else: revs = None if fnmatch.fnmatch(self.repo.root, pat.strip()): subs.add((self.fixmail(user), revs)) for pat, users in self.ui.configitems('reposubs'): if '#' in pat: pat, revs = pat.split('#', 1) else: revs = None if fnmatch.fnmatch(self.repo.root, pat): for user in users.split(','): subs.add((self.fixmail(user), revs)) return [(mail.addressencode(self.ui, s, self.charsets, self.test), r) for s, r in sorted(subs)] def node(self, ctx, **props): '''format one changeset, unless it is a suppressed merge.''' if not self.merge and len(ctx.parents()) > 1: return False self.t.show(ctx, changes=ctx.changeset(), baseurl=self.ui.config('web', 'baseurl'), root=self.repo.root, webroot=self.root, **props) return True def skipsource(self, source): '''true if incoming changes from this source should be skipped.''' ok_sources = self.ui.config('notify', 'sources', 'serve').split() return source not in ok_sources def send(self, ctx, count, data): '''send message.''' # Select subscribers by revset subs = set() for sub, spec in self.subs: if spec is None: subs.add(sub) continue revs = self.repo.revs('%r and %d:', spec, ctx.rev()) if len(revs): subs.add(sub) continue if len(subs) == 0: self.ui.debug('notify: no subscribers to selected repo ' 'and revset\n') return p = email.Parser.Parser() try: msg = p.parsestr(data) except email.Errors.MessageParseError, inst: raise util.Abort(inst) # store sender and subject sender, subject = msg['From'], msg['Subject'] del msg['From'], msg['Subject'] if not msg.is_multipart(): # create fresh mime message from scratch # (multipart templates must take care of this themselves) headers = msg.items() payload = msg.get_payload() # for notification prefer readability over data precision msg = mail.mimeencode(self.ui, payload, self.charsets, self.test) # reinstate custom headers for k, v in headers: msg[k] = v 
msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") # try to make subject line exist and be useful if not subject: if count > 1: subject = _('%s: %d new changesets') % (self.root, count) else: s = ctx.description().lstrip().split('\n', 1)[0].rstrip() subject = '%s: %s' % (self.root, s) maxsubject = int(self.ui.config('notify', 'maxsubject', 67)) if maxsubject: subject = util.ellipsis(subject, maxsubject) msg['Subject'] = mail.headencode(self.ui, subject, self.charsets, self.test) # try to make message have proper sender if not sender: sender = self.ui.config('email', 'from') or self.ui.username() if '@' not in sender or '@localhost' in sender: sender = self.fixmail(sender) msg['From'] = mail.addressencode(self.ui, sender, self.charsets, self.test) msg['X-Hg-Notification'] = 'changeset %s' % ctx if not msg['Message-Id']: msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' % (ctx, int( time.time()), hash(self.repo.root), socket.getfqdn())) msg['To'] = ', '.join(sorted(subs)) msgtext = msg.as_string() if self.test: self.ui.write(msgtext) if not msgtext.endswith('\n'): self.ui.write('\n') else: self.ui.status( _('notify: sending %d subscribers %d changes\n') % (len(subs), count)) mail.sendmail(self.ui, util.email(msg['From']), subs, msgtext, mbox=self.mbox)
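subscribers() in the notifier class above turns [usersubs] and [reposubs] entries into (address, revset) pairs, matching the repository root with fnmatch and honouring an optional '#revset' suffix. The sketch below reproduces just that matching over invented config data.

# Standalone sketch of the [usersubs]/[reposubs] matching in
# subscribers() above; the config dictionaries are invented examples.
import fnmatch

USERSUBS = {                 # user -> comma-separated glob[#revset] list
    "alice@example.com": "/srv/hg/*, /srv/hg/stable#stable",
}
REPOSUBS = {                 # glob[#revset] -> comma-separated users
    "/srv/hg/proj": "bob@example.com,carol@example.com",
}

def subscribers(reporoot):
    subs = set()
    for user, pats in USERSUBS.items():
        for pat in pats.split(","):
            pat, _, revs = pat.strip().partition("#")
            if fnmatch.fnmatch(reporoot, pat):
                subs.add((user, revs or None))
    for pat, users in REPOSUBS.items():
        pat, _, revs = pat.partition("#")
        if fnmatch.fnmatch(reporoot, pat):
            for user in users.split(","):
                subs.add((user.strip(), revs or None))
    return sorted(subs)

print(subscribers("/srv/hg/proj"))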
def createlog(ui, directory=None, root="", rlog=True, cache=None): '''Collect the CVS rlog''' # Because we store many duplicate commit log messages, reusing strings # saves a lot of memory and pickle storage space. _scache = {} def scache(s): "return a shared version of a string" return _scache.setdefault(s, s) ui.status(_('collecting CVS rlog\n')) log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use re_00 = re.compile('RCS file: (.+)$') re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') re_02 = re.compile('cvs (r?log|server): (.+)\n$') re_03 = re.compile("(Cannot access.+CVSROOT)|" "(can't create temporary directory.+)$") re_10 = re.compile('Working file: (.+)$') re_20 = re.compile('symbolic names:') re_30 = re.compile('\t(.+): ([\\d.]+)$') re_31 = re.compile('----------------------------$') re_32 = re.compile('=======================================' '======================================$') re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' r'(\s+commitid:\s+([^;]+);)?' r'(.*mergepoint:\s+([^;]+);)?') re_70 = re.compile('branches: (.+);$') file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS if directory is None: # Current working directory # Get the real directory in the repository try: prefix = open(os.path.join('CVS','Repository')).read().strip() directory = prefix if prefix == ".": prefix = "" except IOError: raise logerror(_('not a CVS sandbox')) if prefix and not prefix.endswith(os.sep): prefix += os.sep # Use the Root file in the sandbox, if it exists try: root = open(os.path.join('CVS','Root')).read().strip() except IOError: pass if not root: root = os.environ.get('CVSROOT', '') # read log cache if one exists oldlog = [] date = None if cache: cachedir = os.path.expanduser('~/.hg.cvsps') if not os.path.exists(cachedir): os.mkdir(cachedir) # The cvsps cache pickle needs a uniquified name, based on the # repository location. The address may have all sort of nasties # in it, slashes, colons and such. So here we take just the # alphanumeric characters, concatenated in a way that does not # mix up the various components, so that # :pserver:user@server:/path # and # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) oldlog = pickle.load(open(cachefile)) for e in oldlog: if not (util.safehasattr(e, 'branchpoints') and util.safehasattr(e, 'commitid') and util.safehasattr(e, 'mergepoint')): ui.status(_('ignoring old cache\n')) oldlog = [] break ui.note(_('cache has %d log entries\n') % len(oldlog)) except Exception, e: ui.note(_('error reading cache: %r\n') % e) if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
def patchbomb(ui, repo, *revs, **opts): '''send changesets by email By default, diffs are sent in the format generated by hg export, one per message. The series starts with a "[PATCH 0 of N]" introduction, which describes the series as a whole. Each patch email has a Subject line of "[PATCH M of N] ...", using the first line of the changeset description as the subject text. The message contains two or three parts. First, the changeset description. Next, (optionally) if the diffstat program is installed and -d/--diffstat is used, the result of running diffstat on the patch. Finally, the patch itself, as generated by "hg export". By default the patch is included as text in the email body for easy reviewing. Using the -a/--attach option will instead create an attachment for the patch. With -i/--inline an inline attachment will be created. With -o/--outgoing, emails will be generated for patches not found in the destination repository (or only those which are ancestors of the specified revisions if any are provided) With -b/--bundle, changesets are selected as for --outgoing, but a single email containing a binary Mercurial bundle as an attachment will be sent. Examples: hg email -r 3000 # send patch 3000 only hg email -r 3000 -r 3001 # send patches 3000 and 3001 hg email -r 3000:3005 # send patches 3000 through 3005 hg email 3000 # send patch 3000 (deprecated) hg email -o # send all patches not in default hg email -o DEST # send all patches not in DEST hg email -o -r 3000 # send all ancestors of 3000 not in default hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST hg email -b # send bundle of all patches not in default hg email -b DEST # send bundle of all patches not in DEST hg email -b -r 3000 # bundle of all ancestors of 3000 not in default hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST Before using this command, you will need to enable email in your hgrc. See the [email] section in hgrc(5) for details. 
''' _charsets = mail._charsets(ui) def outgoing(dest, revs): '''Return the revisions present locally but not in dest''' dest = ui.expandpath(dest or 'default-push', dest or 'default') revs = [repo.lookup(rev) for rev in revs] other = hg.repository(cmdutil.remoteui(repo, opts), dest) ui.status(_('comparing with %s\n') % dest) o = repo.findoutgoing(other) if not o: ui.status(_("no changes found\n")) return [] o = repo.changelog.nodesbetween(o, revs or None)[0] return [str(repo.changelog.rev(r)) for r in o] def getpatches(revs): for r in cmdutil.revrange(repo, revs): output = cStringIO.StringIO() patch.export(repo, [r], fp=output, opts=patch.diffopts(ui, opts)) yield output.getvalue().split('\n') def getbundle(dest): tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-') tmpfn = os.path.join(tmpdir, 'bundle') try: commands.bundle(ui, repo, tmpfn, dest, **opts) return open(tmpfn, 'rb').read() finally: try: os.unlink(tmpfn) except: pass os.rmdir(tmpdir) if not (opts.get('test') or opts.get('mbox')): # really sending mail.validateconfig(ui) if not (revs or opts.get('rev') or opts.get('outgoing') or opts.get('bundle') or opts.get('patches')): raise util.Abort(_('specify at least one changeset with -r or -o')) if opts.get('outgoing') and opts.get('bundle'): raise util.Abort( _("--outgoing mode always on with --bundle;" " do not re-specify --outgoing")) if opts.get('outgoing') or opts.get('bundle'): if len(revs) > 1: raise util.Abort(_("too many destinations")) dest = revs and revs[0] or None revs = [] if opts.get('rev'): if revs: raise util.Abort(_('use only one form to specify the revision')) revs = opts.get('rev') if opts.get('outgoing'): revs = outgoing(dest, opts.get('rev')) if opts.get('bundle'): opts['revs'] = revs # start if opts.get('date'): start_time = util.parsedate(opts.get('date')) else: start_time = util.makedate() def genmsgid(id): return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn()) def getdescription(body, sender): if opts.get('desc'): body = open(opts.get('desc')).read() else: ui.write( _('\nWrite the introductory message for the ' 'patch series.\n\n')) body = ui.edit(body, sender) return body def getpatchmsgs(patches, patchnames=None): jumbo = [] msgs = [] ui.write( _('This patch series consists of %d patches.\n\n') % len(patches)) name = None for i, p in enumerate(patches): jumbo.extend(p) if patchnames: name = patchnames[i] msg = makepatch(ui, repo, p, opts, _charsets, i + 1, len(patches), name) msgs.append(msg) if len(patches) > 1 or opts.get('intro'): tlen = len(str(len(patches))) subj = '[PATCH %0*d of %d] %s' % (tlen, 0, len(patches), opts.get('subject') or prompt( ui, 'Subject:', rest=' [PATCH %0*d of %d] ' % (tlen, 0, len(patches)))) body = '' if opts.get('diffstat'): d = cdiffstat(ui, _('Final summary:\n'), jumbo) if d: body = '\n' + d body = getdescription(body, sender) msg = mail.mimeencode(ui, body, _charsets, opts.get('test')) msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test')) msgs.insert(0, (msg, subj)) return msgs def getbundlemsgs(bundle): subj = (opts.get('subject') or prompt(ui, 'Subject:', 'A bundle for your repository')) body = getdescription('', sender) msg = email.MIMEMultipart.MIMEMultipart() if body: msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test'))) datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle') datapart.set_payload(bundle) bundlename = '%s.hg' % opts.get('bundlename', 'bundle') datapart.add_header('Content-Disposition', 'attachment', filename=bundlename) 
email.Encoders.encode_base64(datapart) msg.attach(datapart) msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test')) return [(msg, subj)] sender = (opts.get('from') or ui.config('email', 'from') or ui.config('patchbomb', 'from') or prompt(ui, 'From', ui.username())) # internal option used by pbranches patches = opts.get('patches') if patches: msgs = getpatchmsgs(patches, opts.get('patchnames')) elif opts.get('bundle'): msgs = getbundlemsgs(getbundle(dest)) else: msgs = getpatchmsgs(list(getpatches(revs))) def getaddrs(opt, prpt, default=None): addrs = opts.get(opt) or (ui.config('email', opt) or ui.config( 'patchbomb', opt) or prompt(ui, prpt, default)).split(',') return [ mail.addressencode(ui, a.strip(), _charsets, opts.get('test')) for a in addrs if a.strip() ] to = getaddrs('to', 'To') cc = getaddrs('cc', 'Cc', '') bcc = opts.get('bcc') or (ui.config('email', 'bcc') or ui.config( 'patchbomb', 'bcc') or '').split(',') bcc = [ mail.addressencode(ui, a.strip(), _charsets, opts.get('test')) for a in bcc if a.strip() ] ui.write('\n') parent = opts.get('in_reply_to') or None # angle brackets may be omitted, they're not semantically part of the msg-id if parent is not None: if not parent.startswith('<'): parent = '<' + parent if not parent.endswith('>'): parent += '>' first = True sender_addr = email.Utils.parseaddr(sender)[1] sender = mail.addressencode(ui, sender, _charsets, opts.get('test')) sendmail = None for m, subj in msgs: try: m['Message-Id'] = genmsgid(m['X-Mercurial-Node']) except TypeError: m['Message-Id'] = genmsgid('patchbomb') if parent: m['In-Reply-To'] = parent m['References'] = parent if first: parent = m['Message-Id'] first = False m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version() m['Date'] = email.Utils.formatdate(start_time[0]) start_time = (start_time[0] + 1, start_time[1]) m['From'] = sender m['To'] = ', '.join(to) if cc: m['Cc'] = ', '.join(cc) if bcc: m['Bcc'] = ', '.join(bcc) if opts.get('test'): ui.status(_('Displaying '), subj, ' ...\n') ui.flush() if 'PAGER' in os.environ: fp = util.popen(os.environ['PAGER'], 'w') else: fp = ui generator = email.Generator.Generator(fp, mangle_from_=False) try: generator.flatten(m, 0) fp.write('\n') except IOError, inst: if inst.errno != errno.EPIPE: raise if fp is not ui: fp.close() elif opts.get('mbox'): ui.status(_('Writing '), subj, ' ...\n') fp = open(opts.get('mbox'), 'In-Reply-To' in m and 'ab+' or 'wb+') generator = email.Generator.Generator(fp, mangle_from_=True) date = util.datestr(start_time, '%a %b %d %H:%M:%S %Y') fp.write('From %s %s\n' % (sender_addr, date)) generator.flatten(m, 0) fp.write('\n\n') fp.close()
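The sending loop above threads the series by giving every later message an In-Reply-To/References header pointing at the first message's Message-Id, and advances the Date header by one second per message. A standalone sketch with the stdlib email package; thread_series and all addresses are invented.

# Sketch of the series threading and per-message Date bump done in the
# loop above, using the stdlib email package; all values are invented.
import email.message
import email.utils
import time

def thread_series(subjects, sender="patches@example.com"):
    start = time.time()
    parent = None
    msgs = []
    for i, subj in enumerate(subjects):
        m = email.message.Message()
        m["From"] = sender
        m["Subject"] = subj
        m["Message-Id"] = email.utils.make_msgid("hg-patchbomb")
        m["Date"] = email.utils.formatdate(start + i)   # one second apart
        if parent:                                      # reply to the intro
            m["In-Reply-To"] = parent
            m["References"] = parent
        else:
            parent = m["Message-Id"]
        msgs.append(m)
    return msgs

series = thread_series(["[PATCH 0 of 2] series intro",
                        "[PATCH 1 of 2] fix date parsing",
                        "[PATCH 2 of 2] add tests"])
for m in series:
    print(m["Subject"], m["In-Reply-To"])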