def makedesc(c):
    """build an initial action line for a ctx `c`

    lines are in the form:

      pick <hash> <rev> <summary>
    """
    summary = ''
    if c.description():
        summary = c.description().splitlines()[0]
    line = 'pick %s %d %s' % (c, c.rev(), summary)
    # trim to 80 columns so it's not stupidly wide in my editor
    return util.ellipsis(line, 80)
def listcmd(ui, repo, pats, opts): """subcommand that displays the list of shelves""" pats = set(pats) width = 80 if not ui.plain(): width = ui.termwidth() namelabel = 'shelve.newest' for mtime, name in listshelves(repo): sname = util.split(name)[1] if pats and sname not in pats: continue ui.write(sname, label=namelabel) namelabel = 'shelve.name' if ui.quiet: ui.write('\n') continue ui.write(' ' * (16 - len(sname))) used = 16 age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True) ui.write(age, label='shelve.age') ui.write(' ' * (12 - len(age))) used += 12 fp = open(name + '.patch', 'rb') try: while True: line = fp.readline() if not line: break if not line.startswith('#'): desc = line.rstrip() if ui.formatted(): desc = util.ellipsis(desc, width - used) ui.write(desc) break ui.write('\n') if not (opts['patch'] or opts['stat']): continue difflines = fp.readlines() if opts['patch']: for chunk, label in patch.difflabel(iter, difflines): ui.write(chunk, label=label) if opts['stat']: for chunk, label in patch.diffstatui(difflines, width=width, git=True): ui.write(chunk, label=label) finally: fp.close()
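For reference, a minimal sketch of the column budget listcmd uses above: 16 columns for the shelve name, 12 for the age, and whatever terminal width remains for the first non-comment line of the patch. The helper name and sample values are illustrative, not part of the extension:

def formatrow(sname, age, desc, width=80):
    # mirror listcmd's accounting: used = 16 + 12, description gets the rest
    used = 16 + 12
    if len(desc) > width - used:
        desc = desc[:width - used - 3] + '...'
    return '%-16s%-12s%s' % (sname, age, desc)

print(formatrow('default-01', '(2h)', 'a long commit summary ' * 5))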
def makedesc(repo, action, rev):
    """build an initial action line for a ctx

    lines are in the form:

      <action> <hash> <rev> <summary>
    """
    ctx = repo[rev]
    summary = ''
    if ctx.description():
        summary = ctx.description().splitlines()[0]
    line = '%s %s %d %s' % (action, ctx, ctx.rev(), summary)
    # trim to 80 columns so it's not stupidly wide in my editor
    maxlen = repo.ui.configint('histedit', 'linelen', default=80)
    maxlen = max(maxlen, 22) # avoid truncating hash
    return util.ellipsis(line, maxlen)
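Both makedesc variants delegate the actual trimming to util.ellipsis. A standalone approximation of that call, assuming it truncates and appends '...' once the line exceeds maxlength (hg's real helper is additionally unicode-aware; the hash below is made up):

def ellipsis_sketch(text, maxlength):
    # rough stand-in for util.ellipsis; assumes a plain str line
    if len(text) <= maxlength:
        return text
    return text[:maxlength - 3] + '...'

line = 'pick 6470cd40a3b8 42 ' + 'widen the summary until it overflows ' * 3
assert len(ellipsis_sketch(line, 80)) == 80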
def _parse(self, ui, path): "Prepare list of P4 filenames and revisions to import" p4changes = {} changeset = {} files_map = {} copies_map = {} localname = {} depotname = {} heads = [] ui.status(_('reading p4 views\n')) # read client spec or view if "/" in path: p4changes.update(self._parse_view(path)) if path.startswith("//") and path.endswith("/..."): views = {path[:-3]: ""} else: views = {"//": ""} else: cmd = 'p4 -G client -o %s' % util.shellquote(path) clientspec = marshal.load(util.popen(cmd, mode='rb')) views = {} for client in clientspec: if client.startswith("View"): sview, cview = clientspec[client].split() p4changes.update(self._parse_view(sview)) if sview.endswith("...") and cview.endswith("..."): sview = sview[:-3] cview = cview[:-3] cview = cview[2:] cview = cview[cview.find("/") + 1:] views[sview] = cview # list of changes that affect our source files p4changes = p4changes.keys() p4changes.sort(key=int) # list with depot pathnames, longest first vieworder = views.keys() vieworder.sort(key=len, reverse=True) # handle revision limiting startrev = self.ui.config('convert', 'p4.startrev', default=0) # now read the full changelists to get the list of file revisions ui.status(_('collecting p4 changelists\n')) lastid = None for change in p4changes: if startrev and int(change) < int(startrev): continue if self.revs and int(change) > int(self.revs[0]): continue if change in self.revmap: # Ignore already present revisions, but set the parent pointer. lastid = change continue if lastid: parents = [lastid] else: parents = [] d = self._fetch_revision(change) c = self._construct_commit(d, parents) descarr = c.desc.splitlines(True) if len(descarr) > 0: shortdesc = descarr[0].rstrip('\r\n') else: shortdesc = '**empty changelist description**' t = '%s %s' % (c.rev, repr(shortdesc)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') files = [] copies = {} copiedfiles = [] i = 0 while ("depotFile%d" % i) in d and ("rev%d" % i) in d: oldname = d["depotFile%d" % i] filename = None for v in vieworder: if oldname.lower().startswith(v.lower()): filename = decodefilename(views[v] + oldname[len(v):]) break if filename: files.append((filename, d["rev%d" % i])) depotname[filename] = oldname if (d.get("action%d" % i) == "move/add"): copiedfiles.append(filename) localname[oldname] = filename i += 1 # Collect information about copied files for filename in copiedfiles: oldname = depotname[filename] flcmd = 'p4 -G filelog %s' \ % util.shellquote(oldname) flstdout = util.popen(flcmd, mode='rb') copiedfilename = None for d in loaditer(flstdout): copiedoldname = None i = 0 while ("change%d" % i) in d: if (d["change%d" % i] == change and d["action%d" % i] == "move/add"): j = 0 while ("file%d,%d" % (i, j)) in d: if d["how%d,%d" % (i, j)] == "moved from": copiedoldname = d["file%d,%d" % (i, j)] break j += 1 i += 1 if copiedoldname and copiedoldname in localname: copiedfilename = localname[copiedoldname] break if copiedfilename: copies[filename] = copiedfilename else: ui.warn( _("cannot find source for copied file: %s@%s\n") % (filename, change)) changeset[change] = c files_map[change] = files copies_map[change] = copies lastid = change if lastid and len(changeset) > 0: heads = [lastid] return { 'changeset': changeset, 'files': files_map, 'copies': copies_map, 'heads': heads, 'depotname': depotname, }
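The inner loop above resolves each depot path against the client view by trying view prefixes longest-first, so the most specific mapping wins. A self-contained sketch of that lookup with a hypothetical two-entry view:

views = {'//depot/project/': 'project/', '//depot/': ''}
vieworder = sorted(views, key=len, reverse=True)

def localpath(depotname):
    # longest prefix first, case-insensitively, as in _parse above
    for v in vieworder:
        if depotname.lower().startswith(v.lower()):
            return views[v] + depotname[len(v):]
    return None  # file falls outside every view

assert localpath('//depot/project/a.c') == 'project/a.c'
assert localpath('//depot/other/b.c') == 'other/b.c'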
def createcmd(ui, repo, pats, opts): """subcommand that creates a new shelve""" def publicancestors(ctx): """Compute the public ancestors of a commit. Much faster than the revset ancestors(ctx) & draft()""" seen = set([nullrev]) visit = util.deque() visit.append(ctx) while visit: ctx = visit.popleft() yield ctx.node() for parent in ctx.parents(): rev = parent.rev() if rev not in seen: seen.add(rev) if parent.mutable(): visit.append(parent) wctx = repo[None] parents = wctx.parents() if len(parents) > 1: raise util.Abort(_('cannot shelve while merging')) parent = parents[0] # we never need the user, so we use a generic user for all shelve operations user = '******' label = repo._bookmarkcurrent or parent.branch() or 'default' # slashes aren't allowed in filenames, therefore we rename it label = label.replace('/', '_') def gennames(): yield label for i in xrange(1, 100): yield '%s-%02d' % (label, i) shelvedfiles = [] def commitfunc(ui, repo, message, match, opts): # check modified, added, removed, deleted only for flist in repo.status(match=match)[:4]: shelvedfiles.extend(flist) hasmq = util.safehasattr(repo, 'mq') if hasmq: saved, repo.mq.checkapplied = repo.mq.checkapplied, False try: return repo.commit(message, user, opts.get('date'), match, editor=cmdutil.getcommiteditor(**opts)) finally: if hasmq: repo.mq.checkapplied = saved if parent.node() != nullid: desc = "changes to '%s'" % parent.description().split('\n', 1)[0] else: desc = '(changes in empty repository)' if not opts['message']: opts['message'] = desc name = opts['name'] wlock = lock = tr = bms = None try: wlock = repo.wlock() lock = repo.lock() bms = repo._bookmarks.copy() # use an uncommitted transaction to generate the bundle to avoid # pull races. ensure we don't print the abort message to stderr. tr = repo.transaction('commit', report=lambda x: None) if name: if shelvedfile(repo, name, 'hg').exists(): raise util.Abort(_("a shelved change named '%s' already exists") % name) else: for n in gennames(): if not shelvedfile(repo, n, 'hg').exists(): name = n break else: raise util.Abort(_("too many shelved changes named '%s'") % label) # ensure we are not creating a subdirectory or a hidden file if '/' in name or '\\' in name: raise util.Abort(_('shelved change names may not contain slashes')) if name.startswith('.'): raise util.Abort(_("shelved change names may not start with '.'")) node = cmdutil.commit(ui, repo, commitfunc, pats, opts) if not node: stat = repo.status(match=scmutil.match(repo[None], pats, opts)) if stat[3]: ui.status(_("nothing changed (%d missing files, see " "'hg status')\n") % len(stat[3])) else: ui.status(_("nothing changed\n")) return 1 phases.retractboundary(repo, phases.secret, [node]) fp = shelvedfile(repo, name, 'files').opener('wb') fp.write('\0'.join(shelvedfiles)) bases = list(publicancestors(repo[node])) cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve') shelvedfile(repo, name, 'hg').writebundle(cg) cmdutil.export(repo, [node], fp=shelvedfile(repo, name, 'patch').opener('wb'), opts=mdiff.diffopts(git=True)) if ui.formatted(): desc = util.ellipsis(desc, ui.termwidth()) ui.status(_('shelved as %s\n') % name) hg.update(repo, parent.node()) finally: if bms: # restore old bookmarks repo._bookmarks.update(bms) repo._bookmarks.write() if tr: tr.abort() lockmod.release(lock, wlock)
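publicancestors above is a plain breadth-first walk over parent links that stops descending at the first immutable (public) ancestor on each path, which keeps the bundle bases small. The same traversal over generic stand-in nodes, with names chosen here for illustration:

import collections

def mutableclosure(start, parents, ismutable):
    # BFS over parent links; immutable parents end the walk on that path
    seen = {start}
    visit = collections.deque([start])
    while visit:
        node = visit.popleft()
        yield node
        for p in parents(node):
            if p not in seen:
                seen.add(p)
                if ismutable(p):
                    visit.append(p)

# toy graph: 3 -> 2 -> 1 -> 0, where 0 and 1 are immutable (public)
parentmap = {3: [2], 2: [1], 1: [0], 0: []}
assert sorted(mutableclosure(3, parentmap.get, lambda n: n >= 2)) == [2, 3]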
local._("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) except error.LockUnavailable, inst: ui.warn( local._("abort: could not lock %s: %s\n") % (inst.desc or inst.filename, inst.strerror)) except error.RepoError, inst: ui.warn(local._("abort: %s!\n") % inst) except error.ResponseError, inst: ui.warn(local._("abort: %s") % inst.args[0]) if not isinstance(inst.args[1], basestring): ui.warn(" %r\n" % (inst.args[1], )) elif not inst.args[1]: ui.warn(local._(" empty string\n")) else: ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) except error.RevlogError, inst: ui.warn(local._("abort: %s!\n") % inst) except socket.error, inst: ui.warn(local._("abort: %s!\n") % str(inst)) except IOError, inst: if hasattr(inst, "code"): ui.warn(local._("abort: %s\n") % inst) elif hasattr(inst, "reason"): try: # usually it is in the form (errno, strerror) reason = inst.reason.args[1] except: # it might be anything, for example a string reason = inst.reason ui.warn(local._("abort: error: %s\n") % reason) elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE: if ui.debugflag:
def createlog(ui, directory=None, root="", rlog=True, cache=None): '''Collect the CVS rlog''' # Because we store many duplicate commit log messages, reusing strings # saves a lot of memory and pickle storage space. _scache = {} def scache(s): "return a shared version of a string" return _scache.setdefault(s, s) ui.status(_('collecting CVS rlog\n')) log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use re_00 = re.compile('RCS file: (.+)$') re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') re_02 = re.compile('cvs (r?log|server): (.+)\n$') re_03 = re.compile("(Cannot access.+CVSROOT)|" "(can't create temporary directory.+)$") re_10 = re.compile('Working file: (.+)$') re_20 = re.compile('symbolic names:') re_30 = re.compile('\t(.+): ([\\d.]+)$') re_31 = re.compile('----------------------------$') re_32 = re.compile('=======================================' '======================================$') re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' r'(\s+commitid:\s+([^;]+);)?' r'(.*mergepoint:\s+([^;]+);)?') re_70 = re.compile('branches: (.+);$') file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS if directory is None: # Current working directory # Get the real directory in the repository try: prefix = open(os.path.join('CVS','Repository')).read().strip() directory = prefix if prefix == ".": prefix = "" except IOError: raise logerror(_('not a CVS sandbox')) if prefix and not prefix.endswith(os.sep): prefix += os.sep # Use the Root file in the sandbox, if it exists try: root = open(os.path.join('CVS','Root')).read().strip() except IOError: pass if not root: root = os.environ.get('CVSROOT', '') # read log cache if one exists oldlog = [] date = None if cache: cachedir = os.path.expanduser('~/.hg.cvsps') if not os.path.exists(cachedir): os.mkdir(cachedir) # The cvsps cache pickle needs a uniquified name, based on the # repository location. The address may have all sort of nasties # in it, slashes, colons and such. So here we take just the # alphanumeric characters, concatenated in a way that does not # mix up the various components, so that # :pserver:user@server:/path # and # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) oldlog = pickle.load(open(cachefile)) for e in oldlog: if not (util.safehasattr(e, 'branchpoints') and util.safehasattr(e, 'commitid') and util.safehasattr(e, 'mergepoint')): ui.status(_('ignoring old cache\n')) oldlog = [] break ui.note(_('cache has %d log entries\n') % len(oldlog)) except Exception as e: ui.note(_('error reading cache: %r\n') % e) if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2') # build the CVS commandline cmd = ['cvs', '-q'] if root: cmd.append('-d%s' % root) p = util.normpath(getrepopath(root)) if not p.endswith('/'): p += '/' if prefix: # looks like normpath replaces "" by "." 
prefix = p + util.normpath(prefix) else: prefix = p cmd.append(['log', 'rlog'][rlog]) if date: # no space between option and date string cmd.append('-d>%s' % date) cmd.append(directory) # state machine begins here tags = {} # dictionary of revisions on current file with their tags branchmap = {} # mapping between branch names and revision numbers rcsmap = {} state = 0 store = False # set when a new record can be appended cmd = [util.shellquote(arg) for arg in cmd] ui.note(_("running %s\n") % (' '.join(cmd))) ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root)) pfp = util.popen(' '.join(cmd)) peek = pfp.readline() while True: line = peek if line == '': break peek = pfp.readline() if line.endswith('\n'): line = line[:-1] #ui.debug('state=%d line=%r\n' % (state, line)) if state == 0: # initial state, consume input until we see 'RCS file' match = re_00.match(line) if match: rcs = match.group(1) tags = {} if rlog: filename = util.normpath(rcs[:-2]) if filename.startswith(prefix): filename = filename[len(prefix):] if filename.startswith('/'): filename = filename[1:] if filename.startswith('Attic/'): filename = filename[6:] else: filename = filename.replace('/Attic/', '/') state = 2 continue state = 1 continue match = re_01.match(line) if match: raise logerror(match.group(1)) match = re_02.match(line) if match: raise logerror(match.group(2)) if re_03.match(line): raise logerror(line) elif state == 1: # expect 'Working file' (only when using log instead of rlog) match = re_10.match(line) assert match, _('RCS file must be followed by working file') filename = util.normpath(match.group(1)) state = 2 elif state == 2: # expect 'symbolic names' if re_20.match(line): branchmap = {} state = 3 elif state == 3: # read the symbolic names and store as tags match = re_30.match(line) if match: rev = [int(x) for x in match.group(2).split('.')] # Convert magic branch number to an odd-numbered one revn = len(rev) if revn > 3 and (revn % 2) == 0 and rev[-2] == 0: rev = rev[:-2] + rev[-1:] rev = tuple(rev) if rev not in tags: tags[rev] = [] tags[rev].append(match.group(1)) branchmap[match.group(1)] = match.group(2) elif re_31.match(line): state = 5 elif re_32.match(line): state = 0 elif state == 4: # expecting '------' separator before first revision if re_31.match(line): state = 5 else: assert not re_32.match(line), _('must have at least ' 'some revisions') elif state == 5: # expecting revision number and possibly (ignored) lock indication # we create the logentry here from values stored in states 0 to 4, # as this state is re-entered for subsequent revisions of a file. 
match = re_50.match(line) assert match, _('expected revision number') e = logentry(rcs=scache(rcs), file=scache(filename), revision=tuple([int(x) for x in match.group(1).split('.')]), branches=[], parent=None, commitid=None, mergepoint=None, branchpoints=set()) state = 6 elif state == 6: # expecting date, author, state, lines changed match = re_60.match(line) assert match, _('revision must be followed by date line') d = match.group(1) if d[2] == '/': # Y2K d = '19' + d if len(d.split()) != 3: # cvs log dates always in GMT d = d + ' UTC' e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S']) e.author = scache(match.group(2)) e.dead = match.group(3).lower() == 'dead' if match.group(5): if match.group(6): e.lines = (int(match.group(5)), int(match.group(6))) else: e.lines = (int(match.group(5)), 0) elif match.group(6): e.lines = (0, int(match.group(6))) else: e.lines = None if match.group(7): # cvs 1.12 commitid e.commitid = match.group(8) if match.group(9): # cvsnt mergepoint myrev = match.group(10).split('.') if len(myrev) == 2: # head e.mergepoint = 'HEAD' else: myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]]) branches = [b for b in branchmap if branchmap[b] == myrev] assert len(branches) == 1, ('unknown branch: %s' % e.mergepoint) e.mergepoint = branches[0] e.comment = [] state = 7 elif state == 7: # read the revision numbers of branches that start at this revision # or store the commit log message otherwise m = re_70.match(line) if m: e.branches = [tuple([int(y) for y in x.strip().split('.')]) for x in m.group(1).split(';')] state = 8 elif re_31.match(line) and re_50.match(peek): state = 5 store = True elif re_32.match(line): state = 0 store = True else: e.comment.append(line) elif state == 8: # store commit log message if re_31.match(line): cpeek = peek if cpeek.endswith('\n'): cpeek = cpeek[:-1] if re_50.match(cpeek): state = 5 store = True else: e.comment.append(line) elif re_32.match(line): state = 0 store = True else: e.comment.append(line) # When a file is added on a branch B1, CVS creates a synthetic # dead trunk revision 1.1 so that the branch has a root. # Likewise, if you merge such a file to a later branch B2 (one # that already existed when the file was added on B1), CVS # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop # these revisions now, but mark them synthetic so # createchangeset() can take care of them. if (store and e.dead and e.revision[-1] == 1 and # 1.1 or 1.1.x.1 len(e.comment) == 1 and file_added_re.match(e.comment[0])): ui.debug('found synthetic revision in %s: %r\n' % (e.rcs, e.comment[0])) e.synthetic = True if store: # clean up the results and save in the log. 
store = False e.tags = sorted([scache(x) for x in tags.get(e.revision, [])]) e.comment = scache('\n'.join(e.comment)) revn = len(e.revision) if revn > 3 and (revn % 2) == 0: e.branch = tags.get(e.revision[:-1], [None])[0] else: e.branch = None # find the branches starting from this revision branchpoints = set() for branch, revision in branchmap.iteritems(): revparts = tuple([int(i) for i in revision.split('.')]) if len(revparts) < 2: # bad tags continue if revparts[-2] == 0 and revparts[-1] % 2 == 0: # normal branch if revparts[:-2] == e.revision: branchpoints.add(branch) elif revparts == (1, 1, 1): # vendor branch if revparts in e.branches: branchpoints.add(branch) e.branchpoints = branchpoints log.append(e) rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs if len(log) % 100 == 0: ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n') log.sort(key=lambda x: (x.rcs, x.revision)) # find parent revisions of individual files versions = {} for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)): rcs = e.rcs.replace('/Attic/', '/') if rcs in rcsmap: e.rcs = rcsmap[rcs] branch = e.revision[:-1] versions[(e.rcs, branch)] = e.revision for e in log: branch = e.revision[:-1] p = versions.get((e.rcs, branch), None) if p is None: p = e.revision[:-2] e.parent = p versions[(e.rcs, branch)] = e.revision # update the log cache if cache: if log: # join up the old and new logs log.sort(key=lambda x: x.date) if oldlog and oldlog[-1].date >= log[0].date: raise logerror(_('log cache overlaps with new log entries,' ' re-run without cache.')) log = oldlog + log # write the new cachefile ui.note(_('writing cvs log cache %s\n') % cachefile) pickle.dump(log, open(cachefile, 'w')) else: log = oldlog ui.status(_('%d log entries\n') % len(log)) hook.hook(ui, None, "cvslog", True, log=log) return log
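State 3 of the parser above demangles CVS "magic branch numbers": a branch rooted at revision 1.2 is recorded in the symbolic names as 1.2.0.4, with a magic zero in the second-to-last position, and dropping that zero yields the real branch prefix. A standalone version of that conversion:

def demangle_branch(revstr):
    # mirrors the state-3 handling: strip the magic 0 from even-length
    # revision tuples longer than three components
    rev = [int(x) for x in revstr.split('.')]
    if len(rev) > 3 and len(rev) % 2 == 0 and rev[-2] == 0:
        rev = rev[:-2] + rev[-1:]
    return tuple(rev)

assert demangle_branch('1.2.0.4') == (1, 2, 4)
assert demangle_branch('1.2.4.1') == (1, 2, 4, 1)  # ordinary revision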
class notifier(object): '''email notification class.''' def __init__(self, ui, repo, hooktype): self.ui = ui cfg = self.ui.config('notify', 'config') if cfg: self.ui.readconfig(cfg, sections=['usersubs', 'reposubs']) self.repo = repo self.stripcount = int(self.ui.config('notify', 'strip', 0)) self.root = self.strip(self.repo.root) self.domain = self.ui.config('notify', 'domain') self.mbox = self.ui.config('notify', 'mbox') self.test = self.ui.configbool('notify', 'test', True) self.charsets = mail._charsets(self.ui) self.subs = self.subscribers() self.merge = self.ui.configbool('notify', 'merge', True) mapfile = self.ui.config('notify', 'style') template = (self.ui.config('notify', hooktype) or self.ui.config('notify', 'template')) if not mapfile and not template: template = deftemplates.get(hooktype) or single_template if template: template = templater.parsestring(template, quoted=False) self.t = cmdutil.changeset_templater(self.ui, self.repo, False, None, template, mapfile, False) def strip(self, path): '''strip leading slashes from local path, turn into web-safe path.''' path = util.pconvert(path) count = self.stripcount while count > 0: c = path.find('/') if c == -1: break path = path[c + 1:] count -= 1 return path def fixmail(self, addr): '''try to clean up email addresses.''' addr = util.email(addr.strip()) if self.domain: a = addr.find('@localhost') if a != -1: addr = addr[:a] if '@' not in addr: return addr + '@' + self.domain return addr def subscribers(self): '''return list of email addresses of subscribers to this repo.''' subs = set() for user, pats in self.ui.configitems('usersubs'): for pat in pats.split(','): if '#' in pat: pat, revs = pat.split('#', 1) else: revs = None if fnmatch.fnmatch(self.repo.root, pat.strip()): subs.add((self.fixmail(user), revs)) for pat, users in self.ui.configitems('reposubs'): if '#' in pat: pat, revs = pat.split('#', 1) else: revs = None if fnmatch.fnmatch(self.repo.root, pat): for user in users.split(','): subs.add((self.fixmail(user), revs)) return [(mail.addressencode(self.ui, s, self.charsets, self.test), r) for s, r in sorted(subs)] def node(self, ctx, **props): '''format one changeset, unless it is a suppressed merge.''' if not self.merge and len(ctx.parents()) > 1: return False self.t.show(ctx, changes=ctx.changeset(), baseurl=self.ui.config('web', 'baseurl'), root=self.repo.root, webroot=self.root, **props) return True def skipsource(self, source): '''true if incoming changes from this source should be skipped.''' ok_sources = self.ui.config('notify', 'sources', 'serve').split() return source not in ok_sources def send(self, ctx, count, data): '''send message.''' # Select subscribers by revset subs = set() for sub, spec in self.subs: if spec is None: subs.add(sub) continue revs = self.repo.revs('%r and %d:', spec, ctx.rev()) if len(revs): subs.add(sub) continue if len(subs) == 0: self.ui.debug('notify: no subscribers to selected repo ' 'and revset\n') return p = email.Parser.Parser() try: msg = p.parsestr(data) except email.Errors.MessageParseError, inst: raise util.Abort(inst) # store sender and subject sender, subject = msg['From'], msg['Subject'] del msg['From'], msg['Subject'] if not msg.is_multipart(): # create fresh mime message from scratch # (multipart templates must take care of this themselves) headers = msg.items() payload = msg.get_payload() # for notification prefer readability over data precision msg = mail.mimeencode(self.ui, payload, self.charsets, self.test) # reinstate custom headers for k, v in headers: msg[k] = v 
msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") # try to make subject line exist and be useful if not subject: if count > 1: subject = _('%s: %d new changesets') % (self.root, count) else: s = ctx.description().lstrip().split('\n', 1)[0].rstrip() subject = '%s: %s' % (self.root, s) maxsubject = int(self.ui.config('notify', 'maxsubject', 67)) if maxsubject: subject = util.ellipsis(subject, maxsubject) msg['Subject'] = mail.headencode(self.ui, subject, self.charsets, self.test) # try to make message have proper sender if not sender: sender = self.ui.config('email', 'from') or self.ui.username() if '@' not in sender or '@localhost' in sender: sender = self.fixmail(sender) msg['From'] = mail.addressencode(self.ui, sender, self.charsets, self.test) msg['X-Hg-Notification'] = 'changeset %s' % ctx if not msg['Message-Id']: msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' % (ctx, int( time.time()), hash(self.repo.root), socket.getfqdn())) msg['To'] = ', '.join(sorted(subs)) msgtext = msg.as_string() if self.test: self.ui.write(msgtext) if not msgtext.endswith('\n'): self.ui.write('\n') else: self.ui.status( _('notify: sending %d subscribers %d changes\n') % (len(subs), count)) mail.sendmail(self.ui, util.email(msg['From']), subs, msgtext, mbox=self.mbox)
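subscribers() above accepts an optional '#revset' suffix on each [usersubs]/[reposubs] pattern, which send() later evaluates against every incoming changeset. A sketch of just the pattern side, with made-up paths and a hypothetical helper name:

import fnmatch

def parse_sub(pat, repo_root):
    # split off an optional '#revset' suffix, then glob-match the repo root
    revs = None
    if '#' in pat:
        pat, revs = pat.split('#', 1)
    matched = fnmatch.fnmatch(repo_root, pat.strip())
    return matched, revs  # revs is None when all changesets are wanted

assert parse_sub('/srv/hg/*#branch(stable)', '/srv/hg/main') == \
    (True, 'branch(stable)')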
def _parse(self, ui, path): "Prepare list of P4 filenames and revisions to import" ui.status(_('reading p4 views\n')) # read client spec or view if "/" in path: self._parse_view(path) if path.startswith("//") and path.endswith("/..."): views = {path[:-3]: ""} else: views = {"//": ""} else: cmd = 'p4 -G client -o %s' % util.shellquote(path) clientspec = marshal.load(util.popen(cmd, mode='rb')) views = {} for client in clientspec: if client.startswith("View"): sview, cview = clientspec[client].split() self._parse_view(sview) if sview.endswith("...") and cview.endswith("..."): sview = sview[:-3] cview = cview[:-3] cview = cview[2:] cview = cview[cview.find("/") + 1:] views[sview] = cview # list of changes that affect our source files self.p4changes = self.p4changes.keys() self.p4changes.sort(key=int) # list with depot pathnames, longest first vieworder = views.keys() vieworder.sort(key=len, reverse=True) # handle revision limiting startrev = self.ui.config('convert', 'p4.startrev', default=0) self.p4changes = [ x for x in self.p4changes if ((not startrev or int(x) >= int(startrev)) and ( not self.revs or int(x) <= int(self.revs[0]))) ] # now read the full changelists to get the list of file revisions ui.status(_('collecting p4 changelists\n')) lastid = None for change in self.p4changes: cmd = "p4 -G describe -s %s" % change stdout = util.popen(cmd, mode='rb') d = marshal.load(stdout) desc = self.recode(d.get("desc", "")) shortdesc = desc.split("\n", 1)[0] t = '%s %s' % (d["change"], repr(shortdesc)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') if lastid: parents = [lastid] else: parents = [] date = (int(d["time"]), 0) # timezone not set c = common.commit(author=self.recode(d["user"]), date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), parents=parents, desc=desc, branch=None, extra={"p4": change}) files = [] copies = {} copiedfiles = [] i = 0 while ("depotFile%d" % i) in d and ("rev%d" % i) in d: oldname = d["depotFile%d" % i] filename = None for v in vieworder: if oldname.lower().startswith(v.lower()): filename = decodefilename(views[v] + oldname[len(v):]) break if filename: files.append((filename, d["rev%d" % i])) self.depotname[filename] = oldname if (d.get("action%d" % i) == "move/add"): copiedfiles.append(filename) self.localname[oldname] = filename i += 1 # Collect information about copied files for filename in copiedfiles: oldname = self.depotname[filename] flcmd = 'p4 -G filelog %s' \ % util.shellquote(oldname) flstdout = util.popen(flcmd, mode='rb') copiedfilename = None for d in loaditer(flstdout): copiedoldname = None i = 0 while ("change%d" % i) in d: if (d["change%d" % i] == change and d["action%d" % i] == "move/add"): j = 0 while ("file%d,%d" % (i, j)) in d: if d["how%d,%d" % (i, j)] == "moved from": copiedoldname = d["file%d,%d" % (i, j)] break j += 1 i += 1 if copiedoldname and copiedoldname in self.localname: copiedfilename = self.localname[copiedoldname] break if copiedfilename: copies[filename] = copiedfilename else: ui.warn( _("cannot find source for copied file: %s@%s\n") % (filename, change)) self.changeset[change] = c self.files[change] = files self.copies[change] = copies lastid = change if lastid: self.heads = [lastid]
def _parse(self, ui, path): "Prepare list of P4 filenames and revisions to import" ui.status(_('reading p4 views\n')) # read client spec or view if "/" in path: self._parse_view(path) if path.startswith("//") and path.endswith("/..."): views = {path[:-3]:""} else: views = {"//": ""} else: cmd = 'p4 -G client -o "%s"' % path clientspec = marshal.load(util.popen(cmd)) views = {} for client in clientspec: if client.startswith("View"): sview, cview = clientspec[client].split() self._parse_view(sview) if sview.endswith("...") and cview.endswith("..."): sview = sview[:-3] cview = cview[:-3] cview = cview[2:] cview = cview[cview.find("/") + 1:] views[sview] = cview # list of changes that affect our source files self.p4changes = self.p4changes.keys() self.p4changes.sort(key=int) # list with depot pathnames, longest first vieworder = views.keys() vieworder.sort(key=len, reverse=True) # handle revision limiting startrev = self.ui.config('convert', 'p4.startrev', default=0) self.p4changes = [x for x in self.p4changes if ((not startrev or int(x) >= int(startrev)) and (not self.rev or int(x) <= int(self.rev)))] # now read the full changelists to get the list of file revisions ui.status(_('collecting p4 changelists\n')) lastid = None for change in self.p4changes: cmd = "p4 -G describe %s" % change stdout = util.popen(cmd) d = marshal.load(stdout) desc = self.recode(d["desc"]) shortdesc = desc.split("\n", 1)[0] t = '%s %s' % (d["change"], repr(shortdesc)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') if lastid: parents = [lastid] else: parents = [] date = (int(d["time"]), 0) # timezone not set c = commit(author=self.recode(d["user"]), date=util.datestr(date), parents=parents, desc=desc, branch='', extra={"p4": change}) files = [] i = 0 while ("depotFile%d" % i) in d and ("rev%d" % i) in d: oldname = d["depotFile%d" % i] filename = None for v in vieworder: if oldname.startswith(v): filename = views[v] + oldname[len(v):] break if filename: files.append((filename, d["rev%d" % i])) self.depotname[filename] = oldname i += 1 self.changeset[change] = c self.files[change] = files lastid = change if lastid: self.heads = [lastid]
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve"""

    def mutableancestors(ctx):
        """return all mutable ancestors for ctx (included)

        Much faster than the revset ancestors(ctx) & draft()"""
        seen = set([nullrev])
        visit = collections.deque()
        visit.append(ctx)
        while visit:
            ctx = visit.popleft()
            yield ctx.node()
            for parent in ctx.parents():
                rev = parent.rev()
                if rev not in seen:
                    seen.add(rev)
                    if parent.mutable():
                        visit.append(parent)

    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]

    # we never need the user, so we use a generic user for all shelve operations
    user = '******'
    label = repo._activebookmark or parent.branch() or 'default'

    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace('/', '_')

    def gennames():
        yield label
        for i in xrange(1, 100):
            yield '%s-%02d' % (label, i)

    def commitfunc(ui, repo, message, match, opts):
        hasmq = util.safehasattr(repo, 'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False
        backup = repo.ui.backupconfig('phases', 'new-commit')
        try:
            repo.ui.setconfig('phases', 'new-commit', phases.secret)
            editor = cmdutil.getcommiteditor(editform='shelve.shelve', **opts)
            return repo.commit(message, user, opts.get('date'), match,
                               editor=editor)
        finally:
            repo.ui.restoreconfig(backup)
            if hasmq:
                repo.mq.checkapplied = saved

    if parent.node() != nullid:
        desc = "changes to '%s'" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts['message']:
        opts['message'] = desc

    name = opts['name']

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        if name:
            if shelvedfile(repo, name, 'hg').exists():
                raise error.Abort(_("a shelved change named '%s' already"
                                    " exists") % name)
        else:
            for n in gennames():
                if not shelvedfile(repo, n, 'hg').exists():
                    name = n
                    break
            else:
                raise error.Abort(_("too many shelved changes named '%s'")
                                  % label)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise error.Abort(_('shelved change names may not contain slashes'))
        if name.startswith('.'):
            raise error.Abort(_("shelved change names may not start with '.'"))
        interactive = opts.get('interactive', False)

        def interactivecommitfunc(ui, repo, *pats, **opts):
            match = scmutil.match(repo['.'], pats, {})
            message = opts['message']
            return commitfunc(ui, repo, message, match, opts)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, interactivecommitfunc, None,
                                    False, cmdutil.recordfilter, *pats,
                                    **opts)
        if not node:
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat.deleted:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat.deleted))
            else:
                ui.status(_("nothing changed\n"))
            return 1

        bases = list(mutableancestors(repo[node]))
        shelvedfile(repo, name, 'hg').writebundle(bases, node)
        cmdutil.export(repo, [node],
                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
                       opts=mdiff.diffopts(git=True))

        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        hg.update(repo, parent.node())

        _aborttransaction(repo)
    finally:
        lockmod.release(tr, lock, wlock)
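When no --name is given, gennames() above probes 'label' first, then 'label-01' through 'label-99', taking the first candidate without an existing bundle. The same strategy against a stand-in exists() predicate (the helper names are illustrative):

def pick_name(label, exists):
    def gennames():
        yield label
        for i in range(1, 100):
            yield '%s-%02d' % (label, i)
    for n in gennames():
        if not exists(n):
            return n
    raise RuntimeError("too many shelved changes named '%s'" % label)

taken = {'default', 'default-01'}
assert pick_name('default', taken.__contains__) == 'default-02'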
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None): '''Convert log into changesets.''' ui.status(_('creating changesets\n')) # try to order commitids by date mindate = {} for e in log: if e.commitid: mindate[e.commitid] = min(e.date, mindate.get(e.commitid)) # Merge changesets log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment, x. author, x.branch, x.date, x.branchpoints)) changesets = [] files = set() c = None for i, e in enumerate(log): # Check if log entry belongs to the current changeset or not. # Since CVS is file-centric, two different file revisions with # different branchpoints should be treated as belonging to two # different changesets (and the ordering is important and not # honoured by cvsps at this point). # # Consider the following case: # foo 1.1 branchpoints: [MYBRANCH] # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2] # # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a # later version of foo may be in MYBRANCH2, so foo should be the # first changeset and bar the next and MYBRANCH and MYBRANCH2 # should both start off of the bar changeset. No provisions are # made to ensure that this is, in fact, what happens. if not (c and e.branchpoints == c.branchpoints and ( # cvs commitids (e.commitid is not None and e.commitid == c.commitid) or ( # no commitids, use fuzzy commit detection (e.commitid is None or c.commitid is None) and e.comment == c.comment and e.author == c.author and e.branch == c.branch and ((c.date[0] + c.date[1]) <= (e.date[0] + e.date[1]) <= (c.date[0] + c.date[1]) + fuzz) and e.file not in files))): c = changeset(comment=e.comment, author=e.author, branch=e.branch, date=e.date, entries=[], mergepoint=e.mergepoint, branchpoints=e.branchpoints, commitid=e.commitid) changesets.append(c) files = set() if len(changesets) % 100 == 0: t = '%d %s' % (len(changesets), repr(e.comment)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') c.entries.append(e) files.add(e.file) c.date = e.date # changeset date is date of latest commit in it # Mark synthetic changesets for c in changesets: # Synthetic revisions always get their own changeset, because # the log message includes the filename. E.g. if you add file3 # and file4 on a branch, you get four log entries and three # changesets: # "File file3 was added on branch ..." (synthetic, 1 entry) # "File file4 was added on branch ..." (synthetic, 1 entry) # "Add file3 and file4 to fix ..." (real, 2 entries) # Hence the check for 1 entry here. c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic # Sort files in each changeset def entitycompare(l, r): 'Mimic cvsps sorting order' l = l.file.split('/') r = r.file.split('/') nl = len(l) nr = len(r) n = min(nl, nr) for i in range(n): if i + 1 == nl and nl < nr: return -1 elif i + 1 == nr and nl > nr: return +1 elif l[i] < r[i]: return -1 elif l[i] > r[i]: return +1 return 0 for c in changesets: c.entries.sort(entitycompare) # Sort changesets by date odd = set() def cscmp(l, r, odd=odd): d = sum(l.date) - sum(r.date) if d: return d # detect vendor branches and initial commits on a branch le = {} for e in l.entries: le[e.rcs] = e.revision re = {} for e in r.entries: re[e.rcs] = e.revision d = 0 for e in l.entries: if re.get(e.rcs, None) == e.parent: assert not d d = 1 break for e in r.entries: if le.get(e.rcs, None) == e.parent: if d: odd.add((l, r)) d = -1 break # By this point, the changesets are sufficiently compared that # we don't really care about ordering. 
However, this leaves # some race conditions in the tests, so we compare on the # number of files modified, the files contained in each # changeset, and the branchpoints in the change to ensure test # output remains stable. # recommended replacement for cmp from # https://docs.python.org/3.0/whatsnew/3.0.html c = lambda x, y: (x > y) - (x < y) # Sort bigger changes first. if not d: d = c(len(l.entries), len(r.entries)) # Try sorting by filename in the change. if not d: d = c([e.file for e in l.entries], [e.file for e in r.entries]) # Try and put changes without a branch point before ones with # a branch point. if not d: d = c(len(l.branchpoints), len(r.branchpoints)) return d changesets.sort(cscmp) # Collect tags globaltags = {} for c in changesets: for e in c.entries: for tag in e.tags: # remember which is the latest changeset to have this tag globaltags[tag] = c for c in changesets: tags = set() for e in c.entries: tags.update(e.tags) # remember tags only if this is the latest changeset to have it c.tags = sorted(tag for tag in tags if globaltags[tag] is c) # Find parent changesets, handle {{mergetobranch BRANCHNAME}} # by inserting dummy changesets with two parents, and handle # {{mergefrombranch BRANCHNAME}} by setting two parents. if mergeto is None: mergeto = r'{{mergetobranch ([-\w]+)}}' if mergeto: mergeto = re.compile(mergeto) if mergefrom is None: mergefrom = r'{{mergefrombranch ([-\w]+)}}' if mergefrom: mergefrom = re.compile(mergefrom) versions = {} # changeset index where we saw any particular file version branches = {} # changeset index where we saw a branch n = len(changesets) i = 0 while i < n: c = changesets[i] for f in c.entries: versions[(f.rcs, f.revision)] = i p = None if c.branch in branches: p = branches[c.branch] else: # first changeset on a new branch # the parent is a changeset with the branch in its # branchpoints such that it is the latest possible # commit without any intervening, unrelated commits. for candidate in xrange(i): if c.branch not in changesets[candidate].branchpoints: if p is not None: break continue p = candidate c.parents = [] if p is not None: p = changesets[p] # Ensure no changeset has a synthetic changeset as a parent. while p.synthetic: assert len(p.parents) <= 1, \ _('synthetic changeset cannot have multiple parents') if p.parents: p = p.parents[0] else: p = None break if p is not None: c.parents.append(p) if c.mergepoint: if c.mergepoint == 'HEAD': c.mergepoint = None c.parents.append(changesets[branches[c.mergepoint]]) if mergefrom: m = mergefrom.search(c.comment) if m: m = m.group(1) if m == 'HEAD': m = None try: candidate = changesets[branches[m]] except KeyError: ui.warn( _("warning: CVS commit message references " "non-existent branch %r:\n%s\n") % (m, c.comment)) if m in branches and c.branch != m and not candidate.synthetic: c.parents.append(candidate) if mergeto: m = mergeto.search(c.comment) if m: if m.groups(): m = m.group(1) if m == 'HEAD': m = None else: m = None # if no group found then merge to HEAD if m in branches and c.branch != m: # insert empty changeset for merge cc = changeset( author=c.author, branch=m, date=c.date, comment='convert-repo: CVS merge from branch %s' % c.branch, entries=[], tags=[], parents=[changesets[branches[m]], c]) changesets.insert(i + 1, cc) branches[m] = i + 1 # adjust our loop counters now we have inserted a new entry n += 1 i += 2 continue branches[c.branch] = i i += 1 # Drop synthetic changesets (safe now that we have ensured no other # changesets can have them as parents). 
i = 0 while i < len(changesets): if changesets[i].synthetic: del changesets[i] else: i += 1 # Number changesets for i, c in enumerate(changesets): c.id = i + 1 if odd: for l, r in odd: if l.id is not None and r.id is not None: ui.warn( _('changeset %d is both before and after %d\n') % (l.id, r.id)) ui.status(_('%d changeset entries\n') % len(changesets)) hook.hook(ui, None, "cvschangesets", True, changesets=changesets) return changesets
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None): '''Convert log into changesets.''' ui.status(_('creating changesets\n')) # try to order commitids by date mindate = {} for e in log: if e.commitid: mindate[e.commitid] = min(e.date, mindate.get(e.commitid)) # Merge changesets log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment, x.author, x.branch, x.date, x.branchpoints)) changesets = [] files = set() c = None for i, e in enumerate(log): # Check if log entry belongs to the current changeset or not. # Since CVS is file-centric, two different file revisions with # different branchpoints should be treated as belonging to two # different changesets (and the ordering is important and not # honoured by cvsps at this point). # # Consider the following case: # foo 1.1 branchpoints: [MYBRANCH] # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2] # # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a # later version of foo may be in MYBRANCH2, so foo should be the # first changeset and bar the next and MYBRANCH and MYBRANCH2 # should both start off of the bar changeset. No provisions are # made to ensure that this is, in fact, what happens. if not (c and e.branchpoints == c.branchpoints and (# cvs commitids (e.commitid is not None and e.commitid == c.commitid) or (# no commitids, use fuzzy commit detection (e.commitid is None or c.commitid is None) and e.comment == c.comment and e.author == c.author and e.branch == c.branch and ((c.date[0] + c.date[1]) <= (e.date[0] + e.date[1]) <= (c.date[0] + c.date[1]) + fuzz) and e.file not in files))): c = changeset(comment=e.comment, author=e.author, branch=e.branch, date=e.date, entries=[], mergepoint=e.mergepoint, branchpoints=e.branchpoints, commitid=e.commitid) changesets.append(c) files = set() if len(changesets) % 100 == 0: t = '%d %s' % (len(changesets), repr(e.comment)[1:-1]) ui.status(util.ellipsis(t, 80) + '\n') c.entries.append(e) files.add(e.file) c.date = e.date # changeset date is date of latest commit in it # Mark synthetic changesets for c in changesets: # Synthetic revisions always get their own changeset, because # the log message includes the filename. E.g. if you add file3 # and file4 on a branch, you get four log entries and three # changesets: # "File file3 was added on branch ..." (synthetic, 1 entry) # "File file4 was added on branch ..." (synthetic, 1 entry) # "Add file3 and file4 to fix ..." (real, 2 entries) # Hence the check for 1 entry here. 
c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic # Sort files in each changeset def entitycompare(l, r): 'Mimic cvsps sorting order' l = l.file.split('/') r = r.file.split('/') nl = len(l) nr = len(r) n = min(nl, nr) for i in range(n): if i + 1 == nl and nl < nr: return -1 elif i + 1 == nr and nl > nr: return +1 elif l[i] < r[i]: return -1 elif l[i] > r[i]: return +1 return 0 for c in changesets: c.entries.sort(entitycompare) # Sort changesets by date odd = set() def cscmp(l, r, odd=odd): d = sum(l.date) - sum(r.date) if d: return d # detect vendor branches and initial commits on a branch le = {} for e in l.entries: le[e.rcs] = e.revision re = {} for e in r.entries: re[e.rcs] = e.revision d = 0 for e in l.entries: if re.get(e.rcs, None) == e.parent: assert not d d = 1 break for e in r.entries: if le.get(e.rcs, None) == e.parent: if d: odd.add((l, r)) d = -1 break return d changesets.sort(cscmp) # Collect tags globaltags = {} for c in changesets: for e in c.entries: for tag in e.tags: # remember which is the latest changeset to have this tag globaltags[tag] = c for c in changesets: tags = set() for e in c.entries: tags.update(e.tags) # remember tags only if this is the latest changeset to have it c.tags = sorted(tag for tag in tags if globaltags[tag] is c) # Find parent changesets, handle {{mergetobranch BRANCHNAME}} # by inserting dummy changesets with two parents, and handle # {{mergefrombranch BRANCHNAME}} by setting two parents. if mergeto is None: mergeto = r'{{mergetobranch ([-\w]+)}}' if mergeto: mergeto = re.compile(mergeto) if mergefrom is None: mergefrom = r'{{mergefrombranch ([-\w]+)}}' if mergefrom: mergefrom = re.compile(mergefrom) versions = {} # changeset index where we saw any particular file version branches = {} # changeset index where we saw a branch n = len(changesets) i = 0 while i < n: c = changesets[i] for f in c.entries: versions[(f.rcs, f.revision)] = i p = None if c.branch in branches: p = branches[c.branch] else: # first changeset on a new branch # the parent is a changeset with the branch in its # branchpoints such that it is the latest possible # commit without any intervening, unrelated commits. for candidate in xrange(i): if c.branch not in changesets[candidate].branchpoints: if p is not None: break continue p = candidate c.parents = [] if p is not None: p = changesets[p] # Ensure no changeset has a synthetic changeset as a parent. 
while p.synthetic: assert len(p.parents) <= 1, \ _('synthetic changeset cannot have multiple parents') if p.parents: p = p.parents[0] else: p = None break if p is not None: c.parents.append(p) if c.mergepoint: if c.mergepoint == 'HEAD': c.mergepoint = None c.parents.append(changesets[branches[c.mergepoint]]) if mergefrom: m = mergefrom.search(c.comment) if m: m = m.group(1) if m == 'HEAD': m = None try: candidate = changesets[branches[m]] except KeyError: ui.warn(_("warning: CVS commit message references " "non-existent branch %r:\n%s\n") % (m, c.comment)) if m in branches and c.branch != m and not candidate.synthetic: c.parents.append(candidate) if mergeto: m = mergeto.search(c.comment) if m: if m.groups(): m = m.group(1) if m == 'HEAD': m = None else: m = None # if no group found then merge to HEAD if m in branches and c.branch != m: # insert empty changeset for merge cc = changeset( author=c.author, branch=m, date=c.date, comment='convert-repo: CVS merge from branch %s' % c.branch, entries=[], tags=[], parents=[changesets[branches[m]], c]) changesets.insert(i + 1, cc) branches[m] = i + 1 # adjust our loop counters now we have inserted a new entry n += 1 i += 2 continue branches[c.branch] = i i += 1 # Drop synthetic changesets (safe now that we have ensured no other # changesets can have them as parents). i = 0 while i < len(changesets): if changesets[i].synthetic: del changesets[i] else: i += 1 # Number changesets for i, c in enumerate(changesets): c.id = i + 1 if odd: for l, r in odd: if l.id is not None and r.id is not None: ui.warn(_('changeset %d is both before and after %d\n') % (l.id, r.id)) ui.status(_('%d changeset entries\n') % len(changesets)) hook.hook(ui, None, "cvschangesets", True, changesets=changesets) return changesets
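One caveat in both createchangeset variants above: mindate is seeded with min(e.date, mindate.get(e.commitid)), and on the first sighting of a commitid that get() returns None, so the stored key collapses to None under Python 2 (where None sorts below everything) and the comparison raises TypeError under Python 3. A guarded sketch of the same bookkeeping, mirroring the variables from the loop above:

mindate = {}
for e in log:
    if e.commitid:
        # only compare once a previous date for this commitid exists
        if e.commitid in mindate:
            mindate[e.commitid] = min(e.date, mindate[e.commitid])
        else:
            mindate[e.commitid] = e.date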
def createlog(ui, directory=None, root="", rlog=True, cache=None): '''Collect the CVS rlog''' # Because we store many duplicate commit log messages, reusing strings # saves a lot of memory and pickle storage space. _scache = {} def scache(s): "return a shared version of a string" return _scache.setdefault(s, s) ui.status(_('collecting CVS rlog\n')) log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use re_00 = re.compile('RCS file: (.+)$') re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') re_02 = re.compile('cvs (r?log|server): (.+)\n$') re_03 = re.compile("(Cannot access.+CVSROOT)|" "(can't create temporary directory.+)$") re_10 = re.compile('Working file: (.+)$') re_20 = re.compile('symbolic names:') re_30 = re.compile('\t(.+): ([\\d.]+)$') re_31 = re.compile('----------------------------$') re_32 = re.compile('=======================================' '======================================$') re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' r'(\s+commitid:\s+([^;]+);)?' r'(.*mergepoint:\s+([^;]+);)?') re_70 = re.compile('branches: (.+);$') file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS if directory is None: # Current working directory # Get the real directory in the repository try: prefix = open(os.path.join('CVS', 'Repository')).read().strip() directory = prefix if prefix == ".": prefix = "" except IOError: raise logerror(_('not a CVS sandbox')) if prefix and not prefix.endswith(os.sep): prefix += os.sep # Use the Root file in the sandbox, if it exists try: root = open(os.path.join('CVS', 'Root')).read().strip() except IOError: pass if not root: root = os.environ.get('CVSROOT', '') # read log cache if one exists oldlog = [] date = None if cache: cachedir = os.path.expanduser('~/.hg.cvsps') if not os.path.exists(cachedir): os.mkdir(cachedir) # The cvsps cache pickle needs a uniquified name, based on the # repository location. The address may have all sort of nasties # in it, slashes, colons and such. So here we take just the # alphanumeric characters, concatenated in a way that does not # mix up the various components, so that # :pserver:user@server:/path # and # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) oldlog = pickle.load(open(cachefile)) for e in oldlog: if not (util.safehasattr(e, 'branchpoints') and util.safehasattr(e, 'commitid') and util.safehasattr(e, 'mergepoint')): ui.status(_('ignoring old cache\n')) oldlog = [] break ui.note(_('cache has %d log entries\n') % len(oldlog)) except Exception as e: ui.note(_('error reading cache: %r\n') % e) if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2') # build the CVS commandline cmd = ['cvs', '-q'] if root: cmd.append('-d%s' % root) p = util.normpath(getrepopath(root)) if not p.endswith('/'): p += '/' if prefix: # looks like normpath replaces "" by "." 
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {}      # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    rcsmap = {}
    state = 0
    store = False  # set when a new record can be appended

    cmd = [util.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = util.popen(' '.join(cmd))
    peek = pfp.readline()
    while True:
        line = peek
        if line == '':
            break
        peek = pfp.readline()
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs),
                         file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[],
                         parent=None,
                         commitid=None,
                         mergepoint=None,
                         branchpoints=set())

            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvs 1.12 commitid
                e.commitid = match.group(8)

            if match.group(9): # cvsnt mergepoint
                myrev = match.group(10).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, ('unknown branch: %s'
                                                % e.mergepoint)
                    e.mergepoint = branches[0]

            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                              for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.iteritems():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs

            if len(log) % 100 == 0:
                ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)
                          + '\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
        rcs = e.rcs.replace('/Attic/', '/')
        if rcs in rcsmap:
            e.rcs = rcsmap[rcs]
        branch = e.revision[:-1]
        versions[(e.rcs, branch)] = e.revision

    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'w'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
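# A standalone re-implementation of the cache-name mangling used in
# createlog() above, to make the ':pserver:user@server:/path' comment
# concrete. The helper name is hypothetical; the logic mirrors the
# cachefile computation line for line.
import os
import re

def _cachefilename(root, directory, cachedir='~/.hg.cvsps'):
    parts = root.split(':') + [directory, 'cache']
    # keep only the alphanumeric runs of each component, so distinct
    # roots cannot collapse to the same file name
    parts = ['-'.join(re.findall(r'\w+', s)) for s in parts if s]
    return os.path.join(os.path.expanduser(cachedir),
                        '.'.join(s for s in parts if s))

print(_cachefilename(':pserver:user@server:/path', 'proj'))
# -> ~/.hg.cvsps/pserver.user-server.path.proj.cache
print(_cachefilename('/pserver/user/server/path', 'proj'))
# -> ~/.hg.cvsps/pserver-user-server-path.proj.cache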
def _docreatecmd(ui, repo, pats, opts):
    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    origbranch = wctx.branch()

    if parent.node() != nodemod.nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts.get('message'):
        opts['message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo['.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, commitfunc, None,
                                    False, cmdutil.recordfilter, *pats,
                                    **pycompat.strkwargs(opts))
        if not node:
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        _shelvecreatedcommit(repo, node, name)

        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        hg.update(repo, parent.node())
        if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)
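# The default shelve message above is simply the first line of the
# parent commit description. A tiny illustrative helper (hypothetical
# name, keyed on the description text rather than on a null parent node
# so the sketch stays self-contained):
def _defaultshelvemsg(parentdesc):
    if parentdesc:
        return "changes to: %s" % parentdesc.split('\n', 1)[0]
    return '(changes in empty repository)'

print(_defaultshelvemsg('fix flaky test\n\nlonger explanation...'))
# -> changes to: fix flaky test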
def _parse(self, ui, path):
    "Prepare list of P4 filenames and revisions to import"
    ui.status(_('reading p4 views\n'))

    # read client spec or view
    if "/" in path:
        self._parse_view(path)
        if path.startswith("//") and path.endswith("/..."):
            views = {path[:-3]: ""}
        else:
            views = {"//": ""}
    else:
        cmd = 'p4 -G client -o %s' % util.shellquote(path)
        clientspec = marshal.load(util.popen(cmd, mode='rb'))

        views = {}
        for client in clientspec:
            if client.startswith("View"):
                sview, cview = clientspec[client].split()
                self._parse_view(sview)
                if sview.endswith("...") and cview.endswith("..."):
                    sview = sview[:-3]
                    cview = cview[:-3]
                cview = cview[2:]
                cview = cview[cview.find("/") + 1:]
                views[sview] = cview

    # list of changes that affect our source files
    self.p4changes = self.p4changes.keys()
    self.p4changes.sort(key=int)

    # list with depot pathnames, longest first
    vieworder = views.keys()
    vieworder.sort(key=len, reverse=True)

    # handle revision limiting
    startrev = self.ui.config('convert', 'p4.startrev', default=0)
    self.p4changes = [x for x in self.p4changes
                      if ((not startrev or int(x) >= int(startrev)) and
                          (not self.rev or int(x) <= int(self.rev)))]

    # now read the full changelists to get the list of file revisions
    ui.status(_('collecting p4 changelists\n'))
    lastid = None
    for change in self.p4changes:
        cmd = "p4 -G describe -s %s" % change
        stdout = util.popen(cmd, mode='rb')
        d = marshal.load(stdout)
        desc = self.recode(d["desc"])
        shortdesc = desc.split("\n", 1)[0]
        t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
        ui.status(util.ellipsis(t, 80) + '\n')

        if lastid:
            parents = [lastid]
        else:
            parents = []

        date = (int(d["time"]), 0)     # timezone not set
        c = commit(author=self.recode(d["user"]),
                   date=util.datestr(date),
                   parents=parents, desc=desc, branch='',
                   extra={"p4": change})

        files = []
        i = 0
        while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
            oldname = d["depotFile%d" % i]
            filename = None
            for v in vieworder:
                if oldname.startswith(v):
                    filename = views[v] + oldname[len(v):]
                    break
            if filename:
                files.append((filename, d["rev%d" % i]))
                self.depotname[filename] = oldname
            i += 1
        self.changeset[change] = c
        self.files[change] = files
        lastid = change

    if lastid:
        self.heads = [lastid]
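# How the depot-to-local mapping above resolves a file name: `views`
# maps depot prefixes to client prefixes, and `vieworder` is sorted
# longest first so the most specific view wins. A hypothetical
# standalone helper sketching that lookup:
def _mapname(oldname, views, vieworder):
    for v in vieworder:
        if oldname.startswith(v):
            return views[v] + oldname[len(v):]
    return None     # file is outside every view

_views = {'//depot/proj/': '', '//depot/': 'other/'}
_vieworder = sorted(_views, key=len, reverse=True)
print(_mapname('//depot/proj/src/main.c', _views, _vieworder))
# -> src/main.c (matched the longer, more specific view)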
def _parse(self, ui, path):
    "Prepare list of P4 filenames and revisions to import"
    ui.status(_('reading p4 views\n'))

    # read client spec or view
    if "/" in path:
        self._parse_view(path)
        if path.startswith("//") and path.endswith("/..."):
            views = {path[:-3]: ""}
        else:
            views = {"//": ""}
    else:
        cmd = 'p4 -G client -o %s' % util.shellquote(path)
        clientspec = marshal.load(util.popen(cmd, mode='rb'))

        views = {}
        for client in clientspec:
            if client.startswith("View"):
                sview, cview = clientspec[client].split()
                self._parse_view(sview)
                if sview.endswith("...") and cview.endswith("..."):
                    sview = sview[:-3]
                    cview = cview[:-3]
                cview = cview[2:]
                cview = cview[cview.find("/") + 1:]
                views[sview] = cview

    # list of changes that affect our source files
    self.p4changes = self.p4changes.keys()
    self.p4changes.sort(key=int)

    # list with depot pathnames, longest first
    vieworder = views.keys()
    vieworder.sort(key=len, reverse=True)

    # handle revision limiting
    startrev = self.ui.config('convert', 'p4.startrev', default=0)
    self.p4changes = [x for x in self.p4changes
                      if ((not startrev or int(x) >= int(startrev)) and
                          (not self.revs or int(x) <= int(self.revs[0])))]

    # now read the full changelists to get the list of file revisions
    ui.status(_('collecting p4 changelists\n'))
    lastid = None
    for change in self.p4changes:
        cmd = "p4 -G describe -s %s" % change
        stdout = util.popen(cmd, mode='rb')
        d = marshal.load(stdout)
        desc = self.recode(d.get("desc", ""))
        shortdesc = desc.split("\n", 1)[0]
        t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
        ui.status(util.ellipsis(t, 80) + '\n')

        if lastid:
            parents = [lastid]
        else:
            parents = []

        date = (int(d["time"]), 0)     # timezone not set
        c = commit(author=self.recode(d["user"]),
                   date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
                   parents=parents, desc=desc, branch=None,
                   extra={"p4": change})

        files = []
        copies = {}
        copiedfiles = []
        i = 0
        while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
            oldname = d["depotFile%d" % i]
            filename = None
            for v in vieworder:
                if oldname.lower().startswith(v.lower()):
                    filename = decodefilename(views[v] + oldname[len(v):])
                    break
            if filename:
                files.append((filename, d["rev%d" % i]))
                self.depotname[filename] = oldname
                if (d.get("action%d" % i) == "move/add"):
                    copiedfiles.append(filename)
                self.localname[oldname] = filename
            i += 1

        # Collect information about copied files
        for filename in copiedfiles:
            oldname = self.depotname[filename]

            flcmd = 'p4 -G filelog %s' \
                  % util.shellquote(oldname)
            flstdout = util.popen(flcmd, mode='rb')

            copiedfilename = None
            for d in loaditer(flstdout):
                copiedoldname = None

                i = 0
                while ("change%d" % i) in d:
                    if (d["change%d" % i] == change and
                        d["action%d" % i] == "move/add"):
                        j = 0
                        while ("file%d,%d" % (i, j)) in d:
                            if d["how%d,%d" % (i, j)] == "moved from":
                                copiedoldname = d["file%d,%d" % (i, j)]
                                break
                            j += 1
                    i += 1

                if copiedoldname and copiedoldname in self.localname:
                    copiedfilename = self.localname[copiedoldname]
                    break

            if copiedfilename:
                copies[filename] = copiedfilename
            else:
                ui.warn(_("cannot find source for copied file: %s@%s\n")
                        % (filename, change))

        self.changeset[change] = c
        self.files[change] = files
        self.copies[change] = copies
        lastid = change

    if lastid:
        self.heads = [lastid]
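# The loaditer() helper used above is not shown in this excerpt; it
# presumably drains the marshalled dictionary stream that 'p4 -G'
# writes, one record per marshal.load() call. A sketch along those
# assumed lines:
import marshal

def loaditer(f):
    "yield the dictionary objects produced by a 'p4 -G' stream"
    try:
        while True:
            d = marshal.load(f)
            if not d:
                break
            yield d
    except EOFError:
        pass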
def _docreatecmd(ui, repo, pats, opts):
    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    origbranch = wctx.branch()

    if parent.node() != nodemod.nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts.get('message'):
        opts['message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # depending on whether shelve is traditional or
        # obsolescence-based, we either abort or commit this
        # transaction in the end. If we abort it, we don't
        # want to print anything to stderr
        report = None
        tr = repo.transaction('commit', report=report)

        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo['.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, commitfunc, None,
                                    False, cmdutil.recordfilter, *pats,
                                    **opts)
        if not node:
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        _shelvecreatedcommit(ui, repo, node, name, tr)

        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)

        # current wc parent may be already obsolete because
        # it might have been created previously and shelve just
        # reuses it
        hg.update(repo.unfiltered(), parent.node())
        if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(ui, repo, tr, node, activebookmark)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)
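# util.ellipsis(desc, ui.termwidth()) above trims the status line to the
# terminal width. A rough pure-Python sketch of that behavior (the real
# helper also accounts for multi-column unicode characters, which this
# sketch deliberately ignores):
def _ellipsis(text, maxlength=400):
    if len(text) <= maxlength:
        return text
    return text[:maxlength - 3] + '...'

print(_ellipsis('changes to: a very long first line of a commit', 24))
# -> changes to: a very lo...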
        else:
            reason = local._('lock held by %s') % inst.locker
        ui.warn(local._("abort: %s: %s\n")
                % (inst.desc or inst.filename, reason))
    except error.LockUnavailable, inst:
        ui.warn(local._("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except error.RepoError, inst:
        ui.warn(local._("abort: %s!\n") % inst)
    except error.ResponseError, inst:
        ui.warn(local._("abort: %s") % inst.args[0])
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(local._(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.RevlogError, inst:
        ui.warn(local._("abort: %s!\n") % inst)
    except socket.error, inst:
        ui.warn(local._("abort: %s!\n") % str(inst))
    except IOError, inst:
        if hasattr(inst, "code"):
            ui.warn(local._("abort: %s\n") % inst)
        elif hasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except: # it might be anything, for example a string
                reason = inst.reason
            ui.warn(local._("abort: error: %s\n") % reason)
        elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
            if ui.debugflag:
def _docreatecmd(ui, repo, pats, opts):
    def mutableancestors(ctx):
        """return all mutable ancestors for ctx (included)

        Much faster than the revset ancestors(ctx) & draft()"""
        seen = set([nodemod.nullrev])
        visit = collections.deque()
        visit.append(ctx)
        while visit:
            ctx = visit.popleft()
            yield ctx.node()
            for parent in ctx.parents():
                rev = parent.rev()
                if rev not in seen:
                    seen.add(rev)
                    if parent.mutable():
                        visit.append(parent)

    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    origbranch = wctx.branch()

    # we never need the user, so we use a generic user for all shelve
    # operations
    user = '******'
    label = repo._activebookmark or parent.branch() or 'default'

    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace('/', '_')

    def gennames():
        yield label
        for i in xrange(1, 100):
            yield '%s-%02d' % (label, i)

    if parent.node() != nodemod.nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts.get('message'):
        opts['message'] = desc

    name = opts.get('name')

    lock = tr = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        if name:
            if shelvedfile(repo, name, 'hg').exists():
                raise error.Abort(_("a shelved change named '%s' already"
                                    " exists") % name)
        else:
            for n in gennames():
                if not shelvedfile(repo, n, 'hg').exists():
                    name = n
                    break
            else:
                raise error.Abort(_("too many shelved changes named '%s'")
                                  % label)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise error.Abort(_('shelved change names may not contain '
                                'slashes'))
        if name.startswith('.'):
            raise error.Abort(_("shelved change names may not start with '.'"))
        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        extra = {}
        if includeunknown:
            s = repo.status(match=scmutil.match(repo[None], pats, opts),
                            unknown=True)
            if s.unknown:
                extra['shelve_unknown'] = '\0'.join(s.unknown)
                repo[None].add(s.unknown)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo['.'].branch())

        def commitfunc(ui, repo, message, match, opts):
            hasmq = util.safehasattr(repo, 'mq')
            if hasmq:
                saved, repo.mq.checkapplied = repo.mq.checkapplied, False
            backup = repo.ui.backupconfig('phases', 'new-commit')
            try:
                repo.ui.setconfig('phases', 'new-commit', phases.secret)
                editor = cmdutil.getcommiteditor(editform='shelve.shelve',
                                                 **opts)
                return repo.commit(message, user, opts.get('date'), match,
                                   editor=editor, extra=extra)
            finally:
                repo.ui.restoreconfig(backup)
                if hasmq:
                    repo.mq.checkapplied = saved

        def interactivecommitfunc(ui, repo, *pats, **opts):
            match = scmutil.match(repo['.'], pats, {})
            message = opts['message']
            return commitfunc(ui, repo, message, match, opts)

        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, interactivecommitfunc, None,
                                    False, cmdutil.recordfilter, *pats,
                                    **opts)
        if not node:
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat.deleted:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat.deleted))
            else:
                ui.status(_("nothing changed\n"))
            return 1

        bases = list(mutableancestors(repo[node]))
        shelvedfile(repo, name, 'hg').writebundle(bases, node)
        cmdutil.export(repo, [node],
                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
                       opts=mdiff.diffopts(git=True))

        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        hg.update(repo, parent.node())
        if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _aborttransaction(repo)
    finally:
        lockmod.release(tr, lock)
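# The gennames() generator above yields 'label', 'label-01', ...,
# 'label-99', and the caller takes the first name with no existing shelf
# file. Illustrated here with a plain set standing in for the
# shelvedfile(...).exists() checks:
def _gennames(label):
    yield label
    for i in range(1, 100):
        yield '%s-%02d' % (label, i)

_existing = set(['default', 'default-01'])
for n in _gennames('default'):
    if n not in _existing:
        print(n)     # -> default-02
        break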