Example #1
 def notify(self, bugs, committer):
     '''tell bugzilla to send mail.'''
     self.ui.status(_('telling bugzilla to send mail:\n'))
     (user, userid) = self.get_bugzilla_user(committer)
     for id in bugs.keys():
         self.ui.status(_('  bug %s\n') % id)
         cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
         bzdir = self.ui.config('bugzilla', 'bzdir')
         try:
             # Remain backwards-compatible with the old notify string,
             # which took a single positional value. A new-style format
             # string (with named keys) makes this raise TypeError.
             cmd = cmdfmt % id
         except TypeError:
             cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
         self.ui.note(_('running notify command %s\n') % cmd)
         fp = procutil.popen('(%s) 2>&1' % cmd, 'rb')
         out = util.fromnativeeol(fp.read())
         ret = fp.close()
         if ret:
             self.ui.warn(out)
             raise error.Abort(
                 _('bugzilla notify command %s') %
                 procutil.explainexit(ret))
     self.ui.status(_('done\n'))
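
A note on the idiom above: procutil.popen returns a file object whose close() reports the command's exit status (None on success), like os.popen. A minimal standalone sketch of the same capture-and-check pattern using only the standard library (run_notify is a hypothetical helper, not Mercurial API):

import subprocess

def run_notify(cmd):
    # Capture stdout and stderr together, as the '(%s) 2>&1' wrapper does.
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out = proc.communicate()[0]
    if proc.returncode:
        # Non-zero status: surface the captured output, as notify() does.
        raise RuntimeError('notify command failed (%d): %s'
                           % (proc.returncode, out))
    return out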
Example #2
 def _parse_view(self, path):
     """Read changes affecting the path"""
     cmd = b'p4 -G changes -s submitted %s' % procutil.shellquote(path)
     stdout = procutil.popen(cmd, mode=b'rb')
     p4changes = {}
     for d in loaditer(stdout):
         c = d.get(b"change", None)
         if c:
             p4changes[c] = True
     return p4changes
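
Examples #2 through #5 lean on a loaditer() helper that repeatedly unmarshals the dictionaries `p4 -G` writes to its stdout, one record per dict. A sketch of such a helper, assuming only that the stream is a sequence of marshal-framed records:

import marshal

def loaditer(f):
    """Yield the marshalled dicts produced by `p4 -G` until EOF."""
    try:
        while True:
            d = marshal.load(f)
            if not d:
                break
            yield d
    except EOFError:
        pass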
Example #3
 def _fetch_revision(self, rev):
     """Return an output of `p4 describe` including author, commit date as
     a dictionary."""
     cmd = b"p4 -G describe -s %s" % rev
     stdout = procutil.popen(cmd, mode=b'rb')
     return marshal.load(stdout)
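
The dict returned here mirrors the fields of `p4 describe -s`: keys such as b'user', b'time' and b'desc', plus numbered b'depotFile0'/b'rev0'/b'action0' entries that _parse() in Example #5 walks. A hypothetical call (converter stands in for the converter instance):

d = converter._fetch_revision(b'12345')
author, date, desc = d[b'user'], d[b'time'], d[b'desc']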
Example #4
    def getfile(self, name, rev):
        cmd = b'p4 -G print %s' % procutil.shellquote(
            b"%s#%s" % (self.depotname[name], rev)
        )

        lasterror = None
        while True:
            stdout = procutil.popen(cmd, mode=b'rb')

            mode = None
            contents = []
            keywords = None

            for d in loaditer(stdout):
                code = d[b"code"]
                data = d.get(b"data")

                if code == b"error":
                    # if this is the first time the error happened,
                    # re-attempt getting the file
                    if not lasterror:
                        lasterror = IOError(d[b"generic"], data)
                        # break out of the inner for loop; the outer
                        # while loop will re-run the command
                        break
                    else:
                        raise lasterror

                elif code == b"stat":
                    action = d.get(b"action")
                    if action in [b"purge", b"delete", b"move/delete"]:
                        return None, None
                    p4type = self.re_type.match(d[b"type"])
                    if p4type:
                        mode = b""
                        flags = (p4type.group(1) or b"") + (
                            p4type.group(3) or b""
                        )
                        if b"x" in flags:
                            mode = b"x"
                        if p4type.group(2) == b"symlink":
                            mode = b"l"
                        if b"ko" in flags:
                            keywords = self.re_keywords_old
                        elif b"k" in flags:
                            keywords = self.re_keywords

                elif code == b"text" or code == b"binary":
                    contents.append(data)

                lasterror = None

            if not lasterror:
                break

        if mode is None:
            return None, None

        contents = b''.join(contents)

        if keywords:
            contents = keywords.sub(b"$\\1$", contents)
        if mode == b"l" and contents.endswith(b"\n"):
            contents = contents[:-1]

        return contents, mode
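
getfile() assumes three precompiled patterns on the instance: self.re_type, which splits a p4 file type such as b"ktext" or b"binary+x" into an optional modifier prefix (group 1), the base type (group 2) and optional +flags (group 3), plus two patterns for stripping RCS-style keyword expansions. A sketch of patterns with that group layout (the exact expressions live on the converter class):

import re

re_type = re.compile(
    br"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
    br"(\+\w+)?$")
re_keywords = re.compile(
    br"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
    br":[^$\n]*\$")
re_keywords_old = re.compile(br"\$(Id|Header):[^$\n]*\$")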
Example #5
    def _parse(self, ui, path):
        """Prepare list of P4 filenames and revisions to import"""
        p4changes = {}
        changeset = {}
        files_map = {}
        copies_map = {}
        localname = {}
        depotname = {}
        heads = []

        ui.status(_(b'reading p4 views\n'))

        # read client spec or view
        if b"/" in path:
            p4changes.update(self._parse_view(path))
            if path.startswith(b"//") and path.endswith(b"/..."):
                views = {path[:-3]: b""}
            else:
                views = {b"//": b""}
        else:
            cmd = b'p4 -G client -o %s' % procutil.shellquote(path)
            clientspec = marshal.load(procutil.popen(cmd, mode=b'rb'))

            views = {}
            for client in clientspec:
                if client.startswith(b"View"):
                    sview, cview = clientspec[client].split()
                    p4changes.update(self._parse_view(sview))
                    if sview.endswith(b"...") and cview.endswith(b"..."):
                        sview = sview[:-3]
                        cview = cview[:-3]
                    cview = cview[2:]
                    cview = cview[cview.find(b"/") + 1 :]
                    views[sview] = cview
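                    # e.g. the view line b"//depot/proj/... //client/src/..."
                    # ends up as views[b"//depot/proj/"] = b"src/", so depot
                    # paths can be rewritten relative to the client root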

        # list of changes that affect our source files
        p4changes = sorted(p4changes.keys(), key=int)

        # list with depot pathnames, longest first
        vieworder = sorted(views.keys(), key=len, reverse=True)

        # handle revision limiting
        startrev = self.ui.config(b'convert', b'p4.startrev')

        # now read the full changelists to get the list of file revisions
        ui.status(_(b'collecting p4 changelists\n'))
        lastid = None
        for change in p4changes:
            if startrev and int(change) < int(startrev):
                continue
            if self.revs and int(change) > int(self.revs[0]):
                continue
            if change in self.revmap:
                # Ignore already present revisions, but set the parent pointer.
                lastid = change
                continue

            if lastid:
                parents = [lastid]
            else:
                parents = []

            d = self._fetch_revision(change)
            c = self._construct_commit(d, parents)

            descarr = c.desc.splitlines(True)
            if len(descarr) > 0:
                shortdesc = descarr[0].rstrip(b'\r\n')
            else:
                shortdesc = b'**empty changelist description**'

            t = b'%s %s' % (c.rev, repr(shortdesc)[1:-1])
            ui.status(stringutil.ellipsis(t, 80) + b'\n')

            files = []
            copies = {}
            copiedfiles = []
            i = 0
            while (b"depotFile%d" % i) in d and (b"rev%d" % i) in d:
                oldname = d[b"depotFile%d" % i]
                filename = None
                for v in vieworder:
                    if oldname.lower().startswith(v.lower()):
                        filename = decodefilename(views[v] + oldname[len(v) :])
                        break
                if filename:
                    files.append((filename, d[b"rev%d" % i]))
                    depotname[filename] = oldname
                    if d.get(b"action%d" % i) == b"move/add":
                        copiedfiles.append(filename)
                    localname[oldname] = filename
                i += 1

            # Collect information about copied files
            for filename in copiedfiles:
                oldname = depotname[filename]

                flcmd = b'p4 -G filelog %s' % procutil.shellquote(oldname)
                flstdout = procutil.popen(flcmd, mode=b'rb')

                copiedfilename = None
                for d in loaditer(flstdout):
                    copiedoldname = None

                    i = 0
                    while (b"change%d" % i) in d:
                        if (
                            d[b"change%d" % i] == change
                            and d[b"action%d" % i] == b"move/add"
                        ):
                            j = 0
                            while (b"file%d,%d" % (i, j)) in d:
                                if d[b"how%d,%d" % (i, j)] == b"moved from":
                                    copiedoldname = d[b"file%d,%d" % (i, j)]
                                    break
                                j += 1
                        i += 1

                    if copiedoldname and copiedoldname in localname:
                        copiedfilename = localname[copiedoldname]
                        break

                if copiedfilename:
                    copies[filename] = copiedfilename
                else:
                    ui.warn(
                        _(b"cannot find source for copied file: %s@%s\n")
                        % (filename, change)
                    )

            changeset[change] = c
            files_map[change] = files
            copies_map[change] = copies
            lastid = change

        if lastid and len(changeset) > 0:
            heads = [lastid]

        return {
            b'changeset': changeset,
            b'files': files_map,
            b'copies': copies_map,
            b'heads': heads,
            b'depotname': depotname,
        }
Example #6
def createlog(ui, directory=None, root="", rlog=True, cache=None):
    '''Collect the CVS rlog'''

    # Because we store many duplicate commit log messages, reusing strings
    # saves a lot of memory and pickle storage space.
    _scache = {}
    def scache(s):
        "return a shared version of a string"
        return _scache.setdefault(s, s)

    ui.status(_('collecting CVS rlog\n'))

    log = []      # list of logentry objects containing the CVS state

    # patterns to match in CVS (r)log output, by state of use
    re_00 = re.compile(b'RCS file: (.+)$')
    re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$')
    re_02 = re.compile(b'cvs (r?log|server): (.+)\n$')
    re_03 = re.compile(b"(Cannot access.+CVSROOT)|"
                       b"(can't create temporary directory.+)$")
    re_10 = re.compile(b'Working file: (.+)$')
    re_20 = re.compile(b'symbolic names:')
    re_30 = re.compile(b'\t(.+): ([\\d.]+)$')
    re_31 = re.compile(b'----------------------------$')
    re_32 = re.compile(b'======================================='
                       b'======================================$')
    re_50 = re.compile(br'revision ([\d.]+)(\s+locked by:\s+.+;)?$')
    re_60 = re.compile(br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
                       br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
                       br'(\s+commitid:\s+([^;]+);)?'
                       br'(.*mergepoint:\s+([^;]+);)?')
    re_70 = re.compile(b'branches: (.+);$')

    file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch')

    prefix = ''   # leading path to strip off what we get from CVS

    if directory is None:
        # Current working directory

        # Get the real directory in the repository
        try:
            prefix = open(os.path.join('CVS','Repository'), 'rb').read().strip()
            directory = prefix
            if prefix == ".":
                prefix = ""
        except IOError:
            raise logerror(_('not a CVS sandbox'))

        if prefix and not prefix.endswith(pycompat.ossep):
            prefix += pycompat.ossep

        # Use the Root file in the sandbox, if it exists
        try:
            root = open(os.path.join('CVS','Root'), 'rb').read().strip()
        except IOError:
            pass

    if not root:
        root = encoding.environ.get('CVSROOT', '')

    # read log cache if one exists
    oldlog = []
    date = None

    if cache:
        cachedir = os.path.expanduser('~/.hg.cvsps')
        if not os.path.exists(cachedir):
            os.mkdir(cachedir)

        # The cvsps cache pickle needs a uniquified name, based on the
        # repository location. The address may have all sort of nasties
        # in it, slashes, colons and such. So here we take just the
        # alphanumeric characters, concatenated in a way that does not
        # mix up the various components, so that
        #    :pserver:user@server:/path
        # and
        #    /pserver/user/server/path
        # are mapped to different cache file names.
        cachefile = root.split(":") + [directory, "cache"]
        cachefile = ['-'.join(re.findall(br'\w+', s)) for s in cachefile if s]
        cachefile = os.path.join(cachedir,
                                 '.'.join([s for s in cachefile if s]))
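        # e.g. root=':pserver:user@server:/path' with directory='proj'
        # yields ~/.hg.cvsps/pserver.user-server.path.proj.cache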

    if cache == 'update':
        try:
            ui.note(_('reading cvs log cache %s\n') % cachefile)
            oldlog = pickle.load(open(cachefile, 'rb'))
            for e in oldlog:
                if not (util.safehasattr(e, 'branchpoints') and
                        util.safehasattr(e, 'commitid') and
                        util.safehasattr(e, 'mergepoint')):
                    ui.status(_('ignoring old cache\n'))
                    oldlog = []
                    break

            ui.note(_('cache has %d log entries\n') % len(oldlog))
        except Exception as e:
            ui.note(_('error reading cache: %r\n') % e)

        if oldlog:
            date = oldlog[-1].date    # last commit date as a (time,tz) tuple
            date = dateutil.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')

    # build the CVS commandline
    cmd = ['cvs', '-q']
    if root:
        cmd.append('-d%s' % root)
        p = util.normpath(getrepopath(root))
        if not p.endswith('/'):
            p += '/'
        if prefix:
            # looks like normpath replaces "" by "."
            prefix = p + util.normpath(prefix)
        else:
            prefix = p
    cmd.append(['log', 'rlog'][rlog])
    if date:
        # no space between option and date string
        cmd.append('-d>%s' % date)
    cmd.append(directory)

    # state machine begins here
    tags = {}     # dictionary of revisions on current file with their tags
    branchmap = {} # mapping between branch names and revision numbers
    rcsmap = {}
    state = 0
    store = False # set when a new record can be appended

    cmd = [procutil.shellquote(arg) for arg in cmd]
    ui.note(_("running %s\n") % (' '.join(cmd)))
    ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))

    pfp = procutil.popen(' '.join(cmd), 'rb')
    peek = util.fromnativeeol(pfp.readline())
    while True:
        line = peek
        if line == '':
            break
        peek = util.fromnativeeol(pfp.readline())
        if line.endswith('\n'):
            line = line[:-1]
        #ui.debug('state=%d line=%r\n' % (state, line))

        if state == 0:
            # initial state, consume input until we see 'RCS file'
            match = re_00.match(line)
            if match:
                rcs = match.group(1)
                tags = {}
                if rlog:
                    filename = util.normpath(rcs[:-2])
                    if filename.startswith(prefix):
                        filename = filename[len(prefix):]
                    if filename.startswith('/'):
                        filename = filename[1:]
                    if filename.startswith('Attic/'):
                        filename = filename[6:]
                    else:
                        filename = filename.replace('/Attic/', '/')
                    state = 2
                    continue
                state = 1
                continue
            match = re_01.match(line)
            if match:
                raise logerror(match.group(1))
            match = re_02.match(line)
            if match:
                raise logerror(match.group(2))
            if re_03.match(line):
                raise logerror(line)

        elif state == 1:
            # expect 'Working file' (only when using log instead of rlog)
            match = re_10.match(line)
            assert match, _('RCS file must be followed by working file')
            filename = util.normpath(match.group(1))
            state = 2

        elif state == 2:
            # expect 'symbolic names'
            if re_20.match(line):
                branchmap = {}
                state = 3

        elif state == 3:
            # read the symbolic names and store as tags
            match = re_30.match(line)
            if match:
                rev = [int(x) for x in match.group(2).split('.')]

                # Convert magic branch number to an odd-numbered one
                revn = len(rev)
                if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
                    rev = rev[:-2] + rev[-1:]
                rev = tuple(rev)

                if rev not in tags:
                    tags[rev] = []
                tags[rev].append(match.group(1))
                branchmap[match.group(1)] = match.group(2)

            elif re_31.match(line):
                state = 5
            elif re_32.match(line):
                state = 0

        elif state == 4:
            # expecting '------' separator before first revision
            if re_31.match(line):
                state = 5
            else:
                assert not re_32.match(line), _('must have at least '
                                                'some revisions')

        elif state == 5:
            # expecting revision number and possibly (ignored) lock indication
            # we create the logentry here from values stored in states 0 to 4,
            # as this state is re-entered for subsequent revisions of a file.
            match = re_50.match(line)
            assert match, _('expected revision number')
            e = logentry(rcs=scache(rcs),
                         file=scache(filename),
                         revision=tuple([int(x) for x in
                                         match.group(1).split('.')]),
                         branches=[],
                         parent=None,
                         commitid=None,
                         mergepoint=None,
                         branchpoints=set())

            state = 6

        elif state == 6:
            # expecting date, author, state, lines changed
            match = re_60.match(line)
            assert match, _('revision must be followed by date line')
            d = match.group(1)
            if d[2] == '/':
                # Y2K
                d = '19' + d

            if len(d.split()) != 3:
                # cvs log dates always in GMT
                d = d + ' UTC'
            e.date = dateutil.parsedate(d, ['%y/%m/%d %H:%M:%S',
                                        '%Y/%m/%d %H:%M:%S',
                                        '%Y-%m-%d %H:%M:%S'])
            e.author = scache(match.group(2))
            e.dead = match.group(3).lower() == 'dead'

            if match.group(5):
                if match.group(6):
                    e.lines = (int(match.group(5)), int(match.group(6)))
                else:
                    e.lines = (int(match.group(5)), 0)
            elif match.group(6):
                e.lines = (0, int(match.group(6)))
            else:
                e.lines = None

            if match.group(7): # cvs 1.12 commitid
                e.commitid = match.group(8)

            if match.group(9): # cvsnt mergepoint
                myrev = match.group(10).split('.')
                if len(myrev) == 2: # head
                    e.mergepoint = 'HEAD'
                else:
                    myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
                    branches = [b for b in branchmap if branchmap[b] == myrev]
                    assert len(branches) == 1, ('unknown branch: %s'
                                                % match.group(10))
                    e.mergepoint = branches[0]

            e.comment = []
            state = 7

        elif state == 7:
            # read the revision numbers of branches that start at this revision
            # or store the commit log message otherwise
            m = re_70.match(line)
            if m:
                e.branches = [tuple([int(y) for y in x.strip().split('.')])
                                for x in m.group(1).split(';')]
                state = 8
            elif re_31.match(line) and re_50.match(peek):
                state = 5
                store = True
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        elif state == 8:
            # store commit log message
            if re_31.match(line):
                cpeek = peek
                if cpeek.endswith('\n'):
                    cpeek = cpeek[:-1]
                if re_50.match(cpeek):
                    state = 5
                    store = True
                else:
                    e.comment.append(line)
            elif re_32.match(line):
                state = 0
                store = True
            else:
                e.comment.append(line)

        # When a file is added on a branch B1, CVS creates a synthetic
        # dead trunk revision 1.1 so that the branch has a root.
        # Likewise, if you merge such a file to a later branch B2 (one
        # that already existed when the file was added on B1), CVS
        # creates a synthetic dead revision 1.1.x.1 on B2.  Don't drop
        # these revisions now, but mark them synthetic so
        # createchangeset() can take care of them.
        if (store and
              e.dead and
              e.revision[-1] == 1 and      # 1.1 or 1.1.x.1
              len(e.comment) == 1 and
              file_added_re.match(e.comment[0])):
            ui.debug('found synthetic revision in %s: %r\n'
                     % (e.rcs, e.comment[0]))
            e.synthetic = True

        if store:
            # clean up the results and save in the log.
            store = False
            e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
            e.comment = scache('\n'.join(e.comment))

            revn = len(e.revision)
            if revn > 3 and (revn % 2) == 0:
                e.branch = tags.get(e.revision[:-1], [None])[0]
            else:
                e.branch = None

            # find the branches starting from this revision
            branchpoints = set()
            for branch, revision in branchmap.items():
                revparts = tuple([int(i) for i in revision.split('.')])
                if len(revparts) < 2: # bad tags
                    continue
                if revparts[-2] == 0 and revparts[-1] % 2 == 0:
                    # normal branch
                    if revparts[:-2] == e.revision:
                        branchpoints.add(branch)
                elif revparts == (1, 1, 1): # vendor branch
                    if revparts in e.branches:
                        branchpoints.add(branch)
            e.branchpoints = branchpoints

            log.append(e)

            rcsmap[e.rcs.replace('/Attic/', '/')] = e.rcs

            if len(log) % 100 == 0:
                ui.status(stringutil.ellipsis('%d %s' % (len(log), e.file), 80)
                          + '\n')

    log.sort(key=lambda x: (x.rcs, x.revision))

    # find parent revisions of individual files
    versions = {}
    for e in sorted(oldlog, key=lambda x: (x.rcs, x.revision)):
        rcs = e.rcs.replace('/Attic/', '/')
        if rcs in rcsmap:
            e.rcs = rcsmap[rcs]
        branch = e.revision[:-1]
        versions[(e.rcs, branch)] = e.revision

    for e in log:
        branch = e.revision[:-1]
        p = versions.get((e.rcs, branch), None)
        if p is None:
            p = e.revision[:-2]
        e.parent = p
        versions[(e.rcs, branch)] = e.revision

    # update the log cache
    if cache:
        if log:
            # join up the old and new logs
            log.sort(key=lambda x: x.date)

            if oldlog and oldlog[-1].date >= log[0].date:
                raise logerror(_('log cache overlaps with new log entries,'
                                 ' re-run without cache.'))

            log = oldlog + log

            # write the new cachefile
            ui.note(_('writing cvs log cache %s\n') % cachefile)
            pickle.dump(log, open(cachefile, 'wb'))
        else:
            log = oldlog

    ui.status(_('%d log entries\n') % len(log))

    encodings = ui.configlist('convert', 'cvsps.logencoding')
    if encodings:
        def revstr(r):
            # this is needed, because logentry.revision is a tuple of "int"
            # (e.g. (1, 2) for "1.2")
            return '.'.join(pycompat.maplist(pycompat.bytestr, r))

        for entry in log:
            comment = entry.comment
            for e in encodings:
                try:
                    entry.comment = comment.decode(
                        pycompat.sysstr(e)).encode('utf-8')
                    if ui.debugflag:
                        ui.debug("transcoding by %s: %s of %s\n" %
                                 (e, revstr(entry.revision), entry.file))
                    break
                except UnicodeDecodeError:
                    pass # try next encoding
                except LookupError as inst: # unknown encoding, maybe
                    raise error.Abort(inst,
                                      hint=_('check convert.cvsps.logencoding'
                                             ' configuration'))
            else:
                raise error.Abort(_("no encoding can transcode"
                                    " CVS log message for %s of %s")
                                  % (revstr(entry.revision), entry.file),
                                  hint=_('check convert.cvsps.logencoding'
                                         ' configuration'))

    hook.hook(ui, None, "cvslog", True, log=log)

    return log
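
For orientation, a minimal, hypothetical rlog transcript of the kind the state machine above consumes (the separator lines are abbreviated here; re_31 and re_32 only match runs of exactly 28 dashes and 77 equals signs):

RCS file: /cvsroot/proj/hello.c,v        <- state 0 (re_00)
symbolic names:                          <- state 2 -> 3 (re_20)
	RELEASE_1_0: 1.2.0.2                 <- state 3, a tag (re_30)
----------------------------             <- state 3 -> 5 (re_31)
revision 1.2                             <- state 5 (re_50)
date: 2006/06/01 12:00:00;  author: alice;  state: Exp;  <- state 6 (re_60)
fix greeting                             <- states 7/8, the log message
=============================================================================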