Code example #1
    def apply_patch(self,
                    patch_file,
                    base_path,
                    base_dir,
                    p=None,
                    revert=False):
        """Apply the patch and return a PatchResult indicating its success."""
        # Figure out the -p argument for patch. We override the calculated
        # value if it is supplied via a commandline option.
        p_num = p or self._get_p_number(base_path, base_dir)

        cmd = ['patch']

        if revert:
            cmd.append('-R')

        if p_num >= 0:
            cmd.append('-p%d' % p_num)

        cmd.extend(['-i', six.text_type(patch_file)])

        # Ignore return code 2 in case the patch file consists of only empty
        # files, which 'patch' can't handle. Other 'patch' errors also give
        # return code 2, so we must check the command output.
        rc, patch_output = execute(cmd,
                                   extra_ignore_errors=(2, ),
                                   return_error_code=True)
        only_garbage_in_patch = ('patch: **** Only garbage was found in the '
                                 'patch input.\n')

        if (patch_output and patch_output.startswith('patch: **** ')
                and patch_output != only_garbage_in_patch):
            raise SCMError('Failed to execute command: %s\n%s' %
                           (cmd, patch_output))

        # Check the patch for any added/deleted empty files to handle.
        if self.supports_empty_files():
            try:
                with open(patch_file, 'rb') as f:
                    patch = f.read()
            except IOError as e:
                logging.error('Unable to read file %s: %s', patch_file, e)
                return

            patched_empty_files = self.apply_patch_for_empty_files(
                patch, p_num, revert=revert)

            # If there are no empty files in a "garbage-only" patch, the patch
            # is probably malformed.
            if (patch_output == only_garbage_in_patch
                    and not patched_empty_files):
                raise SCMError('Failed to execute command: %s\n%s' %
                               (cmd, patch_output))

        # TODO: Should this take into account apply_patch_for_empty_files ?
        #       The return value of that function is False both when it fails
        #       and when there are no empty files.
        return PatchResult(applied=(rc == 0), patch_output=patch_output)
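For reference, the exit-status convention the example above relies on comes straight from GNU patch: 0 means every hunk applied, 1 means some hunks were rejected, and 2 means serious trouble (which is also what you get for a patch that only creates or deletes empty files). The following standalone sketch, with a made-up helper name and no rbtools dependencies, shows the same invocation pattern using plain subprocess:

import subprocess

def run_patch(patch_path, strip_level=1, revert=False):
    """Run GNU patch on patch_path and return (exit code, combined output)."""
    cmd = ['patch', '-p%d' % strip_level, '-i', patch_path]

    if revert:
        # -R reverses the patch, mirroring the revert flag above.
        cmd.append('-R')

    result = subprocess.run(cmd, capture_output=True, text=True)
    return result.returncode, result.stdout + result.stderr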
Code example #2
File: tfs.py  Project: pombredanne/rbtools
    def diff(self, revisions, include_files, exclude_patterns):
        """Return the generated diff.

        Args:
            revisions (dict):
                A dictionary containing ``base`` and ``tip`` keys.

            include_files (list):
                A list of file paths to include in the diff.

            exclude_patterns (list):
                A list of file paths to exclude from the diff.

        Returns:
            dict:
            A dictionary containing the following keys:

            ``diff`` (:py:class:`bytes`):
                The contents of the diff to upload.

            ``base_commit_id`` (:py:class:`unicode`, optional):
                The ID of the commit that the change is based on, if available.
                This is necessary for some hosting services that don't provide
                individual file access.
        """
        base = str(revisions['base'])
        tip = str(revisions['tip'])

        if tip == self.REVISION_WORKING_COPY:
            return self._diff_working_copy(base, include_files,
                                           exclude_patterns)
        else:
            raise SCMError('Posting committed changes is not yet supported '
                           'for TFS when using the Team Explorer Everywhere '
                           'wrapper.')
Code example #3
File: svn.py  Project: beol/rbtools
    def _convert_symbolic_revision(self, revision):
        command = ['-r', six.text_type(revision), '-l', '1']

        if getattr(self.options, 'repository_url', None):
            command.append(self.options.repository_url)

        log = self.svn_log_xml(command)

        if log is not None:
            try:
                root = ElementTree.fromstring(log)
            except ValueError as e:
                # _convert_symbolic_revision() nominally raises a ValueError to
                # indicate any failure to determine the revision number from
                # the log entry.  Here, we explicitly catch a ValueError from
                # ElementTree and raise a generic SCMError so that this
                # specific failure to parse the XML log output is
                # differentiated from the nominal case.
                raise SCMError('Failed to parse svn log - %s.' % e)

            logentry = root.find('logentry')
            if logentry is not None:
                return int(logentry.attrib['revision'])

        raise ValueError
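As a quick illustration of the XML handling above, here is a standalone sketch that runs ElementTree over a made-up ``svn log --xml`` snippet and pulls the revision number out of the ``<logentry>`` attribute:

from xml.etree import ElementTree

# Illustrative sample only; real output comes from 'svn log --xml -l 1'.
sample_log = b'''<?xml version="1.0"?>
<log>
  <logentry revision="42">
    <msg>Fix the widget.</msg>
  </logentry>
</log>'''

root = ElementTree.fromstring(sample_log)
logentry = root.find('logentry')

if logentry is not None:
    print(int(logentry.attrib['revision']))  # -> 42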
Code example #4
    def diff(self, revisions, include_files, exclude_patterns):
        """Return the generated diff.

        Args:
            revisions (dict):
                A dictionary containing ``base`` and ``tip`` keys.

            include_files (list):
                A list of file paths to include in the diff.

            exclude_patterns (list):
                A list of file paths to exclude from the diff.

        Returns:
            dict:
            A dictionary containing ``diff``, ``parent_diff``, and
            ``base_commit_id`` keys. In the case of TFS, the parent diff key
            will always be ``None``.
        """
        base = str(revisions['base'])
        tip = str(revisions['tip'])

        if tip == self.REVISION_WORKING_COPY:
            return self._diff_working_copy(base, include_files,
                                           exclude_patterns)
        else:
            raise SCMError('Posting committed changes is not yet supported '
                           'for TFS when using the Team Explorer Everywhere '
                           'wrapper.')
Code example #5
File: tfs.py  Project: pombredanne/rbtools
    def diff(self, revisions, include_files, exclude_patterns):
        """Return the generated diff.

        Args:
            revisions (dict):
                A dictionary containing ``base`` and ``tip`` keys.

            include_files (list):
                A list of file paths to include in the diff.

            exclude_patterns (list):
                A list of file paths to exclude from the diff.

        Returns:
            dict:
            A dictionary containing the following keys:

            ``diff`` (:py:class:`bytes`):
                The contents of the diff to upload.

            ``base_commit_id`` (:py:class:`unicode`, optional):
                The ID of the commit that the change is based on, if available.
                This is necessary for some hosting services that don't provide
                individual file access.

        Raises:
            rbtools.clients.errors.SCMError:
                Something failed when creating the diff.
        """
        base = revisions['base']
        tip = revisions['tip']

        rc, diff, errors = self._run_helper(['diff', '--', base, tip],
                                            ignore_errors=True,
                                            results_unicode=False,
                                            log_output_on_error=False)

        if rc in (0, 2):
            if rc == 2:
                # Magic return code that means success, but there were
                # un-tracked files in the working directory.
                logging.warning('There are added or deleted files which have '
                                'not been added to TFS. These will not be '
                                'included in your review request.')

            return {
                'diff': diff,
                'parent_diff': None,
                'base_commit_id': None,
            }
        else:
            raise SCMError(errors.strip())
Code example #6
    def diff(self, revisions, include_files, exclude_patterns):
        """Return the generated diff.

        Args:
            revisions (dict):
                A dictionary containing ``base`` and ``tip`` keys.

            include_files (list):
                A list of file paths to include in the diff.

            exclude_patterns (list):
                A list of file paths to exclude from the diff.

        Returns:
            dict:
            A dictionary containing ``diff``, ``parent_diff``, and
            ``base_commit_id`` keys. In the case of TFS, the parent diff key
            will always be ``None``.

        Raises:
            rbtools.clients.errors.SCMError:
                Something failed when creating the diff.
        """
        base = revisions['base']
        tip = revisions['tip']

        rc, diff, errors = self._run_helper(['diff', '--', base, tip],
                                            ignore_errors=True,
                                            results_unicode=False,
                                            log_output_on_error=False)

        if rc in (0, 2):
            if rc == 2:
                # Magic return code that means success, but there were
                # un-tracked files in the working directory.
                logging.warning('There are added or deleted files which have '
                                'not been added to TFS. These will not be '
                                'included in your review request.')

            return {
                'diff': diff,
                'parent_diff': None,
                'base_commit_id': None,
            }
        else:
            raise SCMError(errors.strip())
Code example #7
File: mercurial.py  Project: totoroliu/rbtools
    def _get_remote_branch(self):
        """Return the remote branch assoicated with this repository.

        If the remote branch is not defined, the parent branch of the
        repository is returned.
        """
        remote = getattr(self.options, 'tracking', None)

        if not remote:
            try:
                remote = self._remote_path[0]
            except IndexError:
                remote = None

        if not remote:
            raise SCMError('Could not determine remote branch to use for '
                           'diff creation. Specify --tracking-branch to '
                           'continue.')

        return remote
Code example #8
File: svn.py  Project: pbwkoswara/rbtools
    def get_raw_commit_message(self, revisions):
        """Extracts the commit message based on the provided revision range."""
        """Only takes the tip revision at the moment"""
        command = ['-r', six.text_type(revisions['tip']), '-l', '1']
        log = self.svn_log_xml(command)

        if log is not None:
            try:
                root = ElementTree.fromstring(log)
            except ValueError as e:
                # get_raw_commit_message() nominally raises a ValueError to
                # indicate any failure to extract the commit message from the
                # log entry.  Here, we explicitly catch a ValueError from
                # ElementTree and raise a generic SCMError so that this
                # specific failure to parse the XML log output is
                # differentiated from the nominal case.
                raise SCMError('Failed to parse svn log - %s.' % e)

            logentry = root.find('logentry')
            if logentry is not None:
                return logentry.find('msg').text

        raise ValueError
Code example #9
    def get_raw_commit_message(self, revisions):
        """Return the raw commit message(s) for the given revisions.

        Args:
            revisions (dict):
                Revisions to get the commit messages for. This will contain
                ``tip`` and ``base`` keys.

        Returns:
            unicode:
            The commit messages for all the requested revisions.
        """
        base = six.text_type(revisions['base'])
        tip = six.text_type(revisions['tip'])

        if (tip == SVNClient.REVISION_WORKING_COPY
                or tip.startswith(SVNClient.REVISION_CHANGELIST_PREFIX)):
            return ''

        command = ['-r', '%s:%s' % (base, tip)]

        if getattr(self.options, 'repository_url', None):
            command.append(self.options.repository_url)

        log = self.svn_log_xml(command)

        try:
            root = ElementTree.fromstring(log)
        except ValueError as e:
            raise SCMError('Failed to parse svn log: %s' % e)

        # We skip the first commit message, because we want commit messages
        # corresponding to the changes that will be included in the diff.
        messages = root.findall('.//msg')[1:]

        return '\n\n'.join(message.text for message in messages)
Code example #10
File: git.py  Project: fangwentong/rbtools
    def get_repository_info(self):
        """Get repository information for the current Git working tree.

        Returns:
            rbtools.clients.RepositoryInfo:
            The repository info structure.
        """
        # Temporarily reset the toplevel. This is necessary for making things
        # work correctly in unit tests where we may be moving the cwd around a
        # lot.
        self._git_toplevel = None

        if not check_install(['git', '--help']):
            # CreateProcess (launched via subprocess, used by check_install)
            # does not automatically append .cmd for things it finds in PATH.
            # If we're on Windows, and this works, save it for further use.
            if (sys.platform.startswith('win') and
                check_install(['git.cmd', '--help'])):
                self.git = 'git.cmd'
            else:
                logging.debug('Unable to execute "git --help" or "git.cmd '
                              '--help": skipping Git')
                return None

        git_dir = self._execute([self.git, 'rev-parse', '--git-dir'],
                                ignore_errors=True).rstrip('\n')

        if git_dir.startswith('fatal:') or not os.path.isdir(git_dir):
            return None

        # Sometimes core.bare is not set, and generates an error, so ignore
        # errors. Valid values are 'true' or '1'.
        bare = execute([self.git, 'config', 'core.bare'],
                       ignore_errors=True).strip()
        self.bare = bare in ('true', '1')

        # Running in directories other than the top level of a work-tree
        # would result in broken diffs on the server.
        if not self.bare:
            git_top = execute([self.git, 'rev-parse', '--show-toplevel'],
                              ignore_errors=True).rstrip('\n')

            # The top level might not work on old Git versions, so we use the
            # git dir to find it.
            if (git_top.startswith(('fatal:', 'cygdrive')) or
                not os.path.isdir(git_dir)):
                git_top = git_dir

            self._git_toplevel = os.path.abspath(git_top)

        self._head_ref = self._execute(
            [self.git, 'symbolic-ref', '-q', 'HEAD'],
            ignore_errors=True).strip()

        # We know we have something we can work with. Let's find out
        # what it is. We'll try SVN first, but only if there's a .git/svn
        # directory. Otherwise, it may attempt to create one and scan
        # revisions, which can be slow. Also skip SVN detection if the git
        # repository was specified on command line.
        git_svn_dir = os.path.join(git_dir, 'svn')

        if (not getattr(self.options, 'repository_url', None) and
            os.path.isdir(git_svn_dir) and
            len(os.listdir(git_svn_dir)) > 0):
            data = self._execute([self.git, 'svn', 'info'], ignore_errors=True)

            m = re.search(r'^Repository Root: (.+)$', data, re.M)

            if m:
                path = m.group(1)
                m = re.search(r'^URL: (.+)$', data, re.M)

                if m:
                    base_path = m.group(1)[len(path):] or '/'
                    m = re.search(r'^Repository UUID: (.+)$', data, re.M)

                    if m:
                        uuid = m.group(1)
                        self._type = self.TYPE_GIT_SVN

                        m = re.search(r'Working Copy Root Path: (.+)$', data,
                                      re.M)

                        if m:
                            local_path = m.group(1)
                        else:
                            local_path = self._git_toplevel

                        return SVNRepositoryInfo(path=path,
                                                 base_path=base_path,
                                                 local_path=local_path,
                                                 uuid=uuid,
                                                 supports_parent_diffs=True)
            else:
                # Versions of git-svn before 1.5.4 don't (appear to) support
                # 'git svn info'.  If we fail because of an older git install,
                # here, figure out what version of git is installed and give
                # the user a hint about what to do next.
                version = self._execute([self.git, 'svn', '--version'],
                                        ignore_errors=True)
                version_parts = re.search(r'version (\d+)\.(\d+)\.(\d+)',
                                          version)
                svn_remote = self._execute(
                    [self.git, 'config', '--get', 'svn-remote.svn.url'],
                    ignore_errors=True)

                if (version_parts and svn_remote and
                    not is_valid_version((int(version_parts.group(1)),
                                          int(version_parts.group(2)),
                                          int(version_parts.group(3))),
                                         (1, 5, 4))):
                    raise SCMError('Your installation of git-svn must be '
                                   'upgraded to version 1.5.4 or later.')

        # Okay, maybe Perforce (git-p4).
        git_p4_ref = os.path.join(git_dir, 'refs', 'remotes', 'p4', 'master')
        if os.path.exists(git_p4_ref):
            data = self._execute([self.git, 'config', '--get', 'git-p4.port'],
                                 ignore_errors=True)
            m = re.search(r'(.+)', data)
            if m:
                port = m.group(1)
            else:
                port = os.getenv('P4PORT')

            if port:
                self._type = self.TYPE_GIT_P4
                return RepositoryInfo(path=port,
                                      base_path='',
                                      local_path=self._git_toplevel,
                                      supports_parent_diffs=True)

        # Nope, it's git then.
        # Check for a tracking branch and determine merge-base
        self._type = self.TYPE_GIT
        url = None

        if getattr(self.options, 'repository_url', None):
            url = self.options.repository_url
        else:
            upstream_branch = self._get_parent_branch()
            url = self._get_origin(upstream_branch).rstrip('/')

            if url.startswith('fatal:'):
                raise SCMError('Could not determine remote URL for upstream '
                               'branch %s' % upstream_branch)

            # Central bare repositories don't have origin URLs.
            # We return git_dir instead and hope for the best.
            if not url:
                url = os.path.abspath(git_dir)

        if url:
            return RepositoryInfo(path=url,
                                  base_path='',
                                  local_path=self._git_toplevel,
                                  supports_parent_diffs=True)
        return None
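The git-svn detection above boils down to scraping a few labelled lines out of `git svn info` output with anchored regular expressions. A standalone sketch of that scraping, using made-up sample output, looks like this:

import re

data = ('URL: https://svn.example.com/repo/trunk\n'
        'Repository Root: https://svn.example.com/repo\n'
        'Repository UUID: 1234abcd-0000-0000-0000-000000000000\n')

root = re.search(r'^Repository Root: (.+)$', data, re.M)
url = re.search(r'^URL: (.+)$', data, re.M)
uuid = re.search(r'^Repository UUID: (.+)$', data, re.M)

if root and url and uuid:
    # The base path is whatever part of the URL follows the repository root.
    base_path = url.group(1)[len(root.group(1)):] or '/'
    print(root.group(1), base_path, uuid.group(1))
    # -> https://svn.example.com/repo /trunk 1234abcd-0000-0000-0000-000000000000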
Code example #11
    def _process_diffs(self, my_diff_entries):
        # Diff generation based on perforce client
        diff_lines = []

        empty_filename = make_tempfile()
        tmp_diff_from_filename = make_tempfile()
        tmp_diff_to_filename = make_tempfile()

        for f in my_diff_entries:
            f = f.strip()

            if not f:
                continue

            m = re.search(
                r'(?P<type>[ACMD]) (?P<file>.*) '
                r'(?P<revspec>rev:revid:[-\d]+) '
                r'(?P<parentrevspec>rev:revid:[-\d]+) '
                r'src:(?P<srcpath>.*) '
                r'dst:(?P<dstpath>.*)$', f)
            if not m:
                raise SCMError('Could not parse "cm log" response: %s' % f)

            changetype = m.group("type")
            filename = m.group("file")

            if changetype == "M":
                # Handle moved files as a delete followed by an add.
                # Clunky, but at least it works
                oldfilename = m.group("srcpath")
                oldspec = m.group("revspec")
                newfilename = m.group("dstpath")
                newspec = m.group("revspec")

                self._write_file(oldfilename, oldspec, tmp_diff_from_filename)
                dl = self._diff_files(tmp_diff_from_filename, empty_filename,
                                      oldfilename, "rev:revid:-1", oldspec,
                                      changetype)
                diff_lines += dl

                self._write_file(newfilename, newspec, tmp_diff_to_filename)
                dl = self._diff_files(empty_filename, tmp_diff_to_filename,
                                      newfilename, newspec, "rev:revid:-1",
                                      changetype)
                diff_lines += dl

            else:
                newrevspec = m.group("revspec")
                parentrevspec = m.group("parentrevspec")

                logging.debug(
                    "Type %s File %s Old %s New %s" %
                    (changetype, filename, parentrevspec, newrevspec))

                old_file = new_file = empty_filename

                if (changetype in ['A'] or
                    (changetype in ['C'] and parentrevspec == "rev:revid:-1")):
                    # There's only one content to show
                    self._write_file(filename, newrevspec,
                                     tmp_diff_to_filename)
                    new_file = tmp_diff_to_filename
                elif changetype in ['C']:
                    self._write_file(filename, parentrevspec,
                                     tmp_diff_from_filename)
                    old_file = tmp_diff_from_filename
                    self._write_file(filename, newrevspec,
                                     tmp_diff_to_filename)
                    new_file = tmp_diff_to_filename
                elif changetype in ['D']:
                    self._write_file(filename, parentrevspec,
                                     tmp_diff_from_filename)
                    old_file = tmp_diff_from_filename
                else:
                    raise SCMError("Don't know how to handle change type "
                                   "'%s' for %s" % (changetype, filename))

                dl = self._diff_files(old_file, new_file, filename, newrevspec,
                                      parentrevspec, changetype)
                diff_lines += dl

        os.unlink(empty_filename)
        os.unlink(tmp_diff_from_filename)
        os.unlink(tmp_diff_to_filename)

        return ''.join(diff_lines)
Code example #12
    def _process_diffs(self, diff_entries):
        """Process the given diff entries.

        Args:
            diff_entries (list):
                The list of diff entries.

        Returns:
            bytes:
            The processed diffs.
        """
        diff_lines = []

        empty_filename = make_tempfile()
        tmp_diff_from_filename = make_tempfile()
        tmp_diff_to_filename = make_tempfile()

        for f in diff_entries:
            f = f.strip()

            if not f:
                continue

            m = re.search(
                br'(?P<type>[ACMD]) (?P<file>.*) '
                br'(?P<revspec>rev:revid:[-\d]+) '
                br'(?P<parentrevspec>rev:revid:[-\d]+) '
                br'src:(?P<srcpath>.*) '
                br'dst:(?P<dstpath>.*)$', f)
            if not m:
                raise SCMError('Could not parse "cm log" response: %s' % f)

            changetype = m.group('type')
            filename = m.group('file')

            if changetype == b'M':
                # Handle moved files as a delete followed by an add.
                # Clunky, but at least it works
                oldfilename = m.group('srcpath')
                oldspec = m.group('revspec')
                newfilename = m.group('dstpath')
                newspec = m.group('revspec')

                self._write_file(oldfilename, oldspec, tmp_diff_from_filename)
                dl = self._diff_files(tmp_diff_from_filename, empty_filename,
                                      oldfilename, 'rev:revid:-1', oldspec,
                                      changetype)
                diff_lines += dl

                self._write_file(newfilename, newspec, tmp_diff_to_filename)
                dl = self._diff_files(empty_filename, tmp_diff_to_filename,
                                      newfilename, newspec, 'rev:revid:-1',
                                      changetype)
                diff_lines += dl

            else:
                newrevspec = m.group('revspec')
                parentrevspec = m.group('parentrevspec')

                logging.debug('Type %s File %s Old %s New %s', changetype,
                              filename, parentrevspec, newrevspec)

                old_file = new_file = empty_filename

                if (changetype in [b'A']
                        or (changetype in [b'C']
                            and parentrevspec == b'rev:revid:-1')):
                    # There's only one content to show
                    self._write_file(filename, newrevspec,
                                     tmp_diff_to_filename)
                    new_file = tmp_diff_to_filename
                elif changetype in [b'C']:
                    self._write_file(filename, parentrevspec,
                                     tmp_diff_from_filename)
                    old_file = tmp_diff_from_filename
                    self._write_file(filename, newrevspec,
                                     tmp_diff_to_filename)
                    new_file = tmp_diff_to_filename
                elif changetype in [b'D']:
                    self._write_file(filename, parentrevspec,
                                     tmp_diff_from_filename)
                    old_file = tmp_diff_from_filename
                else:
                    raise SCMError('Unknown change type "%s" for %s' %
                                   (changetype, filename))

                dl = self._diff_files(old_file, new_file, filename, newrevspec,
                                      parentrevspec, changetype)
                diff_lines += dl

        os.unlink(empty_filename)
        os.unlink(tmp_diff_from_filename)
        os.unlink(tmp_diff_to_filename)

        return b''.join(diff_lines)
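To see what the byte-string regular expression above actually matches, here is a standalone sketch run against a made-up `cm log` entry line:

import re

entry = (b'C src/main.c rev:revid:1234 rev:revid:1200 '
         b'src:src/main.c dst:src/main.c')

m = re.search(
    br'(?P<type>[ACMD]) (?P<file>.*) '
    br'(?P<revspec>rev:revid:[-\d]+) '
    br'(?P<parentrevspec>rev:revid:[-\d]+) '
    br'src:(?P<srcpath>.*) '
    br'dst:(?P<dstpath>.*)$', entry)

print(m.group('type'), m.group('file'), m.group('revspec'))
# -> b'C' b'src/main.c' b'rev:revid:1234'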
Code example #13
File: git.py  Project: LatticeEngines/rbtools
    def get_commit_history(self, revisions):
        """Return the commit history specified by the revisions.

        Args:
            revisions (dict):
                A dictionary of revisions to generate history for, as returned
                by :py:meth:`parse_revision_spec`.

        Returns:
            list of dict:
            The list of history entries, in order. The dictionaries have the
            following keys:

            ``commit_id``:
                The unique identifier of the commit.

            ``parent_id``:
                The unique identifier of the parent commit.

            ``author_name``:
                The name of the commit's author.

            ``author_email``:
                The e-mail address of the commit's author.

            ``author_date``:
                The date the commit was authored.

            ``committer_name``:
                The committer's name.

            ``committer_email``:
                The e-mail address of the committer.

            ``committer_date``:
                The date the commit was committed.

            ``commit_message``:
                The commit's message.

        Raises:
            rbtools.clients.errors.SCMError:
                The history is non-linear or there is a commit with no parents.
        """
        log_fields = {
            'commit_id': b'%H',
            'parent_id': b'%P',
            'author_name': b'%an',
            'author_email': b'%ae',
            'author_date': b'%ad',
            'committer_name': b'%cn',
            'committer_email': b'%ce',
            'committer_date': b'%cd',
            'commit_message': b'%B',
        }

        # 0x1f is the ASCII unit separator, used here as a field separator. It
        # is a non-printable character that should not appear in any field in
        # `git log`.
        log_format = b'%x1f'.join(six.itervalues(log_fields))

        log_entries = execute([
            self.git,
            b'log',
            b'-z',
            b'--reverse',
            b'--pretty=format:%s' % log_format,
            b'--date=iso8601-strict',
            b'%s..%s' % (bytes(revisions['base']), bytes(revisions['tip'])),
        ],
                              ignore_errors=True,
                              none_on_ignored_error=True,
                              results_unicode=True)

        if not log_entries:
            return None

        history = []
        field_names = six.viewkeys(log_fields)

        for log_entry in log_entries.split(self._NUL):
            fields = log_entry.split(self._FIELD_SEP)
            entry = dict(zip(field_names, fields))

            parents = entry['parent_id'].split()

            if len(parents) > 1:
                raise SCMError(
                    'The Git SCMClient only supports posting commit histories '
                    'that are entirely linear.')
            elif len(parents) == 0:
                raise SCMError(
                    'The Git SCMClient only supports posting commits that '
                    'have exactly one parent.')

            history.append(entry)

        return history
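The separator trick above (joining fields with 0x1f and terminating records with NUL via `git log -z`) can be demonstrated in isolation with plain strings. The commit data in this sketch is made up:

FIELD_SEP = '\x1f'   # ASCII unit separator, placed between fields
RECORD_SEP = '\x00'  # NUL, placed between records (what 'git log -z' emits)

field_names = ['commit_id', 'parent_id', 'commit_message']
raw = RECORD_SEP.join([
    FIELD_SEP.join(['abc123', 'def456', 'First commit']),
    FIELD_SEP.join(['789aaa', 'abc123', 'Second commit']),
])

history = [dict(zip(field_names, record.split(FIELD_SEP)))
           for record in raw.split(RECORD_SEP)]

print(history[1]['commit_message'])  # -> Second commit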
Code example #14
File: clearcase.py  Project: solarmist/rbtools
    def get_repository_info(self):
        """Return information on the Clear Case repository.

        This will first check if the cleartool command is installed and in the
        path, and that the current working directory is inside of the view.
        """
        if not check_install(['cleartool', 'help']):
            logging.debug('Unable to execute "cleartool help": skipping '
                          'ClearCase')
            return None

        viewname = execute(['cleartool', 'pwv', '-short']).strip()
        if viewname.startswith('** NONE'):
            return None

        # Now that we know it's ClearCase, make sure we have GNU diff
        # installed, and error out if we don't.
        check_gnu_diff()

        property_lines = execute(
            ['cleartool', 'lsview', '-full', '-properties', '-cview'],
            split_lines=True)
        for line in property_lines:
            properties = line.split(' ')
            if properties[0] == 'Properties:':
                # Determine the view type and check if it's supported.
                #
                # Specifically check if webview was listed in properties
                # because webview types also list the 'snapshot'
                # entry in properties.
                if 'webview' in properties:
                    raise SCMError('Webviews are not supported. You can use '
                                   'rbt commands only in dynamic or snapshot '
                                   'views.')
                if 'dynamic' in properties:
                    self.viewtype = 'dynamic'
                else:
                    self.viewtype = 'snapshot'

                break

        # Find current VOB's tag
        vobstag = execute(['cleartool', 'describe', '-short', 'vob:.'],
                          ignore_errors=True).strip()
        if 'Error: ' in vobstag:
            raise SCMError('Failed to generate diff. Run rbt inside a VOB.')

        root_path = execute(['cleartool', 'pwv', '-root'],
                            ignore_errors=True).strip()
        if 'Error: ' in root_path:
            raise SCMError('Failed to generate diff. Run rbt inside a view.')

        # From current working directory cut path to VOB. On Windows
        # and under cygwin, the VOB tag contains the VOB's path including
        # name, e.g. `\new_proj` for a VOB `new_proj` mounted at the root
        # of a drive. On Unix, the VOB tag is similar, but with a different
        # path separator, e.g. `/vobs/new_proj` for our new_proj VOB mounted
        # at `/vobs`.
        cwd = os.getcwd()
        base_path = cwd[:len(root_path) + len(vobstag)]

        return ClearCaseRepositoryInfo(path=base_path,
                                       base_path=base_path,
                                       vobstag=vobstag,
                                       supports_parent_diffs=False)
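The `base_path` computation at the end of the example is just string slicing: the working directory is truncated to the length of the view root plus the VOB tag. A tiny standalone sketch, with made-up paths, makes the arithmetic concrete:

root_path = '/view/myview'            # from 'cleartool pwv -root'
vobstag = '/vobs/new_proj'            # from 'cleartool describe -short vob:.'
cwd = '/view/myview/vobs/new_proj/src'

base_path = cwd[:len(root_path) + len(vobstag)]
print(base_path)  # -> /view/myview/vobs/new_proj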
Code example #15
File: clearcase.py  Project: solarmist/rbtools
    def find_server_repository_info(self, server):
        """
        The point of this function is to find a repository on the server that
        matches self, even if the paths aren't the same. (For example, if self
        uses an 'http' path, but the server uses a 'file' path for the same
        repository.) It does this by comparing the VOB's name and uuid. If the
        repositories use the same path, you'll get back self, otherwise you'll
        get a different ClearCaseRepositoryInfo object (with a different path).
        """

        # Find VOB's family uuid based on VOB's tag
        uuid = self._get_vobs_uuid(self.vobstag)
        logging.debug('Repository vobstag %s uuid is %r', self.vobstag, uuid)

        # To reduce HTTP requests (_get_repository_info calls), we build an
        # ordered list of ClearCase repositories starting with the ones that
        # have a similar vobstag.
        repository_scan_order = deque()

        # Because the VOB tag is platform-specific, we split and search
        # for the remote name in any sub-part so this HTTP request
        # optimization can work for users on both Windows and Unix-like
        # platforms.
        vob_tag_parts = self.vobstag.split(cpath.sep)

        # Reduce list of repositories to only ClearCase ones and sort them by
        # repo name matching vobstag (or some part of the vobstag) first.
        for repository in server.get_repositories(tool='ClearCase').all_items:
            # Ignore non-ClearCase repositories.
            if repository['tool'] != 'ClearCase':
                continue

            repo_name = repository['name']

            # Repositories with a similar VOB tag get put at the beginning and
            # the others at the end.
            if repo_name == self.vobstag or repo_name in vob_tag_parts:
                repository_scan_order.appendleft(repository)
            else:
                repository_scan_order.append(repository)

        # Now try to find a matching uuid
        for repository in repository_scan_order:
            repo_name = repository['name']
            try:
                info = repository.get_info()
            except APIError as e:
                # If the current repository is not publicly accessible and the
                # current user has no explicit access to it, the server will
                # return error_code 101 and http_status 403.
                if e.error_code == 101 and e.http_status == 403:
                    # We can safely skip this repository unless the VOB tag
                    # matches, in which case the permission error is fatal.
                    if repo_name == self.vobstag:
                        raise SCMError('You do not have permission to access '
                                       'this repository.')

                    continue
                else:
                    # Bubble up any other errors.
                    raise e

            if not info or uuid != info['uuid']:
                continue

            path = info['repopath']
            logging.debug('Matching repository uuid:%s with path:%s', uuid,
                          path)
            return ClearCaseRepositoryInfo(path=path,
                                           base_path=path,
                                           vobstag=self.vobstag)

        # We didn't find the uuid, but if the server version is >= 1.5.3
        # we can try to use the VOB's name, hoping it is better than the
        # current VOB's path.
        if parse_version(server.rb_version) >= parse_version('1.5.3'):
            self.path = cpath.split(self.vobstag)[1]

        # We didn't find a matching repository on the server.
        # We'll just return self and hope for the best.
        return self
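The scan-ordering optimization above is worth calling out: repositories whose name matches the vobstag (or one of its path components) are pushed onto the left of a deque so they are probed first, which keeps the number of `get_info()` HTTP requests down. A standalone sketch with made-up names:

from collections import deque

vobstag = '/vobs/new_proj'
vob_tag_parts = vobstag.split('/')
candidates = ['docs', 'new_proj', 'tools', '/vobs/new_proj']

scan_order = deque()

for name in candidates:
    if name == vobstag or name in vob_tag_parts:
        # Likely matches go to the front of the queue.
        scan_order.appendleft(name)
    else:
        scan_order.append(name)

print(list(scan_order))
# -> ['/vobs/new_proj', 'new_proj', 'docs', 'tools']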
Code example #16
File: clearcase.py  Project: solarmist/rbtools
    def _get_label_changeset(self, labels):
        """Returns information about the versions changed between labels.

        This takes into account the changes done between labels and restrict
        analysis to current working directory. A ClearCase label belongs to a
        uniq vob.
        """
        changeset = []
        tmp_labels = []

        # Initialize comparison_path to current working directory.
        # TODO: support another argument to manage a different comparison path.
        comparison_path = os.getcwd()

        error_message = None
        try:
            # Unless the user has provided two labels, set a temporary label
            # on the currently seen version of the comparison_path directory.
            # It will be used to process the changeset, since ClearCase can
            # easily identify each file and the associated version belonging
            # to a label.
            if len(labels) == 1:
                tmp_lb = self._get_tmp_label()
                tmp_labels.append(tmp_lb)
                self._set_label(tmp_lb, comparison_path)
                labels.append(tmp_lb)

            label_count = len(labels)
            if label_count != 2:
                raise Exception(
                    'ClearCase label comparison does not support %d labels' %
                    label_count)

            # Now we get 2 labels for comparison, check if they are both valid.
            repository_info = self.get_repository_info()
            for label in labels:
                if not self._is_a_label(label, repository_info.vobstag):
                    raise Exception('ClearCase label %s is not a valid label' %
                                    label)

            previous_label, current_label = labels
            logging.debug('Comparison between labels %s and %s on %s',
                          previous_label, current_label, comparison_path)

            # List ClearCase element path and version belonging to previous and
            # current labels, element path is the key of each dict.
            previous_elements = {}
            current_elements = {}
            previous_label_elements_thread = get_elements_from_label_thread(
                1, comparison_path, previous_label, previous_elements)
            previous_label_elements_thread.start()

            current_label_elements_thread = get_elements_from_label_thread(
                2, comparison_path, current_label, current_elements)
            current_label_elements_thread.start()

            previous_label_elements_thread.join()
            current_label_elements_thread.join()

            seen = []
            changelist = {}
            # Iterate on each ClearCase path in order to find respective
            # previous and current version.
            for path in itertools.chain(previous_elements.keys(),
                                        current_elements.keys()):
                if path in seen:
                    continue
                seen.append(path)

                # Initialize previous and current version to '/main/0'
                changelist[path] = {
                    'previous': '/main/0',
                    'current': '/main/0',
                }

                if path in current_elements:
                    changelist[path]['current'] = \
                        current_elements[path]['version']
                if path in previous_elements:
                    changelist[path]['previous'] = \
                        previous_elements[path]['version']
                logging.debug('path: %s\nprevious: %s\ncurrent:  %s\n', path,
                              changelist[path]['previous'],
                              changelist[path]['current'])

                # Prevent adding identical version to comparison.
                if changelist[path]['current'] == changelist[path]['previous']:
                    continue
                changeset.append((self._construct_extended_path(
                    path, changelist[path]['previous']),
                                  self._construct_extended_path(
                                      path, changelist[path]['current'])))

        except Exception as e:
            error_message = str(e)

        finally:
            # Delete all temporary labels.
            for lb in tmp_labels:
                if self._is_a_label(lb):
                    self._remove_label(lb)
            if error_message:
                raise SCMError('Label comparison failed:\n%s' % error_message)

        return changeset
Code example #17
File: clearcase.py  Project: solarmist/rbtools
    def _sanitize_activity_changeset(self, changeset):
        """Return changeset containing non-binary, branched file versions.

        A UCM activity changeset contains all file revisions created/touched
        during this activity. File revisions are ordered earlier versions first
        in the format:
        changelist = [
        <path>@@<branch_path>/<version_number>, ...,
        <path>@@<branch_path>/<version_number>
        ]

        <path> is relative path to file
        <branch_path> is clearcase specific branch path to file revision
        <version number> is the version number of the file in <branch_path>.

        A UCM activity changeset can contain changes from different vobs,
        however reviewboard supports only changes from a single repo at the
        same time, so changes made outside of the current vobstag will be
        ignored.
        """
        changelist = {}
        # Maybe we should be able to access repository_info without calling
        # cleartool again.
        repository_info = self.get_repository_info()

        for change in changeset:
            path, current = change.split('@@')

            # If a file isn't in the correct vob, then ignore it.
            if path.find('%s/' % (repository_info.vobstag, )) == -1:
                logging.debug('Vobstag does not match, ignoring changes on %s',
                              path)
                continue

            version_number = self._determine_version(current)
            if path not in changelist:
                changelist[path] = {
                    'highest': version_number,
                    'lowest': version_number,
                    'current': current,
                }

            if version_number == 0:
                raise SCMError('Unexpected version_number=0 in activity '
                               'changeset')
            elif version_number > changelist[path]['highest']:
                changelist[path]['highest'] = version_number
                changelist[path]['current'] = current
            elif version_number < changelist[path]['lowest']:
                changelist[path]['lowest'] = version_number

        # Convert to list
        changeranges = []
        for path, version in six.iteritems(changelist):
            # The previous version is the predecessor of the lowest one, i.e.
            # its version number decreased by 1.
            branch_path = self._determine_branch_path(version['current'])
            prev_version_number = str(int(version['lowest']) - 1)
            version['previous'] = self._construct_revision(
                branch_path, prev_version_number)
            changeranges.append(
                (self._construct_extended_path(path, version['previous']),
                 self._construct_extended_path(path, version['current'])))

        return changeranges
Code example #18
File: clearcase.py  Project: solarmist/rbtools
    def parse_revision_spec(self, revisions):
        """Parses the given revision spec.

        The 'revisions' argument is a list of revisions as specified by the
        user. Items in the list do not necessarily represent a single revision,
        since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
        SCMTool-specific overrides of this method are expected to deal with
        such syntaxes.

        This will return a dictionary with the following keys:
            'base':        A revision to use as the base of the resulting diff.
            'tip':         A revision to use as the tip of the resulting diff.

        These will be used to generate the diffs to upload to Review Board (or
        print).

        There are many different ways to generate diffs for clearcase, because
        there are so many different workflows. This method serves more as a way
        to validate the passed-in arguments than actually parsing them in the
        way that other clients do.
        """
        n_revs = len(revisions)

        if n_revs == 0:
            return {
                'base': self.REVISION_CHECKEDOUT_BASE,
                'tip': self.REVISION_CHECKEDOUT_CHANGESET,
            }
        elif n_revs == 1:
            if revisions[0].startswith(self.REVISION_ACTIVITY_PREFIX):
                return {
                    'base': self.REVISION_ACTIVITY_BASE,
                    'tip': revisions[0][len(self.REVISION_ACTIVITY_PREFIX):],
                }
            if revisions[0].startswith(self.REVISION_BRANCH_PREFIX):
                return {
                    'base': self.REVISION_BRANCH_BASE,
                    'tip': revisions[0][len(self.REVISION_BRANCH_PREFIX):],
                }
            if revisions[0].startswith(self.REVISION_LABEL_PREFIX):
                return {
                    'base': self.REVISION_LABEL_BASE,
                    'tip': [revisions[0][len(self.REVISION_LABEL_PREFIX):]],
                }
            # TODO:
            # stream:streamname[@pvob] => review changes in this UCM stream
            #                             (UCM "branch")
            # baseline:baseline[@pvob] => review changes between this baseline
            #                             and the working directory
        elif n_revs == 2:
            if self.viewtype != 'dynamic':
                raise SCMError('To generate a diff using multiple revisions, '
                               'you must use a dynamic view.')

            if (revisions[0].startswith(self.REVISION_LABEL_PREFIX)
                    and revisions[1].startswith(self.REVISION_LABEL_PREFIX)):
                return {
                    'base': self.REVISION_LABEL_BASE,
                    'tip': [x[len(self.REVISION_LABEL_PREFIX):]
                            for x in revisions],
                }
            # TODO:
            # baseline:baseline1[@pvob] baseline:baseline2[@pvob]
            #                             => review changes between these two
            #                                baselines
            pass

        pairs = []
        for r in revisions:
            p = r.split(':')
            if len(p) != 2:
                raise InvalidRevisionSpecError(
                    '"%s" is not a valid file@revision pair' % r)
            pairs.append(p)

        return {
            'base': self.REVISION_FILES,
            'tip': pairs,
        }
Code example #19
    def get_repository_info(self):
        """Get repository information for the current Git working tree.

        This function changes the directory to the top level directory of the
        current working tree.
        """
        if not check_install(['git', '--help']):
            # CreateProcess (launched via subprocess, used by check_install)
            # does not automatically append .cmd for things it finds in PATH.
            # If we're on Windows, and this works, save it for further use.
            if (sys.platform.startswith('win') and
                check_install(['git.cmd', '--help'])):
                self.git = 'git.cmd'
            else:
                logging.debug('Unable to execute "git --help" or "git.cmd '
                              '--help": skipping Git')
                return None

        git_dir = execute([self.git, "rev-parse", "--git-dir"],
                          ignore_errors=True).rstrip("\n")

        if git_dir.startswith("fatal:") or not os.path.isdir(git_dir):
            return None

        # Sometimes core.bare is not set, and generates an error, so ignore
        # errors. Valid values are 'true' or '1'.
        bare = execute([self.git, 'config', 'core.bare'],
                       ignore_errors=True).strip()
        self.bare = bare in ('true', '1')

        # If we are not working in a bare repository, then we will change
        # directory to the top level of the working tree and lose our original
        # position. However, we need the original working directory for file
        # exclusion patterns, so we save it here.
        if self._original_cwd is None:
            self._original_cwd = os.getcwd()

        # Running in directories other than the top level of a work-tree
        # would result in broken diffs on the server.
        if not self.bare:
            git_top = execute([self.git, "rev-parse", "--show-toplevel"],
                              ignore_errors=True).rstrip("\n")

            # The top level might not work on old Git versions, so we use the
            # git dir to find it.
            if (git_top.startswith('fatal:') or not os.path.isdir(git_dir)
                or git_top.startswith('cygdrive')):
                git_top = git_dir

            os.chdir(os.path.abspath(git_top))

        self.head_ref = execute([self.git, 'symbolic-ref', '-q',
                                 'HEAD'], ignore_errors=True).strip()

        # We know we have something we can work with. Let's find out
        # what it is. We'll try SVN first, but only if there's a .git/svn
        # directory. Otherwise, it may attempt to create one and scan
        # revisions, which can be slow. Also skip SVN detection if the git
        # repository was specified on command line.
        git_svn_dir = os.path.join(git_dir, 'svn')

        if (not getattr(self.options, 'repository_url', None) and
            os.path.isdir(git_svn_dir) and len(os.listdir(git_svn_dir)) > 0):
            data = execute([self.git, "svn", "info"], ignore_errors=True)

            m = re.search(r'^Repository Root: (.+)$', data, re.M)

            if m:
                path = m.group(1)
                m = re.search(r'^URL: (.+)$', data, re.M)

                if m:
                    base_path = m.group(1)[len(path):] or "/"
                    m = re.search(r'^Repository UUID: (.+)$', data, re.M)

                    if m:
                        uuid = m.group(1)
                        self.type = "svn"

                        # Get SVN tracking branch
                        if getattr(self.options, 'tracking', None):
                            self.upstream_branch = self.options.tracking
                        else:
                            data = execute([self.git, "svn", "rebase", "-n"],
                                           ignore_errors=True)
                            m = re.search(r'^Remote Branch:\s*(.+)$', data,
                                          re.M)

                            if m:
                                self.upstream_branch = m.group(1)
                            else:
                                sys.stderr.write('Failed to determine SVN '
                                                 'tracking branch. Defaulting '
                                                 'to "master"\n')
                                self.upstream_branch = 'master'

                        return SVNRepositoryInfo(path=path,
                                                 base_path=base_path,
                                                 uuid=uuid,
                                                 supports_parent_diffs=True)
            else:
                # Versions of git-svn before 1.5.4 don't (appear to) support
                # 'git svn info'.  If we fail because of an older git install,
                # here, figure out what version of git is installed and give
                # the user a hint about what to do next.
                version = execute([self.git, "svn", "--version"],
                                  ignore_errors=True)
                version_parts = re.search(r'version (\d+)\.(\d+)\.(\d+)',
                                          version)
                svn_remote = execute(
                    [self.git, "config", "--get", "svn-remote.svn.url"],
                    ignore_errors=True)

                if (version_parts and svn_remote and
                    not is_valid_version((int(version_parts.group(1)),
                                          int(version_parts.group(2)),
                                          int(version_parts.group(3))),
                                         (1, 5, 4))):
                    raise SCMError('Your installation of git-svn must be '
                                   'upgraded to version 1.5.4 or later.')

        # Okay, maybe Perforce (git-p4).
        git_p4_ref = os.path.join(git_dir, 'refs', 'remotes', 'p4', 'master')
        if os.path.exists(git_p4_ref):
            data = execute([self.git, 'config', '--get', 'git-p4.port'],
                           ignore_errors=True)
            m = re.search(r'(.+)', data)
            if m:
                port = m.group(1)
            else:
                port = os.getenv('P4PORT')

            if port:
                self.type = 'perforce'
                self.upstream_branch = 'remotes/p4/master'
                return RepositoryInfo(path=port,
                                      base_path='',
                                      supports_parent_diffs=True)

        # Nope, it's git then.
        # Check for a tracking branch and determine merge-base
        self.upstream_branch = ''
        if self.head_ref:
            short_head = self._strip_heads_prefix(self.head_ref)
            merge = execute([self.git, 'config', '--get',
                             'branch.%s.merge' % short_head],
                            ignore_errors=True).strip()
            remote = execute([self.git, 'config', '--get',
                              'branch.%s.remote' % short_head],
                             ignore_errors=True).strip()

            merge = self._strip_heads_prefix(merge)

            if remote and remote != '.' and merge:
                self.upstream_branch = '%s/%s' % (remote, merge)

        url = None
        if getattr(self.options, 'repository_url', None):
            url = self.options.repository_url
            self.upstream_branch = self.get_origin(self.upstream_branch,
                                                   True)[0]
        else:
            self.upstream_branch, origin_url = \
                self.get_origin(self.upstream_branch, True)

            if not origin_url or origin_url.startswith("fatal:"):
                self.upstream_branch, origin_url = self.get_origin()

            url = origin_url.rstrip('/')

            # Central bare repositories don't have origin URLs.
            # We return git_dir instead and hope for the best.
            if not url:
                url = os.path.abspath(git_dir)

                # There is no remote, so skip this part of upstream_branch.
                self.upstream_branch = self.upstream_branch.split('/')[-1]

        if url:
            self.type = "git"
            return RepositoryInfo(path=url, base_path='',
                                  supports_parent_diffs=True)
        return None
Code example #20
    def apply_patch(self, patch_file, base_path, base_dir, p=None,
                    revert=False):
        """Apply the patch and return a PatchResult indicating its success.

        Args:
            patch_file (unicode):
                The name of the patch file to apply.

            base_path (unicode):
                The base path that the diff was generated in.

            base_dir (unicode):
                The path of the current working directory relative to the root
                of the repository.

            p (unicode, optional):
                The prefix level of the diff.

            revert (bool, optional):
                Whether the patch should be reverted rather than applied.

        Returns:
            rbtools.clients.PatchResult:
            The result of the patch operation.
        """
        # Figure out the -p argument for patch. We override the calculated
        # value if it is supplied via a commandline option.
        p_num = p or self._get_p_number(base_path, base_dir)

        cmd = ['patch']

        if revert:
            cmd.append('-R')

        try:
            p_num = int(p_num)
        except ValueError:
            logging.warning('Invalid -p value: %s; assuming zero.', p_num)
            p_num = 0

        if p_num is not None:
            if p_num >= 0:
                cmd.append('-p%d' % p_num)
            else:
                logging.warning('Unsupported -p value: %d; assuming zero.',
                                p_num)

        cmd.extend(['-i', six.text_type(patch_file)])

        # Ignore return code 2 in case the patch file consists of only empty
        # files, which 'patch' can't handle. Other 'patch' errors also give
        # return code 2, so we must check the command output.
        rc, patch_output = execute(cmd, extra_ignore_errors=(2,),
                                   return_error_code=True)
        only_garbage_in_patch = ('patch: **** Only garbage was found in the '
                                 'patch input.\n')

        if (patch_output and patch_output.startswith('patch: **** ') and
            patch_output != only_garbage_in_patch):
            raise SCMError('Failed to execute command: %s\n%s'
                           % (cmd, patch_output))

        # Check the patch for any added/deleted empty files to handle.
        if self.supports_empty_files():
            try:
                with open(patch_file, 'rb') as f:
                    patch = f.read()
            except IOError as e:
                logging.error('Unable to read file %s: %s', patch_file, e)
                return

            patched_empty_files = self.apply_patch_for_empty_files(
                patch, p_num, revert=revert)

            # If there are no empty files in a "garbage-only" patch, the patch
            # is probably malformed.
            if (patch_output == only_garbage_in_patch and
                not patched_empty_files):
                raise SCMError('Failed to execute command: %s\n%s'
                               % (cmd, patch_output))

        # TODO: Should this take into account apply_patch_for_empty_files ?
        #       The return value of that function is False both when it fails
        #       and when there are no empty files.
        return PatchResult(applied=(rc == 0), patch_output=patch_output)
Code example #21
    def get_commit_history(self, revisions):
        """Return the commit history specified by the revisions.

        Args:
            revisions (dict):
                A dictionary of revisions to generate history for, as returned
                by :py:meth:`parse_revision_spec`.

        Returns:
            list of dict:
            The list of history entries, in order.

        Raises:
            rbtools.clients.errors.SCMError:
                The history is non-linear or there is a commit with no parents.
        """
        log_fields = {
            'commit_id': '{node}',
            'parent_id': '{p1node}',
            'author_name': '{author|person}',
            'author_email': '{author|email}',
            'author_date': '{date|rfc3339date}',
            'parent2': '{p2node}',
            'commit_message': '{desc}',
        }
        log_format = self._FIELD_SEP_ESC.join(six.itervalues(log_fields))

        log_entries = execute([
            self._exe,
            'log',
            '--template',
            '%s%s' % (log_format, self._RECORD_SEP_ESC),
            '-r',
            '%(base)s::%(tip)s and not %(base)s' % revisions,
        ],
                              ignore_errors=True,
                              none_on_ignored_error=True,
                              results_unicode=True)

        if not log_entries:
            return None

        history = []
        field_names = six.viewkeys(log_fields)

        # The ASCII record separator will be appended to every record, so if we
        # attempt to split the entire output by the record separator, we will
        # end up with an empty ``log_entry`` at the end, which will cause
        # errors.
        for log_entry in log_entries[:-1].split(self._RECORD_SEP):
            fields = log_entry.split(self._FIELD_SEP)
            entry = dict(zip(field_names, fields))

            # We do not want `parent2` to be included in the entry because
            # the entry's items are used as the keyword arguments to the
            # method that uploads a commit and it would be unexpected.
            if entry.pop('parent2') != self.NO_PARENT:
                raise SCMError(
                    'The Mercurial SCMClient only supports posting commit '
                    'histories that are entirely linear.')
            elif entry['parent_id'] == self.NO_PARENT:
                raise SCMError(
                    'The Mercurial SCMClient only supports posting commits '
                    'that have exactly one parent.')

            history.append(entry)

        return history