Beispiel #1
0
def _file_grep_list_only_wtree(file, path, opts, path_prefix=None):
    # test and skip binary files
    if b'\x00' in file.read(1024):
        if opts.verbose:
            trace.warning("Binary file '%s' skipped.", path)
        return

    file.seek(0)  # search from beginning

    found = False
    if opts.fixed_string:
        pattern = opts.pattern.encode(_user_encoding, 'replace')
        for line in file:
            if pattern in line:
                found = True
                break
    else:  # not fixed_string
        for line in file:
            if opts.patternc.search(line):
                found = True
                break

    if (opts.files_with_matches and found) or \
            (opts.files_without_match and not found):
        if path_prefix and path_prefix != '.':
            # user has passed a dir arg, show that as result prefix
            path = osutils.pathjoin(path_prefix, path)
        opts.outputter.get_writer(path, None, None)()
Beispiel #2
0
def warn_codeplex(host):
    """Warn, at most once per process, that *host* is hosted on Codeplex."""
    global _warned_codeplex
    if _warned_codeplex:
        return
    warning("Please note %s is hosted on Codeplex which runs a broken "
            "Subversion server. Please consider using the bzr-tfs plugin, "
            "which provides support for CodePlex' native Team Foundation Server." % host)
    _warned_codeplex = True
Beispiel #3
0
 def set_user_option(self,
                     name,
                     value,
                     store=STORE_LOCATION,
                     warn_masked=False):
     """Write a configuration option to the requested store.

     Branch-config storage is not supported for Subversion branches and
     raises NotImplementedError.  When warn_masked is set, warns if the
     freshly written value is shadowed by locations.conf or branch.conf.
     """
     if store == STORE_GLOBAL:
         self._get_global_config().set_user_option(name, value)
     elif store == STORE_BRANCH:
         raise NotImplementedError(
             "Saving in branch config not supported for Subversion "
             "branches")
     else:
         self._get_location_config().set_user_option(name, value, store)
     if not warn_masked:
         return
     if store not in (STORE_GLOBAL, STORE_BRANCH):
         return
     # locations.conf takes precedence, so a value there masks ours.
     mask_value = self._get_location_config().get_user_option(name)
     if mask_value is not None:
         trace.warning(
             'Value "%s" is masked by "%s" from'
             ' locations.conf', value, mask_value)
         return
     if store != STORE_GLOBAL:
         return
     # A global value can additionally be masked by branch.conf.
     mask_value = self._get_branch_data_config().get_user_option(name)
     if mask_value is not None:
         trace.warning(
             'Value "%s" is masked by "%s" from'
             ' branch.conf', value, mask_value)
Beispiel #4
0
 def __init__(self):
     """Initialise the mapping, warning once if it is experimental."""
     super(BzrSvnMapping, self).__init__(foreign_vcs_svn)
     # Evaluation order preserved: check the plugin version tag first,
     # then the mapping's own experimental flag.
     is_experimental = version_info[3] == 'exp' or self.experimental
     if is_experimental and not BzrSvnMapping._warned_experimental:
         from breezy.trace import warning
         warning("using experimental bzr-svn mappings; may break existing "
                 "branches in the most horrible ways")
         BzrSvnMapping._warned_experimental = True
Beispiel #5
0
 def wipe_commit_data(self):
     """Discard saved commit data, unless the branch is currently locked."""
     branch = self.tree.branch
     if branch.get_physical_lock_status() or branch.is_locked():
         # XXX maybe show this in a GUI MessageBox (information box)???
         from breezy.trace import warning
         warning("Cannot wipe commit data because the branch is locked.")
         return
     self.ci_data.wipe()
Beispiel #6
0
def check_pysqlite_version(sqlite3):
    """Raise BzrError unless the sqlite library is at least version 3.3.

    :param sqlite3: a sqlite3-compatible module exposing
        ``sqlite_version_info``.
    """
    # Tuple comparison is equivalent to the original major/minor checks:
    # fail for anything below (3, 3).
    if sqlite3.sqlite_version_info[:2] < (3, 3):
        trace.warning('Needs at least sqlite 3.3.x')
        raise errors.BzrError("incompatible sqlite library")
Beispiel #7
0
def warn_uuid_reuse(uuid, location):
    """Warn that a UUID is being reused for different repositories."""
    global _reuse_uuids_warned
    # Only warn once per UUID per process.
    if uuid not in _reuse_uuids_warned:
        trace.warning(
            "Repository with UUID %s at %s contains fewer revisions "
            "than cache. This either means that this repository contains an out "
            "of date mirror of another repository (harmless), or that the UUID "
            "is being used for two different Subversion repositories ("
            "potential repository corruption).", uuid, location)
        _reuse_uuids_warned.add(uuid)
Beispiel #8
0
 def __init__(self, path=None):
     """Open (or create) the SHA map database.

     :param path: path of the tdb database file; None uses a transient
         in-memory dict instead.

     If the stored "version" key does not match TDB_MAP_VERSION the
     database is cleared and re-stamped.
     """
     self.path = path
     if path is None:
         self.db = {}
     else:
         import tdb
         # dict.has_key() was removed in Python 3; the `in` operator is
         # the equivalent (and also worked on Python 2).
         if path not in mapdbs():
             mapdbs()[path] = tdb.Tdb(path, TDB_HASH_SIZE, tdb.DEFAULT,
                                      os.O_RDWR | os.O_CREAT)
         self.db = mapdbs()[path]
     try:
         if int(self.db["version"]) != TDB_MAP_VERSION:
             trace.warning(
                 "SHA Map is incompatible (%s -> %d), rebuilding database.",
                 self.db["version"], TDB_MAP_VERSION)
             self.db.clear()
             self.db["version"] = str(TDB_MAP_VERSION)
     except KeyError:
         # Fresh database: stamp it with the current version.
         self.db["version"] = str(TDB_MAP_VERSION)
Beispiel #9
0
    def _finish_computing_transform(self):
        """Resolve remaining conflicts, report changes and warn about them.

        This is the second half of _compute_transform.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            resolver = lambda t, c: transform.conflict_pass(
                t, c, self.other_tree)
            fs_conflicts = transform.resolve_conflicts(self.tt, pb, resolver)
        finally:
            # Always retire the progress bar, even if resolution fails.
            pb.finished()
        if self.change_reporter is not None:
            from breezy import delta
            delta.report_changes(self.tt.iter_changes(), self.change_reporter)
        self.cook_conflicts(fs_conflicts)
        from breezy import trace
        for conflict in self.cooked_conflicts:
            trace.warning(conflict)
Beispiel #10
0
def warn_slow_revprops(config, server):
    """Warn, at most once, that revision properties are slow pre-svn-1.5.

    The warning can be disabled via the "warn-upgrade" boolean option in
    *config*; *server* selects the server- or client-oriented wording.
    """
    global _warned_slow_revprops
    if _warned_slow_revprops:
        return

    try:
        warn_upgrade = config.get_bool("warn-upgrade")
    except KeyError:
        # Option absent: warn by default.
        warn_upgrade = True
    if not warn_upgrade:
        return
    if server:
        trace.warning(
            "Upgrade server to svn 1.5 or higher for faster retrieving "
            "of revision properties.")
    else:
        trace.warning(
            "Upgrade to svn 1.5 or higher for faster retrieving of "
            "revision properties.")
    _warned_slow_revprops = True
Beispiel #11
0
 def save_commit_data(self):
     """Persist the commit message/bug data unless the branch is locked.

     Saving is skipped entirely when nothing differs from the already
     stored data.
     """
     branch = self.tree.branch
     if branch.get_physical_lock_status() or branch.is_locked():
         # XXX maybe show this in a GUI MessageBox (information box)???
         from breezy.trace import warning
         warning("Cannot save commit data because the branch is locked.")
         return
     # Gather the current UI state into a fresh commit-data object.
     new_data = QBzrCommitData(tree=self.tree)
     message = str(self.message.toPlainText()).strip()
     if message:
         new_data['message'] = message
     if self.bugsCheckBox.isChecked():
         bug_str = str(self.bugs.text()).strip()
     else:
         bug_str = ''
     if bug_str:
         new_data['bugs'] = bug_str
     # Only hit storage when something actually changed.
     if not new_data.compare_data(self.ci_data, all_keys=False):
         new_data.save()
Beispiel #12
0
def serve_svn(transport, host=None, port=None, inet=False):
    """Serve the repository at *transport* over the svn protocol.

    :param transport: transport pointing at the repository to serve.
    :param host: address to bind a TCP server to (default '0.0.0.0').
    :param port: TCP port (default SVN_PORT).
    :param inet: when True, serve over stdin/stdout instead of TCP.
    """
    trace.warning("server support in bzr-svn is experimental.")

    # Strip the readonly+ decorator when present.  (The original left
    # `url` unbound in the non-decorated case, raising NameError on the
    # next line.)
    if transport.base.startswith("readonly+"):
        url = transport.base[len("readonly+"):]
    else:
        url = transport.base
    path = urlutils.local_path_from_url(url)

    backend = BzrServerBackend(path)
    if inet:

        def send_fn(data):
            sys.stdout.write(data)
            sys.stdout.flush()

        server = SVNServer(backend, sys.stdin.read, send_fn)
    else:
        if port is None:
            port = SVN_PORT
        if host is None:
            host = '0.0.0.0'
        server = TCPSVNServer(backend, (host, port))
    server.serve()
Beispiel #13
0
 def warn(self, *msg):
     """Forward each message as a warning with an "hg: " prefix."""
     for item in msg:
         trace.warning("hg: %s" % item.rstrip())
Beispiel #14
0
 def _warn_busy_cache(self):
     """Warn that the repository cache is busy and will be ignored."""
     # The original message contained a dangling "%s" placeholder with no
     # argument, so the literal "%s" leaked into the user-visible output.
     # NOTE(review): if an identifying attribute for the repository is
     # available on self, it would be better to keep the placeholder and
     # pass that value -- confirm against callers.
     trace.warning("Cache for repository busy, ignoring")
Beispiel #15
0
    def sprout(self,
               url,
               revision_id=None,
               force_new_repo=False,
               recurse='down',
               possible_transports=None,
               accelerator_tree=None,
               hardlink=False,
               stacked=False,
               source_branch=None,
               create_tree_if_local=True):
        """Create a new branch at *url* seeded from this Subversion location.

        Initialises a control directory at the target, fetches history
        from the Subversion repository, sprouts the branch and optionally
        creates a working tree.  Stacking is not supported and raises
        UnstackableBranchFormat.

        NOTE(review): `recurse` and the incoming `source_branch` argument
        are accepted for interface compatibility but are not used by the
        visible code (`source_branch` is reassigned from open_branch()).
        """
        from breezy.repository import InterRepository
        from breezy.transport.local import LocalTransport
        relpath = self._determine_relpath(None)
        if relpath == u"":
            # Branching the repository root: if the guessed layout says
            # the root is not itself a branch, the user probably wanted
            # svn-import instead -- warn, but continue anyway.
            guessed_layout = self.find_repository().get_guessed_layout()
            if guessed_layout is not None and not guessed_layout.is_branch(
                    u""):
                trace.warning(
                    'Cloning Subversion repository as branch. '
                    'To import the individual branches in the repository, '
                    'use "bzr svn-import".')
        target_transport = get_transport(url, possible_transports)
        target_transport.ensure_base()
        # A "branch" segment parameter in the target URL requires a
        # format supporting colocated branches.
        require_colocated = ("branch"
                             in target_transport.get_segment_parameters())
        cloning_format = self.cloning_metadir(
            require_colocated=require_colocated)
        # Create/update the result branch
        result = cloning_format.initialize_on_transport(target_transport)

        source_repository = self.find_repository()
        if force_new_repo:
            result_repo = result.create_repository()
            target_is_empty = True
        else:
            # Reuse an existing repository when one is present; only a
            # freshly created one is known to be empty.
            try:
                result_repo = result.find_repository()
            except errors.NoRepositoryPresent:
                result_repo = result.create_repository()
                target_is_empty = True
            else:
                target_is_empty = None  # Unknown
        if stacked:
            raise UnstackableBranchFormat(self._format.get_branch_format(),
                                          self.root_transport.base)
        interrepo = InterRepository.get(source_repository, result_repo)
        try:
            source_branch = self.open_branch()
        except errors.NotBranchError:
            # Not a branch location: fetch without a project/mapping
            # restriction and create an empty branch below.
            source_branch = None
            project = None
            mapping = None
        else:
            project = source_branch.project
            mapping = source_branch.mapping
        interrepo.fetch(revision_id=revision_id,
                        project=project,
                        mapping=mapping,
                        target_is_empty=target_is_empty,
                        exclude_non_mainline=False)
        if source_branch is not None:
            if revision_id is None:
                revision_id = source_branch.last_revision()
            result_branch = source_branch.sprout(result,
                                                 revision_id=revision_id,
                                                 repository=result_repo)
            interbranch = InterBranch.get(source_branch, result_branch)
            interbranch.fetch(stop_revision=revision_id,
                              exclude_non_mainline=False)  # For the tags
        else:
            result_branch = result.create_branch()
        if (create_tree_if_local
                and isinstance(target_transport, LocalTransport)
                and (result_repo is None or result_repo.make_working_trees())):
            result.create_workingtree(accelerator_tree=accelerator_tree,
                                      hardlink=hardlink,
                                      from_branch=result_branch)
        return result
Beispiel #16
0
def versioned_grep(opts):
    """Grep versioned files in one revision or across a revision range.

    Uses opts.revision (one or two revision specs) to determine which
    revision trees to search, then walks opts.path_list in each selected
    revision, delegating the matching to dir_grep / versioned_file_grep.
    """
    wt, branch, relpath = \
        controldir.ControlDir.open_containing_tree_or_branch('.')
    with branch.lock_read():
        start_rev = opts.revision[0]
        start_revid = start_rev.as_revision_id(branch)
        if start_revid is None:
            # Spec did not resolve to a revision: fall back to revno 1.
            start_rev = RevisionSpec_revno.from_string("revno:1")
            start_revid = start_rev.as_revision_id(branch)
        srevno_tuple = branch.revision_id_to_dotted_revno(start_revid)

        if len(opts.revision) == 2:
            # Range grep: determine the end revision (defaulting to tip).
            end_rev = opts.revision[1]
            end_revid = end_rev.as_revision_id(branch)
            if end_revid is None:
                end_revno, end_revid = branch.last_revision_info()
            erevno_tuple = branch.revision_id_to_dotted_revno(end_revid)

            grep_mainline = (_rev_on_mainline(srevno_tuple)
                             and _rev_on_mainline(erevno_tuple))

            # ensure that we go in reverse order
            if srevno_tuple > erevno_tuple:
                srevno_tuple, erevno_tuple = erevno_tuple, srevno_tuple
                start_revid, end_revid = end_revid, start_revid

            # Optimization: Traversing the mainline in reverse order is much
            # faster when we don't want to look at merged revs. We try this
            # with _linear_view_revisions. If all revs are to be grepped we
            # use the slower _graph_view_revisions
            if opts.levels == 1 and grep_mainline:
                given_revs = _linear_view_revisions(
                    branch, start_revid, end_revid)
            else:
                given_revs = _graph_view_revisions(
                    branch, start_revid, end_revid)
        else:
            # We do an optimization below. For grepping a specific revison
            # We don't need to call _graph_view_revisions which is slow.
            # We create the start_rev_tuple for only that specific revision.
            # _graph_view_revisions is used only for revision range.
            start_revno = '.'.join(map(str, srevno_tuple))
            start_rev_tuple = (start_revid, start_revno, 0)
            given_revs = [start_rev_tuple]

        # GZ 2010-06-02: Shouldn't be smuggling this on opts, but easy for now
        opts.outputter = _Outputter(opts, use_cache=True)

        for revid, revno, merge_depth in given_revs:
            if opts.levels == 1 and merge_depth != 0:
                # with level=1 show only top level
                continue

            rev = RevisionSpec_revid.from_string(
                "revid:" + revid.decode('utf-8'))
            tree = rev.as_tree(branch)
            for path in opts.path_list:
                # Paths given on the command line are relative to the cwd;
                # translate into tree-relative paths.
                tree_path = osutils.pathjoin(relpath, path)
                if not tree.has_filename(tree_path):
                    trace.warning("Skipped unknown file '%s'.", path)
                    continue

                if osutils.isdir(path):
                    path_prefix = path
                    dir_grep(tree, path, relpath, opts, revno, path_prefix)
                else:
                    versioned_file_grep(
                        tree, tree_path, '.', path, opts, revno)
Beispiel #17
0
def _log_cleanup_error(exc):
    """Quietly record a cleanup failure; warn only when -Dcleanup is set.

    :param exc: the exception raised by the failed cleanup.
    """
    # Cleanup failures are normally only muttered to the log so they do
    # not drown out the original error that triggered the cleanup.
    trace.mutter('Cleanup failed:')
    trace.log_exception_quietly()
    if 'cleanup' in debug.debug_flags:
        trace.warning('brz: warning: Cleanup failed: %s', exc)
Beispiel #18
0
def _compare_trees(old_tree, new_tree, want_unchanged, specific_files,
                   include_root, extra_trees=None,
                   require_versioned=False, want_unversioned=False):
    """Worker function that implements Tree.changes_from.

    Walks new_tree.iter_changes(old_tree, ...) and buckets each change
    into a TreeDelta (added, removed, renamed, copied, kind_changed,
    modified, missing, unchanged, unversioned), then sorts every bucket
    by path for stable output.
    """
    delta = TreeDelta()
    # mutter('start compare_trees')

    for change in new_tree.iter_changes(
            old_tree, want_unchanged, specific_files, extra_trees=extra_trees,
            require_versioned=require_versioned,
            want_unversioned=want_unversioned):
        # Versioned on neither side: track separately as unversioned.
        if change.versioned == (False, False):
            delta.unversioned.append(change)
            continue
        if not include_root and (None, None) == change.parent_id:
            continue
        # "Fully present" on a side means versioned there AND having a
        # kind (i.e. something actually exists on that side).
        fully_present = tuple(
            (change.versioned[x] and change.kind[x] is not None)
            for x in range(2))
        if fully_present[0] != fully_present[1]:
            if fully_present[1] is True:
                delta.added.append(change)
            else:
                # Present before, gone now.  A vanished symlink on a
                # filesystem without symlink support is reported and
                # otherwise ignored rather than counted as removed.
                if change.kind[0] == 'symlink' and not new_tree.supports_symlinks():
                    trace.warning(
                        'Ignoring "%s" as symlinks '
                        'are not supported on this filesystem.' % (change.path[0],))
                else:
                    delta.removed.append(change)
        elif fully_present[0] is False:
            # Versioned but not fully present on either side.
            delta.missing.append(change)
        elif change.name[0] != change.name[1] or change.parent_id[0] != change.parent_id[1]:
            # If the name changes, or the parent_id changes, we have a rename or copy
            # (if we move a parent, that doesn't count as a rename for the
            # file)
            if change.copied:
                delta.copied.append(change)
            else:
                delta.renamed.append(change)
        elif change.kind[0] != change.kind[1]:
            delta.kind_changed.append(change)
        elif change.changed_content or change.executable[0] != change.executable[1]:
            delta.modified.append(change)
        else:
            delta.unchanged.append(change)

    def change_key(change):
        # Sort by the old path when available, else the new path; the
        # file_id breaks ties.
        if change.path[0] is None:
            path = change.path[1]
        else:
            path = change.path[0]
        return (path, change.file_id)

    delta.removed.sort(key=change_key)
    delta.added.sort(key=change_key)
    delta.renamed.sort(key=change_key)
    delta.copied.sort(key=change_key)
    delta.missing.sort(key=change_key)
    # TODO: jam 20060529 These lists shouldn't need to be sorted
    #       since we added them in alphabetical order.
    delta.modified.sort(key=change_key)
    delta.unchanged.sort(key=change_key)
    delta.unversioned.sort(key=change_key)

    return delta
Beispiel #19
0
def _file_grep(file_text, path, opts, revno, path_prefix=None, cache_id=None):
    """Search *file_text* (bytes) for the pattern and emit matching lines.

    Handles four modes: list-only output (files-with/without-matches)
    and per-line match output, each for fixed-string and regex patterns.
    Content with a NUL byte in the first 1KiB is treated as binary and
    skipped.

    :param file_text: complete file content as bytes.
    :param path: path used when reporting results.
    :param opts: grep option bag (pattern, patternc, fixed_string, ...).
    :param revno: revision number passed to the output writer.
    :param path_prefix: optional directory prefix shown before path.
    :param cache_id: optional key for cached output formatting.
    """
    # test and skip binary files
    if b'\x00' in file_text[:1024]:
        if opts.verbose:
            trace.warning("Binary file '%s' skipped.", path)
        return

    if path_prefix and path_prefix != '.':
        # user has passed a dir arg, show that as result prefix
        path = osutils.pathjoin(path_prefix, path)

    # GZ 2010-06-07: There's no actual guarentee the file contents will be in
    #                the user encoding, but we have to guess something and it
    #                is a reasonable default without a better mechanism.
    file_encoding = _user_encoding
    pattern = opts.pattern.encode(_user_encoding, 'replace')

    writeline = opts.outputter.get_writer(path, revno, cache_id)

    if opts.files_with_matches or opts.files_without_match:
        # List-only mode: we only need a yes/no answer for the whole file.
        if opts.fixed_string:
            found = pattern in file_text
        else:
            search = opts.patternc.search
            if b"$" not in pattern:
                # No end-of-line anchor: one search over the whole text.
                found = search(file_text) is not None
            else:
                # "$" needs per-line matching; for/else sets found=False
                # only when no line matched.
                for line in file_text.splitlines():
                    if search(line):
                        found = True
                        break
                else:
                    found = False
        if (opts.files_with_matches and found) or \
                (opts.files_without_match and not found):
            writeline()
    elif opts.fixed_string:
        # Fast path for no match, search through the entire file at once rather
        # than a line at a time. <http://effbot.org/zone/stringlib.htm>
        i = file_text.find(pattern)
        if i == -1:
            return
        # Trim leading text to the start of the first matching line; `b`
        # is the offset of that line's first byte.
        b = file_text.rfind(b"\n", 0, i) + 1
        if opts.line_number:
            # Line number of the first match = newlines before it + 1.
            start = file_text.count(b"\n", 0, b) + 1
        file_text = file_text[b:]
        if opts.line_number:
            for index, line in enumerate(file_text.splitlines()):
                if pattern in line:
                    line = line.decode(file_encoding, 'replace')
                    writeline(lineno=index + start, line=line)
        else:
            for line in file_text.splitlines():
                if pattern in line:
                    line = line.decode(file_encoding, 'replace')
                    writeline(line=line)
    else:
        # Fast path on no match, the re module avoids bad behaviour in most
        # standard cases, but perhaps could try and detect backtracking
        # patterns here and avoid whole text search in those cases
        search = opts.patternc.search
        if b"$" not in pattern:
            # GZ 2010-06-05: Grr, re.MULTILINE can't save us when searching
            #                through revisions as bazaar returns binary mode
            #                and trailing \r breaks $ as line ending match
            m = search(file_text)
            if m is None:
                return
            # As above: trim to the start of the first matching line.
            b = file_text.rfind(b"\n", 0, m.start()) + 1
            if opts.line_number:
                start = file_text.count(b"\n", 0, b) + 1
            file_text = file_text[b:]
        else:
            # "$"-anchored pattern: scan every line from the beginning.
            start = 1
        if opts.line_number:
            for index, line in enumerate(file_text.splitlines()):
                if search(line):
                    line = line.decode(file_encoding, 'replace')
                    writeline(lineno=index + start, line=line)
        else:
            for line in file_text.splitlines():
                if search(line):
                    line = line.decode(file_encoding, 'replace')
                    writeline(line=line)
Beispiel #20
0
    if (sqlite3.sqlite_version_info[0] < 3
            or (sqlite3.sqlite_version_info[0] == 3
                and sqlite3.sqlite_version_info[1] < 3)):
        trace.warning('Needs at least sqlite 3.3.x')
        raise errors.BzrError("incompatible sqlite library")


# Import sqlite3 (stdlib since Python 2.5), falling back to the external
# pysqlite2 package; either way the library version must satisfy
# check_pysqlite_version.  Catch only the expected failures instead of a
# bare `except:`, which would also swallow KeyboardInterrupt/SystemExit.
try:
    try:
        import sqlite3
        check_pysqlite_version(sqlite3)
    except (ImportError, errors.BzrError):
        from pysqlite2 import dbapi2 as sqlite3
        check_pysqlite_version(sqlite3)
except (ImportError, errors.BzrError):
    trace.warning('Needs at least Python2.5 or Python2.4 with the pysqlite2 '
                  'module')
    raise errors.BzrError("missing sqlite library")


def _connect_sqlite3_file(path):
    return sqlite3.connect(path, timeout=20.0, isolation_level=None)


# Module-level alias for the cache-file connection factory.
connect_cachefile = _connect_sqlite3_file


class CacheTable(object):
    """Simple base class for SQLite-based caches."""
    def __init__(self, cache_db=None):
        if cache_db is None:
            self.cachedb = sqlite3.connect(":memory:")