Example No. 1
 def _delete(self, abspath, f):
     try:
         mutter("FTP rm: %s", abspath)
         f.delete(abspath)
     except ftplib.error_perm, e:
         self._translate_ftp_error(e, abspath, 'error deleting',
             unknown_exc=errors.NoSuchFile)
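
All of the snippets collected here call bzrlib's trace.mutter, which appends a printf-style formatted line to the bzr trace file rather than printing to the terminal. A minimal sketch of that pattern, assuming a stock bzrlib where enable_default_logging() routes mutter output to the default trace file (~/.bzr.log); the sample path is illustrative:

    # Minimal sketch; assumes bzrlib is importable and default logging is wanted.
    from bzrlib import trace

    trace.enable_default_logging()                 # direct mutter() output to the trace file
    trace.mutter("FTP rm: %s", "/srv/example")     # logged for debugging, not shown to the user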
Example No. 2
 def get_credentials(self, scheme, host, port=None, user=None, path=None, 
                     realm=None):
     if gnomekeyring is None:
         return None
     attrs = {
         "protocol": scheme.encode("utf-8"),
         "server": host.encode("utf-8"),
         }
     # TODO: realm ?
     if port is not None:
         attrs["port"] = port
     if user is not None:
         attrs["user"] = user.encode("utf-8")
     credentials = { "scheme": scheme, "host": host, "port": port, 
         "realm": realm, "user": user}
     try:
         items = gnomekeyring.find_items_sync(
             gnomekeyring.ITEM_NETWORK_PASSWORD, attrs)
         credentials["user"] = items[0].attributes["user"]
         credentials["password"] = items[0].secret
         return credentials
     except (gnomekeyring.NoMatchError, gnomekeyring.DeniedError, gnomekeyring.NoKeyringDaemonError,
             gnomekeyring.IOError), e:
         from bzrlib import trace
         trace.mutter('Unable to obtain credentials for %r from GNOME keyring: %r',
                      attrs, e)
         return None
Example No. 3
    def __init__(self, to_repository, from_repository, last_revision=None,
        find_ghosts=True, fetch_spec=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param fetch_spec: A SearchResult specifying which revisions to fetch.
            If set, this overrides last_revision.
        :param find_ghosts: If True search the entire history for ghosts.
        """
        # repository.fetch has the responsibility for short-circuiting
        # attempts to copy between a repository and itself.
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
        # must not mutate self._last_revision as it's potentially a shared instance
        self._last_revision = last_revision
        self._fetch_spec = fetch_spec
        self.find_ghosts = find_ghosts
        self.from_repository.lock_read()
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.from_repository, self.from_repository._format,
               self.to_repository, self.to_repository._format)
        try:
            self.__fetch()
        finally:
            self.from_repository.unlock()
Example No. 4
    def __fetch(self):
        """Primary worker function.

        This initialises all the needed variables, and then fetches the
        requested revisions, finally clearing the progress bar.
        """
        # Roughly this is what we're aiming for fetch to become:
        #
        # missing = self.sink.insert_stream(self.source.get_stream(search))
        # if missing:
        #     missing = self.sink.insert_stream(self.source.get_items(missing))
        # assert not missing
        self.count_total = 0
        self.file_ids_names = {}
        pb = ui.ui_factory.nested_progress_bar()
        pb.show_pct = pb.show_count = False
        try:
            pb.update(gettext("Finding revisions"), 0, 2)
            search_result = self._revids_to_fetch()
            mutter('fetching: %s', search_result)
            if search_result.is_empty():
                return
            pb.update(gettext("Fetching revisions"), 1, 2)
            self._fetch_everything_for_search(search_result)
        finally:
            pb.finished()
Example No. 5
    def update_remote_working_copy(self):
        """Update the target branch's working copy if it isn't local."""
        target_transport = self.target_branch.bzrdir.root_transport
        user = getattr(target_transport, '_user', None)
        host = getattr(target_transport, '_host')
        port = getattr(target_transport, '_port', None)
        path = getattr(target_transport, '_path', None)
        if user:
            user = '%s@' % (user,)  # prefix for the user@host form passed to ssh
        else:
            user = ''

        if port:
            port = ':%s' % (port,)
        else:
            port = ''

        if path.startswith('/~/'):
            path = path[3:] # This is meant to be a relative path

        remote_bzr = self.target_branch.get_config().get_bzr_remote_path()
        # The path needs to be double escaped. We pass it to ssh as a single
        # argument, but ssh passes it to the child shell as a whole string.
        path = '"%s"' % (path,)
        cmd = ['ssh', user+host+port, remote_bzr, 'update', path]
        trace.mutter('running "%s"' % (' '.join(cmd)))

        subprocess.call(cmd)
Example No. 6
 def test_recorded_ancestry(self):
     """Test that commit records all ancestors"""
     br1, br2 = make_branches(self)
     d = [('a@u-0-0', ['a@u-0-0']),
          ('a@u-0-1', ['a@u-0-0', 'a@u-0-1']),
          ('a@u-0-2', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2']),
          ('b@u-0-3', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3']),
          ('b@u-0-4', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3',
                       'b@u-0-4']),
          ('a@u-0-3', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3', 'b@u-0-4',
                       'a@u-0-3']),
          ('a@u-0-4', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3', 'b@u-0-4',
                       'a@u-0-3', 'a@u-0-4']),
          ('b@u-0-5', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'b@u-0-3', 'b@u-0-4',
                       'b@u-0-5']),
          ('a@u-0-5', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2', 'a@u-0-3', 'a@u-0-4',
                       'b@u-0-3', 'b@u-0-4',
                       'b@u-0-5', 'a@u-0-5']),
          ('b@u-0-6', ['a@u-0-0', 'a@u-0-1', 'a@u-0-2',
                       'b@u-0-3', 'b@u-0-4',
                       'b@u-0-5', 'b@u-0-6']),
          ]
     br1_only = ('a@u-0-3', 'a@u-0-4', 'a@u-0-5')
     br2_only = ('b@u-0-6',)
     for branch in br1, br2:
         for rev_id, anc in d:
             if rev_id in br1_only and not branch is br1:
                 continue
             if rev_id in br2_only and not branch is br2:
                 continue
             mutter('ancestry of {%s}: %r',
                    rev_id, branch.repository.get_ancestry(rev_id))
             result = sorted(branch.repository.get_ancestry(rev_id))
             self.assertEquals(result, [None] + sorted(anc))
Example No. 7
 def finish(self):
     try:
         osutils.rmtree(self._root)
     except OSError, e:
         if e.errno != errno.ENOENT:
             mutter("The temporary directory \"%s\" was not "
                     "cleanly removed: %s." % (self._root, e))
Example No. 8
    def __call__(self, oldrevid, newrevid, newparents):
        """Replay a commit in a working tree, with a different base.

        :param oldrevid: Old revision id
        :param newrevid: New revision id
        :param newparents: New parent revision ids
        """
        repository = self.wt.branch.repository
        if self.merge_type is None:
            from bzrlib.merge import Merge3Merger
            merge_type = Merge3Merger
        else:
            merge_type = self.merge_type
        oldrev = self.wt.branch.repository.get_revision(oldrevid)
        # Make sure there are no conflicts or pending merges/changes
        # in the working tree
        complete_revert(self.wt, [newparents[0]])
        assert not self.wt.changes_from(self.wt.basis_tree()).has_changed(), "Changes in rev"

        oldtree = repository.revision_tree(oldrevid)
        self.state.write_active_revid(oldrevid)
        merger = Merger(self.wt.branch, this_tree=self.wt)
        merger.set_other_revision(oldrevid, self.wt.branch)
        base_revid = self.determine_base(oldrevid, oldrev.parent_ids,
                                           newrevid, newparents)
        mutter('replaying %r as %r with base %r and new parents %r' %
               (oldrevid, newrevid, base_revid, newparents))
        merger.set_base_revision(base_revid, self.wt.branch)
        merger.merge_type = merge_type
        merger.do_merge()
        for newparent in newparents[1:]:
            self.wt.add_pending_merge(newparent)
        self.commit_rebase(oldrev, newrevid)
        self.state.write_active_revid(None)
Example No. 9
def recurse_matches_py(a, b, alo, blo, ahi, bhi, answer, maxrecursion):
    """Find all of the matching text in the lines of a and b.

    :param a: A sequence
    :param b: Another sequence
    :param alo: The start location of a to check, typically 0
    :param blo: The start location of b to check, typically 0
    :param ahi: The maximum length of a to check, typically len(a)
    :param bhi: The maximum length of b to check, typically len(b)
    :param answer: The return array. Will be filled with tuples
                   indicating [(line_in_a, line_in_b)]
    :param maxrecursion: The maximum depth to recurse.
                         Must be a positive integer.
    :return: None, the return value is in the parameter answer, which
             should be a list

    """
    if maxrecursion < 0:
        mutter('max recursion depth reached')
        # this will never happen normally, this check is to prevent DOS attacks
        return
    oldlength = len(answer)
    if alo == ahi or blo == bhi:
        return
    last_a_pos = alo-1
    last_b_pos = blo-1
    for apos, bpos in unique_lcs_py(a[alo:ahi], b[blo:bhi]):
        # recurse between lines which are unique in each file and match
        apos += alo
        bpos += blo
        # Most of the time, you will have a sequence of similar entries
        if last_a_pos+1 != apos or last_b_pos+1 != bpos:
            recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
                apos, bpos, answer, maxrecursion - 1)
        last_a_pos = apos
        last_b_pos = bpos
        answer.append((apos, bpos))
    if len(answer) > oldlength:
        # find matches between the last match and the end
        recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
                           ahi, bhi, answer, maxrecursion - 1)
    elif a[alo] == b[blo]:
        # find matching lines at the very beginning
        while alo < ahi and blo < bhi and a[alo] == b[blo]:
            answer.append((alo, blo))
            alo += 1
            blo += 1
        recurse_matches_py(a, b, alo, blo,
                           ahi, bhi, answer, maxrecursion - 1)
    elif a[ahi - 1] == b[bhi - 1]:
        # find matching lines at the very end
        nahi = ahi - 1
        nbhi = bhi - 1
        while nahi > alo and nbhi > blo and a[nahi - 1] == b[nbhi - 1]:
            nahi -= 1
            nbhi -= 1
        recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
                           nahi, nbhi, answer, maxrecursion - 1)
        for i in xrange(ahi - nahi):
            answer.append((nahi + i, nbhi + i))
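
As a rough illustration of the recursion above, here is a hedged usage sketch; the import path (bzrlib's pure-Python patience-diff module) and the sample sequences are assumptions:

    # Hedged usage sketch; the module path is an assumption.
    from bzrlib._patiencediff_py import recurse_matches_py

    a = ['one\n', 'two\n', 'three\n', 'four\n']
    b = ['one\n', 'two\n', 'extra\n', 'three\n', 'four\n']
    answer = []
    recurse_matches_py(a, b, 0, 0, len(a), len(b), answer, 10)
    # answer should now hold (line_in_a, line_in_b) pairs such as
    # [(0, 0), (1, 1), (2, 3), (3, 4)]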
Example No. 10
def _get_newest_versions(the_branch, latest_pub):
    """Get information about how 'fresh' this packaging branch is.

    :param the_branch: The Branch to check
    :param latest_pub: The LatestPublication used to check most recent
        published version.
    :return: (latest_ver, branch_latest_ver)
    """
    t = time.time()
    latest_ver = latest_pub.get_latest_version()
    t_latest_ver = time.time() - t
    trace.mutter('LatestPublication.get_latest_version took: %.3fs'
                 % (t_latest_ver,))
    if latest_ver is None:
        return None, None
    t = time.time()
    tags = the_branch.tags.get_tag_dict()
    t_tag_dict = time.time() - t
    trace.mutter('LatestPublication.get_tag_dict took: %.3fs' % (t_tag_dict,))
    if latest_ver in tags:
        # branch might have a newer tag, but we don't really care
        return latest_ver, latest_ver
    else:
        best_tag = get_most_recent_tag(tags, the_branch)
        return latest_ver, best_tag
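
The snippet above times each expensive call with time.time() and reports the elapsed seconds through trace.mutter. A small hedged sketch of the same pattern, with an illustrative helper name:

    # Illustrative helper (the name 'timed' is hypothetical).
    import time
    from bzrlib import trace

    def timed(label, func, *args, **kwargs):
        t = time.time()
        result = func(*args, **kwargs)
        trace.mutter('%s took: %.3fs' % (label, time.time() - t))
        return result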
Example No. 11
    def __setitem__(self, key, value):
        """Add a new value to the cache"""
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
            # The new value is 'too big to fit', as it would fill up/overflow
            # the cache all by itself
            trace.mutter('Adding the key %r to an LRUSizeCache failed.'
                         ' value %d is too big to fit in the cache'
                         ' with size %d %d', key, value_len,
                         self._after_cleanup_size, self._max_size)
            if node is not None:
                # We won't be replacing the old node, so just remove it
                self._remove_node(node)
            return
        if node is None:
            node = _LRUNode(key, value)
            self._cache[key] = node
        else:
            self._value_size -= self._compute_size(node.value)
        self._value_size += value_len
        self._record_access(node)

        if self._value_size > self._max_size:
            # Time to cleanup
            self.cleanup()
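
For context, a hedged usage sketch of the size-limited cache above; the constructor arguments follow bzrlib.lru_cache.LRUSizeCache but are an assumption here:

    # Hedged sketch; constructor arguments are assumptions.
    from bzrlib import lru_cache

    cache = lru_cache.LRUSizeCache(max_size=1024, compute_size=len)
    cache['small'] = 'x' * 10     # fits, so it is stored and its size tracked
    cache['huge'] = 'x' * 4096    # exceeds the cleanup threshold: mutter() logs it and it is dropped
    # only 'small' remains cached afterwards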
Example No. 12
    def initialize(self, a_bzrdir, shared=False):
        """Create a knit format 1 repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
                       repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['knits']
        files = []
        utf8_files = [('format', self.get_format_string())]
        
        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                'lock', lockdir.LockDir)
        transaction = transactions.WriteTransaction()
        result = self.open(a_bzrdir=a_bzrdir, _found=True)
        result.lock_write()
        # the revision id here is irrelevant: it will not be stored, and cannot
        # already exist, we do this to create files on disk for older clients.
        result.inventories.get_parent_map([('A',)])
        result.revisions.get_parent_map([('A',)])
        result.signatures.get_parent_map([('A',)])
        result.unlock()
        return result
Example No. 13
        def __init__(self, filename):
            super(_fcntl_WriteLock, self).__init__()
            # Check we can grab a lock before we actually open the file.
            self.filename = osutils.realpath(filename)
            if self.filename in _fcntl_WriteLock._open_locks:
                self._clear_f()
                raise errors.LockContention(self.filename)
            if self.filename in _fcntl_ReadLock._open_locks:
                if 'strict_locks' in debug.debug_flags:
                    self._clear_f()
                    raise errors.LockContention(self.filename)
                else:
                    trace.mutter('Write lock taken w/ an open read lock on: %s'
                                 % (self.filename,))

            self._open(self.filename, 'rb+')
            # reserve a slot for this lock - even if the lockf call fails,
            # at this point unlock() will be called, because self.f is set.
            # TODO: make this fully threadsafe, if we decide we care.
            _fcntl_WriteLock._open_locks.add(self.filename)
            try:
                # LOCK_NB will cause IOError to be raised if we can't grab a
                # lock right away.
                fcntl.lockf(self.f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError, e:
                if e.errno in (errno.EAGAIN, errno.EACCES):
                    # We couldn't grab the lock
                    self.unlock()
                # we should be more precise about what's a locking
                # error and what's a random-other error
                raise errors.LockContention(self.filename, e)
Example No. 14
    def is_lock_holder_known_dead(self):
        """True if the lock holder process is known to be dead.

        False if it's either known to be still alive, or if we just can't tell.

        We can be fairly sure the lock holder is dead if it declared the same
        hostname and there is no process with the given pid alive.  If people
        have multiple machines with the same hostname this may cause trouble.

        This doesn't check whether the lock holder is in fact the same process
        calling this method.  (In that case it will return true.)
        """
        if self.get('hostname') != get_host_name():
            return False
        if self.get('hostname') == 'localhost':
            # Too ambiguous.
            return False
        if self.get('user') != get_username_for_lock_info():
            # Could well be another local process by a different user, but
            # just to be safe we won't conclude about this either.
            return False
        pid_str = self.info_dict.get('pid', None)
        if not pid_str:
            mutter("no pid recorded in %r" % (self, ))
            return False
        try:
            pid = int(pid_str)
        except ValueError:
            mutter("can't parse pid %r from %r"
                % (pid_str, self))
            return False
        return osutils.is_local_pid_dead(pid)
Example No. 15
    def initialize(self, a_bzrdir, shared=False, _internal=False):
        """Create a weave repository."""
        if shared:
            raise errors.IncompatibleFormat(self, a_bzrdir._format)

        if not _internal:
            # always initialized when the bzrdir is.
            return self.open(a_bzrdir, _found=True)
        
        # Create an empty weave
        sio = StringIO()
        weavefile.write_weave_v5(weave.Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        control_files = lockable_files.LockableFiles(a_bzrdir.transport,
            'branch-lock', lockable_files.TransportLock)
        control_files.create_lock()
        control_files.lock_write()
        transport = a_bzrdir.transport
        try:
            transport.mkdir_multi(['revision-store', 'weaves'],
                mode=a_bzrdir._get_dir_mode())
            transport.put_bytes_non_atomic('inventory.weave', empty_weave)
        finally:
            control_files.unlock()
        return self.open(a_bzrdir, _found=True)
Example No. 16
    def get(self, relpath, retries=0):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        :param retries: Number of retries after temporary failures so far
                        for this operation.

        We're meant to return a file-like object which bzr will
        then read from. For now we do this via the magic of StringIO
        """
        try:
            if 'gio' in debug.debug_flags:
                mutter("GIO get: %s" % relpath)
            f = self._get_GIO(relpath)
            fin = f.read()
            buf = fin.read()
            fin.close()
            ret = StringIO(buf)
            return ret
        except gio.Error, e:
            #If we get a not mounted here it might mean
            #that a bad path has been entered (or that mount failed)
            if (e.code == gio.ERROR_NOT_MOUNTED):
                raise errors.PathError(relpath,
                  extra='Failed to get file, make sure the path is correct. ' \
                  + str(e))
            else:
                self._translate_gio_error(e, relpath)
Example No. 17
    def put_file(self, relpath, fp, mode=None):
        """Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param fp:       File-like or string object.
        """
        if 'gio' in debug.debug_flags:
            mutter("GIO put_file %s" % relpath)
        tmppath = '%s.tmp.%.9f.%d.%d' % (relpath, time.time(),
                    os.getpid(), random.randint(0, 0x7FFFFFFF))
        f = None
        fout = None
        try:
            closed = True
            try:
                f = self._get_GIO(tmppath)
                fout = f.create()
                closed = False
                length = self._pump(fp, fout)
                fout.close()
                closed = True
                self.stat(tmppath)
                dest = self._get_GIO(relpath)
                f.move(dest, flags=gio.FILE_COPY_OVERWRITE)
                f = None
                if mode is not None:
                    self._setmode(relpath, mode)
                return length
            except gio.Error, e:
                self._translate_gio_error(e, relpath)
        finally:
            if not closed and fout is not None:
                fout.close()
            if f is not None and f.query_exists():
                f.delete()
Example No. 18
    def gssapi_login(self, user):
        # Try GSSAPI login first

        # Used FTP response codes:
        # 235 [ADAT=base64data] - indicates that the security data exchange
        #     completed successfully.
        # 334 [ADAT=base64data] - indicates that the requested security
        #     mechanism is ok, and includes security data to be used by the
        #     client to construct the next command.
        # 335 [ADAT=base64data] - indicates that the security data is
        #     acceptable, and more is required to complete the security
        #     data exchange.

        resp = self.sendcmd('AUTH GSSAPI')
        if resp.startswith('334 '):
            rc, self.vc = kerberos.authGSSClientInit("ftp@%s" % self.host)
            if kerberos.authGSSClientStep(self.vc, "") != 1:
                while resp[:4] in ('334 ', '335 '):
                    authdata = kerberos.authGSSClientResponse(self.vc)
                    resp = self.sendcmd('ADAT ' + authdata)
                    if resp[:9] in ('235 ADAT=', '335 ADAT='):
                        rc = kerberos.authGSSClientStep(self.vc, resp[9:])
                        if not ((resp.startswith('235 ') and rc == 1) or
                                (resp.startswith('335 ') and rc == 0)):
                            raise ftplib.error_reply, resp
            note(gettext("Authenticated as %s") %
                 kerberos.authGSSClientUserName(self.vc))

            # Monkey patch ftplib
            self.putcmd = self.mic_putcmd
            self.getline = self.mic_getline
            self.sendcmd('USER ' + user)
            return resp
        mutter("Unable to use GSSAPI authentication: %s", resp)
Example No. 19
 def _trim(self):
     """Trim the cache back if needed."""
     if self._limit < 0 or self._limit - len(self._clean_objects) > 0:
         return
     needed = len(self._clean_objects) - self._limit
     offset = 0
     while needed and offset < len(self._clean_objects):
         # references we know of:
         # temp passed to getrefcount in our frame
         # temp in getrefcount's frame
         # the map forward
         # the map backwards
         # _clean_objects
         # _clean_queue
         # 1 missing ?
         if (
             sys.getrefcount(self._clean_queue[offset]) <= 7
             and not self._clean_queue[offset] in self._precious_objects
         ):
             removed = self._clean_queue[offset]
             self._clean_objects.remove(removed)
             del self._clean_queue[offset]
             self.map.remove_object(removed)
             mutter("removed object %r", removed)
             needed -= 1
         else:
             offset += 1
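
The trimming loop relies on sys.getrefcount to guess whether anything outside the cache still references an object. A tiny CPython-specific sketch of what that heuristic measures:

    # CPython-specific illustration; exact counts depend on the surrounding code.
    import sys

    obj = object()
    baseline = sys.getrefcount(obj)        # the local name plus the call's temporary reference
    holder = [obj]                         # one more holder appears...
    assert sys.getrefcount(obj) == baseline + 1   # ...and the count goes up by one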
Example No. 20
def classify_filename(name):
    """Classify a file based on its name.
    
    :param name: File path.
    :return: One of code, documentation, translation or art. 
        None if determining the file type failed.
    """
    # FIXME: Use mime types? Ohcount? 
    extension = os.path.splitext(name)[1]
    if extension in (".c", ".h", ".py", ".cpp", ".rb", ".pm", ".pl", ".ac"):
        return "code"
    if extension in (".html", ".xml", ".txt", ".rst", ".TODO"):
        return "documentation"
    if extension in (".po",):
        return "translation"
    if extension in (".svg", ".png", ".jpg"):
        return "art"
    if not extension:
        basename = urlutils.basename(name)
        if basename in ("README", "NEWS", "TODO", 
                        "AUTHORS", "COPYING"):
            return "documentation"
        if basename in ("Makefile",):
            return "code"

    mutter("don't know how to classify %s", name)
    return None
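
Following the extension and basename tables above, a few illustrative calls (the sample paths are made up):

    # Expected results, based on the tables in classify_filename above.
    classify_filename("src/main.c")       # -> "code"
    classify_filename("doc/intro.txt")    # -> "documentation"
    classify_filename("po/pt_BR.po")      # -> "translation"
    classify_filename("icons/logo.svg")   # -> "art"
    classify_filename("README")           # -> "documentation"
    classify_filename("data.bin")         # -> None, after a mutter() line is logged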
Example No. 21
    def from_string(spec):
        """Parse a revision spec string into a RevisionSpec object.

        :param spec: A string specified by the user
        :return: A RevisionSpec object that understands how to parse the
            supplied notation.
        """
        if not isinstance(spec, (type(None), basestring)):
            raise TypeError('error')

        if spec is None:
            return RevisionSpec(None, _internal=True)
        for spectype in SPEC_TYPES:
            if spec.startswith(spectype.prefix):
                trace.mutter('Returning RevisionSpec %s for %s',
                             spectype.__name__, spec)
                return spectype(spec, _internal=True)
        else:
            # RevisionSpec_revno is special cased, because it is the only
            # one that directly handles plain integers
            # TODO: This should not be special cased rather it should be
            # a method invocation on spectype.canparse()
            global _revno_regex
            if _revno_regex is None:
                _revno_regex = re.compile(r'^(?:(\d+(\.\d+)*)|-\d+)(:.*)?$')
            if _revno_regex.match(spec) is not None:
                return RevisionSpec_revno(spec, _internal=True)

            raise errors.NoSuchRevisionSpec(spec)
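
A hedged sketch of how the dispatcher above behaves for a few inputs; it assumes RevisionSpec is imported from bzrlib.revisionspec and that the usual prefixes (such as 'date:') are registered in SPEC_TYPES:

    # Hedged sketch; behaviour follows the code above, registered prefixes are assumptions.
    from bzrlib.revisionspec import RevisionSpec

    RevisionSpec.from_string('date:2010-01-01')   # matched by a registered prefix, mutter() logs the choice
    RevisionSpec.from_string('-1')                # matches the revno regex -> RevisionSpec_revno
    RevisionSpec.from_string(None)                # the catch-all RevisionSpec(None)
    RevisionSpec.from_string('nonsense!')         # no prefix and no revno match -> NoSuchRevisionSpec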
Example No. 22
 def _post(self, body_bytes):
     curl = self._get_curl()
     abspath, data, header = self._setup_request(curl, '.bzr/smart')
     curl.setopt(pycurl.POST, 1)
     fake_file = StringIO(body_bytes)
     curl.setopt(pycurl.POSTFIELDSIZE, len(body_bytes))
     curl.setopt(pycurl.READFUNCTION, fake_file.read)
     # We override the Expect: header so that pycurl will send the POST
     # body immediately.
     try:
         self._curl_perform(curl, header,
                            ['Expect: ',
                             'Content-Type: application/octet-stream'])
     except pycurl.error, e:
         if e[0] == CURLE_SEND_ERROR:
             # When talking to an HTTP/1.0 server, getting a 400+ error code
             # triggers a bug in some combinations of curl/kernel in rare
             # occurrences. Basically, the server closes the connection
             # after sending the error but the client (having received and
             # parsed the response) still tries to send the request body (see
             # bug #225020 and its upstream associated bug).  Since the
             # error code and the headers are known to be available, we just
             # swallow the exception, letting the upper levels handle the
             # 400+ error.
             trace.mutter('got pycurl error in POST: %s, %s, %s, url: %s ',
                          e[0], e[1], e, abspath)
         else:
             # Re-raise otherwise
             raise
Example No. 23
 def _translate_gio_error(self, err, path, extra=None):
     if 'gio' in debug.debug_flags:
         mutter("GIO Error: %s %s" % (str(err), path))
     if extra is None:
         extra = str(err)
     if err.code == gio.ERROR_NOT_FOUND:
         raise errors.NoSuchFile(path, extra=extra)
     elif err.code == gio.ERROR_EXISTS:
         raise errors.FileExists(path, extra=extra)
     elif err.code == gio.ERROR_NOT_DIRECTORY:
         raise errors.NotADirectory(path, extra=extra)
     elif err.code == gio.ERROR_NOT_EMPTY:
         raise errors.DirectoryNotEmpty(path, extra=extra)
     elif err.code == gio.ERROR_BUSY:
         raise errors.ResourceBusy(path, extra=extra)
     elif err.code == gio.ERROR_PERMISSION_DENIED:
         raise errors.PermissionDenied(path, extra=extra)
     elif err.code == gio.ERROR_HOST_NOT_FOUND:
         raise errors.PathError(path, extra=extra)
     elif err.code == gio.ERROR_IS_DIRECTORY:
         raise errors.PathError(path, extra=extra)
     else:
         mutter('unable to understand error for path: %s: %s', path, err)
         raise errors.PathError(path,
                 extra="Unhandled gio error: " + str(err))
Example No. 24
    def _try_append(self, relpath, text, mode=None, retries=0):
        """Try repeatedly to append the given text to the file at relpath.

        This is a recursive function. On errors, it will be called until the
        number of retries is exceeded.
        """
        try:
            abspath = self._remote_path(relpath)
            mutter("FTP appe (try %d) to %s", retries, abspath)
            ftp = self._get_FTP()
            cmd = "APPE %s" % abspath
            conn = ftp.transfercmd(cmd)
            conn.sendall(text)
            conn.close()
            self._setmode(relpath, mode)
            ftp.getresp()
        except ftplib.error_perm, e:
            # Check whether the command is not supported (reply code 502)
            if str(e).startswith('502 '):
                warning("FTP server does not support file appending natively. "
                        "Performance may be severely degraded! (%s)", e)
                self._has_append = False
                self._fallback_append(relpath, text, mode)
            else:
                self._translate_ftp_error(e, abspath, extra='error appending',
                    unknown_exc=errors.NoSuchFile)
Example No. 25
 def _curl_perform(self, curl, header, more_headers=[]):
     """Perform curl operation and translate exceptions."""
     try:
         # There's no way in http/1.0 to say "must
         # revalidate"; we don't want to force it to always
         # retrieve.  so just turn off the default Pragma
         # provided by Curl.
         headers = ['Cache-control: max-age=0',
                    'Pragma: no-cache',
                    'Connection: Keep-Alive']
         curl.setopt(pycurl.HTTPHEADER, headers + more_headers)
         curl.perform()
     except pycurl.error, e:
         url = curl.getinfo(pycurl.EFFECTIVE_URL)
         mutter('got pycurl error: %s, %s, %s, url: %s ',
                 e[0], e[1], e, url)
         if e[0] in (CURLE_SSL_CACERT_BADFILE,
                     CURLE_COULDNT_RESOLVE_HOST,
                     CURLE_COULDNT_CONNECT,
                     CURLE_GOT_NOTHING,
                     CURLE_COULDNT_RESOLVE_PROXY,):
             raise errors.ConnectionError(
                 'curl connection error (%s)\non %s' % (e[1], url))
         elif e[0] == CURLE_PARTIAL_FILE:
             # Pycurl itself has detected a short read.  We do not have all
             # the information for the ShortReadvError, but that should be
             # enough
             raise errors.ShortReadvError(url,
                                          offset='unknown', length='unknown',
                                          actual='unknown',
                                          extra='Server aborted the request')
         raise
Example No. 26
    def _create_connection(self, credentials=None):
        """Create a new connection with the provided credentials.

        :param credentials: The credentials needed to establish the connection.

        :return: The created connection and its associated credentials.

        The input credentials are only the password as it may have been
        entered interactively by the user and may be different from the one
        provided in base url at transport creation time.  The returned
        credentials are username, password.
        """
        if credentials is None:
            user, password = self._user, self._password
        else:
            user, password = credentials

        auth = config.AuthenticationConfig()
        if user is None:
            user = auth.get_user('ftp', self._host, port=self._port,
                                 default=getpass.getuser())
        mutter("Constructing FTP instance against %r" %
               ((self._host, self._port, user, '********',
                self.is_active),))
        try:
            connection = self.connection_class()
            connection.connect(host=self._host, port=self._port)
            self._login(connection, auth, user, password)
            connection.set_pasv(not self.is_active)
            # binary mode is the default
            connection.voidcmd('TYPE I')
        except socket.error, e:
            raise errors.SocketConnectionError(self._host, self._port,
                                               msg='Unable to connect to',
                                               orig_error= e)
Example No. 27
    def _copy_one(self, fileid, suffix, other, pb):
        # TODO: Once the copy_to interface is improved to allow a source
        #       and destination targets, then we can always do the copy
        #       as long as other is a TextStore
        if not (isinstance(other, TextStore)
            and other._prefixed == self._prefixed):
            return super(TextStore, self)._copy_one(fileid, suffix, other, pb)

        mutter('_copy_one: %r, %r', fileid, suffix)
        path = other._get_name(fileid, suffix)
        if path is None:
            raise KeyError(fileid + '-' + str(suffix))

        try:
            result = other._transport.copy_to([path], self._transport,
                                              mode=self._file_mode)
        except NoSuchFile:
            if not self._prefixed:
                raise
            try:
                self._transport.mkdir(osutils.dirname(path), mode=self._dir_mode)
            except FileExists:
                pass
            result = other._transport.copy_to([path], self._transport,
                                              mode=self._file_mode)

        if result != 1:
            raise BzrError('Unable to copy file: %r' % (path,))
Example No. 28
def _filter_nonexistent(orig_paths, old_tree, new_tree):
    """Convert orig_paths to two sorted lists and return them.

    The first is orig_paths paths minus the items in the second list,
    and the second list is paths that are not in either inventory or
    tree (they don't qualify if they exist in the tree's inventory, or
    if they exist in the tree but are not versioned.)

    If either of the two lists is empty, return it as an empty list.

    This can be used by operations such as bzr status that can accept
    unknown or ignored files.
    """
    mutter("check paths: %r", orig_paths)
    if not orig_paths:
        return orig_paths, []
    s = old_tree.filter_unversioned_files(orig_paths)
    s = new_tree.filter_unversioned_files(s)
    nonexistent = [path for path in s if not new_tree.has_filename(path)]
    remaining   = [path for path in orig_paths if not path in nonexistent]
    # Sorting the 'remaining' list doesn't have much effect in
    # practice, since the various status output sections will sort
    # their groups individually.  But for consistency of this
    # function's API, it's better to sort both than just 'nonexistent'.
    return sorted(remaining), sorted(nonexistent)
Example No. 29
 def _fix_text_parent(self, file_id, versions_with_bad_parents,
         unused_versions, all_versions):
     """Fix bad versionedfile entries in a single versioned file."""
     mutter('fixing text parent: %r (%d versions)', file_id,
             len(versions_with_bad_parents))
     mutter('(%d are unused)', len(unused_versions))
     new_file_id = 'temp:%s' % file_id
     new_parents = {}
     needed_keys = set()
     for version in all_versions:
         if version in unused_versions:
             continue
         elif version in versions_with_bad_parents:
             parents = versions_with_bad_parents[version][1]
         else:
             pmap = self.repo.texts.get_parent_map([(file_id, version)])
             parents = [key[-1] for key in pmap[(file_id, version)]]
         new_parents[(new_file_id, version)] = [
             (new_file_id, parent) for parent in parents]
         needed_keys.add((file_id, version))
     def fix_parents(stream):
         for record in stream:
             bytes = record.get_bytes_as('fulltext')
             new_key = (new_file_id, record.key[-1])
             parents = new_parents[new_key]
             yield FulltextContentFactory(new_key, parents, record.sha1, bytes)
     stream = self.repo.texts.get_record_stream(needed_keys, 'topological', True)
     self.repo._remove_file_id(new_file_id)
     self.repo.texts.insert_record_stream(fix_parents(stream))
     self.repo._remove_file_id(file_id)
     if len(new_parents):
         self.repo._move_file_id(new_file_id, file_id)
Example No. 30
    def _append_by_head_put(self, relpath, bytes):
        """Append without getting the whole file.

        When the server allows it, a 'Content-Range' header can be specified.
        """
        response = self._head(relpath)
        code = response.code
        if code == 404:
            relpath_size = 0
        else:
            # Consider the absence of Content-Length header as
            # indicating an existing but empty file (Apache 2.0
            # does this, and there is even a comment in
            # modules/http/http_protocol.c calling that a *hack*,
            # I agree, it's a hack. On the other hand if the file
            # does not exist we get a 404, if the file does exist,
            # is not empty and we get no Content-Length header,
            # then the server is buggy :-/ )
            relpath_size = int(response.headers.get('Content-Length', 0))
            if relpath_size == 0:
                trace.mutter('if %s is not empty, the server is buggy'
                             % relpath)
        if relpath_size:
            self._put_bytes_ranged(relpath, bytes, relpath_size)
        else:
            self.put_bytes(relpath, bytes)

        return relpath_size
Example No. 31
from bzrlib import (
    errors,
    trace,
    __version__ as bzrlib_version,
)
import bzrlib
from bzrlib.trace import mutter
from bzrlib.transport.http import (
    ca_bundle,
    HttpTransportBase,
    response,
)

try:
    import pycurl
except ImportError, e:
    mutter("failed to import pycurl: %s", e)
    raise errors.DependencyNotPresent('pycurl', e)

try:
    # see if we can actually initialize PyCurl - sometimes it will load but
    # fail to start up due to this bug:
    #
    #   32. (At least on Windows) If libcurl is built with c-ares and there's
    #   no DNS server configured in the system, the ares_init() call fails and
    #   thus curl_easy_init() fails as well. This causes weird effects for
    #   people who use numerical IP addresses only.
    #
    # reported by Alexander Belchenko, 2006-04-26
    pycurl.Curl()
except pycurl.error, e:
    mutter("failed to initialize pycurl: %s", e)
Example No. 32
class HashCache(object):
    """Cache for looking up file SHA-1.

    Files are considered to match the cached value if the fingerprint
    of the file has not changed.  This includes its mtime, ctime,
    device number, inode number, and size.  This should catch
    modifications or replacement of the file by a new one.

    This may not catch modifications that do not change the file's
    size and that occur within the resolution window of the
    timestamps.  To handle this we specifically do not cache files
    which have changed since the start of the present second, since
    they could undetectably change again.

    This scheme may fail if the machine's clock steps backwards.
    Don't do that.

    This does not canonicalize the paths passed in; that should be
    done by the caller.

    _cache
        Indexed by path, points to a two-tuple of the SHA-1 of the file
        and its fingerprint.

    stat_count
        number of times files have been statted

    hit_count
        number of times files have been retrieved from the cache, avoiding a
        re-read

    miss_count
        number of misses (times files have been completely re-read)
    """
    needs_write = False

    def __init__(self,
                 root,
                 cache_file_name,
                 mode=None,
                 content_filter_stack_provider=None):
        """Create a hash cache in base dir, and set the file mode to mode.

        :param content_filter_stack_provider: a function that takes a
            path (relative to the top of the tree) and a file-id as
            parameters and returns a stack of ContentFilters.
            If None, no content filtering is performed.
        """
        self.root = osutils.safe_unicode(root)
        self.root_utf8 = self.root.encode(
            'utf8')  # where is the filesystem encoding ?
        self.hit_count = 0
        self.miss_count = 0
        self.stat_count = 0
        self.danger_count = 0
        self.removed_count = 0
        self.update_count = 0
        self._cache = {}
        self._mode = mode
        self._cache_file_name = osutils.safe_unicode(cache_file_name)
        self._filter_provider = content_filter_stack_provider

    def cache_file_name(self):
        return self._cache_file_name

    def clear(self):
        """Discard all cached information.

        This does not reset the counters."""
        if self._cache:
            self.needs_write = True
            self._cache = {}

    def scan(self):
        """Scan all files and remove entries where the cache entry is obsolete.

        Obsolete entries are those where the file has been modified or deleted
        since the entry was inserted.
        """
        # FIXME optimisation opportunity, on linux [and check other oses]:
        # rather than iteritems order, stat in inode order.
        prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
        prep.sort()

        for inum, path, cache_entry in prep:
            abspath = osutils.pathjoin(self.root, path)
            fp = self._fingerprint(abspath)
            self.stat_count += 1

            cache_fp = cache_entry[1]

            if (not fp) or (cache_fp != fp):
                # not here or not a regular file anymore
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]

    def get_sha1(self, path, stat_value=None):
        """Return the sha1 of a file.
        """
        if path.__class__ is str:
            abspath = osutils.pathjoin(self.root_utf8, path)
        else:
            abspath = osutils.pathjoin(self.root, path)
        self.stat_count += 1
        file_fp = self._fingerprint(abspath, stat_value)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            ## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
            ## mutter("now = %s", time.time())
            self.hit_count += 1
            return cache_sha1

        self.miss_count += 1

        mode = file_fp[FP_MODE_COLUMN]
        if stat.S_ISREG(mode):
            if self._filter_provider is None:
                filters = []
            else:
                filters = self._filter_provider(path=path, file_id=None)
            digest = self._really_sha1_file(abspath, filters)
        elif stat.S_ISLNK(mode):
            target = osutils.readlink(osutils.safe_unicode(abspath))
            digest = osutils.sha_string(target.encode('UTF-8'))
        else:
            raise errors.BzrError("file %r: unknown file stat mode: %o" %
                                  (abspath, mode))

        # window of 3 seconds to allow for 2s resolution on windows,
        # unsynchronized file servers, etc.
        cutoff = self._cutoff_time()
        if file_fp[FP_MTIME_COLUMN] >= cutoff \
                or file_fp[FP_CTIME_COLUMN] >= cutoff:
            # changed too recently; can't be cached.  we can
            # return the result and it could possibly be cached
            # next time.
            #
            # the point is that we only want to cache when we are sure that any
            # subsequent modifications of the file can be detected.  If a
            # modification neither changes the inode, the device, the size, nor
            # the mode, then we can only distinguish it by time; therefore we
            # need to let sufficient time elapse before we may cache this entry
            # again.  If we didn't do this, then, for example, a very quick 1
            # byte replacement in the file might go undetected.
            ## mutter('%r modified too recently; not caching', path)
            self.danger_count += 1
            if cache_fp:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
        else:
            ## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
            ##        path, time.time(), file_fp[FP_MTIME_COLUMN],
            ##        file_fp[FP_CTIME_COLUMN])
            self.update_count += 1
            self.needs_write = True
            self._cache[path] = (digest, file_fp)
        return digest

    def _really_sha1_file(self, abspath, filters):
        """Calculate the SHA1 of a file by reading the full text"""
        return _mod_filters.internal_size_sha_file_byname(abspath, filters)[1]

    def write(self):
        """Write contents of cache to file."""
        outf = atomicfile.AtomicFile(self.cache_file_name(),
                                     'wb',
                                     new_mode=self._mode)
        try:
            outf.write(CACHE_HEADER)

            for path, c in self._cache.iteritems():
                line_info = [path.encode('utf-8'), '// ', c[0], ' ']
                line_info.append(' '.join([str(fld) for fld in c[1]]))
                line_info.append('\n')
                outf.write(''.join(line_info))
            outf.commit()
            self.needs_write = False
            ## mutter("write hash cache: %s hits=%d misses=%d stat=%d recent=%d updates=%d",
            ##        self.cache_file_name(), self.hit_count, self.miss_count,
            ##        self.stat_count,
            ##        self.danger_count, self.update_count)
        finally:
            outf.close()

    def read(self):
        """Reinstate cache from file.

        Overwrites existing cache.

        If the cache file has the wrong version marker, this just clears
        the cache."""
        self._cache = {}

        fn = self.cache_file_name()
        try:
            inf = file(fn, 'rb', buffering=65000)
        except IOError, e:
            trace.mutter("failed to open %s: %s", fn, e)
            # better write it now so it is valid
            self.needs_write = True
            return

        hdr = inf.readline()
        if hdr != CACHE_HEADER:
            trace.mutter(
                'cache header marker not found at top of %s;'
                ' discarding cache', fn)
            self.needs_write = True
            return

        for l in inf:
            pos = l.index('// ')
            path = l[:pos].decode('utf-8')
            if path in self._cache:
                trace.warning('duplicated path %r in cache' % path)
                continue

            pos += 3
            fields = l[pos:].split(' ')
            if len(fields) != 7:
                trace.warning("bad line in hashcache: %r" % l)
                continue

            sha1 = fields[0]
            if len(sha1) != 40:
                trace.warning("bad sha1 in hashcache: %r" % sha1)
                continue

            fp = tuple(map(long, fields[1:]))

            self._cache[path] = (sha1, fp)

        # GZ 2009-09-20: Should really use a try/finally block to ensure close
        inf.close()

        self.needs_write = False
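
A hedged usage sketch of the cache above; the tree root, cache file path and file name are illustrative:

    # Illustrative paths; constructor arguments follow the __init__ shown above.
    cache = HashCache('/path/to/tree', '/path/to/tree/.bzr/stat-cache')
    cache.read()                        # load a previously written cache file, if any
    sha1 = cache.get_sha1('README')     # hashed from disk, or served from the cache if unchanged
    if cache.needs_write:
        cache.write()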
Example No. 33
    def _readv(self, relpath, offsets):
        """Get parts of the file at the given relative path.

        :param offsets: A list of (offset, size) tuples.
        :return: A list or generator of (offset, data) tuples
        """

        # offsets may be a generator, we will iterate it several times, so
        # build a list
        offsets = list(offsets)

        try_again = True
        retried_offset = None
        while try_again:
            try_again = False

            # Coalesce the offsets to minimize the GET requests issued
            sorted_offsets = sorted(offsets)
            coalesced = self._coalesce_offsets(
                sorted_offsets,
                limit=self._max_readv_combine,
                fudge_factor=self._bytes_to_read_before_seek,
                max_size=self._get_max_size)

            # Turn it into a list, we will iterate it several times
            coalesced = list(coalesced)
            mutter('http readv of %s  offsets => %s collapsed %s', relpath,
                   len(offsets), len(coalesced))

            # Cache the data read, but only until it's been used
            data_map = {}
            # We will iterate on the data received from the GET requests and
            # serve the corresponding offsets respecting the initial order. We
            # need an offset iterator for that.
            iter_offsets = iter(offsets)
            cur_offset_and_size = iter_offsets.next()

            try:
                for cur_coal, rfile in self._coalesce_readv(
                        relpath, coalesced):
                    # Split the received chunk
                    for offset, size in cur_coal.ranges:
                        start = cur_coal.start + offset
                        rfile.seek(start, 0)
                        data = rfile.read(size)
                        data_len = len(data)
                        if data_len != size:
                            raise errors.ShortReadvError(relpath,
                                                         start,
                                                         size,
                                                         actual=data_len)
                        if (start, size) == cur_offset_and_size:
                            # The offsets requested are sorted as the coalesced
                            # ones, no need to cache. Win !
                            yield cur_offset_and_size[0], data
                            cur_offset_and_size = iter_offsets.next()
                        else:
                            # Different sorting. We need to cache.
                            data_map[(start, size)] = data

                    # Yield everything we can
                    while cur_offset_and_size in data_map:
                        # Clean the cached data since we use it
                        # XXX: will break if offsets contains duplicates --
                        # vila20071129
                        this_data = data_map.pop(cur_offset_and_size)
                        yield cur_offset_and_size[0], this_data
                        cur_offset_and_size = iter_offsets.next()

            except (errors.ShortReadvError, errors.InvalidRange,
                    errors.InvalidHttpRange), e:
                mutter('Exception %r: %s during http._readv', e, e)
                if (not isinstance(e, errors.ShortReadvError)
                        or retried_offset == cur_offset_and_size):
                    # We don't degrade the range hint for ShortReadvError since
                    # they do not indicate a problem with the server ability to
                    # handle ranges. Except when we fail to get back a required
                    # offset twice in a row. In that case, falling back to
                    # single range or whole file should help or end up in a
                    # fatal exception.
                    self._degrade_range_hint(relpath, coalesced,
                                             sys.exc_info())
                # Some offsets may have been already processed, so we retry
                # only the unsuccessful ones.
                offsets = [cur_offset_and_size] + [o for o in iter_offsets]
                retried_offset = cur_offset_and_size
                try_again = True
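
_readv sits behind the public Transport.readv API; a hedged sketch of how a caller consumes it (the URL, relative path and offsets are illustrative):

    # Hedged usage sketch; URL, path and offsets are illustrative.
    from bzrlib.transport import get_transport

    t = get_transport('http://example.com/branch/')
    for offset, data in t.readv('.bzr/repository/pack-names', [(0, 42), (100, 10)]):
        # (offset, data) pairs come back in the order the offsets were requested
        print offset, len(data)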
Example No. 34
 def test_trace_argument_utf8(self):
     """Write a Unicode argument to the trace log"""
     mutter(u'the unicode character for benzene is %s',
            u'\N{BENZENE RING}'.encode('utf-8'))
     log = self.get_log()
     self.assertContainsRe(log, 'the unicode character')
Example No. 35
    def commit(self,
               message=None,
               timestamp=None,
               timezone=None,
               committer=None,
               specific_files=None,
               rev_id=None,
               allow_pointless=True,
               strict=False,
               verbose=False,
               revprops=None,
               working_tree=None,
               local=False,
               reporter=None,
               config=None,
               message_callback=None,
               recursive='down',
               exclude=None):
        """Commit working copy as a new revision.

        :param message: the commit message (it or message_callback is required)

        :param timestamp: if not None, seconds-since-epoch for a
            postdated/predated commit.

        :param specific_files: If true, commit only those files.

        :param rev_id: If set, use this as the new revision id.
            Useful for test or import commands that need to tightly
            control what revisions are assigned.  If you duplicate
            a revision id that exists elsewhere it is your own fault.
            If null (default), a time/random revision id is generated.

        :param allow_pointless: If true (default), commit even if nothing
            has changed and no merges are recorded.

        :param strict: If true, don't allow a commit if the working tree
            contains unknown files.

        :param revprops: Properties for new revision
        :param local: Perform a local only commit.
        :param reporter: the reporter to use or None for the default
        :param verbose: if True and the reporter is not None, report everything
        :param recursive: If set to 'down', commit in any subtrees that have
            pending changes of any sort during this commit.
        :param exclude: None or a list of relative paths to exclude from the
            commit. Pending changes to excluded files will be ignored by the
            commit. 
        """
        mutter('preparing to commit')

        if working_tree is None:
            raise BzrError("working_tree must be passed into commit().")
        else:
            self.work_tree = working_tree
            self.branch = self.work_tree.branch
            if getattr(self.work_tree, 'requires_rich_root', lambda: False)():
                if not self.branch.repository.supports_rich_root():
                    raise errors.RootNotRich()
        if message_callback is None:
            if message is not None:
                if isinstance(message, str):
                    message = message.decode(bzrlib.user_encoding)
                message_callback = lambda x: message
            else:
                raise BzrError("The message or message_callback keyword"
                               " parameter is required for commit().")

        self.bound_branch = None
        self.any_entries_changed = False
        self.any_entries_deleted = False
        if exclude is not None:
            self.exclude = sorted(
                minimum_path_selection(exclude))
        else:
            self.exclude = []
        self.local = local
        self.master_branch = None
        self.master_locked = False
        self.recursive = recursive
        self.rev_id = None
        if specific_files is not None:
            self.specific_files = sorted(
                minimum_path_selection(specific_files))
        else:
            self.specific_files = None
        self.specific_file_ids = None
        self.allow_pointless = allow_pointless
        self.revprops = revprops
        self.message_callback = message_callback
        self.timestamp = timestamp
        self.timezone = timezone
        self.committer = committer
        self.strict = strict
        self.verbose = verbose
        # accumulates an inventory delta to the basis entry, so we can make
        # just the necessary updates to the workingtree's cached basis.
        self._basis_delta = []

        self.work_tree.lock_write()
        self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
        self.basis_revid = self.work_tree.last_revision()
        self.basis_tree = self.work_tree.basis_tree()
        self.basis_tree.lock_read()
        try:
            # Cannot commit with conflicts present.
            if len(self.work_tree.conflicts()) > 0:
                raise ConflictsInTree

            # Setup the bound branch variables as needed.
            self._check_bound_branch()

            # Check that the working tree is up to date
            old_revno, new_revno = self._check_out_of_date_tree()

            # Complete configuration setup
            if reporter is not None:
                self.reporter = reporter
            elif self.reporter is None:
                self.reporter = self._select_reporter()
            if self.config is None:
                self.config = self.branch.get_config()

            # If provided, ensure the specified files are versioned
            if self.specific_files is not None:
                # Note: This routine is being called because it raises
                # PathNotVersionedError as a side effect of finding the IDs. We
                # later use the ids we found as input to the working tree
                # inventory iterator, so we only consider those ids rather than
                # examining the whole tree again.
                # XXX: Don't we have filter_unversioned to do this more
                # cheaply?
                self.specific_file_ids = tree.find_ids_across_trees(
                    specific_files, [self.basis_tree, self.work_tree])

            # Setup the progress bar. As the number of files that need to be
            # committed is unknown, progress is reported as stages.
            # We keep track of entries separately though and include that
            # information in the progress bar during the relevant stages.
            self.pb_stage_name = ""
            self.pb_stage_count = 0
            self.pb_stage_total = 5
            if self.bound_branch:
                self.pb_stage_total += 1
            self.pb.show_pct = False
            self.pb.show_spinner = False
            self.pb.show_eta = False
            self.pb.show_count = True
            self.pb.show_bar = True

            self.basis_inv = self.basis_tree.inventory
            self._gather_parents()
            # After a merge, a selected file commit is not supported.
            # See 'bzr help merge' for an explanation as to why.
            if len(self.parents) > 1 and self.specific_files:
                raise errors.CannotCommitSelectedFileMerge(self.specific_files)
            # Excludes are a form of selected file commit.
            if len(self.parents) > 1 and self.exclude:
                raise errors.CannotCommitSelectedFileMerge(self.exclude)

            # Collect the changes
            self._set_progress_stage("Collecting changes",
                    entries_title="Directory")
            self.builder = self.branch.get_commit_builder(self.parents,
                self.config, timestamp, timezone, committer, revprops, rev_id)
            
            try:
                # find the location being committed to
                if self.bound_branch:
                    master_location = self.master_branch.base
                else:
                    master_location = self.branch.base

                # report the start of the commit
                self.reporter.started(new_revno, self.rev_id, master_location)

                self._update_builder_with_changes()
                self._report_and_accumulate_deletes()
                self._check_pointless()

                # TODO: Now the new inventory is known, check for conflicts.
                # ADHB 2006-08-08: If this is done, populate_new_inv should not add
                # weave lines, because nothing should be recorded until it is known
                # that commit will succeed.
                self._set_progress_stage("Saving data locally")
                self.builder.finish_inventory()

                # Prompt the user for a commit message if none provided
                message = message_callback(self)
                self.message = message
                self._escape_commit_message()

                # Add revision data to the local branch
                self.rev_id = self.builder.commit(self.message)

            except:
                self.builder.abort()
                raise

            self._process_pre_hooks(old_revno, new_revno)

            # Upload revision data to the master.
            # this will propagate merged revisions too if needed.
            if self.bound_branch:
                if not self.master_branch.repository.has_same_location(
                        self.branch.repository):
                    self._set_progress_stage("Uploading data to master branch")
                    self.master_branch.repository.fetch(self.branch.repository,
                        revision_id=self.rev_id)
                # now the master has the revision data
                # 'commit' to the master first so a timeout here causes the
                # local branch to be out of date
                self.master_branch.set_last_revision_info(new_revno,
                                                          self.rev_id)

            # and now do the commit locally.
            self.branch.set_last_revision_info(new_revno, self.rev_id)

            # Make the working tree up to date with the branch
            self._set_progress_stage("Updating the working tree")
            self.work_tree.update_basis_by_delta(self.rev_id,
                 self._basis_delta)
            self.reporter.completed(new_revno, self.rev_id)
            self._process_post_hooks(old_revno, new_revno)
        finally:
            self._cleanup()
        return self.rev_id
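# A minimal sketch of how the commit machinery above is normally reached
# from client code, assuming a bzrlib working tree at '.' with pending
# changes; the path and message below are only illustrative.
from bzrlib.workingtree import WorkingTree

def example_commit():
    tree = WorkingTree.open_containing('.')[0]
    # Commit only the named file; allow_pointless=False refuses an empty
    # commit, mirroring the allow_pointless parameter documented above.
    return tree.commit(message='fix typo in README',
                       specific_files=['README'],
                       allow_pointless=False)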
Exemplo n.º 36
0
import base64, ftplib

from bzrlib import (
    errors, )
from bzrlib.i18n import gettext
from bzrlib.trace import (
    mutter,
    note,
)
from bzrlib.transport.ftp import FtpTransport

try:
    import kerberos
except ImportError, e:
    mutter('failed to import kerberos lib: %s', e)
    raise errors.DependencyNotPresent('kerberos', e)

if getattr(kerberos, "authGSSClientWrap", None) is None:
    raise errors.DependencyNotPresent(
        'kerberos', "missing encryption function authGSSClientWrap")


class GSSAPIFtp(ftplib.FTP):
    """Extended version of ftplib.FTP that can authenticate using GSSAPI."""
    def mic_putcmd(self, line):
        rc = kerberos.authGSSClientWrap(self.vc, base64.b64encode(line))
        wrapped = kerberos.authGSSClientResponse(self.vc)
        ftplib.FTP.putcmd(self, "MIC " + wrapped)

    def mic_getline(self):
Exemplo n.º 37
0
    def _detect_moves(self, threshold, dry_run):
        delta = self.tree.changes_from(self.basis_tree, want_unversioned=True)
        inv = self.tree.inventory
        unknowns = self._find_unknowns(delta)
        removed = set()
        matches = []
        for path, file_id, kind in delta.removed:
            if kind == "directory":
                continue
            path = inv.id2path(file_id)
            for new_path, new_kind in unknowns:
                if kind != new_kind:
                    continue
                similarity = self._compare_files(file_id, new_path)
                matches.append((similarity, path, new_path))
            removed.add(path)
        matches.sort(reverse=True)

        # Try to detect file renames, based on text similarity
        used = set()
        file_renames = []
        for similarity, old_path, new_path in matches:
            if similarity < threshold:
                self.outf.write(
                    "Skipping %d file(s) with similarity below "
                    "%d%%.\n" % (len(removed), threshold * 100))
                break
            if old_path not in removed or new_path in used:
                trace.mutter("File %s already moved", old_path)
                continue
            used.add(new_path)
            removed.remove(old_path)
            file_renames.append((similarity, old_path, new_path))

        # Try to detect directory renames, based on file renames
        dir_renames = []
        dir_rename_map = {}
        for similarity, old_path, new_path in file_renames:
            old_dirs = osutils.splitpath(old_path)[:-1]
            new_dirs = osutils.splitpath(new_path)[:-1]
            for old_dir, new_dir in zip(old_dirs, new_dirs):
                dir_rename_map.setdefault(old_dir, set()).add(new_dir)
        for old_dir, new_dirs in sorted(dir_rename_map.iteritems()):
            if len(new_dirs) != 1:
                continue
            new_dir = iter(new_dirs).next()
            dir_renames.append((-1, old_dir, new_dir))
        # needs to be smarter before it can be enabled
        dir_renames = []

        # Actually rename
        renames = dir_renames + file_renames
        for similarity, old_path, new_path in renames:
            if not dry_run:
                parent_dirs = []
                to_dir = new_path
                while True:
                    to_dir, to_tail = os.path.split(to_dir)
                    if inv.path2id(to_dir) is None:
                        parent_dirs.append(to_dir)
                    if not to_tail or not to_dir:
                        break
                if parent_dirs:
                    self.tree.add(reversed(parent_dirs))
                    self.tree.flush()
                self.tree.rename_one(old_path, new_path, after=True)
            if similarity == -1:
                self.outf.write("%s => %s\n" % (old_path, new_path))
            else:
                self.outf.write("%s => %s (%d%% similar)\n" % (
                    old_path, new_path, similarity * 100))
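# The move detection above depends on _compare_files() producing a
# similarity score between a removed file and an unknown file.  Its
# implementation is not shown here; a plausible difflib-based stand-in
# is sketched below (an assumption, not the actual plugin code).
import difflib

def text_similarity(old_text, new_text):
    """Return a similarity ratio in [0.0, 1.0] between two text blobs."""
    return difflib.SequenceMatcher(None, old_text, new_text).ratio()

def rank_candidate_moves(removed_texts, unknown_texts):
    """Rank (similarity, old_path, new_path) tuples, best match first.

    Both arguments map path -> file content, mirroring the way the
    snippet pairs delta.removed entries with unknown files.
    """
    matches = []
    for old_path, old_text in removed_texts.items():
        for new_path, new_text in unknown_texts.items():
            matches.append((text_similarity(old_text, new_text),
                            old_path, new_path))
    matches.sort(reverse=True)
    return matches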
Exemplo n.º 38
0
 def listable(self):
     """See Transport.listable."""
     if 'gio' in debug.debug_flags:
         mutter("GIO listable")
     return True
Exemplo n.º 39
0
        try:
            if 'gio' in debug.debug_flags:
                mutter("GIO rmdir %s" % relpath)
            st = self.stat(relpath)
            if stat.S_ISDIR(st.st_mode):
                f = self._get_GIO(relpath)
                f.delete()
            else:
                raise errors.NotADirectory(relpath)
        except gio.Error, e:
            self._translate_gio_error(e, relpath)
        except errors.NotADirectory, e:
            # just re-raise it unchanged
            raise e
        except Exception, e:
            mutter('failed to rmdir %s: %s' % (relpath, e))
            raise errors.PathError(relpath)

    def append_file(self, relpath, file, mode=None):
        """Append the text in the file-like object into the final
        location.
        """
        # GIO append_to seems not to append but to truncate.
        # Work around this.
        if 'gio' in debug.debug_flags:
            mutter("GIO append_file: %s" % relpath)
        tmppath = '%s.tmp.%.9f.%d.%d' % (relpath, time.time(), os.getpid(),
                                         random.randint(0, 0x7FFFFFFF))
        try:
            result = 0
            fo = self._get_GIO(tmppath)
Exemplo n.º 40
0
            # Because we set_pipelined() earlier, theoretically we might
            # avoid the round trip for fout.close()
            if mode is not None:
                self._get_sftp().chmod(tmp_abspath, mode)
            fout.close()
            closed = True
            self._rename_and_overwrite(tmp_abspath, abspath)
            return length
        except Exception, e:
            # If we fail, try to clean up the temporary file
            # before we throw the exception
            # but don't let another exception mess things up
            # Write out the traceback, because otherwise
            # the catch and throw destroys it
            import traceback
            mutter(traceback.format_exc())
            try:
                if not closed:
                    fout.close()
                self._get_sftp().remove(tmp_abspath)
            except:
                # raise the saved exception
                raise e
            # raise the original with its traceback if we can.
            raise

    def _put_non_atomic_helper(self,
                               relpath,
                               writer,
                               mode=None,
                               create_parent_dir=False,
Exemplo n.º 41
0
    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = offset_iter.next()
        # paramiko .readv() yields strings that are in the order of the requests
        # So we track the current request to know where the next data is
        # being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet
        # It is (start, end, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short readv.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
        for (start, length), data in itertools.izip(requests, data_stream):
            if data is None:
                # The data stream ended before this request was satisfied.
                raise errors.ShortReadvError(self.relpath, start, length, 0)
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath, start, length,
                                             len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = ''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copying.
                buffered = ''.join(buffered_data)
                # Clean out buffered data so that we keep memory
                # consumption low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much always
                #       read in order, so you won't get any extra data in the
                #       middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request, spit it
                    # out
                    cur_data = buffered[buffered_offset:buffered_offset +
                                        cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    cur_offset, cur_size = offset_iter.next()
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == len(buffered_data):
                    # No tail to leave behind
                    buffered_data = []
                    buffered_len = 0
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        # now that the data stream is done, close the handle
        fp.close()
        if buffered_len:
            buffered = ''.join(buffered_data)
            del buffered_data[:]
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(map(lambda x: len(x[1]), data_chunks)))
            # We've processed all the readv data; at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often,
            # so this code path isn't optimized.
            # We use an interesting lookup process for data_chunks.
            # Specifically, we call bisect_left(data_chunks, (qstart,)) on the
            # sorted list of (start, data) tuples: if start == qstart we get
            # the exact chunk, otherwise we get the chunk just before it.
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset, ))
                if idx < len(
                        data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # something is wrong
                    data = ''
                if len(data) != cur_size:
                    raise AssertionError(
                        'We must have miscalculated.'
                        ' We expected %d bytes, but only found %d' %
                        (cur_size, len(data)))
                yield cur_offset, data
                cur_offset, cur_size = offset_iter.next()
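# The tail of request_and_yield_offsets() stores unconsumed data as
# sorted (start, data) tuples and locates the chunk covering a requested
# offset with bisect_left.  The helper below restates just that lookup
# in isolation; the function name is only illustrative.
import bisect

def bytes_at(data_chunks, offset, size):
    """Return `size` bytes starting at `offset` from sorted (start, data) chunks."""
    idx = bisect.bisect_left(data_chunks, (offset,))
    if idx < len(data_chunks) and data_chunks[idx][0] == offset:
        # The requested range starts exactly at this chunk.
        return data_chunks[idx][1][:size]
    elif idx > 0:
        # The range starts somewhere inside the previous chunk.
        idx -= 1
        sub_offset = offset - data_chunks[idx][0]
        return data_chunks[idx][1][sub_offset:sub_offset + size]
    return ''

# bytes_at([(0, 'abcdef'), (10, 'uvwxyz')], 12, 3) == 'wxy'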
Exemplo n.º 42
0
 def _preload_libraries(self):
     for pyname in libraries_to_preload:
         try:
             __import__(pyname)
         except ImportError as e:
             trace.mutter('failed to preload %s: %s' % (pyname, e))
Exemplo n.º 43
0
 def external_url(self):
     """See bzrlib.transport.Transport.external_url."""
     if 'gio' in debug.debug_flags:
         mutter("GIO external_url", self.base)
     # GIO external url
     return self.base
Exemplo n.º 44
0
 def _match_on(self, branch, revs):
     trace.mutter('matching ancestor: on: %s, %s', self.spec, branch)
     return self._find_revision_info(branch,
                                     self._get_submit_location(branch))
Exemplo n.º 45
0
def load_from_zip(zip_name):
    """Load all the plugins in a zip."""
    valid_suffixes = ('.py', '.pyc', '.pyo')    # only python modules/packages
                                                # are allowed
    try:
        index = zip_name.rindex('.zip')
    except ValueError:
        return
    archive = zip_name[:index+4]
    prefix = zip_name[index+5:]

    mutter('Looking for plugins in %r', zip_name)

    # use zipfile to get list of files/dirs inside zip
    try:
        z = zipfile.ZipFile(archive)
        namelist = z.namelist()
        z.close()
    except zipfile.error:
        # not a valid zip
        return

    if prefix:
        prefix = prefix.replace('\\','/')
        if prefix[-1] != '/':
            prefix += '/'
        ix = len(prefix)
        namelist = [name[ix:]
                    for name in namelist
                    if name.startswith(prefix)]

    mutter('Names in archive: %r', namelist)
    
    for name in namelist:
        if not name or name.endswith('/'):
            continue
    
        # '/' is used to separate pathname components inside zip archives
        ix = name.rfind('/')
        if ix == -1:
            head, tail = '', name
        else:
            head, tail = name.rsplit('/',1)
        if '/' in head:
            # we don't need to look in subdirectories
            continue
    
        base, suffix = osutils.splitext(tail)
        if suffix not in valid_suffixes:
            continue
    
        if base == '__init__':
            # package
            plugin_name = head
        elif head == '':
            # module
            plugin_name = base
        else:
            continue
    
        if not plugin_name:
            continue
        if getattr(_mod_plugins, plugin_name, None):
            mutter('Plugin name %s already loaded', plugin_name)
            continue
    
        try:
            exec "import bzrlib.plugins.%s" % plugin_name in {}
            mutter('Load plugin %s from zip %r', plugin_name, zip_name)
        except KeyboardInterrupt:
            raise
        except Exception, e:
            ## import pdb; pdb.set_trace()
            warning('Unable to load plugin %r from %r'
                    % (name, zip_name))
            log_exception_quietly()
            if 'error' in debug.debug_flags:
                trace.print_exception(sys.exc_info(), sys.stderr)
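# load_from_zip() boils down to mapping zip member names to plugin
# names: a top-level module 'foo.py' (or .pyc/.pyo) becomes plugin
# 'foo', a package 'foo/__init__.py' also becomes 'foo', and anything
# nested deeper is skipped.  The helper below restates that mapping as
# a sketch; it is not part of bzrlib.

def plugin_name_from_member(name, valid_suffixes=('.py', '.pyc', '.pyo')):
    """Return the plugin name for a zip member, or None if it is not one."""
    if not name or name.endswith('/'):
        return None
    if '/' in name:
        head, tail = name.rsplit('/', 1)
    else:
        head, tail = '', name
    if '/' in head:
        return None                 # nested more than one directory deep
    for suffix in valid_suffixes:
        if tail.endswith(suffix):
            base = tail[:-len(suffix)]
            break
    else:
        return None                 # not a python module or package
    if base == '__init__':
        return head or None         # package: the directory is the plugin
    if head == '':
        return base or None         # top-level module
    return None                     # module inside a package: skipped

# plugin_name_from_member('myplugin/__init__.py') -> 'myplugin'
# plugin_name_from_member('helper.py')            -> 'helper'
# plugin_name_from_member('pkg/helper.py')        -> None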
Exemplo n.º 46
0

def report_bug(exc_info, stderr):
    if ('no_apport' in debug.debug_flags) or \
        os.environ.get('APPORT_DISABLE', None):
        return report_bug_legacy(exc_info, stderr)
    try:
        if report_bug_to_apport(exc_info, stderr):
            # wrote a file; if None then report the old way
            return
    except ImportError, e:
        trace.mutter("couldn't find apport bug-reporting library: %s" % e)
    except Exception, e:
        # this should only happen if apport is installed but it didn't
        # work, eg because of an io error writing the crash file
        trace.mutter("bzr: failed to report crash using apport: %r" % e)
        trace.log_exception_quietly()
    return report_bug_legacy(exc_info, stderr)


def report_bug_legacy(exc_info, err_file):
    """Report a bug by just printing a message to the user."""
    trace.print_exception(exc_info, err_file)
    err_file.write('\n')
    import textwrap

    def print_wrapped(l):
        err_file.write(
            textwrap.fill(l, width=78, subsequent_indent='    ') + '\n')
    print_wrapped('bzr %s on python %s (%s)\n' % \
        (bzrlib.__version__,
Exemplo n.º 47
0
 def test_trace_argument_unicode(self):
     """Write a Unicode argument to the trace log"""
     mutter(u'the unicode character for benzene is %s', u'\N{BENZENE RING}')
     self.assertContainsRe(self._get_log(keep_log_file=True),
                           'the unicode character')
Exemplo n.º 48
0
 def iter_files_recursive(self):
     # needs special handling because it does not have a relpath parameter
     mutter("%s %s"
         % ('iter_files_recursive', self._decorated.base))
     return self._call_and_log_result('iter_files_recursive', (), {})
Exemplo n.º 49
0
 def _match_on(self, branch, revs):
     trace.mutter('Returning RevisionSpec._match_on: None')
     return RevisionInfo(branch, None, None)
Exemplo n.º 50
0
def load_host_keys():
    """
    Load system host keys (probably doesn't work on windows) and any
    "discovered" keys from previous sessions.
    """
    global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
    try:
        SYSTEM_HOSTKEYS = paramiko.util.load_host_keys(
            os.path.expanduser('~/.ssh/known_hosts'))
    except IOError, e:
        trace.mutter('failed to load system host keys: ' + str(e))
    bzr_hostkey_path = osutils.pathjoin(config.config_dir(), 'ssh_host_keys')
    try:
        BZR_HOSTKEYS = paramiko.util.load_host_keys(bzr_hostkey_path)
    except IOError, e:
        trace.mutter('failed to load bzr host keys: ' + str(e))
        save_host_keys()


def save_host_keys():
    """
    Save "discovered" host keys in $(config)/ssh_host_keys/.
    """
    global SYSTEM_HOSTKEYS, BZR_HOSTKEYS
    bzr_hostkey_path = osutils.pathjoin(config.config_dir(), 'ssh_host_keys')
    config.ensure_config_dir_exists()

    try:
        f = open(bzr_hostkey_path, 'w')
        f.write('# SSH host keys collected by bzr\n')
        for hostname, keys in BZR_HOSTKEYS.iteritems():
Exemplo n.º 51
0
 def __init__(self, *args, **kwargs):
     trace.mutter('Initializing ftp_server: %r, %r', args, kwargs)
     medusa.ftp_server.ftp_server.__init__(self, *args, **kwargs)
Exemplo n.º 52
0
def zip_exporter_generator(tree,
                           dest,
                           root,
                           subdir=None,
                           force_mtime=None,
                           fileobj=None):
    """ Export this tree to a new zip file.

    `dest` will be created holding the contents of this tree; if it
    already exists, it will be overwritten.
    """

    compression = zipfile.ZIP_DEFLATED
    if fileobj is not None:
        dest = fileobj
    elif dest == "-":
        dest = sys.stdout
    zipf = zipfile.ZipFile(dest, "w", compression)
    try:
        for dp, tp, ie in _export_iter_entries(tree, subdir):
            file_id = ie.file_id
            mutter("  export {%s} kind %s to %s", file_id, ie.kind, dest)

            # zipfile.ZipFile switches all paths to forward
            # slashes anyway, so just stick with that.
            if force_mtime is not None:
                mtime = force_mtime
            else:
                mtime = tree.get_file_mtime(ie.file_id, tp)
            date_time = time.localtime(mtime)[:6]
            filename = osutils.pathjoin(root, dp).encode('utf8')
            if ie.kind == "file":
                zinfo = zipfile.ZipInfo(filename=filename, date_time=date_time)
                zinfo.compress_type = compression
                zinfo.external_attr = _FILE_ATTR
                content = tree.get_file_text(file_id, tp)
                zipf.writestr(zinfo, content)
            elif ie.kind == "directory":
                # Directories must contain a trailing slash, to indicate
                # to the zip routine that they are really directories and
                # not just empty files.
                zinfo = zipfile.ZipInfo(filename=filename + '/',
                                        date_time=date_time)
                zinfo.compress_type = compression
                zinfo.external_attr = _DIR_ATTR
                zipf.writestr(zinfo, '')
            elif ie.kind == "symlink":
                zinfo = zipfile.ZipInfo(filename=(filename + '.lnk'),
                                        date_time=date_time)
                zinfo.compress_type = compression
                zinfo.external_attr = _FILE_ATTR
                zipf.writestr(zinfo, tree.get_symlink_target(file_id, tp))
            yield

        zipf.close()

    except UnicodeEncodeError:
        zipf.close()
        os.remove(dest)
        from bzrlib.errors import BzrError
        raise BzrError("Can't export non-ascii filenames to zip")
Exemplo n.º 53
0
File: trace.py Project: biji/qbzr
def report_exception(exc_info=None,
                     type=MAIN_LOAD_METHOD,
                     window=None,
                     ui_mode=False):
    """Report an exception.

    The error is reported to the console or a message box, depending
    on the type. 
    """

    # We only want one error to show if the user chose Close
    global closing_due_to_error
    # 0.20 special: We check hasattr() first to work around
    # <http://bugs.python.org/issue4230>
    if closing_due_to_error or \
            (hasattr(window, 'closing_due_to_error') and
             window.closing_due_to_error):
        return

    if exc_info is None:
        exc_info = sys.exc_info()

    exc_type, exc_object, exc_tb = exc_info

    # Don't show error for StopException
    if isinstance(exc_object, StopException):
        # Do we maybe want to log this?
        return

    msg_box = ((type == MAIN_LOAD_METHOD and
                (window and window.ui_mode or ui_mode))
               or not type == MAIN_LOAD_METHOD)
    pdb = os.environ.get('BZR_PDB')
    if pdb:
        msg_box = False

    if msg_box:
        err_file = StringIO()
    else:
        err_file = sys.stderr

    # always tell bzr to report it, so it ends up in the log.
    # See https://bugs.launchpad.net/bzr/+bug/785695
    error_type = _bzrlib_report_exception(exc_info, err_file)
    backtrace = traceback.format_exception(*exc_info)
    mutter(''.join(backtrace))

    if (type == MAIN_LOAD_METHOD and window):
        window.ret_code = error_type

    # XXX This is very similar to bzrlib.commands.exception_to_return_code.
    # We should get bzr to refactor so that this is reusable.
    if pdb:
        # Without this, PyQt shows a lot of warnings. See:
        # http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/pyqt4ref.html#using-pyqt-from-the-python-shell
        QtCore.pyqtRemoveInputHook()

        print '**** entering debugger'
        tb = exc_info[2]
        import pdb
        if sys.version_info[:2] < (2, 6):
            # XXX: we want to do
            #    pdb.post_mortem(tb)
            # but because pdb.post_mortem gives bad results for tracebacks
            # from inside generators, we do it manually.
            # (http://bugs.python.org/issue4150, fixed in Python 2.6)

            # Setup pdb on the traceback
            p = pdb.Pdb()
            p.reset()
            p.setup(tb.tb_frame, tb)
            # Point the debugger at the deepest frame of the stack
            p.curindex = len(p.stack) - 1
            p.curframe = p.stack[p.curindex][0]
            # Start the pdb prompt.
            p.print_stack_entry(p.stack[p.curindex])
            p.execRcLines()
            p.cmdloop()
        else:
            pdb.post_mortem(tb)

    close = True
    if msg_box:
        if isinstance(exc_object, errors.LockContention):
            msg_box = create_lockerror_dialog(error_type, window)

        elif error_type == errors.EXIT_INTERNAL_ERROR:
            # this is a copy of bzrlib.trace.report_bug
            # but we separate the message and the traceback,
            # and add a hyperlink to the file-a-bug page.
            traceback_file = StringIO()
            _bzrlib_print_exception(exc_info, traceback_file)
            traceback_file.write('\n')
            traceback_file.write('bzr %s on python %s (%s)\n' % \
                               (bzrlib.__version__,
                                bzrlib._format_version_tuple(sys.version_info),
                                sys.platform))
            traceback_file.write('arguments: %r\n' % sys.argv)
            traceback_file.write(
                'encoding: %r, fsenc: %r, lang: %r\n' %
                (osutils.get_user_encoding(), sys.getfilesystemencoding(),
                 os.environ.get('LANG')))
            traceback_file.write("plugins:\n")
            for name, a_plugin in sorted(plugin.plugins().items()):
                traceback_file.write(
                    "  %-20s %s [%s]\n" %
                    (name, a_plugin.path(), a_plugin.__version__))

            msg_box = ErrorReport(gettext("Error"), True,
                                  traceback_file.getvalue(), exc_info, type,
                                  window)
        else:
            msg_box = ErrorReport(gettext("Error"), False, err_file.getvalue(),
                                  exc_info, type, window)
        if window is None:
            icon = QtGui.QIcon()
            icon.addFile(":/bzr-16.png", QtCore.QSize(16, 16))
            icon.addFile(":/bzr-32.png", QtCore.QSize(32, 32))
            icon.addFile(":/bzr-48.png", QtCore.QSize(48, 48))
            msg_box.setWindowIcon(icon)

        msg_box.exec_()

        if not msg_box.result() == QtGui.QMessageBox.Close:
            close = False

    if close:
        if window is None:
            closing_due_to_error = True
            QtCore.QCoreApplication.instance().quit()
        else:
            window.closing_due_to_error = True
            window.close()
    return error_type
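# A sketch of how GUI code might route unexpected errors from an event
# handler through report_exception() above.  The decorator name is
# hypothetical; it assumes report_exception and ITEM_OR_EVENT_METHOD
# from this module are in scope.
def reports_exceptions(func):
    """Wrap a Qt slot so uncaught exceptions show the qbzr error dialog."""
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception:
            report_exception(type=ITEM_OR_EVENT_METHOD, window=self)
    return wrapper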
Exemplo n.º 54
0
from bzrlib import registry
from bzrlib.trace import mutter, warning

try:
    try:
        # it's in this package in python2.5
        from xml.etree.cElementTree import (ElementTree, SubElement, Element,
            XMLTreeBuilder, fromstring, tostring)
        import xml.etree as elementtree
    except ImportError:
        from cElementTree import (ElementTree, SubElement, Element,
                                  XMLTreeBuilder, fromstring, tostring)
        import elementtree.ElementTree
    ParseError = SyntaxError
except ImportError:
    mutter('WARNING: using slower ElementTree; consider installing cElementTree'
           " and make sure it's on your PYTHONPATH")
    # this copy is shipped with bzr
    from util.elementtree.ElementTree import (ElementTree, SubElement,
                                              Element, XMLTreeBuilder,
                                              fromstring, tostring)
    import util.elementtree as elementtree
    from xml.parsers.expat import ExpatError as ParseError

from bzrlib import errors


class Serializer(object):
    """Abstract object serialize/deserialize"""

    def write_inventory(self, inv, f):
        """Write inventory to a file"""
Exemplo n.º 55
0
def _log_cleanup_error(exc):
    trace.mutter('Cleanup failed:')
    trace.log_exception_quietly()
    if 'cleanup' in debug.debug_flags:
        trace.warning('bzr: warning: Cleanup failed: %s', exc)
Exemplo n.º 56
0
 def _trace(self, format, *args):
     if 'lock' not in debug.debug_flags:
         return
     mutter(str(self) + ": " + (format % args))
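# Several examples above gate their trace output on an entry in bzrlib's
# debug.debug_flags set (populated by the -D command-line option).  The
# pattern, reduced to a standalone helper; the 'trace_if' name is only
# illustrative.
from bzrlib import debug
from bzrlib.trace import mutter

def trace_if(flag, fmt, *args):
    """Emit a mutter() line only when `flag` is enabled, e.g. via -Dlock."""
    if flag in debug.debug_flags:
        mutter(fmt, *args)

# trace_if('lock', 'acquired lock on %s', 'branch/lock')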
Exemplo n.º 57
0
    def _commit(self, operation, message, timestamp, timezone, committer,
                specific_files, rev_id, allow_pointless, strict, verbose,
                working_tree, local, reporter, message_callback, recursive,
                exclude, possible_master_transports, lossy):
        mutter('preparing to commit')

        if working_tree is None:
            raise BzrError("working_tree must be passed into commit().")
        else:
            self.work_tree = working_tree
            self.branch = self.work_tree.branch
            if getattr(self.work_tree, 'requires_rich_root', lambda: False)():
                if not self.branch.repository.supports_rich_root():
                    raise errors.RootNotRich()
        if message_callback is None:
            if message is not None:
                if isinstance(message, str):
                    message = message.decode(get_user_encoding())
                message_callback = lambda x: message
            else:
                raise BzrError("The message or message_callback keyword"
                               " parameter is required for commit().")

        self.bound_branch = None
        self.any_entries_deleted = False
        if exclude is not None:
            self.exclude = sorted(minimum_path_selection(exclude))
        else:
            self.exclude = []
        self.local = local
        self.master_branch = None
        self.recursive = recursive
        self.rev_id = None
        # self.specific_files is None to indicate no filter, or any iterable to
        # indicate a filter - [] means no files at all, as per iter_changes.
        if specific_files is not None:
            self.specific_files = sorted(
                minimum_path_selection(specific_files))
        else:
            self.specific_files = None

        self.allow_pointless = allow_pointless
        self.message_callback = message_callback
        self.timestamp = timestamp
        self.timezone = timezone
        self.committer = committer
        self.strict = strict
        self.verbose = verbose

        self.work_tree.lock_write()
        operation.add_cleanup(self.work_tree.unlock)
        self.parents = self.work_tree.get_parent_ids()
        # We can use record_iter_changes IFF iter_changes is compatible with
        # the command line parameters, and the repository has fast delta
        # generation. See bug 347649.
        self.use_record_iter_changes = (
            not self.exclude
            and not self.branch.repository._format.supports_tree_reference
            and (self.branch.repository._format.fast_deltas
                 or len(self.parents) < 2))
        self.pb = ui.ui_factory.nested_progress_bar()
        operation.add_cleanup(self.pb.finished)
        self.basis_revid = self.work_tree.last_revision()
        self.basis_tree = self.work_tree.basis_tree()
        self.basis_tree.lock_read()
        operation.add_cleanup(self.basis_tree.unlock)
        # Cannot commit with conflicts present.
        if len(self.work_tree.conflicts()) > 0:
            raise ConflictsInTree

        # Setup the bound branch variables as needed.
        self._check_bound_branch(operation, possible_master_transports)

        # Check that the working tree is up to date
        old_revno, old_revid, new_revno = self._check_out_of_date_tree()

        # Complete configuration setup
        if reporter is not None:
            self.reporter = reporter
        elif self.reporter is None:
            self.reporter = self._select_reporter()
        if self.config_stack is None:
            self.config_stack = self.work_tree.get_config_stack()

        self._set_specific_file_ids()

        # Setup the progress bar. As the number of files that need to be
        # committed is unknown, progress is reported as stages.
        # We keep track of entries separately though and include that
        # information in the progress bar during the relevant stages.
        self.pb_stage_name = ""
        self.pb_stage_count = 0
        self.pb_stage_total = 5
        if self.bound_branch:
            # 2 extra stages: "Uploading data to master branch" and "Merging
            # tags to master branch"
            self.pb_stage_total += 2
        self.pb.show_pct = False
        self.pb.show_spinner = False
        self.pb.show_eta = False
        self.pb.show_count = True
        self.pb.show_bar = True

        self._gather_parents()
        # After a merge, a selected file commit is not supported.
        # See 'bzr help merge' for an explanation as to why.
        if len(self.parents) > 1 and self.specific_files is not None:
            raise errors.CannotCommitSelectedFileMerge(self.specific_files)
        # Excludes are a form of selected file commit.
        if len(self.parents) > 1 and self.exclude:
            raise errors.CannotCommitSelectedFileMerge(self.exclude)

        # Collect the changes
        self._set_progress_stage("Collecting changes", counter=True)
        self._lossy = lossy
        self.builder = self.branch.get_commit_builder(self.parents,
                                                      self.config_stack,
                                                      timestamp,
                                                      timezone,
                                                      committer,
                                                      self.revprops,
                                                      rev_id,
                                                      lossy=lossy)
        if not self.builder.supports_record_entry_contents and self.exclude:
            self.builder.abort()
            raise errors.ExcludesUnsupported(self.branch.repository)

        if self.builder.updates_branch and self.bound_branch:
            self.builder.abort()
            raise AssertionError(
                "bound branches not supported for commit builders "
                "that update the branch")

        try:
            self.builder.will_record_deletes()
            # find the location being committed to
            if self.bound_branch:
                master_location = self.master_branch.base
            else:
                master_location = self.branch.base

            # report the start of the commit
            self.reporter.started(new_revno, self.rev_id, master_location)

            self._update_builder_with_changes()
            self._check_pointless()

            # TODO: Now the new inventory is known, check for conflicts.
            # ADHB 2006-08-08: If this is done, populate_new_inv should not add
            # weave lines, because nothing should be recorded until it is known
            # that commit will succeed.
            self._set_progress_stage("Saving data locally")
            self.builder.finish_inventory()

            # Prompt the user for a commit message if none provided
            message = message_callback(self)
            self.message = message

            # Add revision data to the local branch
            self.rev_id = self.builder.commit(self.message)

        except Exception, e:
            mutter("aborting commit write group because of exception:")
            trace.log_exception_quietly()
            self.builder.abort()
            raise
Exemplo n.º 58
0
File: trace.py Project: biji/qbzr
    def __init__(self,
                 title,
                 message_internal,
                 trace_back,
                 exc_info,
                 type=MAIN_LOAD_METHOD,
                 parent=None):

        QtGui.QDialog.__init__(self, parent)

        self.buttonbox = QtGui.QDialogButtonBox()

        if parent:
            win_title = None
            if hasattr(parent, 'title'):
                if isinstance(parent.title, basestring):
                    win_title = parent.title
                elif isinstance(parent.title, (list, tuple)):
                    # just the first item is more useful.
                    win_title = parent.title[0]
            else:
                if hasattr(parent, 'windowTitle'):
                    win_title = parent.windowTitle()

            if win_title:
                close_label = gettext("Close %s Window") % win_title
            else:
                close_label = gettext("Close Window")
        else:
            close_label = gettext("Close Application")

        # PyQt is stupid and thinks QMessageBox.StandardButton and
        # QDialogButtonBox.StandardButton are different, so we have to
        # duplicate this :-(
        if type == MAIN_LOAD_METHOD:
            button = self.buttonbox.addButton(QtGui.QDialogButtonBox.Close)
            button.setText(close_label)
        elif type == SUB_LOAD_METHOD:
            button = self.buttonbox.addButton(QtGui.QDialogButtonBox.Ok)
            button.setText(gettext("Close Error Dialog"))
        elif type == ITEM_OR_EVENT_METHOD:
            button = self.buttonbox.addButton(QtGui.QDialogButtonBox.Close)
            button.setText(close_label)
            button = self.buttonbox.addButton(QtGui.QDialogButtonBox.Ignore)
            button.setText(gettext("Ignore Error"))

        def report_bug():
            from bzrlib import crash
            # Using private method because bzrlib.crash is not currently
            # intended for reuse from GUIs; see
            # https://bugs.launchpad.net/bzr/+bug/785696
            crash_filename = crash._write_apport_report_to_file(exc_info)

        try:
            import apport
        except ImportError, e:
            mutter("No Apport available to Bazaar")
            if message_internal:
                message = (
                    'Bazaar has encountered an internal error. Please '
                    'report a bug at <a href="%s">%s</a> including this '
                    'traceback, and a description of what you were doing '
                    'when the error occurred.' %
                    (_file_bugs_url, _file_bugs_url))
            else:
                message = (
                    'Bazaar has encountered an environmental error. Please '
                    'report a bug if this is not the result of a local problem '
                    'at <a href="%s">%s</a> including this '
                    'traceback, and a description of what you were doing '
                    'when the error occurred.' %
                    (_file_bugs_url, _file_bugs_url))
Exemplo n.º 59
0
    def push(self, overwrite=False, stop_revision=None, lossy=False):
        if not lossy:
            raise errors.NoRoundtrippingSupport(self.source, self.target)
        result = branch.BranchPushResult()
        result.source_branch = self.source
        result.target_branch = self.target
        result.old_revno, result.old_revid = self.target.last_revision_info()
        self.source.lock_read()
        try:
            graph = self.source.repository.get_graph()
            # This just handles simple cases, but that's good enough for tests
            my_history = branch_history(self.target.repository.get_graph(),
                                        result.old_revid)
            if stop_revision is None:
                stop_revision = self.source.last_revision()
            their_history = branch_history(graph, stop_revision)
            if their_history[:min(len(my_history), len(their_history)
                                  )] != my_history:
                raise errors.DivergedBranches(self.target, self.source)
            todo = their_history[len(my_history):]
            revidmap = {}
            for revid in todo:
                rev = self.source.repository.get_revision(revid)
                tree = self.source.repository.revision_tree(revid)

                def get_file_with_stat(file_id, path=None):
                    return (tree.get_file(file_id), None)

                tree.get_file_with_stat = get_file_with_stat
                new_revid = self.target.mapping.revision_id_foreign_to_bzr(
                    (str(rev.timestamp), str(rev.timezone),
                     str(self.target.revno())))
                parent_revno, parent_revid = self.target.last_revision_info()
                if parent_revid == revision.NULL_REVISION:
                    parent_revids = []
                else:
                    parent_revids = [parent_revid]
                builder = self.target.get_commit_builder(
                    parent_revids, self.target.get_config_stack(),
                    rev.timestamp, rev.timezone, rev.committer, rev.properties,
                    new_revid)
                try:
                    parent_tree = self.target.repository.revision_tree(
                        parent_revid)
                    for path, ie in tree.iter_entries_by_dir():
                        new_ie = ie.copy()
                        new_ie.revision = None
                        builder.record_entry_contents(
                            new_ie, [parent_tree.root_inventory], path, tree,
                            (ie.kind, ie.text_size, ie.executable,
                             ie.text_sha1))
                    builder.finish_inventory()
                except:
                    builder.abort()
                    raise
                revidmap[revid] = builder.commit(rev.message)
                self.target.set_last_revision_info(parent_revno + 1,
                                                   revidmap[revid])
                trace.mutter('lossily pushed revision %s -> %s', revid,
                             revidmap[revid])
        finally:
            self.source.unlock()
        result.new_revno, result.new_revid = self.target.last_revision_info()
        result.revidmap = revidmap
        return result
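# The divergence check above compares the target branch's history with a
# prefix of the source's history before pushing.  Restated on plain
# lists (the revision ids are hypothetical):
def is_fast_forward(my_history, their_history):
    """True if their_history extends my_history without diverging."""
    return their_history[:len(my_history)] == my_history

# is_fast_forward(['rev-1', 'rev-2'], ['rev-1', 'rev-2', 'rev-3']) -> True
# is_fast_forward(['rev-1', 'rev-X'], ['rev-1', 'rev-2', 'rev-3']) -> False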
Exemplo n.º 60
0
 def log_info(self, message, type='info'):
     """Redirect logging requests."""
     trace.mutter('ftp_channel %s: %s', type, message)