Example #1
    def _load_announcements(self):
        # Announcements contain unicode, because they come from JSON. We tell
        # PyYAML to give us unicode instead of str/bytes.
        def construct_unicode(loader, node):
            return node.value

        yaml.SafeLoader.add_constructor("tag:yaml.org,2002:str",
                                        construct_unicode)
        try:
            with self._cache_filepath.open() as f:
                servers = yaml.safe_load(f)
        except EnvironmentError:
            return  # no cache file
        if not isinstance(servers, list):
            log.err(InvalidCacheError("not a list"), level=log.WEIRD)
            return
        self.log("Using server data from cache", level=log.UNUSUAL)
        for server_params in servers:
            if not isinstance(server_params, dict):
                log.err(InvalidCacheError("not a dict: %r" %
                                          (server_params, )),
                        level=log.WEIRD)
                continue
            self._deliver_announcements(server_params['key_s'],
                                        server_params['ann'])
Example #2
    def _add_lease_failed(self, f, server_name, storage_index):
        # Older versions of Tahoe didn't handle the add-lease message very
        # well: <=1.1.0 throws a NameError because it doesn't implement
        # remote_add_lease(), 1.2.0/1.3.0 throw IndexError on unknown buckets
        # (which is most of them, since we send add-lease to everybody,
        # before we know whether or not they have any shares for us), and
        # 1.2.0 throws KeyError even on known buckets due to an internal bug
        # in the latency-measuring code.

        # we want to ignore the known-harmless errors and log the others. In
        # particular we want to log any local errors caused by coding
        # problems.

        if f.check(DeadReferenceError):
            return
        if f.check(RemoteException):
            if f.value.failure.check(KeyError, IndexError, NameError):
                # this may ignore a bit too much, but that only hurts us
                # during debugging
                return
            self.log(format="error in add_lease from [%(name)s]: %(f_value)s",
                     name=server_name,
                     f_value=str(f.value),
                     failure=f,
                     level=log.WEIRD,
                     umid="atbAxw")
            return
        # local errors are cause for alarm
        log.err(f,
                format="local error in add_lease to [%(name)s]: %(f_value)s",
                name=server_name,
                f_value=str(f.value),
                level=log.WEIRD,
                umid="hEGuQg")
Example #3
 def remote_publish(self, announcement):
     try:
         self._publish(announcement)
     except:
         log.err(format="Introducer.remote_publish failed on %(ann)s",
                 ann=announcement, level=log.UNUSUAL, umid="620rWA")
         raise
Example #4
    def _add_lease_failed(self, f, server_name, storage_index):
        # Older versions of Tahoe didn't handle the add-lease message very
        # well: <=1.1.0 throws a NameError because it doesn't implement
        # remote_add_lease(), 1.2.0/1.3.0 throw IndexError on unknown buckets
        # (which is most of them, since we send add-lease to everybody,
        # before we know whether or not they have any shares for us), and
        # 1.2.0 throws KeyError even on known buckets due to an internal bug
        # in the latency-measuring code.

        # we want to ignore the known-harmless errors and log the others. In
        # particular we want to log any local errors caused by coding
        # problems.

        if f.check(DeadReferenceError):
            return
        if f.check(RemoteException):
            if f.value.failure.check(KeyError, IndexError, NameError):
                # this may ignore a bit too much, but that only hurts us
                # during debugging
                return
            self.log(format="error in add_lease from [%(name)s]: %(f_value)s",
                     name=server_name,
                     f_value=str(f.value),
                     failure=f,
                     level=log.WEIRD, umid="atbAxw")
            return
        # local errors are cause for alarm
        log.err(f,
                format="local error in add_lease to [%(name)s]: %(f_value)s",
                name=server_name,
                f_value=str(f.value),
                level=log.WEIRD, umid="hEGuQg")
Example #5
 def _cleanup():
     try:
         fileutil.rm_dir(dirpath)
     finally:
         log.err(
             "We were unable to delete a non-ASCII directory %r created by the test. "
             "This is liable to cause failures on future builds." %
             (dirpath, ))
Example #6
 def publish(self, ann_t, canary, lp):
     try:
         self._publish(ann_t, canary, lp)
     except:
         log.err(format="Introducer.remote_publish failed on %(ann)s",
                 ann=ann_t,
                 level=log.UNUSUAL, parent=lp, umid="620rWA")
         raise
Example #7
 def _cleanup():
     try:
         fileutil.rm_dir(dirpath)
     finally:
         if os.path.exists(dirpath):
             msg = ("We were unable to delete a non-ASCII directory %r created by the test. "
                    "This is liable to cause failures on future builds." % (dirpath,))
             print msg
             log.err(msg)
Example #8
 def _do_pending_calls():
     self._pending_call = None
     for path1 in self._pending:
         if self._callbacks:
             for cb in self._callbacks:
                 try:
                     cb(None, path1, IN_CHANGED)
                 except Exception, e2:
                     log.err(e2)
Example #9
 def _do_pending_calls():
     self._pending_call = None
     for path in self._pending:
         if self._callbacks:
             for cb in self._callbacks:
                 try:
                     cb(None, path, IN_CHANGED)
                 except Exception, e:
                     log.err(e)
Example #10
 def test_err(self):
     try:
         raise SampleError("simple sample")
     except:
         f = Failure()
     tahoe_log.err(format="intentional sample error",
                   failure=f,
                   level=tahoe_log.OPERATIONAL,
                   umid="wO9UoQ")
     self.flushLoggedErrors(SampleError)
Example #11
 def test_err(self):
     """Logging with log.err() causes tests to fail."""
     try:
         raise SampleError("simple sample")
     except:
         f = Failure()
     tahoe_log.err(format="intentional sample error",
                   failure=f, level=tahoe_log.OPERATIONAL, umid="wO9UoQ")
     result = self.flushLoggedErrors(SampleError)
     self.assertEqual(len(result), 1)
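flushLoggedErrors() is what keeps the intentionally logged failure from failing the test. A minimal trial test with the same round trip, using twisted.python.log directly instead of Tahoe's wrapper, could look like this sketch.

from twisted.trial import unittest
from twisted.python import log
from twisted.python.failure import Failure

class SampleError(Exception):
    pass

class ErrLoggingTests(unittest.TestCase):
    def test_err_is_flushed(self):
        try:
            raise SampleError("simple sample")
        except SampleError:
            f = Failure()
        log.err(f, "intentional sample error")
        # Without this flush, trial fails the test because an error was
        # logged during its run.
        flushed = self.flushLoggedErrors(SampleError)
        self.assertEqual(len(flushed), 1)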
Example #12
 def _do_pending_calls():
     event_mask = IN_CHANGED
     self._pending_call = None
     for path1 in self._pending:
         if self._callbacks:
             for cb in self._callbacks:
                 try:
                     with CALLBACK(inotify_events=event_mask):
                         cb(None, path1, event_mask)
                 except Exception as e2:
                     log.err(e2)
     self._pending = set()
Example #13
def _with_log(op, res):
    """
    The default behaviour on firing an already-fired Deferred is unhelpful for
    debugging, because the AlreadyCalledError can easily get lost or be raised
    in a context that results in a different error. So make sure it is logged
    (for the abstractions defined here). If we are in a test, log.err will cause
    the test to fail.
    """
    try:
        op(res)
    except defer.AlreadyCalledError as e:
        log.err(e, op=repr(op), level=log.WEIRD)
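A short sketch of why the wrapper is useful: firing a Deferred a second time raises AlreadyCalledError at the second call site, and the wrapper turns that into a logged error instead of an exception surfacing somewhere unrelated. The Tahoe-specific op= field and log.WEIRD level are replaced with plain twisted.python.log here.

from twisted.internet import defer
from twisted.python import log

def _with_log(op, res):
    try:
        op(res)
    except defer.AlreadyCalledError as e:
        log.err(e, "double fire of %r" % (op,))

d = defer.Deferred()
d.addCallback(lambda r: r)
_with_log(d.callback, "first")   # fires normally
_with_log(d.callback, "second")  # AlreadyCalledError is caught and logged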
Example #14
 def remote_announce(self, announcements):
     self.log("received %d announcements" % len(announcements))
     self._debug_counts["inbound_message"] += 1
     for ann in announcements:
         try:
             self._process_announcement(ann)
         except:
             log.err(format="unable to process announcement %(ann)s", ann=ann)
             # Don't let a corrupt announcement prevent us from processing
             # the remaining ones. Don't return an error to the server,
             # since they'd just ignore it anyways.
             pass
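The point of the bare except above is batch resilience: a malformed announcement is logged and skipped so the remaining ones still get processed. Stripped of the Tahoe specifics, the pattern reduces to the following sketch; process_announcement and the sample announcements are placeholders.

from twisted.python import log

def process_announcement(ann):
    # stand-in for the real validation/processing step
    if "ann" not in ann:
        raise KeyError("ann")

def handle_announcements(announcements):
    for ann in announcements:
        try:
            process_announcement(ann)
        except Exception:
            # log and keep going; one corrupt entry must not stop the batch
            log.err(None, "unable to process announcement %r" % (ann,))

handle_announcements([{"ann": {}}, {"bogus": True}, {"ann": {}}])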
Example #15
    def _thread(self):
        try:
            _assert(self._filter is not None, "no watch set")

            # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
            # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.

            fni = FileNotifyInformation()

            while True:
                self._state = STARTED
                try:
                    fni.read_changes(self._hDirectory, self._recursive, self._filter)
                except WindowsError as e:
                    self._state = STOPPING

                if self._check_stop():
                    return
                for info in fni:
                    # print info
                    path = self._path.preauthChild(info.filename)  # FilePath with Unicode path
                    if info.action == FILE_ACTION_MODIFIED and path.isdir():
                        # print "Filtering out %r" % (info,)
                        continue
                    #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)

                    def _do_pending_calls():
                        self._pending_call = None
                        for path1 in self._pending:
                            if self._callbacks:
                                for cb in self._callbacks:
                                    try:
                                        cb(None, path1, IN_CHANGED)
                                    except Exception, e2:
                                        log.err(e2)
                        self._pending = set()

                    def _maybe_notify(path2):
                        if path2 not in self._pending:
                            self._pending.add(path2)
                        if self._state not in [STOPPING, STOPPED]:
                            _do_pending_calls()
#                        if self._pending_call is None and self._state not in [STOPPING, STOPPED]:
#                            self._pending_call = reactor.callLater(self._pending_delay, _do_pending_calls)

                    reactor.callFromThread(_maybe_notify, path)
                    if self._check_stop():
                        return
        except Exception, e:
            log.err(e)
            self._state = STOPPED
            raise
Example #16
    def _thread(self):
        try:
            _assert(self._filter is not None, "no watch set")

            # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
            # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.

            fni = FileNotifyInformation()

            while True:
                self._state = STARTED
                try:
                    fni.read_changes(self._hDirectory, self._recursive, self._filter)
                except WindowsError as e:
                    self._state = STOPPING

                if self._check_stop():
                    return
                for info in fni:
                    # print info
                    path = self._path.preauthChild(info.filename)  # FilePath with Unicode path
                    if info.action == FILE_ACTION_MODIFIED and path.isdir():
                        # print "Filtering out %r" % (info,)
                        continue
                    #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)

                    def _do_pending_calls():
                        self._pending_call = None
                        for path in self._pending:
                            if self._callbacks:
                                for cb in self._callbacks:
                                    try:
                                        cb(None, path, IN_CHANGED)
                                    except Exception, e:
                                        log.err(e)
                        self._pending = set()

                    def _maybe_notify(path):
                        if path not in self._pending:
                            self._pending.add(path)
                        if self._state not in [STOPPING, STOPPED]:
                            _do_pending_calls()
#                        if self._pending_call is None and self._state not in [STOPPING, STOPPED]:
#                            self._pending_call = reactor.callLater(self._pending_delay, _do_pending_calls)

                    reactor.callFromThread(_maybe_notify, path)
                    if self._check_stop():
                        return
        except Exception, e:
            log.err(e)
            self._state = STOPPED
            raise
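Both _thread variants hand every notification back to the reactor with reactor.callFromThread, because Twisted (and Tahoe) APIs must not be called from a worker thread. The hand-off on its own looks roughly like this sketch, with a trivial watcher and callback standing in for the real directory-watching code.

from twisted.internet import reactor
from twisted.python import log

def notify(path):
    # runs in the reactor thread; safe to touch Twisted state here
    print("changed:", path)

def watcher():
    # runs in a worker thread; it must not call most reactor APIs directly
    for path in ["a.txt", "b.txt"]:
        try:
            reactor.callFromThread(notify, path)
        except Exception as e:
            log.err(e)
    reactor.callFromThread(reactor.stop)

reactor.callInThread(watcher)
reactor.run()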
Example #17
 def remote_announce(self, announcements):
     self.log("received %d announcements" % len(announcements))
     self._debug_counts["inbound_message"] += 1
     for ann in announcements:
         try:
             self._process_announcement(ann)
         except:
             log.err(format="unable to process announcement %(ann)s",
                     ann=ann)
             # Don't let a corrupt announcement prevent us from processing
             # the remaining ones. Don't return an error to the server,
             # since they'd just ignore it anyways.
             pass
Example #18
    def _send_requests(self, desired):
        ask = desired - self._pending - self._received.get_spans()
        log.msg("%s._send_requests, desired=%s, pending=%s, ask=%s" %
                (repr(self), desired.dump(), self._pending.dump(), ask.dump()),
                level=log.NOISY, parent=self._lp, umid="E94CVA")
        # XXX At one time, this code distinguished between data blocks and
        # hashes, and made sure to send (small) requests for hashes before
        # sending (big) requests for blocks. The idea was to make sure that
        # all hashes arrive before the blocks, so the blocks can be consumed
        # and released in a single turn. I removed this for simplicity.
        # Reconsider the removal: maybe bring it back.
        ds = self._download_status

        for (start, length) in ask:
            # TODO: quantize to reasonably-large blocks
            self._pending.add(start, length)
            lp = log.msg(format="%(share)s._send_request"
                         " [%(start)d:+%(length)d]",
                         share=repr(self),
                         start=start, length=length,
                         level=log.NOISY, parent=self._lp, umid="sgVAyA")
            block_ev = ds.add_block_request(self._server, self._shnum,
                                            start, length, now())
            d = self._send_request(start, length)
            d.addCallback(self._got_data, start, length, block_ev, lp)
            d.addErrback(self._got_error, start, length, block_ev, lp)
            d.addCallback(self._trigger_loop)
            d.addErrback(lambda f:
                         log.err(format="unhandled error during send_request",
                                 failure=f, parent=self._lp,
                                 level=log.WEIRD, umid="qZu0wg"))
Example #19
 def _load_announcements(self):
     try:
         with self._cache_filepath.open() as f:
             servers = yamlutil.safe_load(f)
     except EnvironmentError:
         return # no cache file
     if not isinstance(servers, list):
         log.err(InvalidCacheError("not a list"), level=log.WEIRD)
         return
     self.log("Using server data from cache", level=log.UNUSUAL)
     for server_params in servers:
         if not isinstance(server_params, dict):
             log.err(InvalidCacheError("not a dict: %r" % (server_params,)),
                     level=log.WEIRD)
             continue
         self._deliver_announcements(server_params['key_s'],
                                     server_params['ann'])
Example #20
 def _load_announcements(self):
     try:
         with self._cache_filepath.open() as f:
             servers = yamlutil.safe_load(f)
     except EnvironmentError:
         return # no cache file
     if not isinstance(servers, list):
         log.err(InvalidCacheError("not a list"), level=log.WEIRD)
         return
     self.log("Using server data from cache", level=log.UNUSUAL)
     for server_params in servers:
         if not isinstance(server_params, dict):
             log.err(InvalidCacheError("not a dict: %r" % (server_params,)),
                     level=log.WEIRD)
             continue
         # everything coming from yamlutil.safe_load is unicode
         key_s = server_params['key_s'].encode("ascii")
         self._deliver_announcements(key_s, server_params['ann'])
Example #21
    def _process_deque(self):
        self._log("_process_deque %r" % (self._deque,))
        # process everything currently in the queue. we're turning it
        # into a list so that if any new items get added while we're
        # processing, they'll not run until next time
        to_process = list(self._deque)
        self._deque.clear()
        self._count('objects_queued', -len(to_process))

        self._log("%d items to process" % len(to_process), )
        for item in to_process:
            self._process_history.appendleft(item)
            try:
                self._log("  processing '%r'" % (item,))
                proc = yield self._process(item)
                self._log("  done: %r" % proc)
            except Exception as e:
                log.err("processing '%r' failed: %s" % (item, e))
                proc = None  # actually in old _lazy_tail way, proc would be Failure
            # XXX can we just get rid of the hooks now?
            yield self._call_hook(proc, 'processed')
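In the surrounding class this generator is driven in the inlineCallbacks style, so the try/except around yield self._process(item) catches both synchronous and asynchronous failures of that item. A reduced, self-contained sketch under that assumption; process and the sample items are invented.

from twisted.internet import defer, task
from twisted.python import log

@defer.inlineCallbacks
def process_deque(items, process):
    for item in items:
        try:
            result = yield defer.maybeDeferred(process, item)
            print("done:", result)
        except Exception:
            # a failing item is logged and skipped, not allowed to abort the run
            log.err(None, "processing %r failed" % (item,))

def process(item):
    if item == "bad":
        raise ValueError(item)
    return item.upper()

task.react(lambda reactor: process_deque(["ok", "bad", "also-ok"], process))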
Example #22
 def _load_announcements(self):
     # Announcements contain unicode, because they come from JSON. We tell
     # PyYAML to give us unicode instead of str/bytes.
     def construct_unicode(loader, node):
         return node.value
     yaml.SafeLoader.add_constructor("tag:yaml.org,2002:str",
                                     construct_unicode)
     try:
         with self._cache_filepath.open() as f:
             servers = yaml.safe_load(f)
     except EnvironmentError:
         return # no cache file
     if not isinstance(servers, list):
         log.err(InvalidCacheError("not a list"), level=log.WEIRD)
         return
     self.log("Using server data from cache", level=log.UNUSUAL)
     for server_params in servers:
         if not isinstance(server_params, dict):
             log.err(InvalidCacheError("not a dict: %r" % (server_params,)),
                     level=log.WEIRD)
             continue
         self._deliver_announcements(server_params['key_s'],
                                     server_params['ann'])
Example #23
File: node.py Project: ducki2p/tahoe-lafs
 def process_blocks(self, segnum, blocks):
     d = defer.maybeDeferred(self._decode_blocks, segnum, blocks)
     d.addCallback(self._check_ciphertext_hash, segnum)
     def _deliver(result):
         ds = self._download_status
         if isinstance(result, Failure):
             ds.add_segment_error(segnum, now())
         else:
             (offset, segment, decodetime) = result
             ds.add_segment_delivery(segnum, now(),
                                     offset, len(segment), decodetime)
         log.msg(format="delivering segment(%(segnum)d)",
                 segnum=segnum,
                 level=log.OPERATIONAL, parent=self._lp,
                 umid="j60Ojg")
         for (d,c) in self._extract_requests(segnum):
             eventually(self._deliver, d, c, result)
         self._active_segment = None
         self._start_new_segment()
     d.addBoth(_deliver)
     d.addErrback(lambda f:
                  log.err("unhandled error during process_blocks",
                          failure=f, level=log.WEIRD,
                          parent=self._lp, umid="MkEsCg"))
Example #24
 def _got_error(self, f):
     if f.check(DeadReferenceError):
         return
     log.err(f, parent=self._logparent)
Example #25
    def _thread(self):
        try:
            _assert(self._filter is not None, "no watch set")

            # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
            # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.

            fni = FileNotifyInformation()

            while True:
                self._state = STARTED
                action = start_action(
                    action_type=u"read-changes",
                    directory=self._path.path,
                    recursive=self._recursive,
                    filter=self._filter,
                )
                try:
                    with action:
                        fni.read_changes(self._hDirectory, self._recursive, self._filter)
                except WindowsError as e:
                    self._state = STOPPING

                if self._check_stop():
                    return
                for info in fni:
                    path = self._path.preauthChild(info.filename)  # FilePath with Unicode path
                    if info.action == FILE_ACTION_MODIFIED and path.isdir():
                        Message.log(
                            message_type=u"filtering-out",
                            info=repr(info),
                        )
                        continue
                    else:
                        Message.log(
                            message_type=u"processing",
                            info=repr(info),
                        )
                    #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)

                    @log_call(
                        action_type=MAYBE_NOTIFY.action_type,
                        include_args=[],
                        include_result=False,
                    )
                    def _do_pending_calls():
                        event_mask = IN_CHANGED
                        self._pending_call = None
                        for path1 in self._pending:
                            if self._callbacks:
                                for cb in self._callbacks:
                                    try:
                                        with CALLBACK(inotify_events=event_mask):
                                            cb(None, path1, event_mask)
                                    except Exception as e2:
                                        log.err(e2)
                        self._pending = set()

                    def _maybe_notify(path2):
                        if path2 not in self._pending:
                            self._pending.add(path2)
                        if self._state not in [STOPPING, STOPPED]:
                            _do_pending_calls()
#                        if self._pending_call is None and self._state not in [STOPPING, STOPPED]:
#                            self._pending_call = reactor.callLater(self._pending_delay, _do_pending_calls)

                    reactor.callFromThread(_maybe_notify, path)
                    if self._check_stop():
                        return
        except Exception as e:
            log.err(e)
            self._state = STOPPED
            raise
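This variant mixes Twisted's log.err with Eliot actions (start_action, Message.log, and the CALLBACK/MAYBE_NOTIFY action types defined elsewhere in the module). The Eliot side, reduced to something self-contained, looks roughly like the sketch below; the action type, directory, and file names are invented for illustration.

import sys
from eliot import start_action, Message, to_file

to_file(sys.stdout)  # write Eliot's JSON log messages to stdout

def read_changes():
    # stand-in for fni.read_changes(); the real call blocks on the OS
    return [u"docs/readme.txt", u"build/"]

with start_action(action_type=u"read-changes", directory=u"C:\\watched"):
    changed = read_changes()

for path in changed:
    if path.endswith(u"/"):
        Message.log(message_type=u"filtering-out", info=repr(path))
        continue
    Message.log(message_type=u"processing", info=repr(path))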
Example #26
 def _got_error(self, f):
     if f.check(DeadReferenceError):
         return
     log.err(f, parent=self._logparent)
     pass