Example #1
 def subscribe_to(self, service_name, cb, *args, **kwargs):
     self._local_subscribers.append( (service_name,cb,args,kwargs) )
     self._subscribed_service_names.add(service_name)
     self._maybe_subscribe()
     for (servicename,nodeid),ann_d in self._current_announcements.items():
         if servicename == service_name:
             eventually(cb, nodeid, ann_d)
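All of these snippets use Foolscap's eventual-send primitive: eventually(cb, *args, **kwargs) from foolscap.eventual queues the call and returns immediately, and the reactor runs cb(*args, **kwargs) in a later turn, after the current call stack has unwound. That is why Example #1 can invoke a just-registered subscriber callback without reentrancy: the callback fires only after subscribe_to() has returned. A minimal, runnable sketch of this ordering guarantee (the hello function is hypothetical, not part of the examples):

 from twisted.internet import reactor
 from foolscap.eventual import eventually

 def hello(name):
     # runs in a later reactor turn, after the lines below
     print("hello,", name)
     reactor.stop()

 eventually(hello, "world")   # queued, not called yet
 print("this prints first")
 reactor.run()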
Example #2
 def _deliver(result):
     log.msg(format="delivering segment(%(segnum)d)",
             segnum=segnum,
             level=log.OPERATIONAL, parent=self._lp,
             umid="j60Ojg")
     when = now()
     if isinstance(result, Failure):
         # this catches failures in decode or ciphertext hash
         for (d,c,seg_ev) in self._extract_requests(segnum):
             seg_ev.error(when)
             eventually(self._deliver, d, c, result)
     else:
         (offset, segment, decodetime) = result
         for (d,c,seg_ev) in self._extract_requests(segnum):
             # when we have two requests for the same segment, the
             # second one will not be "activated" before the data is
             # delivered, so to allow the status-reporting code to see
             # consistent behavior, we activate them all now. The
             # SegmentEvent will ignore duplicate activate() calls.
             # Note that this will result in an inaccurate "receive
             # speed" for the second request.
             seg_ev.activate(when)
             seg_ev.deliver(when, offset, len(segment), decodetime)
             eventually(self._deliver, d, c, result)
     self._active_segment = None
     self._start_new_segment()
Example #3
 def resumeProducing(self):
     self._hungry = True
     eventually(self._maybe_fetch_next)
     if self._start_pause is not None:
         paused = now() - self._start_pause
         self._read_ev.update(0, 0, paused)
         self._start_pause = None
Example #4
 def _got_ref(self, rref, arg, kw):
     self.failUnlessEqual(self.attached, False)
     self.attached = True
     self.failUnlessEqual(arg, "arg")
     self.failUnlessEqual(kw, "kwarg")
     ri = self.rc.getReconnectionInfo()
     self.assertEqual(ri.state, "connected")
     time2 = time.time()
     last = ri.lastAttempt
     self.assert_(self._time1 <= last <= time2, (self._time1, last, time2))
     ci = ri.connectionInfo
     self.assertEqual(ci.connected, True)
     hints = referenceable.SturdyRef(self.url).getTubRef().getLocations()
     expected_hint = hints[0]
     self.assertEqual(ci.winningHint, expected_hint)
     self.assertEqual(ci.listenerStatus, (None, None))
     self.assertEqual(ci.connectorStatuses, {expected_hint: "successful"})
     self.assertEqual(ci.connectionHandlers, {expected_hint: "tcp"})
     self.count += 1
     rref.notifyOnDisconnect(self._disconnected, self.count)
     if self.count < 2:
         # forcibly disconnect it
         eventually(rref.tracker.broker.transport.loseConnection)
     else:
         self.done.callback("done")
Example #5
 def add_shares(self, shares):
     # called when ShareFinder locates a new share, and when a non-initial
     # segment fetch is started and we already know about shares from the
     # previous segment
     self._shares.extend(shares)
     self._shares.sort(key=lambda s: (s._dyhb_rtt, s._shnum) )
     eventually(self.loop)
Example #6
    def _call_ESMTPSenderFactory_PGP(self, username, password, fromEmail, toEmail, f, d,
                                     retries=5, timeout=None, contextFactory=None, heloFallback=False,
                                     requireAuthentication=True, requireTransportSecurity=True):
        self.failUnlessEqual(username, self.SMTP_USERNAME)
        self.failUnlessEqual(password, self.SMTP_PASSWORD)
        self.failUnlessEqual(fromEmail, self.FROM_EMAIL)
        self.failUnlessEqual(toEmail, self.PGP_NOTIFICATION_EMAIL)
        f.seek(0, 0)
        # assume f can be read in one call
        message = f.read()
        assert f.read() == ''

        # although MIME specifies CRLF line endings, it is just LF at this point
        (headers, sep, body) = message.partition('\n\n')
        self.failUnlessEqual(sep, '\n\n')
        self.failUnlessIn('Message-ID: ', headers)
        self.failUnlessIn('Date: ', headers)
        self.failUnlessIn('Subject: ', headers)
        self.failUnlessIn('From: ', headers)
        self.failUnlessIn('To: ', headers)
        # FIXME: test for UTF-8
        self.failUnlessIn('Content-Type: text/plain', headers)
        self.failUnlessIn(self.CUSTOMER_NAME, body)
        #self.failUnlessIn('https://monitoring.leastauthority.com/', body)
        #self.failUnlessIn('/%s/' % (self.PUBIP,), body)
        #self.failUnlessIn('https://leastauthority.com/support', body)

        eventually(d.callback, None)
        return self.the_factory
Example #7
 def fetch_failed(self, sf, f):
     assert sf is self._active_segment
     # deliver error upwards
     for (d,c) in self._extract_requests(sf.segnum):
         eventually(self._deliver, d, c, f)
     self._active_segment = None
     self._start_new_segment()
Example #8
 def _deliver_shares(self, shares):
     # they will call hungry() again if they want more
     self._hungry = False
     shares_s = ",".join([str(sh) for sh in shares])
     self.log(format="delivering shares: %s" % shares_s,
              level=log.NOISY, umid="2n1qQw")
     eventually(self.share_consumer.got_shares, shares)
Example #9
    def _done(self):
        """
        I am called by _download_current_segment when the download process
        has finished successfully. After making some useful logging
        statements, I return the decrypted contents to the owner of this
        Retrieve object through self._done_deferred.
        """
        self._running = False
        self._status.set_active(False)
        now = time.time()
        self._status.timings['total'] = now - self._started
        self._status.timings['fetch'] = now - self._started_fetching
        self._status.set_status("Finished")
        self._status.set_progress(1.0)

        # remember the encoding parameters, use them again next time
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        self._node._populate_required_shares(k)
        self._node._populate_total_shares(N)

        if self._verify:
            ret = list(self._bad_shares)
            self.log("done verifying, found %d bad shares" % len(ret))
        else:
            # TODO: upload status here?
            ret = self._consumer
            self._consumer.unregisterProducer()
        eventually(self._done_deferred.callback, ret)
Example #10
 def subscribe_to(self, service_name, cb, *args, **kwargs):
     self._local_subscribers.append( (service_name,cb,args,kwargs) )
     self._subscribed_service_names.add(service_name)
     self._maybe_subscribe()
     for index,(ann,key_s,when) in self._inbound_announcements.items():
         servicename = index[0]
         if servicename == service_name:
             eventually(cb, key_s, ann, *args, **kwargs)
Example #11
 def _error(self, f):
     # all errors, including NotEnoughSharesError, land here
     self._running = False
     self._status.set_active(False)
     now = time.time()
     self._status.timings['total'] = now - self._started
     self._status.timings['fetch'] = now - self._started_fetching
     self._status.set_status("Failed")
     eventually(self._done_deferred.errback, f)
Example #12
    def _process_announcement(self, ann):
        self._debug_counts["inbound_announcement"] += 1
        (furl, service_name, ri_name, nickname_utf8, ver, oldest) = ann
        if service_name not in self._subscribed_service_names:
            self.log(
                "announcement for a service we don't care about [%s]" % (service_name,),
                level=log.UNUSUAL,
                umid="dIpGNA",
            )
            self._debug_counts["wrong_service"] += 1
            return
        self.log("announcement for [%s]: %s" % (service_name, ann), umid="BoKEag")
        assert type(furl) is str
        assert type(service_name) is str
        assert type(ri_name) is str
        assert type(nickname_utf8) is str
        nickname = nickname_utf8.decode("utf-8")
        assert type(nickname) is unicode
        assert type(ver) is str
        assert type(oldest) is str

        nodeid = b32decode(SturdyRef(furl).tubID.upper())
        nodeid_s = idlib.shortnodeid_b2a(nodeid)

        ann_d = {
            "version": 0,
            "service-name": service_name,
            "FURL": furl,
            "nickname": nickname,
            "app-versions": {},  # need #466 and v2 introducer
            "my-version": ver,
            "oldest-supported": oldest,
        }

        index = (service_name, nodeid)
        if self._current_announcements.get(index, None) == ann_d:
            self.log(
                "reannouncement for [%(service)s]:%(nodeid)s, ignoring",
                service=service_name,
                nodeid=nodeid_s,
                level=log.UNUSUAL,
                umid="B1MIdA",
            )
            self._debug_counts["duplicate_announcement"] += 1
            return
        if index in self._current_announcements:
            self._debug_counts["update"] += 1
        else:
            self._debug_counts["new_announcement"] += 1

        self._current_announcements[index] = ann_d
        # note: we never forget an index, but we might update its value

        for (service_name2, cb, args, kwargs) in self._local_subscribers:
            if service_name2 == service_name:
                eventually(cb, nodeid, ann_d, *args, **kwargs)
Example #13
 def _got_connection(self, rref):
     lp = log.msg(format="got connection to %(name)s, getting versions",
                  name=self.name(),
                  facility="tahoe.storage_broker", umid="coUECQ")
     if self._trigger_cb:
         eventually(self._trigger_cb)
     default = self.VERSION_DEFAULTS
     d = add_version_to_remote_reference(rref, default)
     d.addCallback(self._got_versioned_service, lp)
     d.addErrback(log.err, format="storageclient._got_connection",
                  name=self.name(), umid="Sdq3pg")
Example #14
 def _fire_readers(self):
     self._pending_timer = None
     pending = self._pending
     self._pending = {}
     for peerid in self._sequence:
         if peerid in pending:
             for (d, shares) in pending.pop(peerid):
                 eventually(d.callback, shares)
     for peerid in pending:
         for (d, shares) in pending[peerid]:
             eventually(d.callback, shares)
Example #15
 def _fire_readers(self):
     self._pending_timer = None
     pending = self._pending
     self._pending = {}
     for peerid in self._sequence:
         if peerid in pending:
             for (d, shares) in pending.pop(peerid):
                 eventually(d.callback, shares)
     for peerid in pending:
         for (d, shares) in pending[peerid]:
             eventually(d.callback, shares)
Example #16
 def _got_ref(self, rref, arg, kw):
     self.failUnlessEqual(self.attached, False)
     self.attached = True
     self.failUnlessEqual(arg, "arg")
     self.failUnlessEqual(kw, "kwarg")
     self.count += 1
     rref.notifyOnDisconnect(self._disconnected, self.count)
     if self.count < 2:
         # forcibly disconnect it
         eventually(rref.tracker.broker.transport.loseConnection)
     else:
         self.done.callback("done")
Example #17
 def __init__(self):
     service.MultiService.__init__(self)
     # we don't use time.clock() here, because the constructor is run by
     # the twistd parent process (as it loads the .tac file), whereas the
     # rest of the program will be run by the child process, after twistd
     # forks. Instead, set self.initial_cpu as soon as the reactor starts
     # up.
     self.initial_cpu = 0.0  # just in case
     eventually(self._set_initial_cpu)
     self.samples = []
     # we provide 1min, 5min, and 15min moving averages
     TimerService(self.POLL_INTERVAL, self.check).setServiceParent(self)
Example #18
    def _check_connected_high_water_mark(self):
        current = len(self.get_connected_servers())
        if current > self._connected_high_water_mark:
            self._connected_high_water_mark = current

        remaining = []
        for threshold, d in self._threshold_listeners:
            if self._connected_high_water_mark >= threshold:
                eventually(d.callback, None)
            else:
                remaining.append((threshold, d))
        self._threshold_listeners = remaining
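Example #41 (further down) shows where these (threshold, d) pairs come from: a storage_client.ConnectedEnough helper registers a Deferred that this method fires, via eventually, once enough servers are connected. A hedged sketch of the caller side, assuming that API (start_upload is a hypothetical placeholder):

    # wait until at least 3 storage servers are connected
    helper = storage_client.ConnectedEnough(sb, 3)
    d = helper.when_connected_enough()
    d.addCallback(lambda ign: start_upload())   # hypothetical next step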
Example #19
 def _got_connection(self, rref):
     lp = log.msg(format="got connection to %(name)s, getting versions",
                  name=self.get_name(),
                  facility="tahoe.storage_broker", umid="coUECQ")
     if self._trigger_cb:
         eventually(self._trigger_cb)
     default = self.VERSION_DEFAULTS
     d = add_version_to_remote_reference(rref, default)
     d.addCallback(self._got_versioned_service, lp)
     d.addCallback(lambda ign: self._on_status_changed.notify(self))
     d.addErrback(log.err, format="storageclient._got_connection",
                  name=self.get_name(), umid="Sdq3pg")
Example #20
 def _got_ref(self, rref, arg, kw):
     self.failUnlessEqual(self.attached, False)
     self.attached = True
     self.failUnlessEqual(arg, "arg")
     self.failUnlessEqual(kw, "kwarg")
     self.count += 1
     rref.notifyOnDisconnect(self._disconnected, self.count)
     if self.count < 2:
         # forcibly disconnect it
         eventually(rref.tracker.broker.transport.loseConnection)
     else:
         self.done.callback("done")
Example #21
    def _check_connected_high_water_mark(self):
        current = len(self.get_connected_servers())
        if current > self._connected_high_water_mark:
            self._connected_high_water_mark = current

        remaining = []
        for threshold, d in self._threshold_listeners:
            if self._connected_high_water_mark >= threshold:
                eventually(d.callback, None)
            else:
                remaining.append( (threshold, d) )
        self._threshold_listeners = remaining
Example #22
 def __init__(self):
     service.MultiService.__init__(self)
     # we don't use time.clock() here, because the constructor is run by
     # the twistd parent process (as it loads the .tac file), whereas the
     # rest of the program will be run by the child process, after twistd
     # forks. Instead, set self.initial_cpu as soon as the reactor starts
     # up.
     self.initial_cpu = 0.0 # just in case
     eventually(self._set_initial_cpu)
     self.samples = []
     # we provide 1min, 5min, and 15min moving averages
     TimerService(self.POLL_INTERVAL, self.check).setServiceParent(self)
Example #23
            def callRemote(self, methname, *args, **kwargs):
                d = defer.Deferred()

                # Even after the 3rd answer we're still hungry because
                # we're interested in finding a share on a 3rd server
                # so we don't have to download more than one share
                # from the first server. This is actually necessary to
                # trigger the bug.
                def _give_buckets_and_hunger_again():
                    d.callback(self.buckets)
                    self.s.hungry()
                eventually(_give_buckets_and_hunger_again)
                return d
Example #24
    def resumeProducing(self):
        """
        I am called by my download target once it is ready to begin
        receiving data again.
        """
        if self._pause_deferred is None:
            return

        p = self._pause_deferred
        self._pause_deferred = None
        self._status.set_status(self._old_status)

        eventually(p.callback, None)
Example #25
            def callRemote(self, methname, *args, **kwargs):
                d = defer.Deferred()

                # Even after the 3rd answer we're still hungry because
                # we're interested in finding a share on a 3rd server
                # so we don't have to download more than one share
                # from the first server. This is actually necessary to
                # trigger the bug.
                def _give_buckets_and_hunger_again():
                    d.callback(self.buckets)
                    self.s.hungry()
                eventually(_give_buckets_and_hunger_again)
                return d
Example #26
    def resumeProducing(self):
        """
        I am called by my download target once it is ready to begin
        receiving data again.
        """
        if self._pause_deferred is None:
            return

        p = self._pause_deferred
        self._pause_deferred = None
        self._status.set_status(self._old_status)

        eventually(p.callback, None)
Example #27
    def _process_announcement(self, ann, key_s):
        self._debug_counts["inbound_announcement"] += 1
        service_name = str(ann["service-name"])
        if service_name not in self._subscribed_service_names:
            self.log("announcement for a service we don't care about [%s]"
                     % (service_name,), level=log.UNUSUAL, umid="dIpGNA")
            self._debug_counts["wrong_service"] += 1
            return
        # for ASCII values, simplejson might give us unicode *or* bytes
        if "nickname" in ann and isinstance(ann["nickname"], str):
            ann["nickname"] = unicode(ann["nickname"])
        nick_s = ann.get("nickname",u"").encode("utf-8")
        lp2 = self.log(format="announcement for nickname '%(nick)s', service=%(svc)s: %(ann)s",
                       nick=nick_s, svc=service_name, ann=ann, umid="BoKEag")

        # how do we describe this node in the logs?
        desc_bits = []
        if key_s:
            desc_bits.append("serverid=" + key_s[:20])
        if "anonymous-storage-FURL" in ann:
            tubid_s = get_tubid_string_from_ann(ann)
            desc_bits.append("tubid=" + tubid_s[:8])
        description = "/".join(desc_bits)

        # the index is used to track duplicates
        index = make_index(ann, key_s)

        # is this announcement a duplicate?
        if (index in self._current_announcements
            and self._current_announcements[index][0] == ann):
            self.log(format="reannouncement for [%(service)s]:%(description)s, ignoring",
                     service=service_name, description=description,
                     parent=lp2, level=log.UNUSUAL, umid="B1MIdA")
            self._debug_counts["duplicate_announcement"] += 1
            return
        # does it update an existing one?
        if index in self._current_announcements:
            self._debug_counts["update"] += 1
            self.log("replacing old announcement: %s" % (ann,),
                     parent=lp2, level=log.NOISY, umid="wxwgIQ")
        else:
            self._debug_counts["new_announcement"] += 1
            self.log("new announcement[%s]" % service_name,
                     parent=lp2, level=log.NOISY)

        self._current_announcements[index] = (ann, key_s, time.time())
        # note: we never forget an index, but we might update its value

        for (service_name2,cb,args,kwargs) in self._local_subscribers:
            if service_name2 == service_name:
                eventually(cb, key_s, ann, *args, **kwargs)
Example #28
 def call_ESMTPSenderFactory(username,
                             password,
                             fromEmail,
                             toEmail,
                             f,
                             d,
                             retries=5,
                             timeout=None,
                             contextFactory=None,
                             heloFallback=False,
                             requireAuthentication=True,
                             requireTransportSecurity=True):
     eventually(d.callback, None)
     return Mock()
Example #29
 def startService(self):
     # Note: this class can be started and stopped at most once.
     self.log("Node.startService")
     # Record the process id in the twisted log, after startService()
     # (__init__ is called before fork(), but startService is called
     # after). Note that Foolscap logs handle pid-logging by itself, no
     # need to send a pid to the foolscap log here.
     twlog.msg("My pid: %s" % os.getpid())
     try:
         os.chmod("twistd.pid", 0644)
     except EnvironmentError:
         pass
     # Delay until the reactor is running.
     eventually(self._startService)
Example #30
    def _process_announcement(self, ann):
        self._debug_counts["inbound_announcement"] += 1
        (furl, service_name, ri_name, nickname_utf8, ver, oldest) = ann
        if service_name not in self._subscribed_service_names:
            self.log("announcement for a service we don't care about [%s]"
                     % (service_name,), level=log.UNUSUAL, umid="dIpGNA")
            self._debug_counts["wrong_service"] += 1
            return
        self.log("announcement for [%s]: %s" % (service_name, ann),
                 umid="BoKEag")
        assert type(furl) is str
        assert type(service_name) is str
        assert type(ri_name) is str
        assert type(nickname_utf8) is str
        nickname = nickname_utf8.decode("utf-8")
        assert type(nickname) is unicode
        assert type(ver) is str
        assert type(oldest) is str

        nodeid = b32decode(SturdyRef(furl).tubID.upper())
        nodeid_s = idlib.shortnodeid_b2a(nodeid)

        ann_d = { "version": 0,
                  "service-name": service_name,

                  "FURL": furl,
                  "nickname": nickname,
                  "app-versions": {}, # need #466 and v2 introducer
                  "my-version": ver,
                  "oldest-supported": oldest,
                  }

        index = (service_name, nodeid)
        if self._current_announcements.get(index, None) == ann_d:
            self.log("reannouncement for [%(service)s]:%(nodeid)s, ignoring",
                     service=service_name, nodeid=nodeid_s,
                     level=log.UNUSUAL, umid="B1MIdA")
            self._debug_counts["duplicate_announcement"] += 1
            return
        if index in self._current_announcements:
            self._debug_counts["update"] += 1
        else:
            self._debug_counts["new_announcement"] += 1

        self._current_announcements[index] = ann_d
        # note: we never forget an index, but we might update its value

        for (service_name2,cb,args,kwargs) in self._local_subscribers:
            if service_name2 == service_name:
                eventually(cb, nodeid, ann_d, *args, **kwargs)
Example #31
 def startService(self):
     # Note: this class can be started and stopped at most once.
     self.log("Node.startService")
     # Record the process id in the twisted log, after startService()
     # (__init__ is called before fork(), but startService is called
     # after). Note that Foolscap logs handle pid-logging by itself, no
     # need to send a pid to the foolscap log here.
     twlog.msg("My pid: %s" % os.getpid())
     try:
         os.chmod("twistd.pid", 0644)
     except EnvironmentError:
         pass
     # Delay until the reactor is running.
     eventually(self._startService)
Example #32
 def _deliver(result):
     ds = self._download_status
     if isinstance(result, Failure):
         ds.add_segment_error(segnum, now())
     else:
         (offset, segment, decodetime) = result
         ds.add_segment_delivery(segnum, now(),
                                 offset, len(segment), decodetime)
     log.msg(format="delivering segment(%(segnum)d)",
             segnum=segnum,
             level=log.OPERATIONAL, parent=self._lp,
             umid="j60Ojg")
     for (d,c) in self._extract_requests(segnum):
         eventually(self._deliver, d, c, result)
     self._active_segment = None
     self._start_new_segment()
Example #33
    def loop(self):
        pending_s = ",".join([
            ensure_str(rt.server.get_name()) for rt in self.pending_requests
        ])  # sort?
        self.log(format="ShareFinder loop: running=%(running)s"
                 " hungry=%(hungry)s, pending=%(pending)s",
                 running=self.running,
                 hungry=self._hungry,
                 pending=pending_s,
                 level=log.NOISY,
                 umid="kRtS4Q")
        if not self.running:
            return
        if not self._hungry:
            return

        non_overdue = self.pending_requests - self.overdue_requests
        if len(non_overdue) >= self.max_outstanding_requests:
            # cannot send more requests, must wait for some to retire
            return

        server = None
        try:
            if self._servers:
                server = next(self._servers)
        except StopIteration:
            self._servers = None

        if server:
            self.send_request(server)
            # we loop again to get parallel queries. The check above will
            # prevent us from looping forever.
            eventually(self.loop)
            return

        if self.pending_requests:
            # no server, but there are still requests in flight: maybe one of
            # them will make progress
            return

        self.log(format="ShareFinder.loop: no_more_shares, ever",
                 level=log.UNUSUAL,
                 umid="XjQlzg")
        # we've run out of servers (so we can't send any more requests), and
        # we have nothing in flight. No further progress can be made. They
        # are destined to remain hungry.
        eventually(self.share_consumer.no_more_shares)
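The eventually(self.loop) calls above are the eventual-send self-loop pattern: rather than recursing, loop() reschedules itself in a fresh reactor turn, which keeps the stack shallow, lets other reactor events interleave, and lets the guard clauses at the top stop the loop. A minimal sketch of the pattern, with hypothetical names:

    from foolscap.eventual import eventually

    class RequestPump:
        # sends one queued request per reactor turn until drained
        def __init__(self, jobs, send_request):
            self.jobs = list(jobs)
            self.send_request = send_request
        def loop(self):
            if not self.jobs:
                return                 # guard clause ends the loop
            self.send_request(self.jobs.pop(0))
            eventually(self.loop)      # next pass runs in a later turn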
Example #34
    def _done(self):
        if not self._running:
            return
        self._running = False
        now = time.time()
        elapsed = now - self._started
        self._status.set_finished(now)
        self._status.timings["total"] = elapsed
        self._status.set_progress(1.0)
        self._status.set_status("Finished")
        self._status.set_active(False)

        self._servermap.last_update_mode = self.mode
        self._servermap.last_update_time = self._started
        # the servermap will not be touched after this
        self.log("servermap: %s" % self._servermap.summarize_versions())
        eventually(self._done_deferred.callback, self._servermap)
Example #35
    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        ps = self.get_config("client", "peers.preferred", "").split(",")
        preferred_peers = tuple([p.strip() for p in ps if p != ""])
        sb = storage_client.StorageFarmBroker(permute_peers=True,
                                              preferred_peers=preferred_peers,
                                              tub_options=self.tub_options,
                                              tub_handlers=self.tub_handlers)
        self.storage_broker = sb
        sb.setServiceParent(self)

        # utilize the loaded static server specifications
        for key, server in self.connections_config['servers'].items():
            handlers = server.get("transport_handlers")
            eventually(self.storage_broker.got_static_announcement,
                       key, server['announcement'], handlers)

        sb.use_introducer(self.introducer_client)
Example #36
    def loop(self):
        pending_s = ",".join([idlib.shortnodeid_b2a(rt.peerid)
                              for rt in self.pending_requests]) # sort?
        self.log(format="ShareFinder loop: running=%(running)s"
                 " hungry=%(hungry)s, pending=%(pending)s",
                 running=self.running, hungry=self._hungry, pending=pending_s,
                 level=log.NOISY, umid="kRtS4Q")
        if not self.running:
            return
        if not self._hungry:
            return

        non_overdue = self.pending_requests - self.overdue_requests
        if len(non_overdue) >= self.max_outstanding_requests:
            # cannot send more requests, must wait for some to retire
            return

        server = None
        try:
            if self._servers:
                server = self._servers.next()
        except StopIteration:
            self._servers = None

        if server:
            self.send_request(server)
            # we loop again to get parallel queries. The check above will
            # prevent us from looping forever.
            eventually(self.loop)
            return

        if self.pending_requests:
            # no server, but there are still requests in flight: maybe one of
            # them will make progress
            return

        self.log(format="ShareFinder.loop: no_more_shares, ever",
                 level=log.UNUSUAL, umid="XjQlzg")
        # we've run out of servers (so we can't send any more requests), and
        # we have nothing in flight. No further progress can be made. They
        # are destined to remain hungry.
        eventually(self.share_consumer.no_more_shares)
Example #37
 def _done(self, res):
     if not self._running:
         return
     self._running = False
     self._status.set_active(False)
     self._status.timings["total"] = time.time() - self._started
     # res is either the new contents, or a Failure
     if isinstance(res, failure.Failure):
         self.log("Retrieve done, with failure", failure=res,
                  level=log.UNUSUAL)
         self._status.set_status("Failed")
     else:
         self.log("Retrieve done, success!")
         self._status.set_status("Finished")
         self._status.set_progress(1.0)
         # remember the encoding parameters, use them again next time
         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
          offsets_tuple) = self.verinfo
         self._node._populate_required_shares(k)
         self._node._populate_total_shares(N)
     eventually(self._done_deferred.callback, res)
Example #38
    def _call_ESMTPSenderFactory_non_PGP(self,
                                         username,
                                         password,
                                         fromEmail,
                                         toEmail,
                                         f,
                                         d,
                                         retries=5,
                                         timeout=None,
                                         contextFactory=None,
                                         heloFallback=False,
                                         requireAuthentication=True,
                                         requireTransportSecurity=True):
        self.failUnlessEqual(username, self.SMTP_USERNAME)
        self.failUnlessEqual(password, self.SMTP_PASSWORD)
        self.failUnlessEqual(fromEmail, self.FROM_EMAIL)
        self.failUnlessEqual(toEmail, self.CUSTOMER_EMAIL)
        f.seek(0, 0)
        # assume f can be read in one call
        message = f.read()
        assert f.read() == ''

        # although MIME specifies CRLF line endings, it is just LF at this point
        (headers, sep, body) = message.partition('\n\n')
        self.failUnlessEqual(sep, '\n\n')
        self.failUnlessIn('Message-ID: ', headers)
        self.failUnlessIn('Date: ', headers)
        self.failUnlessIn('Subject: ', headers)
        self.failUnlessIn('From: ', headers)
        self.failUnlessIn('To: ', headers)
        # FIXME: test for UTF-8
        self.failUnlessIn('Content-Type: text/plain', headers)
        self.failUnlessIn(self.CUSTOMER_NAME, body)
        self.failUnlessIn('https://leastauthority.com/howtoconfigure', body)
        self.failUnlessIn(self.EXTERNAL_INTRODUCER_FURL, body)
        self.failUnlessIn('%s' % (self.PUBIP, ), body)
        self.failUnlessIn('https://leastauthority.com/support', body)

        eventually(d.callback, None)
        return self.the_factory
Example #39
    def _block_request_activity(self, share, shnum, state, block=None, f=None):
        # called by Shares, in response to our s.send_request() calls.
        if not self._running:
            return
        log.msg("SegmentFetcher(%s)._block_request_activity: %s -> %s" %
                (self._node._si_prefix, repr(share), state),
                level=log.NOISY,
                parent=self._lp,
                umid="vilNWA")
        # COMPLETE, CORRUPT, DEAD, BADSEGNUM are terminal. Remove the share
        # from all our tracking lists.
        if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM):
            self._share_observers.pop(share, None)
            server = share._server  # XXX
            self._shares_from_server.discard(server, share)
            if self._active_share_map.get(shnum) is share:
                del self._active_share_map[shnum]
            self._overdue_share_map.discard(shnum, share)

        if state is COMPLETE:
            # 'block' is fully validated and complete
            self._blocks[shnum] = block

        if state is OVERDUE:
            # no longer active, but still might complete
            del self._active_share_map[shnum]
            self._overdue_share_map.add(shnum, share)
            # OVERDUE is not terminal: it will eventually transition to
            # COMPLETE, CORRUPT, or DEAD.

        if state is DEAD:
            self._last_failure = f
        if state is BADSEGNUM:
            # our main loop will ask the DownloadNode each time for the
            # number of segments, so we'll deal with this in the top of
            # _do_loop
            pass

        eventually(self.loop)
Example #40
 def _done(self, res):
     if not self._running:
         return
     self._running = False
     self._status.set_active(False)
     self._status.timings["total"] = time.time() - self._started
     # res is either the new contents, or a Failure
     if isinstance(res, failure.Failure):
         self.log("Retrieve done, with failure",
                  failure=res,
                  level=log.UNUSUAL)
         self._status.set_status("Failed")
     else:
         self.log("Retrieve done, success!")
         self._status.set_status("Finished")
         self._status.set_progress(1.0)
         # remember the encoding parameters, use them again next time
         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
          offsets_tuple) = self.verinfo
         self._node._populate_required_shares(k)
         self._node._populate_total_shares(N)
     eventually(self._done_deferred.callback, res)
Example #41
    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        ps = self.get_config("client", "peers.preferred", "").split(",")
        preferred_peers = tuple([p.strip() for p in ps if p != ""])
        sb = storage_client.StorageFarmBroker(permute_peers=True,
                                              preferred_peers=preferred_peers,
                                              tub_options=self.tub_options)
        self.storage_broker = sb
        sb.setServiceParent(self)

        connection_threshold = min(self.encoding_params["k"],
                                   self.encoding_params["happy"] + 1)
        helper = storage_client.ConnectedEnough(sb, connection_threshold)
        self.upload_ready_d = helper.when_connected_enough()

        # utilize the loaded static server specifications
        for key, server in self.connections_config['servers'].items():
            eventually(self.storage_broker.got_static_announcement,
                       key, server['announcement'])

        sb.use_introducer(self.introducer_client)
Example #42
 def _done(self, res):
     if not self._running:
         return
     self._running = False
     now = time.time()
     self._status.timings["total"] = now - self._started
     self._status.set_active(False)
     if isinstance(res, failure.Failure):
         self.log("Publish done, with failure", failure=res,
                  level=log.WEIRD, umid="nRsR9Q")
         self._status.set_status("Failed")
     elif self.surprised:
         self.log("Publish done, UncoordinatedWriteError", level=log.UNUSUAL)
         self._status.set_status("UncoordinatedWriteError")
         # deliver a failure
         res = failure.Failure(UncoordinatedWriteError())
         # TODO: recovery
     else:
         self.log("Publish done, success")
         self._status.set_status("Finished")
         self._status.set_progress(1.0)
     eventually(self.done_deferred.callback, res)
Example #43
    def _block_request_activity(self, share, shnum, state, block=None, f=None):
        # called by Shares, in response to our s.send_request() calls.
        if not self._running:
            return
        log.msg("SegmentFetcher(%s)._block_request_activity:"
                " Share(sh%d-on-%s) -> %s" %
                (self._node._si_prefix, shnum, share._peerid_s, state),
                level=log.NOISY, parent=self._lp, umid="vilNWA")
        # COMPLETE, CORRUPT, DEAD, BADSEGNUM are terminal. Remove the share
        # from all our tracking lists.
        if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM):
            self._share_observers.pop(share, None)
            self._shares_from_server.discard(shnum, share)
            if self._active_share_map.get(shnum) is share:
                del self._active_share_map[shnum]
            self._overdue_share_map.discard(shnum, share)

        if state is COMPLETE:
            # 'block' is fully validated and complete
            self._blocks[shnum] = block

        if state is OVERDUE:
            # no longer active, but still might complete
            del self._active_share_map[shnum]
            self._overdue_share_map.add(shnum, share)
            # OVERDUE is not terminal: it will eventually transition to
            # COMPLETE, CORRUPT, or DEAD.

        if state is DEAD:
            self._last_failure = f
        if state is BADSEGNUM:
            # our main loop will ask the DownloadNode each time for the
            # number of segments, so we'll deal with this in the top of
            # _do_loop
            pass

        eventually(self.loop)
Example #44
 def _do_serialized(self, cb, *args, **kwargs):
     # note: to avoid deadlock, this callable is *not* allowed to invoke
     # other serialized methods within this (or any other)
     # MutableFileNode. The callable should be a bound method of this same
     # MFN instance.
     d = defer.Deferred()
     self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
     # we need to put off d.callback until this Deferred is finished being
     # processed. Otherwise the caller's subsequent activities (like,
     # doing other things with this node) can cause reentrancy problems in
     # the Deferred code itself
     self._serializer.addBoth(lambda res: eventually(d.callback, res))
     # add a log.err just in case something really weird happens, because
     # self._serializer stays around forever, therefore we won't see the
     # usual Unhandled Error in Deferred that would give us a hint.
     self._serializer.addErrback(log.err)
     return d
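The _serializer in this example is a long-lived Deferred chain used as a run-one-at-a-time queue: each call appends its work, so the callables execute in FIFO order, and the final eventual-send hands the result to the caller in a later turn so the caller's own callbacks cannot re-enter the chain. A stripped-down sketch of the same pattern, assuming the chain head is a pre-fired Deferred (defer.succeed(None)); the class and method names are illustrative:

 from twisted.internet import defer
 from foolscap.eventual import eventually

 class Serialized:
     def __init__(self):
         self._serializer = defer.succeed(None)   # already-fired chain head

     def do_serialized(self, cb, *args, **kwargs):
         d = defer.Deferred()
         # queue the work; it runs after every previously queued callable
         self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
         # deliver the result (or Failure) in a later turn to avoid reentrancy
         self._serializer.addBoth(lambda res: eventually(d.callback, res))
         # safety net, standing in for the original's addErrback(log.err)
         self._serializer.addErrback(lambda f: None)
         return d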
Example #45
 def _do_serialized(self, cb, *args, **kwargs):
     # note: to avoid deadlock, this callable is *not* allowed to invoke
     # other serialized methods within this (or any other)
     # MutableFileNode. The callable should be a bound method of this same
     # MFN instance.
     d = defer.Deferred()
     self._serializer.addCallback(lambda ignore: cb(*args, **kwargs))
     # we need to put off d.callback until this Deferred is finished being
     # processed. Otherwise the caller's subsequent activities (like,
     # doing other things with this node) can cause reentrancy problems in
     # the Deferred code itself
     self._serializer.addBoth(lambda res: eventually(d.callback, res))
     # add a log.err just in case something really weird happens, because
     # self._serializer stays around forever, therefore we won't see the
     # usual Unhandled Error in Deferred that would give us a hint.
     self._serializer.addErrback(log.err)
     return d
Example #46
 def _deliver_announcements(self, key_s, ann):
     service_name = str(ann["service-name"])
     for (service_name2,cb,args,kwargs) in self._local_subscribers:
         if service_name2 == service_name:
             eventually(cb, key_s, ann, *args, **kwargs)
Example #47
    def _notify(self, opaque, path, events_mask):
        self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(
            self._inotify.humanReadableMask(events_mask))))

        self._stats_provider.count('drop_upload.files_queued', 1)
        eventually(self._process, opaque, path, events_mask)
Example #48
 def write(self, data):
     eventually(self._write, data)
Example #49
 def _notify(self, result_kwargs):
     o, watcher_kwargs = self._watcher
     kwargs = dict(result_kwargs)
     kwargs.update(watcher_kwargs)
     eventually(o, **kwargs)
Example #50
 def notify(self, *args, **kwargs):
     for o in self._watchers:
         eventually(o, *args, **kwargs)
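Examples #49, #50, and #51 (just below) are variants of an observer list that notifies through eventual-send, so a misbehaving observer, or one that subscribes or unsubscribes from inside a callback, cannot disturb the iteration or the caller. A tiny self-contained stand-in built around Example #50's notify (the class name and subscribe method are illustrative, not Foolscap's actual observer API):

 from foolscap.eventual import eventually

 class Observers:
     def __init__(self):
         self._watchers = []

     def subscribe(self, o):
         self._watchers.append(o)

     def notify(self, *args, **kwargs):
         # each observer is called in its own later reactor turn
         for o in self._watchers:
             eventually(o, *args, **kwargs)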
Example #51
 def _fire(self, result):
     for w in self._watchers:
         eventually(w.callback, result)
     del self._watchers
     self.__repr__ = self._fired_repr
Example #52
 def finished_cycle(self, cycle):
     eventually(self.finished_d.callback, None)
Example #53
 def _errback(res):
     eventually(_with_log, d.errback, res)
     return res
Example #54
 def overdue(self, req):
     del self.overdue_timers[req]
     assert req in self.pending_requests  # paranoia, should never be false
     self.overdue_requests.add(req)
     eventually(self.loop)
Example #55
 def add_reader(self, reader):
     AskUntilSuccessMixin.add_reader(self, reader)
     eventually(self._start)
Example #56
 def loseConnection(self, why=failure.Failure(CONNECTION_DONE)):
     assert isinstance(why, failure.Failure), why
     if self.connected:
         self.connected = False
         # this one is slightly weird because 'why' is a Failure
         eventually(self._loseConnection, why)
Example #57
 def _done(self, res, done_d):
     self.active -= 1
     eventually(done_d.callback, res)
     eventually(self.maybe_start_task)
Example #58
 def _deliver_announcements(self, key_s, ann):
     precondition(isinstance(key_s, bytes), key_s)
     service_name = str(ann["service-name"])
     for (service_name2, cb, args, kwargs) in self._local_subscribers:
         if service_name2 == service_name:
             eventually(cb, key_s, ann, *args, **kwargs)
Example #59
 def schedule_loop(self):
     if self._loop_scheduled:
         return
     self._loop_scheduled = True
     eventually(self.loop)
Example #60
 def _callback(res):
     eventually(_with_log, d.callback, res)
     return res