def got_response_negotiating(self, stream, request, response):
    """Handle the server's reply to the '/negotiate/bittorrent' request.

    Either we are still queued (and we negotiate again on the same
    stream) or we have been unchoked (and we start the real test).
    """
    m = json.loads(response.body.read())
    # Mirror each advertised property into self.conf under a
    # leading-underscore key, so later phases can read it back.
    PROPERTIES = ("authorization", "queue_pos", "real_address", "unchoked")
    for k in PROPERTIES:
        self.conf["_%s" % k] = m[k]
    if not self.conf["_unchoked"]:
        # Still queued: publish our position and send another
        # negotiate request over the same connection until the
        # server unchokes us.
        LOG.complete("done (queue_pos %d)" % m["queue_pos"])
        STATE.update("negotiate", {"queue_pos": m["queue_pos"]})
        self.connection_ready(stream)
    else:
        LOG.complete("done (unchoked)")
        # Our BitTorrent peer ID is the SHA1 digest of the
        # authorization token handed out by the server.
        sha1 = hashlib.sha1()
        sha1.update(m["authorization"])
        self.conf["bittorrent.my_id"] = sha1.digest()
        LOG.debug("* My ID: %s" % sha1.hexdigest())
        # Keep the HTTP stream around (presumably reused later for
        # the collect phase — TODO confirm against the collect code).
        self.http_stream = stream
        self.negotiating = False
        # Spawn the peer that runs the actual BitTorrent test; it
        # connects back to the same address the HTTP stream talks to.
        peer = PeerNeubot(self.poller)
        peer.complete = self.peer_test_complete
        peer.connection_lost = self.peer_connection_lost
        peer.connection_failed = self.peer_connection_failed
        peer.configure(self.conf)
        peer.connect((self.http_stream.peername[0],
                      self.conf["bittorrent.port"]))
def got_response_collecting(self, stream, request, response):
    """Process the reply to the collect request, then close the stream."""
    LOG.complete()
    if self.success:
        #
        # Always measure at the receiver because there is more
        # information at the receiver and also to make my friend
        # Enrico happier :-P.
        # The following is not a bug: it's just that the server
        # returns a result using the point of view of the client,
        # i.e. upload_speed is _our_ upload speed.
        #
        result = json.loads(response.body.read())
        upload_speed = result["upload_speed"]
        self.my_side["upload_speed"] = upload_speed
        STATE.update("test_upload", utils.speed_formatter(upload_speed))
        # Save the result only if privacy permissions allow it
        if privacy.collect_allowed(self.my_side):
            table_bittorrent.insert(DATABASE.connection(), self.my_side)
        # Update the upstream channel estimate
        new_target = int(result["target_bytes"])
        if new_target > 0:
            estimate.UPLOAD = new_target
    stream.close()
def got_not_interested(self, stream):
    """Handle NOT_INTERESTED: the peer finished downloading from us."""
    # This message is valid only while we are uploading
    if self.state != UPLOADING:
        raise RuntimeError("NOT_INTERESTED when state != UPLOADING")
    LOG.complete()
    if not self.connector_side:
        # Listener side: roles swap and now we download
        self.state = SENT_INTERESTED
        stream.send_interested()
    else:
        # Connector side: the whole test is over, report results
        # and tear down the connection
        self.complete(stream, self.dload_speed, self.rtt,
                      self.target_bytes)
        stream.close()
def got_response(self, stream, request, response):
    """Handle the rendezvous response: advertise updates, pick the
    next test via the runner, and start it if allowed.

    Every failure path reschedules the next rendezvous attempt.
    """
    # Flattened to guard clauses: the original nested each error
    # case in an else-pyramid while only one branch used `return`.
    if response.code != "200":
        LOG.complete("bad response")
        self._schedule()
        return
    LOG.complete()
    body = response.body.read()
    try:
        m1 = marshal.unmarshal_object(body, "application/json",
                                      compat.RendezvousResponse)
    except ValueError:
        LOG.exception()
        self._schedule()
        return
    # Advertise a software update, when the server says there is one
    if "version" in m1.update and "uri" in m1.update:
        ver, uri = m1.update["version"], m1.update["uri"]
        LOG.info("Version %s available at %s" % (ver, uri))
        STATE.update("update", {"version": ver, "uri": uri})
        _open_browser_on_windows("update.html")
    # Update tests known by the runner
    runner_lst.update(m1.available)
    #
    # Choose the test we would like to run even if
    # we're not going to run it because we're running
    # in debug mode or tests are disabled.
    # This allows us to print to the logger the test
    # we /would/ have choosen if we were allowed to run
    # it.
    #
    test = runner_lst.get_next_test()
    if not test:
        LOG.warning("No test available")
        self._schedule()
        return
    LOG.info("* Chosen test: %s" % test)
    # Are we allowed to run a test?
    if not CONFIG["enabled"] or CONFIG["rendezvous.client.debug"]:
        LOG.info("Tests are disabled... not running")
        self._schedule()
        return
    # Do we have negotiate URI for test?
    negotiate_uri = runner_lst.test_to_negotiate_uri(test)
    if not negotiate_uri:
        LOG.warning("No negotiate URI for test")
        self._schedule()
        return
    # Actually run the test
    runner_core.run(test, negotiate_uri, self._schedule)
def connection_ready(self, stream):
    """Send the negotiate request once the connection is established."""
    LOG.complete()
    STATE.update("negotiate")
    LOG.start("BitTorrent: negotiating")
    # Tell the server how many bytes we plan to upload
    payload = json.dumps({"target_bytes":
                          self.conf["bittorrent.bytes.up"]})
    message = Message()
    message.compose(method="GET",
                    pathquery="/negotiate/bittorrent",
                    host=self.host_header,
                    body=payload,
                    mimetype="application/json")
    # Empty on the first round; filled once the server assigns one
    message["authorization"] = self.conf.get("_authorization", "")
    stream.send_request(message)
def connection_made(self, sock, rtt=0):
    """Attach a BitTorrent stream to the freshly connected socket."""
    if rtt:
        # The connect() time doubles as our latency estimate
        latency = utils.time_formatter(rtt)
        LOG.complete("done, %s" % latency)
        STATE.update("test_latency", latency)
        self.rtt = rtt
    stream = StreamBitTorrent(self.poller)
    if self.connector_side:
        owner = self
    else:
        # Use self.__class__() rather than a hard-coded class so
        # that subclasses of PeerNeubot spawn peers of their own kind.
        owner = self.__class__(self.poller)
        owner.configure(self.conf)
    stream.attach(owner, sock, owner.conf)
    stream.watchdog = self.conf["bittorrent.watchdog"]
# Self-test for the logger's in-progress feature.
# Fix: the original contained two `if __name__ == "__main__":` guards
# (one for the imports, a redundant duplicate for the test body);
# they are merged into a single guard here.
if __name__ == "__main__":
    sys.path.insert(0, ".")

    from neubot.log import LOG
    from neubot.log import _log_info
    from neubot import compat

    # Make sure the hackish name substitution works
    assert logging.info == _log_info

    LOG.start("Testing the in-progress feature")
    LOG.progress("...")
    LOG.progress()
    LOG.complete("success!")

    logging.info("INFO w/ logging.info")
    # The following should work because it should not interpolate
    logging.debug("DEBUG w/ logging.debug", "ciao")
    logging.warning("WARNING w/ logging.warning")
    logging.error("ERROR w/ logging.error")

    LOG.verbose()
    logging.info("INFO w/ logging.info")
    logging.debug("DEBUG w/ logging.debug")
    logging.warning("WARNING w/ logging.warning")
    logging.error("ERROR w/ logging.error")

    LOG.error("testing neubot logger -- This is an error message")
def got_response(self, stream, request, response):
    """Handle the rendezvous response: advertise updates, alternate
    between the available tests, and run the chosen one if allowed.

    Every failure path reschedules the next rendezvous attempt.
    """
    if response.code != "200":
        LOG.complete("bad response")
        self._schedule()
        return
    LOG.complete()
    s = response.body.read()
    try:
        m1 = marshal.unmarshal_object(s, "application/json",
                                      compat.RendezvousResponse)
    except ValueError:
        LOG.exception()
        self._schedule()
        return
    # Advertise a software update, when the server says there is one
    if "version" in m1.update and "uri" in m1.update:
        ver, uri = m1.update["version"], m1.update["uri"]
        LOG.info("Version %s available at %s" % (ver, uri))
        STATE.update("update", {"version": ver, "uri": uri})
    #
    # Choose the test we would like to run even if
    # we're not going to run it because we're running
    # in debug mode or tests are disabled.
    # This allows us to print to the logger the test
    # we /would/ have choosen if we were allowed to run
    # it.
    #
    tests = []
    if "speedtest" in m1.available:
        tests.append("speedtest")
    if "bittorrent" in m1.available:
        tests.append("bittorrent")
    # Fix: the server may advertise no tests at all; the original
    # would crash in random.choice([]) in that case.
    if not tests:
        LOG.warning("No test available")
        self._schedule()
        return
    # Alternate the two tests.  Fix: remove the previous test only
    # when it is actually in the list AND at least one alternative
    # remains; the original unconditional remove() raised ValueError
    # (test no longer advertised) or emptied the list and made
    # random.choice() raise IndexError (single test advertised).
    if self._latest in tests and len(tests) > 1:
        tests.remove(self._latest)
    test = random.choice(tests)
    self._latest = test
    LOG.info("* Chosen test: %s" % test)
    # Are we allowed to run a test?
    if not CONFIG["enabled"] or CONFIG["rendezvous.client.debug"]:
        LOG.info("Tests are disabled... not running")
        self._schedule()
        return
    if (CONFIG["privacy.informed"] and not CONFIG["privacy.can_collect"]):
        LOG.warning("cannot run test without permission "
                    "to save the results")
        self._schedule()
        return
    conf = self.conf.copy()
    #
    # Subscribe _before_ connecting. This way we
    # immediately see "testdone" if the connection fails
    # and we can _schedule the next attempt.
    #
    NOTIFIER.subscribe("testdone", lambda *a, **kw: \
        self._schedule())
    if test == "speedtest":
        conf["speedtest.client.uri"] = m1.available[
          "speedtest"][0]
        client = ClientSpeedtest(POLLER)
        client.configure(conf)
        client.connect_uri()
    elif test == "bittorrent":
        conf["bittorrent._uri"] = m1.available[
          "bittorrent"][0]
        bittorrent.run(POLLER, conf)
    else:
        NOTIFIER.publish("testdone")
def update(self):
    """Drive the speedtest state machine one step forward.

    Called when the previous phase has made progress; decides
    whether to stay in the current phase, move to the next one,
    or finish, and then (re)starts the proper child client on the
    streams we own.
    """
    if self.finished:
        return
    #
    # Decide whether we can transition to the next phase of
    # the speedtest or not. Fall through to next request if
    # needed, or return to the caller and rewind the stack.
    #
    ostate = self.state
    if not self.state:
        # First invocation: start negotiating and reset the
        # recorded queue positions.
        self.state = "negotiate"
        del QUEUE_HISTORY[:]
    elif self.state == "negotiate":
        if self.conf.get("speedtest.client.unchoked", False):
            LOG.complete("authorized to take the test\n")
            self.state = "latency"
        elif "speedtest.client.queuepos" in self.conf:
            # Still queued: advertise the position and negotiate
            # again (state is unchanged).
            queuepos = self.conf["speedtest.client.queuepos"]
            LOG.complete("waiting in queue, pos %s\n" % queuepos)
            STATE.update("negotiate", {"queue_pos": queuepos})
            QUEUE_HISTORY.append(queuepos)
    elif self.state == "latency":
        tries = self.conf.get("speedtest.client.latency_tries", 10)
        if tries == 0:
            # Calculate average latency
            latency = self.conf["speedtest.client.latency"]
            latency = sum(latency) / len(latency)
            self.conf["speedtest.client.latency"] = latency
            # Advertise the result
            STATE.update("test_latency", utils.time_formatter(latency))
            LOG.complete("done, %s\n" % utils.time_formatter(latency))
            self.state = "download"
        else:
            self.conf["speedtest.client.latency_tries"] = tries - 1
    elif self.state in ("download", "upload"):
        # Proceed only when every connection has reported back;
        # each entry in the result list is (start, stop, bytes).
        if len(self.streams) == self.conf.get("speedtest.client.nconn", 1):
            # Calculate average speed
            speed = self.conf["speedtest.client.%s" % self.state]
            elapsed = (max(map(lambda t: t[1], speed)) -
                       min(map(lambda t: t[0], speed)))
            speed = sum(map(lambda t: t[2], speed)) / elapsed
            LOG.progress(".[%s,%s]." % (utils.time_formatter(elapsed),
                                        utils.speed_formatter(speed)))
            #
            # O(N) loopless adaptation to the channel w/ memory
            # TODO bittorrent/peer.py implements an enhanced version
            # of this algorithm, with a cap to the max number of
            # subsequent tests. In addition to that, the bittorrent
            # code also anticipates the update of target_bytes.
            #
            if elapsed > LO_THRESH:
                # Run was long enough: accept the measurement and
                # move to the next phase.
                ESTIMATE[self.state] *= TARGET/elapsed
                self.conf["speedtest.client.%s" % self.state] = speed
                # Advertise
                STATE.update("test_%s" % self.state,
                             utils.speed_formatter(speed))
                LOG.complete("done, %s\n" % utils.speed_formatter(speed))
                if self.state == "download":
                    self.state = "upload"
                else:
                    self.state = "collect"
            elif elapsed > LO_THRESH/3:
                # Too short: discard the result and retry the same
                # phase with a proportionally bigger estimate.
                del self.conf["speedtest.client.%s" % self.state]
                ESTIMATE[self.state] *= TARGET/elapsed
            else:
                # Way too short: discard and simply double the
                # estimate to avoid overshooting from a tiny sample.
                del self.conf["speedtest.client.%s" % self.state]
                ESTIMATE[self.state] *= 2
        else:
            # Wait for all pending requests to complete
            return
    elif self.state == "collect":
        LOG.complete()
        self.cleanup()
        return
    else:
        raise RuntimeError("Invalid state")
    #
    # Perform state transition and run the next phase of the
    # speedtest. Not all phases need to employ all the connection
    # with the upstream server.
    #
    if self.state == "negotiate":
        ctor, justone = ClientNegotiate, True
    elif self.state == "latency":
        ctor, justone = ClientLatency, True
    elif self.state == "download":
        ctor, justone = ClientDownload, False
    elif self.state == "upload":
        ctor, justone = ClientUpload, False
    elif self.state == "collect":
        ctor, justone = ClientCollect, True
    else:
        raise RuntimeError("Invalid state")
    if ostate != self.state:
        # Entering a new phase: build and configure the child
        # client that implements it.
        self.child = ctor(self.poller)
        self.child.configure(self.conf)
        self.child.host_header = self.host_header
        if self.state not in ("negotiate", "collect"):
            if ostate == "negotiate" and self.state == "latency":
                # Test is starting: blank out the published results
                STATE.update("test_latency", "---", publish=False)
                STATE.update("test_download", "---", publish=False)
                STATE.update("test_upload", "---", publish=False)
                STATE.update("test", "speedtest")
            else:
                STATE.update(self.state)
            LOG.start("* speedtest: %s" % self.state)
        elif self.state == "negotiate":
            LOG.start("* speedtest: %s" % self.state)
    while self.streams:
        #
        # Override child Time-To-Connect (TTC) with our TTC
        # so for the child it's like it really performed the
        # connect(), not us.
        #
        self.child.rtts = self.rtts
        self.child.connection_ready(self.streams.popleft())
        if justone:
            break
def connection_failed(self, connector, exception):
    """Handle a failed connection attempt: log the error and
    publish TESTDONE so the caller can move on."""
    LOG.complete("failure (error: %s)" % str(exception))
    NOTIFIER.publish(TESTDONE)
def got_piece(self, stream, index, begin, block):
    """Handle a PIECE message while we are downloading.

    Requests the next piece (keeping the pipeline full) and, when
    the pipeline drains, computes the download speed and decides
    whether to stop or to run another, longer round.
    """
    if self.state != DOWNLOADING:
        raise RuntimeError("PIECE when state != DOWNLOADING")
    # Start measuring
    if not self.saved_ticks:
        self.saved_bytes = stream.bytes_recv_tot
        self.saved_ticks = utils.ticks()
    # Get next piece
    try:
        vector = self.sched_req.next()
    except StopIteration:
        vector = None
    if vector:
        # Send next piece
        index, begin, length = vector[0]
        stream.send_request(index, begin, length)
    else:
        #
        # No more pieces: Wait for the pipeline to empty
        #
        # TODO Check whether it's better to stop the measurement
        # when the pipeline starts emptying instead of when it
        # becomes empty (maybe it is reasonable to discard when
        # it fills and when it empties, isn't it?)
        #
        self.inflight -= 1
        if self.inflight == 0:
            xfered = stream.bytes_recv_tot - self.saved_bytes
            elapsed = utils.ticks() - self.saved_ticks
            speed = xfered/elapsed
            LOG.complete("%s" % utils.speed_formatter(speed))
            #
            # Make sure that next test would take about
            # TARGET secs, under current conditions.
            # We're a bit conservative when the elapsed
            # time is small because there is the risk of
            # overestimating the available bandwith.
            # TODO Don't start from scratch but use speedtest
            # estimate (maybe we need to divide it by two
            # but I'm not sure at the moment).
            #
            if elapsed >= LO_THRESH/3:
                self.target_bytes = int(self.target_bytes *
                                        TARGET/elapsed)
            else:
                self.target_bytes *= 2
            #
            # The stopping rule is when the test has run
            # for more than LO_THRESH seconds or after some
            # number of runs (just to be sure that we can
            # not run forever due to unexpected network
            # conditions).
            #
            self.repeat -= 1
            if elapsed > LO_THRESH or self.repeat <= 0:
                # Done downloading: record the speed and tell the
                # peer we are no longer interested.
                self.dload_speed = speed
                self.state = SENT_NOT_INTERESTED
                stream.send_not_interested()
                if not self.connector_side:
                    self.complete(stream, self.dload_speed, self.rtt,
                                  self.target_bytes)
                else:
                    download = utils.speed_formatter(self.dload_speed)
                    STATE.update("test_download", download)
            else:
                # Run another round: reset the measurement, build a
                # new schedule and restart as if just unchoked.
                self.saved_ticks = 0
                self.make_sched()
                self.state = SENT_INTERESTED
                #XXX
                self.got_unchoke(stream)
        elif self.inflight < 0:
            raise RuntimeError("Inflight became negative")
def got_bitfield(self, b):
    """Handle the BITFIELD message received from the remote peer."""
    LOG.complete()
    # Remember which pieces the peer claims to have
    self.peer_bitfield = Bitfield(self.numpieces, b)