def got_response_collecting(self, stream, request, response):
    """ Invoked when the response to the COLLECT request arrives """
    logging.info("BitTorrent: collecting ... done")
    if self.success:
        #
        # Always measure at the receiver because there is more
        # information at the receiver and also to make my friend
        # Enrico happier :-P.
        # The following is not a bug: it's just that the server
        # returns a result using the point of view of the client,
        # i.e. upload_speed is _our_ upload speed.
        #
        m = json.loads(response.body.read())
        self.my_side["upload_speed"] = m["upload_speed"]
        upload = utils.speed_formatter(m["upload_speed"])
        STATE.update("test_progress", "100%", publish=False)
        STATE.update("test_upload", upload)
        logging.info('BitTorrent: upload speed: %s', upload)
        # Save the result only when privacy permissions allow it
        # and the database is writable
        if privacy.collect_allowed(self.my_side):
            if DATABASE.readonly:
                logging.warning('bittorrent_client: readonly database')
            else:
                table_bittorrent.insert(DATABASE.connection(), self.my_side)
        # Update the upstream channel estimate
        target_bytes = int(m["target_bytes"])
        if target_bytes > 0:
            estimate.UPLOAD = target_bytes
    # Mark the test as finished regardless of success
    self.final_state = True
    stream.close()
def _schedule_after(self, interval):
    ''' Arrange for the next rendezvous to run in `interval` seconds '''
    logging.info('background_rendezvous: next rendezvous in %d seconds',
                 interval)
    next_time = POLLER.sched(interval, self.run)
    # Quietly switch to idle, then publish the scheduled timestamp
    STATE.update('idle', publish=False)
    STATE.update('next_rendezvous', next_time)
def _start_collect(self, stream, result):
    ''' Start the COLLECT phase '''
    STATE.update('collect')
    logging.debug('raw_negotiate: collect in progress...')
    context = stream.opaque
    extra = context.extra
    # Save the local result; presumably merged later with the server
    # side result by the response handler -- confirm against caller
    extra['local_result'] = result
    body = six.b(json.dumps(result))
    host_header = utils_net.format_epnt((extra['address'], extra['port']))
    # Manually compose the POST /collect/raw request, header by header
    self.append_request(stream, 'POST', '/collect/raw', 'HTTP/1.1')
    self.append_header(stream, 'Host', host_header)
    self.append_header(stream, 'User-Agent', utils_version.HTTP_HEADER)
    self.append_header(stream, 'Content-Type', 'application/json')
    self.append_header(stream, 'Content-Length', str(len(body)))
    self.append_header(stream, 'Cache-Control', 'no-cache')
    self.append_header(stream, 'Pragma', 'no-cache')
    self.append_header(stream, 'Connection', 'close')
    if extra['authorization']:
        self.append_header(stream, 'Authorization', extra['authorization'])
    self.append_end_of_headers(stream)
    self.append_bytes(stream, body)
    http_utils.prettyprint_json(result, '>')
    self.send_message(stream)
    context.body = six.StringIO()  # Want to save body
    extra['requests'] += 1
def _api_config(self, stream, request, query):
    """ Implement the /api/config API: GET returns the configuration,
        POST merges updates into it """
    response = Message()
    # Default to compact JSON; ?debug=1 pretty-prints as text/plain
    indent, mimetype, sort_keys = None, "application/json", False
    dictionary = cgi.parse_qs(query)
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype, sort_keys = 4, "text/plain", True
    if request.method == "POST":
        s = request.body.read()
        updates = qs_to_dictionary(s)
        # Refuse updates that violate the privacy policy
        privacy.check(updates)
        # Very low barrier to prevent damage from kiddies
        if "agent.interval" in updates:
            interval = int(updates["agent.interval"])
            if interval < 1380 and interval != 0:
                raise ConfigError("Bad agent.interval")
        CONFIG.merge_api(updates, DATABASE.connection())
        STATE.update("config", updates)
        # Empty JSON b/c '204 No Content' is treated as an error
        s = "{}"
    else:
        s = json.dumps(CONFIG.conf, sort_keys=sort_keys, indent=indent)
    stringio = StringIO.StringIO(s)
    response.compose(code="200", reason="Ok", body=stringio,
                     mimetype=mimetype)
    stream.send_response(request, response)
def got_response_negotiating(self, stream, request, response):
    """ Invoked when the response to the NEGOTIATE request arrives """
    m = json.loads(response.body.read())
    PROPERTIES = ("authorization", "queue_pos", "real_address", "unchoked")
    # Stash server-assigned session properties into the conf, with a
    # leading underscore (convention for volatile settings -- confirm)
    for k in PROPERTIES:
        self.conf["_%s" % k] = m[k]
    if not self.conf["_unchoked"]:
        # Still queued: publish our queue position and renegotiate
        LOG.complete("done (queue_pos %d)" % m["queue_pos"])
        STATE.update("negotiate", {"queue_pos": m["queue_pos"]})
        self.connection_ready(stream)
    else:
        LOG.complete("done (unchoked)")
        # Derive our BitTorrent peer ID from the authorization token
        sha1 = hashlib.sha1()
        sha1.update(m["authorization"])
        self.conf["bittorrent.my_id"] = sha1.digest()
        LOG.debug("* My ID: %s" % sha1.hexdigest())
        self.http_stream = stream
        self.negotiating = False
        # Spawn the actual BitTorrent peer and wire its callbacks
        # back into this client
        peer = PeerNeubot(self.poller)
        peer.complete = self.peer_test_complete
        peer.connection_lost = self.peer_connection_lost
        peer.connection_failed = self.peer_connection_failed
        peer.configure(self.conf)
        # Connect to the BitTorrent port of the host we negotiated with
        peer.connect((self.http_stream.peername[0],
                      self.conf["bittorrent.port"]))
def runner_api_done(state):
    ''' Invoked when the test completes successfully '''
    # State value should be 'idle'; without this update the GUI
    # would remain stuck on 'collect' after an on-demand test.
    STATE.update(state)
def __init__(self, poller):
    ''' Initialize the speedtest client '''
    ClientHTTP.__init__(self, poller)
    STATE.update("test_name", "speedtest")
    # Bookkeeping for the running test
    self.child, self.state = None, None
    self.streams = collections.deque()
    self.finished = False
def _waiting_pingback(self, stream, data):
    ''' Invoked when waiting for PINGBACK '''
    #
    # Fix: the first four statements below were commented out, which
    # left `context` and `tmp` undefined, turned the function into an
    # unconditional recv-and-return, and made everything after the
    # early return unreachable.  Restored the same structure used by
    # the parallel raw_clnt implementation.
    #
    context = stream.opaque
    context.bufferise(data)
    tmp = context.pullup(len(PINGBACK))
    if not tmp:
        # Not enough buffered bytes yet: keep receiving
        stream.recv(len(PINGBACK), self._waiting_pingback)
        return
    if tmp[4:5] != PINGBACK_CODE:
        raise RuntimeError('skype_clnt: received invalid message')
    # One more application-level RTT sample
    timediff = utils.ticks() - context.alrtt_ticks
    context.state.setdefault('alrtt_list', []).append(timediff)
    logging.debug('< PINGBACK')
    logging.debug('skype_clnt: alrtt_sample: %f', timediff)
    context.alrtt_cnt -= 1
    if context.alrtt_cnt > 0:
        # More samples needed: send another PING
        self._send_ping(stream)
        return
    # Done sampling: compute and publish the average ALRTT
    alrtt_list = context.state['alrtt_list']
    alrtt_avg = sum(alrtt_list) / len(alrtt_list)
    context.state['alrtt_avg'] = alrtt_avg
    latency = utils.time_formatter(alrtt_avg)
    logging.info('skype_clnt: alrtt_avg: %s', latency)
    STATE.update('test_latency', latency)
    logging.info('skype_clnt: estimating ALRTT... complete')
    logging.info('skype_clnt: skype goodput test... in progress')
    logging.debug('> RAWTEST')
    stream.send(RAWTEST, self._skypetest_sent)
def __init__(self, poller):
    ''' Initialize the BitTorrent negotiating client '''
    ClientHTTP.__init__(self, poller)
    STATE.update("test_name", "bittorrent")
    # Negotiation state and result bookkeeping
    self.negotiating, self.success = True, False
    self.http_stream = None
    self.my_side = {}
def runner_api_done():
    ''' Invoked once the on-demand test has finished '''
    # Drop back to 'idle': otherwise the GUI would keep showing
    # 'collect' after a test run on demand.
    STATE.update('idle')
def got_response_collecting(self, stream, request, response):
    """ Invoked when the response to the COLLECT request arrives """
    LOG.complete()
    if self.success:
        #
        # Always measure at the receiver because there is more
        # information at the receiver and also to make my friend
        # Enrico happier :-P.
        # The following is not a bug: it's just that the server
        # returns a result using the point of view of the client,
        # i.e. upload_speed is _our_ upload speed.
        #
        m = json.loads(response.body.read())
        self.my_side["upload_speed"] = m["upload_speed"]
        upload = utils.speed_formatter(m["upload_speed"])
        STATE.update("test_upload", upload)
        # Save the result only when privacy permissions allow it
        if privacy.collect_allowed(self.my_side):
            table_bittorrent.insert(DATABASE.connection(), self.my_side)
        # Update the upstream channel estimate
        target_bytes = int(m["target_bytes"])
        if target_bytes > 0:
            estimate.UPLOAD = target_bytes
    stream.close()
def _waiting_pingback(self, stream, data):
    """ Invoked when waiting for PINGBACK """
    context = stream.opaque
    context.bufferise(data)
    tmp = context.pullup(len(PINGBACK))
    if not tmp:
        # Not enough buffered bytes yet: keep receiving
        stream.recv(len(PINGBACK), self._waiting_pingback)
        return
    if tmp[4:5] != PINGBACK_CODE:
        raise RuntimeError("raw_clnt: received invalid message")
    # One more application-level RTT sample
    timediff = utils.ticks() - context.alrtt_ticks
    context.state.setdefault("alrtt_list", []).append(timediff)
    logging.debug("< PINGBACK")
    logging.debug("raw_clnt: alrtt_sample: %f", timediff)
    context.alrtt_cnt -= 1
    if context.alrtt_cnt > 0:
        # More samples needed: send another PING
        self._send_ping(stream)
        return
    # Done sampling: compute and publish the average ALRTT
    alrtt_list = context.state["alrtt_list"]
    alrtt_avg = sum(alrtt_list) / len(alrtt_list)
    context.state["alrtt_avg"] = alrtt_avg
    latency = utils.time_formatter(alrtt_avg)
    logging.info("raw_clnt: alrtt_avg: %s", latency)
    STATE.update("test_progress", "50%", publish=False)
    STATE.update("test_latency", latency)
    logging.info("raw_clnt: estimating ALRTT... complete")
    logging.info("raw_clnt: raw goodput test... in progress")
    logging.debug("> RAWTEST")
    stream.send(RAWTEST, self._rawtest_sent)
def handle_connect(self, connector, sock, rtt, sslconfig, state):
    """ Invoked when the TCP connection attempt succeeded """
    logging.info("raw_clnt: connection established with %s", connector)
    logging.info("raw_clnt: connect_time: %s", utils.time_formatter(rtt))
    state["connect_time"] = rtt
    # Wrap the socket into a Stream; the return value is deliberately
    # unused (presumably the constructor registers the stream with the
    # poller -- confirm against the Stream implementation)
    Stream(sock, self._connection_ready, self._connection_lost,
           sslconfig, "", ClientContext(state))
    STATE.update("test", "raw")
    # Record the TCP maximum segment size of this connection
    state["mss"] = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG)
    state["rcvr_data"] = []
def handle_connect(self, connector, sock, rtt, sslconfig, state):
    ''' Invoked once the TCP connection has been established '''
    logging.info('raw_clnt: connection established with %s', connector)
    logging.info('raw_clnt: connect_time: %s', utils.time_formatter(rtt))
    state['connect_time'] = rtt
    # The Stream return value is intentionally discarded
    context = ClientContext(state)
    Stream(sock, self._connection_ready, self._connection_lost,
           sslconfig, '', context)
    STATE.update('test', 'raw')
    # Remember the connection's TCP maximum segment size
    mss = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_MAXSEG)
    state['mss'] = mss
    state['rcvr_data'] = []
def connect_uri(self, uri=None, count=None):
    ''' Connect to the rendezvous server '''
    self._task = None
    if not uri:
        # Fall back to the configured master server
        uri = "http://%s:9773/rendezvous" % CONFIG["agent.master"]
    LOG.start("* Rendezvous with %s" % uri)
    STATE.update("rendezvous")
    # A single connection suffices for the rendezvous
    ClientHTTP.connect_uri(self, uri, 1)
def __init__(self, poller):
    ''' Initialize the speedtest client and reset the GUI test state '''
    ClientHTTP.__init__(self, poller)
    # Reset the per-test GUI variables without publishing each one;
    # only the final test_name update is published
    for variable in ("test_latency", "test_download", "test_upload"):
        STATE.update(variable, "---", publish=False)
    STATE.update("test_progress", "0%", publish=False)
    STATE.update("test_name", "speedtest")
    self.child = None
    self.streams = collections.deque()
    self.finished = False
    self.state = None
def connection_ready(self, stream):
    """ Invoked when the connection is ready: start negotiating """
    LOG.complete()
    STATE.update("negotiate")
    LOG.start("BitTorrent: negotiating")
    request = Message()
    body = json.dumps({"target_bytes": self.conf["bittorrent.bytes.up"]})
    # NOTE(review): a GET request with a body is unusual (a parallel
    # implementation elsewhere uses POST here) -- confirm the server
    # accepts it before changing anything
    request.compose(method="GET", pathquery="/negotiate/bittorrent",
                    host=self.host_header, body=body,
                    mimetype="application/json")
    # Empty on the first round trip, filled by later negotiations
    request["authorization"] = self.conf.get("_authorization", "")
    stream.send_request(request)
def got_response(self, stream, request, response):
    """ Invoked when the rendezvous response arrives """
    if response.code != "200":
        LOG.complete("bad response")
        self._schedule()
    else:
        LOG.complete()
        s = response.body.read()
        try:
            m1 = marshal.unmarshal_object(s, "application/json",
                                          compat.RendezvousResponse)
        except ValueError:
            # Malformed response: log and retry later
            LOG.exception()
            self._schedule()
        else:
            # Inform the user when a software update is available
            if "version" in m1.update and "uri" in m1.update:
                ver, uri = m1.update["version"], m1.update["uri"]
                LOG.info("Version %s available at %s" % (ver, uri))
                STATE.update("update", {"version": ver, "uri": uri})
                _open_browser_on_windows("update.html")
            # Update tests known by the runner
            runner_lst.update(m1.available)
            #
            # Choose the test we would like to run even if
            # we're not going to run it because we're running
            # in debug mode or tests are disabled.
            # This allows us to print to the logger the test
            # we /would/ have choosen if we were allowed to run
            # it.
            #
            test = runner_lst.get_next_test()
            if not test:
                LOG.warning("No test available")
                self._schedule()
                return
            LOG.info("* Chosen test: %s" % test)
            # Are we allowed to run a test?
            if not CONFIG["enabled"] or CONFIG["rendezvous.client.debug"]:
                LOG.info("Tests are disabled... not running")
                self._schedule()
            else:
                # Do we have negotiate URI for test?
                negotiate_uri = runner_lst.test_to_negotiate_uri(test)
                if not negotiate_uri:
                    LOG.warning("No negotiate URI for test")
                    self._schedule()
                else:
                    # Actually run the test; completion reschedules us
                    runner_core.run(test, negotiate_uri, self._schedule)
def __init__(self, poller):
    ''' Initialize the BitTorrent client and reset the GUI test state '''
    ClientHTTP.__init__(self, poller)
    # Clear the per-test GUI variables quietly; only the test_name
    # update is published
    for variable in ("test_latency", "test_download", "test_upload"):
        STATE.update(variable, "---", publish=False)
    STATE.update("test_progress", "0%", publish=False)
    STATE.update("test_name", "bittorrent")
    # Negotiation and result bookkeeping
    self.negotiating = True
    self.http_stream = None
    self.success = False
    self.my_side = {}
    self.final_state = False
def _after_rendezvous(self, unused): ''' After rendezvous actions ''' # # This function is invoked both when the rendezvous fails # and succeeds. If it succeeds, OK we have fresh information # on available tests and updates and we use it. Otherwise, # if rendezvous fails, we may either have old information, or # no information, if this is the first rendezvous. In any # case, we do our best to use the available information. # logging.info('background_rendezvous: automatic rendezvous... done') # Inform the user when we have updates new_version = RUNNER_UPDATES.get_update_version() new_uri = RUNNER_UPDATES.get_update_uri() if new_version and new_uri and not CONFIG['win32_updater']: logging.info('runner_rendezvous: version %s available at %s', new_version, new_uri) STATE.update('update', {'version': new_version, 'uri': new_uri}) self._open_browser_on_windows('update.html') # # Choose the test we would like to run even if # we're not going to run it because tests are # disabled. So we can print the test name also # when tests are disabled. # test = RUNNER_POLICY.get_next_test() logging.info('background_rendezvous: chosen test: %s', test) # Are we allowed to run a test? if not CONFIG['enabled']: raise RuntimeError('background_rendezvous: automatic ' 'tests disabled') # # RAW test requires auto_discover to be True, since it uses mlab-ns # to discover servers. Other tests don't need that, since, at the # moment, they discover servers during the rendezvous. So, if their # auto_discover were True, they'd end up running two rendezvous in # a row for no good reason. # auto_discover = (test == 'raw') # Actually run the test deferred = Deferred() deferred.add_callback(self._schedule) RUNNER_CORE.run(test, deferred, auto_discover, None)
def connection_ready(self, stream):
    """ Invoked when the connection is ready: start negotiating """
    uri = "http://%s/" % self.host_header
    logging.info("BitTorrent: connecting to %s ... done", uri)
    STATE.update("negotiate")
    logging.info("BitTorrent: negotiating in progress...")
    # Tell the server which protocol version we speak and how many
    # bytes we intend to upload
    settings = {
        "test_version": CONFIG['bittorrent_test_version'],
        "target_bytes": self.conf['bittorrent.bytes.up'],
    }
    request = Message()
    request.compose(method="POST", pathquery="/negotiate/bittorrent",
                    host=self.host_header, body=json.dumps(settings),
                    mimetype="application/json")
    # Empty on the first round trip, filled by later negotiations
    request["authorization"] = self.conf.get("_authorization", "")
    stream.send_request(request)
def _after_rendezvous(self, unused): ''' After rendezvous actions ''' # # This function is invoked both when the rendezvous fails # and succeeds. If it succeeds, OK we have fresh information # on available tests and updates and we use it. Otherwise, # if rendezvous fails, we may either have old information, or # no information, if this is the first rendezvous. In any # case, we do our best to use the available information. # logging.info('background_rendezvous: automatic rendezvous... done') # Inform the user when we have updates new_version = RUNNER_UPDATES.get_update_version() new_uri = RUNNER_UPDATES.get_update_uri() if new_version and new_uri and not CONFIG['win32_updater']: logging.info('runner_rendezvous: version %s available at %s', new_version, new_uri) STATE.update('update', {'version': new_version, 'uri': new_uri}) # # Choose the test we would like to run even if # we're not going to run it because tests are # disabled. So we can print the test name also # when tests are disabled. # # Note: we pick a test at random because now we # have a fixed probability of running a test. # test = RUNNER_POLICY.get_random_test() logging.info('background_rendezvous: chosen test: %s', test) # Are we allowed to run a test? if not CONFIG['enabled']: raise RuntimeError('background_rendezvous: automatic ' 'tests disabled') # # The two legacy tests, speedtest and bittorent, use the rendezvous # to discover the servers. Other tests use mlab-ns. # use_mlabns = (test != 'speedtest' and test != 'bittorrent') # Actually run the test deferred = Deferred() deferred.add_callback(self._schedule) RUNNER_CORE.run(test, deferred, use_mlabns, None)
def __init__(self, poller):
    ''' Initialize the BitTorrent peer '''
    StreamHandler.__init__(self, poller)
    STATE.update("test", "bittorrent")
    # Transfer accounting
    self.connector_side = False
    self.saved_bytes, self.saved_ticks = 0, 0
    self.inflight = 0
    self.dload_speed = 0
    self.begin_upload = 0.0
    # Protocol state machine
    self.repeat = MAX_REPEAT
    self.state = INITIAL
    self.infohash = None
    self.rtt = 0
    self.version = 1
def _after_rendezvous(self): ''' After rendezvous actions ''' # TODO Make this function more robust wrt unexpected errors # # This function is invoked both when the rendezvous fails # and succeeds. If it succeeds, OK we have fresh information # on available tests and updates and we use it. Otherwise, # if rendezvous fails, we may either have old information, or # no information, if this is the first rendezvous. In any # case, we do our best to use the available information. # logging.info('background_rendezvous: automatic rendezvous... done') # Inform the user when we have updates new_version = RUNNER_UPDATES.get_update_version() new_uri = RUNNER_UPDATES.get_update_uri() if new_version and new_uri and not CONFIG['win32_updater']: logging.info('runner_rendezvous: version %s available at %s', new_version, new_uri) STATE.update('update', {'version': new_version, 'uri': new_uri}) self._open_browser_on_windows('update.html') # # Choose the test we would like to run even if # we're not going to run it because tests are # disabled. So we can print the test name also # when tests are disabled. # test = RUNNER_TESTS.get_next_test() if not test: logging.warning('background_rendezvous: no test available') self._schedule() return logging.info('background_rendezvous: chosen test: %s', test) # Are we allowed to run a test? if not CONFIG['enabled']: logging.info('background_rendezvous: automatic tests are disabled') self._schedule() return # Actually run the test RUNNER_CORE.run(test, self._schedule)
def connection_ready(self, stream):
    """ Invoked when the connection is ready """
    if self.iteration == 0:
        # First iteration: publish the connect latency
        STATE.update("test_latency", utils.time_formatter(self.rtts[0]))
        logging.info("dash: latency %s", utils.time_formatter(self.rtts[0]))
    #
    # Pick the greatest rate in the vector that is smaller
    # than the latest piece rate (saved in speed_kbit).
    #
    # Note: we pick one minus the bisect point because we
    # want to use the closest smaller rate for the next
    # chunk of "video" that we download.
    #
    rate_index = bisect.bisect(self.rates, self.speed_kbit) - 1
    if rate_index < 0:
        rate_index = 0
    self.rate_kbit = self.rates[rate_index]
    # Fix: use floor division so `count` stays an integer also under
    # Python 3, where `/` would yield a float (behavior is unchanged
    # under Python 2 integer division)
    count = ((self.rate_kbit * 1000) // 8) * DASH_SECONDS
    uri = "/dash/download/%d" % count
    logging.debug("dash: connection ready - rate %d Kbit/s",
                  self.rate_kbit)
    request = Message()
    request.compose(method="GET", pathquery=uri, host=self.host_header)
    if self.parent:
        auth = self.parent.get_auth()
        logging.debug("dash: authorization - %s", auth)
        request["authorization"] = auth
    # Snapshot counters right before sending, to measure the piece
    self.saved_ticks = utils.ticks()
    self.saved_cnt = stream.bytes_recv_tot
    self.saved_times = os.times()[:2]
    response = Message()
    # Receive and discard the body
    response.body.write = lambda piece: None
    logging.debug(
        "dash: send request - ticks %f, bytes %d, times %s",
        self.saved_ticks, self.saved_cnt, self.saved_times
    )
    stream.set_timeout(10)
    stream.send_request(request, response)
def _waiting_piece(self, stream, data):
    ''' Invoked when new data is available '''
    # Note: this loop cannot be adapted to process other messages
    # easily, as pointed out in <skype_defs.py>.
    context = stream.opaque
    context.bufferise(data)
    # Record (timestamp, size) of every receive event
    context.state['rcvr_data'].append((utils.ticks(), len(data)))
    while True:
        if context.left > 0:
            # In the middle of a PIECE: consume pending payload bytes
            context.left = context.skip(context.left)
            if context.left > 0:
                break
        elif context.left == 0:
            # Between PIECEs: read the next 4-byte length prefix
            tmp = context.pullup(4)
            if not tmp:
                break
            context.left, = struct.unpack('!I', tmp)
            if context.left > MAXRECV:
                raise RuntimeError('skype_clnt: PIECE too large')
            if not context.ticks:
                # First PIECE: start the goodput measurement and the
                # periodic sampler
                context.ticks = context.snap_ticks = utils.ticks()
                context.count = context.snap_count = stream.bytes_in
                context.snap_utime, context.snap_stime = os.times()[:2]
                POLLER.sched(1, self._periodic, stream)
            if context.left == 0:
                # Empty message terminates the test: compute goodput
                logging.debug('< {empty-message}')
                logging.info('skype_clnt: skype goodput test... complete')
                ticks = utils.ticks()
                timediff = ticks - context.ticks
                bytesdiff = stream.bytes_in - context.count
                context.state['goodput'] = {
                    'ticks': ticks,
                    'bytesdiff': bytesdiff,
                    'timediff': timediff,
                }
                if timediff > 1e-06:
                    speed = utils.speed_formatter(bytesdiff / timediff)
                    logging.info('skype_clnt: goodput: %s', speed)
                    # Consistency fix: use publish=False like the rest
                    # of the codebase (was publish=0 -- same truthiness,
                    # but False is the documented keyword value)
                    STATE.update('test_download', speed, publish=False)
                    STATE.update('test_upload', 'N/A')
                self._periodic_internal(stream)
                context.state['complete'] = 1
                stream.close()
                return
        else:
            raise RuntimeError('skype_clnt: internal error')
    stream.recv(MAXRECV, self._waiting_piece)
def connect_uri(self, uri=None, count=None):
    ''' Connect to the rendezvous server, unless privacy forbids it '''
    self._task = None
    if not privacy.allowed_to_run():
        # The user has not granted permission yet: complain, point
        # the browser at the privacy page, and try again later
        _open_browser_on_windows("privacy.html")
        privacy.complain()
        self._schedule()
        return
    if not uri:
        # Fall back to the configured master server
        uri = "http://%s:9773/rendezvous" % CONFIG["agent.master"]
    LOG.start("* Rendezvous with %s" % uri)
    STATE.update("rendezvous")
    # A single connection suffices for the rendezvous
    ClientHTTP.connect_uri(self, uri, 1)
def connection_made(self, sock, rtt=0):
    """ Invoked when the TCP connection is established """
    if rtt:
        # Publish the connect time as the test latency
        latency = utils.time_formatter(rtt)
        LOG.complete("done, %s" % latency)
        STATE.update("test_latency", latency)
        self.rtt = rtt
    stream = StreamBitTorrent(self.poller)
    if not self.connector_side:
        #
        # Note that we use self.__class__() because self
        # might be a subclass of PeerNeubot.
        #
        peer = self.__class__(self.poller)
        peer.configure(self.conf)
    else:
        peer = self
    stream.attach(peer, sock, peer.conf)
    # Guard the stream with the configured watchdog timeout
    stream.watchdog = self.conf["bittorrent.watchdog"]
def peer_test_complete(self, stream, download_speed, rtt, target_bytes):
    """ Invoked when the peer-to-peer transfer test is complete:
        build the result record and POST it to the collect URI """
    self.success = True
    # Switch back to the HTTP control stream for collecting
    stream = self.http_stream
    # Update the downstream channel estimate
    estimate.DOWNLOAD = target_bytes
    self.my_side = {
        # The server will override our timestamp
        "timestamp": utils.timestamp(),
        "uuid": self.conf.get("uuid"),
        "internal_address": stream.myname[0],
        "real_address": self.conf.get("_real_address", ""),
        "remote_address": stream.peername[0],
        "privacy_informed": self.conf.get("privacy.informed", 0),
        "privacy_can_collect": self.conf.get("privacy.can_collect", 0),
        "privacy_can_publish": self.conf.get("privacy.can_publish", 0),
        # Upload speed measured at the server
        "connect_time": rtt,
        "download_speed": download_speed,
        # OS and version info
        "neubot_version": utils_version.to_numeric("0.4.12-rc2"),
        "platform": sys.platform,
    }
    logging.info("BitTorrent: collecting in progress...")
    STATE.update("collect")
    s = json.dumps(self.my_side)
    stringio = StringIO.StringIO(s)
    request = Message()
    request.compose(
        method="POST", pathquery="/collect/bittorrent", body=stringio,
        mimetype="application/json", host=self.host_header)
    request["authorization"] = self.conf.get("_authorization", "")
    stream.send_request(request)
def _after_rendezvous(self): ''' After rendezvous actions ''' # # If rendezvous fails, RUNNER_UPDATES and RUNNER_TESTS # may be empty. In such case, this function becomes just # a no operation and nothing happens. # # Inform the user when we have updates new_version = RUNNER_UPDATES.get_update_version() new_uri = RUNNER_UPDATES.get_update_uri() if new_version and new_uri: logging.info("Version %s available at %s", new_version, new_uri) STATE.update("update", {"version": new_version, "uri": new_uri}) _open_browser_on_windows('update.html') # # Choose the test we would like to run even if # we're not going to run it because we're running # in debug mode or tests are disabled. # This allows us to print to the logger the test # we /would/ have choosen if we were allowed to # run tests. # test = RUNNER_TESTS.get_next_test() if not test: logging.warning("No test available") self._schedule() return logging.info("* Chosen test: %s", test) # Are we allowed to run a test? if not CONFIG["enabled"]: logging.info("Tests are disabled... not running") self._schedule() return # Actually run the test RUNNER_CORE.run(test, self._schedule)