def resched(self, delta, *args, **kwargs):
    ''' Reschedule this task @delta seconds in the future '''
    self.time = ticks() + delta
    self.timestamp = timestamp() + int(delta)
    if args:
        self.args = args
    if kwargs:
        self.kwargs = kwargs

def prune(connection, days_ago=None, commit=True):
    ''' Removes old results from the log table '''
    if not days_ago:
        days_ago = 30
    until = utils.timestamp() - days_ago * 24 * 60 * 60
    connection.execute("DELETE FROM log WHERE timestamp < ?;", (until,))
    if commit:
        connection.commit()

def prune(connection, until=None, commit=True):
    """ Removes old results from bittorrent table """
    if not until:
        until = utils.timestamp() - 365 * 24 * 60 * 60
    connection.execute("DELETE FROM bittorrent WHERE timestamp < ?;", (until,))
    if commit:
        connection.commit()

def prune(connection, until=None, commit=True):
    ''' Removes old results from the speedtest table '''
    if not until:
        until = utils.timestamp() - 365 * 24 * 60 * 60
    connection.execute("DELETE FROM speedtest WHERE timestamp < ?;", (until,))
    if commit:
        connection.commit()

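# A minimal usage sketch for the prune() helpers above, assuming a
# sqlite3 database whose tables store `timestamp` as seconds since the
# epoch (the value returned by utils.timestamp()); the database path
# and the 90-day cutoff are illustrative, not from the original code.
def prune_example():
    import sqlite3
    connection = sqlite3.connect("/tmp/neubot-example.sqlite3")
    # Keep only the last 90 days of results; commit=False lets the
    # caller batch the DELETE with other writes and commit once
    cutoff = utils.timestamp() - 90 * 24 * 60 * 60
    prune(connection, until=cutoff, commit=False)
    connection.commit()
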
def collect(self, stream, request_body):
    """ Invoked when we must save the result of a session """
    sha256 = self._stream_to_sha256(stream)
    if sha256 not in self.peers:
        raise RuntimeError("dash: not authorized: %s" % sha256)

    # Note: no more than one collect per session
    result = self.peers.pop(sha256)
    logging.debug("dash: del sha256 (OK): %s", sha256)

    server_timestamp = utils.timestamp()
    BACKEND.store_generic("dash", {
        "srvr_schema_version": 3,
        "srvr_timestamp": server_timestamp,
        "client": request_body,
        "server": result,
    })

    #
    # Return back, at a minimum, the server timestamp.
    # TODO Also gather and return Web100 stats.
    #
    for index in range(len(request_body)):
        # Pad the server-side vector so that it is at least
        # as long as the client-side one
        if index >= len(result):
            result.append({})
        result[index]["timestamp"] = server_timestamp

    return result

def complete(self, stream, speed, rtt, target_bytes):
    # Avoid leak: do not add an entry if not needed
    if stream.id in AUTH_PEERS:
        AUTH_PEERS[stream.id] = {
            "upload_speed": speed,
            "timestamp": utils.timestamp(),
            "target_bytes": target_bytes,
        }

def complete(self, stream, speed, rtt, target_bytes):
    # Avoid leak: do not add an entry if not needed
    if stream.id in NEGOTIATE_SERVER_BITTORRENT.peers:
        NEGOTIATE_SERVER_BITTORRENT.peers[stream.id] = {
            "upload_speed": speed,
            "timestamp": utils.timestamp(),
            "target_bytes": target_bytes,
        }

def __init__(self, publish=NOTIFIER.publish, time=utils.T):
    self.publish = publish
    self.time = time
    self.current = ""
    self.events = {}
    self.tsnap = self.time()
    self.update("since", utils.timestamp())
    self.update("pid", os.getpid())

def __init__(self, publish=NOTIFIER.publish, T=utils.T):
    self._publish = publish
    self._T = T
    self._current = ""
    self._events = {}
    self._t = self._T()
    self.update("since", utils.timestamp())
    self.update("pid", os.getpid())

def session_prune(self):
    ''' Removes sessions that have been idle for more than 30 seconds '''
    stale = []
    now = utils.timestamp()
    for session in self.queue:
        if now - session.timestamp > 30:
            stale.append(session)
    if not stale:
        return False
    for session in stale:
        self._do_remove(session)
    return True

def session_negotiate(self, identifier):
    if identifier not in self.identifiers:
        session = SessionState()
        # XXX collision is not impossible but very unlikely
        session.identifier = utils.get_uuid()
        session.timestamp = utils.timestamp()
        self._do_add(session)
    else:
        session = self.identifiers[identifier]
    session.negotiations += 1
    return session

def runTest(self):
    """Make sure bittorrent table works as expected"""
    connection = sqlite3.connect(":memory:")
    connection.row_factory = sqlite3.Row

    # Creating the table twice must be idempotent
    table_bittorrent.create(connection)
    table_bittorrent.create(connection)

    v = list(ResultIterator())
    for d in v:
        table_bittorrent.insert(connection, d, override_timestamp=False)

    v1 = table_bittorrent.listify(connection)
    self.assertEqual(sorted(v), sorted(v1))

    since = utils.timestamp() - 7 * 24 * 60 * 60
    until = utils.timestamp() - 3 * 24 * 60 * 60
    v2 = table_bittorrent.listify(connection, since=since, until=until)
    self.assertTrue(len(v2) < len(v))

    table_bittorrent.prune(connection, until)
    self.assertTrue(len(table_bittorrent.listify(connection)) < len(v1))

def main(args):
    try:
        options, arguments = getopt.getopt(args[1:], "f:")
    except getopt.GetoptError:
        sys.stderr.write(USAGE)
        sys.exit(1)

    for key, value in options:
        if key == "-f":
            DATABASE.set_path(value)

    DATABASE.connect()

    if not arguments:
        sys.stdout.write('%s\n' % DATABASE.path)
    elif arguments[0] == "regen_uuid":
        if DATABASE.readonly:
            sys.exit('ERROR: readonly database')
        table_config.update(DATABASE.connection(),
                            {"uuid": utils.get_uuid()}.iteritems())
    elif arguments[0] == "prune":
        if DATABASE.readonly:
            sys.exit('ERROR: readonly database')
        table_speedtest.prune(DATABASE.connection())
    elif arguments[0] == "delete_all":
        if DATABASE.readonly:
            sys.exit('ERROR: readonly database')
        table_speedtest.prune(DATABASE.connection(), until=utils.timestamp())
        DATABASE.connection().execute("VACUUM;")
    elif arguments[0] in ("show", "dump"):
        d = {
            "config": table_config.dictionarize(DATABASE.connection()),
            "speedtest": table_speedtest.listify(DATABASE.connection()),
        }
        if arguments[0] == "show":
            compat.json.dump(d, sys.stdout, indent=4)
        elif arguments[0] == "dump":
            compat.json.dump(d, sys.stdout)
    else:
        sys.stdout.write(USAGE)
        sys.exit(0)

def connection_ready(self, stream):
    m1 = SpeedtestCollect()
    m1.client = self.conf.get("uuid", "")
    m1.timestamp = utils.timestamp()
    m1.internalAddress = stream.myname[0]
    m1.realAddress = self.conf.get("speedtest.client.public_address", "")
    m1.remoteAddress = stream.peername[0]
    m1.latency = self.conf.get("speedtest.client.latency", 0.0)
    m1.downloadSpeed = self.conf.get("speedtest.client.download", 0.0)
    m1.uploadSpeed = self.conf.get("speedtest.client.upload", 0.0)
    m1.privacy_informed = self.conf.get("privacy.informed", 0)
    m1.privacy_can_collect = self.conf.get("privacy.can_collect", 0)
    m1.privacy_can_share = self.conf.get("privacy.can_publish", 0)  # XXX
    m1.neubot_version = utils_version.NUMERIC_VERSION
    m1.platform = sys.platform
    m1.connectTime = sum(self.rtts) / len(self.rtts)

    # Test version (added Neubot 0.4.12)
    m1.testVersion = CONFIG["speedtest_test_version"]

    s = marshal.marshal_object(m1, "text/xml")
    stringio = StringIO.StringIO(s)

    #
    # Pass a dictionary because the function no longer
    # accepts an object
    #
    if privacy.collect_allowed(m1.__dict__):
        if DATABASE.readonly:
            logging.warning("speedtest: readonly database")
        else:
            insertxxx(DATABASE.connection(), m1)

    request = Message()
    request.compose(
        method="POST",
        pathquery="/speedtest/collect",
        body=stringio,
        mimetype="application/xml",
        host=self.host_header,
    )
    request["authorization"] = self.conf.get(
        "speedtest.client.authorization", "")
    stream.send_request(request)

def connection_ready(self, stream):
    m1 = compat.SpeedtestCollect()
    m1.client = self.conf.get("uuid", "")
    m1.timestamp = utils.timestamp()
    m1.internalAddress = stream.myname[0]
    m1.realAddress = self.conf.get("speedtest.client.public_address", "")
    m1.remoteAddress = stream.peername[0]
    m1.latency = self.conf.get("speedtest.client.latency", 0.0)
    m1.downloadSpeed = self.conf.get("speedtest.client.download", 0.0)
    m1.uploadSpeed = self.conf.get("speedtest.client.upload", 0.0)
    m1.privacy_informed = self.conf.get("privacy.informed", 0)
    m1.privacy_can_collect = self.conf.get("privacy.can_collect", 0)
    m1.privacy_can_share = self.conf.get("privacy.can_share", 0)
    m1.neubot_version = LibVersion.to_numeric("0.4.2")
    m1.platform = sys.platform

    if self.measurer:
        m1.connectTime = self.measurer.measure_rtt()[0]

    #import pprint
    #if hasattr(self.measurer, "recv_hist"):
    #    download = self.measurer.recv_hist.get("download", [])
    #    pprint.pprint(download)
    #if hasattr(self.measurer, "send_hist"):
    #    upload = self.measurer.send_hist.get("upload", [])
    #    pprint.pprint(upload)

    s = marshal.marshal_object(m1, "text/xml")
    stringio = StringIO.StringIO(s)

    if privacy.collect_allowed(m1):
        table_speedtest.insertxxx(DATABASE.connection(), m1)

    request = Message()
    request.compose(method="POST", pathquery="/speedtest/collect",
                    body=stringio, mimetype="application/xml",
                    host=self.host_header)
    request["authorization"] = self.conf.get(
        "speedtest.client.authorization", "")
    stream.send_request(request)

def peer_test_complete(self, stream, download_speed, rtt, target_bytes):
    self.success = True
    stream = self.http_stream

    # Update the downstream channel estimate
    estimate.DOWNLOAD = target_bytes

    self.my_side = {
        # The server will override our timestamp
        "timestamp": utils.timestamp(),
        "uuid": self.conf.get("uuid"),
        "internal_address": stream.myname[0],
        "real_address": self.conf.get("_real_address", ""),
        "remote_address": stream.peername[0],

        "privacy_informed": self.conf.get("privacy.informed", 0),
        "privacy_can_collect": self.conf.get("privacy.can_collect", 0),
        "privacy_can_publish": self.conf.get("privacy.can_publish", 0),

        # Upload speed measured at the server
        "connect_time": rtt,
        "download_speed": download_speed,

        # OS and version info
        "neubot_version": utils_version.to_numeric("0.4.12-rc2"),
        "platform": sys.platform,
    }

    logging.info("BitTorrent: collecting in progress...")
    STATE.update("collect")

    s = json.dumps(self.my_side)
    stringio = StringIO.StringIO(s)

    request = Message()
    request.compose(
        method="POST",
        pathquery="/collect/bittorrent",
        body=stringio,
        mimetype="application/json",
        host=self.host_header)
    request["authorization"] = self.conf.get("_authorization", "")
    stream.send_request(request)

def notify_page(self, html_page):
    ''' Open a web page to notify the user '''

    #
    # If Neubot is disabled and the user does not want to
    # receive notifications while it is disabled, make sure
    # we don't annoy her.
    # Also, do not prompt the user too frequently: it becomes
    # TOO ANNOYING if the browser opens every fifteen minutes
    # or so.  Reported some time ago by a user who complained
    # with me on the phone.
    #
    logging.debug('notifier_browser: maybe notify: %s', html_page)

    honor_enabled = CONFIG['notifier_browser.honor_enabled']
    enabled = CONFIG['enabled']
    if not enabled and honor_enabled:
        logging.debug('notifier_browser: honoring enabled')
        return

    now = utils.timestamp()
    last_show = self.last_show.get(html_page, 0)
    min_interval = CONFIG['notifier_browser.min_interval']
    if now - last_show < min_interval:
        logging.debug('notifier_browser: avoid spamming the user')
        return
    self.last_show[html_page] = now

    # FIXME This discards the IPv6 localhost address
    address = CONFIG['agent.api.address']
    if ' ' in address:
        address = address.split()[0]

    uri = 'http://%s/%s' % (
        utils_net.format_epnt((address, CONFIG['agent.api.port'])),
        html_page
    )
    return browser.open_browser(uri)

def _log(self, severity, message):
    message = message.rstrip()

    if self._use_database and severity != "ACCESS":
        record = {
            "timestamp": utils.timestamp(),
            "severity": severity,
            "message": message,
        }
        #
        # We don't need to commit INFO and DEBUG records:
        # it's OK to see those with some delay.  Conversely,
        # we want to see WARNING and ERROR records immediately.
        # TODO We also need to commit the database on sys.exit(),
        # on signals, etc.  (This is more a problem of the
        # database than of this file.)
        #
        if severity in ("INFO", "DEBUG"):
            commit = False
            # Do we need to commit now?
            self._nocommit = self._nocommit - 1
            if self._nocommit <= 0:
                self._nocommit = NOCOMMIT
                commit = True
        else:
            # Must commit now
            self._nocommit = NOCOMMIT
            commit = True
        self._queue.append(record)
        if commit:
            self._writeback()

    self.logger(severity, message)

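# The commit-batching policy used above, distilled into a standalone
# sketch: INFO/DEBUG records are committed once every NOCOMMIT writes,
# while WARNING/ERROR records are committed immediately.  The counter
# protocol and the NOCOMMIT value below are illustrative, not part of
# the original code.
NOCOMMIT_EXAMPLE = 10

def should_commit(severity, countdown):
    ''' countdown is a one-element list holding the current counter '''
    if severity in ("INFO", "DEBUG"):
        countdown[0] -= 1
        if countdown[0] > 0:
            return False  # batch this low-priority record
    countdown[0] = NOCOMMIT_EXAMPLE
    return True  # commit now and reset the counter
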
def _piece_sent(self, stream):
    ''' Invoked when a message has been sent '''
    context = stream.opaque
    ticks = utils.ticks()

    if ticks - context.ticks < 10:
        stream.send(context.message, self._piece_sent)
        #logging.debug('> PIECE')
        return

    logging.info('raw_srvr: raw test... complete')

    ticks = utils.ticks()
    timediff = ticks - context.ticks
    bytesdiff = stream.bytes_out - context.count
    context.state['timestamp'] = utils.timestamp()
    context.state['goodput'] = {
        'ticks': ticks,
        'bytesdiff': bytesdiff,
        'timediff': timediff,
    }
    if timediff > 1e-06:
        speed = utils.speed_formatter(bytesdiff / timediff)
        logging.info('raw_srvr: goodput: %s', speed)

    self._periodic_internal(stream)
    stream.send(EMPTY_MESSAGE, self._empty_message_sent)
    logging.debug('> {empty-message}')

def do_insert_into(connection, query, dictobj, template,
                   commit=True, override_timestamp=True):
    '''
     Wrapper for INSERT INTO that makes sure that @dictobj has the
     same fields as @template, to avoid a programming error in
     sqlite3.  If @override_timestamp is True, the function also
     overrides @dictobj's timestamp.  If @commit is True, the
     function also commits to @connection.
    '''
    for key in template.keys():
        if not key in dictobj:
            dictobj[key] = None

    # Override timestamp on the server side to guarantee consistency
    if override_timestamp:
        dictobj['timestamp'] = utils.timestamp()

    connection.execute(query, dictobj)
    if commit:
        connection.commit()

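# A hedged example of driving do_insert_into() with a named-parameter
# INSERT and a template dict; the table layout and field names below
# are illustrative, not taken from the original code.
def do_insert_into_example(connection):
    template = {"timestamp": 0, "uuid": "", "download_speed": 0.0}
    query = ("INSERT INTO speedtest (timestamp, uuid, download_speed) "
             "VALUES (:timestamp, :uuid, :download_speed);")
    # The missing download_speed field is filled in with None, and the
    # timestamp is overridden server-side to guarantee consistency
    do_insert_into(connection, query, {"uuid": "abc"}, template)
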
import random

from neubot.utils import get_uuid
from neubot.utils import timestamp

#
# DAYS         Time-span of the simulation in days
# ROWS         Number of rows in the simulation
# UUIDS        Number of UUIDs in the simulation
# IPCHANGETHR  Probability that a client IP address changes
# START        Simulation start time
#
DAYS = 100
ROWS = 100
UUIDS = 100
IPCHANGETHR = 0.05
START = timestamp() - DAYS * 24 * 60 * 60

def get_addr():
    return "".join(map(str, ["10.0.", random.randint(0, 254),
                             ".", random.randint(1, 254)]))

def get_time(start, days):
    return int(start + random.randint(0, days * 3600 * 24))

class ResultIterator(object):
    def __init__(self):
        self.times = (get_time(START, DAYS) for _ in xrange(ROWS))
        self.uuids = [get_uuid() for _ in xrange(0, UUIDS)]
        self.addrs = {}

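# A quick sanity check of the simulation helpers above: each generated
# row gets a random time within the last DAYS days and a 10.0.x.y
# client address.  Both helpers are pure, so they can be exercised in
# isolation; this check is an addition, not part of the original code.
def simulation_sanity_check():
    t = get_time(START, DAYS)
    assert START <= t <= START + DAYS * 24 * 60 * 60
    addr = get_addr()
    assert addr.startswith("10.0.")
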
def got_response(self, stream, request, response):
    """ Invoked when we receive the response from the server """
    if response.code != "200":
        logging.warning("dash: invalid response: %s", response.code)
        stream.close()
        return

    new_ticks = utils.ticks()
    new_bytes = stream.bytes_recv_tot
    new_times = os.times()[:2]

    logging.debug("dash: got response - ticks %f, bytes %d, times %s",
                  new_ticks, new_bytes, new_times)

    elapsed = new_ticks - self.saved_ticks
    received = new_bytes - self.saved_cnt
    delta_user_time = new_times[0] - self.saved_times[0]
    delta_sys_time = new_times[1] - self.saved_times[1]

    if elapsed < 0:
        raise RuntimeError("dash: clock going backwards")

    logging.debug("dash: got response - elaps %f, rcvd %d, user %f, sys %f",
                  elapsed, received, delta_user_time, delta_sys_time)

    if self.parent:
        result = {
            "connect_time": self.rtts[0],
            "delta_user_time": delta_user_time,
            "delta_sys_time": delta_sys_time,
            "elapsed": elapsed,
            "elapsed_target": DASH_SECONDS,
            "internal_address": stream.myname[0],
            "iteration": self.iteration,
            "platform": sys.platform,
            "rate": self.rate_kbit,
            "real_address": self.parent.real_address,
            "received": received,
            "remote_address": stream.peername[0],
            "request_ticks": self.saved_ticks,
            "timestamp": utils.timestamp(),
            "uuid": self.conf.get("uuid"),
            "version": utils_version.NUMERIC_VERSION,
        }
        self.parent.append_result(result)

    self.iteration += 1

    #
    # TODO It would be nice to also STATE.update() with the DASH
    # rate, but that change also requires some www changes.
    #
    STATE.update("test_progress", "%d%%" % (
        (100 * self.iteration) / DASH_MAX_ITERATION), publish=False)

    speed = received / elapsed
    self.speed_kbit = (speed * 8) / 1000
    STATE.update("test_download", utils.speed_formatter(speed))
    logging.info(
        "dash: [%2d/%d] rate: %6d Kbit/s, speed: %6d Kbit/s, elapsed: %.3f s",
        self.iteration, DASH_MAX_ITERATION, self.rate_kbit,
        self.speed_kbit, elapsed)

    if self.iteration >= DASH_MAX_ITERATION:
        logging.debug("dash: done all iterations")
        stream.close()
        return

    #
    # If we're adding too much delay, artificially reduce the
    # measured speed to let the bottleneck breathe.
    #
    if elapsed > DASH_SECONDS:
        rel_err = 1 - elapsed / DASH_SECONDS
        self.speed_kbit += rel_err * self.speed_kbit
        if self.speed_kbit < 0:
            self.speed_kbit = 100

    self.connection_ready(stream)

def __init__(self, delta, func):
    """ Initialize """
    self.time = ticks() + delta
    self.timestamp = timestamp() + int(delta)
    self.func = func

def session_active(self, identifier):
    if identifier in self.identifiers:
        session = self.identifiers[identifier]
        session.timestamp = utils.timestamp()  # XXX
        return session.active
    return False

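# A sketch of the session lifecycle implied by session_negotiate(),
# session_prune() and session_active() above, assuming `server` is the
# negotiate-server object that owns the `identifiers` map and `queue`;
# the calling sequence is illustrative, not part of the original code.
def session_lifecycle_example(server):
    # First contact: unknown identifier, so a new session is created
    session = server.session_negotiate(None)
    identifier = session.identifier
    # Later polls refresh the timestamp and tell us whether the
    # session has been activated (i.e. unchoked) in the meantime
    if server.session_active(identifier):
        logging.debug('example: session %s active, run the test', identifier)
    # Periodically drop sessions that have been idle for 30+ seconds
    server.session_prune()
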
def sched(self, delta, func, *args):
    ''' Schedule task '''
    #logging.debug('poller: sched: %s, %s, %s', delta, func, args)
    self.enter(delta, 0, self._run_task, (func, args))
    return timestamp() + delta

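# How sched() is typically used, assuming `poller` is the object that
# implements the sched() method above; the 10-second delay and the
# on_timeout callback are illustrative, not from the original code.
def on_timeout():
    logging.debug('example: scheduled task fired')

def sched_example(poller):
    # sched() returns the wall-clock time at which the task should run
    expected = poller.sched(10, on_timeout)
    logging.debug('example: task expected at %d', expected)
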
def got_response(self, stream, request, response):
    if response.code != "200":
        logging.warning("dash: http request error: %s", response.code)
        stream.close()
        return

    if self.state == STATE_NEGOTIATE:
        response_body = json.load(response.body)
        #
        # Note: the following are the standard fields that
        # the negotiate API call MUST return.
        #
        self.authorization = response_body["authorization"]
        self.queue_pos = response_body["queue_pos"]
        self.real_address = response_body["real_address"]
        self.unchoked = response_body["unchoked"]

        if not self.unchoked:
            logging.info("dash: negotiate... done (queue pos %d)",
                         self.queue_pos)
            STATE.update("negotiate", {"queue_pos": self.queue_pos})
            self.connection_ready(stream)
            return

        logging.info("dash: negotiate... done (unchoked)")
        self.stream = stream

        #
        # The server may override our vector of rates with a
        # "better" vector of rates of its choice.
        #
        rates = list(response_body.get("dash_rates", DASH_RATES))
        self.client = DASHClientSmpl(self.poller, self, rates)
        self.client.configure(self.conf.copy())
        self.client.connect((self.stream.peername[0], 80))  # XXX

    elif self.state == STATE_COLLECT:
        response_body = json.load(response.body)

        #
        # We store each iteration of the test as a separate row
        # of the backend.  We also add a whole-test timestamp, so
        # that one can tell which rows belong to the same test.
        #
        whole_test_timestamp = utils.timestamp()
        for index, elem in enumerate(self.measurements):
            elem["clnt_schema_version"] = 3
            elem["whole_test_timestamp"] = whole_test_timestamp
            if index < len(response_body):
                elem["srvr_data"] = response_body[index]
            BACKEND.store_generic("dash", elem)

        stream.close()

    else:
        raise RuntimeError("dash: internal error")

def _log(self, severity, message, args, exc_info):
    ''' Really log a message '''

    # No point in logging empty lines
    if not message:
        return

    #
    # Honor verbose.  We cannot leave this choice to the
    # "root" logger, because all messages must be passed
    # to the streaming feature; hence, the "root" logger
    # must always be configured to be verbose.
    #
    if not CONFIG['verbose'] and severity == 'DEBUG':
        return

    # Lazy processing
    if args:
        message = message % args
    if exc_info:
        exc_list = traceback.format_exception(exc_info[0],
                                              exc_info[1],
                                              exc_info[2])
        message = "%s\n%s\n" % (message, ''.join(exc_list))
        for line in message.split('\n'):
            self._log(severity, line, None, None)
        return

    message = message.rstrip()

    # Write the log record into the database
    if self._use_database and severity != "ACCESS":
        record = {
            "timestamp": utils.timestamp(),
            "severity": severity,
            "message": message,
        }
        #
        # We don't need to commit INFO and DEBUG records:
        # it's OK to see those with some delay.  Conversely,
        # we want to see WARNING and ERROR records immediately.
        # TODO We also need to commit the database on sys.exit(),
        # on signals, etc.  (This is more a problem of the
        # database than of this file.)
        #
        if severity in ("INFO", "DEBUG"):
            commit = False
            # Do we need to commit now?
            self._nocommit = self._nocommit - 1
            if self._nocommit <= 0:
                self._nocommit = NOCOMMIT
                commit = True
        else:
            # Must commit now
            self._nocommit = NOCOMMIT
            commit = True
        self._queue.append(record)
        if commit:
            self.writeback()

    # Write to the current logger object
    self.logger(severity, message)

def log_tuple(self, severity, message, args, exc_info):
    ''' Really log a message (without any *magic) '''

    # No point in logging empty lines
    if not message:
        return

    #
    # Streaming allows consumers to register with the log
    # object and follow the events that happen during a
    # test as if they were running the test in their local
    # context.  When the test is done, the runner of the
    # test will automatically disconnect all the attached
    # streams.
    # Log streaming makes this function less efficient,
    # because lazy processing of log records can't be
    # performed: we must pass the client all the logs, and
    # it will decide whether to be verbose.
    # Of course, passing ACCESS logs down the stream is
    # pointless for a client that wants to follow a remote
    # test.
    #
    if self.streams:
        # "Lazy" processing
        if args:
            message = message % args
            args = ()
        message = message.rstrip()
        try:
            if severity != 'ACCESS':
                logline = "%s %s\r\n" % (severity, message)
                logline = logline.encode("utf-8")
                for stream in self.streams:
                    stream.start_send(logline)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            pass

    # Not verbose? Stop processing the log record here
    if not self.noisy and severity == 'DEBUG':
        return

    # Lazy processing
    if args:
        message = message % args
        args = ()
    if exc_info:
        message = "%s: %s\n" % (message, str(exc_info[1]))
        # Make sure we do not accidentally keep the exception alive
        exc_info = None

    message = message.rstrip()

    # Write the log record into the database
    if self._use_database and severity != "ACCESS":
        record = {
            "timestamp": utils.timestamp(),
            "severity": severity,
            "message": message,
        }
        #
        # We don't need to commit INFO and DEBUG records:
        # it's OK to see those with some delay.  Conversely,
        # we want to see WARNING and ERROR records immediately.
        # TODO We also need to commit the database on sys.exit(),
        # on signals, etc.  (This is more a problem of the
        # database than of this file.)
        #
        if severity in ("INFO", "DEBUG"):
            commit = False
            # Do we need to commit now?
            self._nocommit = self._nocommit - 1
            if self._nocommit <= 0:
                self._nocommit = NOCOMMIT
                commit = True
        else:
            # Must commit now
            self._nocommit = NOCOMMIT
            commit = True
        self._queue.append(record)
        if commit:
            self._writeback()

    # Write to the current logger object
    self.logger(severity, message)

def __init__(self, delta, func):
    self.time = ticks() + delta
    self.timestamp = timestamp() + int(delta)
    self.func = func

def __init__(self, delta, func, *args, **kwargs):
    self.time = ticks() + delta
    self.timestamp = timestamp() + int(delta)
    self.func = func
    self.args = args
    self.kwargs = kwargs

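# A sketch tying together the task constructor above and the earlier
# resched(): `time` (monotonic ticks) orders tasks in the scheduler,
# while `timestamp` is the wall-clock ETA shown to the user.  The
# class name Task, the handle_event callback and the delays below are
# assumptions for illustration only.
def handle_event():
    logging.debug('example: task fired')

def task_example():
    task = Task(5, handle_event)   # run handle_event in about 5 s
    task.resched(30)               # push it about 30 s into the future
    return task.timestamp          # wall-clock ETA, in seconds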