def log_api(stream, request, query):
    ''' Implements /api/log '''

    # Get logs and options
    logs = LOG.listify()
    options = cgi.parse_qs(query)

    # Reverse logs on request
    if utils.intify(options.get('reversed', ['0'])[0]):
        logs = reversed(logs)

    # Filter according to verbosity
    if utils.intify(options.get('verbosity', ['1'])[0]) < 2:
        logs = [ log for log in logs if log['severity'] != 'DEBUG' ]
    if utils.intify(options.get('verbosity', ['1'])[0]) < 1:
        logs = [ log for log in logs if log['severity'] != 'INFO' ]

    # Human-readable output?
    if utils.intify(options.get('debug', ['0'])[0]):
        logs = [ '%(timestamp)d [%(severity)s]\t%(message)s\r\n' % log
                 for log in logs ]
        body = ''.join(logs).encode('utf-8')
        mimetype = 'text/plain; encoding=utf-8'
    else:
        body = json.dumps(logs)
        mimetype = 'application/json'

    # Compose and send response
    response = Message()
    response.compose(code='200', reason='Ok', body=body, mimetype=mimetype)
    stream.send_response(request, response)
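# A minimal sketch of an intify-like helper, assuming that utils.intify
# (used throughout these handlers) simply normalizes query-string and
# configuration values such as '0', '1', 'true' and 'false' into integers.
# This is an illustration, not the actual neubot implementation.
def _intify_sketch(value):
    ''' Convert a boolean-ish string into an int (illustration only) '''
    if isinstance(value, str):
        value = value.strip().lower()
        if value in ('true', 'yes', 'on'):
            return 1
        if value in ('false', 'no', 'off', ''):
            return 0
    return int(value)

assert _intify_sketch('0') == 0
assert _intify_sketch('1') == 1
assert _intify_sketch('false') == 0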
def collect_allowed(message):
    ''' We are allowed to collect a result in the database if the user
        is informed and has provided the permission to collect her
        Internet address '''
    return (utils.intify(message['privacy_informed'])
            and utils.intify(message['privacy_can_collect']))
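# Truth-table sketch of the rule implemented by collect_allowed() above:
# collection requires BOTH privacy flags (once intify'd) to be non-zero.
# Plain ints stand in for the intify'd message fields.
cases = {(0, 0): False, (0, 1): False, (1, 0): False, (1, 1): True}
for (informed, can_collect), expected in cases.items():
    assert bool(informed and can_collect) == expected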
def _api_index(self, stream, request, query):
    '''
     Redirect either to /index.html or /privacy.html depending on
     whether the user has already set privacy permissions or not
    '''
    response = Message()
    if (not utils.intify(CONFIG['privacy.informed'])
            or not utils.intify(CONFIG['privacy.can_collect'])):
        response.compose_redirect(stream, '/privacy.html')
    else:
        response.compose_redirect(stream, '/index.html')
    stream.send_response(request, response)
def collect_allowed(m):
    ''' Returns True if we are allowed to collect a result into the
        database, and False otherwise '''
    if type(m) != types.DictType:
        #
        # XXX This is a shame therefore put the oops() and hope that
        # it does its moral suasion job as expected.
        #
        LOG.oops("TODO: please pass me a dictionary!", LOG.debug)
        m = m.__dict__
    return (not utils.intify(m["privacy_informed"])
            or utils.intify(m["privacy_can_collect"]))
def main(args):
    CONFIG.register_descriptions({
        "speedtest.client.uri": "Base URI to connect to",
        "speedtest.client.nconn": "Number of concurrent connections to use",
        "speedtest.client.latency_tries": "Number of latency measurements",
    })
    common.main("speedtest.client", "Speedtest client", args)
    conf = CONFIG.copy()

    #
    # If possible use the runner, which will execute the
    # test in the context of the neubot daemon.  Then exit
    # to bypass the POLLER.loop() invocation that is below
    # here.
    # If the runner fails, fall back to the usual code path,
    # which executes the test in the context of the local
    # process.
    # Set 'runner.enabled' to 0 to bypass the runner and
    # run the test locally.
    #
    if (utils.intify(conf['runner.enabled']) and
            runner_clnt.runner_client(conf["agent.api.address"],
                                      conf["agent.api.port"],
                                      LOG.noisy, "speedtest")):
        sys.exit(0)

    LOG.info('Will run the test in the local context...')

    client = ClientSpeedtest(POLLER)
    client.configure(conf)
    client.connect_uri()

    POLLER.loop()
def api_data(stream, request, query):
    ''' Get data stored on the local database '''
    since, until = -1, -1

    test = ''
    dictionary = cgi.parse_qs(query)
    if "test" in dictionary:
        test = str(dictionary["test"][0])
    if "since" in dictionary:
        since = int(dictionary["since"][0])
    if "until" in dictionary:
        until = int(dictionary["until"][0])

    if test == 'bittorrent':
        table = table_bittorrent
    elif test == 'speedtest':
        table = table_speedtest
    elif test == 'raw':
        table = table_raw
    else:
        raise NotImplementedTest("Test not implemented")

    indent, mimetype, sort_keys = None, "application/json", False
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype, sort_keys = 4, "text/plain", True

    response = Message()
    lst = table.listify(DATABASE.connection(), since, until)
    body = json.dumps(lst, indent=indent, sort_keys=sort_keys)
    response.compose(code="200", reason="Ok", body=body, mimetype=mimetype)
    stream.send_response(request, response)
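# Sketch of the query-string shape api_data() expects.  parse_qs() maps
# every key onto a *list* of values, which is why the handlers above index
# with [0] before intify'ing or int()'ing.  cgi.parse_qs is the same parser,
# nowadays deprecated in favour of the urlparse/urllib.parse version.
try:
    from urllib.parse import parse_qs    # Python 3
except ImportError:
    from urlparse import parse_qs        # Python 2
options = parse_qs('test=speedtest&since=1300000000&debug=1')
assert options['test'] == ['speedtest']
assert int(options['since'][0]) == 1300000000
assert int(options['debug'][0]) == 1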
def _api_config(self, stream, request, query):
    response = Message()

    indent, mimetype, sort_keys = None, "application/json", False
    dictionary = cgi.parse_qs(query)
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype, sort_keys = 4, "text/plain", True

    if request.method == "POST":
        s = request.body.read()
        updates = qs_to_dictionary(s)
        privacy.check(updates)
        # Very low barrier to prevent damage from kiddies
        if "agent.interval" in updates:
            interval = int(updates["agent.interval"])
            if interval < 1380 and interval != 0:
                raise ConfigError("Bad agent.interval")
        CONFIG.merge_api(updates, DATABASE.connection())
        STATE.update("config", updates)
        # Empty JSON b/c '204 No Content' is treated as an error
        s = "{}"
    else:
        s = json.dumps(CONFIG.conf, sort_keys=sort_keys, indent=indent)

    stringio = StringIO.StringIO(s)
    response.compose(code="200", reason="Ok", body=stringio,
                     mimetype=mimetype)
    stream.send_response(request, response)
def got_response(self, stream, request, response):
    m = marshal.unmarshal_object(response.body.read(), "text/xml",
                                 SpeedtestNegotiate_Response)
    self.conf["speedtest.client.authorization"] = m.authorization
    self.conf["speedtest.client.public_address"] = m.publicAddress
    self.conf["speedtest.client.unchoked"] = utils.intify(m.unchoked)
    if m.queuePos:
        self.conf["speedtest.client.queuepos"] = m.queuePos
def api_results(stream, request, query):
    ''' Populates the www/results.html page '''

    dictionary = cgi.parse_qs(query)

    test = CONFIG['www_default_test_to_show']
    if 'test' in dictionary:
        test = str(dictionary['test'][0])

    # Read the directory each time, so you don't need to restart the daemon
    # after you have changed the description of a test.
    available_tests = {}
    for filename in os.listdir(TESTDIR):
        if filename.endswith('.json'):
            index = filename.rfind('.json')
            if index == -1:
                raise RuntimeError('api_results: internal error')
            name = filename[:index]
            available_tests[name] = filename
    if test not in available_tests:
        raise NotImplementedTest('Test not implemented')

    # Allow power users to customize results.html heavily, by creating JSON
    # descriptions with local modifications.
    filepath = utils_path.append(TESTDIR, available_tests[test], False)
    if not filepath:
        raise RuntimeError("api_results: append() path failed")
    localfilepath = filepath + '.local'
    if os.path.isfile(localfilepath):
        filep = open(localfilepath, 'rb')
    else:
        filep = open(filepath, 'rb')
    response_body = json.loads(filep.read())
    filep.close()

    # Add extra information needed to populate the results.html selection
    # that allows the user to select which test results must be shown.
    response_body['available_tests'] = available_tests.keys()
    response_body['selected_test'] = test
    descrpath = filepath.replace('.json', '.html')
    if os.path.isfile(descrpath):
        filep = open(descrpath, 'rb')
        response_body['description'] = filep.read()
        filep.close()

    # Provide the web user interface some settings it needs, but only if they
    # were not already provided by the `.local` file.
    for variable in COPY_CONFIG_VARIABLES:
        if variable not in response_body:
            response_body[variable] = CONFIG[variable]

    # Note: DO NOT sort keys here: order MUST be preserved
    indent, mimetype = None, 'application/json'
    if 'debug' in dictionary and utils.intify(dictionary['debug'][0]):
        indent, mimetype = 4, 'text/plain'

    response = Message()
    body = json.dumps(response_body, indent=indent)
    response.compose(code='200', reason='Ok', body=body, mimetype=mimetype)
    stream.send_response(request, response)
def api_results(stream, request, query):
    ''' Provide results for queried tests '''
    since, until = -1, -1

    test = ''
    dictionary = cgi.parse_qs(query)
    if "test" in dictionary:
        test = str(dictionary["test"][0])
    if "since" in dictionary:
        since = int(dictionary["since"][0])
    if "until" in dictionary:
        until = int(dictionary["until"][0])

    if test == 'bittorrent':
        table = table_bittorrent
    elif test == 'speedtest':
        table = table_speedtest
    else:
        raise NotImplementedTest("Test '%s' is not implemented" % test)

    indent, mimetype, sort_keys = None, "application/json", False
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype, sort_keys = 4, "text/plain", True

    response = Message()
    lst = table.listify(DATABASE.connection(), since, until)
    body = json.dumps(lst, indent=indent, sort_keys=sort_keys)
    response.compose(code="200", reason="Ok", body=body, mimetype=mimetype)
    stream.send_response(request, response)
def log_api(stream, request, query):
    ''' Implements /api/log '''

    #
    # CAVEAT Currently Neubot does not update logs "in real
    # time" using AJAX.  If it did, we would run into trouble,
    # because each request for /api/log would generate a new
    # access log record.  In turn, a new access log record
    # would cause a new "logwritten" event, leading to a
    # log-caused Comet storm.
    #

    # Get logs and options
    logs = LOG.listify()
    options = cgi.parse_qs(query)

    # Reverse logs on request
    if utils.intify(options.get('reversed', ['0'])[0]):
        logs = reversed(logs)

    # Filter according to verbosity
    if utils.intify(options.get('verbosity', ['1'])[0]) < 2:
        logs = [ log for log in logs if log['severity'] != 'DEBUG' ]
    if utils.intify(options.get('verbosity', ['1'])[0]) < 1:
        logs = [ log for log in logs if log['severity'] != 'INFO' ]

    # Human-readable output?
    if utils.intify(options.get('debug', ['0'])[0]):
        logs = [ '%(timestamp)d [%(severity)s]\t%(message)s\r\n' % log
                 for log in logs ]
        body = ''.join(logs).encode('utf-8')
        mimetype = 'text/plain; encoding=utf-8'
    else:
        body = json.dumps(logs)
        mimetype = 'application/json'

    # Compose and send response
    response = Message()
    response.compose(code='200', reason='Ok', body=body, mimetype=mimetype)
    stream.send_response(request, response)
def _api_configlabels(self, stream, request, query):

    indent, mimetype = None, "application/json"
    dictionary = cgi.parse_qs(query)
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype = 4, "text/plain"

    response = Message()
    s = json.dumps(CONFIG.descriptions, sort_keys=True, indent=indent)
    stringio = StringIO.StringIO(s)
    response.compose(code="200", reason="Ok", body=stringio,
                     mimetype=mimetype)
    stream.send_response(request, response)
def count_valid(updates, prefix):
    ''' Return the number of valid privacy settings found and
        return -1 in case of error '''
    count = 0
    for setting in ('informed', 'can_collect', 'can_publish'):
        name = "%s%s" % (prefix, setting)
        if name in updates:
            value = utils.intify(updates[name])
            if not value:
                return -1
            count += 1
    return count
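# Behavioural sketch of count_valid() above, using plain int() in place of
# utils.intify (assumption: intify('1') == 1 and intify('0') == 0).
def _count_valid_sketch(updates, prefix):
    count = 0
    for setting in ('informed', 'can_collect', 'can_publish'):
        name = prefix + setting
        if name in updates:
            if not int(updates[name]):
                return -1          # a zero value invalidates the update
            count += 1
    return count

assert _count_valid_sketch({'privacy.informed': '1',
                            'privacy.can_collect': '1'}, 'privacy.') == 2
assert _count_valid_sketch({'privacy.informed': '0'}, 'privacy.') == -1
assert _count_valid_sketch({}, 'privacy.') == 0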
def print_settings(connection, database_path):
    ''' Print privacy settings and exit '''
    sys.stdout.write(USAGE + '\n')
    sys.stdout.write('Current database: %s\n' % database_path)
    sys.stdout.write('Current settings:\n')

    dictionary = table_config.dictionarize(connection)
    for name, value in dictionary.items():
        if name.startswith('privacy.'):
            sys.stdout.write(' %-20s: %d\n' % (name, utils.intify(value)))

    sys.stdout.write('\n')
    return 0
def print_settings(connection, database_path):
    ''' Print privacy settings and exit '''
    sys.stdout.write('database: %s\n' % database_path)
    sys.stdout.write('settings:\n')

    dictionary = table_config.dictionarize(connection)
    for name, value in dictionary.items():
        if name.startswith('privacy.'):
            name = name.replace("privacy.", "")
            sys.stdout.write(' %-12s: %d\n' % (name, utils.intify(value)))

    sys.stdout.write('\n')
    return 0
def _api_state_complete(self, event, context):
    stream, request, query, t = context

    indent, mimetype = None, "application/json"
    dictionary = cgi.parse_qs(query)
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype = 4, "text/plain"

    dictionary = STATE.dictionarize()
    octets = json.dumps(dictionary, indent=indent)
    stringio = StringIO.StringIO(octets)
    response = Message()
    response.compose(code="200", reason="Ok", body=stringio,
                     mimetype=mimetype)
    stream.send_response(request, response)
def _api_state_complete(event, context):
    ''' Callback invoked when the /api/state has changed '''
    stream, request, query, otime = context

    indent, mimetype = None, "application/json"
    dictionary = cgi.parse_qs(query)
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype = 4, "text/plain"

    dictionary = STATE.dictionarize()
    octets = json.dumps(dictionary, indent=indent)
    response = Message()
    response.compose(code="200", reason="Ok", body=octets,
                     mimetype=mimetype)
    stream.send_response(request, response)
def _api_speedtest(self, stream, request, query):
    since, until = -1, -1

    dictionary = cgi.parse_qs(query)
    if "since" in dictionary:
        since = int(dictionary["since"][0])
    if "until" in dictionary:
        until = int(dictionary["until"][0])

    indent, mimetype, sort_keys = None, "application/json", False
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype, sort_keys = 4, "text/plain", True

    response = Message()
    lst = table_speedtest.listify(DATABASE.connection(), since, until)
    s = json.dumps(lst, indent=indent, sort_keys=sort_keys)
    stringio = StringIO.StringIO(s)
    response.compose(code="200", reason="Ok", body=stringio,
                     mimetype=mimetype)
    stream.send_response(request, response)
def _api_log(self, stream, request, query):
    response = Message()

    dictionary = cgi.parse_qs(query)
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        stringio = StringIO.StringIO()
        for row in LOG.listify():
            ln = "%d [%s]\t%s" % (row["timestamp"], row["severity"],
                                  row["message"])
            stringio.write(ln.encode("utf-8"))
            stringio.write("\r\n")
        stringio.seek(0)
        mimetype = "text/plain"
    else:
        s = json.dumps(LOG.listify())
        stringio = StringIO.StringIO(s)
        mimetype = "application/json"

    response.compose(code="200", reason="Ok", body=stringio,
                     mimetype=mimetype)
    stream.send_response(request, response)
def check(updates):
    ''' Raises ConfigError if the user is trying to update the privacy
        settings in a wrong way '''

    # Merge a copy of the config with the privacy settings
    conf = CONFIG.copy()
    for key in PRIVACYKEYS:
        if key in updates:
            conf[key] = utils.intify(updates[key])

    # Extract privacy settings from such copy
    informed = utils.intify(conf.get("privacy.informed", 0))
    can_collect = utils.intify(conf.get("privacy.can_collect", 0))
    can_share = utils.intify(conf.get("privacy.can_share", 0))

    if not informed:
        # When you're not informed you cannot raise the other settings
        if can_collect or can_share:
            raise ConfigError("You cannot set can_collect or can_share "
                              "without asserting that you are informed")

    else:
        # It's not possible to share if you don't agree to collect
        if can_share and not can_collect:
            raise ConfigError("You cannot set can_share without also "
                              "setting can_collect (how are we supposed "
                              "to share what we cannot collect?)")

        # You must give the can_collect bit
        if not can_collect:
            raise ConfigError("You must agree to collect or Neubot "
                              "cannot work.  You should uninstall Neubot "
                              "if you don't want it to collect")

    # You cannot remove the informed bit
    if utils.intify(CONFIG['privacy.informed']) and not informed:
        raise ConfigError("Once you're informed you cannot step back")

    # You cannot remove the can_collect bit
    if utils.intify(CONFIG['privacy.can_collect']) and not can_collect:
        raise ConfigError("You can't remove the can_collect bit, because "
                          "otherwise Neubot cannot work.  You should "
                          "uninstall Neubot if you don't want it to collect")
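# Minimal standalone sketch of the rules enforced by check() above, ignoring
# the "cannot step back" checks that need the current CONFIG values.  Returns
# True where check() would pass and False where it would raise ConfigError.
def _privacy_ok_sketch(informed, can_collect, can_share):
    if not informed:
        return not (can_collect or can_share)
    if can_share and not can_collect:
        return False
    return bool(can_collect)

assert _privacy_ok_sketch(1, 1, 1)
assert _privacy_ok_sketch(1, 1, 0)
assert not _privacy_ok_sketch(1, 0, 1)    # share without collect
assert not _privacy_ok_sketch(0, 1, 0)    # collect without informed consent
assert not _privacy_ok_sketch(1, 0, 0)    # informed but refuses collection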
def api_data(stream, request, query):
    ''' Get data stored on the local database '''
    since, until = -1, -1

    test = ''
    dictionary = cgi.parse_qs(query)
    if "test" in dictionary:
        test = str(dictionary["test"][0])
    if "since" in dictionary:
        since = int(dictionary["since"][0])
    if "until" in dictionary:
        until = int(dictionary["until"][0])

    if test == 'bittorrent':
        table = table_bittorrent
    elif test == 'speedtest':
        table = table_speedtest
    elif test == 'raw':
        table = table_raw
    else:
        table = None

    indent, mimetype, sort_keys = None, "application/json", False
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype, sort_keys = 4, "text/plain", True

    response = Message()
    if table:
        lst = table.listify(DATABASE.connection(), since, until)

    #
    # TODO We should migrate all the tests to use the new
    # generic interface.  At that point, we can also change
    # the API to access "pages" of data by index.
    #
    # Until we change the API, we have an API that allows
    # the caller to specify date ranges.  For this reason
    # below we emulate the date-ranges semantics provided
    # by database-based tests.
    #
    # Note: we assume that, whatever the test structure,
    # there is a field called "timestamp".
    #
    else:
        lst = []
        indexes = [None]
        indexes.extend(range(16))
        for index in indexes:
            tmp = BACKEND.walk_generic(test, index)
            if not tmp:
                break
            found_start = False
            for elem in reversed(tmp):
                if until >= 0 and elem["timestamp"] > until:
                    continue
                if since >= 0 and elem["timestamp"] < since:
                    found_start = True
                    break
                lst.append(elem)
            if found_start:
                break

    body = json.dumps(lst, indent=indent, sort_keys=sort_keys)
    response.compose(code="200", reason="Ok", body=body, mimetype=mimetype)
    stream.send_response(request, response)
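# Toy sketch of the date-range emulation above: walk a page of results from
# newest to oldest (assuming pages are ordered oldest-first, as reversed()
# suggests), skip entries newer than `until`, stop at the first entry older
# than `since`, and keep everything in between.
def _select_range_sketch(page, since, until):
    selected = []
    for elem in reversed(page):
        if until >= 0 and elem['timestamp'] > until:
            continue
        if since >= 0 and elem['timestamp'] < since:
            break
        selected.append(elem)
    return selected

page = [{'timestamp': stamp} for stamp in (10, 20, 30, 40)]
assert _select_range_sketch(page, 20, 30) == [{'timestamp': 30},
                                              {'timestamp': 20}]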
def main(argv):

    slowpath = False
    webgui = False
    start = False
    status = False
    stop = False

    if sys.version_info[0] > 2 or sys.version_info[1] < 5:
        sys.stderr.write("fatal: wrong Python version\n")
        sys.stderr.write("please run neubot using Python >= 2.5 and < 3.0\n")
        sys.exit(1)

    if os.environ.get("NEUBOT_DEBUG", ""):
        from neubot import utils
        if utils.intify(os.environ["NEUBOT_DEBUG"]):
            sys.stderr.write("Running in debug mode\n")
            from neubot.debug import PROFILER
            sys.setprofile(PROFILER.notify_event)

    if os.environ.get("NEUBOT_MEMLEAK", ""):
        from neubot import utils
        if utils.intify(os.environ["NEUBOT_MEMLEAK"]):
            sys.stderr.write("Running in leak-detection mode\n")
            import gc
            gc.set_debug(gc.DEBUG_LEAK)

    # Hook for Neubot for MacOSX
    if os.name == 'posix' and sys.platform == 'darwin':
        from neubot import main_macos
        main_macos.main(argv)
        return

    # Quick argv classification

    if len(argv) == 1:
        start = True
        webgui = True

    elif len(argv) == 2:
        command = argv[1]
        if command == "--help":
            sys.stdout.write(USAGE)
            sys.exit(0)
        elif command == "-V":
            sys.stdout.write(VERSION + "\n")
            sys.exit(0)
        elif command == "start":
            start = True
        elif command == "status":
            status = True
        elif command == "stop":
            stop = True
        else:
            slowpath = True

    else:
        slowpath = True

    # Slow / quick startup

    if slowpath:
        from neubot.main import module
        module.run(argv)

    else:
        running = False

        # Running?
        if start or status or stop:
            try:
                import httplib
                connection = httplib.HTTPConnection("127.0.0.1", "9774")
                connection.request("GET", "/api/version")
                response = connection.getresponse()
                if response.status == 200:
                    running = True
                response.read()
                connection.close()
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass

        if status:
            if not running:
                sys.stdout.write("Not running\n")
            else:
                sys.stdout.write("Running\n")
            sys.exit(0)

        if running and start:
            sys.stdout.write("Already running\n")
        if not running and stop:
            sys.stdout.write("Not running\n")

        # Stop
        if running and stop:
            try:
                connection = httplib.HTTPConnection("127.0.0.1", "9774")
                connection.request("POST", "/api/exit")
                # New /api/exit does not send any response
                #response = connection.getresponse()
                connection.close()
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                logging.error('Exception', exc_info=1)
                sys.exit(1)

        # start / webbrowser
        if os.name == "posix":

            #
            # Fork off a child and use it to start the
            # Neubot agent.  The parent process will
            # open the browser, if needed.  Otherwise
            # it will exit.
            #
            if not running and start:
                if os.fork() == 0:
                    from neubot import agent
                    arguments = [ argv[0] ]
                    agent.main(arguments)
                    sys.exit(0)

            #
            # It's not wise at all to open the browser when
            # we are running as root.  Assume that when we
            # are root the user wants just to start the agent.
            #
            if webgui and "DISPLAY" in os.environ:
                if os.getuid() != 0:
                    from neubot.main import browser
                    browser.open_patient("127.0.0.1", "9774")

        elif os.name == "nt":

            if webgui:
                from neubot.main import browser
                if not running and start:
                    browser.open_patient("127.0.0.1", "9774", True)
                else:
                    browser.open_patient("127.0.0.1", "9774")

            if not running and start:
                from neubot import agent
                agent.main([argv[0]])

        else:
            sys.stderr.write("Your operating system is not supported\n")
            sys.exit(1)
def main(args):
    config.register_descriptions()
    common.main("bittorrent", "Neubot BitTorrent module", args)
    conf = CONFIG.copy()
    config.finalize_conf(conf)

    if conf["bittorrent.listen"]:

        #
        # If we need to negotiate and we're running
        # standalone, we also need to bring up the
        # global HTTP server.
        #
        if conf["bittorrent.negotiate"]:
            HTTP_SERVER.configure(conf)
            HTTP_SERVER.listen((conf["bittorrent.address"],
                                conf["bittorrent.negotiate.port"]))
            conf["negotiate.listen"] = True
            negotiate.run(POLLER, conf)

        #
        # Drop privileges after listen() so we can
        # bind() to privileged ports
        #
        if conf["bittorrent.daemonize"]:
            system.change_dir()
            system.go_background()
            LOG.redirect()

        system.drop_privileges(LOG.error)

    else:

        #
        # If possible use the runner, which will execute the
        # test in the context of the neubot daemon.  Then exit
        # to bypass the run() invocation that is below here.
        # If the runner fails, fall back to the usual code path,
        # which executes the test in the context of the local
        # process.
        # Set 'runner.enabled' to 0 to bypass the runner and
        # run the test locally.
        #
        if (utils.intify(conf['runner.enabled']) and
                runner_clnt.runner_client(conf["agent.api.address"],
                                          conf["agent.api.port"],
                                          LOG.noisy, "bittorrent")):
            sys.exit(0)

        LOG.info('Will run the test in the local context...')

        #
        # When we're connecting to a remote host to perform a test
        # we want Neubot to quit at the end of the test.  When this
        # happens, the test code publishes the "testdone" event, so
        # here we prepare to intercept the event and break our main
        # loop.
        #
        NOTIFIER.subscribe("testdone",
                           lambda event, ctx: POLLER.break_loop())

    run(POLLER, conf)
    POLLER.loop()
def runner_api(stream, request, query):
    ''' Implements /api/runner '''

    response = Message()

    #
    # DO NOT allow the user to start a test when another test is in
    # progress, because I have noticed that it is confusing both from
    # the command line and from the WUI.
    #
    if RUNNER_CORE.test_is_running():
        raise ConfigError('A test is already in progress, try again later')

    #
    # If there is no query string, this API is just
    # a no-operation and returns an empty JSON body to
    # keep the AJAX code happy.
    #
    if not query:
        response.compose(code='200', reason='Ok', body='{}',
                         mimetype='application/json')
        stream.send_response(request, response)
        return

    options = cgi.parse_qs(query)

    #
    # If the query does not contain the name of the
    # test, this is an error and we must notify the
    # caller.  Raise ConfigError, which will be
    # automatically transformed into a 500 message
    # with the proper body and reason.
    #
    if 'test' not in options:
        raise ConfigError('Missing "test" option in query string')

    test = options['test'][0]

    #
    # Simple case: the caller does not want to follow the
    # test via log streaming.  We can immediately start
    # the test using the runner and, if everything is OK,
    # we can send a successful response, with an empty JSON
    # body to keep the AJAX code happy.
    #
    if ('streaming' not in options or
            not utils.intify(options['streaming'][0])):
        RUNNER_CORE.run(test, runner_api_done)
        response.compose(code='200', reason='Ok', body='{}',
                         mimetype='application/json')
        stream.send_response(request, response)
        return

    #
    # More interesting case: the caller wants to see the log
    # messages during the test via the log streaming API.
    # We prepare a successful response terminated by EOF and
    # then arrange things so that every new log message will
    # be copied to the HTTP response.
    # Then we kick off the runner; note that we do that
    # AFTER we set up the response, so that any runner errors
    # are copied to the HTTP response as well.
    # The runner core will automatically close all attached
    # streams at the end of the test.
    #
    response.compose(code='200', reason='Ok', up_to_eof=True,
                     mimetype='text/plain')
    stream.send_response(request, response)
    LOG.start_streaming(stream)
    RUNNER_CORE.run(test, runner_api_done)
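# Hypothetical client-side sketch: kick off a test via /api/runner and stream
# the live log until the runner closes the connection.  The query parameters
# match the ones parsed above; the 127.0.0.1:9774 endpoint mirrors the one
# used elsewhere in this code.
import sys
try:
    from http.client import HTTPConnection    # Python 3
except ImportError:
    from httplib import HTTPConnection        # Python 2

connection = HTTPConnection('127.0.0.1', 9774)
connection.request('GET', '/api/runner?test=speedtest&streaming=1')
streamed = connection.getresponse()
while True:
    chunk = streamed.read(4096)
    if not chunk:
        break
    sys.stdout.write(chunk.decode('utf-8', 'replace'))
connection.close()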
def config_api(stream, request, query):
    ''' Implements /api/config API '''
    # Adapted from neubot/api/server.py

    #
    # Fetch and process common options from the query
    # string; for now the only implemented option is
    # debug, which changes the semantics to return text
    # for humans instead of JSON.
    #
    mimetype = 'application/json'
    indent = None
    options = cgi.parse_qs(query)
    if utils.intify(options.get('debug', ['0'])[0]):
        mimetype = 'text/plain'
        indent = 4

    #
    # Now that we know the response format, decide what the
    # content of the response should be.  If the labels option
    # is present, we return the documentation coupled with each
    # setting.  When the method is not POST, return instead
    # the name and value of each setting.
    #
    if utils.intify(options.get('labels', ['0'])[0]):
        obj = CONFIG.descriptions
    elif request.method != 'POST':
        obj = CONFIG.conf
    else:

        #
        # When the method is POST we need to read the
        # new settings from the request body.  Settings
        # are an x-www-form-urlencoded dictionary to
        # ease AJAX programming.
        #
        body = request.body.read()
        updates = marshal.qs_to_dictionary(body)

        #
        # PRE-update checks.  We need to make sure that
        # the following things are true:
        #
        # 1. that the incoming dictionary does not contain
        #    invalid privacy settings;
        #
        # 2. that the interval between automatic tests is
        #    either reasonable or set to zero, which means
        #    that it needs to be extracted randomly.
        #
        count = privacy.count_valid(updates, 'privacy.')
        if count < 0:
            raise ConfigError('Passed invalid privacy settings')
        agent_interval = int(updates.get('agent.interval', 0))
        if agent_interval != 0 and agent_interval < 1380:
            raise ConfigError('Passed invalid agent.interval')

        # Merge settings
        CONFIG.merge_api(updates, DATABASE.connection())

        #
        # Update the state, such that, if the AJAX code is
        # tracking the state, it gets a notification that
        # some configuration variables have been modified.
        # Given that we communicate the update via that
        # channel, the response body is an empty dict to
        # keep the AJAX code happy.
        #
        STATE.update('config', updates)
        obj = {}

    #
    # Now that we know the body, prepare and send
    # the response for the client.
    #
    response = Message()
    body = json.dumps(obj, sort_keys=True, indent=indent)
    response.compose(code="200", reason="Ok", body=body,
                     mimetype=mimetype)
    stream.send_response(request, response)
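# Hypothetical sketch of a POST to /api/config: the new settings travel as an
# x-www-form-urlencoded body, which is the format qs_to_dictionary() expects.
# The keys and the 9774 port come from the code above; the loopback address
# is otherwise an assumption.
try:
    from http.client import HTTPConnection    # Python 3
    from urllib.parse import urlencode
except ImportError:
    from httplib import HTTPConnection        # Python 2
    from urllib import urlencode

updates = {'privacy.informed': 1, 'privacy.can_collect': 1,
           'agent.interval': 0}
connection = HTTPConnection('127.0.0.1', 9774)
connection.request('POST', '/api/config', urlencode(updates),
                   {'Content-Type': 'application/x-www-form-urlencoded'})
response = connection.getresponse()
assert response.status == 200
connection.close()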