def run_queue(self):
    ''' If possible run the first test in queue '''

    # Adapted from neubot/rendezvous/client.py

    if not self.queue:
        return
    if self.running:
        return

    #
    # Subscribe BEFORE starting the test, otherwise we
    # may miss the 'testdone' event if the connection
    # to the negotiator service fails, and we will stay
    # stuck forever.
    #
    NOTIFIER.subscribe('testdone', self.test_done)

    # Prevent concurrent tests
    self.running = True

    # Safely run first element in queue
    deferred = Deferred()
    deferred.add_callback(self._do_run_queue)
    deferred.add_errback(self._run_queue_error)
    deferred.callback(self.queue[0])
def _connection_lost(self, stream):
    ''' Invoked when the connection is lost '''
    deferred = Deferred()
    deferred.add_callback(self._connection_lost_internal)
    deferred.add_errback(lambda error: self._connection_lost_error(stream, error))
    deferred.callback(stream)
def run(self):
    ''' Periodically run rendezvous '''
    logging.info('background_rendezvous: automatic rendezvous...')
    deferred = Deferred()
    deferred.add_callback(self._after_rendezvous)
    deferred.add_errback(self._schedule)
    RUNNER_CORE.run('rendezvous', deferred, False, None)
def main(args):
    ''' Main function '''

    try:
        options, arguments = getopt.getopt(args[1:], 'f:nv')
    except getopt.error:
        sys.exit(USAGE)

    database_path = system.get_default_database_path()
    auto_discover = True
    for name, value in options:
        if name == '-f':
            database_path = value
        elif name == '-n':
            auto_discover = False
        elif name == '-v':
            CONFIG['verbose'] = 1

    if len(arguments) != 1 and len(arguments) != 2:
        sys.exit(USAGE)

    DATABASE.set_path(database_path)
    CONFIG.merge_database(DATABASE.connection())

    if len(arguments) == 2:
        RUNNER_TESTS.update({arguments[0]: [arguments[1]]})
        ctx = {'uri': arguments[1]}
    else:
        ctx = None

    deferred = Deferred()
    deferred.add_callback(lambda param: None)

    RUNNER_CORE.run(arguments[0], deferred, auto_discover, ctx)
    POLLER.loop()
def _retrieve_tarball(self, ctx):
    ''' Retrieve tarball for a given version '''

    if not 'result' in ctx:
        logging.error('updater_runner: no result')
        self._schedule()
        return

    length, body, error = ctx.pop('result')
    if length == -1:
        logging.info('updater_runner: %s', str(error))
        self._schedule()
        return

    logging.info('updater_runner: signature (base64): %s',
                 base64.b64encode(body))

    ctx['signature'] = body
    ctx['uri'] = updater_utils.tarball_get_uri(self.system, ctx['vinfo'])

    logging.info('updater_runner: GET %s', ctx['uri'])

    deferred = Deferred()
    deferred.add_callback(self._process_files)
    deferred.add_errback(self._handle_failure)
    RUNNER_CORE.run('dload', deferred, False, ctx)
def test_callback_to_errback(self):
    ''' Make sure we switch correctly from callback to errback '''
    deferred = Deferred()
    deferred.add_callback(self._callback)
    deferred.add_errback(self._errback)
    _call(deferred.callback, 65537)
    self.assertEqual(self.count_callbacks, 1)
    self.assertEqual(self.count_errbacks, 1)
def test_callback_chain(self):
    ''' Make sure the callback chain works '''
    deferred = Deferred()
    deferred.add_callback(self._callback)
    deferred.add_callback(self._callback)
    deferred.add_callback(self._callback)
    deferred.callback(0)
    self.assertEqual(self.counter, 3)
def test_callback_chain(self):
    """ Make sure the callback chain works """
    deferred = Deferred()
    deferred.add_callback(self._callback)
    deferred.add_callback(self._callback)
    deferred.add_callback(self._callback)
    deferred.callback(0)
    self.assertEqual(self.counter, 3)
def test_callback_to_errback(self):
    """ Make sure we switch correctly from callback to errback """
    deferred = Deferred()
    deferred.add_callback(self._callback)
    deferred.add_errback(self._errback)
    _call(deferred.callback, 65537)
    self.assertEqual(self.count_callbacks, 1)
    self.assertEqual(self.count_errbacks, 1)
def handle_write(self):
    POLLER.unset_writable(self)

    if not utils_net.isconnected(self.endpoint, self.sock):
        self._connection_failed()
        return

    deferred = Deferred()
    deferred.add_callback(self._handle_connect)
    deferred.add_errback(self._handle_connect_error)
    deferred.callback(utils.ticks())
def _periodic(self, args):
    """ Periodically snap goodput """
    stream = args[0]
    if stream.opaque:
        deferred = Deferred()
        deferred.add_callback(self._periodic_internal)
        deferred.add_errback(lambda err: self._periodic_error(stream, err))
        deferred.callback(stream)
        POLLER.sched(1, self._periodic, stream)
def run(self):
    ''' Periodically run rendezvous '''

    #
    # Apart from opening the browser, privacy actions are
    # now performed by RUNNER_CORE.
    #
    if not privacy.allowed_to_run():
        self._open_browser_on_windows('privacy.html')

    logging.info('background_rendezvous: automatic rendezvous...')

    deferred = Deferred()
    deferred.add_callback(self._after_rendezvous)
    deferred.add_errback(self._schedule)
    RUNNER_CORE.run('rendezvous', deferred, False, None)
def _after_rendezvous(self, unused):
    ''' After rendezvous actions '''

    #
    # This function is invoked both when the rendezvous fails
    # and when it succeeds. If it succeeds, we have fresh information
    # on available tests and updates and we use it. Otherwise,
    # if rendezvous fails, we may either have old information, or
    # no information, if this is the first rendezvous. In any
    # case, we do our best to use the available information.
    #

    logging.info('background_rendezvous: automatic rendezvous... done')

    # Inform the user when we have updates
    new_version = RUNNER_UPDATES.get_update_version()
    new_uri = RUNNER_UPDATES.get_update_uri()
    if new_version and new_uri and not CONFIG['win32_updater']:
        logging.info('runner_rendezvous: version %s available at %s',
                     new_version, new_uri)
        STATE.update('update', {'version': new_version,
                                'uri': new_uri})
        self._open_browser_on_windows('update.html')

    #
    # Choose the test we would like to run even if
    # we're not going to run it because tests are
    # disabled. So we can print the test name also
    # when tests are disabled.
    #
    test = RUNNER_POLICY.get_next_test()
    logging.info('background_rendezvous: chosen test: %s', test)

    # Are we allowed to run a test?
    if not CONFIG['enabled']:
        raise RuntimeError('background_rendezvous: automatic '
                           'tests disabled')

    #
    # The RAW test requires auto_discover to be True, since it uses
    # mlab-ns to discover servers. Other tests don't need that, since,
    # at the moment, they discover servers during the rendezvous. So,
    # if their auto_discover were True, they'd end up running two
    # rendezvous in a row for no good reason.
    #
    auto_discover = (test == 'raw')

    # Actually run the test
    deferred = Deferred()
    deferred.add_callback(self._schedule)
    RUNNER_CORE.run(test, deferred, auto_discover, None)
def retrieve_files(self, ctx, vinfo):
    ''' Retrieve files for a given version '''
    # Note: this is a separate function for testability
    uri = updater_utils.signature_get_uri(self.system, vinfo)
    ctx['uri'] = uri
    ctx['vinfo'] = vinfo
    logging.info('updater_runner: GET %s', uri)
    deferred = Deferred()
    deferred.add_callback(self._retrieve_tarball)
    deferred.add_errback(self._handle_failure)
    RUNNER_CORE.run('dload', deferred, False, ctx)
def run(self, test, deferred, auto_discover=True, ctx=None):
    ''' Run test and deferred when done '''

    # Always refresh adjacent servers before running a transmission test
    if auto_discover:
        logging.info('runner_core: Need to auto-discover first...')
        deferred2 = Deferred()
        deferred2.add_callback(lambda param: None)
        if test == 'raw':
            # Raw uses mlab-ns and wants a random server
            self.queue.append(('mlab-ns', deferred2, {'policy': 'random'}))
        else:
            self.queue.append(('rendezvous', deferred2, None))

    self.queue.append((test, deferred, ctx))
    self.run_queue()
def _after_rendezvous(self, unused):
    ''' After rendezvous actions '''

    #
    # This function is invoked both when the rendezvous fails
    # and when it succeeds. If it succeeds, we have fresh information
    # on available tests and updates and we use it. Otherwise,
    # if rendezvous fails, we may either have old information, or
    # no information, if this is the first rendezvous. In any
    # case, we do our best to use the available information.
    #

    logging.info('background_rendezvous: automatic rendezvous... done')

    # Inform the user when we have updates
    new_version = RUNNER_UPDATES.get_update_version()
    new_uri = RUNNER_UPDATES.get_update_uri()
    if new_version and new_uri and not CONFIG['win32_updater']:
        logging.info('runner_rendezvous: version %s available at %s',
                     new_version, new_uri)
        STATE.update('update', {'version': new_version,
                                'uri': new_uri})

    #
    # Choose the test we would like to run even if
    # we're not going to run it because tests are
    # disabled. So we can print the test name also
    # when tests are disabled.
    #
    # Note: we pick a test at random because now we
    # have a fixed probability of running a test.
    #
    test = RUNNER_POLICY.get_random_test()
    logging.info('background_rendezvous: chosen test: %s', test)

    # Are we allowed to run a test?
    if not CONFIG['enabled']:
        raise RuntimeError('background_rendezvous: automatic '
                           'tests disabled')

    #
    # The two legacy tests, speedtest and bittorrent, use the rendezvous
    # to discover the servers. Other tests use mlab-ns.
    #
    use_mlabns = (test != 'speedtest' and test != 'bittorrent')

    # Actually run the test
    deferred = Deferred()
    deferred.add_callback(self._schedule)
    RUNNER_CORE.run(test, deferred, use_mlabns, None)
def _process_collect_response(self, stream, remote_result):
    ''' Process response when in collect state '''
    context = stream.opaque
    extra = context.extra

    tmp = context.headers.get(CONTENT_TYPE)
    if context.code != CODE200 or tmp != APPLICATION_JSON:
        logging.warning('skype_negotiate: collect complete... bad response')
        stream.close()
        return

    deferred = Deferred()
    deferred.add_callback(self._save_results)
    deferred.callback((extra['local_result'], remote_result))

    extra['final_state'] = 1
    stream.close()
def test_deferred_ping_pong(self):
    ''' Make sure we ping pong between callback and errback '''
    deferred = Deferred()
    deferred.add_errback(self._errback)
    deferred.add_callback(self._callback)
    deferred.add_callback(self._callback)
    deferred.add_errback(self._errback)
    deferred.add_callback(self._callback)
    deferred.add_errback(self._errback)
    _call(deferred.callback, 0)
    self.assertEqual(self.count_callbacks, 2)
    self.assertEqual(self.count_errbacks, 2)
def test_deferred_ping_pong(self):
    """ Make sure we ping pong between callback and errback """
    deferred = Deferred()
    deferred.add_errback(self._errback)
    deferred.add_callback(self._callback)
    deferred.add_callback(self._callback)
    deferred.add_errback(self._errback)
    deferred.add_callback(self._callback)
    deferred.add_errback(self._errback)
    _call(deferred.callback, 0)
    self.assertEqual(self.count_callbacks, 2)
    self.assertEqual(self.count_errbacks, 2)
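#
# The Deferred tests above (callback chain, callback-to-errback, and
# ping-pong) pin down the semantics the rest of this code relies on:
# callbacks run in order, an exception raised by a callback diverts
# processing to the next errback, and an errback that returns normally
# hands control back to the callbacks. The class below is a minimal
# sketch of those semantics, written only for illustration; it is NOT
# neubot's actual Deferred, which (as the Stream and Connector code
# below shows) also provides callback_each_np() among other things.
#
class DeferredSketch(object):
    ''' Illustrative callback/errback chain (not the real Deferred) '''

    def __init__(self):
        self._chain = []  # ordered list of ('cb'|'eb', func) entries

    def add_callback(self, func):
        ''' Append a callback to the chain '''
        self._chain.append(('cb', func))

    def add_errback(self, func):
        ''' Append an errback to the chain '''
        self._chain.append(('eb', func))

    def callback(self, result):
        ''' Walk the chain, ping-ponging between callbacks and errbacks '''
        failed = False
        for kind, func in self._chain:
            if failed != (kind == 'eb'):
                # Skip errbacks while we are succeeding, and skip
                # callbacks while we are failing
                continue
            try:
                result = func(result)
                failed = False  # an errback that returns recovers
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception as error:
                result = error  # divert to the next errback
                failed = True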
def retrieve_versioninfo(self):
    ''' Retrieve version information '''

    #
    # The windows updater is still experimental, so it
    # is disabled by default and one needs to enable it
    # explicitly using the Web UI.
    #
    if not CONFIG['win32_updater']:
        self._schedule()
        return

    channel = CONFIG['win32_updater_channel']

    ctx = {
        'uri': updater_utils.versioninfo_get_uri(self.system, channel),
    }

    deferred = Deferred()
    deferred.add_callback(self._process_versioninfo)
    deferred.add_errback(self._handle_failure)
    RUNNER_CORE.run('dload', deferred, False, ctx)
def _process_negotiate_response(self, stream, response):
    ''' Process response when in negotiate state '''

    # Note: this function MUST be callable multiple times

    extra = stream.opaque.extra
    extra['authorization'] = response['authorization']

    if response['unchoked']:
        logging.debug('skype_negotiate: negotiate complete... unchoked')
        response['address'] = extra['address']  # XXX

        logging.debug('skype_negotiate: test in progress...')

        deferred = Deferred()
        deferred.add_callback(self._start_test)
        errback = lambda error: self._handle_test_failure(stream, error)
        deferred.add_errback(errback)
        successback = lambda state: self._handle_test_success(stream, state)
        deferred.callback((successback, errback, response, extra))
        return

    queue_pos = response['queue_pos']
    logging.debug('skype_negotiate: negotiate complete... in queue (%d)',
                  queue_pos)
    STATE.update('negotiate', {'queue_pos': queue_pos})

    self.handle_connection_made(stream)  # Tail call (sort of)
def run(self, test, deferred, auto_discover=True, ctx=None):
    ''' Run test and deferred when done '''

    if (
            test != "rendezvous" and
            test != "speedtest" and
            test != "bittorrent" and
            test != "dload" and
            test != "raw" and
            test != "mlab-ns" and
            test not in self.dynamic_tests
       ):
        utils_modules.modprobe("mod_" + test, "register_test",
                               self.dynamic_tests)

    if auto_discover:
        logging.info('runner_core: Need to auto-discover first...')
        deferred2 = Deferred()
        deferred2.add_callback(lambda param: None)
        if test == 'raw':
            self.queue.append(('mlab-ns', deferred2, {'policy': 'random'}))
        elif test == "bittorrent" or test == "speedtest":
            self.queue.append(('rendezvous', deferred2, None))
        else:
            try:
                test_rec = self.dynamic_tests[test]
                self.queue.append((test_rec["discover_method"], deferred2,
                                   {"policy": test_rec["discover_policy"]}))
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                logging.warning("runner: internal error", exc_info=1)

    self.queue.append((test, deferred, ctx))
    self.run_queue()
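#
# For reference: run() above probes "mod_" + test for a register_test()
# hook and later expects self.dynamic_tests[test] to carry at least the
# "discover_method" and "discover_policy" keys that it reads when it
# queues the discovery step. The function below is a hypothetical
# example of such a hook (the module and the 'example' test name are
# made up), sketched only from the keys run() actually uses; real
# modules may well register additional fields.
#
def register_test(dynamic_tests):
    ''' Hypothetical register_test() hook for a mod_example module '''
    dynamic_tests['example'] = {
        'discover_method': 'mlab-ns',  # queued before the test itself
        'discover_policy': 'random',   # becomes the {'policy': ...} ctx
    }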
class Stream(Pollable):
    ''' A pollable stream socket '''

    #
    # Init path: register connection_made() and connection_lost()
    # callbacks, and, if requested, configure SSL. Note that this class
    # routes the calls to another class, so the protocol does not need
    # to subclass this class, reducing explicit code dependency.
    #
    def __init__(self, sock, connection_made, connection_lost,
                 sslconfig, sslcert, opaque):
        Pollable.__init__(self)

        self.filenum = sock.fileno()
        self.myname = utils_net.getsockname(sock)
        self.peername = utils_net.getpeername(sock)
        self.logname = '%s %s' % (utils_net.format_epnt(self.myname),
                                  utils_net.format_epnt(self.peername))

        logging.debug('stream: __init__(): %s', self.logname)

        # Variables pointing to other objects
        self.atclose = Deferred()
        self.atconnect = Deferred()
        self.opaque = opaque
        self.recv_complete = None
        self.send_complete = None
        self.send_octets = EMPTY_STRING
        self.sock = None

        # Variables we don't need to clear
        self.bytes_in = 0
        self.bytes_out = 0
        self.conn_rst = False
        self.eof = False
        self.isclosed = False
        self.recv_bytes = 0
        self.recv_blocked = False
        self.send_blocked = False

        self.atclose.add_callback(connection_lost)
        self.atconnect.add_callback(connection_made)
        self.atconnect.add_errback(self._connection_made_error)

        if not sslconfig:
            self.sock = _stream_wrapper(sock)
            self.atconnect.callback(self)
            return

        #
        # Lazy import: this fails on Python 2.5, because SSL is not part
        # of the 2.5 standard library. We do not intercept the error here,
        # because the accept() code already needs to set up a try..except
        # to route any error away from the listening socket.
        #
        from neubot import sslstream

        #
        # If there is SSL support, initialise() deals transparently with
        # SSL negotiation, and invokes connection_made() when done. Errors
        # are routed to the POLLER, which generates CLOSE events
        # accordingly.
        #
        sslstream.initialise(self, sock, sslcert)

    def _connection_made_error(self, exception):
        ''' Invoked when the connection_made() callback fails '''
        logging.warning('stream: connection_made() failed: %s',
                        str(exception))
        POLLER.close(self)

    #
    # Close path: the close() function simply tells the poller to generate
    # the handle_close() event; the handle_close() function is reentrant
    # and invokes the registered callback functions.
    #

    def register_cleanup(self, func):
        ''' Register a cleanup function '''
        self.atclose.add_callback(func)

    def close(self):
        ''' Close the stream '''
        POLLER.close(self)

    def handle_close(self):
        if self.isclosed:
            return
        logging.debug('stream: closing %s', self.logname)
        self.isclosed = True

        self.atclose.callback_each_np(self)
        self.sock.close()

        self.atclose = None
        self.atconnect = None
        self.opaque = None
        self.recv_complete = None
        self.send_complete = None
        self.send_octets = None
        self.sock = None

    def __del__(self):
        logging.debug('stream: __del__(): %s', self.logname)

    #
    # Receive path: the protocol invokes recv() to start an async recv()
    # operation, the poller invokes handle_read() when the socket becomes
    # readable, and handle_read() invokes recv_complete() when the recv()
    # is complete.
    #

    def recv(self, recv_bytes, recv_complete):
        ''' Async recv() '''

        if self.isclosed:
            raise RuntimeError('stream: recv() on a closed stream')
        if self.recv_bytes > 0:
            raise RuntimeError('stream: already recv()ing')
        if recv_bytes <= 0:
            raise RuntimeError('stream: invalid recv_bytes')

        self.recv_bytes = recv_bytes
        self.recv_complete = recv_complete

        if self.recv_blocked:
            logging.debug('stream: recv() is blocked')
            return

        POLLER.set_readable(self)

    def handle_read(self):

        #
        # Deal with the case where recv() is blocked by send(), which
        # happens when we are using SSL and write() returned WANT_READ.
        # In the common case, this costs just one extra if in the fast
        # path.
        #
        if self.recv_blocked:
            logging.debug('stream: handle_read() => handle_write()')
            POLLER.set_writable(self)
            if self.recv_bytes <= 0:
                POLLER.unset_readable(self)
            self.recv_blocked = False
            self.handle_write()
            return

        status, octets = self.sock.sorecv(self.recv_bytes)

        #
        # Optimisation: reorder if branches such that the ones more
        # relevant for better performance come first. Testing in early
        # 2011 showed that this arrangement allows to gain a little more
        # speed. (And the code is still readable.)
        #

        if status == SUCCESS and octets:
            self.bytes_in += len(octets)
            self.recv_bytes = 0
            POLLER.unset_readable(self)
            self.recv_complete(self, octets)
            return

        if status == WANT_READ:
            return

        if status == WANT_WRITE:
            logging.debug('stream: blocking send()')
            POLLER.unset_readable(self)
            POLLER.set_writable(self)
            self.send_blocked = True
            return

        if status == SUCCESS and not octets:
            logging.debug('stream: EOF')
            self.eof = True
            POLLER.close(self)
            return

        if status == CONNRST and not octets:
            logging.debug('stream: RST')
            self.conn_rst = True
            POLLER.close(self)
            return

        raise RuntimeError('stream: invalid status')

    #
    # Send path: the protocol invokes send() to start an async send()
    # operation, the poller invokes handle_write() when the underlying
    # socket becomes writable, and handle_write() invokes send_complete()
    # when the send() is complete.
    #

    def send(self, send_octets, send_complete):
        ''' Async send() '''

        if self.isclosed:
            raise RuntimeError('stream: send() on a closed stream')
        if self.send_octets:
            raise RuntimeError('stream: already send()ing')

        self.send_octets = send_octets
        self.send_complete = send_complete

        if self.send_blocked:
            logging.debug('stream: send() is blocked')
            return

        POLLER.set_writable(self)

    def handle_write(self):

        #
        # Deal with the case where send() is blocked by recv(), which
        # happens when we are using SSL and recv() returned WANT_WRITE.
        # In the common case, this costs just one extra if in the fast
        # path.
        #
        if self.send_blocked:
            logging.debug('stream: handle_write() => handle_read()')
            POLLER.set_readable(self)
            if not self.send_octets:
                POLLER.unset_writable(self)
            self.send_blocked = False
            self.handle_read()
            return

        status, count = self.sock.sosend(self.send_octets)

        #
        # Optimisation: reorder if branches such that the ones more
        # relevant for better performance come first. Testing in early
        # 2011 showed that this arrangement allows to gain a little more
        # speed. (And the code is still readable.)
        #

        if status == SUCCESS and count > 0:
            self.bytes_out += count

            if count == len(self.send_octets):
                POLLER.unset_writable(self)
                self.send_octets = EMPTY_STRING
                self.send_complete(self)
                return

            if count < len(self.send_octets):
                self.send_octets = six.buff(self.send_octets, count)
                return

            raise RuntimeError('stream: invalid count')

        if status == WANT_WRITE:
            return

        if status == WANT_READ:
            logging.debug('stream: blocking recv()')
            POLLER.unset_writable(self)
            POLLER.set_readable(self)
            self.recv_blocked = True
            return

        if status == CONNRST and count == 0:
            logging.debug('stream: RST')
            self.conn_rst = True
            POLLER.close(self)
            return

        if status == SUCCESS and count < 0:
            raise RuntimeError('stream: negative count')

        raise RuntimeError('stream: invalid status')

    #
    # Miscellaneous functions
    #

    def __repr__(self):
        return self.logname

    def fileno(self):
        return self.filenum
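#
# A minimal usage sketch for the Stream class above, derived only from
# the callback signatures it invokes: connection_made(stream),
# recv_complete(stream, octets), send_complete(stream) and
# connection_lost(stream). The EchoProtocol name and the 4096-byte
# read size are made up for illustration; real protocols in this code
# base are considerably richer.
#
class EchoProtocol(object):
    ''' Hypothetical protocol: echo back whatever we receive '''

    def connection_made(self, stream):
        ''' Invoked by Stream (via atconnect) once connected '''
        stream.recv(4096, self.recv_complete)

    def recv_complete(self, stream, octets):
        ''' Invoked by handle_read() when the recv() is complete '''
        stream.send(octets, self.send_complete)

    def send_complete(self, stream):
        ''' Invoked by handle_write() when the send() is complete '''
        stream.recv(4096, self.recv_complete)

    def connection_lost(self, stream):
        ''' Invoked by handle_close() via atclose '''
        logging.debug('echo: connection %s closed', stream.logname)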
def runner_api(stream, request, query):
    ''' Implements /api/runner '''

    response = Message()

    #
    # DO NOT allow starting a test when another test is in
    # progress, because I have noticed that it is confusing,
    # both from the command line and from the WUI.
    #
    if RUNNER_CORE.test_is_running():
        raise ConfigError('A test is already in progress, try again later')

    #
    # If there is no query string, this API is just
    # a no-operation and returns an empty JSON body to
    # keep the AJAX code happy.
    #
    if not query:
        response.compose(code='200', reason='Ok', body='{}',
                         mimetype='application/json')
        stream.send_response(request, response)
        return

    options = cgi.parse_qs(query)

    #
    # If the query does not contain the name of the
    # test, this is an error and we must notify the
    # caller. Raise ConfigError, which will be
    # automatically transformed into a 500 message
    # with the proper body and reason.
    #
    if not 'test' in options:
        raise ConfigError('Missing "test" option in query string')

    test = options['test'][0]

    #
    # Simple case: the caller does not want to follow the
    # test via log streaming. We can immediately start
    # the test using the runner and, if everything is OK,
    # we can send a successful response, with an empty JSON
    # body to keep the AJAX code happy.
    #
    if not 'streaming' in options or not utils.intify(options['streaming'][0]):
        deferred = Deferred()
        deferred.add_callback(runner_api_done)
        RUNNER_CORE.run(test, deferred, True, 'idle')
        response.compose(code='200', reason='Ok', body='{}',
                         mimetype='application/json')
        stream.send_response(request, response)
        return

    #
    # More interesting case: the caller wants to see the log
    # messages during the test via the log streaming API.
    # We prepare a successful response terminated by EOF and
    # then arrange things so that every new log message will
    # be copied to the HTTP response.
    # Then we kick off the runner; note that we do that
    # AFTER we set up the response, so that any runner errors
    # are also copied to the HTTP response.
    # The runner core will automatically close all attached
    # streams at the end of the test.
    #
    response.compose(code='200', reason='Ok', up_to_eof=True,
                     mimetype='text/plain')
    stream.send_response(request, response)
    STREAMING_LOG.start_streaming(stream)

    deferred = Deferred()
    deferred.add_callback(runner_api_done)
    RUNNER_CORE.run(test, deferred, True, 'idle')
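#
# Example requests handled by runner_api() above (the path and the
# parameter names come from the docstring and from the options parsed
# in the code; the test name is just an example):
#
#   GET /api/runner                             -> no-op, returns '{}'
#   GET /api/runner?test=speedtest              -> start test, return '{}'
#   GET /api/runner?test=speedtest&streaming=1  -> start test and stream
#                                                  log messages until EOF
#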
class Connector(Pollable):
    ''' Pollable socket connector '''

    def __init__(self, parent, endpoint, prefer_ipv6, sslconfig, extra):
        Pollable.__init__(self)
        self.epnts = collections.deque()
        self.parent = parent
        self.prefer_ipv6 = prefer_ipv6
        self.sslconfig = sslconfig
        self.extra = extra
        self.sock = None
        self.timestamp = 0
        self.watchdog = 10

        self.aterror = Deferred()
        self.aterror.add_callback(self.parent.handle_connect_error)

        # For logging purposes, save the original endpoint
        self.endpoint = endpoint

        if " " in endpoint[0]:
            for address in endpoint[0].split():
                tmp = (address.strip(), endpoint[1])
                self.epnts.append(tmp)
        else:
            self.epnts.append(endpoint)

        self._connect()

    def __repr__(self):
        return str(self.endpoint)

    def register_errfunc(self, func):
        ''' Register a cleanup function '''
        self.aterror.add_callback(func)

    def _connection_failed(self):
        ''' Failed to connect first available epnt '''
        if self.sock:
            POLLER.unset_writable(self)
            self.sock = None  # MUST be below unset_writable()
        if not self.epnts:
            self.aterror.callback_each_np(self)
            return
        self._connect()

    def _connect(self):
        ''' Connect first available epnt '''
        sock = utils_net.connect(self.epnts.popleft(), self.prefer_ipv6)
        if sock:
            self.sock = sock
            self.timestamp = utils.ticks()
            POLLER.set_writable(self)
        else:
            self._connection_failed()

    def fileno(self):
        return self.sock.fileno()

    def handle_write(self):
        POLLER.unset_writable(self)

        if not utils_net.isconnected(self.endpoint, self.sock):
            self._connection_failed()
            return

        deferred = Deferred()
        deferred.add_callback(self._handle_connect)
        deferred.add_errback(self._handle_connect_error)
        deferred.callback(utils.ticks())

    def _handle_connect(self, ticks):
        ''' Internally handle connect '''
        self.parent.handle_connect(self, self.sock,
                                   (ticks - self.timestamp),
                                   self.sslconfig, self.extra)

    def _handle_connect_error(self, error):
        ''' Internally handle connect error '''
        logging.warning('connector: connect() error: %s', str(error))
        self._connection_failed()

    def handle_close(self):
        self._connection_failed()
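#
# A minimal usage sketch for Connector, derived only from the parent
# methods it calls back into: handle_connect(connector, sock, rtt,
# sslconfig, extra) on success and handle_connect_error(connector)
# once every candidate endpoint has failed. The ExampleConnectorUser
# name and the endpoint are made up for illustration; the real parent
# in this code base is the stream handler that wraps the connected
# socket into a Stream.
#
class ExampleConnectorUser(object):
    ''' Hypothetical parent object for Connector '''

    def connect(self, endpoint):
        ''' Kick off an asynchronous connect, e.g. ('127.0.0.1', 8080) '''
        # Connector registers itself with the POLLER, so there is
        # nothing to keep track of here: the flow continues from the
        # two callbacks below.
        Connector(self, endpoint, False, False, None)

    def handle_connect(self, connector, sock, rtt, sslconfig, extra):
        ''' Invoked when the connection attempt succeeds '''
        logging.debug('connected to %s in %f s', str(connector), rtt)

    def handle_connect_error(self, connector):
        ''' Invoked when all candidate endpoints failed '''
        logging.warning('could not connect to %s', str(connector))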