Exemplo n.º 1
0
    def _do_negotiate(self, baton):
        ''' Respond to a /negotiate request '''
        stream, request, position = baton

        # The URI encodes the module name after the /negotiate/ prefix
        module_name = request.uri.replace('/negotiate/', '')
        handler = self.modules[module_name]
        # request.body is file-like, hence json.load() not json.loads()
        incoming = json.load(request.body)

        # A client is unchoked when its queue position fits within the
        # configured level of parallelism
        max_parallel = CONFIG['negotiate.parallelism']
        unchoked = int(position < max_parallel)

        reply_body = {
            'queue_pos': position,
            'real_address': stream.peername[0],
            'unchoked': unchoked,
        }
        if not unchoked:
            reply_body['authorization'] = ''
        else:
            extra = handler.unchoke(stream, incoming)
            if 'authorization' not in extra:
                raise RuntimeError('Negotiate API violation')
            # Standard fields take precedence over module-provided ones
            extra.update(reply_body)
            reply_body = extra

        reply = Message()
        reply.compose(code='200', reason='Ok',
                      body=json.dumps(reply_body),
                      keepalive=True,
                      mimetype='application/json')
        stream.send_response(request, reply)
Exemplo n.º 2
0
    def process_request(self, stream, request):
        ''' Process a /collect or /negotiate HTTP request '''

        #
        # We always pass upstream the collect request.  If it is
        # not authorized the module does not have the identifier in
        # its global table and will raise a KeyError.
        # Here we always keepalive=False so the HTTP layer closes
        # the connection and we are notified that the queue should
        # be changed.
        #
        if request.uri.startswith('/collect/'):
            # The URI encodes the module name after the prefix
            module = request.uri.replace('/collect/', '')
            module = self.modules[module]
            # request.body is file-like, hence json.load() not json.loads()
            request_body = json.load(request.body)

            response_body = module.collect_legacy(stream, request_body, request)
            response_body = json.dumps(response_body)

            response = Message()
            response.compose(code='200', reason='Ok', body=response_body,
                             keepalive=False, mimetype='application/json')
            stream.send_response(request, response)

        #
        # The first time we see a stream, we decide whether to
        # accept or drop it, depending on the length of the
        # queue.  The decision whether to accept or not depends
        # on the current queue length and follows the Random
        # Early Discard algorithm.  When we accept it, we also
        # register a function to be called when the stream is
        # closed so that we can update the queue.  And we
        # immediately send a response.
        # When it's not the first time we see a stream, we just
        # take note that we owe it a response.  But we won't
        # respond until its queue position changes.
        #
        elif request.uri.startswith('/negotiate/'):
            if not stream in self.known:
                # Queue position equals the current queue length
                position = len(self.queue)
                min_thresh = CONFIG['negotiate.min_thresh']
                max_thresh = CONFIG['negotiate.max_thresh']
                # Random Early Discard: drop with probability growing
                # linearly from 0 at min_thresh to 1 at max_thresh.
                # NOTE(review): assumes max_thresh > min_thresh, otherwise
                # this raises ZeroDivisionError -- confirm the config layer
                # validates these values.
                if random.random() < float(position - min_thresh) / (
                                       max_thresh - min_thresh):
                    stream.close()
                    return
                self.queue.append(stream)
                self.known.add(stream)
                # Keep the queue consistent when the stream goes away
                stream.atclose(self._update_queue)
                self._do_negotiate((stream, request, position))
            else:
                # Already queued: remember we owe this stream a response
                stream.opaque = request

        # For robustness
        else:
            raise RuntimeError('Unexpected URI')
Exemplo n.º 3
0
    def got_response(self, stream, request, response):
        ''' Invoked when the response is received '''

        if response.code == '200':
            # response.body is file-like, hence json.load() not json.loads()
            body = json.load(response.body)
            # Publish the available tests and updates globally
            RUNNER_TESTS.update(body['available'])
            RUNNER_UPDATES.update(body['update'])
            logging.info('runner_rendezvous: rendezvous complete')
        else:
            logging.info('runner_rendezvous: bad response')

        # Either way the rendezvous connection is done
        stream.close()
Exemplo n.º 4
0
    def process_request(self, stream, request):
        ''' Process HTTP request and return response '''

        response = Message()
        uri = request.uri
        body = None  # None means "unknown URI" -> 404

        if uri == '/debugmem/collect':
            # Full collection; the result is the number of objects freed
            body = gc.collect(2)

        elif uri == '/debugmem/count':
            count0, count1, count2 = gc.get_count()
            body = {
                'len_gc_objects': len(gc.get_objects()),
                'len_gc_garbage': len(gc.garbage),
                'gc_count0': count0,
                'gc_count1': count1,
                'gc_count2': count2,

                # Add the length of the most relevant globals
                'NEGOTIATE_SERVER.queue': len(NEGOTIATE_SERVER.queue),
                'NEGOTIATE_SERVER.known': len(NEGOTIATE_SERVER.known),
                'NEGOTIATE_SERVER_BITTORRENT.peers':
                    len(NEGOTIATE_SERVER_BITTORRENT.peers),
                'NEGOTIATE_SERVER_SPEEDTEST.clients':
                    len(NEGOTIATE_SERVER_SPEEDTEST.clients),
                'POLLER.readset': len(POLLER.readset),
                'POLLER.writeset': len(POLLER.writeset),
                'LOG._queue': len(LOG._queue),
                'CONFIG.conf': len(CONFIG.conf),
                'NOTIFIER._timestamps': len(NOTIFIER._timestamps),
                'NOTIFIER._subscribers': len(NOTIFIER._subscribers),
                'NOTIFIER._tofire': len(NOTIFIER._tofire),
            }

        elif uri == '/debugmem/garbage':
            body = [str(obj) for obj in gc.garbage]

        elif uri == '/debugmem/saveall':
            # Toggle DEBUG_SAVEALL and echo the requested setting back
            enable = json.load(request.body)
            flags = gc.get_debug()
            if enable:
                flags |= gc.DEBUG_SAVEALL
            else:
                flags &= ~gc.DEBUG_SAVEALL
            gc.set_debug(flags)
            body = enable

        elif uri.startswith('/debugmem/types'):
            if uri.startswith('/debugmem/types/'):
                typename = uri.replace('/debugmem/types/', '')
                body = [str(obj) for obj in objgraph.by_type(typename)]
            else:
                body = objgraph.typestats()

        # Note: body may legitimately be falsy (0 from collect, False
        # from saveall), so we must compare against None explicitly
        if body is None:
            response.compose(code="404", reason="Not Found")
        else:
            response.compose(code="200", reason="Ok",
                             body=json.dumps(body, indent=4, sort_keys=True),
                             mimetype="application/json")

        stream.send_response(request, response)
Exemplo n.º 5
0
    def got_response(self, stream, request, response):
        ''' Invoked when a negotiate or collect response is received '''

        # Any non-200 response aborts the test
        if response.code != "200":
            logging.warning("dash: http request error: %s", response.code)
            stream.close()
            return

        if self.state == STATE_NEGOTIATE:

            # response.body is file-like, hence json.load() not json.loads()
            response_body = json.load(response.body)

            #
            # Note: the following are the standard fields that
            # the negotiate API call MUST return.
            #
            self.authorization = response_body["authorization"]
            self.queue_pos = response_body["queue_pos"]
            self.real_address = response_body["real_address"]
            self.unchoked = response_body["unchoked"]

            # Still choked: report our queue position and wait for the
            # next notification on this connection
            if not self.unchoked:
                logging.info("dash: negotiate... done (queue pos %d)",
                             self.queue_pos)
                STATE.update("negotiate", {"queue_pos": self.queue_pos})
                self.connection_ready(stream)
                return

            logging.info("dash: negotiate... done (unchoked)")

            self.stream = stream

            #
            # The server may override the vector of rates with a "better"
            # vector of rates of its choice.
            #
            rates = list(response_body.get("dash_rates", DASH_RATES))

            self.client = DASHClientSmpl(self.poller, self, rates)
            self.client.configure(self.conf.copy())
            # XXX port 80 is hardcoded here; connect to the same host
            # that answered the negotiate request
            self.client.connect((self.stream.peername[0], 80))  # XXX

        elif self.state == STATE_COLLECT:

            response_body = json.load(response.body)

            #
            # We store each iteration of the test as a separate row of
            # the backend. We also add a whole test timestamp, to allow
            # one to understand which row belong to the same test.
            #
            whole_test_timestamp = utils.timestamp()

            # NOTE(review): assumes response_body is a list parallel to
            # self.measurements (server data per iteration) -- confirm
            # against the server-side collect implementation
            for index, elem in enumerate(self.measurements):
                elem["clnt_schema_version"] = 3
                elem["whole_test_timestamp"] = whole_test_timestamp
                if index < len(response_body):
                    elem["srvr_data"] = response_body[index]
                BACKEND.store_generic("dash", elem)

            stream.close()

        else:
            raise RuntimeError("dash: internal error")