Example #1
 def setUp(self):
     self.rp = ReceiverProtocol(Deferred())
Example #2
 def wrapper(*args, **kwargs):
     defer = Deferred()
     gen = func(*args, **kwargs)
     Task(gen, defer)
     return defer
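
The Task helper is not shown in the snippet. As a rough sketch (not the original implementation), it plausibly steps the generator, treats each yielded Deferred as a pause point, and fires the wrapper's Deferred with the generator's final result, much like Twisted's own inlineCallbacks:

class Task:
    """Minimal sketch: drive a generator whose yields are Deferreds,
    firing `done` with the generator's final result (or its failure)."""
    def __init__(self, gen, done):
        self.gen = gen
        self.done = done
        self._step(None)

    def _step(self, value):
        try:
            d = self.gen.send(value)          # resume the generator
        except StopIteration as e:
            self.done.callback(getattr(e, "value", None))
            return
        d.addCallbacks(self._step, self.done.errback)

In modern Twisted the same effect is usually obtained with defer.inlineCallbacks or, for native coroutines, defer.ensureDeferred.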
Example #3
                result_list.append(result[1]['media_id'])
            return result_list

        @stack
        def post_it(ids):
            poster = beta_blogger_blog_poster(export_info, self.log, ids,
                                              title, description, options)
            d3 = poster.do_post()
            d3.addCallback(handle_post_success)
            d3.addErrback(handle_post_failure)
            return d3

        ##
        ## Need to convert the image_ids to media_ids for posting
        ##
        d = Deferred()
        for id in image_ids:
            d.addCallback(get_media_id, id)
        d.addCallback(post_it)
        d.callback(media_list)
        return d


class beta_blogger_blog_list_getter(HTTPClientFactory):
    def __init__(self, token, log):
        self.re_ID = re.compile(
            r"^tag:blogger\.com,[0-9]+:user-([0-9]+)\.blog-([0-9]+)$")
        self.token = token
        self.log = log
        url = "http://www.blogger.com/feeds/default/blogs"
        header_dict = {
Example #4
    def run(self):
        def onExperimentSucceeded(_):
            msg("experiment succeeded")
            reactor.stop()

        def onExperimentFailed(failure):
            err("Experiment execution failed, exiting with error.")
            err(failure)
            if reactor.running:
                reactor.stop()
            reactor.addSystemEventTrigger('after', 'shutdown', sys.exit, 1)

        chdir(self._workspace_dir)

        # Step 1:
        # Inject all the config options as env variables to give sub-processes easy access to them.
        self.local_env = environ.copy()
        self.local_env.update(configToEnv(self._cfg))
        self.local_env['LOCAL_RUN'] = 'True'

        # Step 2:
        # Clear output dir before starting.
        output_dir = path.join(self._workspace_dir, 'output')
        if path.exists(output_dir):
            rmtree(output_dir)

        # Step 3:
        # Sync the working dir with the head nodes
        d = Deferred()
        d.addCallback(lambda _: self.copyWorkspaceToHeadNodes())

        # Step 4:
        # Run the set up script, both locally and in the head nodes
        d.addCallback(lambda _: self.runSetupScripts())

        # Step 5:
        # Start the tracker, either locally or on the first head node of the list.
        d.addCallback(lambda _: self.startTracker())

        # Step 6:
        # Start the config server, always locally if running instances locally as the head nodes are firewalled and
        # can only be reached from the outside through SSH.
        d.addCallback(lambda _: self.startExperimentServer())

        # Step 7:
        # Spawn both local and remote instance runner scripts, which will connect to the config server and wait for all
        # of them to be ready before starting the experiment.
        d.addCallback(lambda _: self.startInstances())

        # Step 8:
        # Collect all the data from the remote head nodes.
        d.addCallback(lambda _: self.collectOutputFromHeadNodes())

        # Step 9:
        # Extract the data and graph stuff
        d.addCallback(lambda _: self.runPostProcess())

        # TODO: From here onwards
        reactor.callLater(0, d.callback, None)
        # reactor.callLater(60, reactor.stop)

        return d.addCallbacks(onExperimentSucceeded, onExperimentFailed)
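
The shape of run() above is a common Twisted idiom: assemble the whole callback chain on an unfired Deferred, then let the reactor fire it via callLater(0, ...). A self-contained sketch of just that idiom (the step callables here are placeholders, not from the source):

from twisted.internet import reactor
from twisted.internet.defer import Deferred

def pipeline(steps):
    d = Deferred()
    for step in steps:
        # Bind `step` through a default argument; a bare lambda would
        # close over the loop variable and every callback would run the
        # last step. Steps may return Deferreds; addCallback waits on them.
        d.addCallback(lambda _, s=step: s())
    reactor.callLater(0, d.callback, None)  # fires once the reactor runs
    return d

if __name__ == "__main__":
    done = pipeline([lambda: print("step 1"), lambda: print("step 2")])
    done.addBoth(lambda _: reactor.stop())
    reactor.run()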
Example #5
 def __init__(self, ampProtocol, logDirectory, logFile):
     self._ampProtocol = ampProtocol
     self._logDirectory = logDirectory
     self._logFile = logFile
     self.endDeferred = Deferred()
Example #6
 def dataReceived(self, data):
     waiter = self.character_waiter
     self.character_waiter = Deferred()
     waiter.callback(data)
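
Ordering is the point here: a fresh Deferred replaces the waiter before the old one is fired, so a callback that immediately waits for the next chunk attaches to the new Deferred rather than the one mid-flight. A sketch of the consumer side (read_line is illustrative, not from the source):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def read_line(protocol):
    # Re-read character_waiter each iteration: every dataReceived call
    # installs a fresh Deferred before firing the previous one.
    buf = b""
    while not buf.endswith(b"\n"):
        buf += yield protocol.character_waiter
    returnValue(buf)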
Example #7
 def serverJoined(ignored):
     d1 = firstClient.packetReceived = Deferred()
     d2 = secondClient.packetReceived = Deferred()
     firstClient.transport.write(b"hello world", ("225.0.0.250", portno))
     return gatherResults([d1, d2])
Example #8
    def amqp_incoming_response_to_csr_request(self,
                                              message=None,
                                              properties=None,
                                              correlation_info=None,
                                              **kwargs):
        """
        A response from a CSR request has been received. Let's process it.

        :param message: The message itself.
        :param properties: Properties of the AMQP message.
        :param correlation_info: Any correlation data regarding the AMQP message. We can check for timing, etc.
        :return:
        """
        logger.info("Received a signed SSL/TLS certificate for: {sslname}",
                    sslname=self.sslname)
        print("sslcert: processing 2")

        logger.info("TLS cert body: {message}", message=message)
        print("sslcert: processing 3")

        if "csr_hash" not in message:
            print("sslcert: processing 4")

            logger.warn("'csr_hash' is missing from incoming amqp TLS key.")
            print("sslcert: processing 5")

            return
        print("sslcert: processing 10")

        csr_hash = message["csr_hash"]
        print("sslcert: processing 10")
        if csr_hash != self.next_csr_hash:
            print("sslcert: processing 10")
            logger.warn(
                "Incoming TLS (SSL) key hash is mismatched. Discarding. "
                "Have: {next_csr_hash}, received: {csr_hash}",
                next_csr_hash=self.next_csr_hash,
                csr_hash=csr_hash)
            return
        print("sslcert: processing 20")

        self.next_status = message["status"]
        self.next_status_msg = message["status_msg"]
        if message["status"] == "signed":
            self.next_chain_text = message["chain_text"]
            self.next_cert_text = message["cert_text"]
            self.next_signed_at = message["cert_signed_at"]
            self.next_expires_at = message["cert_expires_at"]
            self.next_is_valid = True
            self.dirty = True
            yield self.check_if_rotate_needed()  # this will rotate next into current

        print("sslcert: processing 30")
        method = None
        if self.current_is_valid is not True:
            logger.warn(
                "Received a new cert and rotated, but the new cert doesn't seem to be valid."
            )
            return
        print("sslcert: processing 40")

        if self.update_callback is not None and isinstance(
                self.update_callback, collections.abc.Callable):
            method = self.update_callback
        elif self.update_callback_type is not None and \
                self.update_callback_component is not None and \
                self.update_callback_function is not None:
            try:
                method = self._Parent._Loader.find_function(
                    self.update_callback_type,
                    self.update_callback_component,
                    self.update_callback_function,
                )
            except YomboWarning as e:
                logger.warn(
                    "Invalid update_callback information provided: {e}", e=e)
        print("sslcert: processing 50")

        logger.info(
            "Method to notify ssl requester that there's a new cert: {method}",
            method=method)

        if method is not None and isinstance(method, collections.abc.Callable):
            logger.info(
                "About to tell the SSL/TLS cert requester know we have a new cert, from: {sslname}",
                sslname=self.sslname)

            print("sslcert: processing 60")

            the_cert = self.get()
            d = Deferred()
            d.addCallback(lambda ignored: maybeDeferred(method, the_cert))
            d.addErrback(self.tell_requester_failure)
            d.callback(1)
            yield d
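
The tail of this method is a small fire-and-wait idiom: the caller-supplied callback is wrapped in maybeDeferred so it may return a plain value or a Deferred, and any failure is diverted into an errback instead of propagating into this generator. A standalone sketch of the same idiom (notify and on_failure are illustrative names, not from the source):

from twisted.internet.defer import Deferred, inlineCallbacks, maybeDeferred

@inlineCallbacks
def notify(callback, payload, on_failure):
    d = Deferred()
    d.addCallback(lambda _: maybeDeferred(callback, payload))
    d.addErrback(on_failure)   # failures stop here, not in our caller
    d.callback(None)           # kick off the chain right away
    yield d                    # wait for callback (sync or async) to finish

The intermediate Deferred is arguably redundant; yield maybeDeferred(callback, payload).addErrback(on_failure) would behave the same way.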
Example #9
    def _sendPrioritisedData(self, *args):
        """
        The data sending loop. This function repeatedly calls itself, either
        from L{Deferred}s or from
        L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}

        This function sends data on streams according to the rules of HTTP/2
        priority. It ensures that the data from each stream is interleaved
        according to the priority signalled by the client, making sure that the
        connection is used with maximal efficiency.

        This function will execute if data is available: if all data is
        exhausted, the function will place a deferred onto the L{H2Connection}
        object and wait until it is called to resume executing.
        """
        # If producing has stopped, we're done. Don't reschedule ourselves
        if not self._stillProducing:
            return

        stream = None

        while stream is None:
            try:
                stream = next(self.priority)
            except priority.DeadlockError:
                # All streams are currently blocked or not progressing. Wait
                # until a new one becomes available.
                assert self._sendingDeferred is None
                self._sendingDeferred = Deferred()
                self._sendingDeferred.addCallback(self._sendPrioritisedData)
                return

        # Wait behind the transport.
        if self._consumerBlocked is not None:
            self._consumerBlocked.addCallback(self._sendPrioritisedData)
            return

        remainingWindow = self.conn.local_flow_control_window(stream)
        frameData = self._outboundStreamQueues[stream].popleft()
        maxFrameSize = min(self.conn.max_outbound_frame_size, remainingWindow)

        if frameData is _END_STREAM_SENTINEL:
            # There's no error handling here even though this can throw
            # ProtocolError because we really shouldn't encounter this problem.
            # If we do, that's a nasty bug.
            self.conn.end_stream(stream)
            self.transport.write(self.conn.data_to_send())

            # Clean up the stream
            self._requestDone(stream)
        else:
            # Respect the max frame size.
            if len(frameData) > maxFrameSize:
                excessData = frameData[maxFrameSize:]
                frameData = frameData[:maxFrameSize]
                self._outboundStreamQueues[stream].appendleft(excessData)

            # There's deliberately no error handling here, because this just
            # absolutely should not happen.
            # If for whatever reason the max frame length is zero and so we
            # have no frame data to send, don't send any.
            if frameData:
                self.conn.send_data(stream, frameData)
                self.transport.write(self.conn.data_to_send())

            # If there's no data left, this stream is now blocked.
            if not self._outboundStreamQueues[stream]:
                self.priority.block(stream)

            # Also, if the stream's flow control window is exhausted, tell it
            # to stop.
            if self.remainingOutboundWindow(stream) <= 0:
                self.streams[stream].flowControlBlocked()

        self._reactor.callLater(0, self._sendPrioritisedData)
Example #10
 def __init__(self, id, index, state):
     super(ActiveSmpcValueWithPublicValue, self).__init__(id, index, state)
     self._public_value_deferred = Deferred()
     self._public_value = None
     self._public_value_dependents = []
Example #11
 def post_json_get_json(url, body):
     d = Deferred()
     self.push_attempts.append((d, url, body))
     return make_deferred_yieldable(d)
Example #12
    def _handle_fetch_response(self, responses):
        """The callback handling the successful response from the fetch request

        Delivers the message list to the processor, handles per-message errors
        (ConsumerFetchSizeTooSmall), triggers another fetch request

        If the processor is still processing the last batch of messages, we
        defer this processing until it's done.  Otherwise, we start another
        fetch request and submit the messages to the processor
        """
        # Successful fetch, reset our retry delay
        self.retry_delay = self.retry_init_delay
        self._fetch_attempt_count = 1

        # Check to see if we are still processing the last block we fetched...
        if self._msg_block_d:
            # We are still working through the last block of messages...
            # We have to wait until it's done, then process this response
            self._msg_block_d.addCallback(
                lambda _: self._handle_fetch_response(responses))
            return

        # No ongoing processing, great, let's get some started.
        # Request no longer outstanding, clear the deferred tracker so we
        # can refetch
        self._request_d = None
        messages = []
        try:
            for resp in responses:  # We should really only ever get one...
                if resp.partition != self.partition:
                    log.warning(
                        "%r: Got response with partition: %r not our own: %r",
                        self, resp.partition, self.partition)
                    continue
                # resp.messages is a KafkaCodec._decode_message_set_iter
                # Note that 'message' here is really an OffsetAndMessage
                for message in resp.messages:
                    # Check for messages included which are from prior to our
                    # desired offset: can happen due to compressed message sets
                    if message.offset < self._fetch_offset:
                        log.debug(
                            'Skipping message at offset: %d, because its '
                            'offset is less than our fetch offset: %d.',
                            message.offset, self._fetch_offset)
                        continue
                    # Create a 'SourcedMessage' and add it to the messages list
                    messages.append(
                        SourcedMessage(message=message.message,
                                       offset=message.offset,
                                       topic=self.topic,
                                       partition=self.partition))
                    # Update our notion of from where to fetch.
                    self._fetch_offset = message.offset + 1
        except ConsumerFetchSizeTooSmall:
            # A message was too large for us to receive, given our current
            # buffer size. Grow it until it works, or we hit our max
            # Grow by 16x up to 1MB (could result in 16MB buf), then by 2x
            factor = 2
            if self.buffer_size <= 2**20:
                factor = 16
            if self.max_buffer_size is None:
                # No limit, increase until we succeed or fail to alloc RAM
                self.buffer_size *= factor
            elif (self.max_buffer_size is not None
                  and self.buffer_size < self.max_buffer_size):
                # Limited, but currently below it.
                self.buffer_size = min(self.buffer_size * factor,
                                       self.max_buffer_size)
            else:
                # We failed, and are already at our max. Nothing we can do but
                # create a Failure and errback() our start() deferred
                log.error("Max fetch size %d too small", self.max_buffer_size)
                failure = Failure(
                    ConsumerFetchSizeTooSmall(
                        "Max buffer size:%d too small for message",
                        self.max_buffer_size))
                self._start_d.errback(failure)
                return

            log.debug(
                "Next message larger than fetch size, increasing "
                "to %d (~2x) and retrying", self.buffer_size)

        finally:
            # If we were able to extract any messages, deliver them to the
            # processor now.
            if messages:
                self._msg_block_d = Deferred()
                self._process_messages(messages)

        # start another fetch, if needed, but use callLater to avoid recursion
        self._retry_fetch(0)
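
The _msg_block_d check near the top is a serialization idiom: if a previous batch is still being processed, this call is re-queued behind its Deferred instead of running concurrently. A generic sketch of that pattern (SerialWorker is illustrative, not from the source project):

from twisted.internet.defer import Deferred

class SerialWorker:
    def __init__(self):
        self._busy_d = None          # Deferred for the batch in progress

    def handle(self, batch):
        if self._busy_d is not None:
            # Still busy: run this call again once the current batch is done.
            self._busy_d.addCallback(lambda _: self.handle(batch))
            return
        self._busy_d = Deferred()
        self._process(batch)

    def _process(self, batch):
        for item in batch:
            print("processing", item)
        d, self._busy_d = self._busy_d, None
        d.callback(None)             # release anything queued behind us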
Example #13
    def commit(self):
        """
        Commit the offset of the message we last processed if it is different
        from what we believe is the last offset committed to Kafka.

        .. note::

            It is possible to commit a smaller offset than Kafka has stored.
            This is by design, so we can reprocess a Kafka message stream if
            desired.

        On error, will retry according to :attr:`request_retry_max_attempts`
        (by default, forever).

        If called while a commit operation is in progress, and new messages
        have been processed since the last request was sent then the commit
        will fail with :exc:`OperationInProgress`.  The
        :exc:`OperationInProgress` exception wraps
        a :class:`~twisted.internet.defer.Deferred` which fires when the
        outstanding commit operation completes.

        :returns:
            A :class:`~twisted.internet.defer.Deferred` which resolves with the
            committed offset when the operation has completed.  It will resolve
            immediately if the current offset and the last committed offset do
            not differ.
        """
        # Can't commit without a consumer_group
        if not self.consumer_group:
            return fail(
                Failure(
                    InvalidConsumerGroupError("Bad Group_id:{0!r}".format(
                        self.consumer_group))))
        # short circuit if we are 'up to date', or haven't processed anything
        if ((self._last_processed_offset is None) or
            (self._last_processed_offset == self._last_committed_offset)):
            return succeed(self._last_committed_offset)

        # If we're currently processing a commit we return a failure
        # with a deferred we'll fire when the in-progress one completes
        if self._commit_ds:
            d = Deferred()
            self._commit_ds.append(d)
            return fail(OperationInProgress(d))

        # Ok, we have processed messages since our last commit attempt, and
        # we're not currently waiting on a commit request to complete:
        # Start a new one
        d = Deferred()
        self._commit_ds.append(d)

        # Send the request
        self._send_commit_request()

        # Reset the commit_looper here, rather than on success to give
        # more stability to the commit interval.
        if self._commit_looper is not None:
            self._commit_looper.reset()

        # return the deferred
        return d
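
On the caller side, the OperationInProgress failure carries the in-flight commit's Deferred (the failure.value.deferred used in the next example), so a caller can wait and retry. A sketch, assuming commit() and the OperationInProgress exception from this example:

from afkak.common import OperationInProgress  # assumed import path
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def commit_when_possible(consumer):
    while True:
        try:
            offset = yield consumer.commit()
            returnValue(offset)
        except OperationInProgress as e:
            yield e.deferred   # wait for the outstanding commit to finish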
Example #14
    def shutdown(self):
        """Gracefully shutdown the consumer

        Consumer will complete any outstanding processing, commit its current
        offsets (if so configured) and stop.

        Returns a deferred which fires with a tuple of:
        (last processed offset, last committed offset) if it was able to
        successfully commit, or errbacks with the commit failure, if any,
        or fail(RestopError) if consumer is not running.
        """
        def _handle_shutdown_commit_success(result):
            """Handle the result of the commit attempted by shutdown"""
            self._shutdown_d, d = None, self._shutdown_d
            self.stop()
            self._shuttingdown = False  # Shutdown complete
            d.callback(
                (self._last_processed_offset, self._last_committed_offset))

        def _handle_shutdown_commit_failure(failure):
            """Handle failure of commit() attempted by shutdown"""
            if failure.check(OperationInProgress):
                failure.value.deferred.addCallback(_commit_and_stop)
                return

            self._shutdown_d, d = None, self._shutdown_d
            self.stop()
            self._shuttingdown = False  # Shutdown complete
            d.errback(failure)

        def _commit_and_stop(result):
            """Commit the current offsets (if needed) and stop the consumer"""
            if not self.consumer_group:  # No consumer group, no committing
                return _handle_shutdown_commit_success(None)

            # Need to commit prior to stopping
            self.commit().addCallbacks(_handle_shutdown_commit_success,
                                       _handle_shutdown_commit_failure)

        # If we're not running, return a failure
        if self._start_d is None:
            return fail(
                Failure(
                    RestopError("Shutdown called on non-running consumer")))
        # If we're called multiple times, return a failure
        if self._shutdown_d:
            return fail(Failure(
                RestopError("Shutdown called more than once.")))
        # Set our _shuttingdown flag, so our _process_message routine will stop
        # feeding new messages to the processor, and fetches won't be retried
        self._shuttingdown = True
        # Keep track of state for debugging
        self._state = '[shutting down]'

        # Create a deferred to track the shutdown
        self._shutdown_d = d = Deferred()

        # Are we waiting for the processor to complete? If so, when it's done,
        # commit our offsets and stop.
        if self._processor_d:
            self._processor_d.addCallback(_commit_and_stop)
        else:
            # No need to wait for the processor, we can commit and stop now
            _commit_and_stop(None)

        # return the deferred
        return d
Example #15
def test_roundrobin_proxy(request, reactor, virtualenv):
    """
    Confirm that a proxy with two connections does connections to both
    backends.

    Two nodes each with a router-worker for 'realm1'
    Each node rlink-connects to the other.
    One node has a proxy
    """

    tempdir = _create_temp(request)

    # burn in hard-coded keys so we can refer to the public parts in
    # configs more easily.
    node_keys = [
        (node0_pubkey, node0_privkey),
        (node1_pubkey, node1_privkey),
        (node2_pubkey, node2_privkey),
        (node3_pubkey, node3_privkey),
    ]
    for node_num in range(4):
        node_dir = join(tempdir, "node{}".format(node_num))
        os.mkdir(node_dir)

        pub, priv = node_keys[node_num]
        with open(join(node_dir, "key.pub"), "w") as f:
            f.write(pub)
        with open(join(node_dir, "key.priv"), "w") as f:
            f.write(priv)

    # we start the nodes in parallel because we don't know which one
    # will "win" and connect first
    node_setup = [
        (node0_config, join(tempdir, "node0")),
        (node1_config, join(tempdir, "node1")),
        (node2_config, join(tempdir, "node2")),
        (node3_config, join(tempdir, "node3")),
    ]
    node_starts = []
    for node_config, node_dir in node_setup:
        node_d = start_node(request, reactor, virtualenv, node_config, node_dir)
        node_starts.append(node_d)
    print("-" * 80)
    print(node_starts)
    results = yield DeferredList(node_starts)
    print("-" * 80)
    print(results)
    print("-" * 80)
    nodes = []
    for ok, res in results:
        if not ok:
            raise res
        nodes.append(res)
    protocol0, protocol1, protocol2, protocol3 = nodes

    print("Started rlink'd nodes:")

    print("  0: {}".format(protocol0))
    print("  1: {}".format(protocol1))
    print("  2: {}".format(protocol2))
    print("  3: {}".format(protocol3))

    print("-" * 80)

    # we could wait to see text of each node successfully connecting
    # to the other .. or we just wait a bit.
    yield sleep(5)

    subscribed_d = Deferred()
    rpc_call_d = Deferred()
    print("start alice")
    # run alice first

    alice = Component(
        transports=[
            {"url": "ws://localhost:7070/ws", "type": "websocket"},  # proxy0
        ],
        realm="realm1",
    )

    @alice.on_join
    @inlineCallbacks
    def alice_join(session, details):
        print("\n\nalice joined\n")

        def a_thing(*args, **kw):
            print("received: a_thing: args={} kw={}".format(args, kw))
            reactor.callLater(3, session.leave)
        yield session.subscribe(a_thing, "test.a_thing")

        def rpc(*args, **kw):
            print("call: rpc: args={} kw={}".format(args, kw))
            reactor.callLater(1, rpc_call_d.callback, None)
            return "rpc return"
        yield session.register(rpc, "test.rpc")
        # XXX we don't know when the rlink registration goes all the way through...
        reactor.callLater(2.0, subscribed_d.callback, None)

    alice_done = alice.start(reactor)

    # wait until Alice actually subscribes (and thus is also registered) before starting bob
    yield subscribed_d
    print("alice is subscribed + registered")

    print("start bob")

    bob = Component(
        transports=[{
            "url": "ws://localhost:7070/ws",  # node0 XXX should be node1
            "type": "websocket",
        }],
        realm="realm1",
    )

    @bob.on_join
    @inlineCallbacks
    def bob_join(session, details):
        print("bob joined: PID={x_cb_pid}".format(**details.authextra))
        print("publishing 'test.a_thing'")
        p = yield session.publish("test.a_thing", 3, 2, 1, options=types.PublishOptions(acknowledge=True))
        print("published {}".format(p))
        res = yield session.call("test.rpc", 1, 2, 3)
        print("test.rpc returned: {}".format(res))
        reactor.callLater(2, session.leave)

    bob_done = bob.start(reactor)
    print("bob is starting", bob_done, alice_done)
    yield rpc_call_d
    yield bob_done
    yield alice_done

    # do a bunch of pubs in different sessions to prove we're hitting
    # different proxies and different router processes.

    received = []
    connects = []

    carol = Component(
        transports=[{
            "url": "ws://*****:*****",  # URL redacted in the source
            "type": "websocket",
        }],
        realm="realm1",
    )

    @carol.subscribe("multiverse", types.SubscribeOptions(details=True))
    def _(*args, **kwargs):
        print("SUB: {}".format(kwargs.get('details', None)))
        received.append((args, kwargs))

    carol_ready = Deferred()
    carol.on('ready', carol_ready.callback)
    carol.start()

    yield sleep(3)
    yield carol_ready

    GROUPS = 10
    CONNECTS = 5

    for g in range(GROUPS):
        group = []
        for m in range(CONNECTS):
            client = Component(
                transports=[{
                    "url": "ws://localhost:7070/ws",  # proxy0
                    "type": "websocket",
                }],
                realm="realm1",
            )

            @client.on_join
            @inlineCallbacks
            def _(session, details):
                connects.append(details)
                yield session.publish(
                    u"multiverse", group=g, member=m,
                    options=types.PublishOptions(acknowledge=True)
                )
                yield session.leave()

            group.append(client.start())
        res = yield DeferredList(group)
        for ok, value in res:
            if not ok:
                raise value
    print("-" * 80)
    print("Received {} events".format(len(received)))
    for r in received:
        print(r[1]['details'])

    # some client should get each publish() that we sent

    # FIXME: AssertionError: assert 49 == (10 * 5)
    assert len(received) == GROUPS * CONNECTS
    print("-" * 80)

    # figure out which nodes and proxies we've contacted
    workers = set()
    proxies = set()
    for c in connects:
        workers.add(c.authextra['x_cb_worker'])
        proxies.add(c.authextra['x_cb_proxy_worker'])
        print(c.authextra['x_cb_worker'])
    print("workers: {}".format(workers))
    print("proxies: {}".format(proxies))
    print("-" * 80)
    assert workers == set([
        "node0_worker0",
        "node1_worker0",
        "node2_worker0",
        "node3_worker0",
    ])
    assert proxies == set(["node0_proxy0"])
Example #16
 def run(self):
     self.d = Deferred()
     self.started = time.perf_counter()  # time.clock() was removed in Python 3.8
     self.connectBunch()
     return self.d
Example #17
 def __init__(self):
     self.character_waiter = Deferred()
     self.old_termio_settings = None
Example #18
 def rsp1_func():
     dfd = Deferred().addCallback(_check_downloading)
     reactor.callLater(.1, dfd.callback, rsp1)
     return dfd
Example #19
 def cbJoined(ignored):
     d = self.server.packetReceived = Deferred()
     c.transport.write(b"hello world", ("225.0.0.250", addr.port))
     return d
Example #20
    def _go(self, w):
        welcome = yield w.get_welcome()
        handle_welcome(welcome, self._args.relay_url, __version__,
                       self._args.stderr)

        # TODO: run the blocking zip-the-directory IO in a thread, let the
        # wormhole exchange happen in parallel
        offer, self._fd_to_send = self._build_offer()
        args = self._args

        other_cmd = u"wormhole receive"
        if args.verify:
            other_cmd = u"wormhole receive --verify"
        if args.zeromode:
            assert not args.code
            args.code = u"0-"
            other_cmd += u" -0"

        if args.code:
            w.set_code(args.code)
        else:
            w.allocate_code(args.code_length)

        code = yield w.get_code()
        if not args.zeromode:
            print(u"Wormhole code is: %s" % code, file=args.stderr)
            other_cmd += u" " + code
        print(u"On the other computer, please run:", file=args.stderr)
        print(u"", file=args.stderr)
        print(other_cmd, file=args.stderr)
        print(u"", file=args.stderr)
        # flush stderr so the code is displayed immediately
        args.stderr.flush()

        # We don't print a "waiting" message for get_unverified_key() here,
        # even though we do that in cmd_receive.py, because it's not at all
        # surprising to be waiting here for a long time. We'll sit in
        # get_unverified_key() until the receiver has typed in the code and
        # their PAKE message makes it to us.
        yield w.get_unverified_key()

        # TODO: don't stall on w.get_verifier() unless they want it
        def on_slow_connection():
            print(
                u"Key established, waiting for confirmation...",
                file=args.stderr)

        notify = self._reactor.callLater(VERIFY_TIMER, on_slow_connection)
        try:
            # The usual sender-chooses-code sequence means the receiver's
            # PAKE should be followed immediately by their VERSION, so
            # w.get_verifier() should fire right away. However if we're
            # using the offline-codes sequence, and the receiver typed in
            # their code first, and then they went offline, we might be
            # sitting here for a while, so printing the "waiting" message
            # seems like a good idea. It might even be appropriate to give up
            # after a while.
            verifier_bytes = yield w.get_verifier()  # might WrongPasswordError
        finally:
            if not notify.called:
                notify.cancel()

        if args.verify:
            # check_verifier() does a blocking call to input(), so stall for
            # a moment to let any outbound messages get written into the
            # kernel. At this point, we're sitting in a callback of
            # get_verifier(), which is triggered by receipt of the other
            # side's VERSION message. But we might have gotten both the PAKE
            # and the VERSION message in the same turn, and our outbound
            # VERSION message (triggered by receipt of their PAKE) is still
            # in Twisted's transmit queue. If we don't wait a moment, it will
            # be stuck there until `input()` returns, and the receiver won't
            # be able to compute a Verifier for the users to compare. #349
            # has more details
            d = Deferred()
            reactor.callLater(0.001, d.callback, None)
            yield d
            self._check_verifier(w,
                                 verifier_bytes)  # blocks, can TransferError

        if self._fd_to_send:
            ts = TransitSender(
                args.transit_helper,
                no_listen=(not args.listen),
                tor=self._tor,
                reactor=self._reactor,
                timing=self._timing)
            self._transit_sender = ts

            # for now, send this before the main offer
            sender_abilities = ts.get_connection_abilities()
            sender_hints = yield ts.get_connection_hints()
            sender_transit = {
                "abilities-v1": sender_abilities,
                "hints-v1": sender_hints,
            }
            self._send_data({u"transit": sender_transit}, w)

            # When I made it possible to override APPID with a CLI argument
            # (issue #113), I forgot to also change this w.derive_key()
            # (issue #339). We're stuck with it now. Use a local constant to
            # make this clear.
            BUG339_APPID = u"lothar.com/wormhole/text-or-file-xfer"

            # TODO: move this down below w.get_message()
            transit_key = w.derive_key(BUG339_APPID + "/transit-key",
                                       ts.TRANSIT_KEY_LENGTH)
            ts.set_transit_key(transit_key)

        self._send_data({"offer": offer}, w)

        want_answer = True

        while True:
            them_d_bytes = yield w.get_message()
            # TODO: get_message() fired, so get_verifier must have fired, so
            # now it's safe to use w.derive_key()
            them_d = bytes_to_dict(them_d_bytes)
            # print("GOT", them_d)
            recognized = False
            if u"error" in them_d:
                raise TransferError(
                    "remote error, transfer abandoned: %s" % them_d["error"])
            if u"transit" in them_d:
                recognized = True
                yield self._handle_transit(them_d[u"transit"])
            if u"answer" in them_d:
                recognized = True
                if not want_answer:
                    raise TransferError("duplicate answer")
                want_answer = False  # any further answer is a duplicate
                yield self._handle_answer(them_d[u"answer"])
                returnValue(None)
            if not recognized:
                log.msg("unrecognized message %r" % (them_d, ))
Example #21
    def update_ui(self, keys, filter_dict):
        """
        Gather the information required for updating the web interface.

        :param keys: the information about the torrents to gather
        :type keys: list
        :param filter_dict: the filters to apply when selecting torrents.
        :type filter_dict: dictionary
        :returns: The torrent and ui information.
        :rtype: dictionary
        """
        d = Deferred()
        ui_info = {
            "connected": client.connected(),
            "torrents": None,
            "filters": None,
            "stats": {
                "max_download": self.core_config.get("max_download_speed"),
                "max_upload": self.core_config.get("max_upload_speed"),
                "max_num_connections": self.core_config.get("max_connections_global")
            }
        }

        if not client.connected():
            d.callback(ui_info)
            return d

        def got_connections(connections):
            ui_info["stats"]["num_connections"] = connections

        def got_stats(stats):
            ui_info["stats"]["upload_rate"] = stats["payload_upload_rate"]
            ui_info["stats"]["download_rate"] = stats["payload_download_rate"]
            ui_info["stats"]["download_protocol_rate"] = stats["download_rate"] - stats["payload_download_rate"]
            ui_info["stats"]["upload_protocol_rate"] = stats["upload_rate"] - stats["payload_upload_rate"]
            ui_info["stats"]["dht_nodes"] = stats["dht_nodes"]
            ui_info["stats"]["has_incoming_connections"] = stats["has_incoming_connections"]

        def got_filters(filters):
            ui_info["filters"] = filters

        def got_free_space(free_space):
            ui_info["stats"]["free_space"] = free_space

        def got_torrents(torrents):
            ui_info["torrents"] = torrents

        def on_complete(result):
            d.callback(ui_info)

        d1 = component.get("SessionProxy").get_torrents_status(filter_dict, keys)
        d1.addCallback(got_torrents)

        d2 = client.core.get_filter_tree()
        d2.addCallback(got_filters)

        d3 = client.core.get_session_status([
            "payload_download_rate",
            "payload_upload_rate",
            "download_rate",
            "upload_rate",
            "dht_nodes",
            "has_incoming_connections"
        ])
        d3.addCallback(got_stats)

        d4 = client.core.get_num_connections()
        d4.addCallback(got_connections)

        d5 = client.core.get_free_space(self.core_config.get("download_location"))
        d5.addCallback(got_free_space)

        dl = DeferredList([d1, d2, d3, d4, d5], consumeErrors=True)
        dl.addCallback(on_complete)
        return d
Example #22
 def delete_all(self, uri):
     defer = Deferred()
     operation = lambda profile: self._delete_all_operation(uri, profile)
     reactor.callInThread(self.retrieve_profile, uri.user.username,
                          uri.user.domain, operation, True, defer)
     return defer
Example #23
 def __init__(self, command, *k, **w):
     self.command = command
     self._stdout_bytes = ''
     self._stderr_bytes = ''
     self._d = Deferred()
Example #24
 def get(self, uri):
     defer = Deferred()
     operation = lambda profile: self._get_operation(uri, profile)
     reactor.callInThread(self.retrieve_profile, uri.user.username,
                          uri.user.domain, operation, False, defer)
     return defer
Example #25
 def callback(response):
     finished = Deferred()
     response.deliverBody(Accumulator(finished))
     return finished
Example #26
 def get_profile(self, username, domain):
     defer = Deferred()
     reactor.callInThread(self.retrieve_profile, username, domain,
                          lambda profile: profile, False, defer)
     return defer
Example #27
def moment():
    d = Deferred()
    reactor.callWhenRunning(d.callback, None)
    return d
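
moment() is a tiny scheduling helper: it returns a Deferred that fires as soon as the reactor is running, letting inlineCallbacks code yield until startup. Typical use, as a sketch that assumes the moment() defined above:

from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def main():
    yield moment()   # resumes once the reactor has started
    print("reactor is up")
    reactor.stop()

main()
reactor.run()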
Example #28
 def wait(self):
     if self.transport.pid is None:
         return succeed(None)
     self._wait_result = Deferred()
     return self._wait_result
Example #29
 def get_list(self):
     self.d = Deferred()
     reactor.connectTCP("www.blogger.com", 80, self)
     return self.d
Example #30
 def __init__(self, gui=False):
     self.done = Deferred()
     EchoClientFactory.gui = gui
     EchoClientFactory.protocol = GM_Server