class ControlEndpoint(object):
    _peer_addr = attrib(validator=provides(IAddress))
    _subchannel_zero = attrib(validator=provides(ISubChannel))
    _eventual_queue = attrib(repr=False)
    _used = False

    def __attrs_post_init__(self):
        self._once = Once(SingleUseEndpointError)
        self._wait_for_main_channel = OneShotObserver(self._eventual_queue)

    # from manager

    def _main_channel_ready(self):
        self._wait_for_main_channel.fire(None)

    def _main_channel_failed(self, f):
        self._wait_for_main_channel.error(f)

    @inlineCallbacks
    def connect(self, protocolFactory):
        # return Deferred that fires with IProtocol or Failure(ConnectError)
        self._once()
        yield self._wait_for_main_channel.when_fired()
        p = protocolFactory.buildProtocol(self._peer_addr)
        self._subchannel_zero._set_protocol(p)
        # this sets p.transport and calls p.connectionMade()
        p.makeConnection(self._subchannel_zero)
        self._subchannel_zero._deliver_queued_data()
        returnValue(p)
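
Every example on this page leans on the same attrs pattern: attr.validators.provides(SomeInterface) rejects any value whose class does not declare the zope interface. A minimal, self-contained sketch of that pattern (IAddress here is a stand-in interface, not Twisted's):

import attr
from attr.validators import provides
from zope.interface import Interface, implementer

class IAddress(Interface):
    """Stand-in marker interface (the classes above use twisted.internet.interfaces.IAddress)."""

@implementer(IAddress)
class FakeAddress(object):
    pass

@attr.s
class Endpoint(object):
    _peer_addr = attr.ib(validator=provides(IAddress))

Endpoint(peer_addr=FakeAddress())       # validator is satisfied
try:
    Endpoint(peer_addr=object())        # object() does not provide IAddress
except TypeError as e:
    print(e.args[0])                    # "'_peer_addr' must provide ... which ... doesn't."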
Example #2
class SubchannelListenerEndpoint(object):
    _manager = attrib(validator=provides(IDilationManager))
    _host_addr = attrib(validator=provides(IAddress))

    def __attrs_post_init__(self):
        self._factory = None
        self._pending_opens = []

    # from manager
    def _got_open(self, t, peer_addr):
        if self._factory:
            self._connect(t, peer_addr)
        else:
            self._pending_opens.append((t, peer_addr))

    def _connect(self, t, peer_addr):
        p = self._factory.buildProtocol(peer_addr)
        t._set_protocol(p)
        p.makeConnection(t)

    # IStreamServerEndpoint

    def listen(self, protocolFactory):
        self._factory = protocolFactory
        for (t, peer_addr) in self._pending_opens:
            self._connect(t, peer_addr)
        self._pending_opens = []
        lp = SubchannelListeningPort(self._host_addr)
        return succeed(lp)
Example #3
class EndpointRecord(Sequence):
    control = attrib(validator=provides(IStreamClientEndpoint))
    connect = attrib(validator=provides(IStreamClientEndpoint))
    listen = attrib(validator=provides(IStreamServerEndpoint))
    def __len__(self):
        return 3
    def __getitem__(self, n):
        return (self.control, self.connect, self.listen)[n]
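
EndpointRecord subclasses Sequence only so the three dilation endpoints unpack like a tuple. A small sketch, assuming the upstream @attr.s decoration and using ordinary Twisted TCP endpoints as stand-ins for the real subchannel endpoints:

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint, TCP4ServerEndpoint

eps = EndpointRecord(
    control=TCP4ClientEndpoint(reactor, "127.0.0.1", 4001),  # provides IStreamClientEndpoint
    connect=TCP4ClientEndpoint(reactor, "127.0.0.1", 4002),
    listen=TCP4ServerEndpoint(reactor, 4003),                # provides IStreamServerEndpoint
)
control_ep, connect_ep, listen_ep = eps   # Sequence: unpacks like a 3-tuple
assert len(eps) == 3 and eps[0] is control_ep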
Example #4
class SubchannelConnectorEndpoint(object):
    _manager = attrib(validator=provides(IDilationManager))
    _host_addr = attrib(validator=instance_of(_WormholeAddress))
    _eventual_queue = attrib(repr=False)

    def __attrs_post_init__(self):
        self._connection_deferreds = deque()
        self._wait_for_main_channel = OneShotObserver(self._eventual_queue)

    def _main_channel_ready(self):
        self._wait_for_main_channel.fire(None)

    def _main_channel_failed(self, f):
        self._wait_for_main_channel.error(f)

    @inlineCallbacks
    def connect(self, protocolFactory):
        # return Deferred that fires with IProtocol or Failure(ConnectError)
        yield self._wait_for_main_channel.when_fired()
        scid = self._manager.allocate_subchannel_id()
        self._manager.send_open(scid)
        peer_addr = _SubchannelAddress(scid)
        # ? f.doStart()
        # ? f.startedConnecting(CONNECTOR) # ??
        sc = SubChannel(scid, self._manager, self._host_addr, peer_addr)
        self._manager.subchannel_local_open(scid, sc)
        p = protocolFactory.buildProtocol(peer_addr)
        sc._set_protocol(p)
        p.makeConnection(sc)  # set p.transport = sc and call connectionMade()
        returnValue(p)
Example #5
    def test_repr(self):
        """
        Returned validator has a useful `__repr__`.
        """
        v = provides(IFoo)
        assert ("<provides validator for interface {interface!r}>".format(
            interface=IFoo)) == repr(v)
Example #6
class InboundConnectionFactory(ServerFactory, object):
    _connector = attrib(validator=provides(IDilationConnector))

    def buildProtocol(self, addr):
        p = self._connector.build_protocol(addr)
        p.factory = self
        return p
Example #7
class CheckType(object):
    """
    Data model for a MaaS check type (e.g., remote.ping).
    """
    metrics = attr.ib(validator=instance_of(list))
    _clock = attr.ib(validator=provides(IReactorTime))
    test_check_available = attr.ib(validator=instance_of(dict),
                                   default=attr.Factory(dict))
    test_check_status = attr.ib(validator=instance_of(dict),
                                default=attr.Factory(dict))
    test_check_response_code = attr.ib(validator=instance_of(dict),
                                       default=attr.Factory(dict))

    def clear_overrides(self):
        """
        Clears the overrides for test-checks and metrics.
        """
        self.test_check_available = {}
        self.test_check_status = {}
        self.test_check_response_code = {}

        for metric in self.metrics:
            metric.clear_overrides()

    def get_metric_by_name(self, metric_name):
        """
        Gets the metric on this check type.

        This method is useful for setting and clearing overrides on the
        test metrics.
        """
        for metric in self.metrics:
            if metric.name == metric_name:
                return metric
        raise NameError('No metric named "{0}"!'.format(metric_name))

    def get_test_check_response(self, **kwargs):
        """
        Gets the response as would have been returned by the test-check API.
        """
        entity_id = kwargs['entity_id']
        check_id = kwargs.get('check_id', '__test_check')
        monitoring_zones = kwargs.get('monitoring_zones') or ['__AGENT__']

        ench_key = (entity_id, check_id)
        timestamp = int(1000 * self._clock.seconds())

        return (self.test_check_response_code.get(ench_key, 200),
                [{'timestamp': timestamp,
                  'monitoring_zone_id': monitoring_zone,
                  'available': self.test_check_available.get(ench_key, True),
                  'status': self.test_check_status.get(
                      ench_key, 'code=200,rt=0.4s,bytes=99'),
                  'metrics': {m.name: m.get_value_for_test_check(entity_id=entity_id,
                                                                 check_id=check_id,
                                                                 monitoring_zone=monitoring_zone,
                                                                 timestamp=timestamp)
                              for m in self.metrics}}
                 for monitoring_zone in monitoring_zones])
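
A sketch of driving CheckType directly, assuming the upstream attr.s decoration; the entity id is a placeholder and the empty metrics list keeps it self-contained:

from twisted.internet.task import Clock

check_type = CheckType(metrics=[], clock=Clock())       # Clock provides IReactorTime
code, body = check_type.get_test_check_response(entity_id="enAAAAA")
assert code == 200                                      # no response-code override registered
assert body[0]["monitoring_zone_id"] == "__AGENT__"     # default monitoring zone
assert body[0]["metrics"] == {}                         # empty because metrics=[]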
Example #8
    def test_repr(self):
        """
        Returned validator has a useful `__repr__`.
        """
        v = provides(IFoo)
        assert (
            "<provides validator for interface {interface!r}>"
            .format(interface=IFoo)
        ) == repr(v)
Example #9
class OutboundConnectionFactory(ClientFactory, object):
    _connector = attrib(validator=provides(IDilationConnector))
    _relay_handshake = attrib(validator=optional(instance_of(bytes)))

    def buildProtocol(self, addr):
        p = self._connector.build_protocol(addr)
        p.factory = self
        if self._relay_handshake is not None:
            p.use_relay(self._relay_handshake)
        return p
Example #10
class IOFount(object):
    """
    Fount that reads from a file-like-object.
    """

    outputType = ISegment

    _source = attrib()  # type: BinaryIO

    drain = attrib(
        validator=optional(provides(IDrain)), default=None, init=False
    )  # type: IDrain
    _paused = attrib(validator=instance_of(bool), default=False, init=False)


    def __attrs_post_init__(self):
        # type: () -> None
        self._pauser = Pauser(self._pause, self._resume)


    def _flowToDrain(self):
        # type: () -> None
        if self.drain is not None and not self._paused:
            data = self._source.read()
            if data:
                self.drain.receive(data)
            self.drain.flowStopped(Failure(StopIteration()))


    def flowTo(self, drain):
        # type: (IDrain) -> IFount
        result = beginFlowingTo(self, drain)
        self._flowToDrain()
        return result


    def pauseFlow(self):
        # type: () -> None
        return self._pauser.pause()


    def stopFlow(self):
        # type: () -> None
        return self._pauser.resume()


    def _pause(self):
        # type: () -> None
        self._paused = True


    def _resume(self):
        # type: () -> None
        self._paused = False
        self._flowToDrain()
Example #11
class SubchannelListenerEndpoint(object):
    _manager = attrib(validator=provides(IDilationManager))
    _host_addr = attrib(validator=provides(IAddress))
    _eventual_queue = attrib(repr=False)

    def __attrs_post_init__(self):
        self._once = Once(SingleUseEndpointError)
        self._factory = None
        self._pending_opens = deque()
        self._wait_for_main_channel = OneShotObserver(self._eventual_queue)

    # from manager (actually Inbound)
    def _got_open(self, t, peer_addr):
        if self._factory:
            self._connect(t, peer_addr)
        else:
            self._pending_opens.append((t, peer_addr))

    def _connect(self, t, peer_addr):
        p = self._factory.buildProtocol(peer_addr)
        t._set_protocol(p)
        p.makeConnection(t)
        t._deliver_queued_data()

    def _main_channel_ready(self):
        self._wait_for_main_channel.fire(None)

    def _main_channel_failed(self, f):
        self._wait_for_main_channel.error(f)

    # IStreamServerEndpoint

    @inlineCallbacks
    def listen(self, protocolFactory):
        self._once()
        yield self._wait_for_main_channel.when_fired()
        self._factory = protocolFactory
        while self._pending_opens:
            (t, peer_addr) = self._pending_opens.popleft()
            self._connect(t, peer_addr)
        lp = SubchannelListeningPort(self._host_addr)
        returnValue(lp)
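
Both listener variants gate everything on a OneShotObserver before touching the factory. That class comes from wormhole's observer module; a rough, synchronous stand-in built on Deferred shows the contract the endpoints rely on (the real one routes callbacks through an eventual-send queue):

from twisted.internet.defer import Deferred

class OneShotObserverSketch(object):
    """Fires every when_fired() Deferred exactly once, with a result or a Failure."""

    def __init__(self):
        self._fired = False
        self._result = None
        self._watchers = []

    def when_fired(self):
        d = Deferred()
        if self._fired:
            d.callback(self._result)   # late subscribers get the stored result
        else:
            self._watchers.append(d)
        return d

    def fire(self, result):
        assert not self._fired
        self._fired, self._result = True, result
        for d in self._watchers:
            d.callback(result)
        self._watchers = []

    def error(self, f):
        self.fire(f)                   # a Failure result runs the waiters' errbacks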
Example #12
    def test_success(self, zope_interface, ifoo):
        """
        Nothing happens if value provides requested interface.
        """
        @zope_interface.implementer(ifoo)
        class C:
            def f(self):
                pass

        v = provides(ifoo)
        v(None, simple_attr("x"), C())
Example #13
    def test_success(self):
        """
        Nothing happens if value provides requested interface.
        """
        @zope.interface.implementer(IFoo)
        class C(object):
            def f(self):
                pass

        v = provides(IFoo)
        v(None, simple_attr("x"), C())
Example #15
class InboundConnectionFactory(ServerFactory, object):
    _connector = attrib(validator=provides(IDilationConnector))

    def __repr__(self):
        return "InboundConnectionFactory(%s)" % (self._connector._role)

    def buildProtocol(self, addr):
        description = describe_inbound(addr)
        p = self._connector.build_protocol(addr, description)
        p.factory = self
        return p
Example #16
class SubchannelListeningPort(object):
    _host_addr = attrib(validator=provides(IAddress))

    def startListening(self):
        pass

    def stopListening(self):
        # TODO
        pass

    def getHost(self):
        return self._host_addr
Example #17
    def test_fail(self):
        """
        Raises `TypeError` if the interface isn't provided by the value.
        """
        value = object()
        a = simple_attr("x")

        v = provides(IFoo)
        with pytest.raises(TypeError) as e:
            v(None, a, value)
        assert (
            "'x' must provide {interface!r} which {value!r} doesn't."
            .format(interface=IFoo, value=value),
            a, IFoo, value,
        ) == e.value.args
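
Outside the test suite the same failure surfaces as a plain TypeError whose args carry the message, the attribute, the interface, and the offending value, which is exactly what the assertion above unpacks:

import attr
from attr.validators import provides
from zope.interface import Interface

class IFoo(Interface):
    def f():
        "A method the provider must offer."

@attr.s
class Holder(object):
    x = attr.ib(validator=provides(IFoo))

try:
    Holder(x=object())
except TypeError as e:
    message, attribute, interface, value = e.args
    print(message)   # "'x' must provide <InterfaceClass ...IFoo> which <object ...> doesn't."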
Example #19
class PullToPush(object):
    _producer = attrib(validator=provides(IPullProducer))
    _unregister = attrib(validator=lambda _a, _b, v: callable(v))
    _cooperator = attrib()
    _finished = False

    def _pull(self):
        while True:
            try:
                self._producer.resumeProducing()
            except Exception:
                log.err(
                    None, "%s failed, producing will be stopped:" %
                    (safe_str(self._producer), ))
                try:
                    self._unregister()
                    # The consumer should now call stopStreaming() on us,
                    # thus stopping the streaming.
                except Exception:
                    # Since the consumer blew up, we may not have had
                    # stopStreaming() called, so we just stop on our own:
                    log.err(
                        None, "%s failed to unregister producer:" %
                        (safe_str(self._unregister), ))
                    self._finished = True
                    return
            yield None

    def startStreaming(self, paused):
        self._coopTask = self._cooperator.cooperate(self._pull())
        if paused:
            self.pauseProducing()  # timer is scheduled, but task is removed

    def stopStreaming(self):
        if self._finished:
            return
        self._finished = True
        self._coopTask.stop()

    def pauseProducing(self):
        self._coopTask.pause()

    def resumeProducing(self):
        self._coopTask.resume()

    def stopProducing(self):
        self.stopStreaming()
        self._producer.stopProducing()
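
A sketch of exercising PullToPush with a fake pull producer and a Clock-driven Cooperator, assuming the upstream @attr.s decoration; CountingProducer and the zero-delay scheduler are illustrative stand-ins:

from twisted.internet.interfaces import IPullProducer
from twisted.internet.task import Clock, Cooperator
from zope.interface import implementer

@implementer(IPullProducer)
class CountingProducer(object):
    pulls = 0

    def resumeProducing(self):
        self.pulls += 1          # pretend to produce one chunk per pull

    def stopProducing(self):
        pass

clock = Clock()
coop = Cooperator(scheduler=lambda call: clock.callLater(0, call))
producer = CountingProducer()
helper = PullToPush(producer=producer, unregister=lambda: None, cooperator=coop)

helper.startStreaming(paused=False)   # cooperate() begins iterating _pull()
clock.advance(0)                      # run one cooperative slice; resumeProducing() is called
assert producer.pulls > 0
helper.stopStreaming()                # stops the cooperative task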
Example #20
class SubchannelConnectorEndpoint(object):
    _manager = attrib(validator=provides(IDilationManager))
    _host_addr = attrib(validator=instance_of(_WormholeAddress))

    def connect(self, protocolFactory):
        # return Deferred that fires with IProtocol or Failure(ConnectError)
        scid = self._manager.allocate_subchannel_id()
        self._manager.send_open(scid)
        peer_addr = _SubchannelAddress(scid)
        # ? f.doStart()
        # ? f.startedConnecting(CONNECTOR) # ??
        t = SubChannel(scid, self._manager, self._host_addr, peer_addr)
        p = protocolFactory.buildProtocol(peer_addr)
        t._set_protocol(p)
        p.makeConnection(t)  # set p.transport = t and call connectionMade()
        return succeed(p)
Example #21
class FrozenHTTPRequest:
    """
    Immutable HTTP request.
    """

    method: str = attrib(validator=instance_of(str))
    uri: DecodedURL = attrib(validator=instance_of(DecodedURL))
    headers: IHTTPHeaders = attrib(validator=provides(IHTTPHeaders))

    _body: Union[bytes, IFount] = attrib(validator=validateBody)

    _state: MessageState = attrib(default=Factory(MessageState), init=False)

    def bodyAsFount(self) -> IFount:
        return bodyAsFount(self._body, self._state)

    def bodyAsBytes(self) -> Deferred:
        return bodyAsBytes(self._body, self._state)
Example #22
class FrozenHTTPResponse:
    """
    Immutable HTTP response.
    """

    status: int = attrib(validator=instance_of(int))

    headers: IHTTPHeaders = attrib(validator=provides(IHTTPHeaders))

    _body: Union[bytes, IFount] = attrib(validator=validateBody)

    _state: MessageState = attrib(default=Factory(MessageState), init=False)

    def bodyAsFount(self) -> IFount:
        return bodyAsFount(self._body, self._state)

    def bodyAsBytes(self) -> Deferred:
        return bodyAsBytes(self._body, self._state)
Example #23
class _TwistedLoggerToEliotObserver(object):
    """
    An ``ILogObserver`` which re-publishes events as Eliot messages.
    """
    logger = attr.ib(default=None, validator=optional(provides(ILogger)))

    def _observe(self, event):
        flattened = loads(eventAsJSON(event))
        # We get a timestamp from Eliot.
        flattened.pop(u"log_time")
        # This is never serializable anyway.  "Legacy" log events (from
        # twisted.python.log) don't have this so make it optional.
        flattened.pop(u"log_logger", None)

        Message.new(message_type=u"eliot:twisted",
                    **flattened).write(self.logger)

    # The actual ILogObserver interface uses this.
    __call__ = _observe
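
A sketch of putting the observer to work, assuming the upstream @attr.s decoration; with no logger argument the re-published messages go to Eliot's default logger:

from twisted.logger import globalLogPublisher

observer = _TwistedLoggerToEliotObserver()   # logger=None -> Eliot's default destination
globalLogPublisher.addObserver(observer)     # every Twisted log event is re-published to Eliot
# ... later, e.g. at shutdown:
globalLogPublisher.removeObserver(observer)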
Example #24
class FluentdDestination(object):
    """
    ``FluentdDestination`` is an Eliot log destination which sends logs to a
    Fluentd via the HTTP input plugin.

    .. WARNING:: Combining this with ``_EliotLogging`` will immediately ruin
       your day.
    """
    agent = attr.ib(validator=provides(IAgent))
    fluentd_url = attr.ib(validator=instance_of(URL))

    def _observe(self, message):
        self.agent.request(
            b"POST",
            self.fluentd_url.asURI().asText().encode("ascii"),
            Headers({"Content-Type":["application/x-www-form-urlencoded"]}),
            FileBodyProducer(BytesIO(b"json=" + dumps(message))),
        )

    # Eliot wants this interface.
    __call__ = _observe
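
And a sketch of registering FluentdDestination with Eliot, again assuming the upstream @attr.s decoration; the Fluentd URL below is a placeholder and the URL class is assumed to be hyperlink's:

from eliot import add_destinations
from hyperlink import URL
from twisted.internet import reactor
from twisted.web.client import Agent

destination = FluentdDestination(
    agent=Agent(reactor),
    fluentd_url=URL.fromText(u"http://127.0.0.1:9880/eliot.log"),
)
add_destinations(destination)   # from here on, every Eliot message is POSTed to Fluentd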
Example #25
class FrozenHTTPRequest(object):
    """
    Immutable HTTP request.
    """

    method = attrib(validator=instance_of(Text))  # type: Text
    uri = attrib(validator=instance_of(DecodedURL))  # type: DecodedURL
    headers = attrib(validator=provides(IHTTPHeaders))  # type: IHTTPHeaders

    _body = attrib(validator=validateBody)  # type: Union[bytes, IFount]

    _state = attrib(default=Factory(MessageState),
                    init=False)  # type: MessageState

    def bodyAsFount(self):
        # type: () -> IFount
        return bodyAsFount(self._body, self._state)

    def bodyAsBytes(self):
        # type: () -> Deferred[bytes]
        return bodyAsBytes(self._body, self._state)
Example #26
class FrozenHTTPResponse(object):
    """
    Immutable HTTP response.
    """

    status = attrib(validator=instance_of(int))  # type: int

    headers = attrib(validator=provides(IHTTPHeaders))  # type: IHTTPHeaders

    _body = attrib(validator=validateBody)  # type: Union[bytes, IFount]

    _state = attrib(default=Factory(MessageState),
                    init=False)  # type: MessageState

    def bodyAsFount(self):
        # type: () -> IFount
        return bodyAsFount(self._body, self._state)

    def bodyAsBytes(self):
        # type: () -> Deferred[bytes]
        return bodyAsBytes(self._body, self._state)
Example #27
class RRSet:
    """
    https://tools.ietf.org/html/rfc2181#section-5
    http://docs.aws.amazon.com/Route53/latest/APIReference/API_ResourceRecord.html

    @ivar name: The label (name) of the resource record set involved in the change.
    @type name: L{Name}

    @ivar type: The type of the resource record set. For example, NS, SOA, AAAA, etc.
    @type type: L{str}

    @ivar ttl: The time-to-live for this resource record set.
    @type ttl: L{int}

    @ivar records: The resource records involved in the change.
    @type records: L{list} of L{IResourceRecord} providers
    """
    type = RRSetType.RESOURCE

    label = attr.ib(validator=validators.instance_of(Name))
    type = attr.ib(validator=validators.instance_of(str))
    ttl = attr.ib(validator=validators.instance_of(int))
    records = attr.ib(
        validator=set_of(validators.provides(IBasicResourceRecord)))
Example #28
class Manager(object):
    _S = attrib(validator=provides(ISend), repr=False)
    _my_side = attrib(validator=instance_of(type(u"")))
    _transit_relay_location = attrib(validator=optional(instance_of(str)))
    _reactor = attrib(repr=False)
    _eventual_queue = attrib(repr=False)
    _cooperator = attrib(repr=False)
    # TODO: can this validator work when the parameter is optional?
    _no_listen = attrib(validator=instance_of(bool), default=False)

    _dilation_key = None
    _tor = None  # TODO
    _timing = None  # TODO
    _next_subchannel_id = None  # initialized in choose_role

    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace",
                        lambda self, f: None)  # pragma: no cover

    def __attrs_post_init__(self):
        self._got_versions_d = Deferred()

        self._my_role = None  # determined upon rx_PLEASE
        self._host_addr = _WormholeAddress()

        self._connection = None
        self._made_first_connection = False
        self._stopped = OneShotObserver(self._eventual_queue)
        self._debug_stall_connector = False

        self._next_dilation_phase = 0

        # I kept getting confused about which methods were for inbound data
        # (and thus flow-control methods go "out") and which were for
        # outbound data (with flow-control going "in"), so I split them up
        # into separate pieces.
        self._inbound = Inbound(self, self._host_addr)
        self._outbound = Outbound(self, self._cooperator)  # from us to peer

        # We must open subchannel0 early, since messages may arrive very
        # quickly once the connection is established. This subchannel may or
        # may not ever get revealed to the caller, since the peer might not
        # even be capable of dilation.
        scid0 = 0
        peer_addr0 = _SubchannelAddress(scid0)
        sc0 = SubChannel(scid0, self, self._host_addr, peer_addr0)
        self._inbound.set_subchannel_zero(scid0, sc0)

        # we can open non-zero subchannels as soon as we get our first
        # connection, and we can make the Endpoints even earlier
        control_ep = ControlEndpoint(peer_addr0, sc0, self._eventual_queue)
        connect_ep = SubchannelConnectorEndpoint(self, self._host_addr,
                                                 self._eventual_queue)
        listen_ep = SubchannelListenerEndpoint(self, self._host_addr,
                                               self._eventual_queue)
        # TODO: let inbound/outbound create the endpoints, then return them
        # to us
        self._inbound.set_listener_endpoint(listen_ep)

        self._endpoints = EndpointRecord(control_ep, connect_ep, listen_ep)

    def get_endpoints(self):
        return self._endpoints

    def got_dilation_key(self, key):
        assert isinstance(key, bytes)
        self._dilation_key = key

    def got_wormhole_versions(self, their_wormhole_versions):
        # this always happens before received_dilation_message
        dilation_version = None
        their_dilation_versions = set(
            their_wormhole_versions.get("can-dilate", []))
        my_versions = set(DILATION_VERSIONS)
        shared_versions = my_versions.intersection(their_dilation_versions)
        if "1" in shared_versions:
            dilation_version = "1"

        # dilation_version is the best mutually-compatible version we have
        # with the peer, or None if we have nothing in common

        if not dilation_version:  # "1" or None
            # TODO: be more specific about the error. dilation_version==None
            # means we had no version in common with them, which could either
            # be because they're so old they don't dilate at all, or because
            # they're so new that they no longer accommodate our old version
            self.fail(failure.Failure(OldPeerCannotDilateError()))

        self.start()

    def fail(self, f):
        self._endpoints.control._main_channel_failed(f)
        self._endpoints.connect._main_channel_failed(f)
        self._endpoints.listen._main_channel_failed(f)

    def received_dilation_message(self, plaintext):
        # this receives new in-order DILATE-n payloads, decrypted but not
        # de-JSONed.

        message = bytes_to_dict(plaintext)
        type = message["type"]
        if type == "please":
            self.rx_PLEASE(message)
        elif type == "connection-hints":
            self.rx_HINTS(message)
        elif type == "reconnect":
            self.rx_RECONNECT()
        elif type == "reconnecting":
            self.rx_RECONNECTING()
        else:
            log.err(UnknownDilationMessageType(message))
            return

    def when_stopped(self):
        return self._stopped.when_fired()

    def send_dilation_phase(self, **fields):
        dilation_phase = self._next_dilation_phase
        self._next_dilation_phase += 1
        self._S.send("dilate-%d" % dilation_phase, dict_to_bytes(fields))

    def send_hints(self, hints):  # from Connector
        self.send_dilation_phase(type="connection-hints", hints=hints)

    # forward inbound-ish things to _Inbound

    def subchannel_pauseProducing(self, sc):
        self._inbound.subchannel_pauseProducing(sc)

    def subchannel_resumeProducing(self, sc):
        self._inbound.subchannel_resumeProducing(sc)

    def subchannel_stopProducing(self, sc):
        self._inbound.subchannel_stopProducing(sc)

    def subchannel_local_open(self, scid, sc):
        self._inbound.subchannel_local_open(scid, sc)

    # forward outbound-ish things to _Outbound
    def subchannel_registerProducer(self, sc, producer, streaming):
        self._outbound.subchannel_registerProducer(sc, producer, streaming)

    def subchannel_unregisterProducer(self, sc):
        self._outbound.subchannel_unregisterProducer(sc)

    def send_open(self, scid):
        assert isinstance(scid, six.integer_types)
        self._queue_and_send(Open, scid)

    def send_data(self, scid, data):
        assert isinstance(scid, six.integer_types)
        self._queue_and_send(Data, scid, data)

    def send_close(self, scid):
        assert isinstance(scid, six.integer_types)
        self._queue_and_send(Close, scid)

    def _queue_and_send(self, record_type, *args):
        r = self._outbound.build_record(record_type, *args)
        # Outbound owns the send_record() pipe, so that it can stall new
        # writes after a new connection is made until after all queued
        # messages are written (to preserve ordering).
        self._outbound.queue_and_send_record(r)  # may trigger pauseProducing

    def subchannel_closed(self, scid, sc):
        # let everyone clean up. This happens just after we delivered
        # connectionLost to the Protocol, except for the control channel,
        # which might get connectionLost later after they use ep.connect.
        # TODO: is this inversion a problem?
        self._inbound.subchannel_closed(scid, sc)
        self._outbound.subchannel_closed(scid, sc)

    # our Connector calls these

    def connector_connection_made(self, c):
        self.connection_made()  # state machine update
        self._connection = c
        self._inbound.use_connection(c)
        self._outbound.use_connection(c)  # does c.registerProducer
        if not self._made_first_connection:
            self._made_first_connection = True
            self._endpoints.control._main_channel_ready()
            self._endpoints.connect._main_channel_ready()
            self._endpoints.listen._main_channel_ready()
        pass

    def connector_connection_lost(self):
        self._stop_using_connection()
        if self._my_role is LEADER:
            self.connection_lost_leader()  # state machine
        else:
            self.connection_lost_follower()

    def _stop_using_connection(self):
        # the connection is already lost by this point
        self._connection = None
        self._inbound.stop_using_connection()
        self._outbound.stop_using_connection()  # does c.unregisterProducer

    # from our active Connection

    def got_record(self, r):
        # records with sequence numbers: always ack, ignore old ones
        if isinstance(r, (Open, Data, Close)):
            self.send_ack(r.seqnum)  # always ack, even for old ones
            if self._inbound.is_record_old(r):
                return
            self._inbound.update_ack_watermark(r.seqnum)
            if isinstance(r, Open):
                self._inbound.handle_open(r.scid)
            elif isinstance(r, Data):
                self._inbound.handle_data(r.scid, r.data)
            else:  # isinstance(r, Close)
                self._inbound.handle_close(r.scid)
            return
        if isinstance(r, KCM):
            log.err(UnexpectedKCM())
        elif isinstance(r, Ping):
            self.handle_ping(r.ping_id)
        elif isinstance(r, Pong):
            self.handle_pong(r.ping_id)
        elif isinstance(r, Ack):
            self._outbound.handle_ack(r.resp_seqnum)  # retire queued messages
        else:
            log.err(UnknownMessageType("{}".format(r)))

    # pings, pongs, and acks are not queued
    def send_ping(self, ping_id):
        self._outbound.send_if_connected(Ping(ping_id))

    def send_pong(self, ping_id):
        self._outbound.send_if_connected(Pong(ping_id))

    def send_ack(self, resp_seqnum):
        self._outbound.send_if_connected(Ack(resp_seqnum))

    def handle_ping(self, ping_id):
        self.send_pong(ping_id)

    def handle_pong(self, ping_id):
        # TODO: update is-alive timer
        pass

    # subchannel maintenance
    def allocate_subchannel_id(self):
        scid_num = self._next_subchannel_id
        self._next_subchannel_id += 2
        return scid_num

    # state machine

    @m.state(initial=True)
    def WAITING(self):
        pass  # pragma: no cover

    @m.state()
    def WANTING(self):
        pass  # pragma: no cover

    @m.state()
    def CONNECTING(self):
        pass  # pragma: no cover

    @m.state()
    def CONNECTED(self):
        pass  # pragma: no cover

    @m.state()
    def FLUSHING(self):
        pass  # pragma: no cover

    @m.state()
    def ABANDONING(self):
        pass  # pragma: no cover

    @m.state()
    def LONELY(self):
        pass  # pragma: no cover

    @m.state()
    def STOPPING(self):
        pass  # pragma: no cover

    @m.state(terminal=True)
    def STOPPED(self):
        pass  # pragma: no cover

    @m.input()
    def start(self):
        pass  # pragma: no cover

    @m.input()
    def rx_PLEASE(self, message):
        pass  # pragma: no cover

    @m.input()  # only sent by Follower
    def rx_HINTS(self, hint_message):
        pass  # pragma: no cover

    @m.input()  # only Leader sends RECONNECT, so only Follower receives it
    def rx_RECONNECT(self):
        pass  # pragma: no cover

    @m.input()  # only Follower sends RECONNECTING, so only Leader receives it
    def rx_RECONNECTING(self):
        pass  # pragma: no cover

    # Connector gives us connection_made()
    @m.input()
    def connection_made(self):
        pass  # pragma: no cover

    # our connection_lost() fires connection_lost_leader or
    # connection_lost_follower depending upon our role. If either side sees a
    # problem with the connection (timeouts, bad authentication) then they
    # just drop it and let connection_lost() handle the cleanup.
    @m.input()
    def connection_lost_leader(self):
        pass  # pragma: no cover

    @m.input()
    def connection_lost_follower(self):
        pass

    @m.input()
    def stop(self):
        pass  # pragma: no cover

    @m.output()
    def send_please(self):
        self.send_dilation_phase(type="please", side=self._my_side)

    @m.output()
    def choose_role(self, message):
        their_side = message["side"]
        if self._my_side > their_side:
            self._my_role = LEADER
            # scid 0 is reserved for the control channel. the leader uses odd
            # numbers starting with 1
            self._next_subchannel_id = 1
        elif their_side > self._my_side:
            self._my_role = FOLLOWER
            # the follower uses even numbers starting with 2
            self._next_subchannel_id = 2
        else:
            raise ValueError("their side shouldn't be equal: reflection?")

    # these Outputs behave differently for the Leader vs the Follower

    @m.output()
    def start_connecting_ignore_message(self, message):
        del message  # ignored
        return self._start_connecting()

    @m.output()
    def start_connecting(self):
        self._start_connecting()

    def _start_connecting(self):
        assert self._my_role is not None
        assert self._dilation_key is not None
        self._connector = Connector(
            self._dilation_key,
            self._transit_relay_location,
            self,
            self._reactor,
            self._eventual_queue,
            self._no_listen,
            self._tor,
            self._timing,
            self._my_side,  # needed for relay handshake
            self._my_role)
        if self._debug_stall_connector:
            # unit tests use this hook to send messages while we know we
            # don't have a connection
            self._eventual_queue.eventually(self._debug_stall_connector,
                                            self._connector)
            return
        self._connector.start()

    @m.output()
    def send_reconnect(self):
        self.send_dilation_phase(type="reconnect")  # TODO: generation number?

    @m.output()
    def send_reconnecting(self):
        self.send_dilation_phase(type="reconnecting")  # TODO: generation?

    @m.output()
    def use_hints(self, hint_message):
        hint_objs = filter(
            lambda h: h,  # ignore None, unrecognizable
            [parse_hint(hs) for hs in hint_message["hints"]])
        hint_objs = list(hint_objs)
        self._connector.got_hints(hint_objs)

    @m.output()
    def stop_connecting(self):
        self._connector.stop()

    @m.output()
    def abandon_connection(self):
        # we think we're still connected, but the Leader disagrees. Or we've
        # been told to shut down.
        self._connection.disconnect()  # let connection_lost do cleanup

    @m.output()
    def notify_stopped(self):
        self._stopped.fire(None)

    # We are born WAITING after the local app calls w.dilate(). We enter
    # WANTING (and send a PLEASE) when we learn of a mutually-compatible
    # dilation_version.
    WAITING.upon(start, enter=WANTING, outputs=[send_please])

    # we start CONNECTING when we get rx_PLEASE
    WANTING.upon(rx_PLEASE,
                 enter=CONNECTING,
                 outputs=[choose_role, start_connecting_ignore_message])

    CONNECTING.upon(connection_made, enter=CONNECTED, outputs=[])

    # Leader
    CONNECTED.upon(connection_lost_leader,
                   enter=FLUSHING,
                   outputs=[send_reconnect])
    FLUSHING.upon(rx_RECONNECTING,
                  enter=CONNECTING,
                  outputs=[start_connecting])

    # Follower
    # if we notice a lost connection, just wait for the Leader to notice too
    CONNECTED.upon(connection_lost_follower, enter=LONELY, outputs=[])
    LONELY.upon(rx_RECONNECT,
                enter=CONNECTING,
                outputs=[send_reconnecting, start_connecting])
    # but if they notice it first, abandon our (seemingly functional)
    # connection, then tell them that we're ready to try again
    CONNECTED.upon(rx_RECONNECT,
                   enter=ABANDONING,
                   outputs=[abandon_connection])
    ABANDONING.upon(connection_lost_follower,
                    enter=CONNECTING,
                    outputs=[send_reconnecting, start_connecting])
    # and if they notice a problem while we're still connecting, abandon our
    # incomplete attempt and try again. in this case we don't have to wait
    # for a connection to finish shutdown
    CONNECTING.upon(
        rx_RECONNECT,
        enter=CONNECTING,
        outputs=[stop_connecting, send_reconnecting, start_connecting])

    # rx_HINTS never changes state, they're just accepted or ignored
    WANTING.upon(rx_HINTS, enter=WANTING, outputs=[])  # too early
    CONNECTING.upon(rx_HINTS, enter=CONNECTING, outputs=[use_hints])
    CONNECTED.upon(rx_HINTS, enter=CONNECTED, outputs=[])  # too late, ignore
    FLUSHING.upon(rx_HINTS, enter=FLUSHING, outputs=[])  # stale, ignore
    LONELY.upon(rx_HINTS, enter=LONELY, outputs=[])  # stale, ignore
    ABANDONING.upon(rx_HINTS, enter=ABANDONING, outputs=[])  # shouldn't happen
    STOPPING.upon(rx_HINTS, enter=STOPPING, outputs=[])

    WAITING.upon(stop, enter=STOPPED, outputs=[notify_stopped])
    WANTING.upon(stop, enter=STOPPED, outputs=[notify_stopped])
    CONNECTING.upon(stop,
                    enter=STOPPED,
                    outputs=[stop_connecting, notify_stopped])
    CONNECTED.upon(stop, enter=STOPPING, outputs=[abandon_connection])
    ABANDONING.upon(stop, enter=STOPPING, outputs=[])
    FLUSHING.upon(stop, enter=STOPPED, outputs=[notify_stopped])
    LONELY.upon(stop, enter=STOPPED, outputs=[notify_stopped])
    STOPPING.upon(connection_lost_leader,
                  enter=STOPPED,
                  outputs=[notify_stopped])
    STOPPING.upon(connection_lost_follower,
                  enter=STOPPED,
                  outputs=[notify_stopped])
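
From the application's point of view the three endpoints produced by get_endpoints() behave like ordinary Twisted endpoints. A sketch, assuming manager is a started Manager and using a trivial echo protocol as a stand-in:

from twisted.internet.protocol import Factory, Protocol

class Echo(Protocol):
    def dataReceived(self, data):
        self.transport.write(data)

control_ep, connect_ep, listen_ep = manager.get_endpoints()   # EndpointRecord unpacks
d_control = control_ep.connect(Factory.forProtocol(Echo))     # subchannel 0, single use
d_listen = listen_ep.listen(Factory.forProtocol(Echo))        # accept peer-opened subchannels
d_connect = connect_ep.connect(Factory.forProtocol(Echo))     # open a new subchannel to the peer
# none of these Deferreds fire until the main dilated connection is established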
Example #29
class Key(object):
    _appid = attrib(validator=instance_of(type(u"")))
    _versions = attrib(validator=instance_of(dict))
    _side = attrib(validator=instance_of(type(u"")))
    _timing = attrib(validator=provides(_interfaces.ITiming))
    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace", lambda self, f: None)

    def __attrs_post_init__(self):
        self._SK = _SortedKey(self._appid, self._versions, self._side,
                              self._timing)
        self._debug_pake_stashed = False  # for tests

    def wire(self, boss, mailbox, receive):
        self._SK.wire(boss, mailbox, receive)

    @m.state(initial=True)
    def S00(self):
        pass  # pragma: no cover

    @m.state()
    def S01(self):
        pass  # pragma: no cover

    @m.state()
    def S10(self):
        pass  # pragma: no cover

    @m.state()
    def S11(self):
        pass  # pragma: no cover

    @m.input()
    def got_code(self, code):
        pass

    @m.input()
    def got_pake(self, body):
        pass

    @m.output()
    def stash_pake(self, body):
        self._pake = body
        self._debug_pake_stashed = True

    @m.output()
    def deliver_code(self, code):
        self._SK.got_code(code)

    @m.output()
    def deliver_pake(self, body):
        self._SK.got_pake(body)

    @m.output()
    def deliver_code_and_stashed_pake(self, code):
        self._SK.got_code(code)
        self._SK.got_pake(self._pake)

    S00.upon(got_code, enter=S10, outputs=[deliver_code])
    S10.upon(got_pake, enter=S11, outputs=[deliver_pake])
    S00.upon(got_pake, enter=S01, outputs=[stash_pake])
    S01.upon(got_code, enter=S11, outputs=[deliver_code_and_stashed_pake])
Example #30
class _SortedKey(object):
    _appid = attrib(validator=instance_of(type(u"")))
    _versions = attrib(validator=instance_of(dict))
    _side = attrib(validator=instance_of(type(u"")))
    _timing = attrib(validator=provides(_interfaces.ITiming))
    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace", lambda self, f: None)

    def wire(self, boss, mailbox, receive):
        self._B = _interfaces.IBoss(boss)
        self._M = _interfaces.IMailbox(mailbox)
        self._R = _interfaces.IReceive(receive)

    @m.state(initial=True)
    def S0_know_nothing(self):
        pass  # pragma: no cover

    @m.state()
    def S1_know_code(self):
        pass  # pragma: no cover

    @m.state()
    def S2_know_key(self):
        pass  # pragma: no cover

    @m.state(terminal=True)
    def S3_scared(self):
        pass  # pragma: no cover

    # from Boss
    @m.input()
    def got_code(self, code):
        pass

    # from Ordering
    def got_pake(self, body):
        assert isinstance(body, type(b"")), type(body)
        payload = bytes_to_dict(body)
        if "pake_v1" in payload:
            self.got_pake_good(hexstr_to_bytes(payload["pake_v1"]))
        else:
            self.got_pake_bad()

    @m.input()
    def got_pake_good(self, msg2):
        pass

    @m.input()
    def got_pake_bad(self):
        pass

    @m.output()
    def build_pake(self, code):
        with self._timing.add("pake1", waiting="crypto"):
            self._sp = SPAKE2_Symmetric(to_bytes(code),
                                        idSymmetric=to_bytes(self._appid))
            msg1 = self._sp.start()
        body = dict_to_bytes({"pake_v1": bytes_to_hexstr(msg1)})
        self._M.add_message("pake", body)

    @m.output()
    def scared(self):
        self._B.scared()

    @m.output()
    def compute_key(self, msg2):
        assert isinstance(msg2, type(b""))
        with self._timing.add("pake2", waiting="crypto"):
            key = self._sp.finish(msg2)
        self._B.got_key(key)
        phase = "version"
        data_key = derive_phase_key(key, self._side, phase)
        plaintext = dict_to_bytes(self._versions)
        encrypted = encrypt_data(data_key, plaintext)
        self._M.add_message(phase, encrypted)
        self._R.got_key(key)

    S0_know_nothing.upon(got_code, enter=S1_know_code, outputs=[build_pake])
    S1_know_code.upon(got_pake_good, enter=S2_know_key, outputs=[compute_key])
    S1_know_code.upon(got_pake_bad, enter=S3_scared, outputs=[scared])
Example #31
class Connector(object):
    """I manage a single generation of connection.

    The Manager creates one of me at a time, whenever it wants a connection
    (which is always, once w.dilate() has been called and we know the remote
    end can dilate, and is expressed by the Manager calling my .start()
    method). I am discarded when my established connection is lost (and if we
    still want to be connected, a new generation is started and a new
    Connector is created). I am also discarded if we stop wanting to be
    connected (which the Manager expresses by calling my .stop() method).

    I manage the race between multiple connections for a specific generation
    of the dilated connection.

    I send connection hints when my InboundConnectionFactory yields addresses
    (self.listener_ready), and I initiate outbound connections (with
    OutboundConnectionFactory) as I receive connection hints from my peer
    (self.got_hints). Both factories use my build_protocol() method to create
    connection.DilatedConnectionProtocol instances. I track these protocol
    instances until one finishes negotiation and wins the race. I then shut
    down the others, remember the winner as self._winning_connection, and
    deliver the winner to manager.connector_connection_made(c).

    When an active connection is lost, we call manager.connector_connection_lost,
    allowing the manager to decide whether it wants to start a new generation
    or not.
    """

    _dilation_key = attrib(validator=instance_of(type(b"")))
    _transit_relay_location = attrib(
        validator=optional(instance_of(type(u""))))
    _manager = attrib(validator=provides(IDilationManager))
    _reactor = attrib()
    _eventual_queue = attrib()
    _no_listen = attrib(validator=instance_of(bool))
    _tor = attrib()
    _timing = attrib()
    _side = attrib(validator=instance_of(type(u"")))
    # was self._side = bytes_to_hexstr(os.urandom(8)) # unicode
    _role = attrib()

    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace",
                        lambda self, f: None)  # pragma: no cover

    RELAY_DELAY = 2.0

    def __attrs_post_init__(self):
        if self._transit_relay_location:
            # TODO: allow multiple hints for a single relay
            relay_hint = parse_hint_argv(self._transit_relay_location)
            relay = RelayV1Hint(hints=(relay_hint, ))
            self._transit_relays = [relay]
        else:
            self._transit_relays = []
        self._listeners = set()  # IListeningPorts that can be stopped
        self._pending_connectors = set()  # Deferreds that can be cancelled
        self._pending_connections = EmptyableSet(
            _eventual_queue=self._eventual_queue)  # Protocols to be stopped
        self._contenders = set()  # viable connections
        self._winning_connection = None
        self._timing = self._timing or DebugTiming()
        self._timing.add("transit")

    # this describes what our Connector can do, for the initial advertisement
    @classmethod
    def get_connection_abilities(klass):
        return [
            {
                "type": "direct-tcp-v1"
            },
            {
                "type": "relay-v1"
            },
        ]

    def build_protocol(self, addr, description):
        # encryption: let's use Noise NNpsk0 (or maybe NNpsk2). That uses
        # ephemeral keys plus a pre-shared symmetric key (the Transit key), a
        # different one for each potential connection.
        noise = build_noise()
        noise.set_psks(self._dilation_key)
        if self._role is LEADER:
            noise.set_as_initiator()
            outbound_prologue = PROLOGUE_LEADER
            inbound_prologue = PROLOGUE_FOLLOWER
        else:
            noise.set_as_responder()
            outbound_prologue = PROLOGUE_FOLLOWER
            inbound_prologue = PROLOGUE_LEADER
        p = DilatedConnectionProtocol(self._eventual_queue, self._role,
                                      description, self, noise,
                                      outbound_prologue, inbound_prologue)
        return p

    @m.state(initial=True)
    def connecting(self):
        pass  # pragma: no cover

    @m.state()
    def connected(self):
        pass  # pragma: no cover

    @m.state(terminal=True)
    def stopped(self):
        pass  # pragma: no cover

    # TODO: unify the tense of these method-name verbs

    # add_relay() and got_hints() are called by the Manager as it receives
    # messages from our peer. stop() is called when the Manager shuts down
    @m.input()
    def add_relay(self, hint_objs):
        pass

    @m.input()
    def got_hints(self, hint_objs):
        pass

    @m.input()
    def stop(self):
        pass

    # called by ourselves, when _start_listener() is ready
    @m.input()
    def listener_ready(self, hint_objs):
        pass

    # called when DilatedConnectionProtocol submits itself, after KCM
    # received
    @m.input()
    def add_candidate(self, c):
        pass

    # called by ourselves, via consider()
    @m.input()
    def accept(self, c):
        pass

    @m.output()
    def use_hints(self, hint_objs):
        self._use_hints(hint_objs)

    @m.output()
    def publish_hints(self, hint_objs):
        self._publish_hints(hint_objs)

    def _publish_hints(self, hint_objs):
        self._manager.send_hints([encode_hint(h) for h in hint_objs])

    @m.output()
    def consider(self, c):
        self._contenders.add(c)
        if self._role is LEADER:
            # for now, just accept the first one. TODO: be clever.
            self._eventual_queue.eventually(self.accept, c)
        else:
            # the follower always uses the first contender, since that's the
            # only one the leader picked
            self._eventual_queue.eventually(self.accept, c)

    @m.output()
    def select_and_stop_remaining(self, c):
        self._winning_connection = c
        self._contenders.clear()  # we no longer care who else came close
        # remove this winner from the losers, so we don't shut it down
        self._pending_connections.discard(c)
        # shut down losing connections
        self.stop_listeners()  # TODO: maybe keep it open? NAT/p2p assist
        self.stop_pending_connectors()
        self.stop_pending_connections()

        c.select(self._manager)  # subsequent frames go directly to the manager
        # c.select also wires up when_disconnected() to fire
        # manager.connector_connection_lost(). TODO: rename this, since the
        # Connector is no longer the one calling it
        if self._role is LEADER:
            # TODO: this should live in Connection
            c.send_record(KCM())  # leader sends KCM now
        self._manager.connector_connection_made(
            c)  # manager sends frames to Connection

    @m.output()
    def stop_everything(self):
        self.stop_listeners()
        self.stop_pending_connectors()
        self.stop_pending_connections()
        self.break_cycles()

    def stop_listeners(self):
        d = DeferredList([l.stopListening() for l in self._listeners])
        self._listeners.clear()
        return d  # synchronization for tests

    def stop_pending_connectors(self):
        for d in self._pending_connectors:
            d.cancel()

    def stop_pending_connections(self):
        d = self._pending_connections.when_next_empty()
        [c.disconnect() for c in self._pending_connections]
        return d

    def break_cycles(self):
        # help GC by forgetting references to things that reference us
        self._listeners.clear()
        self._pending_connectors.clear()
        self._pending_connections.clear()
        self._winning_connection = None

    connecting.upon(listener_ready, enter=connecting, outputs=[publish_hints])
    connecting.upon(add_relay,
                    enter=connecting,
                    outputs=[use_hints, publish_hints])
    connecting.upon(got_hints, enter=connecting, outputs=[use_hints])
    connecting.upon(add_candidate, enter=connecting, outputs=[consider])
    connecting.upon(accept,
                    enter=connected,
                    outputs=[select_and_stop_remaining])
    connecting.upon(stop, enter=stopped, outputs=[stop_everything])

    # once connected, we ignore everything except stop
    connected.upon(listener_ready, enter=connected, outputs=[])
    connected.upon(add_relay, enter=connected, outputs=[])
    connected.upon(got_hints, enter=connected, outputs=[])
    # TODO: tell them to disconnect? will they hang out forever? I *think*
    # they'll drop this once they get a KCM on the winning connection.
    connected.upon(add_candidate, enter=connected, outputs=[])
    connected.upon(accept, enter=connected, outputs=[])
    connected.upon(stop, enter=stopped, outputs=[stop_everything])

    # from Manager: start, got_hints, stop
    # maybe add_candidate, accept

    def start(self):
        if not self._no_listen and not self._tor:
            addresses = self._get_listener_addresses()
            self._start_listener(addresses)
        if self._transit_relays:
            self._publish_hints(self._transit_relays)
            self._use_hints(self._transit_relays)

    def _get_listener_addresses(self):
        addresses = ipaddrs.find_addresses()
        non_loopback_addresses = [a for a in addresses if a != "127.0.0.1"]
        if non_loopback_addresses:
            # some test hosts, including the appveyor VMs, *only* have
            # 127.0.0.1, and the tests will hang badly if we remove it.
            addresses = non_loopback_addresses
        return addresses

    def _start_listener(self, addresses):
        # TODO: listen on a fixed port, if possible, for NAT/p2p benefits, also
        # to make firewall configs easier
        # TODO: retain listening port between connection generations?
        ep = serverFromString(self._reactor, "tcp:0")
        f = InboundConnectionFactory(self)
        d = ep.listen(f)

        def _listening(lp):
            # lp is an IListeningPort
            self._listeners.add(lp)  # for shutdown and tests
            portnum = lp.getHost().port
            direct_hints = [
                DirectTCPV1Hint(to_unicode(addr), portnum, 0.0)
                for addr in addresses
            ]
            self.listener_ready(direct_hints)

        d.addCallback(_listening)
        d.addErrback(log.err)

    def _schedule_connection(self, delay, h, is_relay):
        ep = endpoint_from_hint_obj(h, self._tor, self._reactor)
        desc = describe_hint_obj(h, is_relay, self._tor)
        d = deferLater(self._reactor, delay, self._connect, ep, desc, is_relay)
        d.addErrback(lambda f: f.trap(
            ConnectingCancelledError,
            ConnectionRefusedError,
            CancelledError,
        ))
        # TODO: HostnameEndpoint.connect catches CancelledError and replaces
        # it with DNSLookupError. Remove this workaround when
        # https://twistedmatrix.com/trac/ticket/9696 is fixed.
        d.addErrback(lambda f: f.trap(DNSLookupError))
        d.addErrback(log.err)
        self._pending_connectors.add(d)

    def _use_hints(self, hints):
        # first, pull out all the relays, we'll connect to them later
        relays = []
        direct = defaultdict(list)
        for h in hints:
            if isinstance(h, RelayV1Hint):
                relays.append(h)
            else:
                direct[h.priority].append(h)
        delay = 0.0
        made_direct = False
        priorities = sorted(set(direct.keys()), reverse=True)
        for p in priorities:
            for h in direct[p]:
                if isinstance(h, TorTCPV1Hint) and not self._tor:
                    continue
                self._schedule_connection(delay, h, is_relay=False)
                made_direct = True
                # Make all direct connections immediately. Later, we'll change
                # the add_candidate() function to look at the priority when
                # deciding whether to accept a successful connection or not,
                # and it can wait for more options if it sees a higher-priority
                # one still running. But if we bail on that, we might consider
                # putting an inter-direct-hint delay here to influence the
                # process.
                # delay += 1.0

        if made_direct and not self._no_listen:
            # Prefer direct connections by stalling relay connections by a
            # few seconds. We don't wait until direct connections have
            # failed, because many direct hints will be to unused
            # local-network IP addresses, which won't answer, and can take the
            # full 30s TCP timeout to fail.
            #
            # If we didn't make any direct connections, or we're using
            # --no-listen, then we're probably going to have to use the
            # relay, so don't delay it at all.
            delay += self.RELAY_DELAY

        # It might be nice to wire this so that a failure in the direct hints
        # causes the relay hints to be used right away (fast failover). But
        # none of our current use cases would take advantage of that: if we
        # have any viable direct hints, then they're either going to succeed
        # quickly or hang for a long time.
        for r in relays:
            for h in r.hints:
                self._schedule_connection(delay, h, is_relay=True)
        # TODO:
        # if not contenders:
        #    raise TransitError("No contenders for connection")

    # TODO: add 2*TIMEOUT deadline for first generation, don't wait forever for
    # the initial connection

    def _connect(self, ep, description, is_relay=False):
        relay_handshake = None
        if is_relay:
            relay_handshake = build_sided_relay_handshake(
                self._dilation_key, self._side)
        f = OutboundConnectionFactory(self, relay_handshake, description)
        d = ep.connect(f)

        # fires with protocol, or ConnectError

        def _connected(p):
            self._pending_connections.add(p)
            # c might not be in _pending_connections, if it turned out to be a
            # winner, which is why we use discard() and not remove()
            p.when_disconnected().addCallback(
                self._pending_connections.discard)

        d.addCallback(_connected)
        return d
Example #32
class Order(object):
    _side = attrib(validator=instance_of(type(u"")))
    _timing = attrib(validator=provides(_interfaces.ITiming))
    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace",
                        lambda self, f: None)  # pragma: no cover

    def __attrs_post_init__(self):
        self._key = None
        self._queue = []

    def wire(self, key, receive):
        self._K = _interfaces.IKey(key)
        self._R = _interfaces.IReceive(receive)

    @m.state(initial=True)
    def S0_no_pake(self):
        pass  # pragma: no cover

    @m.state(terminal=True)
    def S1_yes_pake(self):
        pass  # pragma: no cover

    def got_message(self, side, phase, body):
        # print("ORDER[%s].got_message(%s)" % (self._side, phase))
        assert isinstance(side, type("")), type(side)
        assert isinstance(phase, type("")), type(phase)
        assert isinstance(body, type(b"")), type(body)
        if phase == "pake":
            self.got_pake(side, phase, body)
        else:
            self.got_non_pake(side, phase, body)

    @m.input()
    def got_pake(self, side, phase, body):
        pass

    @m.input()
    def got_non_pake(self, side, phase, body):
        pass

    @m.output()
    def queue(self, side, phase, body):
        assert isinstance(side, type("")), type(side)
        assert isinstance(phase, type("")), type(phase)
        assert isinstance(body, type(b"")), type(body)
        self._queue.append((side, phase, body))

    @m.output()
    def notify_key(self, side, phase, body):
        self._K.got_pake(body)

    @m.output()
    def drain(self, side, phase, body):
        del phase
        del body
        for (side, phase, body) in self._queue:
            self._deliver(side, phase, body)
        self._queue[:] = []

    @m.output()
    def deliver(self, side, phase, body):
        self._deliver(side, phase, body)

    def _deliver(self, side, phase, body):
        self._R.got_message(side, phase, body)

    S0_no_pake.upon(got_non_pake, enter=S0_no_pake, outputs=[queue])
    S0_no_pake.upon(got_pake, enter=S1_yes_pake, outputs=[notify_key, drain])
    S1_yes_pake.upon(got_non_pake, enter=S1_yes_pake, outputs=[deliver])
Example #33
class Send(object):
    _side = attrib(validator=instance_of(type(u"")))
    _timing = attrib(validator=provides(_interfaces.ITiming))
    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace", lambda self, f: None)

    def __attrs_post_init__(self):
        self._queue = []

    def wire(self, mailbox):
        self._M = _interfaces.IMailbox(mailbox)

    @m.state(initial=True)
    def S0_no_key(self):
        pass  # pragma: no cover

    @m.state(terminal=True)
    def S1_verified_key(self):
        pass  # pragma: no cover

    # from Receive
    @m.input()
    def got_verified_key(self, key):
        pass
    # from Boss
    @m.input()
    def send(self, phase, plaintext):
        pass

    @m.output()
    def queue(self, phase, plaintext):
        assert isinstance(phase, type("")), type(phase)
        assert isinstance(plaintext, type(b"")), type(plaintext)
        self._queue.append((phase, plaintext))

    @m.output()
    def record_key(self, key):
        self._key = key

    @m.output()
    def drain(self, key):
        del key
        for (phase, plaintext) in self._queue:
            self._encrypt_and_send(phase, plaintext)
        self._queue[:] = []

    @m.output()
    def deliver(self, phase, plaintext):
        assert isinstance(phase, type("")), type(phase)
        assert isinstance(plaintext, type(b"")), type(plaintext)
        self._encrypt_and_send(phase, plaintext)

    def _encrypt_and_send(self, phase, plaintext):
        assert self._key
        data_key = derive_phase_key(self._key, self._side, phase)
        encrypted = encrypt_data(data_key, plaintext)
        self._M.add_message(phase, encrypted)

    S0_no_key.upon(send, enter=S0_no_key, outputs=[queue])
    S0_no_key.upon(got_verified_key,
                   enter=S1_verified_key,
                   outputs=[record_key, drain])
    S1_verified_key.upon(send, enter=S1_verified_key, outputs=[deliver])
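
Manager, Key, _SortedKey, Connector, Order, and Send all follow the same Automat recipe: declare states, inputs, and outputs on a MethodicalMachine, then wire them together with upon(). A minimal self-contained sketch of that recipe (Door, push, and _creak are illustrative names):

from automat import MethodicalMachine

class Door(object):
    m = MethodicalMachine()

    @m.state(initial=True)
    def closed(self):
        "the door is closed"

    @m.state()
    def opened(self):
        "the door is open"

    @m.input()
    def push(self):
        "someone pushes the door"

    @m.output()
    def _creak(self):
        return "creak"

    closed.upon(push, enter=opened, outputs=[_creak])
    opened.upon(push, enter=closed, outputs=[_creak])

door = Door()
print(door.push())   # ['creak'] -- calling an input returns the outputs' return values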