Example #1
class BitcoinDESubscribe(object):
    """Offers subscription services to the bitcoin.de websocket-API.
Register Callback functions in case a market event occures.
Opens a https connection.
Mostly used as a proxy to the underlying subscription-aware factory."""
    def __init__(self, reactor):
        self.reactor = reactor

        print "BitcoinDESubscribeFactory - constructor"

        tlsctx = optionsForClientTLS(u'ws.bitcoin.de')  #,trustRoot=None)
        self.endpoint = endpoints.SSL4ClientEndpoint(self.reactor,
                                                     'ws.bitcoin.de', 443,
                                                     tlsctx)
        self.factory = BitcoinDESubscribeFactory()

        self.connService = ClientService(self.endpoint, self.factory)
        self.connService.startService()


# Proxies

    def SubscribeAdd(self, func):
        return self.factory.SubscribeAdd(func)

    def SubscribeRemove(self, func):
        return self.factory.SubscribeRemove(func)

    def SubscribeManagement(self, func):
        return self.factory.SubscribeManagement(func)

    def SubscribeUpdate(self, func):
        return self.factory.SubscribeUpdate(func)
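A minimal usage sketch for the class above, assuming a running Twisted reactor and a one-argument callback (the callback signature is an assumption, since the factory itself is not shown here):

from twisted.internet import reactor

def on_add(event):
    # Hypothetical callback: the factory is assumed to hand over the raw market event.
    print("add event:", event)

subscriber = BitcoinDESubscribe(reactor)
subscriber.SubscribeAdd(on_add)   # register for "add" market events
reactor.run()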
Example #2
 @inlineCallbacks  # the method body below uses yield with Deferreds
 def connect(self):
     parts = chop(self.options['endpoint'], sep=':')
     if parts[0] == 'serial':
         endpoint = parts[1:]
         self.protocol = self.factory.buildProtocol(0)
         try:
             self.serport = SerialPort(self.protocol,
                                       endpoint[0],
                                       reactor,
                                       baudrate=endpoint[1])
         except Exception as e:
             self.log.error("{excp}", excp=e)
             yield self.stopService()
         else:
             self.gotProtocol(self.protocol)
             self.log.info("Using serial port {tty} at {baud} bps",
                           tty=endpoint[0],
                           baud=endpoint[1])
     else:
         ClientService.startService(self)
         try:
             protocol = yield self.whenConnected(failAfterFailures=1)
         except Exception as e:
             self.log.error("{excp}", excp=e)
             yield self.stopService()
         else:
             self.gotProtocol(protocol)
             self.log.info("Using TCP endpoint {endpoint}",
                           endpoint=self.options['endpoint'])
Example #3
def connect():
    endpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 8750)
    factory = Factory()
    factory.protocol = amp.AMP
    service = ClientService(endpoint, factory, retryPolicy=backoffPolicy(0.5, 15.0))
    service.startService()
    return service.whenConnected()
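A hedged sketch of how the Deferred returned by connect() might be consumed. The Sum command is purely illustrative (the AMP peer on port 8750 is not shown), but the callRemote pattern is standard Twisted AMP:

from twisted.internet import reactor
from twisted.protocols import amp

class Sum(amp.Command):
    # Illustrative command; the remote AMP server would have to expose a matching responder.
    arguments = [(b'a', amp.Integer()), (b'b', amp.Integer())]
    response = [(b'total', amp.Integer())]

def got_protocol(protocol):
    d = protocol.callRemote(Sum, a=1, b=2)
    d.addCallback(lambda result: print("sum:", result['total']))
    return d

connect().addCallback(got_protocol)
reactor.run()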
Example #4
class _ControllerProxy(ExportedState):
    def __init__(self, reactor, endpoint, elements, encoding, clock=None):
        self.__reactor = reactor
        self.__elements = elements
        self.__encoding = encoding
        
        self.__client_service = ClientService(
            endpoint=endpoint,
            factory=Factory.forProtocol(_ControllerProtocol),
            clock=clock)
        self.__client_service.startService()
    
    def state_def(self):
        for d in super(_ControllerProxy, self).state_def():
            yield d
        for element in self.__elements:
            for d in IElement(element)._cells(self.__send, self.__encoding):
                yield d
    
    def close(self):
        """implements IComponent"""
        self.__client_service.stopService()
    
    def attach_context(self, device_context):
        """implements IComponent"""
    
    @defer.inlineCallbacks
    def __send(self, cmd):
        protocol = yield self.__client_service.whenConnected(failAfterFailures=1)
        
        # The returned Deferred is not actually used, but in the future it
        # would be good if Commands did in fact have a 'done' signal.
        defer.returnValue(protocol.send(cmd))
Example #5
    def startService(self):
        """
		Start the MQTT service
		"""
        print("starting MQTT Client Subscriber Service")
        # invoke whenConnected() inherited method
        self.whenConnected().addCallback(self.connectToBroker)
        ClientService.startService(self)
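connectToBroker is not shown in this snippet; based on the MQTT protocol API used in Example #14 below (connect/subscribe), a minimal sketch could look like the following, assuming inlineCallbacks is imported from twisted.internet.defer and using made-up client id, keepalive and topic values:

    @inlineCallbacks
    def connectToBroker(self, protocol):
        self.protocol = protocol
        try:
            # Assumed client id and keepalive; real values depend on the broker setup.
            yield self.protocol.connect("twisted-mqtt-sub", keepalive=60)
            yield self.protocol.subscribe("foo/bar/baz", 1)   # assumed topic and QoS
        except Exception as e:
            print("Connecting to broker failed: %s" % e)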
Example #6
 def startService(self):
     log.info("Starting MQTT publisher plugin")
     # subscribe to AQI updates
     aqi_monitor = self.parent.getServiceNamed(aqimon.AqiMonitor.name)
     aqi_monitor.add_listener(self)
     # invoke whenConnected() inherited method
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
Example #7
 def connect(self, host, port, reconnect=True):
     """Initiate outgoing connection, either persistent or no"""
     print("Initiating connection to %s:%d" % (host, port))
     ep = TCP4ClientEndpoint(self._reactor, host, port)
     if reconnect:
         service = ClientService(ep, self, retryPolicy=lambda x: 1)
         service.startService()
     else:
         ep.connect(self)
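The lambda retry policy above reconnects at a fixed one-second interval. If exponential backoff is preferred, ClientService also accepts the backoffPolicy helper seen in the other examples; a drop-in variant of the reconnect branch (delay values are arbitrary):

from twisted.application.internet import ClientService, backoffPolicy

service = ClientService(ep, self, retryPolicy=backoffPolicy(initialDelay=1.0, maxDelay=30.0))
service.startService()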
Example #8
def main():
    from twisted.internet import reactor, ssl
    # Simple 'TrackMarket' example displaying all events processed
    tlsctx = optionsForClientTLS(u'ws.bitcoin.de', None)
    endpoint = endpoints.SSL4ClientEndpoint(reactor, 'ws.bitcoin.de', 443,
                                            tlsctx)
    factory = BitcoinDEMarket()

    connService = ClientService(endpoint, factory)
    connService.startService()

    reactor.run()
Example #9
    def SetupConnection(self, host, port):
        self.__log.debug("Setting up connection! %s %s " % (host, port))

        factory = Factory.forProtocol(NeoNode)
        endpoint = clientFromString(reactor,"tcp:host=%s:port=%s:timeout=5" % (host,port))

        connectingService = ClientService(
            endpoint,
            factory,
            retryPolicy=backoffPolicy(.5, factor=3.0)
        )
        connectingService.startService()
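SetupConnection starts the service but keeps no reference to it or to the eventual protocol. A hedged addition to the end of the method (the attribute name and failAfterFailures value are assumptions) that lets the caller stop the service later and receive the connected NeoNode:

        # Keep a handle so the service can be stopped later, and hand the
        # eventual NeoNode protocol back to the caller.
        self.connectingService = connectingService
        return connectingService.whenConnected(failAfterFailures=3)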
Example #10
    def connect_samsung(self, p_device_obj):
        def cb_connectedNow(SamsungClient):
            LOG.debug('Connected Now')
            SamsungClient.send_command('1PWRQSTN')

        def eb_failed(fail_reason):
            LOG.warn(
                "initial Samsung connection failed: {}".format(fail_reason))
            l_ReconnectingService.stopService()

        l_reactor = self.m_pyhouse_obj._Twisted.Reactor
        try:
            # l_host = convert.long_to_str(p_device_obj.IPv4)
            l_host = 'samsung-tv'
            l_port = p_device_obj.Port
            l_endpoint_str = 'tcp:{}:port={}'.format(l_host, l_port)
            l_endpoint = clientFromString(l_reactor, l_endpoint_str)
            l_factory = Factory.forProtocol(SamsungProtocol)
            l_ReconnectingService = ClientService(l_endpoint, l_factory)
            l_ReconnectingService.setName('Samsung ')
            waitForConnection = l_ReconnectingService.whenConnected(
                failAfterFailures=1)
            LOG.debug('Endpoint: {}'.format(l_endpoint_str))
            LOG.debug('{}'.format(
                PrettyFormatAny.form(l_endpoint, 'Endpoint', 190)))
            LOG.debug('{}'.format(
                PrettyFormatAny.form(l_factory, 'Factory', 190)))
            LOG.debug('{}'.format(
                PrettyFormatAny.form(l_ReconnectingService, 'ReconnectService',
                                     190)))

            waitForConnection.addCallbacks(cb_connectedNow, eb_failed)
            l_ReconnectingService.startService()
            p_device_obj._Endpoint = l_endpoint
            p_device_obj._Factory = l_factory
            p_device_obj._isRunning = True
            LOG.info("Started Samsung - Host:{}; Port:{}".format(
                l_host, l_port))
        except Exception as e_err:
            LOG.error('Error found: {}'.format(e_err))
        pass
Example #11
 def build_irc(self):
     """The main starting method that creates a protocol object
     according to the config variables, ready for whenever
     the reactor starts running.
     """
     wlog('building irc')
     if self.tx_irc_client:
         raise Exception('irc already built')
     if self.usessl.lower() == 'true':
         ctx = ClientContextFactory()
     if self.usessl.lower() == 'true' and not self.socks5.lower() == 'true':
         factory = TxIRCFactory(self)
         reactor.connectSSL(self.serverport[0], self.serverport[1], factory,
                            ctx)
     elif self.socks5.lower() == 'true':
         factory = TxIRCFactory(self)
         # str() casts are needed, otherwise a unicode error occurs
         torEndpoint = TCP4ClientEndpoint(reactor, str(self.socks5_host),
                                          self.socks5_port)
         if self.usessl.lower() == 'true':
             use_tls = ctx
         else:
             use_tls = False
         ircEndpoint = TorSocksEndpoint(torEndpoint,
                                        self.serverport[0],
                                        self.serverport[1],
                                        tls=use_tls)
         myRS = ClientService(ircEndpoint, factory)
         myRS.startService()
     else:
         try:
             factory = TxIRCFactory(self)
             wlog('build_irc: ', self.serverport[0],
                  str(self.serverport[1]), self.channel)
             self.tcp_connector = reactor.connectTCP(
                 self.serverport[0], self.serverport[1], factory)
         except Exception as e:
             wlog('error in buildirc: ' + repr(e))
Example #12
 def startService(self):
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
Example #13
class TerseJSONToTCPLogObserver(object):
    """
    An IObserver that writes JSON logs to a TCP target.

    Args:
        hs (HomeServer): The homeserver that is being logged for.
        host: The host of the logging target.
        port: The logging target's port.
        metadata: Metadata to be added to each log entry.
        maximum_buffer: The maximum buffer size.
    """

    hs = attr.ib()
    host = attr.ib(type=str)
    port = attr.ib(type=int)
    metadata = attr.ib(type=dict)
    maximum_buffer = attr.ib(type=int)
    _buffer = attr.ib(default=attr.Factory(deque), type=deque)
    _connection_waiter = attr.ib(default=None, type=Optional[Deferred])
    _logger = attr.ib(default=attr.Factory(Logger))
    _producer = attr.ib(default=None, type=Optional[LogProducer])

    def start(self) -> None:

        # Connect without DNS lookups if it's a direct IP.
        try:
            ip = ip_address(self.host)
            if isinstance(ip, IPv4Address):
                endpoint = TCP4ClientEndpoint(self.hs.get_reactor(), self.host,
                                              self.port)
            elif isinstance(ip, IPv6Address):
                endpoint = TCP6ClientEndpoint(self.hs.get_reactor(), self.host,
                                              self.port)
        except ValueError:
            endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host,
                                        self.port)

        factory = Factory.forProtocol(Protocol)
        self._service = ClientService(endpoint,
                                      factory,
                                      clock=self.hs.get_reactor())
        self._service.startService()
        self._connect()

    def stop(self):
        self._service.stopService()

    def _connect(self) -> None:
        """
        Triggers an attempt to connect then write to the remote if not already writing.
        """
        if self._connection_waiter:
            return

        self._connection_waiter = self._service.whenConnected(
            failAfterFailures=1)

        @self._connection_waiter.addErrback
        def fail(r):
            r.printTraceback(file=sys.__stderr__)
            self._connection_waiter = None
            self._connect()

        @self._connection_waiter.addCallback
        def writer(r):
            # We have a connection. If we already have a producer, and its
            # transport is the same, just trigger a resumeProducing.
            if self._producer and r.transport is self._producer.transport:
                self._producer.resumeProducing()
                self._connection_waiter = None
                return

            # If the producer is still producing, stop it.
            if self._producer:
                self._producer.stopProducing()

            # Make a new producer and start it.
            self._producer = LogProducer(buffer=self._buffer,
                                         transport=r.transport)
            r.transport.registerProducer(self._producer, True)
            self._producer.resumeProducing()
            self._connection_waiter = None

    def _handle_pressure(self) -> None:
        """
        Handle backpressure by shedding events.

        Events are shed from the buffer, in this order, until it is below the maximum:
            - Shed DEBUG events
            - Shed INFO events
            - Shed the middle 50% of the events.
        """
        if len(self._buffer) <= self.maximum_buffer:
            return

        # Strip out DEBUGs
        self._buffer = deque(
            filter(lambda event: event["level"] != "DEBUG", self._buffer))

        if len(self._buffer) <= self.maximum_buffer:
            return

        # Strip out INFOs
        self._buffer = deque(
            filter(lambda event: event["level"] != "INFO", self._buffer))

        if len(self._buffer) <= self.maximum_buffer:
            return

        # Cut the middle entries out
        buffer_split = floor(self.maximum_buffer / 2)

        old_buffer = self._buffer
        self._buffer = deque()

        for i in range(buffer_split):
            self._buffer.append(old_buffer.popleft())

        end_buffer = []
        for i in range(buffer_split):
            end_buffer.append(old_buffer.pop())

        self._buffer.extend(reversed(end_buffer))

    def __call__(self, event: dict) -> None:
        flattened = flatten_event(event, self.metadata, include_time=True)
        self._buffer.append(flattened)

        # Handle backpressure, if it exists.
        try:
            self._handle_pressure()
        except Exception:
            # If handling backpressure fails, clear the buffer and log the
            # exception.
            self._buffer.clear()
            self._logger.failure("Failed clearing backpressure")

        # Try and write immediately.
        self._connect()
Example #14
class MQTTService(object):
    """MQTT Service interface to Azure IoT hub.
    
    Attributes:
        client: (ClientService): Twisted client service
        connected (bool): Service connection flag
        devid (str): Device identifier
        username: (str): Azure IoT Hub MQTT username
        password: (str): Azure IoT Hub MQTT password
        messages (list): Received inbound messages
    """

    TIMEOUT = 10.0

    def __init__(self, endpoint, factory, devid, username, password):

        self.client = ClientService(endpoint, factory)
        self.connected = False
        self.devid = devid
        self.username = username
        self.password = password
        self.messages = []

    @inlineCallbacks
    def publishMessage(self, data):
        """Publish the MQTT message.
        
        Any inbound messages are copied to the messages list attribute,
        and returned to the caller.
        
        Args:
            data (str): Application data to send
            
        Returns:
            A list of received messages.
        """
        # Start the service, and add a timeout to check the connection.
        self.client.startService()
        reactor.callLater(self.TIMEOUT, self.checkConnection)

        # Attempt to connect. If we time out and cancel, an exception
        # is thrown.
        try:
            yield self.client.whenConnected().addCallback(
                self.azureConnect, data)
        except Exception as e:
            log.error("Azure MQTT service failed to connect to broker.")

        # Stop the service if successful, and finally return
        # any inbound messages.
        else:
            yield self.client.stopService()
        finally:
            returnValue(self.messages)

    @inlineCallbacks
    def checkConnection(self):
        """Check if the connected flag is set.
        
        Stop the service if not.
        """
        if not self.connected:
            yield self.client.stopService()

    @inlineCallbacks
    def azureConnect(self, protocol, data):

        self.connected = True
        protocol.setWindowSize(1)
        protocol.onPublish = self.onPublish

        pubtopic = 'devices/{}/messages/events/'.format(self.devid)
        subtopic = 'devices/{}/messages/devicebound/#'.format(self.devid)

        try:
            # Connect and subscribe
            yield protocol.connect(self.devid,
                                   username=self.username,
                                   password=self.password,
                                   cleanStart=False,
                                   keepalive=10)
            yield protocol.subscribe(subtopic, 2)
        except Exception as e:
            log.error(
                "Azure MQTT service could not connect to "
                "Azure IOT Hub using username {name}",
                name=self.username)
            returnValue(None)

        # Publish the outbound message
        yield protocol.publish(topic=pubtopic, qos=0, message=str(data))

    def onPublish(self, topic, payload, qos, dup, retain, msgId):
        """Receive messages from Azure IoT Hub
        
        IoT Hub delivers messages with the Topic Name
        devices/{device_id}/messages/devicebound/ or
        devices/{device_id}/messages/devicebound/{property_bag}
        if there are any message properties. {property_bag} contains
        url-encoded key/value pairs of message properties.
        System property names have the prefix $, application properties
        use the original property name with no prefix.
        """
        message = ''

        # Split the component parameters of topic. Obtain the downstream message
        # using the key name message.
        params = parse_qs(topic)
        if 'message' in params:
            self.messages.append(params['message'])
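onPublish extracts the downstream payload with parse_qs; a quick illustration of that parsing on a made-up property bag (using Python 3's urllib.parse here):

from urllib.parse import parse_qs

params = parse_qs("message=hello&priority=high")
print(params)               # {'message': ['hello'], 'priority': ['high']}
print(params['message'])    # ['hello']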
Example #15
 def startService(self, name):
     log.info("starting MQTT Content Publisher Service")
     # invoke whenConnected() inherited method
     self._system_name = name
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
Example #16
    def makeReconnector(self, fireImmediately=True, startService=True,
                        protocolType=Protocol, **kw):
        """
        Create a L{ClientService} along with a L{ConnectInformation} indicating
        the connections in progress on its endpoint.

        @param fireImmediately: Should all of the endpoint connection attempts
            fire synchronously?
        @type fireImmediately: L{bool}

        @param startService: Should the L{ClientService} be started before
            being returned?
        @type startService: L{bool}

        @param protocolType: a 0-argument callable returning a new L{IProtocol}
            provider to be used for application-level protocol connections.

        @param kw: Arbitrary keyword arguments to be passed on to
            L{ClientService}

        @return: a 2-tuple of L{ConnectInformation} (for information about test
            state) and L{ClientService} (the system under test).  The
            L{ConnectInformation} has 2 additional attributes;
            C{applicationFactory} and C{applicationProtocols}, which refer to
            the unwrapped protocol factory and protocol instances passed in to
            L{ClientService} respectively.
        """
        nkw = {}
        nkw.update(clock=Clock())
        nkw.update(kw)
        cq, endpoint = endpointForTesting(fireImmediately=fireImmediately)

        # `endpointForTesting` is totally generic to any LLPI client that uses
        # endpoints, and maintains all its state internally; however,
        # applicationProtocols and applicationFactory are bonus attributes that
        # are only specifically interesting to tests that use wrapper
        # protocols.  For now, set them here, externally.

        applicationProtocols = cq.applicationProtocols = []

        class RememberingFactory(Factory, object):
            protocol = protocolType
            def buildProtocol(self, addr):
                result = super(RememberingFactory, self).buildProtocol(addr)
                applicationProtocols.append(result)
                return result

        cq.applicationFactory = factory = RememberingFactory()

        service = ClientService(endpoint, factory, **nkw)
        def stop():
            service._protocol = None
            if service.running:
                service.stopService()
            # Ensure that we don't leave any state in the reactor after
            # stopService.
            self.assertEqual(service._clock.getDelayedCalls(), [])
        self.addCleanup(stop)
        if startService:
            service.startService()
        return cq, service
Example #17
class RemoteHandler(logging.Handler):
    """
    A logging handler that writes logs to a TCP target.

    Args:
        host: The host of the logging target.
        port: The logging target's port.
        maximum_buffer: The maximum buffer size.
    """

    def __init__(
        self,
        host: str,
        port: int,
        maximum_buffer: int = 1000,
        level=logging.NOTSET,
        _reactor=None,
    ):
        super().__init__(level=level)
        self.host = host
        self.port = port
        self.maximum_buffer = maximum_buffer

        self._buffer: Deque[logging.LogRecord] = deque()
        self._connection_waiter: Optional[Deferred] = None
        self._producer: Optional[LogProducer] = None

        # Connect without DNS lookups if it's a direct IP.
        if _reactor is None:
            from twisted.internet import reactor

            _reactor = reactor

        try:
            ip = ip_address(self.host)
            if isinstance(ip, IPv4Address):
                endpoint: IStreamClientEndpoint = TCP4ClientEndpoint(
                    _reactor, self.host, self.port
                )
            elif isinstance(ip, IPv6Address):
                endpoint = TCP6ClientEndpoint(_reactor, self.host, self.port)
            else:
                raise ValueError("Unknown IP address provided: %s" % (self.host,))
        except ValueError:
            endpoint = HostnameEndpoint(_reactor, self.host, self.port)

        factory = Factory.forProtocol(Protocol)
        self._service = ClientService(endpoint, factory, clock=_reactor)
        self._service.startService()
        self._stopping = False
        self._connect()

    def close(self):
        self._stopping = True
        self._service.stopService()

    def _connect(self) -> None:
        """
        Triggers an attempt to connect then write to the remote if not already writing.
        """
        # Do not attempt to open multiple connections.
        if self._connection_waiter:
            return

        def fail(failure: Failure) -> None:
            # If the Deferred was cancelled (e.g. during shutdown) do not try to
            # reconnect (this will cause an infinite loop of errors).
            if failure.check(CancelledError) and self._stopping:
                return

            # For a different error, print the traceback and re-connect.
            failure.printTraceback(file=sys.__stderr__)
            self._connection_waiter = None
            self._connect()

        def writer(result: Protocol) -> None:
            # Force recognising transport as a Connection and not the more
            # generic ITransport.
            transport: Connection = result.transport  # type: ignore

            # We have a connection. If we already have a producer, and its
            # transport is the same, just trigger a resumeProducing.
            if self._producer and transport is self._producer.transport:
                self._producer.resumeProducing()
                self._connection_waiter = None
                return

            # If the producer is still producing, stop it.
            if self._producer:
                self._producer.stopProducing()

            # Make a new producer and start it.
            self._producer = LogProducer(
                buffer=self._buffer,
                transport=transport,
                format=self.format,
            )
            transport.registerProducer(self._producer, True)
            self._producer.resumeProducing()
            self._connection_waiter = None

        deferred: Deferred = self._service.whenConnected(failAfterFailures=1)
        deferred.addCallbacks(writer, fail)
        self._connection_waiter = deferred

    def _handle_pressure(self) -> None:
        """
        Handle backpressure by shedding records.

        Records are shed from the buffer, in this order, until it is below the maximum:
            - Shed DEBUG records.
            - Shed INFO records.
            - Shed the middle 50% of the records.
        """
        if len(self._buffer) <= self.maximum_buffer:
            return

        # Strip out DEBUGs
        self._buffer = deque(
            filter(lambda record: record.levelno > logging.DEBUG, self._buffer)
        )

        if len(self._buffer) <= self.maximum_buffer:
            return

        # Strip out INFOs
        self._buffer = deque(
            filter(lambda record: record.levelno > logging.INFO, self._buffer)
        )

        if len(self._buffer) <= self.maximum_buffer:
            return

        # Cut the middle entries out
        buffer_split = floor(self.maximum_buffer / 2)

        old_buffer = self._buffer
        self._buffer = deque()

        for _ in range(buffer_split):
            self._buffer.append(old_buffer.popleft())

        end_buffer = []
        for _ in range(buffer_split):
            end_buffer.append(old_buffer.pop())

        self._buffer.extend(reversed(end_buffer))

    def emit(self, record: logging.LogRecord) -> None:
        self._buffer.append(record)

        # Handle backpressure, if it exists.
        try:
            self._handle_pressure()
        except Exception:
            # If handling backpressure fails, clear the buffer and log the
            # exception.
            self._buffer.clear()
            logger.warning("Failed clearing backpressure")

        # Try and write immediately.
        self._connect()
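Because RemoteHandler is a regular logging.Handler, wiring it into the stdlib logging tree follows the usual pattern; the host, port and logger name below are assumptions:

import logging

handler = RemoteHandler("logs.example.com", 5170, maximum_buffer=1000)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s"))

logger = logging.getLogger("synapse")   # assumed logger name
logger.addHandler(handler)
logger.info("remote logging enabled")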
Example #18
 def startService(self):
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
Example #19
class BaseNetwork:

    def __init__(self, ledger):
        self.config = ledger.config
        self.client = None
        self.service = None
        self.running = False

        self._on_connected_controller = StreamController()
        self.on_connected = self._on_connected_controller.stream

        self._on_header_controller = StreamController()
        self.on_header = self._on_header_controller.stream

        self._on_status_controller = StreamController()
        self.on_status = self._on_status_controller.stream

        self.subscription_controllers = {
            'blockchain.headers.subscribe': self._on_header_controller,
            'blockchain.address.subscribe': self._on_status_controller,
        }

    @defer.inlineCallbacks
    def start(self):
        for server in cycle(self.config['default_servers']):
            connection_string = 'tcp:{}:{}'.format(*server)
            endpoint = clientFromString(reactor, connection_string)
            log.debug("Attempting connection to SPV wallet server: %s", connection_string)
            self.service = ClientService(endpoint, StratumClientFactory(self))
            self.service.startService()
            try:
                self.client = yield self.service.whenConnected(failAfterFailures=2)
                yield self.ensure_server_version()
                log.info("Successfully connected to SPV wallet server: %s", connection_string)
                self._on_connected_controller.add(True)
                yield self.client.on_disconnected.first
            except CancelledError:
                return
            except Exception:  # pylint: disable=broad-except
                log.exception("Connecting to %s raised an exception:", connection_string)
            finally:
                self.client = None
            if not self.running:
                return

    def stop(self):
        self.running = False
        if self.service is not None:
            self.service.stopService()
        if self.is_connected:
            return self.client.on_disconnected.first
        else:
            return defer.succeed(True)

    @property
    def is_connected(self):
        return self.client is not None and self.client.connected

    def rpc(self, list_or_method, *args):
        if self.is_connected:
            return self.client.rpc(list_or_method, *args)
        else:
            raise ConnectionError("Attempting to send rpc request when connection is not available.")

    def ensure_server_version(self, required='1.2'):
        return self.rpc('server.version', __version__, required)

    def broadcast(self, raw_transaction):
        return self.rpc('blockchain.transaction.broadcast', raw_transaction)

    def get_history(self, address):
        return self.rpc('blockchain.address.get_history', address)

    def get_transaction(self, tx_hash):
        return self.rpc('blockchain.transaction.get', tx_hash)

    def get_merkle(self, tx_hash, height):
        return self.rpc('blockchain.transaction.get_merkle', tx_hash, height)

    def get_headers(self, height, count=10000):
        return self.rpc('blockchain.block.headers', height, count)

    def subscribe_headers(self):
        return self.rpc('blockchain.headers.subscribe', True)

    def subscribe_address(self, address):
        return self.rpc('blockchain.address.subscribe', address)
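A minimal usage sketch for BaseNetwork. The ledger object is assumed to carry config['default_servers'] as a list of (host, port) pairs, and setting the running flag is left to the caller here (an assumption based on the check at the end of start()):

network = BaseNetwork(ledger)    # hypothetical ledger exposing config['default_servers']
network.running = True           # keep cycling through servers after disconnects
network.start()                  # inlineCallbacks loop; returns a Deferred
# ... on shutdown ...
network.stop()                   # stops the ClientService and waits for disconnect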
Example #20
class TerseJSONToTCPLogObserver(object):
    """
    An IObserver that writes JSON logs to a TCP target.

    Args:
        hs (HomeServer): The homeserver that is being logged for.
        host: The host of the logging target.
        port: The logging target's port.
        metadata: Metadata to be added to each log entry.
        maximum_buffer: The maximum buffer size.
    """

    hs = attr.ib()
    host = attr.ib(type=str)
    port = attr.ib(type=int)
    metadata = attr.ib(type=dict)
    maximum_buffer = attr.ib(type=int)
    _buffer = attr.ib(default=attr.Factory(deque), type=deque)
    _writer = attr.ib(default=None)
    _logger = attr.ib(default=attr.Factory(Logger))

    def start(self) -> None:

        # Connect without DNS lookups if it's a direct IP.
        try:
            ip = ip_address(self.host)
            if isinstance(ip, IPv4Address):
                endpoint = TCP4ClientEndpoint(self.hs.get_reactor(), self.host,
                                              self.port)
            elif isinstance(ip, IPv6Address):
                endpoint = TCP6ClientEndpoint(self.hs.get_reactor(), self.host,
                                              self.port)
        except ValueError:
            endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host,
                                        self.port)

        factory = Factory.forProtocol(Protocol)
        self._service = ClientService(endpoint,
                                      factory,
                                      clock=self.hs.get_reactor())
        self._service.startService()

    def _write_loop(self) -> None:
        """
        Implement the write loop.
        """
        if self._writer:
            return

        self._writer = self._service.whenConnected()

        @self._writer.addBoth
        def writer(r):
            if isinstance(r, Failure):
                r.printTraceback(file=sys.__stderr__)
                self._writer = None
                self.hs.get_reactor().callLater(1, self._write_loop)
                return

            try:
                for event in self._buffer:
                    r.transport.write(
                        dumps(event, ensure_ascii=False,
                              separators=(",", ":")).encode("utf8"))
                    r.transport.write(b"\n")
                self._buffer.clear()
            except Exception as e:
                sys.__stderr__.write("Failed writing out logs with %s\n" %
                                     (str(e), ))

            self._writer = False
            self.hs.get_reactor().callLater(1, self._write_loop)

    def _handle_pressure(self) -> None:
        """
        Handle backpressure by shedding events.

        Events are shed from the buffer, in this order, until it is below the maximum:
            - Shed DEBUG events
            - Shed INFO events
            - Shed the middle 50% of the events.
        """
        if len(self._buffer) <= self.maximum_buffer:
            return

        # Strip out DEBUGs
        self._buffer = deque(
            filter(lambda event: event["level"] != "DEBUG", self._buffer))

        if len(self._buffer) <= self.maximum_buffer:
            return

        # Strip out INFOs
        self._buffer = deque(
            filter(lambda event: event["level"] != "INFO", self._buffer))

        if len(self._buffer) <= self.maximum_buffer:
            return

        # Cut the middle entries out
        buffer_split = floor(self.maximum_buffer / 2)

        old_buffer = self._buffer
        self._buffer = deque()

        for i in range(buffer_split):
            self._buffer.append(old_buffer.popleft())

        end_buffer = []
        for i in range(buffer_split):
            end_buffer.append(old_buffer.pop())

        self._buffer.extend(reversed(end_buffer))

    def __call__(self, event: dict) -> None:
        flattened = flatten_event(event, self.metadata, include_time=True)
        self._buffer.append(flattened)

        # Handle backpressure, if it exists.
        try:
            self._handle_pressure()
        except Exception:
            # If handling backpressure fails, clear the buffer and log the
            # exception.
            self._buffer.clear()
            self._logger.failure("Failed clearing backpressure")

        # Try and write immediately.
        self._write_loop()
Example #21
 def startService(self):
     log.info("starting MQTT Client Subscriber Service")
     # invoke whenConnected() inherited method
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
Example #22
class Session(ApplicationSession):

    # pylint: disable=too-many-arguments
    def __init__(self,
                 address,
                 mapping=None,
                 cert_manager=None,
                 use_ipv6=False,
                 crsb_user=None,
                 crsb_user_secret=None) -> None:
        self.address = address
        if mapping is None:
            mapping = {}
        self.mapping = mapping

        self.ready = Deferred()
        self.connected = False

        self._cert_manager = cert_manager

        self._client = None
        self._reconnect_service = None
        self._use_ipv6 = use_ipv6

        self.config = types.ComponentConfig(realm=address.realm)
        self.crsb_user = crsb_user
        self.crsb_user_secret = crsb_user_secret

        # pylint:disable=bad-super-call
        super(self.__class__, self).__init__(self.config)  # type: ignore

    def connect(self, auto_reconnect=True):
        def init(proto):
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        def cleanup(proto):
            session = getattr(proto, '_session', None)
            if session is None:
                return
            if session.is_attached():
                return session.leave()
            elif session.is_connected():
                return session.disconnect()

        from twisted.internet import reactor

        transport_factory = WampWebSocketClientFactory(self, str(self.address))
        transport_factory.setProtocolOptions(
            maxFramePayloadSize=1048576,
            maxMessagePayloadSize=1048576,
            autoFragmentSize=65536,
            failByDrop=False,
            openHandshakeTimeout=OPEN_HANDSHAKE_TIMEOUT,
            closeHandshakeTimeout=CLOSE_HANDSHAKE_TIMEOUT,
            tcpNoDelay=True,
            autoPingInterval=AUTO_PING_INTERVAL,
            autoPingTimeout=AUTO_PING_TIMEOUT,
            autoPingSize=4,
        )

        if self.address.ssl:
            if self._cert_manager:
                cert_data = self._cert_manager.read_certificate()
                authority = twisted_ssl.Certificate.loadPEM(cert_data)
            else:
                authority = None

            context_factory = optionsForClientTLS(X509_COMMON_NAME,
                                                  trustRoot=authority)
            self._client = SSL4ClientEndpoint(reactor, self.address.host,
                                              self.address.port,
                                              context_factory)
        else:
            if self._use_ipv6:
                endpoint_cls = TCP6ClientEndpoint
            else:
                endpoint_cls = TCP4ClientEndpoint

            self._client = endpoint_cls(reactor, self.address.host,
                                        self.address.port)

        if auto_reconnect:
            self._reconnect_service = ClientService(
                endpoint=self._client,
                factory=transport_factory,
                retryPolicy=backoffPolicy(factor=BACKOFF_POLICY_FACTOR))
            self._reconnect_service.startService()
            deferred = self._reconnect_service.whenConnected()
        else:
            deferred = self._client.connect(transport_factory)

        deferred.addCallback(init)
        deferred.addErrback(self.ready.errback)
        return self.ready

    def onConnect(self):
        if self.crsb_user and self.crsb_user_secret:
            logger.info(f"Client connected. Starting WAMP-Ticket "
                        f"authentication on realm {self.config.realm} "
                        f"as crsb_user {self.crsb_user}")
            self.join(self.config.realm, ["wampcra"], self.crsb_user.name)
        else:
            logger.info("Attempting to log in as anonymous")

    def onChallenge(self, challenge):
        if challenge.method == "wampcra":
            logger.info(f"WAMP-Ticket challenge received: {challenge}")
            signature = auth.compute_wcs(self.crsb_user_secret.encode('utf8'),
                                         challenge.extra['challenge'].encode('utf8'))  # noqa # pylint: disable=line-too-long
            return signature.decode('ascii')

        else:
            raise Exception("Invalid authmethod {}".format(challenge.method))

    @inlineCallbacks
    def onJoin(self, details):
        yield self.register_procedures(self.mapping)
        self.connected = True
        if not self.ready.called:
            self.ready.callback(details)

    def onLeave(self, details):
        self.connected = False
        if not self.ready.called:
            self.ready.errback(details or "Unknown error occurred")
        super(Session, self).onLeave(details)

    def onDisconnect(self):
        self.connected = False
        super(Session, self).onDisconnect()

    @inlineCallbacks
    def add_procedures(self, mapping):
        self.mapping.update(mapping)
        yield self.register_procedures(mapping)

    @inlineCallbacks
    def register_procedures(self, mapping):
        for uri, procedure in mapping.items():
            deferred = self.register(procedure, uri)
            deferred.addErrback(self._on_error)
            yield deferred

    def exposed_procedures(self):
        exposed: typing.Dict[str, str] = {}
        for registration in self._registrations.values():
            fn = registration.endpoint.fn
            qname = '.'.join((fn.__module__, fn.__qualname__))
            exposed[registration.procedure] = qname
        return exposed

    def is_open(self):
        return self.connected and self.is_attached() and not self.is_closing()

    def is_closing(self):
        return self._goodbye_sent or self._transport_is_closing

    @staticmethod
    def _on_error(err):
        logger.error("RPC: Session error: %r", err)
        return err
Example #23
class ZenHubClient(object):
    """A client for connecting to ZenHub as a ZenHub Worker.

    After start is called, this class automatically handles connecting to
    ZenHub, registering the zenhubworker with ZenHub, and automatically
    reconnecting to ZenHub if the connection to ZenHub is corrupted for
    any reason.
    """
    def __init__(
        self,
        reactor,
        endpoint,
        credentials,
        worker,
        timeout,
        worklistId,
    ):
        """Initialize a ZenHubClient instance.

        :type reactor: IReactorCore
        :param endpoint: Where zenhub is found
        :type endpoint: IStreamClientEndpoint
        :param credentials: Credentials to log into ZenHub.
        :type credentials: IUsernamePassword
        :param worker: Reference to worker
        :type worker: IReferenceable
        :param float timeout: Seconds to wait before determining whether
            ZenHub is unresponsive.
        :param str worklistId: Name of the worklist to receive tasks from.
        """
        self.__reactor = reactor
        self.__endpoint = endpoint
        self.__credentials = credentials
        self.__worker = worker
        self.__timeout = timeout
        self.__worklistId = worklistId

        self.__stopping = False
        self.__pinger = None
        self.__service = None

        self.__log = getLogger(self)
        self.__signalFile = ConnectedToZenHubSignalFile()

    def start(self):
        """Start connecting to ZenHub."""
        self.__stopping = False
        factory = ZenPBClientFactory()
        self.__service = ClientService(
            self.__endpoint,
            factory,
            retryPolicy=backoffPolicy(initialDelay=0.5, factor=3.0),
        )
        self.__service.startService()
        self.__prepForConnection()

    def stop(self):
        """Stop connecting to ZenHub."""
        self.__stopping = True
        self.__reset()

    def restart(self):
        """Restart the connect to ZenHub."""
        self.__reset()
        self.start()

    def __reset(self):
        if self.__pinger:
            self.__pinger.stop()
            self.__pinger = None
        if self.__service:
            self.__service.stopService()
            self.__service = None
        self.__signalFile.remove()

    def __prepForConnection(self):
        if not self.__stopping:
            self.__log.info("Prepping for connection")
            self.__service.whenConnected().addCallbacks(
                self.__connected,
                self.__notConnected,
            )

    def __disconnected(self, *args):
        # Called when the connection to ZenHub is lost.
        # Ensures that processing resumes when the connection to ZenHub
        # is restored.
        self.__log.info(
            "Lost connection to ZenHub: %s",
            args[0] if args else "<no reason given>",
        )
        if self.__pinger:
            self.__pinger.stop()
            self.__pinger = None
        self.__signalFile.remove()
        self.__prepForConnection()

    def __notConnected(self, *args):
        self.__log.info("Not connected! %r", args)

    @defer.inlineCallbacks
    def __connected(self, broker):
        # Called when a connection to ZenHub is established.
        # Logs into ZenHub and passes up a worker reference for ZenHub
        # to use to dispatch method calls.

        # Sometimes broker.transport doesn't have a 'socket' attribute
        if not hasattr(broker.transport, "socket"):
            self.restart()
            defer.returnValue(None)

        self.__log.info("Connection to ZenHub established")
        try:
            setKeepAlive(broker.transport.socket)

            zenhub = yield self.__login(broker)
            yield zenhub.callRemote(
                "reportingForWork",
                self.__worker,
                workerId=self.__worker.instanceId,
                worklistId=self.__worklistId,
            )

            ping = PingZenHub(zenhub, self)
            self.__pinger = task.LoopingCall(ping)
            d = self.__pinger.start(self.__timeout, now=False)
            d.addErrback(self.__pingFail)  # Catch and pass on errors
        except defer.CancelledError:
            self.__log.error("Timed out trying to login to ZenHub")
            self.restart()
            defer.returnValue(None)
        except Exception as ex:
            self.__log.error(
                "Unable to report for work: (%s) %s",
                type(ex).__name__,
                ex,
            )
            self.__signalFile.remove()
            self.__reactor.stop()
        else:
            self.__log.info("Logged into ZenHub")
            self.__signalFile.touch()

            # Connection complete; install a listener to be notified if
            # the connection is lost.
            broker.notifyOnDisconnect(self.__disconnected)

    def __login(self, broker):
        d = broker.factory.login(self.__credentials, self.__worker)
        timeoutCall = self.__reactor.callLater(self.__timeout, d.cancel)

        def completedLogin(arg):
            if timeoutCall.active():
                timeoutCall.cancel()
            return arg

        d.addBoth(completedLogin)
        return d

    def __pingFail(self, ex):
        self.__log.error("Pinger failed: %s", ex)
Example #24
class NotificationSourceIntegrationTest(IntegrationTest):

    @inlineCallbacks
    def setUp(self):
        super(NotificationSourceIntegrationTest, self).setUp()
        self.endpoint = AMQEndpoint(
            reactor, self.rabbit.config.hostname, self.rabbit.config.port,
            username="******", password="******", heartbeat=1)
        self.policy = backoffPolicy(initialDelay=0)
        self.factory = AMQFactory(spec=AMQP0_8_SPEC_PATH)
        self.service = ClientService(
            self.endpoint, self.factory, retryPolicy=self.policy)
        self.connector = NotificationConnector(self.service)
        self.source = NotificationSource(self.connector)

        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        self.service.startService()

    @inlineCallbacks
    def tearDown(self):
        self.service.stopService()
        super(NotificationSourceIntegrationTest, self).tearDown()
        # Wrap resetting queues and client in a try/except, since the broker
        # may have been stopped (e.g. when this is the last test being run).
        try:
            yield self.channel.queue_delete(queue="uuid")
        except:
            pass
        finally:
            yield self.client.close()

    @inlineCallbacks
    def test_get_after_publish(self):
        """
        Calling get() after a message has been published in the associated
        queue returns a Notification for that message.
        """
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))
        notification = yield self.source.get("uuid", 0)
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_before_publish(self):
        """
        Calling get() before a message has been published in the associated
        queue will wait until publication.
        """
        deferred = self.source.get("uuid", 0)
        self.assertFalse(deferred.called)
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))
        notification = yield deferred
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_with_error(self):
        """
        If an error occurs during get(), the client is closed so
        we can query messages again.
        """
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))
        with self.assertRaises(NotFound):
            yield self.source.get("uuid-unknown", 0)
        notification = yield self.source.get("uuid", 0)
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_concurrent_with_error(self):
        """
        If an error occurs in a call to get(), other calls don't
        fail, and are retried on reconnection instead.
        """
        client1 = yield self.service.whenConnected()
        deferred = self.source.get("uuid", 0)

        with self.assertRaises(NotFound):
            yield self.source.get("uuid-unknown", 0)

        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))

        notification = yield deferred
        self.assertEqual("hello", notification.payload)
        client2 = yield self.service.whenConnected()
        # The ClientService has reconnected, yielding a new client.
        self.assertIsNot(client1, client2)

    @inlineCallbacks
    def test_get_timeout(self):
        """
        Calls to get() timeout after a certain amount of time if no message
        arrived on the queue.
        """
        self.source.timeout = 1
        with self.assertRaises(Timeout):
            yield self.source.get("uuid", 0)
        client = yield self.service.whenConnected()
        channel = yield client.channel(1)
        # The channel is still open
        self.assertFalse(channel.closed)
        # The consumer has been deleted
        self.assertNotIn("uuid.0", client.queues)

    @inlineCallbacks
    def test_get_with_broker_shutdown_during_consume(self):
        """
        If rabbitmq gets shut down during the basic-consume call, we wait
        for the reconnection and retry transparently.
        """
        # This will make the connector set up the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        yield self.connector()

        d = self.source.get("uuid", 0)

        # Restart rabbitmq
        yield self.client.close()
        yield self.client.disconnected.wait()
        self.rabbit.cleanUp()
        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()

        # Get a new channel and re-declare the queue, since the restart has
        # destroyed it.
        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        # Publish a message in the queue
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))

        notification = yield d
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_with_broker_die_during_consume(self):
        """
        If rabbitmq dies during the basic-consume call, we wait for the
        reconnection and retry transparently.
        """
        # This will make the connector set up the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        yield self.connector()

        d = self.source.get("uuid", 0)

        # Kill rabbitmq and start it again
        yield self.client.close()
        yield self.client.disconnected.wait()
        self.rabbit.runner.kill()
        self.rabbit.cleanUp()
        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()

        # Get a new channel and re-declare the queue, since the crash has
        # destroyed it.
        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        # Publish a message in the queue
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))

        notification = yield d
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_wb_get_with_broker_shutdown_during_message_wait(self):
        """
        If rabbitmq gets shut down while we wait for messages, we transparently
        wait for the reconnection and try again.
        """
        # This will make the connector set up the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        yield self.connector()

        d = self.source.get("uuid", 0)

        # Acquiring the channel lock makes sure that basic-consume has
        # succeeded and we started waiting for the message.
        yield self.source._channel_lock.acquire()
        self.source._channel_lock.release()

        # Restart rabbitmq
        yield self.client.close()
        yield self.client.disconnected.wait()
        self.rabbit.cleanUp()
        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()

        # Get a new channel and re-declare the queue, since the restart has
        # destroyed it.
        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        # Publish a message in the queue
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))

        notification = yield d
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_wb_heartbeat(self):
        """
        If heartbeat checks fail due to network issues, we keep re-trying
        until the network recovers.
        """
        self.service.stopService()

        # Put a TCP proxy between NotificationSource and RabbitMQ, to simulate
        # packets getting dropped on the floor.
        proxy = ProxyService(
            self.rabbit.config.hostname, self.rabbit.config.port)
        proxy.startService()
        self.addCleanup(proxy.stopService)
        self.endpoint._port = proxy.port
        self.service = ClientService(
            self.endpoint, self.factory, retryPolicy=self.policy)
        self.connector._service = self.service
        self.service.startService()

        # This will make the connector set up the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        channel = yield self.connector()

        deferred = self.source.get("uuid", 0)

        # Start dropping packets on the floor
        proxy.block()

        # Publish a notification, which won't be delivered just yet.
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))

        # Wait for the first connection to terminate, because heartbeat
        # checks will fail.
        yield channel.client.disconnected.wait()

        # Now let packets flow again.
        proxy.unblock()

        # The situation got recovered.
        notification = yield deferred
        self.assertEqual("hello", notification.payload)
        self.assertEqual(2, proxy.connections)

    @inlineCallbacks
    def test_reject_notification(self):
        """
        Calling reject() on a Notification puts the associated message back in
        the queue so that it's available to subsequent get() calls.
        """
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))
        notification = yield self.source.get("uuid", 0)
        yield notification.reject()

        notification = yield self.source.get("uuid", 1)
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_ack_message(self):
        """
        Calling ack() on a Notification confirms the removal of the
        associated message from the queue, so that subsequent calls
        wait for another message.
        """
        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))
        notification = yield self.source.get("uuid", 0)
        yield notification.ack()

        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello 2"))
        notification = yield self.source.get("uuid", 1)
        self.assertEqual("hello 2", notification.payload)

    @inlineCallbacks
    def test_ack_with_broker_shutdown(self):
        """
        If rabbitmq gets shut down before we ack a Notification, an error is
        raised.
        """
        client = yield self.service.whenConnected()

        yield self.channel.basic_publish(
            routing_key="uuid", content=Content("hello"))
        notification = yield self.source.get("uuid", 0)

        self.rabbit.cleanUp()

        yield client.disconnected.wait()

        try:
            yield notification.ack()
        except Bounced:
            pass
        else:
            self.fail("Notification not bounced")

        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()
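# A rough sketch, not from the original source, of the consume loop these tests
# exercise: fetch a notification for a queue, process it, then ack() on success
# or reject() on failure so the message goes back to the queue. 'process' is a
# placeholder callable; the get()/ack()/reject() calls mirror the tests above.
from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def consume(source, uuid, process):
    sequence = 0
    while True:
        notification = yield source.get(uuid, sequence)
        try:
            process(notification.payload)
        except Exception:
            # put the message back so a later get() can pick it up again
            yield notification.reject()
        else:
            # confirm removal of the message from the queue
            yield notification.ack()
        sequence += 1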
Ejemplo n.º 25
0
class ApplicationRunner(object):
    """
    This class is a convenience tool mainly for development and quick hosting
    of WAMP application components.

    It can host a WAMP application component in a WAMP-over-WebSocket client
    connecting to a WAMP router.
    """

    log = txaio.make_logger()

    def __init__(self,
                 url: str,
                 realm: Optional[str] = None,
                 extra: Optional[Dict[str, Any]] = None,
                 serializers: Optional[List[ISerializer]] = None,
                 ssl: Optional[CertificateOptions] = None,
                 proxy: Optional[Dict[str, Any]] = None,
                 headers: Optional[Dict[str, Any]] = None,
                 websocket_options: Optional[Dict[str, Any]] = None,
                 max_retries: Optional[int] = None,
                 initial_retry_delay: Optional[float] = None,
                 max_retry_delay: Optional[float] = None,
                 retry_delay_growth: Optional[float] = None,
                 retry_delay_jitter: Optional[float] = None):
        """

        :param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://example.com:8080/mypath`)
        :param realm: The WAMP realm to join the application session to.
        :param extra: Optional extra configuration to forward to the application component.
        :param serializers: A list of WAMP serializers to use (or None for default serializers).
           Serializers must implement :class:`autobahn.wamp.interfaces.ISerializer`.
        :type serializers: list
        :param ssl: (Optional). If specified this should be an
            instance suitable to pass as ``sslContextFactory`` to
            :class:`twisted.internet.endpoints.SSL4ClientEndpoint` such
            as :class:`twisted.internet.ssl.CertificateOptions`. Leaving
            it as ``None`` will use the result of calling Twisted's
            :meth:`twisted.internet.ssl.platformTrust` which tries to use
            your distribution's CA certificates.
        :param proxy: Explicit proxy server to use; a dict with ``host`` and ``port`` keys.
        :param headers: Additional headers to send (only applies to WAMP-over-WebSocket).
        :param websocket_options: Specific WebSocket options to set (only applies to WAMP-over-WebSocket).
            If not provided, conservative and practical defaults are chosen.
        :param max_retries: Maximum number of reconnection attempts. Unlimited if set to -1.
        :param initial_retry_delay: Initial delay for reconnection attempt in seconds (Default: 1.0s).
        :param max_retry_delay: Maximum delay for reconnection attempts in seconds (Default: 60s).
        :param retry_delay_growth: The growth factor applied to the retry delay between reconnection
            attempts (Default 1.5).
        :param retry_delay_jitter: A 0-argument callable that introduces noise into the
            delay (Default ``random.random``).
        """
        # IMPORTANT: keep this, as it is tested in
        # autobahn.twisted.test.test_tx_application_runner.TestApplicationRunner.test_runner_bad_proxy
        assert (proxy is None or type(proxy) == dict)

        self.url = url
        self.realm = realm
        self.extra = extra or dict()
        self.serializers = serializers
        self.ssl = ssl
        self.proxy = proxy
        self.headers = headers
        self.websocket_options = websocket_options
        self.max_retries = max_retries
        self.initial_retry_delay = initial_retry_delay
        self.max_retry_delay = max_retry_delay
        self.retry_delay_growth = retry_delay_growth
        self.retry_delay_jitter = retry_delay_jitter

        # this is for auto-reconnection, when Twisted's ClientService is available
        self._client_service = None
        # total number of successful connections
        self._connect_successes = 0

    @public
    def stop(self):
        """
        Stop reconnecting, if auto-reconnecting was enabled.
        """
        self.log.debug('{klass}.stop()', klass=self.__class__.__name__)

        if self._client_service:
            return self._client_service.stopService()
        else:
            return succeed(None)

    @public
    def run(self,
            make,
            start_reactor: bool = True,
            auto_reconnect: bool = False,
            log_level: str = 'info',
            endpoint: Optional[IStreamClientEndpoint] = None,
            reactor: Optional[IReactorCore] = None
            ) -> Union[type(None), Deferred]:
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :param start_reactor: When ``True`` (the default) this method starts
           the Twisted reactor and doesn't return until the reactor
           stops. If there are any problems starting the reactor or
           connect()-ing, we stop the reactor and raise the exception
           back to the caller.
        :param auto_reconnect:
        :param log_level:
        :param endpoint:
        :param reactor:
        :return: None is returned, unless you specify
            ``start_reactor=False`` in which case the Deferred that
            connect() returns is returned; this will callback() with
            an IProtocol instance, which will actually be an instance
            of :class:`WampWebSocketClientProtocol`
        """
        self.log.debug('{klass}.run()', klass=self.__class__.__name__)

        if start_reactor:
            # only select framework, set loop and start logging when we are asked
            # to start the reactor - otherwise we are running in a program that
            # likely already took care of all this.
            from twisted.internet import reactor
            txaio.use_twisted()
            txaio.config.loop = reactor
            txaio.start_logging(level=log_level)

        if callable(make):
            # factory for creating ApplicationSession instances
            def create():
                cfg = ComponentConfig(self.realm, self.extra, runner=self)
                try:
                    session = make(cfg)
                except Exception:
                    self.log.failure(
                        'ApplicationSession could not be instantiated: {log_failure.value}'
                    )
                    if start_reactor and reactor.running:
                        reactor.stop()
                    raise
                else:
                    return session
        else:
            create = make

        if self.url.startswith('rs'):
            # try to parse RawSocket URL
            isSecure, host, port = parse_rs_url(self.url)

            # use the first configured serializer if any (which means, auto-choose "best")
            serializer = self.serializers[0] if self.serializers else None

            # create a WAMP-over-RawSocket transport client factory
            transport_factory = WampRawSocketClientFactory(
                create, serializer=serializer)

        else:
            # try to parse WebSocket URL
            isSecure, host, port, resource, path, params = parse_ws_url(
                self.url)

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(
                create,
                url=self.url,
                serializers=self.serializers,
                proxy=self.proxy,
                headers=self.headers)

            # client WebSocket settings - similar to:
            # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
            # - http://crossbar.io/docs/WebSocket-Options/#production-settings

            # The permessage-deflate extensions offered to the server
            offers = [PerMessageDeflateOffer()]

            # Function to accept permessage-deflate responses from the server
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            # default WebSocket options for all client connections
            protocol_options = {
                'version': WebSocketProtocol.DEFAULT_SPEC_VERSION,
                'utf8validateIncoming': True,
                'acceptMaskedServerFrames': False,
                'maskClientFrames': True,
                'applyMask': True,
                'maxFramePayloadSize': 1048576,
                'maxMessagePayloadSize': 1048576,
                'autoFragmentSize': 65536,
                'failByDrop': True,
                'echoCloseCodeReason': False,
                'serverConnectionDropTimeout': 1.,
                'openHandshakeTimeout': 2.5,
                'closeHandshakeTimeout': 1.,
                'tcpNoDelay': True,
                'perMessageCompressionOffers': offers,
                'perMessageCompressionAccept': accept,
                'autoPingInterval': 10.,
                'autoPingTimeout': 5.,
                'autoPingSize': 12,

                # see: https://github.com/crossbario/autobahn-python/issues/1327 and
                # _cancelAutoPingTimeoutCall
                'autoPingRestartOnAnyTraffic': True,
            }

            # let user override above default options
            if self.websocket_options:
                protocol_options.update(self.websocket_options)

            # set websocket protocol options on Autobahn/Twisted protocol factory, from where it will
            # be applied for every Autobahn/Twisted protocol instance from the factory
            transport_factory.setProtocolOptions(**protocol_options)

        # suppress pointless log noise
        transport_factory.noisy = False

        if endpoint:
            client = endpoint
        else:
            # if user passed ssl= but isn't using isSecure, we'll never
            # use the ssl argument which makes no sense.
            context_factory = None
            if self.ssl is not None:
                if not isSecure:
                    raise RuntimeError(
                        'ssl= argument value passed to %s conflicts with the "ws:" '
                        'prefix of the url argument. Did you mean to use "wss:"?'
                        % self.__class__.__name__)
                context_factory = self.ssl
            elif isSecure:
                from twisted.internet.ssl import optionsForClientTLS
                context_factory = optionsForClientTLS(host)

            from twisted.internet import reactor
            if self.proxy is not None:
                from twisted.internet.endpoints import TCP4ClientEndpoint
                client = TCP4ClientEndpoint(reactor, self.proxy['host'],
                                            self.proxy['port'])
                transport_factory.contextFactory = context_factory
            elif isSecure:
                from twisted.internet.endpoints import SSL4ClientEndpoint
                assert context_factory is not None
                client = SSL4ClientEndpoint(reactor, host, port,
                                            context_factory)
            else:
                from twisted.internet.endpoints import TCP4ClientEndpoint
                client = TCP4ClientEndpoint(reactor, host, port)

        # as the reactor shuts down, we wish to wait until we've sent
        # out our "Goodbye" message; leave() returns a Deferred that
        # fires when the transport gets to STATE_CLOSED
        def cleanup(proto):
            if hasattr(proto, '_session') and proto._session is not None:
                if proto._session.is_attached():
                    return proto._session.leave()
                elif proto._session.is_connected():
                    return proto._session.disconnect()

        # when our proto was created and connected, make sure it's cleaned
        # up properly later on when the reactor shuts down for whatever reason
        def init_proto(proto):
            self._connect_successes += 1
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        use_service = False
        if auto_reconnect:
            try:
                # since Twisted 16.1.0
                from twisted.application.internet import ClientService
                from twisted.application.internet import backoffPolicy
                use_service = True
            except ImportError:
                use_service = False

        if use_service:
            # this code path is automatically reconnecting ..
            self.log.debug('using t.a.i.ClientService')

            if (self.max_retries is not None
                    or self.initial_retry_delay is not None
                    or self.max_retry_delay is not None
                    or self.retry_delay_growth is not None
                    or self.retry_delay_jitter is not None):

                # max_retry_delay may be None here (when only other retry options
                # were given); treat that as "use the default backoff" instead of
                # comparing None to 0
                if self.max_retry_delay is None or self.max_retry_delay > 0:
                    kwargs = {}

                    def _jitter():
                        j = 1 if self.retry_delay_jitter is None else self.retry_delay_jitter
                        return random.random() * j

                    for key, val in [('initialDelay',
                                      self.initial_retry_delay),
                                     ('maxDelay', self.max_retry_delay),
                                     ('factor', self.retry_delay_growth),
                                     ('jitter', _jitter)]:
                        if val is not None:
                            kwargs[key] = val

                    # retry policy that will only try to reconnect if we connected
                    # successfully at least once before (so it fails on host unreachable etc ..)
                    def retry(failed_attempts):
                        if self._connect_successes > 0 and (
                                self.max_retries == -1
                                or failed_attempts < self.max_retries):
                            return backoffPolicy(**kwargs)(failed_attempts)
                        else:
                            print('hit stop')
                            self.stop()
                            return 100000000000000
                else:
                    # immediately reconnect (zero delay)
                    def retry(_):
                        return 0
            else:
                retry = backoffPolicy()

            # https://twistedmatrix.com/documents/current/api/twisted.application.internet.ClientService.html
            self._client_service = ClientService(client,
                                                 transport_factory,
                                                 retryPolicy=retry)
            self._client_service.startService()

            d = self._client_service.whenConnected()

        else:
            # this code path is only connecting once!
            self.log.debug('using t.i.e.connect()')

            d = client.connect(transport_factory)

        # if we connect successfully, the arg is a WampWebSocketClientProtocol
        d.addCallback(init_proto)

        # if the user didn't ask us to start the reactor, then they
        # get to deal with any connect errors themselves.
        if start_reactor:
            # if an error happens in the connect(), we save the underlying
            # exception so that after the event-loop exits we can re-raise
            # it to the caller.

            class ErrorCollector(object):
                exception = None

                def __call__(self, failure):
                    self.exception = failure.value
                    reactor.stop()

            connect_error = ErrorCollector()
            d.addErrback(connect_error)

            # now enter the Twisted reactor loop
            reactor.run()

            # if the ApplicationSession sets an "error" key on the self.config.extra dictionary, which
            # has been set to the self.extra dictionary, extract the Exception from that and re-raise
            # it as the very last one (see below) exciting back to the caller of self.run()
            app_error = self.extra.get('error', None)

            # if we exited due to a connection error, raise that to the caller
            if connect_error.exception:
                raise connect_error.exception
            elif app_error:
                raise app_error

        else:
            # let the caller handle any errors
            return d
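# Hedged usage sketch for the ApplicationRunner above; MyComponent, the URL and
# the realm are placeholders, not part of the original example. With
# auto_reconnect=True, run() wraps the connection in the ClientService shown in
# the code above.
from autobahn.twisted.wamp import ApplicationSession


class MyComponent(ApplicationSession):

    def onJoin(self, details):
        self.log.info('session joined')


if __name__ == '__main__':
    runner = ApplicationRunner(url='ws://localhost:8080/ws', realm='realm1')
    runner.run(MyComponent, auto_reconnect=True)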
Ejemplo n.º 26
0
 def startService(self):
     log.info("starting MQTT Client Subscriber Service")
     # invoke whenConnected() inherited method
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
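# A fuller sketch of the pattern in the snippet above. MQTTService is an
# assumed name and the body of connectToBroker() is guessed; only the
# whenConnected()/ClientService.startService() calls come from the snippet.
from twisted.application.internet import ClientService


class MQTTService(ClientService):

    def startService(self):
        # fire a callback with the first connected protocol, then let
        # ClientService keep the connection (and reconnection) going
        self.whenConnected().addCallback(self.connectToBroker)
        ClientService.startService(self)

    def connectToBroker(self, protocol):
        # 'protocol' is whatever instance the wrapped factory built for
        # this (re)connection
        self.protocol = protocol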
Ejemplo n.º 27
0
        # this is to clean up stuff. it is not our business to
        # possibly reconnect the underlying connection
        self._countdown -= 1
        if self._countdown <= 0:
            try:
                reactor.stop()
            except ReactorNotRunning:
                pass


if __name__ == '__main__':
    txaio.start_logging(level='info')

    # create a WAMP session object. this is reused across multiple
    # reconnects (if automatically reconnected)
    session = MyAppSession(ComponentConfig('realm1', {}))

    # create a WAMP transport factory
    transport = WampWebSocketClientFactory(session,
                                           url='ws://localhost:8080/ws')

    # create a connecting endpoint
    endpoint = TCP4ClientEndpoint(reactor, 'localhost', 8080)

    # create and start an automatically reconnecting client
    service = ClientService(endpoint, transport)
    service.startService()

    # enter the event loop
    reactor.run()
Ejemplo n.º 28
0
class ApplicationRunner(object):
    """
    This class is a convenience tool mainly for development and quick hosting
    of WAMP application components.

    It can host a WAMP application component in a WAMP-over-WebSocket client
    connecting to a WAMP router.
    """

    log = txaio.make_logger()

    def __init__(self,
                 url,
                 realm=None,
                 extra=None,
                 serializers=None,
                 ssl=None,
                 proxy=None,
                 headers=None,
                 max_retries=None,
                 initial_retry_delay=None,
                 max_retry_delay=None,
                 retry_delay_growth=None,
                 retry_delay_jitter=None):
        """

        :param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`)
        :type url: str

        :param realm: The WAMP realm to join the application session to.
        :type realm: str

        :param extra: Optional extra configuration to forward to the application component.
        :type extra: dict

        :param serializers: A list of WAMP serializers to use (or None for default serializers).
           Serializers must implement :class:`autobahn.wamp.interfaces.ISerializer`.
        :type serializers: list

        :param ssl: (Optional). If specified this should be an
            instance suitable to pass as ``sslContextFactory`` to
            :class:`twisted.internet.endpoints.SSL4ClientEndpoint` such
            as :class:`twisted.internet.ssl.CertificateOptions`. Leaving
            it as ``None`` will use the result of calling Twisted's
            :meth:`twisted.internet.ssl.platformTrust` which tries to use
            your distribution's CA certificates.
        :type ssl: :class:`twisted.internet.ssl.CertificateOptions`

        :param proxy: Explicit proxy server to use; a dict with ``host`` and ``port`` keys
        :type proxy: dict or None

        :param headers: Additional headers to send (only applies to WAMP-over-WebSocket).
        :type headers: dict

        :param max_retries: Maximum number of reconnection attempts. Unlimited if set to -1.
        :type max_retries: int

        :param initial_retry_delay: Initial delay for reconnection attempt in seconds (Default: 1.0s).
        :type initial_retry_delay: float

        :param max_retry_delay: Maximum delay for reconnection attempts in seconds (Default: 60s).
        :type max_retry_delay: float

        :param retry_delay_growth: The growth factor applied to the retry delay between reconnection attempts (Default 1.5).
        :type retry_delay_growth: float

        :param retry_delay_jitter: A 0-argument callable that introduces noise into the delay. (Default random.random)
        :type retry_delay_jitter: float
        """
        assert(type(url) == six.text_type)
        assert(realm is None or type(realm) == six.text_type)
        assert(extra is None or type(extra) == dict)
        assert(headers is None or type(headers) == dict)
        assert(proxy is None or type(proxy) == dict)
        self.url = url
        self.realm = realm
        self.extra = extra or dict()
        self.serializers = serializers
        self.ssl = ssl
        self.proxy = proxy
        self.headers = headers
        self.max_retries = max_retries
        self.initial_retry_delay = initial_retry_delay
        self.max_retry_delay = max_retry_delay
        self.retry_delay_growth = retry_delay_growth
        self.retry_delay_jitter = retry_delay_jitter

        # this is for auto-reconnection, when Twisted's ClientService is available
        self._client_service = None
        # total number of successful connections
        self._connect_successes = 0

    @public
    def stop(self):
        """
        Stop reconnecting, if auto-reconnecting was enabled.
        """
        self.log.debug('{klass}.stop()', klass=self.__class__.__name__)

        if self._client_service:
            return self._client_service.stopService()
        else:
            return succeed(None)

    @public
    def run(self, make, start_reactor=True, auto_reconnect=False, log_level='info', endpoint=None, reactor=None):
        """
        Run the application component.

        :param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession`
           when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :type make: callable

        :param start_reactor: When ``True`` (the default) this method starts
           the Twisted reactor and doesn't return until the reactor
           stops. If there are any problems starting the reactor or
           connect()-ing, we stop the reactor and raise the exception
           back to the caller.

        :returns: None is returned, unless you specify
            ``start_reactor=False`` in which case the Deferred that
            connect() returns is returned; this will callback() with
            an IProtocol instance, which will actually be an instance
            of :class:`WampWebSocketClientProtocol`
        """
        self.log.debug('{klass}.run()', klass=self.__class__.__name__)

        if start_reactor:
            # only select framework, set loop and start logging when we are asked
            # to start the reactor - otherwise we are running in a program that
            # likely already took care of all this.
            from twisted.internet import reactor
            txaio.use_twisted()
            txaio.config.loop = reactor
            txaio.start_logging(level=log_level)

        if callable(make):
            # factory for creating ApplicationSession instances
            def create():
                cfg = ComponentConfig(self.realm, self.extra, runner=self)
                try:
                    session = make(cfg)
                except Exception:
                    self.log.failure('ApplicationSession could not be instantiated: {log_failure.value}')
                    if start_reactor and reactor.running:
                        reactor.stop()
                    raise
                else:
                    return session
        else:
            create = make

        if self.url.startswith(u'rs'):
            # try to parse RawSocket URL ..
            isSecure, host, port = parse_rs_url(self.url)

            # use the first configured serializer if any (which means, auto-choose "best")
            serializer = self.serializers[0] if self.serializers else None

            # create a WAMP-over-RawSocket transport client factory
            transport_factory = WampRawSocketClientFactory(create, serializer=serializer)

        else:
            # try to parse WebSocket URL ..
            isSecure, host, port, resource, path, params = parse_ws_url(self.url)

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(create, url=self.url, serializers=self.serializers, proxy=self.proxy, headers=self.headers)

            # client WebSocket settings - similar to:
            # - http://crossbar.io/docs/WebSocket-Compression/#production-settings
            # - http://crossbar.io/docs/WebSocket-Options/#production-settings

            # The permessage-deflate extensions offered to the server ..
            offers = [PerMessageDeflateOffer()]

            # Function to accept permessage-deflate responses from the server ..
            def accept(response):
                if isinstance(response, PerMessageDeflateResponse):
                    return PerMessageDeflateResponseAccept(response)

            # set WebSocket options for all client connections
            transport_factory.setProtocolOptions(maxFramePayloadSize=1048576,
                                                 maxMessagePayloadSize=1048576,
                                                 autoFragmentSize=65536,
                                                 failByDrop=False,
                                                 openHandshakeTimeout=2.5,
                                                 closeHandshakeTimeout=1.,
                                                 tcpNoDelay=True,
                                                 autoPingInterval=10.,
                                                 autoPingTimeout=5.,
                                                 autoPingSize=4,
                                                 perMessageCompressionOffers=offers,
                                                 perMessageCompressionAccept=accept)

        # suppress pointless log noise
        transport_factory.noisy = False

        if endpoint:
            client = endpoint
        else:
            # if user passed ssl= but isn't using isSecure, we'll never
            # use the ssl argument which makes no sense.
            context_factory = None
            if self.ssl is not None:
                if not isSecure:
                    raise RuntimeError(
                        'ssl= argument value passed to %s conflicts with the "ws:" '
                        'prefix of the url argument. Did you mean to use "wss:"?' %
                        self.__class__.__name__)
                context_factory = self.ssl
            elif isSecure:
                from twisted.internet.ssl import optionsForClientTLS
                context_factory = optionsForClientTLS(host)

            from twisted.internet import reactor
            if self.proxy is not None:
                from twisted.internet.endpoints import TCP4ClientEndpoint
                client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
                transport_factory.contextFactory = context_factory
            elif isSecure:
                from twisted.internet.endpoints import SSL4ClientEndpoint
                assert context_factory is not None
                client = SSL4ClientEndpoint(reactor, host, port, context_factory)
            else:
                from twisted.internet.endpoints import TCP4ClientEndpoint
                client = TCP4ClientEndpoint(reactor, host, port)

        # as the reactor shuts down, we wish to wait until we've sent
        # out our "Goodbye" message; leave() returns a Deferred that
        # fires when the transport gets to STATE_CLOSED
        def cleanup(proto):
            if hasattr(proto, '_session') and proto._session is not None:
                if proto._session.is_attached():
                    return proto._session.leave()
                elif proto._session.is_connected():
                    return proto._session.disconnect()

        # when our proto was created and connected, make sure it's cleaned
        # up properly later on when the reactor shuts down for whatever reason
        def init_proto(proto):
            self._connect_successes += 1
            reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
            return proto

        use_service = False
        if auto_reconnect:
            try:
                # since Twisted 16.1.0
                from twisted.application.internet import ClientService
                from twisted.application.internet import backoffPolicy
                use_service = True
            except ImportError:
                use_service = False

        if use_service:
            # this code path is automatically reconnecting ..
            self.log.debug('using t.a.i.ClientService')

            if self.max_retries or self.initial_retry_delay or self.max_retry_delay or self.retry_delay_growth or self.retry_delay_jitter:
                kwargs = {}
                for key, val in [('initialDelay', self.initial_retry_delay),
                                 ('maxDelay', self.max_retry_delay),
                                 ('factor', self.retry_delay_growth),
                                 # only pass jitter when an explicit jitter factor
                                 # was given; the lambda would otherwise multiply
                                 # random.random() by None
                                 ('jitter',
                                  (lambda: random.random() * self.retry_delay_jitter)
                                  if self.retry_delay_jitter is not None else None)]:
                    if val:
                        kwargs[key] = val

                # retry policy that will only try to reconnect if we connected
                # successfully at least once before (so it fails on host unreachable etc ..)
                def retry(failed_attempts):
                    if self._connect_successes > 0 and (self.max_retries == -1 or failed_attempts < self.max_retries):
                        return backoffPolicy(**kwargs)(failed_attempts)
                    else:
                        print('hit stop')
                        self.stop()
                        return 100000000000000
            else:
                retry = backoffPolicy()

            self._client_service = ClientService(client, transport_factory, retryPolicy=retry)
            self._client_service.startService()

            d = self._client_service.whenConnected()

        else:
            # this code path is only connecting once!
            self.log.debug('using t.i.e.connect()')

            d = client.connect(transport_factory)

        # if we connect successfully, the arg is a WampWebSocketClientProtocol
        d.addCallback(init_proto)

        # if the user didn't ask us to start the reactor, then they
        # get to deal with any connect errors themselves.
        if start_reactor:
            # if an error happens in the connect(), we save the underlying
            # exception so that after the event-loop exits we can re-raise
            # it to the caller.

            class ErrorCollector(object):
                exception = None

                def __call__(self, failure):
                    self.exception = failure.value
                    reactor.stop()
            connect_error = ErrorCollector()
            d.addErrback(connect_error)

            # now enter the Twisted reactor loop
            reactor.run()

            # if we exited due to a connection error, raise that to the
            # caller
            if connect_error.exception:
                raise connect_error.exception

        else:
            # let the caller handle any errors
            return d
Ejemplo n.º 29
0
        self.log.info('transport disconnected')
        # this is to clean up stuff. it is not our business to
        # possibly reconnect the underlying connection
        self._countdown -= 1
        if self._countdown <= 0:
            try:
                reactor.stop()
            except ReactorNotRunning:
                pass


if __name__ == '__main__':
    txaio.start_logging(level='info')

    # create a WAMP session object. this is reused across multiple
    # reconnects (if automatically reconnected)
    session = MyAppSession(ComponentConfig(u'realm1', {}))

    # create a WAMP transport factory
    transport = WampWebSocketClientFactory(session, url=u'ws://localhost:8080/ws')

    # create a connecting endpoint
    endpoint = TCP4ClientEndpoint(reactor, 'localhost', 8080)

    # create and start an automatically reconnecting client
    service = ClientService(endpoint, transport)
    service.startService()

    # enter the event loop
    reactor.run()
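# Standalone sketch (assumed names, not from the source) of the retry-policy
# idea used in the ApplicationRunner examples above: wrap Twisted's
# backoffPolicy() so reconnection is only attempted after at least one
# successful connection, and give up after max_retries attempts.
from twisted.application.internet import backoffPolicy


def make_retry_policy(runner, max_retries=10):
    base = backoffPolicy(initialDelay=1.0, maxDelay=60.0, factor=1.5)

    def retry(failed_attempts):
        if runner._connect_successes > 0 and failed_attempts < max_retries:
            return base(failed_attempts)
        # never connected successfully, or retry budget exhausted: stop
        runner.stop()
        return 100000000000000  # effectively "never retry again"

    return retry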
Ejemplo n.º 30
0
class NotificationSourceIntegrationTest(IntegrationTest):
    @inlineCallbacks
    def setUp(self):
        super(NotificationSourceIntegrationTest, self).setUp()
        self.endpoint = AMQEndpoint(reactor,
                                    self.rabbit.config.hostname,
                                    self.rabbit.config.port,
                                    username="******",
                                    password="******",
                                    heartbeat=1)
        self.policy = backoffPolicy(initialDelay=0)
        self.factory = AMQFactory(spec=AMQP0_8_SPEC_PATH)
        self.service = ClientService(self.endpoint,
                                     self.factory,
                                     retryPolicy=self.policy)
        self.connector = NotificationConnector(self.service)
        self.source = NotificationSource(self.connector)

        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        self.service.startService()

    @inlineCallbacks
    def tearDown(self):
        self.service.stopService()
        super(NotificationSourceIntegrationTest, self).tearDown()
        # Wrap resetting queues and client in a try/except, since the broker
        # may have been stopped (e.g. when this is the last test being run).
        try:
            yield self.channel.queue_delete(queue="uuid")
        except:
            pass
        finally:
            yield self.client.close()

    @inlineCallbacks
    def test_get_after_publish(self):
        """
        Calling get() after a message has been published in the associated
        queue returns a Notification for that message.
        """
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))
        notification = yield self.source.get("uuid", 0)
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_before_publish(self):
        """
        Calling get() before a message has been published in the associated
        queue will wait until publication.
        """
        deferred = self.source.get("uuid", 0)
        self.assertFalse(deferred.called)
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))
        notification = yield deferred
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_with_error(self):
        """
        If an error occurs during get(), the client is closed so
        we can query messages again.
        """
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))
        with self.assertRaises(NotFound):
            yield self.source.get("uuid-unknown", 0)
        notification = yield self.source.get("uuid", 0)
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_concurrent_with_error(self):
        """
        If an error occurs in a call to get(), other calls don't
        fail, and are retried on reconnection instead.
        """
        client1 = yield self.service.whenConnected()
        deferred = self.source.get("uuid", 0)

        with self.assertRaises(NotFound):
            yield self.source.get("uuid-unknown", 0)

        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))

        notification = yield deferred
        self.assertEqual("hello", notification.payload)
        client2 = yield self.service.whenConnected()
        # The ClientService has reconnected, yielding a new client.
        self.assertIsNot(client1, client2)

    @inlineCallbacks
    def test_get_timeout(self):
        """
        Calls to get() time out after a certain amount of time if no message
        arrived on the queue.
        """
        self.source.timeout = 1
        with self.assertRaises(Timeout):
            yield self.source.get("uuid", 0)
        client = yield self.service.whenConnected()
        channel = yield client.channel(1)
        # The channel is still open
        self.assertFalse(channel.closed)
        # The consumer has been deleted
        self.assertNotIn("uuid.0", client.queues)

    @inlineCallbacks
    def test_get_with_broker_shutdown_during_consume(self):
        """
        If rabbitmq gets shut down during the basic-consume call, we wait
        for the reconnection and retry transparently.
        """
        # This will make the connector setup the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        yield self.connector()

        d = self.source.get("uuid", 0)

        # Restart rabbitmq
        yield self.client.close()
        yield self.client.disconnected.wait()
        self.rabbit.cleanUp()
        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()

        # Get a new channel and re-declare the queue, since the restart has
        # destroyed it.
        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        # Publish a message in the queue
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))

        notification = yield d
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_get_with_broker_die_during_consume(self):
        """
        If rabbitmq dies during the basic-consume call, we wait for the
        reconnection and retry transparently.
        """
        # This will make the connector setup the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        yield self.connector()

        d = self.source.get("uuid", 0)

        # Kill rabbitmq and start it again
        yield self.client.close()
        yield self.client.disconnected.wait()
        self.rabbit.runner.kill()
        self.rabbit.cleanUp()
        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()

        # Get a new channel and re-declare the queue, since the crash has
        # destroyed it.
        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        # Publish a message in the queue
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))

        notification = yield d
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_wb_get_with_broker_shutdown_during_message_wait(self):
        """
        If rabbitmq gets shut down while we wait for messages, we transparently
        wait for the reconnection and try again.
        """
        # This will make the connector setup the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        yield self.connector()

        d = self.source.get("uuid", 0)

        # Acquiring the channel lock makes sure that basic-consume has
        # succeeded and we started waiting for the message.
        yield self.source._channel_lock.acquire()
        self.source._channel_lock.release()

        # Restart rabbitmq
        yield self.client.close()
        yield self.client.disconnected.wait()
        self.rabbit.cleanUp()
        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()

        # Get a new channel and re-declare the queue, since the restart has
        # destroyed it.
        self.client = yield self.endpoint.connect(self.factory)
        self.channel = yield self.client.channel(1)
        yield self.channel.channel_open()
        yield self.channel.queue_declare(queue="uuid")

        # Publish a message in the queue
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))

        notification = yield d
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_wb_heartbeat(self):
        """
        If heartbeat checks fail due to network issues, we keep re-trying
        until the network recovers.
        """
        self.service.stopService()

        # Put a TCP proxy between NotificationSource and RabbitMQ, to simulate
        # packets getting dropped on the floor.
        proxy = ProxyService(self.rabbit.config.hostname,
                             self.rabbit.config.port)
        proxy.startService()
        self.addCleanup(proxy.stopService)
        self.endpoint._port = proxy.port
        self.service = ClientService(self.endpoint,
                                     self.factory,
                                     retryPolicy=self.policy)
        self.connector._service = self.service
        self.service.startService()

        # This will make the connector setup the channel before we call
        # get(), so by the time we call it in the next line all
        # connector-related deferreds will fire synchronously and the
        # code will block on basic-consume.
        channel = yield self.connector()

        deferred = self.source.get("uuid", 0)

        # Start dropping packets on the floor
        proxy.block()

        # Publish a notification, which won't be delivered just yet.
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))

        # Wait for the first connection to terminate, because heartbeat
        # checks will fail.
        yield channel.client.disconnected.wait()

        # Now let packets flow again.
        proxy.unblock()

        # The situation got recovered.
        notification = yield deferred
        self.assertEqual("hello", notification.payload)
        self.assertEqual(2, proxy.connections)

    @inlineCallbacks
    def test_reject_notification(self):
        """
        Calling reject() on a Notification puts the associated message back in
        the queue so that it's available to subsequent get() calls.
        """
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))
        notification = yield self.source.get("uuid", 0)
        yield notification.reject()

        notification = yield self.source.get("uuid", 1)
        self.assertEqual("hello", notification.payload)

    @inlineCallbacks
    def test_ack_message(self):
        """
        Calling ack() on a Notification confirms the removal of the
        associated message from the queue, so that subsequent calls
        wait for another message.
        """
        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))
        notification = yield self.source.get("uuid", 0)
        yield notification.ack()

        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello 2"))
        notification = yield self.source.get("uuid", 1)
        self.assertEqual("hello 2", notification.payload)

    @inlineCallbacks
    def test_ack_with_broker_shutdown(self):
        """
        If rabbitmq gets shut down before we ack a Notification, an error is
        raised.
        """
        client = yield self.service.whenConnected()

        yield self.channel.basic_publish(routing_key="uuid",
                                         content=Content("hello"))
        notification = yield self.source.get("uuid", 0)

        self.rabbit.cleanUp()

        yield client.disconnected.wait()

        try:
            yield notification.ack()
        except Bounced:
            pass
        else:
            self.fail("Notification not bounced")

        self.rabbit.config = RabbitServerResources(
            port=self.rabbit.config.port)  # Ensure that we use the same port
        self.rabbit.setUp()
Ejemplo n.º 31
0
                self.room_name.encode() + b"\n" + self.user_name.encode() +
                b": " + data)
        else:
            self.messanger.connectionLost()
            self.messanger = None
            sys.stdin = sys.__stdin__
            sys.stdout = sys.__stdout__
            self.transport.write(b"LEAVE" + self.room_name.encode())

    def catch_leave(self,
                    data):  # data is None for now; will change if ACK/NACK is implemented
        self.converse()

    def catch_message(self, data):
        self.messanger.dataSend(data)

    def catch_create(self, data):
        mssg = {"NACK": "No. Try again.\n", "ACK": "We've made your room.\n"}
        sys.stdout.write(mssg[data.decode()])
        self.converse()

    def panic(self):
        sys.stdout.write("PANIC!!!!\n")


from twisted.internet.protocol import Factory  # ClientService needs a factory

socket = TCP4ClientEndpoint(reactor, serverIP, serverPort)
# ClientService expects a protocol *factory*, not the Deferred returned by
# connectProtocol(), so build a factory for Client and let the service handle
# connecting (and reconnecting) for us.
theConnection = ClientService(socket, Factory.forProtocol(Client))
theConnection.startService()
reactor.run()  # Begin running Twisted's OS interacting processes.
Ejemplo n.º 32
0
class DeepstreamClient(Client):
    '''
    This class instantiates an interface to interact with a Deepstream server.

    This class is the recommended mechanism for interacting with this module. It provides an interface to the other
    classes, each of which encapsulate a feature: Connection, Records, Events, RPC, and Presence.
    '''

    def __init__(self, url=None, conn_string=None, authParams=None, reactor=None, **options):
        ''' Creates the client, but does not connect to the server automatically.
        Optional keyword parameters (**options) for...
           Client: url (required), authParams, reactor, conn_string, debug, factory
           protocol: url (required), authParams, heartbeat_interval
           rpc: rpcAckTimeout, rpcResponseTimeout, subscriptionTimeout
           record: recordReadAckTimeout, merge_strategy, recordReadTimeout, recordDeleteTimeout, recordDeepCopy,
           presence: subscriptionTimeout
        '''

        if not url or url is None:
            raise ValueError(
                "url is None; you must specify a  URL for the deepstream server, e.g. ws://localhost:6020/deepstream")
        parse_result = urlparse(url)
        if not authParams or authParams is None:
            authParams = {}
            if parse_result.username and parse_result.password:
                authParams['username'] = parse_result.username
                authParams['password'] = parse_result.password
        if not conn_string or conn_string is None:
            if parse_result.scheme == 'ws':
                if parse_result.hostname:
                    conn_string = 'tcp:%s' % parse_result.hostname
                if parse_result.port:
                    conn_string += ':%s' % parse_result.port
                else:
                    conn_string += ':6020'
        if not conn_string or conn_string is None:
            raise ValueError(
                "Could not parse conn string from URL; you must specify a Twisted endpoint descriptor for the server, e.g. tcp:127.0.0.1:6020")
        if not reactor or reactor is None:
            from twisted.internet import reactor
        self.reactor = reactor
        factory = options.pop('factory', WSDeepstreamFactory)
        self._factory = factory(url, self, debug=options.pop('debug', False), reactor=reactor, **options)
        self._endpoint = clientFromString(reactor, conn_string)
        self._service = ClientService(self._endpoint, self._factory)  # Handles reconnection for us

        EventEmitter.__init__(self)
        self._connection = ConnectionInterface(self, url)
        self._presence = PresenceHandler(self._connection, self, **options)
        self._event = EventHandler(self._connection, self, **options)
        self._rpc = RPCHandler(self._connection, self, **options)
        self._record = RecordHandler(self._connection, self, **options)
        self._message_callbacks = dict()

        self._message_callbacks[
            constants.topic.PRESENCE] = self._presence.handle
        self._message_callbacks[
            constants.topic.EVENT] = self._event.handle
        self._message_callbacks[
            constants.topic.RPC] = self._rpc.handle
        self._message_callbacks[
            constants.topic.RECORD] = self._record.handle
        self._message_callbacks[
            constants.topic.ERROR] = self._on_error

    def login(self, auth_params):
        '''
        Submit authentication credentials to the server once state is "Awaiting Authentication."

        Expects a dictionary.
        Options:
          User/pass: {'username': '******', 'password': '******'}
          Anonymous login/Open auth: {}
            https://deepstreamhub.com/tutorials/guides/open-auth/
          Email: {'type': 'email', 'email': '*****@*****.**', 'password': '******'}
            https://deepstreamhub.com/tutorials/guides/email-auth/
          Token: {'token': 'abcdefg'}
            https://deepstreamhub.com/tutorials/guides/token-auth/

        Returns a Deferred
        '''
        return self._connection.authenticate(auth_params)

    def connect(self, callback=None):
        '''
        Connect to the server. Optionally, fire a callback once connected.

        Recommended callback is a login function.
        Calling the login function is automatic if the DeepstreamClient is instantiated with auth_params.
        '''
        if callback:
            self._factory._connect_callback = callback
        if not self._service.running:
            self._service.startService()
        return

    def close(self):
        '''Legacy method: disconnect from the server.'''
        return self.disconnect()

    def disconnect(self):
        '''Terminate our connection to the server.'''
        # TODO: Say goodbye; clear message queue?
        self._factory._deliberate_close = True
        self._service.stopService()

    def whenAuthenticated(self, callback, *args):
        '''Execute a callback once authentication has succeeded.'''
        if self._factory._state == constants.connection_state.OPEN:
            callback(*args)
        else:
            self.once(constants.event.CONNECTION_STATE_CHANGED,
                      lambda x: DeepstreamClient.whenAuthenticated(self, callback, *args))

    # These properties are the same as in the parent class, but are repeated here for clarity
    @property
    def connection_state(self):
        return self._connection.state

    @property
    def record(self):
        return self._record

    @property
    def event(self):
        return self._event

    @property
    def rpc(self):
        return self._rpc

    @property
    def presence(self):
        return self._presence
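# Minimal usage sketch (not from the original source) built only from the
# methods defined above: connect() starts the underlying ClientService, the
# connect callback logs in with empty (open/anonymous) auth, and
# whenAuthenticated() defers work until the session is authenticated.
from twisted.internet import reactor

client = DeepstreamClient(url='ws://localhost:6020/deepstream')


def on_connected(*args):
    # anonymous login; see the login() docstring above for other auth options
    client.login({})


client.connect(on_connected)
client.whenAuthenticated(lambda: print('authenticated'))
reactor.run()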
Ejemplo n.º 33
0
 def startService(self):
     # invoke whenConnected() inherited method
     reactor.callLater(10, self.log_retry)
     self.whenConnected().addCallback(self.connectToBroker)
     ClientService.startService(self)
Ejemplo n.º 34
0
    def makeReconnector(self,
                        fireImmediately=True,
                        startService=True,
                        protocolType=Protocol,
                        **kw):
        """
        Create a L{ClientService} along with a L{ConnectInformation} indicating
        the connections in progress on its endpoint.

        @param fireImmediately: Should all of the endpoint connection attempts
            fire synchronously?
        @type fireImmediately: L{bool}

        @param startService: Should the L{ClientService} be started before
            being returned?
        @type startService: L{bool}

        @param protocolType: a 0-argument callable returning a new L{IProtocol}
            provider to be used for application-level protocol connections.

        @param kw: Arbitrary keyword arguments to be passed on to
            L{ClientService}

        @return: a 2-tuple of L{ConnectInformation} (for information about test
            state) and L{ClientService} (the system under test).  The
            L{ConnectInformation} has 2 additional attributes;
            C{applicationFactory} and C{applicationProtocols}, which refer to
            the unwrapped protocol factory and protocol instances passed in to
            L{ClientService} respectively.
        """
        nkw = {}
        nkw.update(clock=Clock())
        nkw.update(kw)
        clock = nkw['clock']
        cq, endpoint = endpointForTesting(fireImmediately=fireImmediately)

        # `endpointForTesting` is totally generic to any LLPI client that uses
        # endpoints, and maintains all its state internally; however,
        # applicationProtocols and applicationFactory are bonus attributes that
        # are only specifically interesting to tests that use wrapper
        # protocols.  For now, set them here, externally.

        applicationProtocols = cq.applicationProtocols = []

        class RememberingFactory(Factory, object):
            protocol = protocolType

            def buildProtocol(self, addr):
                result = super(RememberingFactory, self).buildProtocol(addr)
                applicationProtocols.append(result)
                return result

        cq.applicationFactory = factory = RememberingFactory()

        service = ClientService(endpoint, factory, **nkw)

        def stop():
            service._protocol = None
            if service.running:
                service.stopService()
            # Ensure that we don't leave any state in the reactor after
            # stopService.
            self.assertEqual(clock.getDelayedCalls(), [])

        self.addCleanup(stop)
        if startService:
            service.startService()
        return cq, service
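    # Hypothetical test method (not from the original suite) showing how the
    # helper above is meant to be used; it relies only on the documented
    # 2-tuple return value and the bonus applicationProtocols attribute.
    def test_buildsApplicationProtocol(self):
        cq, service = self.makeReconnector(fireImmediately=True)
        # the fake endpoint fired synchronously and the service was started,
        # so exactly one unwrapped application protocol should exist by now
        self.assertEqual(1, len(cq.applicationProtocols))
        self.assertTrue(service.running)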
Ejemplo n.º 35
0
                                        '|'.join(dynamic_commands.keys()) +
                                        ')\s*).*')
    static_commands_regex = re.compile(
        '\s*(' + triggers + ')\s*((' +
        '|'.join(cmd_cfg['static_commands'].keys()) + ')\s*).*')
    help_command_regex = re.compile('\s*(' + triggers + ')\s*(help\s*).*')

    markov = MarkovBrain(config['brain']['brain_file'],
                         config['brain']['chain_length'],
                         config['brain']['max_words'])

    # Lookup actual address for host (twisted only uses ipv6 if given an explicit ipv6 address)
    host_info = socket.getaddrinfo(irc_cfg['host'], irc_cfg['port'], 0, 0,
                                   socket.IPPROTO_TCP,
                                   socket.AI_CANONNAME)[0][4]
    client_string = "%s:%s:%u" % ('tls' if irc_cfg['ssl'] else 'tcp',
                                  host_info[0].replace(':',
                                                       '\:'), host_info[1])

    endpoint = clientFromString(reactor, client_string)
    bot_client_service = ClientService(
        endpoint,
        sadfaceBotFactory(config, markov, dynamic_commands,
                          dynamic_commands_regex, static_commands_regex,
                          help_command_regex))
    bot_client_service.startService()

    reactor.run()

    markov.close()