Example #1
from collections import OrderedDict

from twisted.web._stan import Tag, slot, Comment, CDATA, CharRef
from twisted.web.iweb import ITemplateLoader
from twisted.logger import Logger

TEMPLATE_NAMESPACE = 'http://twistedmatrix.com/ns/twisted.web.template/0.1'

# Go read the definition of NOT_DONE_YET. For lulz. This is totally
# equivalent. And this turns out to be necessary, because trying to import
# NOT_DONE_YET in this module causes a circular import which we cannot escape
# from. From which we cannot escape. Etc. glyph is okay with this solution for
# now, and so am I, as long as this comment stays to explain to future
# maintainers what it means. ~ C.
#
# See http://twistedmatrix.com/trac/ticket/5557 for progress on fixing this.
NOT_DONE_YET = 1
_moduleLog = Logger()


class _NSContext:
    """
    A mapping from XML namespaces onto their prefixes in the document.
    """
    def __init__(self, parent=None):
        """
        Pull out the parent's namespaces, if there's no parent then default to
        XML.
        """
        self.parent = parent
        if parent is not None:
            self.nss = OrderedDict(parent.nss)
        else:
            # With no parent, default to the standard XML namespace prefix.
            self.nss = OrderedDict(
                {'http://www.w3.org/XML/1998/namespace': 'xml'})
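
# A minimal usage sketch (not part of the original module): child
# contexts start from a copy of the parent's namespace -> prefix mapping,
# so changes made to the child never leak back into the parent. The
# namespace URIs below are made up for illustration.
root = _NSContext()
root.nss['http://example.com/ns'] = 'ex'
child = _NSContext(parent=root)
assert child.nss['http://example.com/ns'] == 'ex'
child.nss['http://example.com/other'] = 'o'
assert 'http://example.com/other' not in root.nss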
Example #2
from collections import deque

import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import priority

from twisted.internet.defer import Deferred
from twisted.internet.interfaces import ISSLTransport
from twisted.internet.protocol import Protocol
from twisted.logger import Logger
from twisted.protocols.policies import TimeoutMixin

# Stand-in for the module-level sentinel that twisted.web._http2 pushes
# onto a stream's queue to mark the end of its outbound data. H2Stream,
# defined elsewhere in the same module, is also required.
_END_STREAM_SENTINEL = object()


class H2Connection(Protocol, TimeoutMixin):
    """
    A class representing a single HTTP/2 connection.

    This implementation of L{IProtocol} works hand in hand with L{H2Stream}.
    This is because we have the requirement to register multiple producers for
    a single HTTP/2 connection, one for each stream. The standard Twisted
    interfaces don't really allow for this, so instead a custom interface
    between the two objects lets them cooperate closely.

    @ivar conn: The HTTP/2 connection state machine.
    @type conn: L{h2.connection.H2Connection}

    @ivar streams: A mapping of stream IDs to L{H2Stream} objects, used to call
        specific methods on streams when events occur.
    @type streams: L{dict}, mapping L{int} stream IDs to L{H2Stream} objects.

    @ivar priority: An HTTP/2 priority tree used to ensure that responses are
        prioritised appropriately.
    @type priority: L{priority.PriorityTree}

    @ivar _consumerBlocked: A flag tracking whether or not the L{IConsumer}
        that is consuming this data has asked us to stop producing.
    @type _consumerBlocked: L{bool}

    @ivar _sendingDeferred: A L{Deferred} used to restart the data-sending loop
        when more response data has been produced. Will not be present if there
        is outstanding data still to send.
    @type _sendingDeferred: A L{twisted.internet.defer.Deferred}, or L{None}

    @ivar _outboundStreamQueues: A map of stream IDs to queues, used to store
        data blocks that are yet to be sent on the connection. These are used
        both to handle producers that do not respect L{IConsumer} but also to
        allow priority to multiplex data appropriately.
    @type _outboundStreamQueues: A L{dict} mapping L{int} stream IDs to
        L{collections.deque} queues, which contain either L{bytes} objects or
        C{_END_STREAM_SENTINEL}.

    @ivar _sender: A handle to the data-sending loop, allowing it to be
        terminated if needed.
    @type _sender: L{twisted.internet.task.LoopingCall}

    @ivar abortTimeout: The number of seconds to wait after we attempt to shut
        the transport down cleanly to give up and forcibly terminate it. This
        is only used when we time a connection out, to prevent errors causing
        the FD to get leaked. If this is L{None}, we will wait forever.
    @type abortTimeout: L{int}

    @ivar _abortingCall: The L{twisted.internet.base.DelayedCall} that will be
        used to forcibly close the transport if it doesn't close cleanly.
    @type _abortingCall: L{twisted.internet.base.DelayedCall}
    """
    factory = None
    site = None
    abortTimeout = 15

    _log = Logger()
    _abortingCall = None

    def __init__(self, reactor=None):
        self.conn = h2.connection.H2Connection(client_side=False,
                                               header_encoding=None)
        self.streams = {}

        self.priority = priority.PriorityTree()
        self._consumerBlocked = None
        self._sendingDeferred = None
        self._outboundStreamQueues = {}
        self._streamCleanupCallbacks = {}
        self._stillProducing = True

        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # Start the data sending function.
        self._reactor.callLater(0, self._sendPrioritisedData)

    # Implementation of IProtocol
    def connectionMade(self):
        """
        Called by the reactor when a connection is received. May also be called
        by the L{twisted.web.http._GenericHTTPChannelProtocol} during upgrade
        to HTTP/2.
        """
        self.setTimeout(self.timeOut)
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())

    def dataReceived(self, data):
        """
        Called whenever a chunk of data is received from the transport.

        @param data: The data received from the transport.
        @type data: L{bytes}
        """
        self.resetTimeout()

        try:
            events = self.conn.receive_data(data)
        except h2.exceptions.ProtocolError:
            # A remote protocol error terminates the connection.
            dataToSend = self.conn.data_to_send()
            self.transport.write(dataToSend)
            self.transport.loseConnection()
            self.connectionLost("Protocol error from peer.")
            return

        for event in events:
            if isinstance(event, h2.events.RequestReceived):
                self._requestReceived(event)
            elif isinstance(event, h2.events.DataReceived):
                self._requestDataReceived(event)
            elif isinstance(event, h2.events.StreamEnded):
                self._requestEnded(event)
            elif isinstance(event, h2.events.StreamReset):
                self._requestAborted(event)
            elif isinstance(event, h2.events.WindowUpdated):
                self._handleWindowUpdate(event)
            elif isinstance(event, h2.events.PriorityUpdated):
                self._handlePriorityUpdate(event)
            elif isinstance(event, h2.events.ConnectionTerminated):
                self.transport.loseConnection()
                self.connectionLost("Shutdown by remote peer")

        dataToSend = self.conn.data_to_send()
        if dataToSend:
            self.transport.write(dataToSend)

    def timeoutConnection(self):
        """
        Called when the connection has been inactive for
        L{self.timeOut<twisted.protocols.policies.TimeoutMixin.timeOut>}
        seconds. Cleanly tears the connection down, attempting to notify the
        peer if needed.

        We override this method to add two extra bits of functionality:

         - We want to log the timeout.
         - We want to send a GOAWAY frame indicating that the connection is
           being terminated, and whether it was clean or not. We have to do this
           before the connection is torn down.
        """
        self._log.info("Timing out client {client}",
                       client=self.transport.getPeer())

        # Check whether there are open streams. If there are, we're going to
        # want to use the error code PROTOCOL_ERROR. If there aren't, use
        # NO_ERROR.
        if (self.conn.open_outbound_streams > 0
                or self.conn.open_inbound_streams > 0):
            error_code = h2.errors.PROTOCOL_ERROR
        else:
            error_code = h2.errors.NO_ERROR

        self.conn.close_connection(error_code=error_code)
        self.transport.write(self.conn.data_to_send())

        # Don't let the client hold this connection open too long.
        if self.abortTimeout is not None:
            # We use self.callLater because that's what TimeoutMixin does, even
            # though we have a perfectly good reactor sitting around. See
            # https://twistedmatrix.com/trac/ticket/8488.
            self._abortingCall = self.callLater(self.abortTimeout,
                                                self.forceAbortClient)

        # We're done, throw the connection away.
        self.transport.loseConnection()

    def forceAbortClient(self):
        """
        Called if C{abortTimeout} seconds have passed since the timeout fired,
        and the connection still hasn't gone away. This can really only happen
        on extremely bad connections or when clients are maliciously attempting
        to keep connections open.
        """
        self._log.info("Forcibly timing out client: {client}",
                       client=self.transport.getPeer())
        self.transport.abortConnection()

    def connectionLost(self, reason):
        """
        Called when the transport connection is lost.

        Informs all outstanding response handlers that the connection has been
        lost, and cleans up all internal state.
        """
        self._stillProducing = False
        self.setTimeout(None)

        for stream in self.streams.values():
            stream.connectionLost(reason)

        for streamID in list(self.streams.keys()):
            self._requestDone(streamID)

        # If we were going to force-close the transport, we don't have to now.
        if self._abortingCall is not None:
            self._abortingCall.cancel()
            self._abortingCall = None

    # Implementation of IPushProducer
    #
    # Here's how we handle IPushProducer. We have multiple outstanding
    # H2Streams. Each of these exposes an IConsumer interface to the response
    # handler that allows it to push data into the H2Stream. The H2Stream then
    # writes the data into the H2Connection object.
    #
    # The H2Connection needs to manage these writes to account for:
    #
    # - flow control
    # - priority
    #
    # We manage each of these in different ways.
    #
    # For flow control, we use the equivalent of the IPushProducer
    # interface. We tell the H2Stream: "Hey, you can't send any data
    # right now, sorry!". When that stream becomes unblocked, we free it up
    # again. This allows the H2Stream to propagate this backpressure up the
    # chain.
    #
    # For priority, we need to keep a backlog of data frames that we can send,
    # and interleave them appropriately. This backlog is most sensibly kept in
    # the H2Connection object itself. We keep one queue per stream, which is
    # where the writes go, and then we have a loop that manages popping these
    # streams off in priority order.
    #
    # Logically then, we go as follows:
    #
    # 1. Stream calls writeDataToStream(). This causes a DataFrame to be placed
    #    on the queue for that stream. It also informs the priority
    #    implementation that this stream is unblocked.
    # 2. The _sendPrioritisedData() function spins in a tight loop. Each
    #    iteration it asks the priority implementation which stream should send
    #    next, and pops a data frame off that stream's queue. If, after sending
    #    that frame, there is no data left on that stream's queue, the function
    #    informs the priority implementation that the stream is blocked.
    #
    # If all streams are blocked, or if there are no outstanding streams, the
    # _sendPrioritisedData function waits to be awoken when more data is ready
    # to send.
    #
    # Note that all of this only applies to *data*. Headers and other control
    # frames deliberately skip this processing as they are not subject to flow
    # control or priority constraints.
    def stopProducing(self):
        """
        Stop producing data.

        This tells the L{H2Connection} that its consumer has died, so it must
        stop producing data for good.
        """
        self.connectionLost("stopProducing")

    def pauseProducing(self):
        """
        Pause producing data.

        Tells the L{H2Connection} that it has produced too much data to process
        for the time being, and to stop until resumeProducing() is called.
        """
        self._consumerBlocked = Deferred()

    def resumeProducing(self):
        """
        Resume producing data.

        This tells the L{H2Connection} to re-add itself to the main loop and
        produce more data for the consumer.
        """
        if self._consumerBlocked is not None:
            d = self._consumerBlocked
            self._consumerBlocked = None
            d.callback(None)

    def _sendPrioritisedData(self, *args):
        """
        The data sending loop. This function repeatedly calls itself, either
        from L{Deferred}s or from
        L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}.

        This function sends data on streams according to the rules of HTTP/2
        priority. It ensures that the data from each stream is interleaved
        according to the priority signalled by the client, making sure that the
        connection is used with maximal efficiency.

        This function will execute if data is available: if all data is
        exhausted, the function will place a deferred onto the L{H2Connection}
        object and wait until it is called to resume executing.
        """
        # If producing has stopped, we're done. Don't reschedule ourselves
        if not self._stillProducing:
            return

        stream = None

        while stream is None:
            try:
                stream = next(self.priority)
            except priority.DeadlockError:
                # All streams are currently blocked or not progressing. Wait
                # until a new one becomes available.
                assert self._sendingDeferred is None
                self._sendingDeferred = Deferred()
                self._sendingDeferred.addCallback(self._sendPrioritisedData)
                return

        # Wait behind the transport.
        if self._consumerBlocked is not None:
            self._consumerBlocked.addCallback(self._sendPrioritisedData)
            return

        remainingWindow = self.conn.local_flow_control_window(stream)
        frameData = self._outboundStreamQueues[stream].popleft()
        maxFrameSize = min(self.conn.max_outbound_frame_size, remainingWindow)

        if frameData is _END_STREAM_SENTINEL:
            # There's no error handling here even though this can throw
            # ProtocolError because we really shouldn't encounter this problem.
            # If we do, that's a nasty bug.
            self.conn.end_stream(stream)
            self.transport.write(self.conn.data_to_send())

            # Clean up the stream
            self._requestDone(stream)
        else:
            # Respect the max frame size.
            if len(frameData) > maxFrameSize:
                excessData = frameData[maxFrameSize:]
                frameData = frameData[:maxFrameSize]
                self._outboundStreamQueues[stream].appendleft(excessData)

            # There's deliberately no error handling here, because this just
            # absolutely should not happen.
            # If for whatever reason the max frame length is zero and so we
            # have no frame data to send, don't send any.
            if frameData:
                self.conn.send_data(stream, frameData)
                self.transport.write(self.conn.data_to_send())

            # If there's no data left, this stream is now blocked.
            if not self._outboundStreamQueues[stream]:
                self.priority.block(stream)

            # Also, if the stream's flow control window is exhausted, tell it
            # to stop.
            if self.remainingOutboundWindow(stream) <= 0:
                self.streams[stream].flowControlBlocked()

        self._reactor.callLater(0, self._sendPrioritisedData)

    # Internal functions.
    def _requestReceived(self, event):
        """
        Internal handler for when a request has been received.

        @param event: The Hyper-h2 event that encodes information about the
            received request.
        @type event: L{h2.events.RequestReceived}
        """
        stream = H2Stream(event.stream_id, self, event.headers,
                          self.requestFactory, self.site, self.factory)
        self.streams[event.stream_id] = stream
        self._streamCleanupCallbacks[event.stream_id] = Deferred()
        self._outboundStreamQueues[event.stream_id] = deque()

        # Add the stream to the priority tree but immediately block it.
        try:
            self.priority.insert_stream(event.stream_id)
        except priority.DuplicateStreamError:
            # Stream already in the tree. This can happen if we received a
            # PRIORITY frame before a HEADERS frame. Just move on: we set the
            # stream up properly in _handlePriorityUpdate.
            pass
        else:
            self.priority.block(event.stream_id)

    def _requestDataReceived(self, event):
        """
        Internal handler for when a chunk of data is received for a given
        request.

        @param event: The Hyper-h2 event that encodes information about the
            received data.
        @type event: L{h2.events.DataReceived}
        """
        stream = self.streams[event.stream_id]
        stream.receiveDataChunk(event.data, event.flow_controlled_length)

    def _requestEnded(self, event):
        """
        Internal handler for when a request is complete, and we expect no
        further data for that request.

        @param event: The Hyper-h2 event that encodes information about the
            completed stream.
        @type event: L{h2.events.StreamEnded}
        """
        stream = self.streams[event.stream_id]
        stream.requestComplete()

    def _requestAborted(self, event):
        """
        Internal handler for when a request is aborted by a remote peer.

        @param event: The Hyper-h2 event that encodes information about the
            reset stream.
        @type event: L{h2.events.StreamReset}
        """
        stream = self.streams[event.stream_id]
        stream.connectionLost("Stream reset")
        self._requestDone(event.stream_id)

    def _handlePriorityUpdate(self, event):
        """
        Internal handler for when a stream priority is updated.

        @param event: The Hyper-h2 event that encodes information about the
            stream reprioritization.
        @type event: L{h2.events.PriorityUpdated}
        """
        try:
            self.priority.reprioritize(
                stream_id=event.stream_id,
                depends_on=event.depends_on or None,
                weight=event.weight,
                exclusive=event.exclusive,
            )
        except priority.MissingStreamError:
            # A PRIORITY frame arrived before the HEADERS frame that would
            # trigger us to insert the stream into the tree. That's fine: we
            # can create the stream here and mark it as blocked.
            self.priority.insert_stream(
                stream_id=event.stream_id,
                depends_on=event.depends_on or None,
                weight=event.weight,
                exclusive=event.exclusive,
            )
            self.priority.block(event.stream_id)

    def writeHeaders(self, version, code, reason, headers, streamID):
        """
        Called by L{twisted.web.http.Request} objects to write a complete set
        of HTTP headers to a stream.

        @param version: The HTTP version in use. Unused in HTTP/2.
        @type version: L{bytes}

        @param code: The HTTP status code to write.
        @type code: L{bytes}

        @param reason: The HTTP reason phrase to write. Unused in HTTP/2.
        @type reason: L{bytes}

        @param headers: The headers to write to the stream.
        @type headers: A L{list} of L{tuple}s of L{bytes} header names and
            values.

        @param streamID: The ID of the stream to write the headers to.
        @type streamID: L{int}
        """
        headers.insert(0, (b':status', code))

        try:
            self.conn.send_headers(streamID, headers)
        except h2.exceptions.StreamClosedError:
            # Stream was closed by the client at some point. We need to not
            # explode here: just swallow the error. That's what write() does
            # when a connection is lost, so that's what we do too.
            return
        else:
            self.transport.write(self.conn.data_to_send())

    def writeDataToStream(self, streamID, data):
        """
        May be called by L{H2Stream} objects to write response data to a given
        stream. Writes a single data frame.

        @param streamID: The ID of the stream to write the data to.
        @type streamID: L{int}

        @param data: The data chunk to write to the stream.
        @type data: L{bytes}
        """
        self._outboundStreamQueues[streamID].append(data)

        # There's obviously no point unblocking this stream and the sending
        # loop if the data can't actually be sent, so confirm that there's
        # some room to send data.
        if self.conn.local_flow_control_window(streamID) > 0:
            self.priority.unblock(streamID)
            if self._sendingDeferred is not None:
                d = self._sendingDeferred
                self._sendingDeferred = None
                d.callback(streamID)

        if self.remainingOutboundWindow(streamID) <= 0:
            self.streams[streamID].flowControlBlocked()

    def endRequest(self, streamID):
        """
        Called by L{H2Stream} objects to signal completion of a response.

        @param streamID: The ID of the stream whose response is now complete.
        @type streamID: L{int}
        """
        self._outboundStreamQueues[streamID].append(_END_STREAM_SENTINEL)
        self.priority.unblock(streamID)
        if self._sendingDeferred is not None:
            d = self._sendingDeferred
            self._sendingDeferred = None
            d.callback(streamID)

    def abortRequest(self, streamID):
        """
        Called by L{H2Stream} objects to request early termination of a stream.
        This emits a RstStream frame and then removes all stream state.

        @param streamID: The ID of the stream to terminate.
        @type streamID: L{int}
        """
        self.conn.reset_stream(streamID)
        self.transport.write(self.conn.data_to_send())
        self._requestDone(streamID)

    def _requestDone(self, streamID):
        """
        Called internally by the data sending loop to clean up state that was
        being used for the stream. Called when the stream is complete.

        @param streamID: The ID of the stream to clean up state for.
        @type streamID: L{int}
        """
        del self._outboundStreamQueues[streamID]
        self.priority.remove_stream(streamID)
        del self.streams[streamID]
        cleanupCallback = self._streamCleanupCallbacks.pop(streamID)
        cleanupCallback.callback(streamID)

    def remainingOutboundWindow(self, streamID):
        """
        Called to determine how much room is left in the send window for a
        given stream. Allows us to handle blocking and unblocking producers.

        @param streamID: The ID of the stream whose flow control window we'll
            check.
        @type streamID: L{int}

        @return: The amount of room remaining in the send window for the given
            stream, including the data queued to be sent.
        @rtype: L{int}
        """
        # TODO: This involves a fair bit of looping and computation for
        # something that is called a lot. Consider caching values somewhere.
        windowSize = self.conn.local_flow_control_window(streamID)
        sendQueue = self._outboundStreamQueues[streamID]
        alreadyConsumed = sum(
            len(chunk) for chunk in sendQueue
            if chunk is not _END_STREAM_SENTINEL)

        return windowSize - alreadyConsumed

    def _handleWindowUpdate(self, event):
        """
        Manage flow control windows.

        Streams that are blocked on flow control will register themselves with
        the connection. This will fire deferreds that wake those streams up and
        allow them to continue processing.

        @param event: The Hyper-h2 event that encodes information about the
            flow control window change.
        @type event: L{h2.events.WindowUpdated}
        """
        streamID = event.stream_id

        if streamID:
            if not self._streamIsActive(streamID):
                # We may have already cleaned up our stream state, making this
                # a late WINDOW_UPDATE frame. That's fine: the update is
                # unnecessary but benign. We'll ignore it.
                return

            # If we haven't got any data to send, don't unblock the stream. If
            # we do, we'll eventually get an exception inside the
            # _sendPrioritisedData loop some time later.
            if self._outboundStreamQueues.get(streamID):
                self.priority.unblock(streamID)
            self.streams[streamID].windowUpdated()
        else:
            # Update strictly applies to all streams.
            for stream in self.streams.values():
                stream.windowUpdated()

                # If we still have data to send for this stream, unblock it.
                if self._outboundStreamQueues.get(stream.streamID):
                    self.priority.unblock(stream.streamID)

    def getPeer(self):
        """
        Get the remote address of this connection.

        Treat this method with caution.  It is the unfortunate result of the
        CGI and Jabber standards, but should not be considered reliable for
        the usual host of reasons; port forwarding, proxying, firewalls, IP
        masquerading, etc.

        @return: An L{IAddress} provider.
        """
        return self.transport.getPeer()

    def getHost(self):
        """
        Similar to getPeer, but returns an address describing this side of the
        connection.

        @return: An L{IAddress} provider.
        """
        return self.transport.getHost()

    def openStreamWindow(self, streamID, increment):
        """
        Open the stream window by a given increment.

        @param streamID: The ID of the stream whose window needs to be opened.
        @type streamID: L{int}

        @param increment: The amount by which the stream window must be
            incremented.
        @type increment: L{int}
        """
        self.conn.acknowledge_received_data(increment, streamID)
        data = self.conn.data_to_send()
        if data:
            self.transport.write(data)

    def _isSecure(self):
        """
        Returns L{True} if this channel is using a secure transport.

        @returns: L{True} if this channel is secure.
        @rtype: L{bool}
        """
        # A channel is secure if its transport is ISSLTransport.
        return ISSLTransport(self.transport, None) is not None

    def _send100Continue(self, streamID):
        """
        Sends a 100 Continue response, used to signal to clients that further
        processing will be performed.

        @param streamID: The ID of the stream that needs the 100 Continue
            response.
        @type streamID: L{int}
        """
        headers = [(b':status', b'100')]
        self.conn.send_headers(headers=headers, stream_id=streamID)
        self.transport.write(self.conn.data_to_send())

    def _respondToBadRequestAndDisconnect(self, streamID):
        """
        This is a quick and dirty way of responding to bad requests.

        As described by the HTTP standard, we should be patient and accept the
        whole request from the client before sending a polite bad request
        response, even when clients send tons of data.

        Unlike in the HTTP/1.1 case, this does not actually disconnect the
        underlying transport: there's no need. This instead just sends a 400
        response and terminates the stream.

        @param streamID: The ID of the stream that needs the 400 Bad Request
            response.
        @type streamID: L{int}
        """
        headers = [(b':status', b'400')]
        self.conn.send_headers(headers=headers,
                               stream_id=streamID,
                               end_stream=True)
        self.transport.write(self.conn.data_to_send())

        stream = self.streams[streamID]
        stream.connectionLost("Stream reset")
        self._requestDone(streamID)

    def _streamIsActive(self, streamID):
        """
        Checks whether Twisted has still got state for a given stream and so
        can process events for that stream.

        @param streamID: The ID of the stream that needs processing.
        @type streamID: L{int}

        @return: Whether the stream still has state allocated.
        @rtype: L{bool}
        """
        return streamID in self.streams
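
# A standalone sketch (not from Twisted) of the queue-per-stream pattern
# described in the IPushProducer comments inside H2Connection: one deque
# per stream, with the `priority` library picking which unblocked stream
# sends next. Stream IDs and payloads are made up for illustration.
from collections import deque

import priority

tree = priority.PriorityTree()
queues = {1: deque([b'index.html']), 3: deque([b'app.js', b'app.css'])}
for stream_id in queues:
    tree.insert_stream(stream_id)  # newly inserted streams start unblocked

sent = []
while True:
    try:
        stream_id = next(tree)  # highest-priority unblocked stream
    except priority.DeadlockError:
        break  # every stream is blocked: all queues are drained
    sent.append(queues[stream_id].popleft())
    if not queues[stream_id]:
        tree.block(stream_id)  # out of data, so block it like H2Connection

# `sent` interleaves frames from streams 1 and 3 according to their weights.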
Example #3
""""
Base class for all canarydrop channels.
"""

import datetime

import simplejson

import settings
from exception import DuplicateChannel
from twisted.logger import Logger
log = Logger()


class Channel(object):
    CHANNEL = 'Base'

    def __init__(self, switchboard=None, name=None):
        self.switchboard = switchboard
        self.name = name or self.CHANNEL
        log.info('Started channel {name}', name=self.name)


class InputChannel(Channel):
    CHANNEL = 'InputChannel'

    def __init__(self, switchboard=None, name=None, unique_channel=False):
        super(InputChannel, self).__init__(switchboard=switchboard, name=name)
        try:
            self.register_input_channel()
        except DuplicateChannel as e:
            # The listing is truncated here; a plausible handler (an
            # assumption, not the original code) logs the duplicate and
            # carries on.
            log.error('Channel {name} already registered: {err}',
                      name=self.name, err=e)
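
# A minimal usage sketch (not part of the original module; a real
# switchboard object would come from the host application):
base = Channel(switchboard=None)                # name defaults to 'Base'
named = Channel(switchboard=None, name='smtp')  # explicit channel name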
Example #4
    def __init__(
        self,
        domains: Set,
        network_middleware: RestMiddleware = __DEFAULT_MIDDLEWARE_CLASS(),
        start_learning_now: bool = False,
        learn_on_same_thread: bool = False,
        known_nodes: tuple = None,
        seed_nodes: Tuple[tuple] = None,
        node_storage=None,
        save_metadata: bool = False,
        abort_on_learning_error: bool = False,
        lonely: bool = False,
    ) -> None:

        self.log = Logger("learning-loop")  # type: Logger

        self.learning_domains = domains
        self.network_middleware = network_middleware
        self.save_metadata = save_metadata
        self.start_learning_now = start_learning_now
        self.learn_on_same_thread = learn_on_same_thread

        self._abort_on_learning_error = abort_on_learning_error
        self._learning_listeners = defaultdict(list)
        self._node_ids_to_learn_about_immediately = set()

        self.__known_nodes = self.tracker_class()

        self.lonely = lonely
        self.done_seeding = False

        # Node storage: fall back to the class default when none is provided.
        if node_storage is None:
            node_storage = self.__DEFAULT_NODE_STORAGE(
                federated_only=self.federated_only,
                # TODO: remove federated_only
                character_class=self.__class__)

        self.node_storage = node_storage
        if save_metadata and node_storage is NO_STORAGE_AVAILIBLE:
            raise ValueError(
                "Cannot save nodes without a configured node storage")

        known_nodes = known_nodes or tuple()
        # TODO: Attempt to use these again later
        self.unresponsive_startup_nodes = []
        for node in known_nodes:
            try:
                # TODO: Need to test this better - do we ever init an
                # Ursula-Learner with Node Storage?
                self.remember_node(node)
            except self.UnresponsiveTeacher:
                self.unresponsive_startup_nodes.append(node)

        self.teacher_nodes = deque()
        self._current_teacher_node = None  # type: Teacher
        self._learning_task = task.LoopingCall(self.keep_learning_about_nodes)
        self._learning_round = 0  # type: int
        self._rounds_without_new_nodes = 0  # type: int
        self._seed_nodes = seed_nodes or []
        self.unresponsive_seed_nodes = set()

        if self.start_learning_now:
            self.start_learning_loop(now=self.learn_on_same_thread)
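
# A standalone sketch (not from the original) of the LoopingCall pattern
# used for the learning loop above: LoopingCall wraps a callable, and
# start(interval) schedules it every `interval` seconds, returning a
# Deferred that fires when the loop stops.
from twisted.internet import task

def _learn():
    print('learning round')  # stand-in for keep_learning_about_nodes

learning_task = task.LoopingCall(_learn)
# learning_task.start(10.0) would begin the loop under a running reactor.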
Example #5
class Teacher:
    TEACHER_VERSION = LEARNING_LOOP_VERSION
    verified_stamp = False
    verified_interface = False
    _verified_node = False
    _interface_info_splitter = (int, 4, {'byteorder': 'big'})
    log = Logger("teacher")
    __DEFAULT_MIN_SEED_STAKE = 0

    def __init__(
        self,
        domains: Set,
        certificate: Certificate,
        certificate_filepath: str,
        interface_signature=NOT_SIGNED.bool_value(False),
        timestamp=NOT_SIGNED,
        identity_evidence=NOT_SIGNED,
        substantiate_immediately=False,
        passphrase=None,
    ) -> None:

        self.serving_domains = domains
        self.certificate = certificate
        self.certificate_filepath = certificate_filepath
        self._interface_signature_object = interface_signature
        self._timestamp = timestamp
        self.last_seen = NEVER_SEEN("Haven't connected to this node yet.")
        self.fleet_state_checksum = None
        self.fleet_state_updated = None
        self._evidence_of_decentralized_identity = constant_or_bytes(
            identity_evidence)

        if substantiate_immediately:
            self.substantiate_stamp(
                password=passphrase)  # TODO: Derive from keyring

    class InvalidNode(SuspiciousActivity):
        """
        Raised when a node has an invalid characteristic - stamp, interface, or address.
        """

    class WrongMode(TypeError):
        """
        Raised when a Character tries to use another Character as decentralized when the latter is federated_only.
        """

    class IsFromTheFuture(TypeError):
        """
        Raised when deserializing a Character from a future version.
        """

    @classmethod
    def from_tls_hosting_power(cls, tls_hosting_power: TLSHostingPower, *args,
                               **kwargs) -> 'Teacher':
        certificate_filepath = tls_hosting_power.keypair.certificate_filepath
        certificate = tls_hosting_power.keypair.certificate
        return cls(certificate=certificate,
                   certificate_filepath=certificate_filepath,
                   *args,
                   **kwargs)

    #
    # Known Nodes
    #

    def seed_node_metadata(self, as_teacher_uri=False):
        if as_teacher_uri:
            teacher_uri = f'{self.checksum_public_address}@{self.rest_server.rest_interface.host}:{self.rest_server.rest_interface.port}'
            return teacher_uri
        return SeednodeMetadata(
            self.checksum_public_address,  # type: str
            self.rest_server.rest_interface.host,  # type: str
            self.rest_server.rest_interface.port)  # type: int

    def sorted_nodes(self):
        nodes_to_consider = list(self.known_nodes.values()) + [self]
        return sorted(nodes_to_consider,
                      key=lambda n: n.checksum_public_address)

    def update_snapshot(self, checksum, updated, number_of_known_nodes):
        # We update the simple snapshot here, but if we're dealing with an
        # instance that is also a Learner, it has its own notion of its
        # FleetState, so we'll eventually need a reckoning of sorts here to
        # manage that.
        self.fleet_state_nickname, self.fleet_state_nickname_metadata = nickname_from_seed(
            checksum, number_of_pairs=1)
        self.fleet_state_checksum = checksum
        self.fleet_state_updated = updated
        self.fleet_state_icon = icon_from_checksum(
            self.fleet_state_checksum,
            nickname_metadata=self.fleet_state_nickname_metadata,
            number_of_nodes=number_of_known_nodes)

    #
    # Stamp
    #

    def _stamp_has_valid_wallet_signature(self):
        signature_bytes = self._evidence_of_decentralized_identity
        if signature_bytes is NOT_SIGNED:
            return False
        else:
            signature = EthSignature(signature_bytes)
        proper_pubkey = signature.recover_public_key_from_msg(bytes(
            self.stamp))
        proper_address = proper_pubkey.to_checksum_address()
        return proper_address == self.checksum_public_address

    def stamp_is_valid(self):
        """
        :return:
        """
        signature = self._evidence_of_decentralized_identity
        if self._stamp_has_valid_wallet_signature():
            self.verified_stamp = True
            return True
        elif self.federated_only and signature is NOT_SIGNED:
            message = "This node can't be verified in this manner, " \
                      "but is OK to use in federated mode if you" \
                      " have reason to believe it is trustworthy."
            raise self.WrongMode(message)
        else:
            raise self.InvalidNode

    def verify_id(self, ursula_id, digest_factory=bytes):
        self.verify()
        if not ursula_id == digest_factory(self.canonical_public_address):
            raise self.InvalidNode

    def validate_metadata(self, accept_federated_only=False):
        if not self.verified_interface:
            self.interface_is_valid()
        if not self.verified_stamp:
            try:
                self.stamp_is_valid()
            except self.WrongMode:
                if not accept_federated_only:
                    raise

    def verify_node(self,
                    network_middleware,
                    certificate_filepath: str = None,
                    accept_federated_only: bool = False,
                    force: bool = False) -> bool:
        """
        Three things happening here:

        * Verify that the stamp matches the address (raises InvalidNode if it's not valid, or WrongMode if it's a federated node being verified as a decentralized one)
        * Verify the interface signature (raises InvalidNode if not valid)
        * Connect to the node, make sure that it's up, and that the signature and address we checked are the same ones this node is using now. (raises InvalidNode if not valid; also emits a specific warning depending on which check failed).
        """
        if not force:
            if self._verified_node:
                return True

        # This is both the stamp and interface check.
        self.validate_metadata(accept_federated_only)

        if not certificate_filepath:

            if not self.certificate_filepath:
                raise TypeError(
                    "We haven't saved a certificate for this node yet.")
            else:
                certificate_filepath = self.certificate_filepath

        # The node's metadata is valid; let's be sure the interface is in order.
        response_data = network_middleware.node_information(
            host=self.rest_information()[0].host,
            port=self.rest_information()[0].port,
            certificate_filepath=certificate_filepath)

        version, node_bytes = self.version_splitter(response_data,
                                                    return_remainder=True)

        node_details = self.internal_splitter(node_bytes)
        # TODO check timestamp here.  589

        verifying_keys_match = node_details[
            'verifying_key'] == self.public_keys(SigningPower)
        encrypting_keys_match = node_details[
            'encrypting_key'] == self.public_keys(DecryptingPower)
        addresses_match = node_details[
            'public_address'] == self.canonical_public_address
        evidence_matches = node_details[
            'identity_evidence'] == self._evidence_of_decentralized_identity

        if not all((encrypting_keys_match, verifying_keys_match,
                    addresses_match, evidence_matches)):
            # TODO: Optional reporting.  355
            if not addresses_match:
                self.log.warn(
                    "Wallet address swapped out.  It appears that someone is trying to defraud this node."
                )
            if not verifying_keys_match:
                self.log.warn(
                    "Verifying key swapped out.  It appears that someone is impersonating this node."
                )
            raise self.InvalidNode(
                "Wrong cryptographic material for this node - something fishy going on."
            )
        else:
            self._verified_node = True

    def substantiate_stamp(self, password: str):
        blockchain_power = self._crypto_power.power_ups(BlockchainPower)
        blockchain_power.unlock_account(password=password)  # TODO: 349
        signature = blockchain_power.sign_message(bytes(self.stamp))
        self._evidence_of_decentralized_identity = signature

    #
    # Interface
    #

    def interface_is_valid(self):
        """
        Checks that the interface info is valid for this node's canonical address.
        """
        interface_info_message = self._signable_interface_info_message(
        )  # Contains canonical address.
        message = self.timestamp_bytes() + interface_info_message
        interface_is_valid = self._interface_signature.verify(
            message, self.public_keys(SigningPower))
        self.verified_interface = interface_is_valid
        if interface_is_valid:
            return True
        else:
            raise self.InvalidNode

    def _signable_interface_info_message(self):
        message = self.canonical_public_address + self.rest_information()[0]
        return message

    def _sign_and_date_interface_info(self):
        message = self._signable_interface_info_message()
        self._timestamp = maya.now()
        self._interface_signature_object = self.stamp(self.timestamp_bytes() +
                                                      message)

    @property
    def _interface_signature(self):
        if not self._interface_signature_object:
            try:
                self._sign_and_date_interface_info()
            except NoSigningPower:
                raise NoSigningPower(
                    "This Ursula is a stranger and cannot be used to verify.")
        return self._interface_signature_object

    @property
    def timestamp(self):
        if not self._timestamp:
            try:
                self._sign_and_date_interface_info()
            except NoSigningPower:
                raise NoSigningPower(
                    "This Node is a Stranger; you didn't init with a timestamp, so you can't verify."
                )
        return self._timestamp

    def timestamp_bytes(self):
        return self.timestamp.epoch.to_bytes(4, 'big')

    #
    # Nicknames
    #

    @property
    def nickname_icon(self):
        return '{} {}'.format(self.nickname_metadata[0][1],
                              self.nickname_metadata[1][1])

    def nickname_icon_html(self):
        icon_template = """
        <div class="nucypher-nickname-icon" style="border-top-color:{first_color}; border-left-color:{first_color}; border-bottom-color:{second_color}; border-right-color:{second_color};">
        <span class="small">{node_class} v{version}</span>
        <div class="symbols">
            <span class="single-symbol" style="color: {first_color}">{first_symbol}&#xFE0E;</span>
            <span class="single-symbol" style="color: {second_color}">{second_symbol}&#xFE0E;</span>
        </div>
        <br/>
        <span class="small-address">{address_first6}</span>
        </div>
        """.replace("  ", "").replace('\n', "")
        return icon_template.format(**self.nickname_icon_details)

    def nickname_icon_details(self):
        return dict(
            node_class=self.__class__.__name__,
            version=self.TEACHER_VERSION,
            # TODO: These index lookups are awful.
            first_color=self.nickname_metadata[0][0]['hex'],
            first_symbol=self.nickname_metadata[0][1],
            second_color=self.nickname_metadata[1][0]['hex'],
            second_symbol=self.nickname_metadata[1][1],
            address_first6=self.checksum_public_address[2:8])
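
# A small standalone check (not from the original) of the 4-byte
# big-endian timestamp encoding used by timestamp_bytes() and declared
# in _interface_info_splitter above.
epoch = 1_550_000_000
raw = epoch.to_bytes(4, 'big')
assert len(raw) == 4
assert int.from_bytes(raw, byteorder='big') == epoch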
Example #6
class FCMRouter(object):
    """FCM Router Implementation

    Note: FCM is a newer branch of GCM. While there's not much change
    required for the server, there is significant work required for the
    client. To that end, having a separate router allows the "older" GCM
    to persist and lets the client determine when they want to use the
    newer FCM route.
    """
    log = Logger()
    gcm = None
    dryRun = 0
    collapseKey = "simplepush"
    MAX_TTL = 2419200
    reasonTable = {
        "MissingRegistration": {
            "msg": ("'to' or 'registration_id' is blank or"
                    " invalid: {regid}"),
            "err": 500,
            "errno": 1,
        },
        "InvalidRegistration": {
            "msg": "registration_id is invalid: {regid}",
            "err": 410,
            "errno": 105,
        },
        "NotRegistered": {
            "msg": "device has unregistered with FCM: {regid}",
            "err": 410,
            "errno": 103,
        },
        "InvalidPackageName": {
            "msg": "Invalid Package Name specified",
            "err": 500,
            "errno": 2,
            "crit": True,
        },
        "MismatchSenderid": {
            "msg": "Invalid SenderID used: {senderid}",
            "err": 410,
            "errno": 105,
            "crit": True,
        },
        "MessageTooBig": {
            "msg": "Message length was too big: {nlen}",
            "err": 413,
            "errno": 104,
        },
        "InvalidDataKey": {
            "msg": ("Payload contains an invalid or restricted "
                    "key value"),
            "err": 500,
            "errno": 3,
            "crit": True,
        },
        "InvalidTtl": {
            "msg": "Invalid TimeToLive {ttl}",
            "err": 400,
            "errno": 111,
        },
        "Unavailable": {
            "msg": "Message has timed out or device is unavailable",
            "err": 200,
            "errno": 0,
        },
        "InternalServerError": {
            "msg": "FCM internal server error",
            "err": 500,
            "errno": 999,
        },
        "DeviceMessageRateExceeded": {
            "msg": "Too many messages for this device",
            "err": 503,
            "errno": 4,
        },
        "TopicsMessageRateExceeded": {
            "msg": "Too many subscribers for this topic",
            "err": 503,
            "errno": 5,
            "crit": True,
        },
        "Unreported": {
            "msg": "Error has no reported reason.",
            "err": 500,
            "errno": 999,
            "crit": True,
        }
    }

    def __init__(self, conf, router_conf, metrics):
        """Create a new FCM router and connect to FCM"""
        self.conf = conf
        self.router_conf = router_conf
        self.metrics = metrics
        self.min_ttl = router_conf.get("ttl", 60)
        self.dryRun = router_conf.get("dryrun", False)
        self.collapseKey = router_conf.get("collapseKey", "webpush")
        self.clients = {}
        try:
            for (sid, creds) in router_conf["creds"].items():
                self.clients[sid] = pyfcm.FCMNotification(
                    api_key=creds["auth"])
        except Exception as e:
            self.log.error("Could not instantiate FCM {ex}", ex=e)
            raise IOError("FCM Bridge not initiated in main")
        self._base_tags = ["platform:fcm"]
        self.log.debug("Starting FCM router...")

    def amend_endpoint_response(self, response, router_data):
        # type: (JSONDict, JSONDict) -> None
        response["senderid"] = router_data.get('app_id')

    def register(self, uaid, router_data, app_id, *args, **kwargs):
        # type: (str, JSONDict, str, *Any, **Any) -> None
        """Validate that the FCM Instance Token is in the ``router_data``"""
        # "token" is the FCM registration id token generated by the client.
        if "token" not in router_data:
            raise self._error("connect info missing FCM Instance 'token'",
                              status=401,
                              uri=kwargs.get('uri'),
                              senderid=repr(app_id))
        # senderid is the remote client's senderID value. This value is
        # very difficult for the client to change, and there was a problem
        # where some clients had an older, invalid senderID. We need to
        # be able to match a senderID to its corresponding auth key.
        # If the client has an unexpected or invalid SenderID,
        # it is impossible for us to reach them.
        if app_id not in self.clients:
            raise self._error("Invalid SenderID", status=410, errno=105)
        router_data["app_id"] = app_id

    def route_notification(self, notification, uaid_data):
        """Start the FCM notification routing, returns a deferred"""
        router_data = uaid_data["router_data"]
        # Kick the entire notification routing off to a thread
        return deferToThread(self._route, notification, router_data)

    def _route(self, notification, router_data):
        """Blocking FCM call to route the notification"""
        # THIS MUST MATCH THE CHANNELID GENERATED BY THE REGISTRATION SERVICE
        # Currently this value is in hex form.
        data = {"chid": notification.channel_id.hex}
        if not router_data.get("token"):
            raise self._error(
                "No registration token found. "
                "Rejecting message.",
                410,
                errno=106,
                log_exception=False)
        regid = router_data.get("token")
        # Payload data is optional. The endpoint handler validates that the
        # correct encryption headers are included with the data.
        if notification.data:
            mdata = self.router_conf.get('max_data', 4096)
            if notification.data_length > mdata:
                raise self._error("This message is intended for a " +
                                  "constrained device and is limited " +
                                  "to 3070 bytes. Converted buffer too " +
                                  "long by %d bytes" %
                                  (notification.data_length - mdata),
                                  413,
                                  errno=104,
                                  log_exception=False)

            data['body'] = notification.data
            data['con'] = notification.headers['encoding']

            if 'encryption' in notification.headers:
                data['enc'] = notification.headers['encryption']
            if 'crypto_key' in notification.headers:
                data['cryptokey'] = notification.headers['crypto_key']
            elif 'encryption_key' in notification.headers:
                data['enckey'] = notification.headers['encryption_key']
        try:
            client = self.clients[router_data["app_id"]]
        except KeyError:
            self.log.critical("Missing FCM bridge credentials for {id}",
                              id=router_data['app_id'])
            raise RouterException("Server error", status_code=500, error=901)

        # registration_ids are the FCM instance tokens (specified during
        # registration).
        router_ttl = min(self.MAX_TTL, max(self.min_ttl, notification.ttl
                                           or 0))
        try:
            result = client.notify_single_device(
                collapse_key=self.collapseKey,
                data_message=data,
                dry_run=self.dryRun or ('dryrun' in router_data),
                registration_id=regid,
                time_to_live=router_ttl,
            )
        except pyfcm.errors.AuthenticationError as e:
            self.log.error("Authentication Error: %s" % e)
            raise RouterException("Server error", status_code=500)
        except ConnectionError as e:
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(
                                       self._base_tags,
                                       error=502,
                                       errno=0,
                                       reason="connection_unavailable"))
            self.log.warn("Could not connect to FCM server: %s" % e)
            raise RouterException("Server error",
                                  status_code=502,
                                  log_exception=False)
        except Exception as e:
            self.log.error("Unhandled FCM Error: %s" % e)
            raise RouterException("Server error", status_code=500)
        return self._process_reply(result,
                                   notification,
                                   router_data,
                                   ttl=router_ttl)

    def _error(self, err, status, **kwargs):
        """Error handler that raises the RouterException"""
        self.log.debug(err, **kwargs)
        return RouterException(err,
                               status_code=status,
                               response_body=err,
                               **kwargs)

    def _process_reply(self, reply, notification, router_data, ttl):
        """Process FCM send reply"""
        # acks:
        #  for reg_id, msg_id in reply.success.items():
        # updates
        result = reply.get('results', [{}])[0]
        if reply.get('canonical_ids'):
            old_id = router_data['token']
            new_id = result.get('registration_id')
            self.log.debug("FCM id changed : {old} => {new}",
                           old=old_id,
                           new=new_id)
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  error=502,
                                                  errno=0,
                                                  reason="reregister"))
            return RouterResponse(status_code=503,
                                  response_body="Please try request again.",
                                  router_data=dict(token=new_id))
        if reply.get('failure'):
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  error=502,
                                                  errno=0,
                                                  reason="failure"))
            reason = result.get('error', "Unreported")
            err = self.reasonTable.get(reason, self.reasonTable["Unreported"])
            if err.get("crit", False):
                self.log.critical(
                    err['msg'],
                    nlen=notification.data_length,
                    regid=router_data["token"],
                    senderid=router_data.get('token'),
                    ttl=notification.ttl,
                )
                raise RouterException("FCM failure to deliver",
                                      status_code=err['err'],
                                      response_body="Please try request "
                                      "later.",
                                      log_exception=False)
            self.log.debug("{msg} : {info}",
                           msg=err['msg'],
                           info={
                               "app_id": router_data["app_id"],
                               "reason": reason
                           })
            return RouterResponse(
                status_code=err['err'],
                errno=err['errno'],
                response_body=err['msg'],
                router_data={},
            )
        self.metrics.increment("notification.bridge.sent",
                               tags=self._base_tags)
        self.metrics.increment("notification.message_data",
                               notification.data_length,
                               tags=make_tags(self._base_tags,
                                              destination="Direct"))
        location = "%s/m/%s" % (self.conf.endpoint_url, notification.version)
        return RouterResponse(status_code=201,
                              response_body="",
                              headers={
                                  "TTL": ttl,
                                  "Location": location
                              },
                              logged_status=200)
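For orientation, a hedged sketch of the reply shape `_process_reply` reads above (the legacy FCM HTTP API): the keys (`canonical_ids`, `failure`, `results`, `registration_id`, `error`) come from the code itself, while the concrete values are made up.

# Hypothetical FCM reply dicts, shaped like the ones _process_reply inspects.
reply_ok = {"success": 1, "failure": 0, "canonical_ids": 0,
            "results": [{"message_id": "0:abc"}]}              # -> 201 Created
reply_moved = {"success": 1, "failure": 0, "canonical_ids": 1,
               "results": [{"registration_id": "NEW_TOKEN"}]}  # -> 503, reregister
reply_bad = {"success": 0, "failure": 1, "canonical_ids": 0,
             "results": [{"error": "NotRegistered"}]}          # -> per reasonTable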
Example #7
class ClientService(service.Service):
    """
    A L{ClientService} maintains a single outgoing connection to a client
    endpoint, reconnecting after a configurable timeout when a connection
    fails, either before or after connecting.

    @since: 16.1.0
    """

    _log = Logger()

    def __init__(
        self, endpoint, factory, retryPolicy=None, clock=None, prepareConnection=None
    ):
        """
        @param endpoint: A L{stream client endpoint
            <interfaces.IStreamClientEndpoint>} provider which will be used to
            connect when the service starts.

        @param factory: A L{protocol factory <interfaces.IProtocolFactory>}
            which will be used to create clients for the endpoint.

        @param retryPolicy: A policy configuring how long L{ClientService} will
            wait between attempts to connect to C{endpoint}.
        @type retryPolicy: callable taking (the number of failed connection
            attempts made in a row (L{int})) and returning the number of
            seconds to wait before making another attempt.

        @param clock: The clock used to schedule reconnection.  It's mainly
            useful to be parametrized in tests.  If the factory is serialized,
            this attribute will not be serialized, and the default value (the
            reactor) will be restored when deserialized.
        @type clock: L{IReactorTime}

        @param prepareConnection: A single argument L{callable} that may return
            a L{Deferred}. It will be called once with the L{protocol
            <interfaces.IProtocol>} each time a new connection is made.  It may
            call methods on the protocol to prepare it for use (e.g.
            authenticate) or validate it (check its health).

            The C{prepareConnection} callable may raise an exception or return
            a L{Deferred} which fails to reject the connection.  A rejected
            connection is not used to fire an L{Deferred} returned by
            L{whenConnected}.  Instead, L{ClientService} handles the failure
            and continues as if the connection attempt were a failure
            (incrementing the counter passed to C{retryPolicy}).

            L{Deferred}s returned by L{whenConnected} will not fire until
            any L{Deferred} returned by the C{prepareConnection} callable
            fires.  Its successful result is consumed, but ignored.

            @since: Twisted 18.7.0

        @type prepareConnection: L{callable}

        """
        clock = _maybeGlobalReactor(clock)
        retryPolicy = _defaultPolicy if retryPolicy is None else retryPolicy

        self._machine = _ClientMachine(
            endpoint,
            factory,
            retryPolicy,
            clock,
            prepareConnection=prepareConnection,
            log=self._log,
        )

    def whenConnected(self, failAfterFailures=None):
        """
        Retrieve the currently-connected L{Protocol}, or the next one to
        connect.

        @param failAfterFailures: number of connection failures after which
            the Deferred will deliver a Failure (None means the Deferred will
            only fail if/when the service is stopped).  Set this to 1 to make
            the very first connection failure signal an error.  Use 2 to
            allow one failure but signal an error if the subsequent retry
            then fails.
        @type failAfterFailures: L{int} or None

        @return: a Deferred that fires with a protocol produced by the
            factory passed to C{__init__}
        @rtype: L{Deferred} that may:

            - fire with L{IProtocol}

            - fail with L{CancelledError} when the service is stopped

            - fail with e.g.
              L{DNSLookupError<twisted.internet.error.DNSLookupError>} or
              L{ConnectionRefusedError<twisted.internet.error.ConnectionRefusedError>}
              when the number of consecutive failed connection attempts
              equals the value of "failAfterFailures"
        """
        return self._machine.whenConnected(failAfterFailures)

    def startService(self):
        """
        Start this L{ClientService}, initiating the connection retry loop.
        """
        if self.running:
            self._log.warn("Duplicate ClientService.startService {log_source}")
            return
        super().startService()
        self._machine.start()

    def stopService(self):
        """
        Stop attempting to reconnect and close any existing connections.

        @return: a L{Deferred} that fires when all outstanding connections are
            closed and all in-progress connection attempts halted.
        """
        super().stopService()
        return self._machine.stop()
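A minimal usage sketch of L{ClientService} with a TCP endpoint; the host, port, and bare L{Protocol} are placeholders.

from twisted.application.internet import ClientService
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, Protocol

endpoint = TCP4ClientEndpoint(reactor, "example.com", 4321)
service = ClientService(endpoint, Factory.forProtocol(Protocol))
service.startService()
# Fail the Deferred if three connection attempts in a row fail.
d = service.whenConnected(failAfterFailures=3)
d.addCallback(lambda proto: proto.transport.write(b"hello\r\n"))
reactor.run()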
Example #8
    def __init__(self, json_ipc, verbose, quiet, no_logs, console_logs,
                 file_logs, sentry_logs, log_level, debug):

        self.log = Logger(self.__class__.__name__)

        # Session Emitter for pre and post character control engagement.
        if verbose and quiet:
            raise click.BadOptionUsage(
                option_name="quiet",
                message="--verbose and --quiet are mutually exclusive "
                "and cannot be used at the same time.")

        if verbose:
            GroupGeneralConfig.verbosity = 2
        elif quiet:
            GroupGeneralConfig.verbosity = 0
        else:
            GroupGeneralConfig.verbosity = 1

        if json_ipc:
            emitter = JSONRPCStdoutEmitter(
                verbosity=GroupGeneralConfig.verbosity)
        else:
            emitter = StdoutEmitter(verbosity=GroupGeneralConfig.verbosity)

        self.emitter = emitter

        if verbose:
            self.emitter.message("Verbose mode is enabled", color='blue')

        # Logging

        if debug and no_logs:
            raise click.BadOptionUsage(
                option_name="no-logs",
                message="--debug and --no-logs cannot be used at the same time."
            )

        # Defaults
        if file_logs is None:
            file_logs = self.log_to_file
        if sentry_logs is None:
            sentry_logs = self.log_to_sentry

        if debug:
            console_logs = True
            file_logs = True
            sentry_logs = False
            log_level = 'debug'

        if no_logs:
            console_logs = False
            file_logs = False
            sentry_logs = False
        if json_ipc:
            console_logs = False

        GlobalLoggerSettings.set_log_level(log_level_name=log_level)

        if console_logs:
            GlobalLoggerSettings.start_console_logging()
        if file_logs:
            GlobalLoggerSettings.start_text_file_logging()
            GlobalLoggerSettings.start_json_file_logging()
        if sentry_logs:
            GlobalLoggerSettings.start_sentry_logging(self.sentry_endpoint)
        if json_ipc:
            GlobalLoggerSettings.stop_console_logging()  # JSON-RPC Protection

        self.debug = debug
        self.json_ipc = json_ipc
Example #9
def main():
    """
    Run the server.
    """

    parser = argparse.ArgumentParser(
        description='Resolve DNS queries from Database')
    parser.add_argument('-c', '--config',
        dest='config_file',
        type=str, action='store',
        default='./config.yml',
        help='Path to the configuration file'
    )
    parser.add_argument('--port', '-p',
        dest='port',
        type=int, action='store',
        default=10053,
        help='Port number for the service'
    )
    parser.add_argument('--dry-run', '-d',
        dest='dry_run',
        action='store_true',
        help='Dry run, just check the config file'
    )
    #parser.add_argument('--verbose', '-v',
    #    dest='verbose',
    #    action='store_true',
    #    help='Be verbose'
    #)
    params = parser.parse_args()

    # Log to stdout, as this is intended to run in docker
    log.startLogging(sys.stdout)
    # Make new logging style compatible to traditional one
    def observer(event, log=log):
        log.msg(event['log_format'].format(**event))
        if 'log_failure' in event:
            log.err(event['log_failure'])
    logger = Logger(namespace='default', observer=observer)

    # Read config file
    config = Config(params.config_file, logger)
    logger.debug("Running with the following parameters:\n{data}", data=config)

    # Dry run
    if params.dry_run:
        sys.exit(0)
    
    # Build a connection lasting the lifetime of the service
    connection = adbapi.ConnectionPool(
        config.db_driver,
        host=config.db_host,
        port=config.db_port,
        user=config.db_user,
        passwd=config.db_passwd,
        db=config.db_name,
        cp_reconnect=True
    )

    # Build a global Resolver lasting the lifetime of the service
    resolver = client.createResolver()
    customResolver = DynamicResolver(config, connection, resolver, logger)

    # Factory and protocol services
    factory  = server.DNSServerFactory(
        caches=[
            cache.CacheResolver(),
        ],
        # Use "clients" instead of "authorities", so caching works
        clients=[
            hosts.Resolver(file=config.dns_hosts, ttl=config.dns_ttl),
            customResolver,
        ]
    )
    protocol = dns.DNSDatagramProtocol(controller=factory)

    # Start polling loop, to avoid timeouts
    poller = LoopingCall(customResolver.poll)
    poller.start(config.poll_time)

    # Listen TCP and UDP
    reactor.listenUDP(params.port, protocol)
    reactor.listenTCP(params.port, factory)
    reactor.run()
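To exercise the server above from the client side, a sketch using twisted.names; port 10053 matches the default above, and the hostname is a placeholder.

from twisted.internet import reactor
from twisted.names import client

# Point a resolver at the locally running service.
resolver = client.createResolver(servers=[("127.0.0.1", 10053)])
d = resolver.lookupAddress("example.com")
# result is a (answers, authority, additional) tuple of record lists.
d.addCallback(lambda result: print(result[0]))
d.addErrback(lambda failure: print(failure.getErrorMessage()))
d.addBoth(lambda _: reactor.stop())
reactor.run()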
Example #10
class HTTPAuthSessionWrapper:
    """
    Wrap a portal, enforcing supported header-based authentication schemes.

    @ivar _portal: The L{Portal} which will be used to retrieve L{IResource}
        avatars.

    @ivar _credentialFactories: A list of L{ICredentialFactory} providers which
        will be used to decode I{Authorization} headers into L{ICredentials}
        providers.
    """
    isLeaf = False
    _log = Logger()

    def __init__(self, portal, credentialFactories):
        """
        Initialize a session wrapper

        @type portal: C{Portal}
        @param portal: The portal that will authenticate the remote client

        @type credentialFactories: C{Iterable}
        @param credentialFactories: The L{ICredentialFactory} providers that
            will be used to decode I{Authorization} headers submitted by the
            remote client
        """
        self._portal = portal
        self._credentialFactories = credentialFactories

    def _authorizedResource(self, request):
        """
        Get the L{IResource} which the given request is authorized to receive.
        If the proper authorization headers are present, the resource will be
        requested from the portal.  If not, an anonymous login attempt will be
        made.
        """
        authheader = request.getHeader(b'authorization')
        if not authheader:
            return util.DeferredResource(self._login(Anonymous()))

        factory, respString = self._selectParseHeader(authheader)
        if factory is None:
            return UnauthorizedResource(self._credentialFactories)
        try:
            credentials = factory.decode(respString, request)
        except error.LoginFailed:
            return UnauthorizedResource(self._credentialFactories)
        except:
            self._log.failure("Unexpected failure from credentials factory")
            return ErrorPage(500, None, None)
        else:
            return util.DeferredResource(self._login(credentials))

    def render(self, request):
        """
        Find the L{IResource} avatar suitable for the given request, if
        possible, and render it.  Otherwise, perhaps render an error page
        requiring authorization or describing an internal server failure.
        """
        return self._authorizedResource(request).render(request)

    def getChildWithDefault(self, path, request):
        """
        Inspect the Authorization HTTP header, and return a deferred which,
        when fired after successful authentication, will return an authorized
        C{Avatar}. On authentication failure, an C{UnauthorizedResource} will
        be returned, essentially halting further dispatch on the wrapped
        resource and all children.
        """
        # Don't consume any segments of the request - this class should be
        # transparent!
        request.postpath.insert(0, request.prepath.pop())
        return self._authorizedResource(request)

    def _login(self, credentials):
        """
        Get the L{IResource} avatar for the given credentials.

        @return: A L{Deferred} which will be called back with an L{IResource}
            avatar or which will errback if authentication fails.
        """
        d = self._portal.login(credentials, None, IResource)
        d.addCallbacks(self._loginSucceeded, self._loginFailed)
        return d

    def _loginSucceeded(self, args):
        """
        Handle login success by wrapping the resulting L{IResource} avatar
        so that the C{logout} callback will be invoked when rendering is
        complete.
        """
        interface, avatar, logout = args

        class ResourceWrapper(proxyForInterface(IResource, 'resource')):
            """
            Wrap an L{IResource} so that whenever it or a child of it
            completes rendering, the cred logout hook will be invoked.

            An assumption is made here that exactly one L{IResource} from
            among C{avatar} and all of its children will be rendered.  If
            more than one is rendered, C{logout} will be invoked multiple
            times and probably earlier than desired.
            """
            def getChildWithDefault(self, name, request):
                """
                Pass through the lookup to the wrapped resource, wrapping
                the result in L{ResourceWrapper} to ensure C{logout} is
                called when rendering of the child is complete.
                """
                return ResourceWrapper(
                    self.resource.getChildWithDefault(name, request))

            def render(self, request):
                """
                Hook into response generation so that when rendering has
                finished completely (with or without error), C{logout} is
                called.
                """
                request.notifyFinish().addBoth(lambda ign: logout())
                return super(ResourceWrapper, self).render(request)

        return ResourceWrapper(avatar)

    def _loginFailed(self, result):
        """
        Handle login failure by presenting either another challenge (for
        expected authentication/authorization-related failures) or a server
        error page (for anything else).
        """
        if result.check(error.Unauthorized, error.LoginFailed):
            return UnauthorizedResource(self._credentialFactories)
        else:
            self._log.failure(
                "HTTPAuthSessionWrapper.getChildWithDefault encountered "
                "unexpected error",
                failure=result,
            )
            return ErrorPage(500, None, None)

    def _selectParseHeader(self, header):
        """
        Choose an C{ICredentialFactory} from C{_credentialFactories}
        suitable to use to decode the given I{Authorization} header.

        @return: A two-tuple of a factory and the remaining portion of the
            header value to be decoded or a two-tuple of L{None} if no
            factory can decode the header value.
        """
        elements = header.split(b' ')
        scheme = elements[0].lower()
        for fact in self._credentialFactories:
            if fact.scheme == scheme:
                return (fact, b' '.join(elements[1:]))
        return (None, None)

    def putChild(self, path, child):
        # IResource.putChild
        raise NotImplementedError()
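A typical way to wire the wrapper above into a site, sketched with HTTP basic auth; the realm implementation, user, password, and authentication realm string are placeholders.

from zope.interface import implementer
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm, Portal
from twisted.web.guard import BasicCredentialFactory, HTTPAuthSessionWrapper
from twisted.web.resource import IResource
from twisted.web.server import Site
from twisted.web.static import Data

@implementer(IRealm)
class SimpleRealm(object):
    def requestAvatar(self, avatarId, mind, *interfaces):
        if IResource in interfaces:
            # (interface, avatar, logout) -- the triple _loginSucceeded unpacks.
            return (IResource, Data(b"hello", "text/plain"), lambda: None)
        raise NotImplementedError()

checker = InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser(b"alice", b"secret")
portal = Portal(SimpleRealm(), [checker])
root = HTTPAuthSessionWrapper(portal, [BasicCredentialFactory(b"example.org")])
site = Site(root)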
Example #11
def __init__(self):
    self.log = Logger(self.__class__.__name__)

class AppSession(ApplicationSession):

    log = Logger()

    @inlineCallbacks
    def onJoin(self, details):

        middleware = CachingMiddleware(JSONStorage)
        db = TinyDB(os.path.join(os.path.dirname(__file__), 'data',
                                 'user_data.json'),
                    storage=middleware)
        auth_config = {'salt': 'fruitfly', 'iterations': 5000, 'keylen': 32}
        session_user_map = {}
        chars = string.ascii_letters + string.digits + "@!^_()%[]{}"
        pwlen = 12

        self.log.info('auth started')

        def auth(realm, authid, details):
            q = Query()
            results = db.search(q.username == authid)
            if not results:
                raise ApplicationError(
                    "User does not exist",
                    "could not authenticate session - no such user {}".format(
                        authid))
            auth_details = results[0]['auth_details']
            auth_details['authid'] = results[0]['user_details']['fname']
            if results[0]['user_details']['lname']:
                auth_details['authid'] += (" " +
                                           results[0]['user_details']['lname'])
            if not auth_details['authid']: del auth_details['authid']
            session_user_map[details['session']] = {
                'username': authid,
                'details': details
            }
            return auth_details

        yield self.register(auth, six.u('ffbo.auth_server.auth'))
        self.log.info("registered ffbo.auth_server.auth")

        def get_user(session):
            if session in session_user_map:
                return session_user_map[session]
            return {}

        yield self.register(get_user, six.u('ffbo.auth_server.get_user'))
        self.log.info("registered ffbo.auth_server.get_user")

        def generate_password():
            rnd = random.SystemRandom()
            return "".join([rnd.choice(chars) for _ in range(pwlen)])

        def register_user(user_details):
            if user_exists(user_details['username']):
                return {"error": "User already exists. Please try again"}
            try:
                username = user_details['username']
                pw = generate_password()
                salted_pw = ath.derive_key(pw, auth_config['salt'],
                                           auth_config['iterations'],
                                           auth_config['keylen'])
                db_rec = {
                    'username': username,
                    'user_details': user_details,
                    'auth_details': copy.deepcopy(auth_config)
                }
                db_rec['auth_details']['secret'] = salted_pw
                db_rec['auth_details']['role'] = u'user'
                del db_rec['user_details']['username']
                #print "Registered user ",  db_rec
                db.insert(db_rec)
            except Exception as e:
                print(e)
                return {"error": "Unexpected error occurred. Please try again"}
            print("User added to database")
            send_email(user_details, pw, username)
            return {
                "success":
                "Successfully registered. Please check your email for your password."
            }

        yield self.register(register_user,
                            six.u('ffbo.auth_server.register_user'))
        self.log.info("registered ffbo.auth_server.register_user")

        def send_email(user_details, pw, username):
            title = "Thank you for registering at NeuroNLP"
            text = "Hi {fname},\n\n"
            text += "Here are your login details for NeuroNLP.\n\n"
            text += "Username: {username}\nPassword: {pw}\n\n"
            text += "If you have any suggestions or feedback, we would love to hear it!"
            text += " Please use the feedback button on the top left button of the website to write to us.\n\n"
            text += "Thank you,\nFruit Fly Brain Observatory"
            text = text.format(username=username,
                               fname=user_details['fname'],
                               pw=pw)

            msg = MIMEText(text)

            msg['Subject'] = title
            msg['From'] = 'NeuroNLP(Fruit Fly Brain Observatory) <*****@*****.**>'
            msg['To'] = user_details['email']
            sender = msg['From']

            try:
                s = smtplib.SMTP(host='localhost', port=465)
            except Exception as e:
                print(e)
                print("Failed to start SMTP server on localhost")
            try:
                # Use a valid smtp server, otherwise the email notification won't be sent out
                s.sendmail('*****@*****.**',
                           [user_details['email']], msg.as_string())
                middleware.flush()
                print("Email sent to {} for {} {}".format(
                    user_details['email'], user_details['fname'],
                    user_details['lname']))
            except Exception as e:
                print(e)
                print("Failed to send out email")

        def user_exists(username):
            q = Query()
            results = db.search(q.username == username)
            if not results:
                return False
            return True

        yield self.register(user_exists, six.u('ffbo.auth_server.user_exists'))
        self.log.info("registered ffbo.auth_server.user_exists")

        def get_auth_config():
            return auth_config

        yield self.register(get_auth_config,
                            six.u('ffbo.auth_server.get_auth_config'))
        self.log.info("registered ffbo.auth_server.get_auth_config")

        def change_password():
            pass

        def send_new_password():
            pass
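From another WAMP session on the same realm, the procedures registered above could be invoked like this; a sketch, with the username and user details made up.

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def check_user(session):
    # session is a connected autobahn ApplicationSession on the same realm.
    exists = yield session.call(u'ffbo.auth_server.user_exists', u'alice')
    if not exists:
        result = yield session.call(u'ffbo.auth_server.register_user',
                                    {u'username': u'alice',
                                     u'fname': u'Alice', u'lname': u'',
                                     u'email': u'alice@example.com'})
        print(result)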
Example #13
class DataStore(DatabaseStore):
    """
    Incident Management System MySQL data store.
    """

    _log = Logger()

    schemaVersion = 2
    schemaBasePath = Path(__file__).parent / "schema"
    sqlFileExtension = "mysql"

    query = queries


    @attrs(frozen=False)
    class _State(object):
        """
        Internal mutable state for :class:`DataStore`.
        """

        db: Optional[ConnectionPool] = attrib(
            validator=optional(instance_of(ConnectionPool)),
            default=None, init=False,
        )


    hostName: str = attrib(validator=instance_of(str))
    hostPort: int = attrib(validator=instance_of(int))
    database: str = attrib(validator=instance_of(str))
    username: str = attrib(validator=instance_of(str))
    password: str = attrib(validator=instance_of(str))

    _state: _State = attrib(default=Factory(_State), init=False)


    @property
    def _db(self) -> ConnectionPool:
        if self._state.db is None:
            db = ConnectionPool(
                "pymysql",
                host=self.hostName,
                port=self.hostPort,
                database=self.database,
                user=self.username,
                password=self.password,
                cursorclass=Cursor,
                cp_reconnect=True,
            )

            # self._upgradeSchema(db)

            self._state.db = db

        return self._state.db


    async def disconnect(self) -> None:
        """
        See :meth:`DatabaseStore.disconnect`.
        """
        if self._state.db is not None:
            self._state.db.close()
            self._state.db = None


    async def runQuery(
        self, query: Query, parameters: Optional[Parameters] = None
    ) -> Rows:
        if parameters is None:
            parameters = {}

        try:
            return iter(await self._db.runQuery(query.text, parameters))

        except MySQLError as e:
            self._log.critical(
                "Unable to {description}: {error}",
                description=query.description,
                query=query, **parameters, error=e,
            )
            raise StorageError(e)


    async def runOperation(
        self, query: Query, parameters: Optional[Parameters] = None
    ) -> None:
        if parameters is None:
            parameters = {}

        try:
            await self._db.runOperation(query.text, parameters)

        except MySQLError as e:
            self._log.critical(
                "Unable to {description}: {error}",
                description=query.description, error=e,
            )
            raise StorageError(e)


    async def runInteraction(
        self, interaction: Callable, *args: Any, **kwargs: Any
    ) -> Any:
        try:
            return await self._db.runInteraction(interaction, *args, **kwargs)
        except MySQLError as e:
            self._log.critical(
                "Interaction {interaction} failed: {error}",
                interaction=interaction, error=e,
            )
            raise StorageError(e)


    async def dbSchemaVersion(self) -> int:
        """
        See :meth:`DatabaseStore.dbSchemaVersion`.
        """
        try:
            for row in await self._db.runQuery(self.query.schemaVersion.text):
                return cast(int, row["VERSION"])
            else:
                raise StorageError("Invalid schema: no version")

        except MySQLError as e:
            message = e.args[1]
            if (
                message.startswith("Table '") and
                message.endswith(".SCHEMA_INFO' doesn't exist")
            ):
                return 0

            self._log.critical(
                "Unable to {description}: {error}",
                description=self.query.schemaVersion.description, error=e,
            )
            raise StorageError(e)


    async def applySchema(self, sql: str) -> None:
        """
        See :meth:`IMSDataStore.applySchema`.
        """
        def applySchema(txn: Transaction) -> None:
            txn.executescript(sql)

        try:
            await self.runInteraction(applySchema)
        except StorageError as e:
            self._log.critical(
                "Unable to apply schema: {error}", sql=sql, error=e
            )
            raise StorageError(f"Unable to apply schema: {e}")
Example #14
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers = []  # type: List[ILogObserver]

    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))

        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, throw an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = LogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get("loggers",
                                                      DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them getting to PythonStdlibToTwistedLogger and having to be formatted
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
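A hedged example of the `log_config` dict consumed above, inferred from the drain types handled in the loop; the exact schema lives in the drain-parsing code not shown here, and the file path is a placeholder.

log_config = {
    "drains": {
        # DrainType.CONSOLE: write formatted events to stdout.
        "console": {"type": "console", "location": "stdout"},
        # DrainType.FILE_JSON: line-buffered JSON file output.
        "file": {"type": "file_json", "location": "/var/log/synapse.json"},
    },
    "loggers": {
        "synapse": {"level": "INFO"},
    },
}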
Example #15
class GCMRouter(object):
    """GCM Router Implementation"""
    log = Logger()
    dryRun = 0
    collapseKey = "simplepush"
    MAX_TTL = 2419200

    def __init__(self, conf, router_conf, metrics):
        """Create a new GCM router and connect to GCM"""
        self.conf = conf
        self.router_conf = router_conf
        self.metrics = metrics
        self.min_ttl = router_conf.get("ttl", 60)
        self.dryRun = router_conf.get("dryrun", False)
        self.collapseKey = router_conf.get("collapseKey", "simplepush")
        self.gcm = {}
        self.senderIDs = {}
        # Flatten the SenderID list from human readable and init gcmclient
        if not router_conf.get("senderIDs"):
            raise IOError("SenderIDs not configured.")
        for sid in router_conf.get("senderIDs"):
            auth = router_conf.get("senderIDs").get(sid).get("auth")
            self.senderIDs[sid] = auth
            self.gcm[sid] = gcmclient.GCM(auth)
        self._base_tags = ["platform:gcm"]
        self.log.debug("Starting GCM router...")

    def amend_endpoint_response(self, response, router_data):
        # type: (JSONDict, JSONDict) -> None
        response["senderid"] = router_data.get('creds', {}).get('senderID')

    def register(self, uaid, router_data, app_id, *args, **kwargs):
        # type: (str, JSONDict, str, *Any, **Any) -> None
        """Validate that the GCM Instance Token is in the ``router_data``"""
        # "token" is the GCM registration id token generated by the client.
        if "token" not in router_data:
            raise self._error("connect info missing GCM Instance 'token'",
                              status=401)
        # senderid is the remote client's senderID value. This value is
        # very difficult for the client to change, and there was a problem
        # where some clients had an older, invalid senderID. We need to
        # be able to match senderID to its corresponding auth key.
        # If the client has an unexpected or invalid SenderID,
        # it is impossible for us to reach them.
        senderid = app_id
        if senderid not in self.senderIDs:
            raise self._error("Invalid SenderID",
                              status=410,
                              errno=105,
                              uri=kwargs.get('uri'),
                              senderid=senderid)
        # Assign a senderid
        router_data["creds"] = {
            "senderID": senderid,
            "auth": self.senderIDs[senderid]
        }

    def route_notification(self, notification, uaid_data):
        """Start the GCM notification routing, returns a deferred"""
        # Kick the entire notification routing off to a thread
        return deferToThread(self._route, notification, uaid_data)

    def _route(self, notification, uaid_data):
        """Blocking GCM call to route the notification"""
        router_data = uaid_data["router_data"]
        # THIS MUST MATCH THE CHANNELID GENERATED BY THE REGISTRATION SERVICE
        # Currently this value is in hex form.
        data = {"chid": notification.channel_id.hex}
        # Payload data is optional. The endpoint handler validates that the
        # correct encryption headers are included with the data.
        if notification.data:
            mdata = self.router_conf.get('max_data', 4096)
            if notification.data_length > mdata:
                raise self._error("This message is intended for a " +
                                  "constrained device and is limited " +
                                  "to 3070 bytes. Converted buffer too " +
                                  "long by %d bytes" %
                                  (notification.data_length - mdata),
                                  413,
                                  errno=104,
                                  log_exception=False)

            data['body'] = notification.data
            data['con'] = notification.headers['encoding']

            if 'encryption' in notification.headers:
                data['enc'] = notification.headers.get('encryption')
            if 'crypto_key' in notification.headers:
                data['cryptokey'] = notification.headers['crypto_key']
            elif 'encryption_key' in notification.headers:
                data['enckey'] = notification.headers['encryption_key']

        # registration_ids are the GCM instance tokens (specified during
        # registration).
        router_ttl = min(self.MAX_TTL, max(notification.ttl or 0,
                                           self.min_ttl))
        payload = gcmclient.JSONMessage(
            registration_ids=[router_data.get("token")],
            collapse_key=self.collapseKey,
            time_to_live=router_ttl,
            dry_run=self.dryRun or ("dryrun" in router_data),
            data=data,
        )
        try:
            gcm = self.gcm[router_data['creds']['senderID']]
            result = gcm.send(payload)
        except RouterException:
            raise  # pragma nocover
        except KeyError:
            self.log.critical("Missing GCM bridge credentials")
            raise RouterException("Server error", status_code=500, errno=900)
        except gcmclient.GCMAuthenticationError as e:
            self.log.error("GCM Authentication Error: %s" % e)
            raise RouterException("Server error", status_code=500, errno=901)
        except ConnectionError as e:
            self.log.warn("GCM Unavailable: %s" % e)
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(
                                       self._base_tags,
                                       reason="connection_unavailable"))
            raise RouterException("Server error",
                                  status_code=502,
                                  errno=902,
                                  log_exception=False)
        except Exception as e:
            self.log.error("Unhandled exception in GCM Routing: %s" % e)
            raise RouterException("Server error", status_code=500)
        return self._process_reply(result,
                                   uaid_data,
                                   ttl=router_ttl,
                                   notification=notification)

    def _error(self, err, status, **kwargs):
        """Error handler that raises the RouterException"""
        self.log.debug(err, **kwargs)
        return RouterException(err,
                               status_code=status,
                               response_body=err,
                               **kwargs)

    def _process_reply(self, reply, uaid_data, ttl, notification):
        """Process GCM send reply"""
        # acks:
        #  for reg_id, msg_id in reply.success.items():
        # updates
        for old_id, new_id in reply.canonicals.items():
            self.log.debug("GCM id changed : {old} => {new}",
                           old=old_id,
                           new=new_id)
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="reregister"))
            return RouterResponse(status_code=503,
                                  response_body="Please try request again.",
                                  router_data=dict(token=new_id))
        # naks:
        # uninstall:
        for reg_id in reply.not_registered:
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="unregistered"))
            self.log.debug("GCM no longer registered: %s" % reg_id)
            return RouterResponse(
                status_code=410,
                response_body="Endpoint requires client update",
                router_data={},
            )

        #  for reg_id, err_code in reply.failed.items():
        if reply.failed:
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="failure"))
            self.log.debug("GCM failures: {failed()}",
                           failed=lambda: repr(reply.failed.items()))
            raise RouterException(
                "GCM unable to deliver",
                status_code=410,
                response_body="GCM recipient not available.",
                log_exception=False,
            )

        # retries:
        if reply.retry_after:
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  reason="retry"))
            self.log.warn("GCM retry requested: {failed()}",
                          failed=lambda: repr(reply.failed.items()))
            raise RouterException("GCM failure to deliver, retry",
                                  status_code=503,
                                  headers={"Retry-After": reply.retry_after},
                                  response_body="Please try request "
                                  "in {} seconds.".format(reply.retry_after),
                                  log_exception=False)

        self.metrics.increment("notification.bridge.sent",
                               tags=self._base_tags)
        self.metrics.increment("notification.message_data",
                               notification.data_length,
                               tags=make_tags(self._base_tags,
                                              destination='Direct'))
        location = "%s/m/%s" % (self.conf.endpoint_url, notification.version)
        return RouterResponse(status_code=201,
                              response_body="",
                              headers={
                                  "TTL": ttl,
                                  "Location": location
                              },
                              logged_status=200)
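The TTL clamping used in `_route` above, pulled out with a few worked values.

MAX_TTL = 2419200      # four weeks, in seconds
min_ttl = 60           # router_conf.get("ttl", 60)

def clamp_ttl(requested_ttl):
    # None (or 0) falls up to the 60-second floor; anything above
    # four weeks is capped at MAX_TTL.
    return min(MAX_TTL, max(requested_ttl or 0, min_ttl))

assert clamp_ttl(None) == 60
assert clamp_ttl(86400) == 86400        # one day passes through unchanged
assert clamp_ttl(10 ** 9) == 2419200    # capped at MAX_TTL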
Example #16

NO_BLOCKCHAIN_CONNECTION.bool_value(False)

CHARACTER_DESTRUCTION = '''
Delete all {name} character files including:
    - Private and Public Keys ({keystore})
    - Known Nodes             ({nodestore})
    - Node Configuration File ({config})
    - Database                ({database})

Are you sure?'''

SUCCESSFUL_DESTRUCTION = "Successfully destroyed NuCypher configuration"

LOG = Logger('cli.actions')


class UnknownIPAddress(RuntimeError):
    pass


def get_password_from_prompt(prompt: str = "Enter password",
                             envvar: str = '',
                             confirm: bool = False) -> str:
    password = os.environ.get(envvar, NO_PASSWORD)
    if password is NO_PASSWORD:  # Collect password, prefer env var
        password = click.prompt(prompt,
                                confirmation_prompt=confirm,
                                hide_input=True)
    return password
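Usage sketch for the helper above; the environment variable name is illustrative, not one defined by the snippet.

# Prefer an environment variable, fall back to an interactive prompt
# with confirmation (hypothetical variable name).
password = get_password_from_prompt(prompt="Enter keystore password",
                                    envvar='EXAMPLE_KEYSTORE_PASSWORD',
                                    confirm=True)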
Example #17
class Worker:
    """High-level view of a worker. Protocol-level details are not dealt with."""
    name = None
    kind = None
    protocol = None
    persistent = None
    rate_est = None
    log = Logger()

    def __init__(self, protocol, name, kind):
        self.name = name
        self.kind = kind
        self.protocol = protocol
        self.persistent = WorkerDB().get(name, kind)
        self.rate = RateMeter()
        reactor.callLater(0.5, self._get_to_work)

    def __str__(self):
        return "{}/{} @ {}".format(self.name, self.kind, self.protocol.peer)

    def submit(self):
        """invoked by the protocol when a share is submitted"""
        self.rate.mark()
        self.persistent.submit()

    def _get_to_work(self):
        """A miner goes through the following states
           1) hashrate estimation
           2) optimal difficulty search
           3) production
        """
        if self.persistent.optimal_difficulty:
            self._production()
        elif self.persistent.maximum_hashrate:
            self._find_optimal_difficulty()
        else:
            self._estimate_hashrate()

    #### auto-tuning machinery ####
    def _rate_estimation(self, difficulty, callback, timeout=80, args={}):
        """Set the difficulty to `difficulty`, send work and wait for `timeout` seconds. 
           Then fire `callback(difficulty, rate, **args)`. If the rate could not be computed, it is None.
        """
        self.protocol.set_difficulty(difficulty)
        self.protocol.notify()
        self.rate = RateMeter()
        reactor.callLater(timeout, self._rate_estimation_end, difficulty,
                          callback, args)

    def _rate_estimation_end(self, difficulty, callback, args):
        if self.rate.one_minute_rate() == 0:
            self.protocol.log.info(
                "rate estimation failed for {log_source} at difficulty {difficulty}",
                difficulty=difficulty)
            rate = None
        else:
            rate = self.rate.mean_rate()
            self.protocol.log.info(
                "Est. rate={rate:.1f}/s at D={difficulty} [{hashrate:0.1f}Ghash/s] for {log_source} [{count} in {elapsed:.1f}s]",
                rate=rate,
                difficulty=difficulty,
                count=self.rate.count,
                elapsed=self.rate.elapsed_time(),
                hashrate=rate * difficulty * (1 << 32) / 1e9)
        callback(difficulty, rate, **args)

    def _estimate_hashrate(self):
        """Try to estimate the maximum possible hashrate of the worker by setting a high difficulty
           and measuring the rate of shares. If the observed rate is too low, restart with a lower difficulty."""
        def hashrate_callback(difficulty, rate):
            if rate is None or rate <= HASHRATE_ESTIMATION_MINIMUM:
                if difficulty == 1:
                    hashrate_continuation(
                        50e6)  # educated guess; it's probably a CPU miner
                else:
                    # restart with lower difficulty
                    self._rate_estimation(max(1, difficulty // 16),
                                          hashrate_callback,
                                          timeout=HASHRATE_ESTIMATION_TIMEOUT)
            else:
                hashrate_continuation(rate * difficulty * (1 << 32))

        def hashrate_continuation(h):
            self.log.info("Maximum hashrate found: {hashrate} ({log_source})",
                          hashrate=h)
            self.persistent.set_maximum_hashrate(h)
            self._find_optimal_difficulty()

        self.state = "Estimating hashrate"
        self.log.info("starting maximum hashrate estimation ({log_source})")
        self._rate_estimation(HASHRATE_ESTIMATION_DIFFICULTY,
                              hashrate_callback,
                              timeout=HASHRATE_ESTIMATION_TIMEOUT)

    def _find_optimal_difficulty(self):
        def difficulty_callback(difficulty,
                                rate,
                                best_objective=0,
                                measures={}):
            if rate is None:
                # stop search, difficulty too high, exploit previous results
                difficulty_continuation(measures)
                return
            measures[difficulty] = rate
            hashrate = rate * difficulty * (1 << 32)
            objective = rate * (difficulty**(1 / 3))
            # stop if we are at 95% of full hashrate and objective function is decreasing
            if hashrate >= 0.95 * self.persistent.maximum_hashrate and objective <= 0.95 * best_objective:
                difficulty_continuation(measures)
                return
            self.log.info(
                "difficulty search: score={objective:.1f} @ D={D} (best={best:.1f}) ({log_source})",
                objective=objective,
                D=difficulty,
                best=best_objective)
            best_objective = max(best_objective, objective)
            self._rate_estimation(difficulty + 4,
                                  difficulty_callback,
                                  args={
                                      'best_objective': best_objective,
                                      'measures': measures
                                  },
                                  timeout=DIFFICULTY_ESTIMATION_TIMEOUT)

        def difficulty_continuation(measures):
            best_objective = 1
            best_difficulty = 1
            for difficulty, rate in measures.items():
                objective = rate * (difficulty**(1 / 3))
                if objective > best_objective:
                    best_objective = objective
                    best_difficulty = difficulty
            self.log.info(
                "Optimal difficulty: {difficulty}, with objective={objective:.1f} ({log_source})",
                difficulty=best_difficulty,
                objective=best_objective)
            self.persistent.set_optimal_difficulty(best_difficulty)
            self._production()

        self.log.info("starting optimal difficulty search ({log_source})")
        self.state = "Finding optimal difficulty"
        self._rate_estimation(1,
                              difficulty_callback,
                              args={
                                  'best_objective': 0,
                                  'measures': {}
                              },
                              timeout=DIFFICULTY_ESTIMATION_TIMEOUT)

    def _production(self):
        self.state = "Production"
        self.log.info(
            "going into production ({log_source}) at difficulty {difficulty}",
            difficulty=self.persistent.optimal_difficulty)
        self.protocol.set_difficulty(self.persistent.optimal_difficulty)
        self.protocol.notify()
        self.rate = RateMeter()
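The share-rate/difficulty arithmetic used throughout the tuner above, with one worked value.

# One share at difficulty D represents on average D * 2**32 hashes, so:
rate = 10.0        # measured shares per second
difficulty = 64
hashrate = rate * difficulty * (1 << 32)    # ~2.75e12 H/s, i.e. ~2.75 Thash/s

# The search objective trades share rate against variance:
objective = rate * difficulty ** (1 / 3)    # = 40.0 for the values above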
Example #18
class Session(object):

    log = Logger()

    def __init__(self, credentials, api, provider_cert):
        # TODO check if an anonymous credentials is passed.
        # TODO move provider_cert to api object.
        # On creation, it should be able to retrieve all the info it needs
        # (calling bootstrap).
        # TODO could get a "provider" object instead.
        # this provider can have an api attribute,
        # and a "autoconfig" attribute passed on initialization.
        # TODO get a file-descriptor for password if not in credentials
        # TODO merge self._request with config.Provider._http_request ?

        self.username = credentials.username
        self.password = credentials.password
        self._provider_cert = provider_cert
        self._api = api
        self._initialize_session()

    def _initialize_session(self):
        self._agent = cookieAgentFactory(self._provider_cert)
        username = self.username or ''
        password = self.password or ''
        self._srp_auth = _srp.SRPAuthMechanism(username, password)
        self._srp_signup = _srp.SRPSignupMechanism()
        self._srp_password = _srp.SRPPasswordChangeMechanism()
        self._srp_recovery_code = _srp.SRPRecoveryCodeUpdateMechanism()
        self._token = None
        self._uuid = None

    # Session

    @property
    def token(self):
        return self._token

    @property
    def uuid(self):
        return self._uuid

    @property
    def is_authenticated(self):
        return self._srp_auth.srp_user.authenticated()

    @defer.inlineCallbacks
    def authenticate(self):
        uri = self._api.get_handshake_uri()
        met = self._api.get_handshake_method()
        self.log.debug('%s to %s' % (met, uri))
        params = self._srp_auth.get_handshake_params()

        handshake = yield self._request(self._agent,
                                        uri,
                                        values=params,
                                        method=met)

        self._srp_auth.process_handshake(handshake)
        uri = self._api.get_authenticate_uri(login=self.username)
        met = self._api.get_authenticate_method()

        self.log.debug('%s to %s' % (met, uri))
        params = self._srp_auth.get_authentication_params()

        auth = yield self._request(self._agent, uri, values=params, method=met)

        uuid, token = self._srp_auth.process_authentication(auth)
        self._srp_auth.verify_authentication()

        self._uuid = uuid
        self._token = token
        defer.returnValue(OK)

    @_auth_required
    @defer.inlineCallbacks
    def logout(self):
        uri = self._api.get_logout_uri()
        met = self._api.get_logout_method()
        yield self._request(self._agent, uri, method=met)
        self.username = None
        self.password = None
        self._initialize_session()
        defer.returnValue(OK)

    @_auth_required
    @defer.inlineCallbacks
    def change_password(self, password):
        uri = self._api.get_update_user_uri(uid=self._uuid)
        met = self._api.get_update_user_method()
        params = self._srp_password.get_password_params(
            self.username, password)
        update = yield self._request(self._agent,
                                     uri,
                                     values=params,
                                     method=met)
        self.password = password
        self._srp_auth = _srp.SRPAuthMechanism(self.username, password)
        defer.returnValue(OK)

    @_auth_required
    @defer.inlineCallbacks
    def update_recovery_code(self, recovery_code):
        uri = self._api.get_update_user_uri(uid=self._uuid)
        met = self._api.get_update_user_method()
        params = self._srp_recovery_code.get_recovery_code_params(
            self.username, recovery_code)
        update = yield self._request(self._agent,
                                     uri,
                                     values=params,
                                     method=met)
        defer.returnValue(update)

    # User certificates

    def get_vpn_cert(self):
        # TODO pass it to the provider object so that it can save it in the
        # right path.
        uri = self._api.get_vpn_cert_uri()
        met = self._api.get_vpn_cert_method()
        return self._request(self._agent, uri, method=met)

    @_auth_required
    def get_smtp_cert(self):
        # TODO pass it to the provider object so that it can save it in the
        # right path.
        uri = self._api.get_smtp_cert_uri()
        met = self._api.get_smtp_cert_method()
        print(met, "to", uri)
        return self._request(self._agent, uri, method=met)

    def _request(self, *args, **kw):
        kw['token'] = self._token
        return httpRequest(*args, **kw)

    # User management

    @defer.inlineCallbacks
    def signup(self, username, password, invite=None):
        # XXX should check that it_IS_NOT_authenticated
        provider.validate_username(username)
        uri = self._api.get_signup_uri()
        met = self._api.get_signup_method()
        params = self._srp_signup.get_signup_params(username, password, invite)

        signup = yield self._request(self._agent,
                                     uri,
                                     values=params,
                                     method=met)
        registered_user = self._srp_signup.process_signup(signup)
        self.username = username
        self.password = password
        defer.returnValue((OK, registered_user))

    @_auth_required
    def update_user_record(self):
        # FIXME to be implemented
        pass

    # Authentication-protected configuration

    @defer.inlineCallbacks
    def fetch_provider_configs(self, uri, path):
        config = yield self._request(self._agent, uri)
        with open(path, 'w') as cf:
            cf.write(config)
        defer.returnValue('ok')
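How a caller might drive the Session above; a sketch in which the credentials, api object, and provider certificate are built elsewhere.

from twisted.internet import defer

@defer.inlineCallbacks
def login_and_fetch_cert(session):
    # session = Session(credentials, api, provider_cert), built elsewhere.
    yield session.authenticate()
    print("authenticated as", session.uuid)
    cert = yield session.get_vpn_cert()
    defer.returnValue(cert)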
Example #19
GLOBAL_CFG = "/etc/mailmail"
LOCAL_CFG = os.path.expanduser("~/.twisted/mailmail")
SMARTHOST = '127.0.0.1'

ERROR_FMT = """\
Subject: Failed Message Delivery

  Message delivery failed.  The following occurred:

  %s
--
The Twisted sendmail application.
"""

_logObserver = textFileLogObserver(sys.stderr)
_log = Logger(observer=_logObserver)



class Options:
    """
    Store the values of the parsed command-line options to the I{mailmail}
    script.

    @type to: L{list} of L{str}
    @ivar to: The addresses to which to deliver this message.

    @type sender: L{str}
    @ivar sender: The address from which this message is being sent.

    @type body: C{file}
Example #20
class RTL433Demodulator(gr.hier_block2, ExportedState):
    __log = Logger()  # TODO: log to context/client

    def __init__(self, mode='433', input_rate=0, context=None):
        assert input_rate > 0
        assert context is not None
        gr.hier_block2.__init__(self,
                                type(self).__name__,
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(0, 0, 0))

        # The input bandwidth chosen is not primarily determined by the
        # bandwidth of the input signals, but by the frequency error of the
        # transmitters. Therefore it is not too critical, and we can choose
        # the exact rate to make the filtering easy.
        if input_rate <= upper_preferred_demod_rate:
            # Skip having a filter at all.
            self.__band_filter = None
            demod_rate = input_rate
        else:
            # TODO: This gunk is very similar to the stuff that
            # MultistageChannelFilter does. See if we can share some code.
            lower_rate = input_rate
            lower_rate_prev = None
            while lower_rate > upper_preferred_demod_rate and lower_rate != lower_rate_prev:
                lower_rate_prev = lower_rate
                if lower_rate % 5 == 0 and lower_rate > upper_preferred_demod_rate * 3:
                    lower_rate //= 5  # integer division keeps the rate an int
                elif lower_rate % 2 == 0:
                    lower_rate //= 2
                else:
                    # non-integer ratio
                    lower_rate = upper_preferred_demod_rate
                    break
            demod_rate = lower_rate

            self.__band_filter = MultistageChannelFilter(
                input_rate=input_rate,
                output_rate=demod_rate,
                cutoff_freq=demod_rate * 0.4,
                transition_width=demod_rate * 0.2)

        # Subprocess
        # using /usr/bin/env because twisted spawnProcess doesn't support path search
        # pylint: disable=no-member
        self.__process = the_reactor.spawnProcess(
            RTL433ProcessProtocol(context.output_message, self.__log),
            '/usr/bin/env',
            env=None,  # inherit environment
            # These arguments were last reviewed for rtl_433 18.12-142-g6c3ca9b
            args=[
                b'env',
                b'rtl_433',
                b'-F',
                b'json',  # output format
                b'-r',
                str(demod_rate).encode('ascii') +
                b'sps:iq:cf32:-',  # specify input format and to use stdin
                b'-M',
                b'newmodel',
            ],
            childFDs={
                0: 'w',
                1: 'r',
                2: 2
            })
        sink = make_sink_to_process_stdin(self.__process,
                                          itemsize=gr.sizeof_gr_complex)

        agc = analog.agc2_cc(reference=dB(-4))
        agc.set_attack_rate(200 / demod_rate)
        agc.set_decay_rate(200 / demod_rate)

        if self.__band_filter:
            self.connect(self, self.__band_filter, agc)
        else:
            self.connect(self, agc)
        self.connect(agc, sink)

    def _close(self):
        # TODO: This never gets called except in tests. Do this better, like by having an explicit life cycle for demodulators.
        self.__process.loseConnection()

    @exported_value(type=BandShape, changes='never')
    def get_band_shape(self):
        """implements IDemodulator"""
        if self.__band_filter:
            return self.__band_filter.get_shape()
        else:
            # TODO Reuse UnselectiveAMDemodulator's approach to this
            return BandShape(stop_low=0,
                             pass_low=0,
                             pass_high=0,
                             stop_high=0,
                             markers={})

    def get_output_type(self):
        """implements IDemodulator"""
        return no_signal
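
The decimation search in __init__ above can be read in isolation; here is a self-contained sketch of the same loop (upper_preferred_demod_rate is a module-level constant in the original, so a default value is assumed here):

def choose_demod_rate(input_rate, upper_preferred_demod_rate=250000):
    """Divide input_rate by 5 or 2 until it is at or below the preferred
    rate; fall back to the preferred rate itself when no integer ratio
    exists."""
    lower_rate = input_rate
    lower_rate_prev = None
    while lower_rate > upper_preferred_demod_rate and lower_rate != lower_rate_prev:
        lower_rate_prev = lower_rate
        if lower_rate % 5 == 0 and lower_rate > upper_preferred_demod_rate * 3:
            lower_rate //= 5
        elif lower_rate % 2 == 0:
            lower_rate //= 2
        else:
            return upper_preferred_demod_rate  # non-integer ratio
    return lower_rate

# e.g. choose_demod_rate(2400000) -> 240000 (2400000 / 5 / 2)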
Exemple #21
class WSProtocol(WebSocketServerProtocol):

    wslog = Logger()

    #---------------------------------------------------------------------------
    def __init__(self, *args, **kwargs):
        super(WSProtocol, self).__init__(*args, **kwargs)
        self.actionHandlers = {}
        self.actionHandlers['STEERING_UPDATE'] = self.steering_update

    #---------------------------------------------------------------------------
    def onOpen(self):
        pass

    #---------------------------------------------------------------------------
    def onMessage(self, payload, isBinary):
        #-----------------------------------------------------------------------
        # Check the message validity
        #-----------------------------------------------------------------------
        if isBinary:
            return

        #-----------------------------------------------------------------------
        # Decode the message
        #-----------------------------------------------------------------------
        try:
            payload = payload.decode('utf-8')
            data = json.loads(payload)
        except Exception as e:
            self.wslog.debug('Unable to parse message: {}.'.format(str(e)))
            return

        for header in ['type', 'action']:
            if header not in data:
                msg = 'Header "{}" is missing.'.format(header)
                self.wslog.debug(msg)
                return

        if data['type'] != 'ACTION_NO_RESP':
            msg = 'Rejecting non-action message: {}.'.format(data['type'])
            self.wslog.debug(msg)
            self.send_error_response(data.get('id'), msg)
            return

        if data['action'] not in self.actionHandlers:
            msg = 'Unknown action: {}.'.format(data['action'])
            self.wslog.debug(msg)
            self.send_error_response(data.get('id'), msg)
            return

        #-----------------------------------------------------------------------
        # Execute the action
        #-----------------------------------------------------------------------
        self.actionHandlers[data['action']](data)

    #---------------------------------------------------------------------------
    def onClose(self, wasClean, code, reason):
        pass

    #---------------------------------------------------------------------------
    def has_fields(self, data, fields):
        for field in fields:
            if field not in data:
                msg = 'Message for action {}: field "{}" not specified.' \
                    .format(data['action'], field)
                self.wslog.debug(msg)
                return False
        return True

    #---------------------------------------------------------------------------
    def steering_update(self, data):
        if not self.has_fields(data, ['channel', 'value']):
            return

        try:
            self.controller.update_channel(data['channel'], data['value'])
        except Exception as e:
            self.wslog.debug('Steering update failed: {}.'.format(str(e)))
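
For illustration, this is the shape of a message the handler above accepts (field names are read straight from onMessage() and steering_update(); the 'id' field is only consulted when an error response must be sent):

import json

steering_frame = json.dumps({
    'type': 'ACTION_NO_RESP',
    'action': 'STEERING_UPDATE',
    'id': 1,
    'channel': 'throttle',
    'value': 0.5,
})
# Sent as a text (non-binary) WebSocket frame, this is decoded by
# onMessage() and dispatched to steering_update().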
Exemple #22
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
LEAP Session management.
"""
from twisted.internet import defer, reactor
from twisted.logger import Logger

from leap.bitmask.bonafide import _srp
from leap.bitmask.bonafide import provider
from leap.bitmask.bonafide._http import httpRequest, cookieAgentFactory

logger = Logger()

OK = 'ok'


def _auth_required(func):
    """
    Decorate a method so that it will not be called if the instance
    attribute `is_authenticated` does not evaluate to True.
    """
    def decorated(*args, **kwargs):
        instance = args[0]
        allowed = getattr(instance, 'is_authenticated')
        if not allowed:
            raise RuntimeError('This method requires authentication')
        return func(*args, **kwargs)
    return decorated
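
A sketch of the decorator in use (the class below is illustrative, not part of the original module; any object exposing an `is_authenticated` attribute works):

class _Demo:
    is_authenticated = False

    @_auth_required
    def secret(self):
        return 'only when logged in'

d = _Demo()
# d.secret() raises RuntimeError here; after authenticating it succeeds:
d.is_authenticated = True
assert d.secret() == 'only when logged in'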
Exemple #23
class FleetStateTracker:
    """
    A representation of a fleet of NuCypher nodes.
    """
    _checksum = NO_KNOWN_NODES.bool_value(False)
    _nickname = NO_KNOWN_NODES
    _nickname_metadata = NO_KNOWN_NODES
    _tracking = False
    most_recent_node_change = NO_KNOWN_NODES
    snapshot_splitter = BytestringSplitter(32, 4)
    log = Logger("Learning")
    state_template = namedtuple(
        "FleetState", ("nickname", "metadata", "icon", "nodes", "updated"))

    def __init__(self):
        self.additional_nodes_to_track = []
        self.updated = maya.now()
        self._nodes = OrderedDict()
        self.states = OrderedDict()

    def __setitem__(self, key, value):
        self._nodes[key] = value

        if self._tracking:
            self.log.info(
                "Updating fleet state after saving node {}".format(value))
            self.record_fleet_state()
        else:
            self.log.debug("Not updating fleet state.")

    def __getitem__(self, item):
        return self._nodes[item]

    def __bool__(self):
        return bool(self._nodes)

    def __contains__(self, item):
        return item in self._nodes or item in self._nodes.values()

    def __iter__(self):
        yield from self._nodes.values()

    def __len__(self):
        return len(self._nodes)

    def __eq__(self, other):
        return self._nodes == other._nodes

    def __repr__(self):
        return self._nodes.__repr__()

    @property
    def checksum(self):
        return self._checksum

    @checksum.setter
    def checksum(self, checksum_value):
        self._checksum = checksum_value
        self._nickname, self._nickname_metadata = nickname_from_seed(
            checksum_value, number_of_pairs=1)

    @property
    def nickname(self):
        return self._nickname

    @property
    def nickname_metadata(self):
        return self._nickname_metadata

    @property
    def icon(self) -> str:
        if self.nickname_metadata is NO_KNOWN_NODES:
            return str(NO_KNOWN_NODES)
        return self.nickname_metadata[0][1]

    def addresses(self):
        return self._nodes.keys()

    def icon_html(self):
        return icon_from_checksum(checksum=self.checksum,
                                  number_of_nodes=str(len(self)),
                                  nickname_metadata=self.nickname_metadata)

    def snapshot(self):
        fleet_state_checksum_bytes = binascii.unhexlify(self.checksum)
        fleet_state_updated_bytes = self.updated.epoch.to_bytes(
            4, byteorder="big")
        return fleet_state_checksum_bytes + fleet_state_updated_bytes

    def record_fleet_state(self, additional_nodes_to_track=None):
        if additional_nodes_to_track:
            self.additional_nodes_to_track.extend(additional_nodes_to_track)
        if not self._nodes:
            # No news here.
            return
        sorted_nodes = self.sorted()

        sorted_nodes_joined = b"".join(bytes(n) for n in sorted_nodes)
        checksum = keccak_digest(sorted_nodes_joined).hex()
        if checksum not in self.states:
            self.checksum = checksum
            self.updated = maya.now()
            # For now we store the sorted node list.  Someday we probably spin this out into
            # its own class, FleetState, and use it as the basis for partial updates.
            new_state = self.state_template(
                nickname=self.nickname,
                metadata=self.nickname_metadata,
                nodes=sorted_nodes,
                icon=self.icon,
                updated=self.updated,
            )
            self.states[checksum] = new_state
            return checksum, new_state

    def start_tracking_state(self, additional_nodes_to_track=None):
        if additional_nodes_to_track is None:
            additional_nodes_to_track = list()
        self.additional_nodes_to_track.extend(additional_nodes_to_track)
        self._tracking = True
        self.record_fleet_state()

    def sorted(self):
        nodes_to_consider = list(
            self._nodes.values()) + self.additional_nodes_to_track
        return sorted(nodes_to_consider,
                      key=lambda n: n.checksum_public_address)

    def shuffled(self):
        nodes_we_know_about = list(self._nodes.values())
        random.shuffle(nodes_we_know_about)
        return nodes_we_know_about

    def abridged_states_dict(self):
        abridged_states = {}
        for k, v in self.states.items():
            abridged_states[k] = self.abridged_state_details(v)

        return abridged_states

    def abridged_nodes_dict(self):
        abridged_nodes = {}
        for checksum_address, node in self._nodes.items():
            abridged_nodes[checksum_address] = self.abridged_node_details(node)

        return abridged_nodes

    @staticmethod
    def abridged_state_details(state):
        return {
            "nickname": state.nickname,
            "symbol": state.metadata[0][1],
            "color_hex": state.metadata[0][0]['hex'],
            "color_name": state.metadata[0][0]['color'],
            "updated": state.updated.rfc2822()
        }

    @staticmethod
    def abridged_node_details(node):
        try:
            last_seen = node.last_seen.iso8601()
        except AttributeError:  # TODO: This logic belongs somewhere - anywhere - else.
            last_seen = str(
                node.last_seen)  # In case it's the constant NEVER_SEEN
        return {
            "icon_details":
            node.nickname_icon_details(),  # TODO: Mix this in better.
            "rest_url": node.rest_url(),
            "nickname": node.nickname,
            "checksum_address": node.checksum_public_address,
            "timestamp": node.timestamp.iso8601(),
            "last_seen": last_seen,
            "fleet_state_icon": node.fleet_state_icon,
        }
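
The snapshot() wire format above is 32 bytes of fleet-state checksum followed by a 4-byte big-endian epoch timestamp, which is exactly the split that snapshot_splitter = BytestringSplitter(32, 4) describes. A plain-slicing parser, for illustration only:

import binascii

def parse_snapshot(blob):
    checksum_hex = binascii.hexlify(blob[:32]).decode('ascii')
    updated_epoch = int.from_bytes(blob[32:36], byteorder='big')
    return checksum_hex, updated_epoch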
Exemple #24
class TestTwistedClient(unittest.TestCase):
    "Twisted client tests"

    logger = Logger()

    def setUp(self):
        cfg = get_config("ServerConn")
        self.host, self.port = cfg.BEANSTALKD_HOST, int(cfg.BEANSTALKD_PORT)
        self.creator = protocol.ClientCreator(reactor, Beanstalk)

    def tearDown(self):
        self.conn.transport.loseConnection()

    @deferred()  # required for nose integration
    def test_00_Job(self):
        "Twisted beanstalk Job instantiation succeeds"

        def onconn(conn):
            self.conn = conn
            rawjob = {
                'jid': 1,
                'bytes': 13,
                'state': 'ok',
                'data': 'Look!  A job!'
            }
            rawjob.update(conn=conn)
            j = Job(**rawjob)
            assert j["data"] == 'Look!  A job!'
            self.logger.debug("Job created ok")

        connector = self.creator.connectTCP(self.host, self.port)
        return connector.addCallback(onconn)

    @deferred()  # required for nose integration
    def test_01_Stats(self):
        "Twisted client correctly gets stats from server"

        def onconn(conn):
            self.conn = conn
            statist = conn.stats()

            #
            def onstats(stats):
                self.assertEqual(stats["state"], "ok")
                self.logger.debug("got stats ok")

            #
            statist.addCallback(onstats)

        connector = self.creator.connectTCP(self.host, self.port)
        return connector.addCallback(onconn)

    @deferred()  # required for nose integration
    def test_02_Put_Reserve(self):
        "putting and reserving works"

        def check_reserve_ok():
            reserver = self.conn.reserve()

            #
            def on_ok(job, ):
                expected = {
                    'jid': 1,
                    'bytes': 13,
                    'state': 'ok',
                    'data': 'Look!  A job!'
                }
                self.assertEqual(job, expected)
                self.logger.debug("job reservation succeeded")

            reserver.addCallback(on_ok)

            #
            def on_fail(failure):
                raise Exception(failure)

            reserver.addErrback(on_fail)
            return reserver

        def check_put_ok():
            putter = self.conn.put('Look!  A job!', 8192, 0, 300)

            #
            def on_ok(job):
                self.assertEqual(job, {'jid': 1, 'state': 'ok'})
                self.logger.debug("job putting succeeded")

            putter.addCallback(on_ok)

            #
            def on_fail(failure):
                raise Exception(failure)

            putter.addErrback(on_fail)
            return putter

        def onconn(conn):
            "start both putting and reserving"
            self.conn = conn
            p = check_put_ok()
            r = check_reserve_ok()
            # set up the deferred returned to nose
            defer.DeferredList((p, r)).chainDeferred(self.r)

        connector = self.creator.connectTCP(self.host, self.port)
        self.r = defer.Deferred()
        connector.addCallback(onconn)
        return self.r  # chained after completion of putting and reserving
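
Outside the test harness, the client flow these tests exercise looks roughly like this (host and port are assumptions; put() takes data, priority, delay, and TTR, as in test_02 above):

def demo(reactor_):
    creator = protocol.ClientCreator(reactor_, Beanstalk)
    d = creator.connectTCP('localhost', 11300)

    def connected(conn):
        conn.put('Look!  A job!', 8192, 0, 300)
        return conn.reserve()  # fires with the job dict

    return d.addCallback(connected)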
Exemple #25
class FilePasswordDB:
    """
    A file-based, text-based username/password database.

    Records in the datafile for this class are delimited by a particular
    string.  The username appears in a fixed field of the columns delimited
    by this string, as does the password.  Both fields are specifiable.  If
    the passwords are not stored plaintext, a hash function must be supplied
    to convert plaintext passwords to the form stored on disk and this
    CredentialsChecker will only be able to check IUsernamePassword
    credentials.  If the passwords are stored plaintext,
    IUsernameHashedPassword credentials will be checkable as well.
    """

    cache = False
    _credCache = None
    _cacheTimestamp = 0
    _log = Logger()

    def __init__(self,
                 filename,
                 delim=b':',
                 usernameField=0,
                 passwordField=1,
                 caseSensitive=True,
                 hash=None,
                 cache=False):
        """
        @type filename: C{str}
        @param filename: The name of the file from which to read username and
        password information.

        @type delim: C{bytes}
        @param delim: The field delimiter used in the file.

        @type usernameField: C{int}
        @param usernameField: The index of the username after splitting a
        line on the delimiter.

        @type passwordField: C{int}
        @param passwordField: The index of the password after splitting a
        line on the delimiter.

        @type caseSensitive: C{bool}
        @param caseSensitive: If true, consider the case of the username when
        performing a lookup.  Ignore it otherwise.

        @type hash: Three-argument callable or L{None}
        @param hash: A function used to transform the plaintext password
        received over the network to a format suitable for comparison
        against the version stored on disk.  The arguments to the callable
        are the username, the network-supplied password, and the in-file
        version of the password.  If the return value compares equal to the
        version stored on disk, the credentials are accepted.

        @type cache: C{bool}
        @param cache: If true, maintain an in-memory cache of the
        contents of the password file.  On lookups, the mtime of the
        file will be checked, and the file will only be re-parsed if
        the mtime is newer than when the cache was generated.
        """
        self.filename = filename
        self.delim = delim
        self.ufield = usernameField
        self.pfield = passwordField
        self.caseSensitive = caseSensitive
        self.hash = hash
        self.cache = cache

        if self.hash is None:
            # The passwords are stored plaintext.  We can support both
            # plaintext and hashed passwords received over the network.
            self.credentialInterfaces = (credentials.IUsernamePassword,
                                         credentials.IUsernameHashedPassword)
        else:
            # The passwords are hashed on disk.  We can support only
            # plaintext passwords received over the network.
            self.credentialInterfaces = (credentials.IUsernamePassword, )

    def __getstate__(self):
        d = dict(vars(self))
        for k in '_credCache', '_cacheTimestamp':
            try:
                del d[k]
            except KeyError:
                pass
        return d

    def _cbPasswordMatch(self, matched, username):
        if matched:
            return username
        else:
            return failure.Failure(error.UnauthorizedLogin())

    def _loadCredentials(self):
        """
        Loads the credentials from the configured file.

        @return: An iterable of C{username, password} couples.
        @rtype: C{iterable}

        @raise UnauthorizedLogin: when failing to read the credentials from the
            file.
        """
        try:
            with open(self.filename, "rb") as f:
                for line in f:
                    line = line.rstrip()
                    parts = line.split(self.delim)

                    if self.ufield >= len(parts) or self.pfield >= len(parts):
                        continue
                    if self.caseSensitive:
                        yield parts[self.ufield], parts[self.pfield]
                    else:
                        yield parts[self.ufield].lower(), parts[self.pfield]
        except IOError as e:
            self._log.error("Unable to load credentials db: {e!r}", e=e)
            raise error.UnauthorizedLogin()

    def getUser(self, username):
        if not self.caseSensitive:
            username = username.lower()

        if self.cache:
            if self._credCache is None or os.path.getmtime(
                    self.filename) > self._cacheTimestamp:
                self._cacheTimestamp = os.path.getmtime(self.filename)
                self._credCache = dict(self._loadCredentials())
            return username, self._credCache[username]
        else:
            for u, p in self._loadCredentials():
                if u == username:
                    return u, p
            raise KeyError(username)

    def requestAvatarId(self, c):
        try:
            u, p = self.getUser(c.username)
        except KeyError:
            return defer.fail(error.UnauthorizedLogin())
        else:
            up = credentials.IUsernamePassword(c, None)
            if self.hash:
                if up is not None:
                    h = self.hash(up.username, up.password, p)
                    if h == p:
                        return defer.succeed(u)
                return defer.fail(error.UnauthorizedLogin())
            else:
                return defer.maybeDeferred(c.checkPassword, p).addCallback(
                    self._cbPasswordMatch, u)
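
A usage sketch for FilePasswordDB with the default username:password layout (the file path and its contents are hypothetical):

# /tmp/httpd.password contains, one record per line:
#   alice:sesame
#   bob:hunter2
db = FilePasswordDB('/tmp/httpd.password')
d = db.requestAvatarId(credentials.UsernamePassword(b'alice', b'sesame'))
# d fires with b'alice' on a match, or errbacks with UnauthorizedLogin.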
Exemple #26
class SSHUserAuthServer(service.SSHService):
    """
    A service implementing the server side of the 'ssh-userauth' service.  It
    is used to authenticate the user on the other side as being able to access
    this server.

    @ivar name: the name of this service: 'ssh-userauth'
    @type name: L{bytes}
    @ivar authenticatedWith: a list of authentication methods that have
        already been used.
    @type authenticatedWith: L{list}
    @ivar loginTimeout: the number of seconds we wait before disconnecting
        the user for taking too long to authenticate
    @type loginTimeout: L{int}
    @ivar attemptsBeforeDisconnect: the number of failed login attempts we
        allow before disconnecting.
    @type attemptsBeforeDisconnect: L{int}
    @ivar loginAttempts: the number of login attempts that have been made
    @type loginAttempts: L{int}
    @ivar passwordDelay: the number of seconds to delay when the user gives
        an incorrect password
    @type passwordDelay: L{int}
    @ivar interfaceToMethod: a L{dict} mapping credential interfaces to
        authentication methods.  The server checks to see which of the
        cred interfaces have checkers and tells the client that those methods
        are valid for authentication.
    @type interfaceToMethod: L{dict}
    @ivar supportedAuthentications: A list of the supported authentication
        methods.
    @type supportedAuthentications: L{list} of L{bytes}
    @ivar user: the last username the client tried to authenticate with
    @type user: L{bytes}
    @ivar method: the current authentication method
    @type method: L{bytes}
    @ivar nextService: the service the user wants started after authentication
        has been completed.
    @type nextService: L{bytes}
    @ivar portal: the L{twisted.cred.portal.Portal} we are using for
        authentication
    @type portal: L{twisted.cred.portal.Portal}
    @ivar clock: an object with a callLater method.  Stubbed out for testing.
    """

    name = b"ssh-userauth"
    loginTimeout = 10 * 60 * 60
    # 10 hours before we disconnect them
    attemptsBeforeDisconnect = 20
    # 20 login attempts before a disconnect
    passwordDelay = 1  # number of seconds to delay on a failed password
    clock = reactor
    interfaceToMethod = {
        credentials.ISSHPrivateKey: b"publickey",
        credentials.IUsernamePassword: b"password",
    }
    _log = Logger()

    def serviceStarted(self):
        """
        Called when the userauth service is started.  Set up instance
        variables, check if we should allow password authentication (only
        allow if the outgoing connection is encrypted) and set up a login
        timeout.
        """
        self.authenticatedWith = []
        self.loginAttempts = 0
        self.user = None
        self.nextService = None
        self.portal = self.transport.factory.portal

        self.supportedAuthentications = []
        for i in self.portal.listCredentialsInterfaces():
            if i in self.interfaceToMethod:
                self.supportedAuthentications.append(self.interfaceToMethod[i])

        if not self.transport.isEncrypted("in"):
            # don't let us transport password in plaintext
            if b"password" in self.supportedAuthentications:
                self.supportedAuthentications.remove(b"password")
        self._cancelLoginTimeout = self.clock.callLater(
            self.loginTimeout, self.timeoutAuthentication
        )

    def serviceStopped(self):
        """
        Called when the userauth service is stopped.  Cancel the login timeout
        if it's still going.
        """
        if self._cancelLoginTimeout:
            self._cancelLoginTimeout.cancel()
            self._cancelLoginTimeout = None

    def timeoutAuthentication(self):
        """
        Called when the user has timed out on authentication.  Disconnect
        with a DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE message.
        """
        self._cancelLoginTimeout = None
        self.transport.sendDisconnect(
            transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE, b"you took too long"
        )

    def tryAuth(self, kind, user, data):
        """
        Try to authenticate the user with the given method.  Dispatches to a
        auth_* method.

        @param kind: the authentication method to try.
        @type kind: L{bytes}
        @param user: the username the client is authenticating with.
        @type user: L{bytes}
        @param data: authentication specific data sent by the client.
        @type data: L{bytes}
        @return: A Deferred called back if the method succeeded, or erred back
            if it failed.
        @rtype: C{defer.Deferred}
        """
        self._log.debug("{user!r} trying auth {kind!r}", user=user, kind=kind)
        if kind not in self.supportedAuthentications:
            return defer.fail(error.ConchError("unsupported authentication, failing"))
        kind = nativeString(kind.replace(b"-", b"_"))
        f = getattr(self, "auth_{}".format(kind), None)
        if f:
            ret = f(data)
            if not ret:
                return defer.fail(
                    error.ConchError(
                        "{} return None instead of a Deferred".format(kind)
                    )
                )
            else:
                return ret
        return defer.fail(error.ConchError("bad auth type: {}".format(kind)))

    def ssh_USERAUTH_REQUEST(self, packet):
        """
        The client has requested authentication.  Payload::
            string user
            string next service
            string method
            <authentication specific data>

        @type packet: L{bytes}
        """
        user, nextService, method, rest = getNS(packet, 3)
        if user != self.user or nextService != self.nextService:
            self.authenticatedWith = []  # clear auth state
        self.user = user
        self.nextService = nextService
        self.method = method
        d = self.tryAuth(method, user, rest)
        if not d:
            self._ebBadAuth(failure.Failure(error.ConchError("auth returned none")))
            return
        d.addCallback(self._cbFinishedAuth)
        d.addErrback(self._ebMaybeBadAuth)
        d.addErrback(self._ebBadAuth)
        return d

    def _cbFinishedAuth(self, result):
        """
        The callback when user has successfully been authenticated.  For a
        description of the arguments, see L{twisted.cred.portal.Portal.login}.
        We start the service requested by the user.
        """
        (interface, avatar, logout) = result
        self.transport.avatar = avatar
        self.transport.logoutFunction = logout
        service = self.transport.factory.getService(self.transport, self.nextService)
        if not service:
            raise error.ConchError(
                "could not get next service: {}".format(self.nextService)
            )
        self._log.debug(
            "{user!r} authenticated with {method!r}", user=self.user, method=self.method
        )
        self.transport.sendPacket(MSG_USERAUTH_SUCCESS, b"")
        self.transport.setService(service())

    def _ebMaybeBadAuth(self, reason):
        """
        An intermediate errback.  If the reason is
        error.NotEnoughAuthentication, we send a MSG_USERAUTH_FAILURE, but
        with the partial success indicator set.

        @type reason: L{twisted.python.failure.Failure}
        """
        reason.trap(error.NotEnoughAuthentication)
        self.transport.sendPacket(
            MSG_USERAUTH_FAILURE, NS(b",".join(self.supportedAuthentications)) + b"\xff"
        )

    def _ebBadAuth(self, reason):
        """
        The final errback in the authentication chain.  If the reason is
        error.IgnoreAuthentication, we simply return; the authentication
        method has sent its own response.  Otherwise, send a failure message
        and (if the method is not 'none') increment the number of login
        attempts.

        @type reason: L{twisted.python.failure.Failure}
        """
        if reason.check(error.IgnoreAuthentication):
            return
        if self.method != b"none":
            self._log.debug(
                "{user!r} failed auth {method!r}", user=self.user, method=self.method
            )
            if reason.check(UnauthorizedLogin):
                self._log.debug(
                    "unauthorized login: {message}", message=reason.getErrorMessage()
                )
            elif reason.check(error.ConchError):
                self._log.debug("reason: {reason}", reason=reason.getErrorMessage())
            else:
                self._log.failure(
                    "Error checking auth for user {user}",
                    failure=reason,
                    user=self.user,
                )
            self.loginAttempts += 1
            if self.loginAttempts > self.attemptsBeforeDisconnect:
                self.transport.sendDisconnect(
                    transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
                    b"too many bad auths",
                )
                return
        self.transport.sendPacket(
            MSG_USERAUTH_FAILURE, NS(b",".join(self.supportedAuthentications)) + b"\x00"
        )

    def auth_publickey(self, packet):
        """
        Public key authentication.  Payload::
            byte has signature
            string algorithm name
            string key blob
            [string signature] (if has signature is True)

        Create a SSHPublicKey credential and verify it using our portal.
        """
        hasSig = ord(packet[0:1])
        algName, blob, rest = getNS(packet[1:], 2)

        try:
            pubKey = keys.Key.fromString(blob)
        except keys.BadKeyError:
            error = "Unsupported key type {} or bad key".format(algName.decode("ascii"))
            self._log.error(error)
            return defer.fail(UnauthorizedLogin(error))

        signature = hasSig and getNS(rest)[0] or None
        if hasSig:
            b = (
                NS(self.transport.sessionID)
                + bytes((MSG_USERAUTH_REQUEST,))
                + NS(self.user)
                + NS(self.nextService)
                + NS(b"publickey")
                + bytes((hasSig,))
                + NS(pubKey.sshType())
                + NS(blob)
            )
            c = credentials.SSHPrivateKey(self.user, algName, blob, b, signature)
            return self.portal.login(c, None, interfaces.IConchUser)
        else:
            c = credentials.SSHPrivateKey(self.user, algName, blob, None, None)
            return self.portal.login(c, None, interfaces.IConchUser).addErrback(
                self._ebCheckKey, packet[1:]
            )

    def _ebCheckKey(self, reason, packet):
        """
        Called back if the user did not send a signature.  If reason is
        error.ValidPublicKey then this key is valid for the user to
        authenticate with.  Send MSG_USERAUTH_PK_OK.
        """
        reason.trap(error.ValidPublicKey)
        # if we make it here, it means that the publickey is valid
        self.transport.sendPacket(MSG_USERAUTH_PK_OK, packet)
        return failure.Failure(error.IgnoreAuthentication())

    def auth_password(self, packet):
        """
        Password authentication.  Payload::
            string password

        Make a UsernamePassword credential and verify it with our portal.
        """
        password = getNS(packet[1:])[0]
        c = credentials.UsernamePassword(self.user, password)
        return self.portal.login(c, None, interfaces.IConchUser).addErrback(
            self._ebPassword
        )

    def _ebPassword(self, f):
        """
        If the password is invalid, wait before sending the failure in order
        to delay brute-force password guessing.
        """
        d = defer.Deferred()
        self.clock.callLater(self.passwordDelay, d.callback, f)
        return d
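
The ssh_USERAUTH_REQUEST payload documented above can be sketched with the same NS()/getNS() length-prefixed-string helpers the class already uses (the values are illustrative):

packet = (NS(b'alice') + NS(b'ssh-connection') + NS(b'password')
          + b'\x00' + NS(b'sesame'))
user, nextService, method, rest = getNS(packet, 3)
# auth_password() then reads the password as getNS(rest[1:])[0],
# skipping the leading boolean byte.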
Exemple #27
class XMLRPC(resource.Resource):
    """
    A resource that implements XML-RPC.

    You probably want to connect this to '/RPC2'.

    Methods published can return XML-RPC serializable results, Faults,
    Binary, Boolean, DateTime, Deferreds, or Handler instances.

    By default methods beginning with 'xmlrpc_' are published.

    Sub-handlers for prefixed methods (e.g., system.listMethods)
    can be added with putSubHandler. By default, prefixes are
    separated with a '.'. Override self.separator to change this.

    @ivar allowNone: Permit XML translating of Python constant None.
    @type allowNone: C{bool}

    @ivar useDateTime: Present C{datetime} values as C{datetime.datetime}
        objects?
    @type useDateTime: C{bool}
    """

    # Error codes for Twisted, if they conflict with yours then
    # modify them at runtime.
    NOT_FOUND = 8001
    FAILURE = 8002

    isLeaf = 1
    separator = "."
    allowedMethods = (b"POST", )
    _log = Logger()

    def __init__(self, allowNone=False, useDateTime=False):
        resource.Resource.__init__(self)
        self.subHandlers = {}
        self.allowNone = allowNone
        self.useDateTime = useDateTime

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    def putSubHandler(self, prefix, handler):
        self.subHandlers[prefix] = handler

    def getSubHandler(self, prefix):
        return self.subHandlers.get(prefix, None)

    def getSubHandlerPrefixes(self):
        return list(self.subHandlers.keys())

    def render_POST(self, request):
        request.content.seek(0, 0)
        request.setHeader(b"content-type", b"text/xml; charset=utf-8")
        try:
            args, functionPath = xmlrpclib.loads(request.content.read(),
                                                 use_datetime=self.useDateTime)
        except Exception as e:
            f = Fault(self.FAILURE, "Can't deserialize input: %s" % (e, ))
            self._cbRender(f, request)
        else:
            try:
                function = self.lookupProcedure(functionPath)
            except Fault as f:
                self._cbRender(f, request)
            else:
                # Use this list to track whether the response has failed or not.
                # This will be used later on to decide if the result of the
                # Deferred should be written out and Request.finish called.
                responseFailed = []
                request.notifyFinish().addErrback(responseFailed.append)
                if getattr(function, "withRequest", False):
                    d = defer.maybeDeferred(function, request, *args)
                else:
                    d = defer.maybeDeferred(function, *args)
                d.addErrback(self._ebRender)
                d.addCallback(self._cbRender, request, responseFailed)
        return server.NOT_DONE_YET

    def _cbRender(self, result, request, responseFailed=None):
        if responseFailed:
            return

        if isinstance(result, Handler):
            result = result.result
        if not isinstance(result, Fault):
            result = (result, )
        try:
            try:
                content = xmlrpclib.dumps(result,
                                          methodresponse=True,
                                          allow_none=self.allowNone)
            except Exception as e:
                f = Fault(self.FAILURE, "Can't serialize output: %s" % (e, ))
                content = xmlrpclib.dumps(f,
                                          methodresponse=True,
                                          allow_none=self.allowNone)

            if isinstance(content, str):
                content = content.encode("utf8")
            request.setHeader(b"content-length", b"%d" % (len(content), ))
            request.write(content)
        except Exception:
            self._log.failure("")
        request.finish()

    def _ebRender(self, failure):
        if isinstance(failure.value, Fault):
            return failure.value
        self._log.failure("", failure)
        return Fault(self.FAILURE, "error")

    def lookupProcedure(self, procedurePath):
        """
        Given a string naming a procedure, return a callable object for that
        procedure or raise NoSuchFunction.

        The returned object will be called, and should return the result of the
        procedure, a Deferred, or a Fault instance.

        Override in subclasses if you want your own policy.  The base
        implementation is that, given C{'foo'}, C{self.xmlrpc_foo} is returned.
        If C{procedurePath} contains C{self.separator}, the sub-handler for the
        initial prefix is used to search for the remaining path.

        If you override C{lookupProcedure}, you may also want to override
        C{listProcedures} to accurately report the procedures supported by your
        resource, so that clients using the I{system.listMethods} procedure
        receive accurate results.

        @since: 11.1
        """
        if procedurePath.find(self.separator) != -1:
            prefix, procedurePath = procedurePath.split(self.separator, 1)
            handler = self.getSubHandler(prefix)
            if handler is None:
                raise NoSuchFunction(self.NOT_FOUND,
                                     "no such subHandler %s" % prefix)
            return handler.lookupProcedure(procedurePath)

        f = getattr(self, "xmlrpc_%s" % procedurePath, None)
        if not f:
            raise NoSuchFunction(self.NOT_FOUND,
                                 "procedure %s not found" % procedurePath)
        elif not callable(f):
            raise NoSuchFunction(self.NOT_FOUND,
                                 "procedure %s not callable" % procedurePath)
        else:
            return f

    def listProcedures(self):
        """
        Return a list of the names of all xmlrpc procedures.

        @since: 11.1
        """
        return reflect.prefixedMethodNames(self.__class__, "xmlrpc_")
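
A minimal way to publish a procedure with this class, following the docstring's suggestion of mounting it at /RPC2 (the Echoer subclass and the port are illustrative):

from twisted.web import resource, server
from twisted.internet import reactor

class Echoer(XMLRPC):
    def xmlrpc_echo(self, x):
        """Return the argument unchanged."""
        return x

root = resource.Resource()
root.putChild(b'RPC2', Echoer(allowNone=True))
reactor.listenTCP(7080, server.Site(root))
reactor.run()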
Exemple #28
                 else:
                     print(vote["time"],\
                           "DOWNVOTE",\
                           vote["voter"],"=>",vote_event["author"],\
                           vote["rshares"],"(",\
                           start_rshares , "->" , start_rshares + float(vote["rshares"]) , ")")
             #Update the total rshares recorded before our downvote
             start_rshares = start_rshares + float(vote["rshares"])
         if not found:
             print("vote not found, possibly too old.", vote_event["voter"], "=>", vote_event["author"], vote_event["permlink"])
     #Set the above closure as callback.
     opp.on_result(process_content)
 # This is a bit fiddly at this low level, so start nextblock a bit higher than where we start out.
 nextblock = 19933100
 obs = textFileLogObserver(sys.stdout)
 log = Logger(observer=obs,namespace="jsonrpc_test")
 #Create our JSON-RPC RpcClient
 rpcclient = RpcClient(reactor,log,nodelist="stage")
 #Count the number of active block queries
 active_block_queries = 0
 sync_block = None
 #Function for fetching a block and its operations.
 def get_block(blk):
     """Request a single block asynchonously."""
     global active_block_queries
     #This one is for processing the results from get_block
     def process_block(event, client):
         """Process the result from block getting request."""
         global active_block_queries
         global nextblock
         global sync_block
Exemple #29
class Request(Copyable, http.Request, components.Componentized):
    """
    An HTTP request.

    @ivar defaultContentType: A C{bytes} giving the default I{Content-Type}
        value to send in responses if no other value is set.  L{None} disables
        the default.

    @ivar _insecureSession: The L{Session} object representing state that will
        be transmitted over plain-text HTTP.

    @ivar _secureSession: The L{Session} object representing the state that
        will be transmitted only over HTTPS.
    """

    defaultContentType = b"text/html"

    site = None
    appRootURL = None
    __pychecker__ = 'unusednames=issuer'
    _inFakeHead = False
    _encoder = None
    _log = Logger()

    def __init__(self, *args, **kw):
        http.Request.__init__(self, *args, **kw)
        components.Componentized.__init__(self)

    def getStateToCopyFor(self, issuer):
        x = self.__dict__.copy()
        del x['transport']
        # XXX refactor this attribute out; it's from protocol
        # del x['server']
        del x['channel']
        del x['content']
        del x['site']
        self.content.seek(0, 0)
        x['content_data'] = self.content.read()
        x['remote'] = ViewPoint(issuer, self)

        # Address objects aren't jellyable
        x['host'] = _addressToTuple(x['host'])
        x['client'] = _addressToTuple(x['client'])

        # Header objects also aren't jellyable.
        x['requestHeaders'] = list(x['requestHeaders'].getAllRawHeaders())

        return x

    # HTML generation helpers

    def sibLink(self, name):
        """
        Return the text that links to a sibling of the requested resource.
        """
        if self.postpath:
            return (len(self.postpath) * b"../") + name
        else:
            return name

    def childLink(self, name):
        """
        Return the text that links to a child of the requested resource.
        """
        lpp = len(self.postpath)
        if lpp > 1:
            return ((lpp - 1) * b"../") + name
        elif lpp == 1:
            return name
        else:  # lpp == 0
            if len(self.prepath) and self.prepath[-1]:
                return self.prepath[-1] + b'/' + name
            else:
                return name

    def process(self):
        """
        Process a request.
        """

        # get site from channel
        self.site = self.channel.site

        # set various default headers
        self.setHeader(b'server', version)
        self.setHeader(b'date', http.datetimeToString())

        # Resource Identification
        self.prepath = []
        self.postpath = list(map(unquote, self.path[1:].split(b'/')))

        # Short-circuit for requests whose path is '*'.
        if self.path == b'*':
            self._handleStar()
            return

        try:
            resrc = self.site.getResourceFor(self)
            if resource._IEncodingResource.providedBy(resrc):
                encoder = resrc.getEncoder(self)
                if encoder is not None:
                    self._encoder = encoder
            self.render(resrc)
        except:
            self.processingFailed(failure.Failure())

    def write(self, data):
        """
        Write data to the transport (if not responding to a HEAD request).

        @param data: A string to write to the response.
        """
        if not self.startedWriting:
            # Before doing the first write, check to see if a default
            # Content-Type header should be supplied. We omit it on
            # NOT_MODIFIED and NO_CONTENT responses. We also omit it if there
            # is a Content-Length header set to 0, as empty bodies don't need
            # a content-type.
            needsCT = self.code not in (http.NOT_MODIFIED, http.NO_CONTENT)
            contentType = self.responseHeaders.getRawHeaders(b'content-type')
            contentLength = self.responseHeaders.getRawHeaders(
                b'content-length')
            contentLengthZero = contentLength and (contentLength[0] == b'0')

            if (needsCT and contentType is None
                    and self.defaultContentType is not None
                    and not contentLengthZero):
                self.responseHeaders.setRawHeaders(b'content-type',
                                                   [self.defaultContentType])

        # Only let the write happen if we're not generating a HEAD response by
        # faking out the request method.  Note, if we are doing that,
        # startedWriting will never be true, and the above logic may run
        # multiple times.  It will only actually change the responseHeaders
        # once though, so it's still okay.
        if not self._inFakeHead:
            if self._encoder:
                data = self._encoder.encode(data)
            http.Request.write(self, data)

    def finish(self):
        """
        Override C{http.Request.finish} for possible encoding.
        """
        if self._encoder:
            data = self._encoder.finish()
            if data:
                http.Request.write(self, data)
        return http.Request.finish(self)

    def render(self, resrc):
        """
        Ask a resource to render itself.

        @param resrc: a L{twisted.web.resource.IResource}.
        """
        try:
            body = resrc.render(self)
        except UnsupportedMethod as e:
            allowedMethods = e.allowedMethods
            if (self.method == b"HEAD") and (b"GET" in allowedMethods):
                # We must support HEAD (RFC 2616, 5.1.1).  If the
                # resource doesn't, fake it by giving the resource
                # a 'GET' request and then return only the headers,
                # not the body.
                self._log.info("Using GET to fake a HEAD request for {resrc}",
                               resrc=resrc)
                self.method = b"GET"
                self._inFakeHead = True
                body = resrc.render(self)

                if body is NOT_DONE_YET:
                    self._log.info(
                        "Tried to fake a HEAD request for {resrc}, but "
                        "it got away from me.",
                        resrc=resrc)
                    # Oh well, I guess we won't include the content length.
                else:
                    self.setHeader(b'content-length', intToBytes(len(body)))

                self._inFakeHead = False
                self.method = b"HEAD"
                self.write(b'')
                self.finish()
                return

            if self.method in supportedMethods:
                # We MUST include an Allow header
                # (RFC 2616, 10.4.6 and 14.7)
                self.setHeader(b'Allow', b', '.join(allowedMethods))
                s = ('''Your browser approached me (at %(URI)s) with'''
                     ''' the method "%(method)s".  I only allow'''
                     ''' the method%(plural)s %(allowed)s here.''' % {
                         'URI':
                         escape(nativeString(self.uri)),
                         'method':
                         nativeString(self.method),
                         'plural': ((len(allowedMethods) > 1) and 's') or '',
                         'allowed':
                         ', '.join([nativeString(x) for x in allowedMethods])
                     })
                epage = resource.ErrorPage(http.NOT_ALLOWED,
                                           "Method Not Allowed", s)
                body = epage.render(self)
            else:
                epage = resource.ErrorPage(
                    http.NOT_IMPLEMENTED, "Huh?",
                    "I don't know how to treat a %s request." %
                    (escape(self.method.decode("charmap")), ))
                body = epage.render(self)
        # end except UnsupportedMethod

        if body == NOT_DONE_YET:
            return
        if not isinstance(body, bytes):
            body = resource.ErrorPage(
                http.INTERNAL_SERVER_ERROR, "Request did not return bytes",
                "Request: " + util._PRE(reflect.safe_repr(self)) + "<br />" +
                "Resource: " + util._PRE(reflect.safe_repr(resrc)) + "<br />" +
                "Value: " + util._PRE(reflect.safe_repr(body))).render(self)

        if self.method == b"HEAD":
            if len(body) > 0:
                # This is a Bad Thing (RFC 2616, 9.4)
                self._log.info(
                    "Warning: HEAD request {slf} for resource {resrc} is"
                    " returning a message body. I think I'll eat it.",
                    slf=self,
                    resrc=resrc)
                self.setHeader(b'content-length', intToBytes(len(body)))
            self.write(b'')
        else:
            self.setHeader(b'content-length', intToBytes(len(body)))
            self.write(body)
        self.finish()

    def processingFailed(self, reason):
        """
        Finish this request with an indication that processing failed and
        possibly display a traceback.

        @param reason: Reason this request has failed.
        @type reason: L{twisted.python.failure.Failure}

        @return: The reason passed to this method.
        @rtype: L{twisted.python.failure.Failure}
        """
        self._log.failure('', failure=reason)
        if self.site.displayTracebacks:
            body = (b"<html><head><title>web.Server Traceback"
                    b" (most recent call last)</title></head>"
                    b"<body><b>web.Server Traceback"
                    b" (most recent call last):</b>\n\n" +
                    util.formatFailure(reason) + b"\n\n</body></html>\n")
        else:
            body = (b"<html><head><title>Processing Failed"
                    b"</title></head><body>"
                    b"<b>Processing Failed</b></body></html>")

        self.setResponseCode(http.INTERNAL_SERVER_ERROR)
        self.setHeader(b'content-type', b"text/html")
        self.setHeader(b'content-length', intToBytes(len(body)))
        self.write(body)
        self.finish()
        return reason

    def view_write(self, issuer, data):
        """Remote version of write; same interface.
        """
        self.write(data)

    def view_finish(self, issuer):
        """Remote version of finish; same interface.
        """
        self.finish()

    def view_addCookie(self, issuer, k, v, **kwargs):
        """Remote version of addCookie; same interface.
        """
        self.addCookie(k, v, **kwargs)

    def view_setHeader(self, issuer, k, v):
        """Remote version of setHeader; same interface.
        """
        self.setHeader(k, v)

    def view_setLastModified(self, issuer, when):
        """Remote version of setLastModified; same interface.
        """
        self.setLastModified(when)

    def view_setETag(self, issuer, tag):
        """Remote version of setETag; same interface.
        """
        self.setETag(tag)

    def view_setResponseCode(self, issuer, code, message=None):
        """
        Remote version of setResponseCode; same interface.
        """
        self.setResponseCode(code, message)

    def view_registerProducer(self, issuer, producer, streaming):
        """Remote version of registerProducer; same interface.
        (requires a remote producer.)
        """
        self.registerProducer(_RemoteProducerWrapper(producer), streaming)

    def view_unregisterProducer(self, issuer):
        self.unregisterProducer()

    ### these calls remain local

    _secureSession = None
    _insecureSession = None

    @property
    def session(self):
        """
        If a session has already been created or looked up with
        L{Request.getSession}, this will return that object.  (This will always
        be the session that matches the security of the request; so if
        C{forceNotSecure} is used on a secure request, this will not return
        that session.)

        @return: the session attribute
        @rtype: L{Session} or L{None}
        """
        if self.isSecure():
            return self._secureSession
        else:
            return self._insecureSession

    def getSession(self, sessionInterface=None, forceNotSecure=False):
        """
        Check if there is a session cookie, and if not, create it.

        By default, the cookie will be secure for HTTPS requests and not secure
        for HTTP requests.  If for some reason you need access to the insecure
        cookie from a secure request you can set C{forceNotSecure = True}.

        @param forceNotSecure: Should we retrieve a session that will be
            transmitted over HTTP, even if this L{Request} was delivered over
            HTTPS?
        @type forceNotSecure: L{bool}
        """
        # Make sure we aren't creating a secure session on a non-secure page
        secure = self.isSecure() and not forceNotSecure

        if not secure:
            cookieString = b"TWISTED_SESSION"
            sessionAttribute = "_insecureSession"
        else:
            cookieString = b"TWISTED_SECURE_SESSION"
            sessionAttribute = "_secureSession"

        session = getattr(self, sessionAttribute)

        if session is not None:
            # We have a previously created session.
            try:
                # Refresh the session, to keep it alive.
                session.touch()
            except (AlreadyCalled, AlreadyCancelled):
                # Session has already expired.
                session = None

        if session is None:
            # No session was created yet for this request.
            cookiename = b"_".join([cookieString] + self.sitepath)
            sessionCookie = self.getCookie(cookiename)
            if sessionCookie:
                try:
                    session = self.site.getSession(sessionCookie)
                except KeyError:
                    pass
            # if it still hasn't been set, fix it up.
            if not session:
                session = self.site.makeSession()
                self.addCookie(cookiename,
                               session.uid,
                               path=b"/",
                               secure=secure)

        setattr(self, sessionAttribute, session)

        if sessionInterface:
            return session.getComponent(sessionInterface)

        return session

    def _prePathURL(self, prepath):
        port = self.getHost().port
        if self.isSecure():
            default = 443
        else:
            default = 80
        if port == default:
            hostport = ''
        else:
            hostport = ':%d' % port
        prefix = networkString(
            'http%s://%s%s/' %
            (self.isSecure() and 's'
             or '', nativeString(self.getRequestHostname()), hostport))
        path = b'/'.join([quote(segment, safe=b'') for segment in prepath])
        return prefix + path

    def prePathURL(self):
        return self._prePathURL(self.prepath)

    def URLPath(self):
        from twisted.python import urlpath
        return urlpath.URLPath.fromRequest(self)

    def rememberRootURL(self):
        """
        Remember the currently-processed part of the URL for later
        recalling.
        """
        url = self._prePathURL(self.prepath[:-1])
        self.appRootURL = url

    def getRootURL(self):
        """
        Get a previously-remembered URL.
        """
        return self.appRootURL

    def _handleStar(self):
        """
        Handle receiving a request whose path is '*'.

        RFC 7231 defines an OPTIONS * request as being something that a client
        can send as a low-effort way to probe server capabilities or readiness.
        Rather than bother the user with this, we simply fast-path it back to
        an empty 200 OK. Any non-OPTIONS verb gets a 405 Method Not Allowed
        telling the client they can only use OPTIONS.
        """
        if self.method == b'OPTIONS':
            self.setResponseCode(http.OK)
        else:
            self.setResponseCode(http.NOT_ALLOWED)
            self.setHeader(b'Allow', b'OPTIONS')

        # RFC 7231 says we MUST set content-length 0 when responding to this
        # with no body.
        self.setHeader(b'Content-Length', b'0')
        self.finish()
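
As a hedged illustration of the fast path above, a raw OPTIONS * probe can be
issued with nothing but the standard library (assumption: a twisted.web server
is listening on localhost:8080; the host and port are placeholders):

import socket

# Open a plain TCP connection and send the low-effort capability probe.
probe = socket.create_connection(('localhost', 8080))
probe.sendall(b'OPTIONS * HTTP/1.1\r\n'
              b'Host: localhost\r\n'
              b'Connection: close\r\n\r\n')
# Expect "HTTP/1.1 200 OK" with "Content-Length: 0"; any other verb against
# '*' should instead yield "405 Method Not Allowed" with "Allow: OPTIONS".
print(probe.recv(4096).decode())
probe.close()
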
Example #30
class _WSGIResponse:
    """
    Helper for L{WSGIResource} which drives the WSGI application using a
    threadpool and hooks it up to the L{http.Request}.

    @ivar started: A L{bool} indicating whether or not the response status and
        headers have been written to the request yet.  This may only be read or
        written in the WSGI application thread.

    @ivar reactor: An L{IReactorThreads} provider which is used to call methods
        on the request in the I/O thread.

    @ivar threadpool: A L{ThreadPool} which is used to call the WSGI
        application object in a non-I/O thread.

    @ivar application: The WSGI application object.

    @ivar request: The L{http.Request} upon which the WSGI environment is
        based and to which the application's output will be sent.

    @ivar environ: The WSGI environment L{dict}.

    @ivar status: The HTTP response status L{str} supplied to the WSGI
        I{start_response} callable by the application.

    @ivar headers: A list of HTTP response headers supplied to the WSGI
        I{start_response} callable by the application.

    @ivar _requestFinished: A flag which indicates whether it is possible to
        generate more response data or not.  This is L{False} until
        L{http.Request.notifyFinish} tells us the request is done,
        then L{True}.
    """

    _requestFinished = False
    _log = Logger()

    def __init__(self, reactor, threadpool, application, request):
        self.started = False
        self.reactor = reactor
        self.threadpool = threadpool
        self.application = application
        self.request = request
        self.request.notifyFinish().addBoth(self._finished)

        if request.prepath:
            scriptName = b'/' + b'/'.join(request.prepath)
        else:
            scriptName = b''

        if request.postpath:
            pathInfo = b'/' + b'/'.join(request.postpath)
        else:
            pathInfo = b''

        parts = request.uri.split(b'?', 1)
        if len(parts) == 1:
            queryString = b''
        else:
            queryString = parts[1]

        # All keys and values need to be native strings, i.e. of type str in
        # *both* Python 2 and Python 3, so says PEP-3333.
        self.environ = {
            'REQUEST_METHOD': _wsgiString(request.method),
            'REMOTE_ADDR': _wsgiString(request.getClientIP()),
            'SCRIPT_NAME': _wsgiString(scriptName),
            'PATH_INFO': _wsgiString(pathInfo),
            'QUERY_STRING': _wsgiString(queryString),
            'CONTENT_TYPE': _wsgiString(
                request.getHeader(b'content-type') or ''),
            'CONTENT_LENGTH': _wsgiString(
                request.getHeader(b'content-length') or ''),
            'SERVER_NAME': _wsgiString(request.getRequestHostname()),
            'SERVER_PORT': _wsgiString(str(request.getHost().port)),
            'SERVER_PROTOCOL': _wsgiString(request.clientproto),
        }

        # The application object is entirely in control of response headers;
        # disable the default Content-Type value normally provided by
        # twisted.web.server.Request.
        self.request.defaultContentType = None

        for name, values in request.requestHeaders.getAllRawHeaders():
            name = 'HTTP_' + _wsgiString(name).upper().replace('-', '_')
            # It might be preferable for http.HTTPChannel to clear out
            # newlines.
            self.environ[name] = ','.join(_wsgiString(v)
                                          for v in values).replace('\n', ' ')

        self.environ.update({
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': 'https' if request.isSecure() else 'http',
            'wsgi.run_once': False,
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.errors': _ErrorStream(),
            # Attend: request.content was owned by the I/O thread up until
            # this point.  By wrapping it and putting the result into the
            # environment dictionary, it is effectively being given to
            # another thread.  This means that whatever it is, it has to be
            # safe to access it from two different threads.  The access
            # *should* all be serialized (first the I/O thread writes to
            # it, then the WSGI thread reads from it, then the I/O thread
            # closes it).  However, since the request is made available to
            # arbitrary application code during resource traversal, it's
            # possible that some other code might decide to use it in the
            # I/O thread concurrently with its use in the WSGI thread.
            # More likely than not, this will break.  This seems like an
            # unlikely possibility to me, but if it is to be allowed,
            # something here needs to change. -exarkun
            'wsgi.input': _InputStream(request.content),
        })
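
        # As a rough illustration (assumption: a hypothetical request
        # "GET /app/info?x=1" on port 8080, with prepath [b'app'] and
        # postpath [b'info']), the environ built above contains roughly:
        #
        #     {'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '/app',
        #      'PATH_INFO': '/info', 'QUERY_STRING': 'x=1',
        #      'SERVER_PORT': '8080', 'wsgi.url_scheme': 'http', ...}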

    def _finished(self, ignored):
        """
        Record the end of the response generation for the request being
        serviced.
        """
        self._requestFinished = True

    def startResponse(self, status, headers, excInfo=None):
        """
        The WSGI I{start_response} callable.  The given values are saved until
        they are needed to generate the response.

        This will be called in a non-I/O thread.
        """
        if self.started and excInfo is not None:
            reraise(excInfo[1], excInfo[2])

        # PEP-3333 mandates that status should be a native string. In practice
        # this is mandated by Twisted's HTTP implementation too, so we enforce
        # on both Python 2 and Python 3.
        if not isinstance(status, str):
            raise TypeError("status must be str, not %r (%s)" %
                            (status, type(status).__name__))

        # PEP-3333 mandates that headers should be a plain list, but in
        # practice we work with any sequence type and only warn when it's not
        # a plain list.
        if isinstance(headers, list):
            pass  # This is okay.
        elif isinstance(headers, Sequence):
            warn("headers should be a list, not %r (%s)" %
                 (headers, type(headers).__name__),
                 category=RuntimeWarning)
        else:
            raise TypeError("headers must be a list, not %r (%s)" %
                            (headers, type(headers).__name__))

        # PEP-3333 mandates that each header should be a (str, str) tuple, but
        # in practice we work with any sequence type and only warn when it's
        # not a plain tuple.
        for header in headers:
            if isinstance(header, tuple):
                pass  # This is okay.
            elif isinstance(header, Sequence):
                warn("header should be a (str, str) tuple, not %r (%s)" %
                     (header, type(header).__name__),
                     category=RuntimeWarning)
            else:
                raise TypeError(
                    "header must be a (str, str) tuple, not %r (%s)" %
                    (header, type(header).__name__))

            # However, the sequence MUST contain only 2 elements.
            if len(header) != 2:
                raise TypeError("header must be a (str, str) tuple, not %r" %
                                (header, ))

            # Both elements MUST be native strings. Non-native strings will be
            # rejected by the underlying HTTP machinery in any case, but we
            # reject them here in order to provide a more informative error.
            for elem in header:
                if not isinstance(elem, str):
                    raise TypeError("header must be (str, str) tuple, not %r" %
                                    (header, ))

        self.status = status
        self.headers = headers
        return self.write
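
    # Sketch of a conforming caller (assumption: illustrative application
    # code, not part of this module; the status and header names/values must
    # all be native str):
    #
    #     def application(environ, start_response):
    #         write = start_response('200 OK',
    #                                [('Content-Type', 'text/plain')])
    #         write(b'imperative chunk')   # the callable returned above
    #         return [b'iterable chunk']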

    def write(self, data):
        """
        The WSGI I{write} callable returned by the I{start_response} callable.
        The given bytes will be written to the response body, possibly flushing
        the status and headers first.

        This will be called in a non-I/O thread.
        """

        # PEP-3333 states:
        #
        #   The server or gateway must transmit the yielded bytestrings to the
        #   client in an unbuffered fashion, completing the transmission of
        #   each bytestring before requesting another one.
        #
        # This write() method is used for the imperative and (indirectly) for
        # the more familiar iterable-of-bytestrings WSGI mechanism. It uses
        # C{blockingCallFromThread} to schedule writes. This allows exceptions
        # to propagate up from the underlying HTTP implementation. However,
        # that underlying implementation does not, as yet, provide any way to
        # know if the written data has been transmitted, so this method
        # violates the above part of PEP-3333.
        #
        # PEP-3333 also says that a server may:
        #
        #   Use a different thread to ensure that the block continues to be
        #   transmitted while the application produces the next block.
        #
        # Which suggests that this is actually compliant with PEP-3333,
        # because writes are done in the reactor thread.
        #
        # However, providing some back-pressure may nevertheless be a Good
        # Thing at some point in the future.

        def wsgiWrite(started):
            if not started:
                self._sendResponseHeaders()
            self.request.write(data)

        try:
            return blockingCallFromThread(self.reactor, wsgiWrite,
                                          self.started)
        finally:
            self.started = True

    def _sendResponseHeaders(self):
        """
        Set the response code and response headers on the request object, but
        do not flush them.  The caller is responsible for doing a write in
        order for anything to actually be written out in response to the
        request.

        This must be called in the I/O thread.
        """
        code, message = self.status.split(None, 1)
        code = int(code)
        self.request.setResponseCode(code, _wsgiStringToBytes(message))

        for name, value in self.headers:
            # Don't allow the application to control these required headers.
            if name.lower() not in ('server', 'date'):
                self.request.responseHeaders.addRawHeader(
                    _wsgiStringToBytes(name), _wsgiStringToBytes(value))
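
        # For example (illustrative): '404 Not Found'.split(None, 1) yields
        # ['404', 'Not Found'], so the request gets response code 404 with
        # the reason phrase b'Not Found'.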

    def start(self):
        """
        Start the WSGI application in the threadpool.

        This must be called in the I/O thread.
        """
        self.threadpool.callInThread(self.run)

    def run(self):
        """
        Call the WSGI application object, iterate it, and handle its output.

        This must be called in a non-I/O thread (ie, a WSGI application
        thread).
        """
        try:
            appIterator = self.application(self.environ, self.startResponse)
            for elem in appIterator:
                if elem:
                    self.write(elem)
                if self._requestFinished:
                    break
            close = getattr(appIterator, 'close', None)
            if close is not None:
                close()
        except:

            def wsgiError(started, type, value, traceback):
                self._log.failure("WSGI application error",
                                  failure=Failure(value, type, traceback))
                if started:
                    self.request.loseConnection()
                else:
                    self.request.setResponseCode(INTERNAL_SERVER_ERROR)
                    self.request.finish()

            self.reactor.callFromThread(wsgiError, self.started, *exc_info())
        else:

            def wsgiFinish(started):
                if not self._requestFinished:
                    if not started:
                        self._sendResponseHeaders()
                    self.request.finish()

            self.reactor.callFromThread(wsgiFinish, self.started)
        self.started = True
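
To tie the pieces together, here is a minimal sketch of serving a WSGI
application with the machinery above (assumption: the port number and the
trivial application are placeholders; WSGIResource drives _WSGIResponse
internally):

from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource

def application(environ, start_response):
    # start_response is _WSGIResponse.startResponse under the hood.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello from ', environ['SERVER_NAME'].encode('ascii')]

# The application runs in the reactor's thread pool; I/O stays in the
# reactor thread, exactly as described in _WSGIResponse's docstring.
resource = WSGIResource(reactor, reactor.getThreadPool(), application)
reactor.listenTCP(8080, Site(resource))
reactor.run()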