Example No. 1
class WireServer(object):
    """
    An AMP server for the remote end of a L{WireWorker}.
    
    Construct me with an endpoint description string and either an
    instance or the fully qualified name of a L{WireWorkerUniverse}
    subclass.

    @ivar service: A C{StreamServerEndpointService} from
        C{twisted.application.internet} that you can include in the
        C{application} of a C{.tac} file, thus accepting connections
        to run tasks.
    """
    triggerID = None

    def __init__(self, description, wwu):
        if isinstance(wwu, str):
            klass = reflect.namedObject(wwu)
            wwu = klass(self)
        WireWorkerUniverse.check(wwu)
        self.factory = Factory()
        self.factory.protocol = lambda: amp.AMP(locator=wwu)
        endpoint = endpoints.serverFromString(reactor, description)
        self.service = StreamServerEndpointService(endpoint, self.factory)

    def start(self):
        self.service.startService()
        self.triggerID = reactor.addSystemEventTrigger('before', 'shutdown',
                                                       self.stop)

    def stop(self):
        if self.triggerID is None:
            return defer.succeed(None)
        self.triggerID = None
        return self.service.stopService()
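
The docstring suggests including the service attribute in the application of a .tac file. A minimal sketch of such a file, under the assumption that mypackage.wire exports WireServer and that myapp.workers.Universe is a WireWorkerUniverse subclass (both names are hypothetical):

from twisted.application.service import Application

from mypackage.wire import WireServer   # hypothetical import path

# "myapp.workers.Universe" is a hypothetical WireWorkerUniverse subclass name.
server = WireServer("tcp:1234", "myapp.workers.Universe")
application = Application("wire-worker")
# twistd starts every child service of the application, so there is no need
# to call server.start() here; start() is for running outside of twistd.
server.service.setServiceParent(application)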
Example No. 2
class GoApiWorker(BaseWorker):

    class CONFIG_CLASS(BaseWorker.CONFIG_CLASS):
        worker_name = ConfigText(
            "Name of this Go API worker.", required=True, static=True)
        twisted_endpoint = ConfigServerEndpoint(
            "Twisted endpoint to listen on.", required=True, static=True)
        web_path = ConfigText(
            "The path to serve this resource on.", required=True, static=True)
        health_path = ConfigText(
            "The path to server the health resource on.", default='/health/',
            static=True)
        redis_manager = ConfigDict(
            "Redis client configuration.", default={}, static=True)
        riak_manager = ConfigDict(
            "Riak client configuration.", default={}, static=True)

    _web_service = None

    def _rpc_resource_for_user(self, username):
        rpc = GoApiServer(username, self.vumi_api)
        addIntrospection(rpc)
        return rpc

    def get_health_response(self):
        return "OK"

    @inlineCallbacks
    def setup_worker(self):
        config = self.get_static_config()
        self.vumi_api = yield VumiApi.from_config_async({
            'redis_manager': config.redis_manager,
            'riak_manager': config.riak_manager,
        })
        self.realm = GoUserRealm(self._rpc_resource_for_user)
        site = build_web_site({
            config.web_path: GoUserAuthSessionWrapper(
                self.realm, self.vumi_api),
            config.health_path: httprpc.HttpRpcHealthResource(self),
        })
        self._web_service = StreamServerEndpointService(
            config.twisted_endpoint, site)
        self._web_service.startService()

    @inlineCallbacks
    def teardown_worker(self):
        if self._web_service is not None:
            yield self._web_service.stopService()

    def setup_connectors(self):
        pass
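
setup_worker above wraps a Twisted web site in a StreamServerEndpointService so that the listening port starts and stops with the worker. The same pattern in isolation, using only stock Twisted APIs (the resource and port below are illustrative, not part of the Go API worker):

from twisted.application.internet import StreamServerEndpointService
from twisted.internet import endpoints, reactor
from twisted.web.resource import Resource
from twisted.web.server import Site


class Health(Resource):
    """A trivial health-check resource."""
    isLeaf = True

    def render_GET(self, request):
        return b"OK"


endpoint = endpoints.serverFromString(reactor, "tcp:8080")
service = StreamServerEndpointService(endpoint, Site(Health()))
service.startService()   # begins listening on the endpoint
# service.stopService()  # later; returns a Deferred that fires once the port closes
reactor.run()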
Example No. 3
	def startService(self):
		if self.running or self.loop != -1:
			# Already started
			return

		msg(self.name, 'start')

		# Cancel stop
		if self._stopCall:
			self._stopCall.cancel()
			self._stopCall = None

			# Fire the Deferred returned by the pending stop
			self._stopDeferred.callback(0)
			self._stopDeferred = None

		self.loop = 0

		Service.startService(self)
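
The fragment above cancels a previously scheduled stop when the service is restarted quickly. A self-contained sketch of that delayed-stop pattern (all names below are illustrative, not taken from the original source):

from twisted.application.service import Service
from twisted.internet import defer, reactor


class DelayedStopService(Service):
    """Tear down only after ``stopDelay`` seconds without a restart."""

    stopDelay = 5  # seconds to wait before really stopping

    def __init__(self, name):
        self.setName(name)
        self._stopCall = None
        self._stopDeferred = None

    def startService(self):
        if self.running:
            return
        if self._stopCall is not None:
            # A stop is pending: cancel the teardown and resolve the Deferred
            # that the earlier stopService() call returned.
            self._stopCall.cancel()
            self._stopCall = None
            self._stopDeferred.callback(0)
            self._stopDeferred = None
        Service.startService(self)

    def stopService(self):
        if not self.running:
            return defer.succeed(0)
        Service.stopService(self)
        self._stopDeferred = defer.Deferred()
        self._stopCall = reactor.callLater(self.stopDelay, self._reallyStop)
        return self._stopDeferred

    def _reallyStop(self):
        # The grace period expired without a restart; release resources here.
        self._stopCall = None
        d, self._stopDeferred = self._stopDeferred, None
        d.callback(0)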
Example No. 4
class EndpointServiceTests(TestCase):
    """
    Tests for L{twisted.application.internet}.
    """
    def setUp(self):
        """
        Construct a stub server, a stub factory, and a
        L{StreamServerEndpointService} to test.
        """
        self.fakeServer = FakeServer()
        self.factory = Factory()
        self.svc = StreamServerEndpointService(self.fakeServer, self.factory)

    def test_privilegedStartService(self):
        """
        L{StreamServerEndpointService.privilegedStartService} calls its
        endpoint's C{listen} method with its factory.
        """
        self.svc.privilegedStartService()
        self.assertIdentical(self.factory, self.fakeServer.factory)

    def test_synchronousRaiseRaisesSynchronously(self, thunk=None):
        """
        L{StreamServerEndpointService.startService} should raise synchronously
        if the L{Deferred} returned by its wrapped
        L{IStreamServerEndpoint.listen} has already fired with an errback and
        the L{StreamServerEndpointService}'s C{_raiseSynchronously} flag has
        been set.  This feature is necessary to preserve compatibility with old
        behavior of L{twisted.internet.strports.service}, which is to return a
        service which synchronously raises an exception from C{startService}
        (so that, among other things, twistd will not start running).  However,
        since L{IStreamServerEndpoint.listen} may fail asynchronously, it is a
        bad idea to rely on this behavior.

        @param thunk: If specified, a callable to execute in place of
            C{startService}.
        """
        self.fakeServer.failImmediately = ZeroDivisionError()
        self.svc._raiseSynchronously = True
        self.assertRaises(ZeroDivisionError, thunk or self.svc.startService)

    def test_synchronousRaisePrivileged(self):
        """
        L{StreamServerEndpointService.privilegedStartService} should behave the
        same as C{startService} with respect to
        L{EndpointServiceTests.test_synchronousRaiseRaisesSynchronously}.
        """
        self.test_synchronousRaiseRaisesSynchronously(
            self.svc.privilegedStartService)

    def test_failReportsError(self):
        """
        L{StreamServerEndpointService.startService} and
        L{StreamServerEndpointService.privilegedStartService} should both log
        an exception when the L{Deferred} returned from their wrapped
        L{IStreamServerEndpoint.listen} fails.
        """
        self.svc.startService()
        self.fakeServer.result.errback(ZeroDivisionError())
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(logged), 1)

    def test_asynchronousFailReportsError(self):
        """
        L{StreamServerEndpointService.startService} and
        L{StreamServerEndpointService.privilegedStartService} should both log
        an exception when the L{Deferred} returned from their wrapped
        L{IStreamServerEndpoint.listen} fails asynchronously, even if
        C{_raiseSynchronously} is set.
        """
        self.svc._raiseSynchronously = True
        self.svc.startService()
        self.fakeServer.result.errback(ZeroDivisionError())
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(logged), 1)

    def test_synchronousFailReportsError(self):
        """
        Without the C{_raiseSynchronously} compatibility flag, failing
        immediately has the same behavior as failing later; it logs the error.
        """
        self.fakeServer.failImmediately = ZeroDivisionError()
        self.svc.startService()
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(logged), 1)

    def test_startServiceUnstarted(self):
        """
        L{StreamServerEndpointService.startService} sets the C{running} flag,
        and calls its endpoint's C{listen} method with its factory, if it
        has not yet been started.
        """
        self.svc.startService()
        self.assertIdentical(self.factory, self.fakeServer.factory)
        self.assertEqual(self.svc.running, True)

    def test_startServiceStarted(self):
        """
        L{StreamServerEndpointService.startService} sets the C{running} flag,
        but nothing else, if the service has already been started.
        """
        self.test_privilegedStartService()
        self.svc.startService()
        self.assertEqual(self.fakeServer.listenAttempts, 1)
        self.assertEqual(self.svc.running, True)

    def test_stopService(self):
        """
        L{StreamServerEndpointService.stopService} calls C{stopListening} on
        the L{IListeningPort} returned from its endpoint, returns a
        C{Deferred} which fires once the port has stopped listening, and sets
        C{running} to C{False}.
        """
        self.svc.privilegedStartService()
        self.fakeServer.startedListening()
        # Ensure running gets set to true
        self.svc.startService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEqual(len(l), 0)
        self.fakeServer.stoppedListening()
        self.assertEqual(len(l), 1)
        self.assertFalse(self.svc.running)

    def test_stopServiceBeforeStartFinished(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not yet fired.  No error will be logged
        about the cancellation of the listen attempt.
        """
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addBoth(l.append)
        self.assertEqual(l, [None])
        self.assertEqual(self.flushLoggedErrors(CancelledError), [])

    def test_stopServiceCancelStartError(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not fired yet.  An error will be logged
        if the resulting exception is not L{CancelledError}.
        """
        self.fakeServer.cancelException = ZeroDivisionError()
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEqual(l, [None])
        stoppingErrors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(stoppingErrors), 1)
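
These tests drive a FakeServer that the snippet does not include. A minimal sketch of such a test double, covering only the attributes the tests exercise (Twisted's real test suite may implement it differently):

from zope.interface import implementer

from twisted.internet.defer import CancelledError, Deferred
from twisted.internet.interfaces import IListeningPort, IStreamServerEndpoint


@implementer(IListeningPort)
class FakePort(object):
    """A listening port whose stopListening() result is fired on demand."""
    def __init__(self):
        self.deferred = Deferred()

    def startListening(self):
        pass

    def stopListening(self):
        return self.deferred

    def getHost(self):
        return None


@implementer(IStreamServerEndpoint)
class FakeServer(object):
    """An endpoint whose listen() result is driven manually by each test."""
    failImmediately = None

    def __init__(self):
        self.listenAttempts = 0
        self.factory = None
        self.result = None
        self.port = FakePort()
        self.cancelException = CancelledError()

    def listen(self, factory):
        self.listenAttempts += 1
        self.factory = factory
        self.result = Deferred(
            canceller=lambda d: d.errback(self.cancelException))
        if self.failImmediately is not None:
            self.result.errback(self.failImmediately)
        return self.result

    def startedListening(self):
        # Fire the pending listen() Deferred with the fake port.
        self.result.callback(self.port)

    def stoppedListening(self):
        # Complete the Deferred returned by the port's stopListening().
        self.port.deferred.callback(None)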
Example No. 5
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.

    :ivar dict _current_command: A dictionary containing information about
        connections to which state updates are currently in progress.  The keys
        are protocol instances.  The values are ``_UpdateState`` instances.
    :ivar IReactorTime _reactor: An ``IReactorTime`` provider to be used to
        schedule delays in sending updates.
    :ivar set _connections_pending_update: A ``set`` of connections that are
        currently pending getting an update of state and configuration. An
        empty set indicates that there is no update pending.
    :ivar IDelayedCall _current_pending_update_delayed_call: The
        ``IDelayedCall`` provider for the currently pending call to update
        state/configuration on connected nodes.
    """
    logger = Logger()

    def __init__(self, reactor, cluster_state, configuration_service, endpoint,
                 context_factory):
        """
        :param reactor: See ``ControlServiceLocator.__init__``.
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        :param context_factory: TLS context factory.
        """
        self.connections = set()
        self._reactor = reactor
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._current_command = {}
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint,
            TLSMemoryBIOFactory(
                context_factory,
                False,
                ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))
            )
        )
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(self._schedule_broadcast_update)

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        if self._current_pending_update_delayed_call:
            self._current_pending_update_delayed_call.cancel()
            self._current_pending_update_delayed_call = None
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()

        # Connections are separated into three groups to support a scheme which
        # lets us avoid sending certain updates which we know are not
        # necessary.  This reduces traffic and associated costs (CPU, memory).
        #
        # Other schemes are possible and might produce even better performance.
        # See https://clusterhq.atlassian.net/browse/FLOC-3140 for some
        # brainstorming.

        # Collect connections for which there is currently no unacknowledged
        # update.  These can receive a new update right away.
        can_update = []

        # Collect connections for which there is an unacknowledged update.
        # Since something has changed, these should receive another update once
        # that acknowledgement is received.
        delayed_update = []

        # Collect connections which were already set to receive a delayed
        # update and still haven't sent an acknowledgement.  These will still
        # receive a delayed update but we'll also note that we're going to skip
        # sending one intermediate update to them.
        elided_update = []

        for connection in connections:
            try:
                update = self._current_command[connection]
            except KeyError:
                # There's nothing in the tracking state for this connection.
                # That means there's no unacknowledged update.  That means we
                # can send another update right away.
                can_update.append(connection)
            else:
                # These connections do currently have an unacknowledged update
                # outstanding.
                if update.next_scheduled:
                    # And these connections are also already scheduled to
                    # receive another update after the one they're currently
                    # processing.  That update will include the most up-to-date
                    # information so we're effectively skipping an update
                    # that's no longer useful.
                    elided_update.append(connection)
                else:
                    # These don't have another update scheduled yet so we'll
                    # schedule one.
                    delayed_update.append(connection)

        # Make sure to run the logging action inside the caching block.
        # This lets encoding for logging share the cache with encoding for
        # network traffic.
        with LOG_SEND_CLUSTER_STATE() as action:
            if can_update:
                # If there are any protocols that can be updated right now,
                # we also want to see what updates they receive.  Since
                # logging shares the caching context, it shouldn't be any
                # more expensive to serialize this information into the log
                # now.  We specifically avoid logging this information if
                # no protocols are being updated because the serializing is
                # more expensive in that case and at the same time that
                # information isn't actually useful.
                action.add_success_fields(
                    configuration=configuration, state=state
                )
            else:
                # Eliot wants those fields though.
                action.add_success_fields(configuration=None, state=None)

            for connection in can_update:
                self._update_connection(connection, configuration, state)

            for connection in elided_update:
                AGENT_UPDATE_ELIDED(agent=connection).write()

            for connection in delayed_update:
                self._delayed_update_connection(connection)

    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.

        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(maybeDeferred(
                connection.callRemote,
                ClusterStatusCommand,
                configuration=configuration,
                state=state,
                eliot_context=action
            ))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]
        update.response.addCallback(finished_update)

    def _delayed_update_connection(self, connection):
        """
        Send a ``ClusterStatusCommand`` to an agent after it has acknowledged
        the last one.

        :param ControlAMP connection: The connection to use to send the
            command.  This connection is expected to have previously been sent
            such a command and to not yet have acknowledged it.  Internal state
            related to this will be used and then updated.
        """
        AGENT_UPDATE_DELAYED(agent=connection).write()
        update = self._current_command[connection]
        update.response.addCallback(
            lambda ignored: self._schedule_update([connection]),
        )
        self._current_command[connection] = update.set(next_scheduled=True)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        with AGENT_CONNECTED(agent=connection):
            self.connections.add(connection)
            self._schedule_update([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def _execute_update_connections(self):
        """
        Actually executes an update to all pending connections.
        """
        connections_to_update = self._connections_pending_update
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._send_state_to_connections(connections_to_update)

    def _schedule_update(self, connections):
        """
        Schedule a call to ``_send_state_to_connections``.

        This function adds a delay in the hopes that additional updates will be
        scheduled and they can all be called at once in a batch.

        :param connections: An iterable of connections that will be passed to
            ``_send_state_to_connections``.
        """
        self._connections_pending_update.update(set(connections))

        # If there is no current pending update and there are connections
        # pending an update, we must schedule the delayed call to update
        # connections.
        if (self._current_pending_update_delayed_call is None
                and self._connections_pending_update):
            self._current_pending_update_delayed_call = (
                self._reactor.callLater(
                    CONTROL_SERVICE_BATCHING_DELAY,
                    self._execute_update_connections
                )
            )

    def _schedule_broadcast_update(self):
        """
        Ensure that there is a pending broadcast update call.

        This is called when the state or configuration is updated, to trigger
        a broadcast of the current state and configuration to all nodes.

        In general, it only schedules an update to be broadcast
        ``CONTROL_SERVICE_BATCHING_DELAY`` seconds later, so that multiple
        updates received within that window are coalesced into a single
        broadcast.
        """
        self._schedule_update(self.connections)

    def node_changed(self, source, state_changes):
        """
        We've received a node state update from a connected client.

        :param IClusterStateSource source: Representation of where these
            changes were received from.
        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes_from_source(source, state_changes)
        self._schedule_broadcast_update()
Example No. 6
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.

    :ivar dict _current_command: A dictionary containing information about
        connections to which state updates are currently in progress.  The keys
        are protocol instances.  The values are ``_UpdateState`` instances.
    :ivar IReactorTime _reactor: An ``IReactorTime`` provider to be used to
        schedule delays in sending updates.
    :ivar set _connections_pending_update: A ``set`` of connections that are
        currently pending getting an update of state and configuration. An
        empty set indicates that there is no update pending.
    :ivar IDelayedCall _current_pending_update_delayed_call: The
        ``IDelayedCall`` provider for the currently pending call to update
        state/configuration on connected nodes.
    """
    logger = Logger()

    def __init__(self, reactor, cluster_state, configuration_service, endpoint,
                 context_factory):
        """
        :param reactor: See ``ControlServiceLocator.__init__``.
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        :param context_factory: TLS context factory.
        """
        self._connections = set()
        self._reactor = reactor
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._current_command = {}
        self._last_received_generation = defaultdict(
            lambda: _ConfigAndStateGeneration())
        self._configuration_generation_tracker = GenerationTracker(100)
        self._state_generation_tracker = GenerationTracker(100)
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint,
            TLSMemoryBIOFactory(
                context_factory, False,
                ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(self._schedule_broadcast_update)

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        if self._current_pending_update_delayed_call:
            self._current_pending_update_delayed_call.cancel()
            self._current_pending_update_delayed_call = None
        self.endpoint_service.stopService()
        for connection in self._connections:
            connection.transport.loseConnection()
        self._connections = set()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()

        # Connections are separated into three groups to support a scheme which
        # lets us avoid sending certain updates which we know are not
        # necessary.  This reduces traffic and associated costs (CPU, memory).
        #
        # Other schemes are possible and might produce even better performance.
        # See https://clusterhq.atlassian.net/browse/FLOC-3140 for some
        # brainstorming.

        # Collect connections for which there is currently no unacknowledged
        # update.  These can receive a new update right away.
        can_update = []

        # Collect connections for which there is an unacknowledged update.
        # Since something has changed, these should receive another update once
        # that acknowledgement is received.
        delayed_update = []

        # Collect connections which were already set to receive a delayed
        # update and still haven't sent an acknowledgement.  These will still
        # receive a delayed update but we'll also note that we're going to skip
        # sending one intermediate update to them.
        elided_update = []

        for connection in connections:
            try:
                update = self._current_command[connection]
            except KeyError:
                # There's nothing in the tracking state for this connection.
                # That means there's no unacknowledged update.  That means we
                # can send another update right away.
                can_update.append(connection)
            else:
                # These connections do currently have an unacknowledged update
                # outstanding.
                if update.next_scheduled:
                    # And these connections are also already scheduled to
                    # receive another update after the one they're currently
                    # processing.  That update will include the most up-to-date
                    # information so we're effectively skipping an update
                    # that's no longer useful.
                    elided_update.append(connection)
                else:
                    # These don't have another update scheduled yet so we'll
                    # schedule one.
                    delayed_update.append(connection)

        # Make sure to run the logging action inside the caching block.
        # This lets encoding for logging share the cache with encoding for
        # network traffic.
        with LOG_SEND_CLUSTER_STATE() as action:
            if can_update:
                # If there are any protocols that can be updated right now,
                # we also want to see what updates they receive.  Since
                # logging shares the caching context, it shouldn't be any
                # more expensive to serialize this information into the log
                # now.  We specifically avoid logging this information if
                # no protocols are being updated because the serializing is
                # more expensive in that case and at the same time that
                # information isn't actually useful.
                action.add_success_fields(configuration=configuration,
                                          state=state)
            else:
                # Eliot wants those fields though.
                action.add_success_fields(configuration=None, state=None)

            for connection in can_update:
                self._update_connection(connection, configuration, state)

            for connection in elided_update:
                AGENT_UPDATE_ELIDED(agent=connection).write()

            for connection in delayed_update:
                self._delayed_update_connection(connection)

    def _update_connection(self, connection, configuration, state):
        """
        Send the latest cluster configuration and state to ``connection``.

        :param ControlAMP connection: The connection to use to send the
            command.
        """

        # Set the configuration and the state to the latest versions. It is
        # okay to call this even if the latest configuration is the same
        # object.
        self._configuration_generation_tracker.insert_latest(configuration)
        self._state_generation_tracker.insert_latest(state)

        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():

            # Attempt to compute a diff to send to the connection
            last_received_generations = (
                self._last_received_generation[connection])

            config_gen_tracker = self._configuration_generation_tracker
            configuration_diff = (
                config_gen_tracker.get_diff_from_hash_to_latest(
                    last_received_generations.config_hash))

            state_gen_tracker = self._state_generation_tracker
            state_diff = (state_gen_tracker.get_diff_from_hash_to_latest(
                last_received_generations.state_hash))

            if configuration_diff is not None and state_diff is not None:
                # If both diffs were successfully computed, send a command to
                # send the diffs along with before and after hashes so the
                # nodes can verify the application of the diffs.
                d = DeferredContext(
                    maybeDeferred(connection.callRemote,
                                  ClusterStatusDiffCommand,
                                  configuration_diff=configuration_diff,
                                  start_configuration_generation=(
                                      last_received_generations.config_hash),
                                  end_configuration_generation=(
                                      config_gen_tracker.get_latest_hash()),
                                  state_diff=state_diff,
                                  start_state_generation=(
                                      last_received_generations.state_hash),
                                  end_state_generation=(
                                      state_gen_tracker.get_latest_hash()),
                                  eliot_context=action))
                d.addActionFinish()
            else:
                # Otherwise, just send the latest configuration and state to
                # the node.
                configuration = config_gen_tracker.get_latest()
                state = state_gen_tracker.get_latest()
                # Use ``maybeDeferred`` so if an exception happens,
                # it will be wrapped in a ``Failure`` - see FLOC-3221
                d = DeferredContext(
                    maybeDeferred(
                        connection.callRemote,
                        ClusterStatusCommand,
                        configuration=configuration,
                        configuration_generation=(
                            config_gen_tracker.get_latest_hash()),
                        state=state,
                        state_generation=state_gen_tracker.get_latest_hash(),
                        eliot_context=action))
                d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(response):
            del self._current_command[connection]
            if response:
                config_gen = response['current_configuration_generation']
                state_gen = response['current_state_generation']
                self._last_received_generation[connection] = (
                    _ConfigAndStateGeneration(config_hash=config_gen,
                                              state_hash=state_gen))
                #  If the latest hash was not returned, schedule an update.
                if (self._configuration_generation_tracker.get_latest_hash() !=
                        config_gen
                        or self._state_generation_tracker.get_latest_hash() !=
                        state_gen):
                    self._schedule_update([connection])

        update.response.addCallback(finished_update)

    def _delayed_update_connection(self, connection):
        """
        Send a ``ClusterStatusCommand`` to an agent after it has acknowledged
        the last one.

        :param ControlAMP connection: The connection to use to send the
            command.  This connection is expected to have previously been sent
            such a command and to not yet have acknowledged it.  Internal state
            related to this will be used and then updated.
        """
        AGENT_UPDATE_DELAYED(agent=connection).write()
        update = self._current_command[connection]
        update.response.addCallback(
            lambda ignored: self._schedule_update([connection]), )
        self._current_command[connection] = update.set(next_scheduled=True)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        with AGENT_CONNECTED(agent=connection):
            self._connections.add(connection)
            self._schedule_update([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self._connections.remove(connection)
        if connection in self._connections_pending_update:
            self._connections_pending_update.remove(connection)
        if connection in self._last_received_generation:
            del self._last_received_generation[connection]

    def _execute_update_connections(self):
        """
        Actually executes an update to all pending connections.
        """
        connections_to_update = self._connections_pending_update
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._send_state_to_connections(connections_to_update)

    def _schedule_update(self, connections):
        """
        Schedule a call to ``_send_state_to_connections``.

        This function adds a delay in the hopes that additional updates will be
        scheduled and they can all be called at once in a batch.

        :param connections: An iterable of connections that will be passed to
            ``_send_state_to_connections``.
        """
        self._connections_pending_update.update(set(connections))

        # If there is no current pending update and there are connections
        # pending an update, we must schedule the delayed call to update
        # connections.
        if (self._current_pending_update_delayed_call is None
                and self._connections_pending_update):
            self._current_pending_update_delayed_call = (
                self._reactor.callLater(CONTROL_SERVICE_BATCHING_DELAY,
                                        self._execute_update_connections))

    def _schedule_broadcast_update(self):
        """
        Ensure that there is a pending broadcast update call.

        This is called when the state or configuration is updated, to trigger
        a broadcast of the current state and configuration to all nodes.

        In general, it only schedules an update to be broadcast
        ``CONTROL_SERVICE_BATCHING_DELAY`` seconds later, so that multiple
        updates received within that window are coalesced into a single
        broadcast.
        """
        self._schedule_update(self._connections)

    def node_changed(self, source, state_changes):
        """
        We've received a node state update from a connected client.

        :param IClusterStateSource source: Representation of where these
            changes were received from.
        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes_from_source(source, state_changes)
        self._schedule_broadcast_update()
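
This variant additionally relies on a _ConfigAndStateGeneration record and a GenerationTracker, neither of which is shown. The defaultdict above needs a record whose hashes default to None; a plausible sketch of that record, again assuming pyrsistent:

from pyrsistent import PClass, field


class _ConfigAndStateGeneration(PClass):
    """The latest configuration/state generation hashes seen from one agent."""

    config_hash = field(initial=None)
    state_hash = field(initial=None)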
Example No. 7
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.

    :ivar dict _current_command: A dictionary containing information about
        connections to which state updates are currently in progress.  The keys
        are protocol instances.  The values are ``_UpdateState`` instances.
    """
    logger = Logger()

    def __init__(self, reactor, cluster_state, configuration_service, endpoint,
                 context_factory):
        """
        :param reactor: See ``ControlServiceLocator.__init__``.
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        :param context_factory: TLS context factory.
        """
        self.connections = set()
        self._current_command = {}
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint,
            TLSMemoryBIOFactory(
                context_factory, False,
                ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()

        # Connections are separated into three groups to support a scheme which
        # lets us avoid sending certain updates which we know are not
        # necessary.  This reduces traffic and associated costs (CPU, memory).
        #
        # Other schemes are possible and might produce even better performance.
        # See https://clusterhq.atlassian.net/browse/FLOC-3140 for some
        # brainstorming.

        # Collect connections for which there is currently no unacknowledged
        # update.  These can receive a new update right away.
        can_update = []

        # Collect connections for which there is an unacknowledged update.
        # Since something has changed, these should receive another update once
        # that acknowledgement is received.
        delayed_update = []

        # Collect connections which were already set to receive a delayed
        # update and still haven't sent an acknowledgement.  These will still
        # receive a delayed update but we'll also note that we're going to skip
        # sending one intermediate update to them.
        elided_update = []

        for connection in connections:
            try:
                update = self._current_command[connection]
            except KeyError:
                # There's nothing in the tracking state for this connection.
                # That means there's no unacknowledged update.  That means we
                # can send another update right away.
                can_update.append(connection)
            else:
                # These connections do currently have an unacknowledged update
                # outstanding.
                if update.next_scheduled:
                    # And these connections are also already scheduled to
                    # receive another update after the one they're currently
                    # processing.  That update will include the most up-to-date
                    # information so we're effectively skipping an update
                    # that's no longer useful.
                    elided_update.append(connection)
                else:
                    # These don't have another update scheduled yet so we'll
                    # schedule one.
                    delayed_update.append(connection)

        # Make sure to run the logging action inside the caching block.
        # This lets encoding for logging share the cache with encoding for
        # network traffic.
        with LOG_SEND_CLUSTER_STATE() as action:
            if can_update:
                # If there are any protocols that can be updated right now,
                # we also want to see what updates they receive.  Since
                # logging shares the caching context, it shouldn't be any
                # more expensive to serialize this information into the log
                # now.  We specifically avoid logging this information if
                # no protocols are being updated because the serializing is
                # more expensive in that case and at the same time that
                # information isn't actually useful.
                action.add_success_fields(configuration=configuration,
                                          state=state)
            else:
                # Eliot wants those fields though.
                action.add_success_fields(configuration=None, state=None)

            for connection in can_update:
                self._update_connection(connection, configuration, state)

            for connection in elided_update:
                AGENT_UPDATE_ELIDED(agent=connection).write()

            for connection in delayed_update:
                self._delayed_update_connection(connection)

    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.

        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(
                maybeDeferred(connection.callRemote,
                              ClusterStatusCommand,
                              configuration=configuration,
                              state=state,
                              eliot_context=action))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]

        update.response.addCallback(finished_update)

    def _delayed_update_connection(self, connection):
        """
        Send a ``ClusterStatusCommand`` to an agent after it has acknowledged
        the last one.

        :param ControlAMP connection: The connection to use to send the
            command.  This connection is expected to have previously been sent
            such a command and to not yet have acknowledged it.  Internal state
            related to this will be used and then updated.
        """
        AGENT_UPDATE_DELAYED(agent=connection).write()
        update = self._current_command[connection]
        update.response.addCallback(
            lambda ignored: self._send_state_to_connections([connection]), )
        self._current_command[connection] = update.set(next_scheduled=True)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        with AGENT_CONNECTED(agent=connection):
            self.connections.add(connection)
            self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, source, state_changes):
        """
        We've received a node state update from a connected client.

        :param IClusterStateSource source: Representation of where these
            changes were received from.
        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes_from_source(source, state_changes)
        self._send_state_to_connections(self.connections)
Example No. 8
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.
    """
    def __init__(self, cluster_state, configuration_service, endpoint):
        """
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        """
        self.connections = set()
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint, ServerFactory.forProtocol(lambda: ControlAMP(self)))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()
        for connection in connections:
            connection.callRemote(ClusterStatusCommand,
                                  configuration=configuration,
                                  state=state)
            # Handle errors from callRemote by logging them
            # https://clusterhq.atlassian.net/browse/FLOC-1311

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        self.connections.add(connection)
        self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, node_state):
        """
        We've received a node state update from a connected client.

        :param NodeState node_state: The changed state for the node.
        """
        self.cluster_state.update_node_state(node_state)
        self._send_state_to_connections(self.connections)
Example No. 9
class TestEndpointService(TestCase):
    """
    Tests for L{twisted.application.internet}.
    """

    def setUp(self):
        """
        Construct a stub server, a stub factory, and a
        L{StreamServerEndpointService} to test.
        """
        self.fakeServer = FakeServer()
        self.factory = Factory()
        self.svc = StreamServerEndpointService(self.fakeServer, self.factory)


    def test_privilegedStartService(self):
        """
        L{StreamServerEndpointService.privilegedStartService} calls its
        endpoint's C{listen} method with its factory.
        """
        self.svc.privilegedStartService()
        self.assertIdentical(self.factory, self.fakeServer.factory)


    def test_synchronousRaiseRaisesSynchronously(self, thunk=None):
        """
        L{StreamServerEndpointService.startService} should raise synchronously
        if the L{Deferred} returned by its wrapped
        L{IStreamServerEndpoint.listen} has already fired with an errback and
        the L{StreamServerEndpointService}'s C{_raiseSynchronously} flag has
        been set.  This feature is necessary to preserve compatibility with old
        behavior of L{twisted.internet.strports.service}, which is to return a
        service which synchronously raises an exception from C{startService}
        (so that, among other things, twistd will not start running).  However,
        since L{IStreamServerEndpoint.listen} may fail asynchronously, it is
        a bad idea to rely on this behavior.
        """
        self.fakeServer.failImmediately = ZeroDivisionError()
        self.svc._raiseSynchronously = True
        self.assertRaises(ZeroDivisionError, thunk or self.svc.startService)


    def test_synchronousRaisePrivileged(self):
        """
        L{StreamServerEndpointService.privilegedStartService} should behave the
        same as C{startService} with respect to
        L{TestEndpointService.test_synchronousRaiseRaisesSynchronously}.
        """
        self.test_synchronousRaiseRaisesSynchronously(
            self.svc.privilegedStartService)


    def test_failReportsError(self):
        """
        L{StreamServerEndpointService.startService} and
        L{StreamServerEndpointService.privilegedStartService} should both log
        an exception when the L{Deferred} returned from their wrapped
        L{IStreamServerEndpoint.listen} fails.
        """
        self.svc.startService()
        self.fakeServer.result.errback(ZeroDivisionError())
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEquals(len(logged), 1)


    def test_synchronousFailReportsError(self):
        """
        Without the C{_raiseSynchronously} compatibility flag, failing
        immediately has the same behavior as failing later; it logs the error.
        """
        self.fakeServer.failImmediately = ZeroDivisionError()
        self.svc.startService()
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEquals(len(logged), 1)


    def test_startServiceUnstarted(self):
        """
        L{StreamServerEndpointService.startService} sets the C{running} flag,
        and calls its endpoint's C{listen} method with its factory, if it
        has not yet been started.
        """
        self.svc.startService()
        self.assertIdentical(self.factory, self.fakeServer.factory)
        self.assertEquals(self.svc.running, True)


    def test_startServiceStarted(self):
        """
        L{StreamServerEndpointService.startService} sets the C{running} flag,
        but nothing else, if the service has already been started.
        """
        self.test_privilegedStartService()
        self.svc.startService()
        self.assertEquals(self.fakeServer.listenAttempts, 1)
        self.assertEquals(self.svc.running, True)


    def test_stopService(self):
        """
        L{StreamServerEndpointService.stopService} calls C{stopListening} on
        the L{IListeningPort} returned from its endpoint, returns a
        C{Deferred} which fires once the port has stopped listening, and sets
        C{running} to C{False}.
        """
        self.svc.privilegedStartService()
        self.fakeServer.startedListening()
        # Ensure running gets set to true
        self.svc.startService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEquals(len(l), 0)
        self.fakeServer.stoppedListening()
        self.assertEquals(len(l), 1)
        self.assertFalse(self.svc.running)


    def test_stopServiceBeforeStartFinished(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not yet fired.  No error will be logged
        about the cancellation of the listen attempt.
        """
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addBoth(l.append)
        self.assertEquals(l, [None])
        self.assertEquals(self.flushLoggedErrors(CancelledError), [])


    def test_stopServiceCancelStartError(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not fired yet.  An error will be logged
        if the resulting exception is not L{CancelledError}.
        """
        self.fakeServer.cancelException = ZeroDivisionError()
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEquals(l, [None])
        stoppingErrors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEquals(len(stoppingErrors), 1)
Example No. 10
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.
    """
    logger = Logger()

    def __init__(self, cluster_state, configuration_service, endpoint):
        """
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        """
        self.connections = set()
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint, ServerFactory.forProtocol(lambda: ControlAMP(self)))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()
        with LOG_SEND_CLUSTER_STATE(self.logger,
                                    configuration=configuration,
                                    state=state):
            for connection in connections:
                action = LOG_SEND_TO_AGENT(self.logger, agent=connection)
                with action.context():
                    d = DeferredContext(
                        connection.callRemote(ClusterStatusCommand,
                                              configuration=configuration,
                                              state=state,
                                              eliot_context=action))
                    d.addActionFinish()
                    d.result.addErrback(lambda _: None)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        self.connections.add(connection)
        self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, state_changes):
        """
        We've received a node state update from a connected client.

        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state changes that have taken place.
        """
        self.cluster_state.apply_changes(state_changes)
        self._send_state_to_connections(self.connections)
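
As a usage sketch only (not taken from the original project), the service above could be wired to a TCP endpoint roughly as follows; cluster_state and configuration_service are assumed to be already-built instances of the services named in __init__, and the port number is arbitrary:

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint

# cluster_state and configuration_service are assumed to exist already;
# constructing them is outside the scope of this excerpt.
control_endpoint = TCP4ServerEndpoint(reactor, 4524)
control_service = ControlAMPService(
    cluster_state, configuration_service, control_endpoint)
control_service.startService()
reactor.addSystemEventTrigger('before', 'shutdown', control_service.stopService)
reactor.run()
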
Example no. 12
class BenchmarkAPITestsMixin(object):
    """
    Tests for BenchmarkAPI.
    """
    # The default timeout of 0.005 seconds is not always enough,
    # because we test HTTP requests via an actual TCP/IP connection.
    run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=1)

    RESULT = {u"userdata": {u"branch": "master"}, u"run": 1, u"result": 1,
              u"timestamp": datetime(2016, 1, 1, 0, 0, 5).isoformat(), }

    NO_TIMESTAMP = {u"userdata": {u"branch": "master"}, u"run": 1,
                    u"result": 1, }

    BAD_TIMESTAMP = {u"userdata": {u"branch": "master"}, u"run": 1,
                     u"result": 1, u"timestamp": "noonish", }

    def setUp(self):
        super(BenchmarkAPITestsMixin, self).setUp()

        api = BenchmarkAPI_V1(self.backend)
        site = server.Site(api.app.resource())

        def make_client(listening_port):
            addr = listening_port.getHost()
            self.agent = client.ProxyAgent(
                endpoints.TCP4ClientEndpoint(
                    self.reactor,
                    addr.host,
                    addr.port,
                ),
                self.reactor,
            )

        listening = Deferred()
        listening.addCallback(make_client)
        endpoint = TestEndpoint(self.reactor, listening)
        self.service = StreamServerEndpointService(endpoint, site)
        self.service.startService()
        self.addCleanup(self.service.stopService)
        return listening

    def submit(self, result):
        """
        Submit a result.
        """
        json = dumps(result)
        body = StringProducer(json)
        req = self.agent.request("POST", "/benchmark-results",
                                 bodyProducer=body)

        def add_cleanup(response):
            if response.code == http.CREATED:
                location = response.headers.getRawHeaders(b'Location')[0]
                self.addCleanup(lambda: self.agent.request("DELETE", location))
            return response

        req.addCallback(add_cleanup)

        return req

    def check_response_code(self, response, expected_code):
        """
        Response has the expected response code.
        """
        self.assertEqual(
            response.code, expected_code, "Incorrect response code")
        return response

    def parse_submit_response_body(self, body):
        """
        Check that response to a submit request has the expected
        structure and version.
        Returns an identifier assigned to the submitted object.
        """
        data = loads(body)
        self.assertIn('version', data)
        self.assertEqual(data['version'], 1)
        self.assertIn('id', data)
        return data['id']

    def test_submit_success(self):
        """
        Valid JSON can be successfully submitted.
        """
        req = self.submit(self.RESULT)
        req.addCallback(self.check_response_code, http.CREATED)
        return req

    def test_no_timestamp(self):
        """
        Valid JSON with a missing timestamp results in an HTTP BAD_REQUEST.
        """
        req = self.submit(self.NO_TIMESTAMP)
        req.addCallback(self.check_response_code, http.BAD_REQUEST)
        req.addCallback(lambda _: flush_logged_errors(BadRequest))
        return req

    def test_bad_timestamp(self):
        """
        Valid JSON with an invalid timestamp results in an HTTP BAD_REQUEST.
        """
        req = self.submit(self.BAD_TIMESTAMP)
        req.addCallback(self.check_response_code, http.BAD_REQUEST)
        req.addCallback(lambda _: flush_logged_errors(BadRequest))
        return req

    def test_submit_response_format(self):
        """
        Returned content is the expected JSON.
        """
        req = self.submit(self.RESULT)
        req.addCallback(client.readBody)
        req.addCallback(self.parse_submit_response_body)
        return req

    def test_submit_response_location_header(self):
        """
        Returned Location header has the expected value.
        """
        req = self.submit(self.RESULT)

        def check_location(response):
            location = response.headers.getRawHeaders(b'Location')[0]
            base_uri = response.request.absoluteURI + '/'
            d = client.readBody(response)
            d.addCallback(lambda body: loads(body)['id'])
            d.addCallback(lambda id: urljoin(base_uri, id))
            d.addCallback(
                lambda expected: self.assertEqual(expected, location)
            )
            return d

        req.addCallback(check_location)
        return req

    def check_received_result(self, response, expected_result):
        """
        Response body contains the expected result.
        If it does, return the JSON decoded response body.
        """
        got_body = client.readBody(response)

        def compare(body):
            result = loads(body)
            self.assertEqual(expected_result, result)
            return result

        return got_body.addCallback(compare)

    def test_submit_persists(self):
        """
        A submitted result is stored in the backend and can be retrieved
        using the URI in the Location header.
        """
        req = self.submit(self.RESULT)

        def retrieve(response):
            location = response.headers.getRawHeaders(b'Location')[0]
            return self.agent.request("GET", location)

        req.addCallback(retrieve)
        req.addCallback(self.check_response_code, http.OK)
        req.addCallback(self.check_received_result, self.RESULT)
        return req

    def test_get_idempotent(self):
        """
        Retrieving a result does not modify or remove it.
        """
        req = self.submit(self.RESULT)

        def retrieve_twice(response):
            location = response.headers.getRawHeaders(b'Location')[0]
            got1 = self.agent.request("GET", location)
            got1.addCallback(self.check_response_code, http.OK)
            got1.addCallback(self.check_received_result, self.RESULT)
            got2 = got1.addCallback(
                lambda _: self.agent.request("GET", location)
            )
            got2.addCallback(self.check_response_code, http.OK)
            got2.addCallback(self.check_received_result, self.RESULT)
            return got2

        req.addCallback(retrieve_twice)
        return req

    def test_get_nonexistent(self):
        """
        Getting a non-existent resource is correctly handled.
        """
        location = "/benchmark-results/foobar"
        req = self.agent.request("GET", location)
        req.addCallback(self.check_response_code, http.NOT_FOUND)
        return req

    def test_delete(self):
        """
        A submitted result is stored in the backend and can be deleted
        using the URI in the Location header.
        """
        req = self.submit(self.RESULT)

        def delete(response):
            location = response.headers.getRawHeaders(b'Location')[0]
            deleted = self.agent.request("DELETE", location)
            deleted.addCallback(self.check_response_code, http.NO_CONTENT)
            return deleted

        req.addCallback(delete)
        return req

    def test_get_deleted(self):
        """
        A deleted result cannot be retrieved.
        """
        req = self.submit(self.RESULT)
        # Submit another result, so that we can catch a situation
        # where we get a wrong result instead of the requested,
        # already removed one.
        req = req.addCallback(lambda _: self.submit(self.RESULT))

        def delete_and_get(response):
            location = response.headers.getRawHeaders(b'Location')[0]
            deleted = self.agent.request("DELETE", location)
            got = deleted.addCallback(
                lambda _: self.agent.request("GET", location)
            )
            got.addCallback(self.check_response_code, http.NOT_FOUND)
            return got

        req.addCallback(delete_and_get)
        return req

    def test_delete_deleted(self):
        """
        A deleted result cannot be deleted again.
        """
        req = self.submit(self.RESULT)
        # Submit another result to make sure that the second delete
        # does not succeed on a wrong result.
        req = req.addCallback(lambda _: self.submit(self.RESULT))

        def delete_twice(response):
            location = response.headers.getRawHeaders(b'Location')[0]
            deleted1 = self.agent.request("DELETE", location)
            deleted2 = deleted1.addCallback(
                lambda _: self.agent.request("DELETE", location)
            )
            deleted2.addCallback(self.check_response_code, http.NOT_FOUND)
            return deleted2

        req.addCallback(delete_twice)
        return req

    def test_delete_nonexistent(self):
        """
        Deleting a non-existent resource is correctly handled.
        """
        location = "/benchmark-results/foobar"
        req = self.agent.request("DELETE", location)
        req.addCallback(self.check_response_code, http.NOT_FOUND)
        return req

    BRANCH1_RESULT1 = {u"userdata": {u"branch": u"1"}, u"value": 100,
                       u"timestamp": datetime(2016, 1, 1, 0, 0, 5).isoformat()}
    BRANCH1_RESULT2 = {u"userdata": {u"branch": u"1"}, u"value": 120,
                       u"timestamp": datetime(2016, 1, 1, 0, 0, 7).isoformat()}
    BRANCH2_RESULT1 = {u"userdata": {u"branch": u"2"}, u"value": 110,
                       u"timestamp": datetime(2016, 1, 1, 0, 0, 6).isoformat()}
    BRANCH2_RESULT2 = {u"userdata": {u"branch": u"2"}, u"value": 110,
                       u"timestamp": datetime(2016, 1, 1, 0, 0, 8).isoformat()}

    def setup_results(self):
        """
        Submit some results for testing various queries against them.
        """

        # The results are listed out of timestamp order before being submitted.
        results = [
            self.BRANCH2_RESULT1, self.BRANCH1_RESULT1, self.BRANCH2_RESULT2,
            self.BRANCH1_RESULT2
        ]

        def chained_submit(_, result):
            """
            Discard result of a previous submit and do a new one.
            """
            return self.submit(result)

        # Sequentially submit the results.
        d = succeed(None)
        for result in results:
            d.addCallback(chained_submit, result)
        return d

    def run_query(self, ignored, filter=None, limit=None):
        """
        Invoke the query interface of the HTTP API.

        :param dict filter: The data that the results must include.
        :param int limit: The limit on how many results to return.
        :return: Deferred that fires with a HTTP response.
        """
        query = {}
        if filter:
            query = filter.copy()
        if limit is not None:
            query["limit"] = limit
        if query:
            query_string = "?" + urlencode(query, doseq=True)
        else:
            query_string = ""
        return self.agent.request("GET", "/benchmark-results" + query_string)

    def check_query_result(self, response, expected_results,
                           expected_code=200):
        """
        Check that the given response matches the expected response code
        and that the content is valid JSON that contains the expected
        result.

        :param response: The response to check.
        :param expected_results: The expected results that should be in
            the response.
        :type expected_results: list of dict
        :param expected_code: The expected response code.
        """
        self.check_response_code(response, expected_code)

        d = client.readBody(response)

        def check_body(body):
            data = loads(body)
            self.assertIn('version', data)
            self.assertEqual(data['version'], 1)
            self.assertIn('results', data)
            results = data['results']
            self.assertEqual(expected_results, results)

        d.addCallback(check_body)
        return d

    def test_query_no_filter_no_limit(self):
        """
        All results are returned if no filter and no limit are given.
        """
        d = self.setup_results()
        d.addCallback(self.run_query)
        d.addCallback(
            self.check_query_result,
            expected_results=[
                self.BRANCH2_RESULT2, self.BRANCH1_RESULT2,
                self.BRANCH2_RESULT1, self.BRANCH1_RESULT1
            ],
        )
        return d

    def test_query_with_filter(self):
        """
        All matching results are returned if a filter is given.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, filter={u"branch": u"1"})
        d.addCallback(
            self.check_query_result,
            expected_results=[
                self.BRANCH1_RESULT2, self.BRANCH1_RESULT1,
            ],
        )
        d.addCallback(self.run_query, filter={u"branch": u"2"})
        d.addCallback(
            self.check_query_result,
            expected_results=[
                self.BRANCH2_RESULT2, self.BRANCH2_RESULT1
            ],
        )
        return d

    def test_query_with_zero_limit(self):
        """
        An empty set of results is returned for a limit of zero.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, limit=0)
        d.addCallback(
            self.check_query_result,
            expected_results=[],
        )
        return d

    def test_query_with_limit(self):
        """
        The latest ``limit`` results are returned if no filter is set
        and the specified limit is less than the total number of
        results.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, limit=2)
        d.addCallback(
            self.check_query_result,
            expected_results=[
                self.BRANCH2_RESULT2,
                self.BRANCH1_RESULT2
            ],
        )
        return d

    def test_query_with_filter_and_limit(self):
        """
        The latest ``limit`` results which match the specified filter
        are returned if the limit is less than the total number of
        results.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, filter={u"branch": u"1"}, limit=1)
        d.addCallback(
            self.check_query_result,
            expected_results=[
                self.BRANCH1_RESULT2,
            ],
        )
        return d

    def test_unsupported_query_arg(self):
        """
        ``query`` raises ``BadRequest`` when an unsupported query
        argument is specified.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, filter={u"unsupported": u"ignored"})
        d.addCallback(self.check_response_code, http.BAD_REQUEST)
        d.addCallback(lambda _: flush_logged_errors(BadRequest))
        return d

    def test_multiple_query_args_of_same_type(self):
        """
        ``query`` raises ``BadRequest`` when multiple values for a key
        are specified.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, filter={u"branch": [u"1", u"2"]})
        d.addCallback(self.check_response_code, http.BAD_REQUEST)
        d.addCallback(lambda _: flush_logged_errors(BadRequest))
        return d

    def test_non_integer_limit_query_arg(self):
        """
        ``query`` raises ``BadRequest`` when a non-integer value is
        specified for the `limit` key.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, limit="one")
        d.addCallback(self.check_response_code, http.BAD_REQUEST)
        d.addCallback(lambda _: flush_logged_errors(BadRequest))
        return d

    def test_query_with_negative_limit(self):
        """
        ``query`` raises ``BadRequest`` when a negative value is
        specified for the `limit` key.
        """
        d = self.setup_results()
        d.addCallback(self.run_query, limit=-1)
        d.addCallback(self.check_response_code, http.BAD_REQUEST)
        d.addCallback(lambda _: flush_logged_errors(BadRequest))
        return d
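
TestEndpoint is not defined in this excerpt. One plausible sketch, assuming it wraps a loopback TCP4ServerEndpoint on an ephemeral port and fires the Deferred it was given with the listening port (so that make_client in setUp can build its agent), is:

from twisted.internet.endpoints import TCP4ServerEndpoint


class TestEndpoint(object):
    """Hypothetical helper: listen on an ephemeral loopback port and report it."""

    def __init__(self, reactor, listening):
        self._endpoint = TCP4ServerEndpoint(reactor, 0, interface='127.0.0.1')
        self._listening = listening

    def listen(self, factory):
        d = self._endpoint.listen(factory)

        def report(port):
            # Hand the listening port to the test so it can build its client.
            self._listening.callback(port)
            return port

        return d.addCallback(report)
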
Example no. 13
    def notify(self, title, message):
        if self.wait > 0:
            reactor.callLater(self.wait + 1,  # @UndefinedVariable
                              self.notify, title, message)
            return
        else:
            self.wait += 1
            reactor.callLater(1, self.notified)  # @UndefinedVariable
        if platform == 'win':
            icon = 'logo.ico'
            timeout = 10
        else:
            icon = 'logo.png'
            timeout = 10000
        kwargs = {'app_icon': os.path.join(
            os.path.dirname(os.path.realpath(__file__)), icon),
            'app_name': 'onDemand',
            'title': title,
            'message': message,
            'timeout': timeout}
        notification.notify(**kwargs)

if __name__ == '__main__':
    log.startLogging(sys.stdout)
    endpoint = TCP4ServerEndpoint(reactor, 4343)
    factory = ServiceFactory()
    service = StreamServerEndpointService(endpoint, factory)
    service.startService()
    reactor.run()  # @UndefinedVariable
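
ServiceFactory is not shown in this excerpt. A minimal stand-in that would make the __main__ block runnable, assuming a simple line-echo protocol, could be:

from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver


class EchoLine(LineReceiver):
    """Hypothetical protocol: echo each received line back to the client."""

    def lineReceived(self, line):
        self.sendLine(line)


class ServiceFactory(Factory):
    protocol = EchoLine
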