Example #1
    def volumedriver_list(self):
        """
        Return information about the current state of all volumes.

        :return: Result indicating success.
        """
        listing = DeferredContext(
            self._flocker_client.list_datasets_configuration())

        def got_configured(configured):
            results = []
            for dataset in configured:
                # Datasets without a name can't be used by the Docker plugin:
                if NAME_FIELD not in dataset.metadata:
                    continue
                dataset_name = dataset.metadata[NAME_FIELD]
                d = self._get_path_from_dataset_id(dataset.dataset_id)
                d.addCallback(lambda path, name=dataset_name: (path, name))
                results.append(d)
            return gatherResults(results)

        listing.addCallback(got_configured)

        def got_paths(results):
            return {u"Err": u"",
                    u"Volumes": sorted([
                        {u"Name": name,
                         u"Mountpoint": u"" if path is None else path.path}
                        for (path, name) in results])}
        listing.addCallback(got_paths)
        return listing.result
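Nearly all of the examples on this page follow the same basic shape: start an Eliot action, enter its context while the Deferred is wrapped in DeferredContext, and call addActionFinish() so the action is logged as finished (with success or failure) once the Deferred fires. A minimal, self-contained sketch of that pattern; the action_type name here is invented for illustration:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import succeed

def traced_operation():
    # The action is current only while this block runs; DeferredContext
    # captures it so callbacks added later still log inside it.
    action = start_action(action_type=u"example:traced-operation")
    with action.context():
        d = DeferredContext(succeed(42))
        d.addCallback(lambda result: result * 2)
        # Logs the end of the action when the underlying Deferred fires.
        d.addActionFinish()
        # .result is the plain Deferred, for callers that don't use Eliot.
        return d.result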
Example #2
 def wrapper(self, *args, **kwargs):
     context = start_action(Logger(), action_type=label, args=args, kwargs=kwargs)
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addCallback(log_result)
         d.addActionFinish()
         return d.result
Example #3
 def do_join(self, client_num, local_dir, invite_code):
     action = start_action(
         action_type=u"join-magic-folder",
         client_num=client_num,
         local_dir=local_dir,
         invite_code=invite_code,
     )
     with action.context():
         precondition(isinstance(local_dir, unicode), local_dir=local_dir)
         precondition(isinstance(invite_code, str), invite_code=invite_code)
         local_dir_arg = unicode_to_argv(local_dir)
         d = DeferredContext(
             self.do_cli(
                 "magic-folder",
                 "join",
                 invite_code,
                 local_dir_arg,
                 client_num=client_num,
             )
         )
     def _done(args):
         (rc, stdout, stderr) = args
         self.failUnlessEqual(rc, 0)
         self.failUnlessEqual(stdout, "")
         self.failUnlessEqual(stderr, "")
         return (rc, stdout, stderr)
     d.addCallback(_done)
     return d.addActionFinish()
Example #4
            def got_ip_addresses():
                d = self._async_refresh_node(reactor, node)
                d = DeferredContext(d)

                def is_running(updated_node):
                    if updated_node.state is not NodeState.RUNNING:
                        raise Exception("Node failed to run")
                    return updated_node

                def check_addresses(updated_node):
                    """
                    Check if the node has got at least one IPv4 public address
                    and, if requested, an IPv4 private address.  If yes, then
                    return the node object with the addresses, None otherwise.
                    """
                    public_ips = _filter_ipv4(updated_node.public_ips)
                    if len(public_ips) > 0:
                        if self._use_private_addresses:
                            private_ips = _filter_ipv4(
                                updated_node.private_ips
                            )
                            if len(private_ips) == 0:
                                return None
                        return updated_node
                    else:
                        return None

                d.addCallback(is_running)
                d.addCallback(check_addresses)
                return d.result
Example #5
def run_state_change(change, deployer):
    """
    Apply the change to local state.

    :param change: Either an ``IStateChange`` provider or the result of an
        ``in_parallel`` or ``sequentially`` call.
    :param IDeployer deployer: The ``IDeployer`` to use.  Specific
        ``IStateChange`` providers may require specific ``IDeployer`` providers
        that provide relevant functionality for applying the change.

    :return: ``Deferred`` firing when the change is done.
    """
    if isinstance(change, _InParallel):
        return gather_deferreds(list(
            run_state_change(subchange, deployer)
            for subchange in change.changes
        ))
    if isinstance(change, _Sequentially):
        d = succeed(None)
        for subchange in change.changes:
            d.addCallback(
                lambda _, subchange=subchange: run_state_change(
                    subchange, deployer
                )
            )
        return d

    with change.eliot_action.context():
        context = DeferredContext(maybeDeferred(change.run, deployer))
        context.addActionFinish()
        return context.result
Example #6
        def got_some_rrsets(rrsets):
            accum.update(rrsets)
            if len(rrsets) < 100:
                # Fewer results than we asked for means we must be on the last
                # page.
                return accum

            # Otherwise, ask for the next page.  We do this slightly wrong, using
            # max(rrsets) as the starting key because txaws does not give us
            # access to the correct values from the response -
            # NextRecordIdentifier and NextRecordType.  This just means we'll load
            # one duplicate item on each page.  They all go into the dict so it
            # doesn't affect correctness.
            maxkey = max(rrsets)
            # Make sure we also preserve the Eliot context for callbacks of
            # this next Deferred.
            with start_action(action_type="load-next-page").context():
                d = DeferredContext(_load_all_rrsets(
                    route53,
                    zone_identifier,
                    name=maxkey.label,
                    type=maxkey.type,
                    accum=accum,
                ))
                return d.addActionFinish()
Example #7
    def logger(self, request, **routeArguments):
        logger = _get_logger(self)

        # If this is ever more than ASCII we might have issues? or maybe
        # this is pre-url decoding?
        # https://clusterhq.atlassian.net/browse/FLOC-1602
        action = REQUEST(logger, request_path=request.path,
                         method=request.method)

        # Generate a serialized action context that uniquely identifies
        # position within the logs, though there won't actually be any log
        # message with that particular task level:
        incidentIdentifier = action.serialize_task_id()

        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))

        def failure(reason):
            if reason.check(BadRequest):
                code = reason.value.code
                result = reason.value.result
            else:
                writeFailure(reason, logger, LOG_SYSTEM)
                code = INTERNAL_SERVER_ERROR
                result = incidentIdentifier
            request.setResponseCode(code)
            request.responseHeaders.setRawHeaders(
                b"content-type", [b"application/json"])
            return dumps(result)
        d.addErrback(failure)
        d.addActionFinish()
        return d.result
Example #8
def _get_converge_inputs(config, subscriptions, k8s, aws):
    a = start_action(action_type=u"load-converge-inputs")
    with a.context():
        d = DeferredContext(
            gatherResults([
                get_active_subscriptions(subscriptions),
                get_customer_grid_configmaps(k8s, config.kubernetes_namespace),
                get_customer_grid_deployments(k8s, config.kubernetes_namespace),
                get_customer_grid_replicasets(k8s, config.kubernetes_namespace),
                get_customer_grid_pods(k8s, config.kubernetes_namespace),
                get_customer_grid_service(k8s, config.kubernetes_namespace),
                get_hosted_zone_by_name(aws.get_route53_client(), Name(config.domain)),
            ]),
        )
        d.addCallback(
            lambda state: _State(**dict(
                zip([
                    u"subscriptions",
                    u"configmaps",
                    u"deployments",
                    u"replicasets",
                    u"pods",
                    u"service",
                    u"zone",
                ], state,
                ),
            )),
        )
        return d.addActionFinish()
Example #9
def get_hosted_zone_by_name(route53, name):
    """
    Get a ``HostedZone`` with a zone name matching ``name``.

    :param route53: A txaws Route53 client.

    :param txaws.route53.model.Name name: The zone name to look for.

    :raise KeyError: If no matching hosted zone is found.

    :return Deferred(HostedZone): The hosted zone with a matching name.
    """
    action = start_action(action_type=u"get-hosted-zone")
    with action.context():
        d = DeferredContext(route53.list_hosted_zones())
        def filter_results(zones):
            Message.log(zone_names=list(zone.name for zone in zones))
            for zone in zones:
                # XXX Bleuch zone.name should be a Name!
                if Name(zone.name) == name:
                    d = _load_all_rrsets(route53, zone.identifier)
                    d.addCallback(
                        lambda rrsets, zone=zone: _ZoneState(
                            zone=zone,
                            rrsets=rrsets,
                        ),
                    )
                    return d
            raise KeyError(name)
        d.addCallback(filter_results)
        return d.addActionFinish()
Example #10
def sample(operation, metric, name):
    """
    Perform sampling of the operation.

    :param IOperation operation: An operation to perform.
    :param IMetric metric: A quantity to measure.
    :param int name: Identifier for individual sample.
    :return: Deferred firing with a sample. A sample is a dictionary
        containing a ``success`` boolean.  If ``success is True``, the
        dictionary also contains a ``value`` for the sample measurement.
        If ``success is False``, the dictionary also contains a
        ``reason`` for failure.
    """
    with start_action(action_type=u'flocker:benchmark:sample', sample=name):
        sampling = DeferredContext(maybeDeferred(operation.get_probe))

        def run_probe(probe):
            probing = metric.measure(probe.run)
            probing.addCallbacks(
                lambda interval: dict(success=True, value=interval),
                lambda reason: dict(
                    success=False, reason=reason.getTraceback()),
            )
            probing.addCallback(bypass, probe.cleanup)

            return probing
        sampling.addCallback(run_probe)
        sampling.addActionFinish()
        return sampling.result
Example #11
def logged_run_process(reactor, command):
    """
    Run a child process, and log the output as we get it.

    :param reactor: An ``IReactorProcess`` to spawn the process on.
    :param command: An argument list specifying the child process to run.

    :return: A ``Deferred`` that calls back with ``_ProcessResult`` if the
        process exited successfully, or errbacks with
        ``_CalledProcessError`` otherwise.
    """
    d = Deferred()
    action = TWISTED_CHILD_PROCESS_ACTION(command=command)
    with action.context():
        d2 = DeferredContext(d)
        protocol = _LoggingProcessProtocol(d, action)
        reactor.spawnProcess(protocol, command[0], command)

        def process_ended((reason, output)):
            status = reason.value.status
            if status:
                raise _CalledProcessError(
                    returncode=status, cmd=command, output=output)
            return _ProcessResult(
                command=command,
                status=status,
                output=output,
            )

        d2.addCallback(process_ended)
        d2.addActionFinish()
        return d2.result
Example #12
def proxy(upstream, endpoint, header):
    """
    Establish a new connection to ``endpoint`` and begin proxying between that
    connection and ``upstream``.

    :param IProtocol upstream: A connected protocol.  All data received by
        this protocol from this point on will be sent along to another newly
        established connection.

    :param IStreamClientEndpoint endpoint: An endpoint to use to establish a
        new connection.  All data received over this connection will be sent
        along to the upstream connection.

    :param bytes header: Some extra data to write to the new downstream
        connection before proxying begins.
    """
    def failed(reason):
        upstream.transport.resumeProducing()
        upstream.transport.abortConnection()
        return reason

    upstream.transport.pauseProducing()

    peer = upstream.transport.getPeer()
    action = start_action(
        action_type=u"grid-router:proxy",
        **{u"from": (peer.host, peer.port)}
    )
    with action.context():
        d = DeferredContext(endpoint.connect(Factory.forProtocol(_Proxy)))
        d.addCallbacks(
            lambda downstream: DeferredContext(downstream.take_over(upstream, header)),
            failed,
        )
        return d.addActionFinish()
Example #13
 def stopService(self):
     action = LOG_STOP_SERVICE()
     with action.context():
         self.running = False
         d = DeferredContext(self._endpointService.stopService())
         d.addActionFinish()
         return d.result
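The same shape works for any Twisted service whose shutdown should be logged as a single action. A self-contained sketch under that assumption; the wrapper class and action type below are invented, not part of the original source:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.application.service import Service
from twisted.internet.defer import maybeDeferred

class TracedService(Service):
    """Wrap another service and log its shutdown as an Eliot action."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def stopService(self):
        action = start_action(action_type=u"example:stop-service")
        with action.context():
            self.running = False
            # maybeDeferred turns a synchronous stopService (or an
            # exception) into a Deferred the action can finish on.
            d = DeferredContext(maybeDeferred(self._wrapped.stopService))
            d.addActionFinish()
            return d.result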
Example #14
    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.

        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(maybeDeferred(
                connection.callRemote,
                ClusterStatusCommand,
                configuration=configuration,
                state=state,
                eliot_context=action
            ))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]
        update.response.addCallback(finished_update)
Example #15
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(self.deployer.hostname)
        d = DeferredContext(self.deployer.discover_state(known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state
                )
            with LOG_SEND_TO_CONTROL_SERVICE(
                    self.fsm.logger, connection=self.client) as context:
                self.client.callRemote(NodeStateCommand,
                                       state_changes=state_changes,
                                       eliot_context=context)
            action = self.deployer.calculate_changes(
                self.configuration, self.cluster_state
            )
            return action.run(self.deployer)
        d.addCallback(got_local_state)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(
            lambda _:
                self.reactor.callLater(
                    1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE
                )
        )
Example #16
    def cleanup(self, res):
        d = DeferredContext(defer.succeed(None))
        def _clean(ign):
            return self.magicfolder.disownServiceParent()

        d.addCallback(_clean)
        d.addCallback(lambda ign: res)
        return d.result
Example #17
 def wrapper(self, *args, **kwargs):
     context = start_action(Logger(),
                            action_type="acceptance:" + function.__name__,
                            args=args, kwargs=kwargs)
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addActionFinish()
         return d.result
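This wrapper is the inner function of a logging decorator; function is the method being decorated and the action type is derived from its name. A hedged, self-contained sketch of what the surrounding decorator might look like (the decorator name is invented):

from functools import wraps

from eliot import start_action
from eliot.twisted import DeferredContext

def log_acceptance(function):
    """Run a Deferred-returning method inside an Eliot action."""
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        # args and kwargs become fields of the action, so they end up
        # in the log output.
        context = start_action(
            action_type="acceptance:" + function.__name__,
            args=args, kwargs=kwargs)
        with context.context():
            d = DeferredContext(function(self, *args, **kwargs))
            d.addActionFinish()
            return d.result
    return wrapper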
Example #18
 def run(self, deployer):
     d = DeferredContext(succeed(None))
     for subchange in self.changes:
         d.addCallback(
             lambda _, subchange=subchange: run_state_change(
                 subchange, deployer
             )
         )
     return d.result
Example #19
 def test_returns_unfired_deferred(self):
     Message.log(hello="world")
     # @eliot_logged_test automatically gives us an action context but it's
     # still our responsibility to maintain it across stack-busting
     # operations.
     d = DeferredContext(deferLater(reactor, 0.0, lambda: None))
     d.addCallback(lambda ignored: Message.log(goodbye="world"))
     # We didn't start an action.  We're not finishing an action.
     return d.result
Example #20
def get_customer_grid_configmaps(k8s, namespace):
    action = start_action(action_type=u"load-configmaps")
    with action.context():
        d = DeferredContext(k8s.get_configmaps(_s4_selector(namespace)))
        def got_configmaps(configmaps):
            configmaps = list(configmaps)
            action.add_success_fields(configmap_count=len(configmaps))
            return configmaps
        d.addCallback(got_configmaps)
        return d.addActionFinish()
Example #21
 def _notify_failure(self, reason, customer_email, customer_id, subscription_id, plan_id):
     from sys import stdout, stderr
     # XXX Eliot log reason here too
     a = start_action(action_type=u"signup:send-failure")
     with a.context():
         d = DeferredContext(self.send_notify_failure(
             reason, customer_email, None,
             stdout, stderr,
         ))
         return d.addActionFinish()
Example #22
def get_customer_grid_replicasets(k8s, namespace):
    action = start_action(action_type=u"load-replicasets")
    with action.context():
        d = DeferredContext(k8s.get_replicasets(_s4_selector(namespace)))
        def got_replicasets(replicasets):
            replicasets = list(replicasets)
            action.add_success_fields(replicaset_count=len(replicasets))
            return replicasets
        d.addCallback(got_replicasets)
        return d.addActionFinish()
Example #23
def create_route53_rrsets(route53, zone, subscriptions):
    a = start_action(action_type=u"create-route53")
    with a.context():
        d = route53.change_resource_record_sets(zone.identifier, list(
            create_rrset(_rrset_for_subscription(subscription.subscription_id, zone.name))
            for subscription
            in subscriptions
        ))
        d = DeferredContext(d)
        return d.addActionFinish()
Example #24
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(
            self.fsm.logger, cluster_state=self.cluster_state, desired_configuration=self.configuration
        ).context():
            d = DeferredContext(self.deployer.discover_state(known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(self.cluster_state)
            with LOG_SEND_TO_CONTROL_SERVICE(
                self.fsm.logger, connection=self.client, local_changes=list(state_changes)
            ) as context:
                self.client.callRemote(NodeStateCommand, state_changes=state_changes, eliot_context=context)
            action = self.deployer.calculate_changes(self.configuration, self.cluster_state)
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(self.fsm.logger)
            return run_state_change(action, self.deployer)

        d.addCallback(got_local_state)
        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger, u"")

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(lambda _: self.reactor.callLater(1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE))
        d.addActionFinish()
Example #25
def get_customer_grid_deployments(k8s, namespace):
    action = start_action(action_type=u"load-deployments")
    with action.context():
        d = DeferredContext(k8s.get_deployments(_s4_selector(namespace)))
        def got_deployments(deployments):
            deployments = list(deployments)
            action.add_success_fields(deployment_count=len(deployments))
            _DEPLOYMENTS.set(len(deployments))
            return deployments
        d.addCallback(got_deployments)
        return d.addActionFinish()
Example #26
    def got_welcome(ignored):
        # we're connected to the wormhole server; send our introduction
        # message
        intro = {u"abilities": {u"server-v1": {}}}
        Message.log(server_intro=intro)
        wormhole.send_message(json.dumps(intro))

        # await the client's introduction
        d = DeferredContext(wormhole.get_message())
        d.addCallback(json.loads)
        return d.result
Example #27
 def signup(self, *args, **kwargs):
     """
     Provision a subscription and return an ``IClaim`` describing how to
     retrieve the resulting configuration from a magic wormhole server.
     """
     self._start.inc()
     a = start_action(action_type=u"wormhole-signup")
     with a.context():
         d = DeferredContext(self.provisioner.signup(*args, **kwargs))
         d.addCallback(self._details_to_wormhole_code)
         return d.addActionFinish()
Example #28
 def _check_once(self, k8s, namespace):
     """
     Load the customer grid pods from Kubernetes.
     """
     a = start_action(action_type=u"router-update:check")
     with a.context():
         d = DeferredContext(
             get_customer_grid_pods(KubeClient(k8s=k8s), namespace)
         )
         d.addCallback(self._router.set_pods)
         return d.addActionFinish()
Example #29
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(
            self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(self.fsm.logger, cluster_state=self.cluster_state,
                          desired_configuration=self.configuration).context():
            d = DeferredContext(maybeDeferred(
                self.deployer.discover_state, known_local_state))

        def got_local_state(local_state):
            cluster_state_changes = local_state.shared_state_changes()
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            #
            # XXX This somewhat side-steps the whole explicit-state-machine
            # thing we're aiming for here.  It would be better for these state
            # changes to arrive as an input to the state machine.
            for state in cluster_state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state
                )

            # XXX And for this update to be the side-effect of an output
            # resulting.
            sent_state = self._maybe_send_state_to_control_service(
                cluster_state_changes)

            action = self.deployer.calculate_changes(
                self.configuration, self.cluster_state, local_state
            )
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(
                self.fsm.logger)
            ran_state_change = run_state_change(action, self.deployer)
            DeferredContext(ran_state_change).addErrback(
                writeFailure, self.fsm.logger)

            # Wait for the control node to acknowledge the new
            # state, and for the convergence actions to run.
            return gather_deferreds([sent_state, ran_state_change])
        d.addCallback(got_local_state)

        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(
            lambda _:
                self.reactor.callLater(
                    1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE
                )
        )
        d.addActionFinish()
Example #30
 def report(self, result):
     result = self.common.set("result", result)
     context = start_action(system="reporter:post")
     with context.context():
         posting = DeferredContext(
             treq.post(
                 self.location.encode("ascii"),
                 json.dumps(thaw(result)),
                 timeout=30,
             )
         )
         return posting.addActionFinish()
Example #31
    def _send_request(self, method, url, *args, **kwargs):
        """
        Send HTTP request.

        :param str method: The HTTP method to use.
        :param str url: The URL to make the request to.

        :return: Deferred firing with the HTTP response.
        """
        action = LOG_JWS_REQUEST(url=url)
        with action.context():
            headers = kwargs.setdefault('headers', Headers())
            headers.setRawHeaders(b'user-agent', [self._user_agent])
            kwargs.setdefault('timeout', self.timeout)
            return (DeferredContext(
                self._treq.request(method, url, *args, **kwargs)).addCallback(
                    tap(lambda r: action.add_success_fields(
                        code=r.code,
                        content_type=r.headers.getRawHeaders(
                            b'content-type', [None])[0]))).addActionFinish())
Example #32
 def poll(self, authzr):
     """
     Update an authorization from the server (usually to check its status).
     """
     action = LOG_ACME_POLL_AUTHORIZATION(authorization=authzr)
     with action.context():
         return (
             DeferredContext(self._client.get(authzr.uri))
             # Spec says we should get 202 while pending, Boulder actually
             # sends us 200 always, so just don't check.
             # .addCallback(self._expect_response, http.ACCEPTED)
             .addCallback(lambda res: self._parse_authorization(
                 res, uri=authzr.uri).addCallback(
                     self._check_authorization, authzr.body.identifier
                 ).addCallback(lambda authzr: (
                     authzr, self.retry_after(res, _now=self._clock.seconds)
                 ))).addCallback(
                     tap(lambda a_r: action.add_success_fields(
                         authorization=a_r[0], retry_after=a_r[1]))
                 ).addActionFinish())
Example #33
    def register(self, new_reg=None):
        """
        Create a new registration with the ACME server.

        :param ~acme.messages.NewRegistration new_reg: The registration message
            to use, or ``None`` to construct one.

        :return: The registration resource.
        :rtype: Deferred[`~acme.messages.RegistrationResource`]
        """
        if new_reg is None:
            new_reg = messages.NewRegistration()
        action = LOG_ACME_REGISTER(registration=new_reg)
        with action.context():
            return (DeferredContext(
                self.update_registration(
                    new_reg, uri=self.directory[new_reg])).addErrback(
                        self._maybe_registered, new_reg).addCallback(
                            tap(lambda r: action.add_success_fields(
                                registration=r))).addActionFinish())
Example #34
    def from_url(
        cls,
        reactor,
        url,
        key,
        alg=RS256,
        jws_client=None,
        timeout=_DEFAULT_TIMEOUT,
    ):
        """
        Construct a client from an ACME directory at a given URL.

        At construct time, it validates the ACME directory.

        :param url: The ``twisted.python.url.URL`` to fetch the directory from.
            See `txacme.urls` for constants for various well-known public
            directories.
        :param reactor: The Twisted reactor to use.
        :param ~josepy.jwk.JWK key: The client key to use.
        :param alg: The signing algorithm to use.  Needs to be compatible with
            the type of key used.
        :param JWSClient jws_client: The underlying client to use, or ``None``
            to construct one.
        :param int timeout: Number of seconds to wait for an HTTP response
            during ACME server interaction.

        :return: The constructed client.
        :rtype: Deferred[`Client`]
        """
        action = LOG_ACME_CONSUME_DIRECTORY(url=url,
                                            key_type=key.typ,
                                            alg=alg.name)
        with action.context():
            check_directory_url_type(url)
            directory = url.asText()
            return (DeferredContext(_default_client(
                jws_client, reactor, key, alg, directory, timeout
            )).addCallback(
                tap(lambda jws_client: action.add_success_fields(
                    directory=directory))).addCallback(lambda jws_client: cls(
                        reactor, key, jws_client)).addActionFinish())
Example #35
    def answer_challenge(self, challenge_body, response):
        """
        Respond to an authorization challenge.

        :param ~acme.messages.ChallengeBody challenge_body: The challenge being
            responded to.
        :param ~acme.challenges.ChallengeResponse response: The response to the
            challenge.

        :return: The updated challenge resource.
        :rtype: Deferred[`~acme.messages.ChallengeResource`]
        """
        action = LOG_ACME_ANSWER_CHALLENGE(challenge_body=challenge_body,
                                           response=response)
        with action.context():
            return (DeferredContext(
                self._client.post(challenge_body.uri, response)).addCallback(
                    self._parse_challenge).addCallback(
                        self._check_challenge, challenge_body).addCallback(
                            tap(lambda c: action.add_success_fields(
                                challenge_resource=c))).addActionFinish())
Example #36
    def wrapper(self, *args, **kwargs):

        serializable_args = tuple(_ensure_encodeable(a) for a in args)
        serializable_kwargs = {}
        for kwarg in kwargs:
            serializable_kwargs[kwarg] = _ensure_encodeable(kwargs[kwarg])

        context = start_action(
            action_type=label,
            args=serializable_args, kwargs=serializable_kwargs,
        )
        with context.context():
            d = DeferredContext(function(self, *args, **kwargs))
            d.addCallback(log_result, context)
            d.addActionFinish()
            return d.result
Example #37
 def _test_registration(self):
     return (DeferredContext(self._test_create_client()).addCallback(
         partial(setattr, self, 'client')
     ).addCallback(lambda _: self._test_register()).addCallback(
         tap(lambda reg1: self.assertEqual(reg1.body.contact, ()))
     ).addCallback(
         tap(lambda reg1: self._test_register(
             NewRegistration.from_data(email=u'*****@*****.**')
         ).addCallback(
             tap(lambda reg2: self.assertEqual(reg1.uri, reg2.uri))).
             addCallback(lambda reg2: self.assertEqual(
                 reg2.body.contact, (u'mailto:[email protected]', ))))
     ).addCallback(self._test_agree_to_tos).addCallback(
         lambda _: self._test_request_challenges(self.HOST)).addCallback(
             partial(setattr, self, 'authzr')).addCallback(
                 lambda _: self._create_responder()).addCallback(
                     tap(lambda _: self._test_poll_pending(self.authzr))).
             addCallback(self._test_answer_challenge).addCallback(
                 tap(lambda _: self._test_poll(self.authzr))).addCallback(
                     lambda stop_responding: stop_responding()).addCallback(
                         lambda _: self._test_issue(self.HOST)).addCallback(
                             self._test_chain).addActionFinish())
Example #38
    def request_challenges(self, identifier):
        """
        Create a new authorization.

        :param ~acme.messages.Identifier identifier: The identifier to
            authorize.

        :return: The new authorization resource.
        :rtype: Deferred[`~acme.messages.AuthorizationResource`]
        """
        action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
        with action.context():
            message = messages.NewAuthorization(identifier=identifier)
            return (
                DeferredContext(
                    self._client.post(self.directory[message], message))
                .addCallback(self._expect_response, http.CREATED)
                .addCallback(self._parse_authorization)
                .addCallback(self._check_authorization, identifier)
                .addCallback(
                    tap(lambda a: action.add_success_fields(authorization=a)))
                .addActionFinish())
Example #39
def _execute_converge_output(jobs):
    if not jobs:
        return succeed(None)

    a = start_action(action_type=u"execute-converge-step")
    with a.context():
        job = jobs.pop(0)
        d = DeferredContext(job())
        d.addErrback(write_failure)
        d = d.addActionFinish()

    if jobs:
        # Capture whatever action context is active now and make sure it is
        # also active when we get back here to process the next job.
        DeferredContext(d).addCallback(
            lambda ignored: _execute_converge_output(jobs), )
    return d
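The example above ends by passing an existing Deferred to a fresh DeferredContext so that whatever action is current at that point is captured for its callbacks. That trick also works on its own; a minimal sketch with invented action types:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import Deferred

d = Deferred()
with start_action(action_type=u"example:outer").context():
    # The callback is added here, so it runs with "example:outer" as the
    # current action, even though d was created outside the action and
    # fires after the with block has exited.
    DeferredContext(d).addCallback(lambda ignored: None)
d.callback(None)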
Example #40
    def _post(self, url, obj, content_type, **kwargs):
        """
        POST an object and check the response.

        :param str url: The URL to request.
        :param ~josepy.interfaces.JSONDeSerializable obj: The serializable
            payload of the request.
        :param bytes content_type: The expected content type of the response.

        :raises txacme.client.ServerError: If server response body carries HTTP
            Problem (draft-ietf-appsawg-http-problem-00).
        :raises acme.errors.ClientError: In case of other protocol errors.
        """
        with LOG_JWS_POST().context():
            headers = kwargs.setdefault('headers', Headers())
            headers.setRawHeaders(b'content-type', [JSON_CONTENT_TYPE])
            return (DeferredContext(self._get_nonce(url)).addCallback(
                self._wrap_in_jws,
                obj).addCallback(lambda data: self._send_request(
                    u'POST', url, data=data, **kwargs)).addCallback(
                        self._add_nonce).addCallback(
                            self._check_response,
                            content_type=content_type).addActionFinish())
Example #41
        def loadAndDispatch(self, request, **routeArguments):
            if request.method in (b"GET", b"DELETE"):
                objects = {}
            else:
                contentType = request.requestHeaders.getRawHeaders(
                    b"content-type", [None])[0]
                if contentType != b"application/json":
                    raise ILLEGAL_CONTENT_TYPE

                body = request.content.read()
                try:
                    objects = loads(body)
                except ValueError:
                    raise DECODING_ERROR

                errors = []
                for error in inputValidator.iter_errors(objects):
                    errors.append(error.message)
                if errors:
                    raise InvalidRequestJSON(errors=errors, schema=inputSchema)

            eliot_action = JSON_REQUEST(_get_logger(self), json=objects.copy())
            with eliot_action.context():
                # Just assume there are no conflicts between these collections
                # of arguments right now.  When there is a schema for the JSON
                # hopefully we can do some static verification that no routing
                # arguments conflict with any top-level keys in the request
                # body and then we can be sure there are no conflicts here.
                objects.update(routeArguments)

                d = DeferredContext(maybeDeferred(original, self, **objects))

                def got_result(result):
                    code = OK
                    json = result
                    if isinstance(result, EndpointResponse):
                        code = result.code
                        json = result.result
                    eliot_action.add_success_fields(code=code, json=json)
                    return result

                d.addCallback(got_result)
                d.addActionFinish()
                return d.result
Example #42
def _configuration_to_wormhole_code(reactor, wormhole, rendezvous_url,
                                    configuration):
    """
    Serialize ``configuration`` to JSON and put it into a new wormhole created
    at the server given by ``rendezvous_url``.

    :return (Deferred(IClaim), Deferred): Two Deferreds representing two
        different events.  The first represents the creation of the magic
        wormhole.  It fires with a claim that describes the wormhole from
        which the configuration can be retrieved.  The second represents the
        complete transfer of the configuration to the client.  It fires with a
        meaningless success result or a ``Failure`` if something is known to
        have gone wrong.
    """
    a = start_action(action_type=u"signup:configuration-to-wormhole")

    with a.context():
        wh = _get_wormhole(reactor, wormhole, rendezvous_url)
        claim_deferred = _get_claim(wh)
        done_deferred = _transfer_configuration(wh, configuration)
        return (
            claim_deferred,
            DeferredContext(done_deferred).addActionFinish(),
        )
Example #43
def does_not_exist(client, obj):
    """
    Wait for ``obj`` to not exist.

    :param IKubernetesClient client: The client to use to determine existence.

    :param IObject obj: The object the existence of which to check.

    :return: A ``Deferred`` that fires when the object does not exist.
    """
    action = start_action(
        action_type=u"poll:start",
        obj=client.model.iobject_to_raw(obj),
    )
    with action.context():
        from twisted.internet import reactor
        d = poll(reactor, action, lambda: client.get(obj), repeat(0.5, 100))
        d = DeferredContext(d)

        # Once we get a NOT FOUND error, we're done.
        def trap_not_found(reason):
            reason.trap(KubernetesError)
            if reason.value.code == NOT_FOUND:
                return None
            return reason

        d.addErrback(trap_not_found)

        def trap_stop_iteration(reason):
            reason.trap(StopIteration)
            Message.log(
                does_not_exist=u"timeout",
                obj=client.model.iobject_to_raw(obj),
            )
            return None

        d.addErrback(trap_stop_iteration)
        return d.addActionFinish()
Example #44
def _async_wait_until_running(reactor, instance):
    """
    Wait until a instance is running.

    :param reactor: The reactor.
    :param boto.ec2.instance.Instance instance: The instance to wait for.
    :return: Deferred that fires when the instance has become running
        or failed to run (within a predefined period of time).
    """

    action = start_action(
        action_type=u"flocker:provision:aws:wait_until_running",
        instance_id=instance.id,
    )

    def check_final_state(ignored):
        if instance.state != u'running':
            raise FailedToRun(instance.state_reason)
        action.add_success_fields(
            instance_state=instance.state,
            instance_state_reason=instance.state_reason,
        )
        return instance

    def finished_booting():
        d = maybeDeferred(_node_is_booting, instance)
        d.addCallback(lambda x: not x)
        return d

    with action.context():
        # Since we are refreshing the instance's state once in a while
        # we may miss some transitions.  So, here we are waiting until
        # the node has transitioned out of the original state and then
        # check if the new state is the one that we expect.
        d = loop_until(reactor, finished_booting, repeat(5, INSTANCE_TIMEOUT))
        d = DeferredContext(d)
        d.addCallback(check_final_state)
        d.addActionFinish()
        return d.result
Example #45
    def volumedriver_get(self, Name):
        """
        Return information about the current state of a particular volume.

        :param unicode Name: The name of the volume.

        :return: Result indicating success.
        """
        d = DeferredContext(self._dataset_id_for_name(Name))
        d.addCallback(self._get_path_from_dataset_id)

        def got_path(path):
            if path is None:
                path = u""
            else:
                path = path.path
            return {u"Err": u"",
                    u"Volume": {
                        u"Name": Name,
                        u"Mountpoint": path}}
        d.addCallback(got_path)
        return d.result
Example #46
    def volumedriver_path(self, Name):
        """
        Return the path of a locally mounted volume if possible.

        Docker will call this in situations where it's not clear to us
        whether the dataset should be local or not, so we can't wait for a
        result.

        :param unicode Name: The name of the volume.

        :return: Result indicating success.
        """
        d = DeferredContext(self._dataset_id_for_name(Name))
        d.addCallback(self._get_path_from_dataset_id)

        def got_path(path):
            if path is None:
                return {u"Err": u"Volume not available.", u"Mountpoint": u""}
            else:
                return {u"Err": u"", u"Mountpoint": path.path}

        d.addCallback(got_path)
        return d.result
Example #47
    def volumedriver_list(self):
        """
        Return information about the current state of all volumes.

        :return: Result indicating success.
        """
        listing = DeferredContext(
            self._flocker_client.list_datasets_configuration())

        def got_configured(configured):
            results = []
            for dataset in configured:
                # Datasets without a name can't be used by the Docker plugin:
                if NAME_FIELD not in dataset.metadata:
                    continue
                dataset_name = dataset.metadata[NAME_FIELD]
                d = self._get_path_from_dataset_id(dataset.dataset_id)
                d.addCallback(lambda path, name=dataset_name: (path, name))
                results.append(d)
            return gatherResults(results)

        listing.addCallback(got_configured)

        def got_paths(results):
            return {
                u"Err":
                u"",
                u"Volumes":
                sorted([{
                    u"Name": name,
                    u"Mountpoint": u"" if path is None else path.path
                } for (path, name) in results])
            }

        listing.addCallback(got_paths)
        return listing.result
Example #48
    def logger(self, request, **routeArguments):
        try:
            logger = self.logger
        except AttributeError:
            logger = _logger
        else:
            if logger is None:
                logger = _logger

        path = repr(request.path).decode("ascii")
        action = REQUEST(logger, request_path=path)

        # Generate a serialized action context that uniquely identifies
        # position within the logs, though there won't actually be any log
        # message with that particular task level:
        incidentIdentifier = action.serialize_task_id()

        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))

        def failure(reason):
            if reason.check(BadRequest):
                code = reason.value.code
                result = reason.value.result
            else:
                writeFailure(reason, logger, LOG_SYSTEM)
                code = INTERNAL_SERVER_ERROR
                result = incidentIdentifier
            request.setResponseCode(code)
            request.responseHeaders.setRawHeaders(b"content-type",
                                                  [b"application/json"])
            return dumps(result)

        d.addErrback(failure)
        d.addActionFinish()
        return d.result
Example #49
    def logger(self, request, **routeArguments):
        try:
            logger = self.logger
        except AttributeError:
            logger = _logger
        else:
            if logger is None:
                logger = _logger

        path = repr(request.path).decode("ascii")
        action = REQUEST(logger, request_path=path)

        # Can't construct a good identifier without using private things.
        # See https://github.com/ClusterHQ/eliot/issues/29
        uuid = action._identification[u"task_uuid"]
        level = action._identification[u"task_level"]
        incidentIdentifier = uuid + u"," + level

        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))

        def failure(reason):
            if reason.check(BadRequest):
                code = reason.value.code
                result = reason.value.result
            else:
                writeFailure(reason, logger, LOG_SYSTEM)
                code = INTERNAL_SERVER_ERROR
                result = incidentIdentifier
            request.setResponseCode(code)
            request.responseHeaders.setRawHeaders(
                b"content-type", [b"application/json"])
            return dumps({u"error": True, u"result": result})
        d.addErrback(failure)
        d.addActionFinish()
        return d.result
Example #50
def _execute_converge_outputs(jobs):
    a = start_action(action_type=u"execute-converge-steps")
    with a.context():
        d = DeferredContext(_execute_converge_output(jobs))
        return d.addActionFinish()
Example #51
 def g(*a, **kw):
     action = start_action(action_type=scope + u":" + f.__name__)
     with action.context():
         d = DeferredContext(maybeDeferred(f, *a, **kw))
         d.addErrback(write_failure)
         return d.addActionFinish()
Example #52
    def _request_with_headers(self,
                              method,
                              path,
                              body,
                              success_codes,
                              error_codes=None,
                              configuration_tag=None):
        """
        Send a HTTP request to the Flocker API, return decoded JSON body and
        headers.

        :param bytes method: HTTP method, e.g. PUT.
        :param bytes path: Path to add to base URL.
        :param body: If not ``None``, JSON encode this and send as the
            body of the request.
        :param set success_codes: Expected success response codes.
        :param error_codes: Mapping from HTTP response code to exception to be
            raised if it is present, or ``None`` to set no errors.
        :param configuration_tag: If not ``None``, include value as
            ``X-If-Configuration-Matches`` header.

        :return: ``Deferred`` firing a tuple of (decoded JSON,
            response headers).
        """
        url = self._base_url + path
        action = _LOG_HTTP_REQUEST(url=url, method=method, request_body=body)

        if error_codes is None:
            error_codes = {}

        def error(body, code):
            if code in error_codes:
                raise error_codes[code](body)
            raise ResponseError(code, body)

        def got_response(response):
            if response.code in success_codes:
                action.addSuccessFields(response_code=response.code)
                d = json_content(response)
                d.addCallback(lambda decoded_body:
                              (decoded_body, response.headers))
                return d
            else:
                d = content(response)
                d.addCallback(error, response.code)
                return d

        # Serialize the current task ID so we can trace logging across
        # processes:
        headers = {b"X-Eliot-Task-Id": action.serialize_task_id()}
        data = None
        if body is not None:
            headers["content-type"] = b"application/json"
            data = dumps(body)
        if configuration_tag is not None:
            headers["X-If-Configuration-Matches"] = [
                configuration_tag.encode("utf-8")
            ]

        with action.context():
            request = DeferredContext(
                self._treq.request(
                    method,
                    url,
                    data=data,
                    headers=headers,
                    # Keep tests from having dirty reactor problems:
                    persistent=False))
        request.addCallback(got_response)

        def got_body(result):
            action.addSuccessFields(response_body=result[0])
            return result

        request.addCallback(got_body)
        request.addActionFinish()
        return request.result
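The X-Eliot-Task-Id header above carries action.serialize_task_id() so the process handling the request can attach its own log messages to the same Eliot task. A minimal sketch of the receiving side, assuming a recent eliot (Action.continue_task) and a hypothetical handler function:

from eliot import Action, Message

def handle_request(headers):
    task_id = headers[b"X-Eliot-Task-Id"].decode("ascii")
    # Resume the client's task: messages logged here share its task UUID,
    # so client and server logs can be stitched together afterwards.
    with Action.continue_task(task_id=task_id):
        Message.log(message_type=u"example:server-side-work")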
Example #53
 def run(self, deployer):
     d = DeferredContext(succeed(None))
     for subchange in self.changes:
         d.addCallback(lambda _, subchange=subchange: run_state_change(
             subchange, deployer))
     return d.result
Example #54
 def _test_agree_to_tos(self, reg):
     with start_action(action_type=u'integration:agree_to_tos').context():
         return (
             DeferredContext(self.client.agree_to_tos(reg))
             .addActionFinish())
Example #55
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return Deferred[Optional[Dict[unicode, Any]]]: Scenario metrics.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. "
                "Waiting {num_seconds} seconds for them to complete."
            ).format(
                num_requests=outstanding_requests,
                num_seconds=self.timeout
            )
            Message.log(key='outstanding_requests', value=msg)

        with start_action(
            action_type=u'flocker:benchmark:scenario:stop',
            scenario='request_load'
        ):
            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor,
                                          no_outstanding_requests,
                                          repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario = DeferredContext(scenario_stopped)

            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = (
                    "Force stopping the scenario. "
                    "There are {num_requests} outstanding requests"
                ).format(
                    num_requests=outstanding_requests
                )
                Message.log(key='force_stop_request', value=msg)
            scenario.addErrback(handle_timeout)

            def scenario_cleanup(ignored):
                """
                Calls the scenario cleanup, and wraps it inside an eliot
                start action, so we can see the logs if something goes
                wrong within the cleanup

                :return Deferred: that will fire once the cleanup has been
                    completed
                """
                with start_action(
                    action_type=u'flocker:benchmark:scenario:cleanup',
                    scenario='request_load'
                ):
                    return self.request.run_cleanup()

            scenario.addBoth(scenario_cleanup)

            def return_metrics(_ignore):
                return self.rate_measurer.get_metrics()
            scenario.addCallback(return_metrics)

            return scenario.addActionFinish()
Example #56
def change_route53_rrsets(route53, zone, rrset):
    a = start_action(action_type=u"change-route53", zone=zone.identifier, rrset=attr.asdict(rrset))
    with a.context():
        d = route53.change_resource_record_sets(zone.identifier, [upsert_rrset(rrset)])
        d = DeferredContext(d)
        return d.addActionFinish()
Example #57
    def create_nodes(self, reactor, names, distribution, metadata={}):
        """
        Create nodes with the given names.

        :param reactor: The reactor.
        :param names: The names of the nodes.
        :type names: list of str
        :param str distribution: The name of the distribution to
            install on the nodes.
        :param dict metadata: Metadata to associate with the nodes.

        :return: A list of ``Deferred``s each firing with an INode
            when the corresponding node is created.   The list has
            the same order as :param:`names`.
        """
        size = self._default_size
        disk_size = 8

        action = start_action(
            action_type=u"flocker:provision:aws:create_nodes",
            instance_count=len(names),
            distribution=distribution,
            image_size=size,
            disk_size=disk_size,
            metadata=metadata,
        )
        with action.context():
            disk1 = EBSBlockDeviceType()
            disk1.size = disk_size
            disk1.delete_on_termination = True
            diskmap = BlockDeviceMapping()
            diskmap['/dev/sda1'] = disk1

            images = self._connection.get_all_images(
                filters={'name': IMAGE_NAMES[distribution]}, )

            instances = self._run_nodes(count=len(names),
                                        image_id=images[0].id,
                                        size=size,
                                        diskmap=diskmap)

            def make_node(ignored, name, instance):
                return AWSNode(
                    name=name,
                    _provisioner=self,
                    _instance=instance,
                    distribution=distribution,
                )

            results = []
            for name, instance in izip_longest(names, instances):
                if instance is None:
                    results.append(fail(Exception("Could not run instance")))
                else:
                    node_metadata = metadata.copy()
                    node_metadata['Name'] = name
                    d = self._async_get_node(reactor, instance, node_metadata)
                    d = DeferredContext(d)
                    d.addCallback(make_node, name, instance)
                    results.append(d.result)
            action_completion = DeferredContext(DeferredList(results))
            action_completion.addActionFinish()
            # Individual results and errors should be consumed by the caller,
            # so we can leave action_completion alone now.
            return results
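
A minimal sketch of the pattern used above: hand the caller one Deferred per node while tying the Eliot action's completion to a DeferredList over all of them. create_many and start_one are illustrative stand-ins, not the AWS provisioner:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import DeferredList, fail, succeed


def create_many(names, start_one):
    # ``start_one(name)`` stands in for the per-instance provisioning call; it
    # returns a Deferred firing with a node object, or None if the backend
    # came up short (as izip_longest signals above).
    action = start_action(action_type=u"example:create_nodes",
                          instance_count=len(names))
    with action.context():
        results = []
        for name in names:
            provisioning = start_one(name)
            if provisioning is None:
                results.append(fail(Exception("Could not run instance")))
                continue
            d = DeferredContext(provisioning)
            # Per-node post-processing runs inside the action's context.
            d.addCallback(lambda node, name=name: {u"name": name,
                                                   u"node": node})
            results.append(d.result)
        # The action finishes once every per-node Deferred has fired, but the
        # individual results (and errors) stay with the caller.
        action_completion = DeferredContext(DeferredList(results))
        action_completion.addActionFinish()
        return results


# Usage with a stubbed, synchronous provisioner:
deferreds = create_many([u"node-1", u"node-2"],
                        lambda name: succeed({u"id": name + u"-instance"}))
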
Exemple #58
0
    def _test_poll(self, auth):
        action = start_action(action_type=u'integration:poll')
        with action.context():
            return (
                DeferredContext(poll_until_valid(auth, reactor, self.client))
                .addActionFinish())
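
The same action-wrapping with a generic polling loop written in plain Twisted. poll_until and test_poll below are illustrative sketches; they are not the poll_until_valid helper used above:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet import task
from twisted.internet.defer import maybeDeferred


def poll_until(reactor, predicate, interval=1.0):
    """Fire when ``predicate()`` (which may return a Deferred) is truthy."""
    def check(result):
        if result:
            return result
        return task.deferLater(reactor, interval, loop)

    def loop():
        return maybeDeferred(predicate).addCallback(check)

    return loop()


def test_poll(reactor, predicate):
    action = start_action(action_type=u"integration:poll")
    with action.context():
        return DeferredContext(
            poll_until(reactor, predicate)).addActionFinish()


if __name__ == "__main__":
    state = {"checks": 0}

    def ready():
        state["checks"] += 1
        return state["checks"] >= 3       # truthy on the third check

    task.react(lambda reactor: test_poll(reactor, ready))
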
Exemple #59
0
    def clean_nodes(self):
        """
        Clean containers and datasets via the API.

        :return: A `Deferred` that fires when the cluster is clean.
        """
        def api_clean_state(
            name,
            configuration_method,
            state_method,
            delete_method,
        ):
            """
            Clean entities from the cluster.

            :param unicode name: The name of the entities to clean.
            :param configuration_method: The function to obtain the configured
                entities.
            :param state_method: The function to get the current entities.
            :param delete_method: The method to delete an entity.

            :return: A `Deferred` that fires when the entities have been
                deleted.
            """
            context = start_action(action_type=u"acceptance:cleanup_" + name, )
            with context.context():
                get_items = DeferredContext(configuration_method())

                def delete_items(items):
                    return gather_deferreds(
                        list(delete_method(item) for item in items))

                get_items.addCallback(delete_items)
                get_items.addCallback(lambda ignored: loop_until(
                    reactor, lambda: state_method().addCallback(lambda result:
                                                                [] == result)))
                return get_items.addActionFinish()

        def cleanup_containers(_):
            return api_clean_state(
                u"containers",
                self.configured_containers,
                self.current_containers,
                lambda item: self.remove_container(item[u"name"]),
            )

        def cleanup_datasets(_):
            return api_clean_state(
                u"datasets",
                self.client.list_datasets_configuration,
                self.client.list_datasets_state,
                lambda item: self.client.delete_dataset(item.dataset_id),
            )

        def cleanup_leases():
            context = start_action(action_type="acceptance:cleanup_leases")
            with context.context():
                get_items = DeferredContext(self.client.list_leases())

                def release_all(leases):
                    release_list = []
                    for lease in leases:
                        release_list.append(
                            self.client.release_lease(lease.dataset_id))
                    return gather_deferreds(release_list)

                get_items.addCallback(release_all)
                return get_items.addActionFinish()

        d = DeferredContext(cleanup_leases())
        d.addCallback(cleanup_containers)
        d.addCallback(cleanup_datasets)
        return d.result
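
A self-contained sketch of the api_clean_state pattern with an in-memory stand-in for the cluster API: list the configured entities, delete each one, then poll until the reported state is empty. The stubs and the poll argument replace the real client and loop_until:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import gatherResults, succeed


def api_clean_state(name, configuration_method, state_method, delete_method,
                    poll):
    context = start_action(action_type=u"acceptance:cleanup_" + name)
    with context.context():
        get_items = DeferredContext(configuration_method())

        def delete_items(items):
            return gatherResults([delete_method(item) for item in items])
        get_items.addCallback(delete_items)

        # ``poll`` stands in for loop_until(reactor, predicate): it should
        # return a Deferred that fires once the predicate becomes truthy.
        get_items.addCallback(
            lambda _: poll(lambda: state_method().addCallback(
                lambda result: result == [])))
        return get_items.addActionFinish()


# Usage against an in-memory "cluster":
items = [u"a", u"b"]
cleaned = api_clean_state(
    u"containers",
    configuration_method=lambda: succeed(list(items)),
    state_method=lambda: succeed(list(items)),
    delete_method=lambda item: succeed(items.remove(item)),
    poll=lambda predicate: predicate(),   # naive stand-in: check once
)
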
Exemple #60
0
    def _update_connection(self, connection, configuration, state):
        """
        Send the latest cluster configuration and state to ``connection``.

        :param ControlAMP connection: The connection to use to send the
            command.
        :param configuration: The latest cluster configuration to record and
            send to the connection.
        :param state: The latest cluster state to record and send to the
            connection.
        """

        # Set the configuration and the state to the latest versions. It is
        # okay to call this even if the latest configuration is the same
        # object.
        self._configuration_generation_tracker.insert_latest(configuration)
        self._state_generation_tracker.insert_latest(state)

        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():

            # Attempt to compute a diff to send to the connection
            last_received_generations = (
                self._last_received_generation[connection])

            config_gen_tracker = self._configuration_generation_tracker
            configuration_diff = (
                config_gen_tracker.get_diff_from_hash_to_latest(
                    last_received_generations.config_hash))

            state_gen_tracker = self._state_generation_tracker
            state_diff = (state_gen_tracker.get_diff_from_hash_to_latest(
                last_received_generations.state_hash))

            if configuration_diff is not None and state_diff is not None:
                # If both diffs were successfully computed, send a command to
                # send the diffs along with before and after hashes so the
                # nodes can verify the application of the diffs.
                d = DeferredContext(
                    maybeDeferred(connection.callRemote,
                                  ClusterStatusDiffCommand,
                                  configuration_diff=configuration_diff,
                                  start_configuration_generation=(
                                      last_received_generations.config_hash),
                                  end_configuration_generation=(
                                      config_gen_tracker.get_latest_hash()),
                                  state_diff=state_diff,
                                  start_state_generation=(
                                      last_received_generations.state_hash),
                                  end_state_generation=(
                                      state_gen_tracker.get_latest_hash()),
                                  eliot_context=action))
                d.addActionFinish()
            else:
                # Otherwise, just send the latest configuration and state to
                # the node.
                configuration = config_gen_tracker.get_latest()
                state = state_gen_tracker.get_latest()
                # Use ``maybeDeferred`` so if an exception happens,
                # it will be wrapped in a ``Failure`` - see FLOC-3221
                d = DeferredContext(
                    maybeDeferred(
                        connection.callRemote,
                        ClusterStatusCommand,
                        configuration=configuration,
                        configuration_generation=(
                            config_gen_tracker.get_latest_hash()),
                        state=state,
                        state_generation=state_gen_tracker.get_latest_hash(),
                        eliot_context=action))
                d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(response):
            del self._current_command[connection]
            if response:
                config_gen = response['current_configuration_generation']
                state_gen = response['current_state_generation']
                self._last_received_generation[connection] = (
                    _ConfigAndStateGeneration(config_hash=config_gen,
                                              state_hash=state_gen))
                #  If the latest hash was not returned, schedule an update.
                if (self._configuration_generation_tracker.get_latest_hash() !=
                        config_gen
                        or self._state_generation_tracker.get_latest_hash() !=
                        state_gen):
                    self._schedule_update([connection])

        update.response.addCallback(finished_update)
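
A reduced sketch of the decision made above: send an incremental diff when one can be computed from the agent's last known generation, otherwise fall back to the full configuration and state. The tracker and the connection below are toy stand-ins, not the Flocker control-service classes:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import maybeDeferred


class _Tracker(object):
    """Toy generation tracker used only for this sketch."""
    def __init__(self, latest, latest_hash):
        self._latest, self._hash = latest, latest_hash

    def get_latest(self):
        return self._latest

    def get_latest_hash(self):
        return self._hash

    def get_diff_from_hash_to_latest(self, known_hash):
        # Pretend a diff is only available when the agent reported some hash.
        if known_hash is None:
            return None
        return {u"from": known_hash, u"to": self._hash}


class _Connection(object):
    """Toy connection that records what would be sent over AMP."""
    def callRemote(self, command, **kwargs):
        return {u"command": command, u"args": kwargs}


def send_update(connection, tracker, last_known_hash):
    action = start_action(action_type=u"example:send_to_agent")
    with action.context():
        diff = tracker.get_diff_from_hash_to_latest(last_known_hash)
        if diff is not None:
            # Small incremental update the agent can apply and verify.
            call = maybeDeferred(
                connection.callRemote, u"apply_diff",
                diff=diff, end_generation=tracker.get_latest_hash())
        else:
            # The agent is too far behind (or unknown): send everything.
            call = maybeDeferred(
                connection.callRemote, u"set_state",
                state=tracker.get_latest(),
                generation=tracker.get_latest_hash())
        d = DeferredContext(call)
        d.addActionFinish()
        # Failures are reported via the Eliot action; don't let them linger
        # as unhandled Deferred errors.
        d.result.addErrback(lambda _: None)
        return d.result


tracker = _Tracker(latest={u"key": u"value"}, latest_hash=u"hash-2")
sent = send_update(_Connection(), tracker, last_known_hash=u"hash-1")
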