Example #1
    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.

        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(maybeDeferred(
                connection.callRemote,
                ClusterStatusCommand,
                configuration=configuration,
                state=state,
                eliot_context=action
            ))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]
        update.response.addCallback(finished_update)
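
A minimal sketch of the pattern this example relies on: start an Eliot action, wrap a call that may raise synchronously in ``maybeDeferred`` inside a ``DeferredContext``, and let ``addActionFinish`` close the action when the Deferred fires. The ``send_request`` callable is a hypothetical stand-in.

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import maybeDeferred

def send_with_logging(send_request):
    # ``send_request`` may return a Deferred or raise synchronously.
    action = start_action(action_type=u"example:send")
    with action.context():
        # maybeDeferred turns a synchronous exception into a Failure, so the
        # action is always finished (successfully or not) by addActionFinish.
        d = DeferredContext(maybeDeferred(send_request))
        d.addActionFinish()
        return d.result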
Example #2
def run_state_change(change, deployer):
    """
    Apply the change to local state.

    :param change: Either an ``IStateChange`` provider or the result of an
        ``in_parallel`` or ``sequentially`` call.
    :param IDeployer deployer: The ``IDeployer`` to use.  Specific
        ``IStateChange`` providers may require specific ``IDeployer`` providers
        that provide relevant functionality for applying the change.

    :return: ``Deferred`` firing when the change is done.
    """
    if isinstance(change, _InParallel):
        return gather_deferreds(list(
            run_state_change(subchange, deployer)
            for subchange in change.changes
        ))
    if isinstance(change, _Sequentially):
        d = succeed(None)
        for subchange in change.changes:
            d.addCallback(
                lambda _, subchange=subchange: run_state_change(
                    subchange, deployer
                )
            )
        return d

    with change.eliot_action.context():
        context = DeferredContext(maybeDeferred(change.run, deployer))
        context.addActionFinish()
        return context.result
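
The ``subchange=subchange`` default argument in the loop above is what binds each iteration's value; without it, every queued callback would see only the last element. A small, Twisted-free illustration of that closure behaviour (toy code, not from the project):

# Closures capture variables, not values: every lambda below sees the final i.
late_bound = [lambda: i for i in range(3)]
assert [f() for f in late_bound] == [2, 2, 2]

# Binding the current value as a default argument freezes it per iteration,
# which is what ``lambda _, subchange=subchange: ...`` does above.
early_bound = [lambda i=i: i for i in range(3)]
assert [f() for f in early_bound] == [0, 1, 2]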
Example #3
    def logger(self, request, **routeArguments):
        logger = _get_logger(self)

        # If this is ever more than ASCII we might have issues? or maybe
        # this is pre-url decoding?
        # https://clusterhq.atlassian.net/browse/FLOC-1602
        action = REQUEST(logger,
                         request_path=request.path,
                         method=request.method)

        # Generate a serialized action context that uniquely identifies
        # position within the logs, though there won't actually be any log
        # message with that particular task level:
        incidentIdentifier = action.serialize_task_id()

        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))

        def failure(reason):
            if reason.check(BadRequest):
                code = reason.value.code
                result = reason.value.result
            else:
                writeFailure(reason, logger, LOG_SYSTEM)
                code = INTERNAL_SERVER_ERROR
                result = incidentIdentifier
            request.setResponseCode(code)
            request.responseHeaders.setRawHeaders(b"content-type",
                                                  [b"application/json"])
            return dumps(result)

        d.addErrback(failure)
        d.addActionFinish()
        return d.result
Example #4
    def logger(self, request, **routeArguments):
        logger = _get_logger(self)

        # If this is ever more than ASCII we might have issues? or maybe
        # this is pre-url decoding?
        # https://clusterhq.atlassian.net/browse/FLOC-1602
        action = REQUEST(logger, request_path=request.path,
                         method=request.method)

        # Generate a serialized action context that uniquely identifies
        # position within the logs, though there won't actually be any log
        # message with that particular task level:
        incidentIdentifier = action.serialize_task_id()

        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))

        def failure(reason):
            if reason.check(BadRequest):
                code = reason.value.code
                result = reason.value.result
            else:
                writeFailure(reason, logger, LOG_SYSTEM)
                code = INTERNAL_SERVER_ERROR
                result = incidentIdentifier
            request.setResponseCode(code)
            request.responseHeaders.setRawHeaders(
                b"content-type", [b"application/json"])
            return dumps(result)
        d.addErrback(failure)
        d.addActionFinish()
        return d.result
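
The serialized task ID produced by ``serialize_task_id()`` (reused above as the incident identifier in error responses) can also be handed to another process and resumed there with ``Action.continue_task``, which is what the later examples that read the ``X-Eliot-Task-Id`` header do. A rough sketch, with ``remote_call`` and ``do_work`` as hypothetical stand-ins:

from eliot import Action, start_action

def caller(remote_call):
    # ``remote_call`` is a hypothetical transport that ships the task ID to
    # another process (the examples here use an AMP field or an HTTP header).
    action = start_action(action_type=u"example:request")
    task_id = action.serialize_task_id()
    with action:
        return remote_call(task_id)

def callee(task_id, do_work):
    # In the receiving process: resume the same Eliot task so log messages
    # from both sides end up in one task tree.
    with Action.continue_task(task_id=task_id):
        return do_work()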
Example #5
def logged_run_process(reactor, command):
    """
    Run a child process, and log the output as we get it.

    :param reactor: An ``IReactorProcess`` to spawn the process on.
    :param command: An argument list specifying the child process to run.

    :return: A ``Deferred`` that calls back with ``_ProcessResult`` if the
        process exited successfully, or errbacks with
        ``_CalledProcessError`` otherwise.
    """
    d = Deferred()
    action = TWISTED_CHILD_PROCESS_ACTION(command=command)
    with action.context():
        d2 = DeferredContext(d)
        protocol = _LoggingProcessProtocol(d, action)
        reactor.spawnProcess(protocol, command[0], command)

        def process_ended((reason, output)):
            status = reason.value.status
            if status:
                raise _CalledProcessError(returncode=status,
                                          cmd=command,
                                          output=output)
            return _ProcessResult(
                command=command,
                status=status,
                output=output,
            )

        d2.addCallback(process_ended)
        d2.addActionFinish()
        return d2.result
Example #6
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(
            self.fsm.logger, cluster_state=self.cluster_state, desired_configuration=self.configuration
        ).context():
            d = DeferredContext(self.deployer.discover_state(known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(self.cluster_state)
            with LOG_SEND_TO_CONTROL_SERVICE(
                self.fsm.logger, connection=self.client, local_changes=list(state_changes)
            ) as context:
                self.client.callRemote(NodeStateCommand, state_changes=state_changes, eliot_context=context)
            action = self.deployer.calculate_changes(self.configuration, self.cluster_state)
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(self.fsm.logger)
            return run_state_change(action, self.deployer)

        d.addCallback(got_local_state)
        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger, u"")

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(lambda _: self.reactor.callLater(1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE))
        d.addActionFinish()
Example #7
 def wrapper(self, *args, **kwargs):
     context = start_action(Logger(), action_type=label, args=args, kwargs=kwargs)
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addCallback(log_result)
         d.addActionFinish()
         return d.result
Example #8
 def stopService(self):
     action = LOG_STOP_SERVICE()
     with action.context():
         self.running = False
         d = DeferredContext(self._endpointService.stopService())
         d.addActionFinish()
         return d.result
Example #9
def run_state_change(change, deployer):
    """
    Apply the change to local state.

    :param change: Either an ``IStateChange`` provider or the result of an
        ``in_parallel`` or ``sequentially`` call.
    :param IDeployer deployer: The ``IDeployer`` to use.  Specific
        ``IStateChange`` providers may require specific ``IDeployer`` providers
        that provide relevant functionality for applying the change.

    :return: ``Deferred`` firing when the change is done.
    """
    if isinstance(change, _InParallel):
        return gather_deferreds(
            list(
                run_state_change(subchange, deployer)
                for subchange in change.changes))
    if isinstance(change, _Sequentially):
        d = succeed(None)
        for subchange in change.changes:
            d.addCallback(lambda _, subchange=subchange: run_state_change(
                subchange, deployer))
        return d

    with change.eliot_action.context():
        context = DeferredContext(maybeDeferred(change.run, deployer))
        context.addActionFinish()
        return context.result
Example #10
def sample(operation, metric, name):
    """
    Perform sampling of the operation.

    :param IOperation operation: An operation to perform.
    :param IMetric metric: A quantity to measure.
    :param int name: Identifier for individual sample.
    :return: Deferred firing with a sample. A sample is a dictionary
        containing a ``success`` boolean.  If ``success is True``, the
        dictionary also contains a ``value`` for the sample measurement.
        If ``success is False``, the dictionary also contains a
        ``reason`` for failure.
    """
    with start_action(action_type=u'flocker:benchmark:sample', sample=name):
        sampling = DeferredContext(maybeDeferred(operation.get_probe))

        def run_probe(probe):
            probing = metric.measure(probe.run)
            probing.addCallbacks(
                lambda interval: dict(success=True, value=interval),
                lambda reason: dict(success=False,
                                    reason=reason.getTraceback()),
            )
            probing.addCallback(bypass, probe.cleanup)

            return probing

        sampling.addCallback(run_probe)
        sampling.addActionFinish()
        return sampling.result
Example #11
    def _stopInstance(self):
        """
        Shutdown the slave and then stop the instance.

        We need to do both, to avoid the following sequence:
        - Shutdown instance
        - Pending build triggers new instance
        - When new slave connects, duplicate slave detection kicks in, causing
          the original slave to disconnect. That disconnect triggers the new
          slave instance to shutdown.
        - Loop.

        https://clusterhq.atlassian.net/browse/FLOC-1938
        """
        with start_action(
                action_type="ondemand_slave:stop_instance",
                slave=self.slavename,
                ).context():
            with start_action(
                    action_type="ondemand_slave:shutdown"
                    ).context():
                d = DeferredContext(self.shutdown())
                timeoutDeferred(reactor, d, 60)
                d = d.addActionFinish()
            d = DeferredContext(d)
            d.addBoth(lambda _: self.instance_booter.stop())
            d.addActionFinish()
Example #12
    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.

        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(
                maybeDeferred(connection.callRemote,
                              ClusterStatusCommand,
                              configuration=configuration,
                              state=state,
                              eliot_context=action))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]

        update.response.addCallback(finished_update)
Example #13
def sample(operation, metric, name):
    """
    Perform sampling of the operation.

    :param IOperation operation: An operation to perform.
    :param IMetric metric: A quantity to measure.
    :param int name: Identifier for individual sample.
    :return: Deferred firing with a sample. A sample is a dictionary
        containing a ``success`` boolean.  If ``success is True``, the
        dictionary also contains a ``value`` for the sample measurement.
        If ``success is False``, the dictionary also contains a
        ``reason`` for failure.
    """
    with start_action(action_type=u'flocker:benchmark:sample', sample=name):
        sampling = DeferredContext(maybeDeferred(operation.get_probe))

        def run_probe(probe):
            probing = metric.measure(probe.run)
            probing.addCallbacks(
                lambda interval: dict(success=True, value=interval),
                lambda reason: dict(
                    success=False, reason=reason.getTraceback()),
            )
            probing.addCallback(bypass, probe.cleanup)

            return probing
        sampling.addCallback(run_probe)
        sampling.addActionFinish()
        return sampling.result
Example #14
def logged_run_process(reactor, command):
    """
    Run a child process, and log the output as we get it.

    :param reactor: An ``IReactorProcess`` to spawn the process on.
    :param command: An argument list specifying the child process to run.

    :return: A ``Deferred`` that calls back with ``_ProcessResult`` if the
        process exited successfully, or errbacks with
        ``_CalledProcessError`` otherwise.
    """
    d = Deferred()
    action = TWISTED_CHILD_PROCESS_ACTION(command=command)
    with action.context():
        d2 = DeferredContext(d)
        protocol = _LoggingProcessProtocol(d, action)
        reactor.spawnProcess(protocol, command[0], command)

        def process_ended((reason, output)):
            status = reason.value.status
            if status:
                raise _CalledProcessError(
                    returncode=status, cmd=command, output=output)
            return _ProcessResult(
                command=command,
                status=status,
                output=output,
            )

        d2.addCallback(process_ended)
        d2.addActionFinish()
        return d2.result
Example #15
    def _stopInstance(self):
        """
        Shutdown the slave and then stop the instance.

        We need to do both, to avoid the following sequence:
        - Shutdown instance
        - Pending build triggers new instance
        - When new slave connects, duplicate slave detection kicks in, causing
          the original slave to disconnect. That disconnect triggers the new
          slave instance to shutdown.
        - Loop.

        https://clusterhq.atlassian.net/browse/FLOC-1938
        """
        with start_action(
                action_type="ondemand_slave:stop_instance",
                slave=self.slavename,
        ).context():
            with start_action(action_type="ondemand_slave:shutdown").context():
                d = DeferredContext(self.shutdown())
                timeoutDeferred(reactor, d, 60)
                d = d.addActionFinish()
            d = DeferredContext(d)
            d.addBoth(lambda _: self.instance_booter.stop())
            d.addActionFinish()
Example #16
File: _aws.py  Project: zendad/flocker
    def _async_get_node(self, reactor, instance, metadata):
        """
        Configure the given AWS instance, wait until it's running
        and create an ``AWSNode`` object for it.

        :param reactor: The reactor.
        :param boto.ec2.instance.Instance instance: The instance to set up.
        :param dict metadata: The metadata to set for the instance.
        :return: Deferred that fires when the instance is ready.
        """
        def instance_error(failure):
            Message.log(
                message_type="flocker:provision:aws:async_get_node:failed")
            instance.terminate()
            write_failure(failure)
            return failure

        action = start_action(
            action_type=u"flocker:provision:aws:async_get_node",
            name=metadata['Name'],
            instance_id=instance.id,
        )
        with action.context():
            d = loop_until(
                reactor,
                lambda: maybeDeferred(self._set_metadata, instance, metadata),
                repeat(5, INSTANCE_TIMEOUT),
            )
            d = DeferredContext(d)
            d.addCallback(
                lambda _: _async_wait_until_running(reactor, instance))
            d.addErrback(instance_error)
            d.addActionFinish()
            return d.result
Example #17
    def _request(self, method, path, body, success_codes, error_codes=None):
        """
        Send an HTTP request to the Flocker API, return decoded JSON body.

        :param bytes method: HTTP method, e.g. PUT.
        :param bytes path: Path to add to base URL.
        :param body: If not ``None``, JSON encode this and send as the
            body of the request.
        :param set success_codes: Expected success response codes.
        :param error_codes: Mapping from HTTP response code to the exception
            to raise when that code is received, or ``None`` if no error
            codes need special handling.

        :return: ``Deferred`` firing with decoded JSON.
        """
        url = self._base_url + path
        action = _LOG_HTTP_REQUEST(url=url, method=method, request_body=body)

        if error_codes is None:
            error_codes = {}

        def error(body, code):
            if code in error_codes:
                raise error_codes[code](body)
            raise ResponseError(code, body)

        def got_result(result):
            if result.code in success_codes:
                action.addSuccessFields(response_code=result.code)
                return json_content(result)
            else:
                d = content(result)
                d.addCallback(error, result.code)
                return d

        # Serialize the current task ID so we can trace logging across
        # processes:
        headers = {b"X-Eliot-Task-Id": action.serialize_task_id()}
        data = None
        if body is not None:
            headers["content-type"] = b"application/json"
            data = dumps(body)

        with action.context():
            request = DeferredContext(
                self._treq.request(
                    method,
                    url,
                    data=data,
                    headers=headers,
                    # Keep tests from having dirty reactor problems:
                    persistent=False))
        request.addCallback(got_result)

        def got_body(json_body):
            action.addSuccessFields(response_body=json_body)
            return json_body

        request.addCallback(got_body)
        request.addActionFinish()
        return request.result
Example #18
File: _loop.py  Project: uedzen/flocker
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(
            self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(self.fsm.logger, cluster_state=self.cluster_state,
                          desired_configuration=self.configuration).context():
            d = DeferredContext(maybeDeferred(
                self.deployer.discover_state, known_local_state))

        def got_local_state(local_state):
            cluster_state_changes = local_state.shared_state_changes()
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            #
            # XXX This somewhat side-steps the whole explicit-state-machine
            # thing we're aiming for here.  It would be better for these state
            # changes to arrive as an input to the state machine.
            for state in cluster_state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state
                )

            # XXX And for this update to be the side-effect of an output
            # resulting.
            sent_state = self._maybe_send_state_to_control_service(
                cluster_state_changes)

            action = self.deployer.calculate_changes(
                self.configuration, self.cluster_state, local_state
            )
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(
                self.fsm.logger)
            ran_state_change = run_state_change(action, self.deployer)
            DeferredContext(ran_state_change).addErrback(
                writeFailure, self.fsm.logger)

            # Wait for the control node to acknowledge the new
            # state, and for the convergence actions to run.
            return gather_deferreds([sent_state, ran_state_change])
        d.addCallback(got_local_state)

        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(
            lambda _:
                self.reactor.callLater(
                    1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE
                )
        )
        d.addActionFinish()
Example #19
 def wrapper(self, *args, **kwargs):
     context = start_action(Logger(),
                            action_type="acceptance:" + function.__name__,
                            args=args, kwargs=kwargs)
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addActionFinish()
         return d.result
Example #20
 def startService(self):
     with start_action(action_type=u"asyncservice:start"):
         self.running = True
         self._d = self._factory()
         d = DeferredContext(self._d)
         d.addCallback(self._created)
         d.addErrback(self._failed)
         d.addActionFinish()
Example #21
    def _request(self, method, path, body, success_codes, error_codes=None):
        """
        Send an HTTP request to the Flocker API, return decoded JSON body.

        :param bytes method: HTTP method, e.g. PUT.
        :param bytes path: Path to add to base URL.
        :param body: If not ``None``, JSON encode this and send as the
            body of the request.
        :param set success_codes: Expected success response codes.
        :param error_codes: Mapping from HTTP response code to the exception
            to raise when that code is received, or ``None`` if no error
            codes need special handling.

        :return: ``Deferred`` firing with decoded JSON.
        """
        url = self._base_url + path
        action = _LOG_HTTP_REQUEST(url=url, method=method, request_body=body)

        if error_codes is None:
            error_codes = {}

        def error(body, code):
            if code in error_codes:
                raise error_codes[code](body)
            raise ResponseError(code, body)

        def got_result(result):
            if result.code in success_codes:
                action.addSuccessFields(response_code=result.code)
                return json_content(result)
            else:
                d = content(result)
                d.addCallback(error, result.code)
                return d

        # Serialize the current task ID so we can trace logging across
        # processes:
        headers = {b"X-Eliot-Task-Id": action.serialize_task_id()}
        data = None
        if body is not None:
            headers["content-type"] = b"application/json"
            data = dumps(body)

        with action.context():
            request = DeferredContext(self._treq.request(
                method, url,
                data=data, headers=headers,
                # Keep tests from having dirty reactor problems:
                persistent=False
                ))
        request.addCallback(got_result)

        def got_body(json_body):
            action.addSuccessFields(response_body=json_body)
            return json_body
        request.addCallback(got_body)
        request.addActionFinish()
        return request.result
Example #22
File: _loop.py  Project: jongiddy/flocker
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(
            self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(self.fsm.logger,
                          cluster_state=self.cluster_state,
                          desired_configuration=self.configuration).context():
            d = DeferredContext(
                maybeDeferred(self.deployer.discover_state, known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            #
            # XXX This somewhat side-steps the whole explicit-state-machine
            # thing we're aiming for here.  It would be better for these state
            # changes to arrive as an input to the state machine.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state)

            # XXX And for this update to be the side-effect of an output
            # resulting.
            sent_state = self._maybe_send_state_to_control_service(
                state_changes)

            action = self.deployer.calculate_changes(self.configuration,
                                                     self.cluster_state)
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(
                self.fsm.logger)
            ran_state_change = run_state_change(action, self.deployer)
            DeferredContext(ran_state_change).addErrback(
                writeFailure, self.fsm.logger)

            # Wait for the control node to acknowledge the new
            # state, and for the convergence actions to run.
            return gather_deferreds([sent_state, ran_state_change])

        d.addCallback(got_local_state)

        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(lambda _: self.reactor.callLater(
            1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE))
        d.addActionFinish()
Example #23
 def wrapper(self, *args, **kwargs):
     context = start_action(
         Logger(),
         action_type=label,
         args=args,
         kwargs=kwargs,
     )
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addCallback(log_result)
         d.addActionFinish()
         return d.result
Example #24
    def output_START(self, context):
        """
        Create a node.
        """
        action = start_action(
            action_type="flocker_bb:ec2:start",
            name=self.identifier())
        with action.context():

            def thread_start(task_id):
                # Since loading the image metadata is done separately from
                # booting the node, it's possible the metadata here won't
                # actually match the metadata of the image the node ends up
                # running (someone could replace the image with a different one
                # between this call and the node being started).  Since
                # generating images is currently a manual step, this probably
                # won't happen very often and if it does there's a person there
                # who can deal with it.  Also it will be resolved after the
                # next node restart.  It would be better to extract the image
                # metadata from the booted node, though.
                # FLOC-1905
                with Action.continue_task(task_id=task_id):
                    self.image_metadata = self.driver.get_image_metadata()
                    return self.driver.create()

            d = DeferredContext(
                deferToThread(thread_start, action.serialize_task_id())
            )

            def started(node):
                self.node = node
                instance_metadata = {
                    'instance_id': node.id,
                    'instance_name': node.name,
                }
                self._fsm.receive(InstanceStarted(
                    instance_id=node.id,
                    image_metadata=self.image_metadata,
                    instance_metadata=instance_metadata,
                ))
                self.instance_metadata = instance_metadata

            def failed(f):
                # We log the exception twice.
                # For Zulip
                log.err(f, "while starting %s" % (self.identifier(),))
                self._fsm.receive(StartFailed())
                # For eliot
                return f

            d.addCallbacks(started, failed)
            d.addActionFinish()
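
The ``serialize_task_id`` / ``continue_task`` pairing used by ``thread_start`` above works across threads as well as processes. A trimmed-down sketch, with ``blocking_work`` as a hypothetical callable:

from eliot import Action, start_action
from eliot.twisted import DeferredContext
from twisted.internet.threads import deferToThread

def run_in_thread(blocking_work):
    action = start_action(action_type=u"example:threaded")
    with action.context():
        def in_thread(task_id):
            # Runs in a worker thread; continuing the serialized task keeps
            # the threaded log messages attached to the same Eliot task.
            with Action.continue_task(task_id=task_id):
                return blocking_work()

        d = DeferredContext(deferToThread(in_thread, action.serialize_task_id()))
        d.addActionFinish()
        return d.result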
Example #25
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return: A ``Deferred`` that fires when the scenario has stopped.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. "
                "Waiting {num_seconds} seconds for them to complete."
            ).format(
                num_requests=outstanding_requests,
                num_seconds=self.timeout
            )
            Message.log(key='outstanding_requests', value=msg)

        with start_action(
            action_type=u'flocker:benchmark:scenario:stop',
            scenario='request_load'
        ):
            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = (
                    "Force stopping the scenario. "
                    "There are {num_requests} outstanding requests"
                ).format(
                    num_requests=outstanding_requests
                )
                Message.log(key='force_stop_request', value=msg)

            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor,
                                          no_outstanding_requests,
                                          repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario_stopped.addErrback(handle_timeout)

            scenario = DeferredContext(scenario_stopped)
            scenario.addActionFinish()
            return scenario.result
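
A compact sketch of the wait-with-timeout pattern above: poll until no requests are outstanding, cancel after a deadline, and trap the resulting ``CancelledError``. The ``loop_until`` and ``timeout`` helpers are passed in rather than imported, since the example takes them from flocker's own utilities:

from itertools import repeat

from eliot import Message, start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import CancelledError

def wait_until_quiet(reactor, outstanding, max_seconds, loop_until, timeout):
    # Wait for ``outstanding()`` to drop to zero, giving up (with a log
    # message) after ``max_seconds``.
    with start_action(action_type=u"example:wait-until-quiet").context():
        done = loop_until(reactor, lambda: outstanding() == 0, repeat(1))
        timeout(reactor, done, max_seconds)

        def gave_up(failure):
            failure.trap(CancelledError)
            Message.log(key=u"force_stop", value=u"gave up waiting for quiet")

        d = DeferredContext(done)
        d.addErrback(gave_up)
        return d.addActionFinish()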
Example #26
    def logger(self, request, **routeArguments):
        serialized_remote_task = request.requestHeaders.getRawHeaders("X-Eliot-Task-Id", [None])[0]
        if serialized_remote_task is None:
            return original(self, request, **routeArguments)

        try:
            action = Action.continue_task(task_id=serialized_remote_task)
        except ValueError:
            return original(self, request, **routeArguments)
        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))
            d.addActionFinish()
            return d.result
Example #27
    def logger(self, request, **routeArguments):
        serialized_remote_task = request.requestHeaders.getRawHeaders(
            "X-Eliot-Task-Id", [None])[0]
        if serialized_remote_task is None:
            return original(self, request, **routeArguments)

        try:
            action = Action.continue_task(task_id=serialized_remote_task)
        except ValueError:
            return original(self, request, **routeArguments)
        with action.context():
            d = DeferredContext(original(self, request, **routeArguments))
            d.addActionFinish()
            return d.result
Example #28
File: _effect.py  Project: zendad/flocker
def treq_get(dispatcher, intent):
    """
    Performer to execute an HTTP GET.

    :param dispatcher: The dispatcher used to dispatch this performance.
    :param HTTPGet intent: The intent to be performed.
    """
    action = startAction(action_type=u"flocker:provision:_effect:treq_get")
    with action.context():
        Message.log(url=intent.url)
        # Do not use persistent HTTP connections, because they will not be
        # cleaned up by the end of the test.
        d = DeferredContext(get(intent.url, persistent=False))
        d.addActionFinish()
        return d.result
Example #29
 def do_join(self, client_num, local_dir, invite_code):
     action = start_action(
         action_type=u"join-magic-folder",
         client_num=client_num,
         local_dir=local_dir,
         invite_code=invite_code,
     )
     with action.context():
         precondition(isinstance(local_dir, unicode), local_dir=local_dir)
         precondition(isinstance(invite_code, str), invite_code=invite_code)
         local_dir_arg = unicode_to_argv(local_dir)
         d = DeferredContext(
             self.do_cli(
                 "magic-folder",
                 "join",
                 invite_code,
                 local_dir_arg,
                 client_num=client_num,
             )
         )
     def _done(args):
         (rc, stdout, stderr) = args
         self.failUnlessEqual(rc, 0)
         self.failUnlessEqual(stdout, "")
         self.failUnlessEqual(stderr, "")
         return (rc, stdout, stderr)
     d.addCallback(_done)
     return d.addActionFinish()
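
``DeferredContext.addActionFinish()`` returns the wrapped ``Deferred``, so ``return d.addActionFinish()`` here is interchangeable with the ``d.addActionFinish(); return d.result`` spelling used in earlier examples. A minimal sketch of both, assuming a hypothetical ``operation`` that returns a ``Deferred``:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import maybeDeferred

def spelling_one(operation):
    with start_action(action_type=u"example:op").context():
        d = DeferredContext(maybeDeferred(operation))
        d.addActionFinish()
        return d.result

def spelling_two(operation):
    with start_action(action_type=u"example:op").context():
        d = DeferredContext(maybeDeferred(operation))
        # addActionFinish() hands back the underlying Deferred directly.
        return d.addActionFinish()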
Example #30
File: _loop.py  Project: jongiddy/flocker
    def _send_state_to_control_service(self, state_changes):
        context = LOG_SEND_TO_CONTROL_SERVICE(
            self.fsm.logger,
            connection=self.client,
            local_changes=list(state_changes),
        )
        with context.context():
            d = DeferredContext(
                self.client.callRemote(NodeStateCommand,
                                       state_changes=state_changes,
                                       eliot_context=context))

            def record_acknowledged_state(ignored):
                self._last_acknowledged_state = state_changes

            def clear_acknowledged_state(failure):
                # We don't know if the control service has processed the update
                # or not. So we clear the last acknowledged state so that we
                # always send the state on the next iteration.
                self._last_acknowledged_state = None
                return failure

            d.addCallbacks(record_acknowledged_state, clear_acknowledged_state)
            d.addErrback(writeFailure, self.fsm.logger,
                         u"Failed to send local state to control node.")
            return d.addActionFinish()
Example #31
 def check_joined_config(self, client_num, upload_dircap):
     """Tests that our collective directory has the readonly cap of
     our upload directory.
     """
     action = start_action(action_type=u"check-joined-config")
     with action.context():
         collective_readonly_cap = self.get_caps_from_files(client_num)[0]
         d = DeferredContext(
             self.do_cli(
                 "ls", "--json",
                 collective_readonly_cap,
                 client_num=client_num,
             )
         )
     def _done(args):
         (rc, stdout, stderr) = args
         self.failUnlessEqual(rc, 0)
         return (rc, stdout, stderr)
     d.addCallback(_done)
     def test_joined_magic_folder(args):
         (rc, stdout, stderr) = args
         readonly_cap = unicode(uri.from_string(upload_dircap).get_readonly().to_string(), 'utf-8')
         s = re.search(readonly_cap, stdout)
         self.failUnless(s is not None)
         return None
     d.addCallback(test_joined_magic_folder)
     return d.addActionFinish()
Example #32
    def do_join(self, client_num, local_dir, invite_code):
        action = start_action(
            action_type=u"join-magic-folder",
            client_num=client_num,
            local_dir=local_dir,
            invite_code=invite_code,
        )
        with action.context():
            precondition(isinstance(local_dir, unicode), local_dir=local_dir)
            precondition(isinstance(invite_code, str), invite_code=invite_code)
            local_dir_arg = unicode_to_argv(local_dir)
            d = DeferredContext(
                self.do_cli(
                    "magic-folder",
                    "join",
                    "--author",
                    "test-dummy",
                    invite_code,
                    local_dir_arg,
                    client_num=client_num,
                ))

        def _done(args):
            (rc, stdout, stderr) = args
            self.assertEqual(rc, 0)
            self.assertEqual(stdout, "")
            self.assertEqual(stderr, "")
            return (rc, stdout, stderr)

        d.addCallback(_done)
        return d.addActionFinish()
Example #33
def _init_magic_folder(reactor, request, temp_dir, name, web_port):
    """
    Create a new magic-folder-daemon configuration

    :param reactor: The reactor to use to launch the process.
    :param request: The pytest request object to use for cleanup.
    :param temp_dir: The directory in which to find a Tahoe-LAFS node.
    :param name: The alias of the Tahoe-LAFS node.
    :param web_port: The endpoint description string passed to the daemon as
        its ``--listen-endpoint``.

    :return Deferred[IProcessTransport]: The started process.
    """
    node_dir = join(temp_dir, name)
    config_dir = join(temp_dir, "magic-daemon-{}".format(name))
    # proto = _ProcessExitedProtocol()
    proto = _CollectOutputProtocol()

    coverage = request.config.getoption('coverage')

    def optional(flag, elements):
        if flag:
            return elements
        return []

    args = [
        sys.executable,
        "-m",
    ] + optional(coverage, [
        "coverage",
        "run",
        "-m",
    ]) + [
        "magic_folder",
    ] + optional(coverage, [
        "--coverage",
    ]) + [
        "--config",
        config_dir,
        "init",
        "--node-directory",
        node_dir,
        "--listen-endpoint",
        web_port,
    ]
    Message.log(
        message_type=u"integration:init-magic-folder",
        coverage=coverage,
        args=args,
    )
    transport = reactor.spawnProcess(
        proto,
        sys.executable,
        args,
    )

    request.addfinalizer(partial(_cleanup_tahoe_process, transport,
                                 proto.done))
    with start_action(action_type=u"integration:init-magic-folder").context():
        ctx = DeferredContext(proto.done)
        ctx.addCallback(lambda ignored: transport)
        return ctx.addActionFinish()
Example #34
def get_hosted_zone_by_name(route53, name):
    """
    Get a ``HostedZone`` with a zone name matching ``name``.

    :param route53: A txaws Route53 client.

    :param txaws.route53.model.Name name: The zone name to look for.

    :raise KeyError: If no matching hosted zone is found.

    :return Deferred(HostedZone): The hosted zone with a matching name.
    """
    action = start_action(action_type=u"get-hosted-zone")
    with action.context():
        d = DeferredContext(route53.list_hosted_zones())
        def filter_results(zones):
            Message.log(zone_names=list(zone.name for zone in zones))
            for zone in zones:
                # XXX Bleuch zone.name should be a Name!
                if Name(zone.name) == name:
                    d = _load_all_rrsets(route53, zone.identifier)
                    d.addCallback(
                        lambda rrsets, zone=zone: _ZoneState(
                            zone=zone,
                            rrsets=rrsets,
                        ),
                    )
                    return d
            raise KeyError(name)
        d.addCallback(filter_results)
        return d.addActionFinish()
Example #35
File: _loop.py  Project: uedzen/flocker
    def _send_state_to_control_service(self, state_changes):
        context = LOG_SEND_TO_CONTROL_SERVICE(
            self.fsm.logger, connection=self.client,
            local_changes=list(state_changes),
        )
        with context.context():
            d = DeferredContext(self.client.callRemote(
                NodeStateCommand,
                state_changes=state_changes,
                eliot_context=context)
            )

            def record_acknowledged_state(ignored):
                self._last_acknowledged_state = state_changes

            def clear_acknowledged_state(failure):
                # We don't know if the control service has processed the update
                # or not. So we clear the last acknowledged state so that we
                # always send the state on the next iteration.
                self._last_acknowledged_state = None
                return failure

            d.addCallbacks(record_acknowledged_state, clear_acknowledged_state)
            d.addErrback(
                writeFailure, self.fsm.logger,
                u"Failed to send local state to control node.")
            return d.addActionFinish()
Example #36
def _get_converge_inputs(config, subscriptions, k8s, aws):
    a = start_action(action_type=u"load-converge-inputs")
    with a.context():
        d = DeferredContext(
            gatherResults([
                get_active_subscriptions(subscriptions),
                get_customer_grid_configmaps(k8s, config.kubernetes_namespace),
                get_customer_grid_deployments(k8s, config.kubernetes_namespace),
                get_customer_grid_replicasets(k8s, config.kubernetes_namespace),
                get_customer_grid_pods(k8s, config.kubernetes_namespace),
                get_customer_grid_service(k8s, config.kubernetes_namespace),
                get_hosted_zone_by_name(aws.get_route53_client(), Name(config.domain)),
            ]),
        )
        d.addCallback(
            lambda state: _State(**dict(
                zip([
                    u"subscriptions",
                    u"configmaps",
                    u"deployments",
                    u"replicasets",
                    u"pods",
                    u"service",
                    u"zone",
                ], state,
                ),
            )),
        )
        return d.addActionFinish()
Example #37
def _get_converge_inputs(config, subscriptions, k8s, aws):
    a = start_action(action_type=u"load-converge-inputs")
    with a.context():
        d = DeferredContext(
            gatherResults([
                get_active_subscriptions(subscriptions),
                get_customer_grid_configmaps(k8s, config.kubernetes_namespace),
                get_customer_grid_deployments(k8s, config.kubernetes_namespace),
                get_customer_grid_replicasets(k8s, config.kubernetes_namespace),
                get_customer_grid_pods(k8s, config.kubernetes_namespace),
                get_customer_grid_service(k8s, config.kubernetes_namespace),
                get_hosted_zone_by_name(aws.get_route53_client(), Name(config.domain)),
            ]),
        )
        d.addCallback(
            lambda state: _State(**dict(
                zip([
                    u"subscriptions",
                    u"configmaps",
                    u"deployments",
                    u"replicasets",
                    u"pods",
                    u"service",
                    u"zone",
                ], state,
                ),
            )),
        )
        return d.addActionFinish()
Example #38
def proxy(upstream, endpoint, header):
    """
    Establish a new connection to ``endpoint`` and begin proxying between that
    connection and ``upstream``.

    :param IProtocol upstream: A connected protocol.  All data received by
        this protocol from this point on will be sent along to another newly
        established connection.

    :param IStreamClientEndpoint endpoint: An endpoint to use to establish a
        new connection.  All data received over this connection will be sent
        along to the upstream connection.

    :param bytes header: Some extra data to write to the new downstream
        connection before proxying begins.
    """
    def failed(reason):
        upstream.transport.resumeProducing()
        upstream.transport.abortConnection()
        return reason

    upstream.transport.pauseProducing()

    peer = upstream.transport.getPeer()
    action = start_action(action_type=u"grid-router:proxy",
                          **{u"from": (peer.host, peer.port)})
    with action.context():
        d = DeferredContext(endpoint.connect(Factory.forProtocol(_Proxy)))
        d.addCallbacks(
            lambda downstream: DeferredContext(
                downstream.take_over(upstream, header)),
            failed,
        )
        return d.addActionFinish()
Example #39
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return Deferred[Optional[Dict[unicode, Any]]]: Scenario metrics.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. " "Waiting {num_seconds} seconds for them to complete."
            ).format(num_requests=outstanding_requests, num_seconds=self.timeout)
            Message.log(key="outstanding_requests", value=msg)

        with start_action(action_type=u"flocker:benchmark:scenario:stop", scenario="request_load"):

            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor, no_outstanding_requests, repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario = DeferredContext(scenario_stopped)

            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = ("Force stopping the scenario. " "There are {num_requests} outstanding requests").format(
                    num_requests=outstanding_requests
                )
                Message.log(key="force_stop_request", value=msg)

            scenario.addErrback(handle_timeout)

            def scenario_cleanup(ignored):
                """
                Calls the scenario cleanup, and wraps it inside an eliot
                start action, so we can see the logs if something goes
                wrong within the cleanup

                :return Deferred: that will fire once the cleanup has been
                    completed
                """
                with start_action(action_type=u"flocker:benchmark:scenario:cleanup", scenario="request_load"):
                    return self.request.run_cleanup()

            scenario.addBoth(scenario_cleanup)

            def return_metrics(_ignore):
                return self.rate_measurer.get_metrics()

            scenario.addCallback(return_metrics)

            return scenario.addActionFinish()
Example #40
    def do_create_magic_folder(self, client_num):
        with start_action(action_type=u"create-magic-folder",
                          client_num=client_num).context():
            d = DeferredContext(
                self.do_cli(
                    "magic-folder",
                    "--debug",
                    "create",
                    "magic:",
                    client_num=client_num,
                ))

        def _done(args):
            (rc, stdout, stderr) = args
            self.failUnlessEqual(rc, 0, stdout + stderr)
            self.assertIn("Alias 'magic' created", stdout)
            #            self.failUnlessIn("joined new magic-folder", stdout)
            #            self.failUnlessIn("Successfully created magic-folder", stdout)
            self.failUnlessEqual(stderr, "")
            aliases = get_aliases(self.get_clientdir(i=client_num))
            self.assertIn("magic", aliases)
            self.failUnless(aliases["magic"].startswith("URI:DIR2:"))

        d.addCallback(_done)
        return d.addActionFinish()
Example #41
    def g(self, request):
        # Bind the method to the instance so it has a better
        # fullyQualifiedName later on.  This is not necessary on Python 3.
        bound_render = render.__get__(self, type(self))

        action = start_action(
            action_type=u"allmydata:web:common-render",
            uri=request.uri,
            method=request.method,
            handler=fullyQualifiedName(bound_render),
        )
        if getattr(request, "dont_apply_extra_processing", False):
            with action:
                return bound_render(request)

        with action.context():
            result = DeferredContext(maybeDeferred(bound_render, request))
            # Apply ``_finish``, all of our result handling logic, to whatever
            # it returned.
            result.addBoth(_finish, bound_render, request)
            d = result.addActionFinish()

        # If the connection is lost then there's no point running our _finish
        # logic because it has nowhere to send anything.  There may also be no
        # point in finishing whatever operation was being performed because
        # the client cannot be informed of its result.  Also, Twisted Web
        # raises exceptions from some Request methods if they're used after
        # the connection is lost.
        request.notifyFinish().addErrback(lambda ignored: d.cancel(), )
        return NOT_DONE_YET
Example #42
def create_user_bucket(reactor, client, bucketname):
    """
    Create an S3 bucket for a user's grid.

    If S3 errors are encountered, retries will be attempted.  If too
    many errors are encountered, this will give up and return a
    failure.

    @param reactor: An IReactorTime which can be used to schedule retries.
    @param client: A txaws.s3.client.S3Client which can be used to create the bucket.
    @param bucketname: The name of the S3 bucket to create.

    @return: A Deferred that fires when the bucket has been created or
        fails when too many errors are encountered.
    """
    action = startAction(
        action_type=u"initialize:create_user_bucket",
        name=bucketname,
    )
    with action.context():
        d = DeferredContext(
            retry_failure(
                reactor,
                lambda: client.create_bucket(bucketname),
                expected=[S3Error],
                steps=backoff(),
            ),
        )
        return d.addActionFinish()
Example #43
def run_state_change(change, deployer):
    """
    Apply the change to local state.

    :param change: Either an ``IStateChange`` provider or the result of an
        ``in_parallel`` or ``sequentially`` call.
    :param IDeployer deployer: The ``IDeployer`` to use.  Specific
        ``IStateChange`` providers may require specific ``IDeployer`` providers
        that provide relevant functionality for applying the change.

    :return: ``Deferred`` firing when the change is done.
    """
    with change.eliot_action.context():
        context = DeferredContext(maybeDeferred(change.run, deployer))
        context.addActionFinish()
        return context.result
Example #44
    def check_joined_config(self, client_num, upload_dircap):
        """Tests that our collective directory has the readonly cap of
        our upload directory.
        """
        action = start_action(action_type=u"check-joined-config")
        with action.context():
            collective_readonly_cap = self.get_caps_from_files(client_num)[0]
            d = DeferredContext(
                self.do_cli(
                    "ls",
                    "--json",
                    collective_readonly_cap,
                    client_num=client_num,
                ))

        def _done(args):
            (rc, stdout, stderr) = args
            self.assertEqual(rc, 0)
            return (rc, stdout, stderr)

        d.addCallback(_done)

        def test_joined_magic_folder(args):
            (rc, stdout, stderr) = args
            readonly_cap = unicode(
                uri.from_string(upload_dircap).get_readonly().to_string(),
                'utf-8')
            s = re.search(readonly_cap, stdout)
            self.assertTrue(s is not None)
            return None

        d.addCallback(test_joined_magic_folder)
        return d.addActionFinish()
Example #45
        def got_some_rrsets(rrsets):
            accum.update(rrsets)
            if len(rrsets) < 100:
                # Fewer results than we asked for means we must be on the last
                # page.
                return accum

            # Otherwise, ask for the next page.  We do this slightly wrong, using
            # max(rrsets) as the starting key because txaws does not give us
            # access to the correct values from the response -
            # NextRecordIdentifier and NextRecordType.  This just means we'll load
            # one duplicate item on each page.  They all go into the dict so it
            # doesn't affect correctness.
            maxkey = max(rrsets)
            # Make sure we also preserve the Eliot context for callbacks of
            # this next Deferred.
            with start_action(action_type="load-next-page").context():
                d = DeferredContext(_load_all_rrsets(
                    route53,
                    zone_identifier,
                    name=maxkey.label,
                    type=maxkey.type,
                    accum=accum,
                ))
                return d.addActionFinish()
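
The one-duplicate-per-page behaviour described in the comment above is harmless because pages are merged into a dict, so re-adding an already-seen key simply overwrites it. A toy illustration with hypothetical data:

# Two pages with one overlapping key, mimicking the duplicate item loaded
# when the next page starts from max(rrsets).
page_one = {("a.example.", u"A"): ["192.0.2.1"]}
page_two = {("a.example.", u"A"): ["192.0.2.1"],   # duplicate of the last item
            ("b.example.", u"A"): ["192.0.2.2"]}

accum = {}
for page in (page_one, page_two):
    accum.update(page)

# The overlap collapses; correctness is unaffected.
assert len(accum) == 2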
Example #46
def get_hosted_zone_by_name(route53, name):
    """
    Get a ``HostedZone`` with a zone name matching ``name``.

    :param route53: A txaws Route53 client.

    :param txaws.route53.model.Name name: The zone name to look for.

    :raise KeyError: If no matching hosted zone is found.

    :return Deferred(HostedZone): The hosted zone with a matching name.
    """
    action = start_action(action_type=u"get-hosted-zone")
    with action.context():
        d = DeferredContext(route53.list_hosted_zones())
        def filter_results(zones):
            Message.log(zone_names=list(zone.name for zone in zones))
            for zone in zones:
                # XXX Bleuch zone.name should be a Name!
                if Name(zone.name) == name:
                    d = route53.list_resource_record_sets(zone_id=zone.identifier)
                    d.addCallback(
                        lambda rrsets, zone=zone: _ZoneState(
                            zone=zone,
                            rrsets=rrsets,
                        ),
                    )
                    return d
            raise KeyError(name)
        d.addCallback(filter_results)
        return d.addActionFinish()
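A brief usage sketch: look a zone up by name and handle the miss. ``route53`` is assumed to be an already-configured txaws client, and the zone name is a placeholder.

from txaws.route53.model import Name

d = get_hosted_zone_by_name(route53, Name(u"example.invalid."))

def no_such_zone(reason):
    # The KeyError raised in filter_results arrives here as a Failure.
    reason.trap(KeyError)
    return None

d.addCallbacks(
    lambda zone_state: (zone_state.zone, zone_state.rrsets),
    no_such_zone,
)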
示例#47
0
def converge(config, subscriptions, k8s, aws):
    """
    Bring provisioned resources in line with active subscriptions.

    :param DeploymentConfig config: S4-global configuration necessary for
        provisioning resources.

    :param subscription_manager.Client subscriptions: A client for
        interrogating the subscriptions database.

    :param txkube.IKubernetesClient k8s: A client for interacting with
        Kubernetes.

    :param AWSServiceRegion aws: A client for interacting with AWS.

    :return Deferred(NoneType): The returned ``Deferred`` fires after one
        attempt has been made to bring the actual state of provisioned
        resources in line with the desired state of provisioned resources
        based on the currently active subscriptions.
    """
    # Create and destroy deployments as necessary.  Use the
    # subscription manager to find out what subscriptions are active
    # and look at the Kubernetes configuration to find out what
    # subscription-derived deployments exist.  Also detect port
    # mis-configurations and correct them.
    a = start_action(action_type=u"converge")
    with a.context():
        d = DeferredContext(_get_converge_inputs(config, subscriptions, k8s, aws))
        d.addCallback(_converge_logic, config, subscriptions, k8s, aws)
        d.addCallback(_execute_converge_outputs)
        d.addCallback(lambda result: None)
        return d.addActionFinish()
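A converge pass like this is typically driven on a timer; a hedged sketch using Twisted's LoopingCall (the helper name and the 60-second interval are arbitrary placeholders):

from twisted.internet.task import LoopingCall

def start_convergence_loop(reactor, config, subscriptions, k8s, aws, interval=60.0):
    # Run one converge() pass every ``interval`` seconds.  LoopingCall waits
    # for the returned Deferred before scheduling the next pass, so passes
    # never overlap.
    loop = LoopingCall(converge, config, subscriptions, k8s, aws)
    loop.clock = reactor
    return loop.start(interval, now=True)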
示例#48
0
def does_not_exist(client, obj):
    """
    Wait for ``obj`` to not exist.

    :param IKubernetesClient client: The client to use to determine existence.

    :param IObject obj: The object whose existence to check.

    :return: A ``Deferred`` that fires when the object does not exist.
    """
    action = start_action(action_type=u"poll:start", obj=iobject_to_raw(obj))
    with action.context():
        from twisted.internet import reactor
        d = poll(reactor, action, lambda: client.get(obj), repeat(0.5, 100))
        d = DeferredContext(d)

        # Once we get a NOT FOUND error, we're done.
        def trap_not_found(reason):
            reason.trap(KubernetesError)
            if reason.value.code == NOT_FOUND:
                return None
            return reason

        d.addErrback(trap_not_found)

        def trap_stop_iteration(reason):
            reason.trap(StopIteration)
            Message.log(does_not_exist=u"timeout", obj=iobject_to_raw(obj))
            return None

        d.addErrback(trap_stop_iteration)
        return d.addActionFinish()
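A usage sketch: request deletion and then wait until the API server agrees the object is gone. Whether the txkube client exposes delete() with this shape is an assumption.

def delete_and_wait(client, obj):
    # Hedged sketch: issue the delete, then poll with does_not_exist() until
    # the server answers NOT FOUND (or the poll above times out).
    d = client.delete(obj)
    d.addCallback(lambda ignored: does_not_exist(client, obj))
    return d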
示例#49
0
def run_state_change(change, deployer):
    """
    Apply the change to local state.

    :param change: Either an ``IStateChange`` provider or the result of an
        ``in_parallel`` or ``sequentially`` call.
    :param IDeployer deployer: The ``IDeployer`` to use.  Specific
        ``IStateChange`` providers may require specific ``IDeployer`` providers
        that provide relevant functionality for applying the change.

    :return: ``Deferred`` firing when the change is done.
    """
    with change.eliot_action.context():
        context = DeferredContext(maybeDeferred(change.run, deployer))
        context.addActionFinish()
        return context.result
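For reference, a hedged sketch of the minimal shape run_state_change relies on: an eliot_action and a run(deployer) method returning a Deferred. The real IStateChange interface may require more than this.

from eliot import start_action
from twisted.internet.defer import succeed

class NoOpChange(object):
    """
    Minimal stand-in for an ``IStateChange`` provider, for illustration only.
    """
    @property
    def eliot_action(self):
        return start_action(action_type=u"example:no-op-change")

    def run(self, deployer):
        # A real change would use ``deployer`` to modify local state here.
        return succeed(None)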
示例#50
0
    def wrapper(self, *args, **kwargs):

        serializable_args = tuple(_ensure_encodeable(a) for a in args)
        serializable_kwargs = {}
        for kwarg in kwargs:
            serializable_kwargs[kwarg] = _ensure_encodeable(kwargs[kwarg])

        context = start_action(
            action_type=label,
            args=serializable_args, kwargs=serializable_kwargs,
        )
        with context.context():
            d = DeferredContext(function(self, *args, **kwargs))
            d.addCallback(log_result, context)
            d.addActionFinish()
            return d.result
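The decorator that produces this wrapper is not shown; a hedged reconstruction of what it plausibly looks like, reusing the _ensure_encodeable and log_result helpers referenced above (their definitions are not part of this example, and the factory name is hypothetical):

from functools import wraps

from eliot import start_action
from eliot.twisted import DeferredContext

def logged_method(label):
    # Hypothetical decorator factory: ``label`` becomes the Eliot action_type
    # and the decorated method's arguments are logged once made encodeable.
    def decorator(function):
        @wraps(function)
        def wrapper(self, *args, **kwargs):
            serializable_args = tuple(_ensure_encodeable(a) for a in args)
            serializable_kwargs = dict(
                (key, _ensure_encodeable(value))
                for key, value in kwargs.items()
            )
            context = start_action(
                action_type=label,
                args=serializable_args, kwargs=serializable_kwargs,
            )
            with context.context():
                d = DeferredContext(function(self, *args, **kwargs))
                d.addCallback(log_result, context)
                d.addActionFinish()
                return d.result
        return wrapper
    return decorator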
示例#51
0
def converge(config, subscriptions, k8s, aws):
    """
    Bring provisioned resources in line with active subscriptions.

    :param DeploymentConfig config: S4-global configuration necessary for
        provisioning resources.

    :param subscription_manager.Client subscriptions: A client for
        interrogating the subscriptions database.

    :param txkube.IKubernetesClient k8s: A client for interacting with
        Kubernetes.

    :param AWSServiceRegion aws: A client for interacting with AWS.

    :return Deferred(NoneType): The returned ``Deferred`` fires after one
        attempt has been made to bring the actual state of provisioned
        resources in line with the desired state of provisioned resources
        based on the currently active subscriptions.
    """
    # Create and destroy deployments as necessary.  Use the
    # subscription manager to find out what subscriptions are active
    # and look at the Kubernetes configuration to find out what
    # subscription-derived deployments exist.  Also detect port
    # mis-configurations and correct them.
    a = start_action(action_type=u"converge")
    with a.context():
        d = DeferredContext(_get_converge_inputs(config, subscriptions, k8s, aws))
        d.addCallback(_converge_logic, config, subscriptions, k8s, aws)
        d.addCallback(_execute_converge_outputs)
        d.addCallback(lambda result: None)
        return d.addActionFinish()
示例#52
0
def proxy(upstream, endpoint, header):
    """
    Establish a new connection to ``endpoint`` and begin proxying between that
    connection and ``upstream``.

    :param IProtocol upstream: A connected protocol.  All data received by
        this protocol from this point on will be sent along to another newly
        established connection.

    :param IStreamClientEndpoint endpoint: An endpoint to use to establish a
        new connection.  All data received over this connection will be sent
        along to the upstream connection.

    :param bytes header: Some extra data to write to the new downstream
        connection before proxying begins.
    """
    def failed(reason):
        upstream.transport.resumeProducing()
        upstream.transport.abortConnection()
        return reason

    upstream.transport.pauseProducing()

    peer = upstream.transport.getPeer()
    action = start_action(
        action_type=u"grid-router:proxy",
        **{u"from": (peer.host, peer.port)}
    )
    with action.context():
        d = DeferredContext(endpoint.connect(Factory.forProtocol(_Proxy)))
        d.addCallbacks(
            lambda downstream: DeferredContext(downstream.take_over(upstream, header)),
            failed,
        )
        return d.addActionFinish()
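A hedged sketch of how proxy() might be hooked up on the listening side; the downstream address and empty header are placeholders, and a real router would presumably pick the destination from the connection itself.

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, Protocol

class _Incoming(Protocol):
    # Placeholder upstream protocol: as soon as a client connects, splice the
    # connection to a downstream server via proxy() above.
    def connectionMade(self):
        downstream = TCP4ClientEndpoint(reactor, "127.0.0.1", 8098)
        proxy(self, downstream, header=b"")

# e.g. reactor.listenTCP(8099, Factory.forProtocol(_Incoming))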
示例#53
0
        def api_clean_state(
            name,
            configuration_method,
            state_method,
            delete_method,
        ):
            """
            Clean entities from the cluster.

            :param unicode name: The name of the entities to clean.
            :param configuration_method: The function to obtain the configured
                entities.
            :param state_method: The function to get the current entities.
            :param delete_method: The method to delete an entity.

            :return: A `Deferred` that fires when the entities have been
                deleted.
            """
            context = start_action(action_type=u"acceptance:cleanup_" + name)
            with context.context():
                get_items = DeferredContext(configuration_method())

                def delete_items(items):
                    return gather_deferreds(
                        list(delete_method(item) for item in items))

                get_items.addCallback(delete_items)
                get_items.addCallback(lambda ignored: loop_until(
                    reactor, lambda: state_method().addCallback(lambda result:
                                                                [] == result)))
                return get_items.addActionFinish()
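A hypothetical call, only to show the expected shape of the three callables; the client and its method names are placeholders, not a real API.

# Each callable returns a Deferred: the first two list entities, the third
# deletes one of them.  All names below are placeholders.
cleaning = api_clean_state(
    u"containers",
    lambda: client.list_containers_configuration(),
    lambda: client.list_containers_state(),
    lambda item: client.delete_container(item),
)
# ``cleaning`` fires once the state listing comes back empty.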
示例#54
0
    def add_file(self,
                 namex,
                 uploadable,
                 metadata=None,
                 overwrite=True,
                 progress=None):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I return a
        Deferred that fires (with the IFileNode of the uploaded file) when
        the operation completes."""
        with ADD_FILE(name=namex, metadata=metadata,
                      overwrite=overwrite).context():
            name = normalize(namex)
            if self.is_readonly():
                d = DeferredContext(defer.fail(NotWriteableError()))
            else:
                # XXX should pass reactor arg
                d = DeferredContext(
                    self._uploader.upload(uploadable, progress=progress))
                d.addCallback(lambda results: self._create_and_validate_node(
                    results.get_uri(), None, name))
                d.addCallback(lambda node: self.set_node(
                    name, node, metadata, overwrite))

        return d.addActionFinish()
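A usage sketch, assuming a writeable dirnode obtained elsewhere and Tahoe-LAFS's Data uploadable; the convergence secret and file name are placeholders.

from allmydata.immutable.upload import Data

# Upload a small file and link it into ``dirnode`` under u"notes.txt".
uploadable = Data(b"hello, world\n", convergence=b"")
d = dirnode.add_file(u"notes.txt", uploadable)
d.addCallback(lambda filenode: filenode.get_uri())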
示例#55
0
    def wrapper(self, *args, **kwargs):

        serializable_args = tuple(_ensure_encodeable(a) for a in args)
        serializable_kwargs = {}
        for kwarg in kwargs:
            serializable_kwargs[kwarg] = _ensure_encodeable(kwargs[kwarg])

        context = start_action(
            action_type=label,
            args=serializable_args, kwargs=serializable_kwargs,
        )
        with context.context():
            d = DeferredContext(function(self, *args, **kwargs))
            d.addCallback(log_result, context)
            d.addActionFinish()
            return d.result
示例#56
0
        def api_clean_state(
            name, configuration_method, state_method, delete_method,
        ):
            """
            Clean entities from the cluster.

            :param unicode name: The name of the entities to clean.
            :param configuration_method: The function to obtain the configured
                entities.
            :param state_method: The function to get the current entities.
            :param delete_method: The method to delete an entity.

            :return: A `Deferred` that fires when the entities have been
                deleted.
            """
            context = start_action(
                action_type=u"acceptance:cleanup_" + name,
            )
            with context.context():
                get_items = DeferredContext(configuration_method())

                def delete_items(items):
                    return gather_deferreds(list(
                        delete_method(item)
                        for item in items
                    ))
                get_items.addCallback(delete_items)
                get_items.addCallback(
                    lambda ignored: loop_until(
                        reactor, lambda: state_method().addCallback(
                            lambda result: [] == result
                        )
                    )
                )
                return get_items.addActionFinish()
示例#57
0
def retry_failure(reactor, function, expected=None, steps=None):
    """
    Retry ``function`` until it returns successfully.

    If it raises one of the expected exceptions, then retry.

    :param IReactorTime reactor: The reactor implementation to use to delay.
    :param callable function: A callable that returns a value.
    :param expected: Iterable of exceptions that trigger a retry. Passed
        through to ``Failure.check``.
    :param [float] steps: An iterable of delay intervals, measured in seconds.
        If not provided, will default to retrying every 0.1 seconds.

    :return: A ``Deferred`` that fires with the first successful return value
        of ``function``.
    """
    if steps is None:
        steps = repeat(0.1)
    steps = iter(steps)

    action = LOOP_UNTIL_ACTION(predicate=function)
    with action.context():
        d = DeferredContext(maybeDeferred(function))

    def loop(failure):
        if expected and not failure.check(*expected):
            return failure

        try:
            interval = steps.next()
        except StopIteration:
            return failure

        d = deferLater(reactor, interval, action.run, function)
        d.addErrback(loop)
        return d

    d.addErrback(loop)

    def got_result(result):
        action.add_success_fields(result=result)
        return result
    d.addCallback(got_result)
    d.addActionFinish()
    return d.result
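A usage sketch: retry a flaky call a few times with a fixed delay; flaky_fetch is a placeholder for any callable that may raise IOError.

from twisted.internet import reactor

# Retry at half-second intervals, at most five retries, but only while the
# failure is an IOError; anything else (or running out of steps) propagates.
d = retry_failure(reactor, flaky_fetch, expected=[IOError], steps=[0.5] * 5)
# ``d`` fires with the first successful return value of flaky_fetch.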