Example #1
        def api_clean_state(
            name,
            configuration_method,
            state_method,
            delete_method,
        ):
            """
            Clean entities from the cluster.

            :param unicode name: The name of the entities to clean.
            :param configuration_method: The function to obtain the configured
                entities.
            :param state_method: The function to get the current entities.
            :param delete_method: The method to delete an entity.

            :return: A `Deferred` that fires when the entities have been
                deleted.
            """
            context = start_action(action_type=u"acceptance:cleanup_" + name, )
            with context.context():
                get_items = DeferredContext(configuration_method())

                def delete_items(items):
                    return gather_deferreds(
                        list(delete_method(item) for item in items))

                get_items.addCallback(delete_items)
                get_items.addCallback(lambda ignored: loop_until(
                    reactor, lambda: state_method().addCallback(lambda result:
                                                                [] == result)))
                return get_items.addActionFinish()
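
All of the examples collected here follow the same eliot/Twisted pattern: start an action, create the Deferred inside the action's context so later callbacks stay associated with it, and finish the action when the Deferred fires. The following minimal sketch is not taken from any of the projects below; the fetch_items helper and the action_type string are made up for illustration.

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import succeed

def fetch_items():
    # Stand-in for an asynchronous API that returns a Deferred.
    return succeed([u"a", u"b", u"c"])

def load_items():
    action = start_action(action_type=u"example:load-items")
    with action.context():
        # Wrap the Deferred while the action's context is active so the
        # callbacks added below run inside the same eliot action.
        d = DeferredContext(fetch_items())
        d.addCallback(len)
        # addActionFinish() finishes the action when the Deferred fires and
        # returns the underlying Deferred to the caller.
        return d.addActionFinish()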
Example #2
 def wrapper(self, *args, **kwargs):
     context = start_action(Logger(), action_type=label, args=args, kwargs=kwargs)
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addCallback(log_result)
         d.addActionFinish()
         return d.result
Example #3
 def do_join(self, client_num, local_dir, invite_code):
     action = start_action(
         action_type=u"join-magic-folder",
         client_num=client_num,
         local_dir=local_dir,
         invite_code=invite_code,
     )
     with action.context():
         precondition(isinstance(local_dir, unicode), local_dir=local_dir)
         precondition(isinstance(invite_code, str), invite_code=invite_code)
         local_dir_arg = unicode_to_argv(local_dir)
         d = DeferredContext(
             self.do_cli(
                 "magic-folder",
                 "join",
                 invite_code,
                 local_dir_arg,
                 client_num=client_num,
             )
         )
     def _done(args):
         (rc, stdout, stderr) = args
         self.failUnlessEqual(rc, 0)
         self.failUnlessEqual(stdout, "")
         self.failUnlessEqual(stderr, "")
         return (rc, stdout, stderr)
     d.addCallback(_done)
     return d.addActionFinish()
Example #4
def _get_converge_inputs(config, subscriptions, k8s, aws):
    a = start_action(action_type=u"load-converge-inputs")
    with a.context():
        d = DeferredContext(
            gatherResults([
                get_active_subscriptions(subscriptions),
                get_customer_grid_configmaps(k8s, config.kubernetes_namespace),
                get_customer_grid_deployments(k8s, config.kubernetes_namespace),
                get_customer_grid_replicasets(k8s, config.kubernetes_namespace),
                get_customer_grid_pods(k8s, config.kubernetes_namespace),
                get_customer_grid_service(k8s, config.kubernetes_namespace),
                get_hosted_zone_by_name(aws.get_route53_client(), Name(config.domain)),
            ]),
        )
        d.addCallback(
            lambda state: _State(**dict(
                zip([
                    u"subscriptions",
                    u"configmaps",
                    u"deployments",
                    u"replicasets",
                    u"pods",
                    u"service",
                    u"zone",
                ], state,
                ),
            )),
        )
        return d.addActionFinish()
Example #5
def get_hosted_zone_by_name(route53, name):
    """
    Get a ``HostedZone`` with a zone name matching ``name``.

    :param route53: A txaws Route53 client.

    :param txaws.route53.model.Name name: The zone name to look for.

    :raise KeyError: If no matching hosted zone is found.

    :return Deferred(HostedZone): The hosted zone with a matching name.
    """
    action = start_action(action_type=u"get-hosted-zone")
    with action.context():
        d = DeferredContext(route53.list_hosted_zones())
        def filter_results(zones):
            Message.log(zone_names=list(zone.name for zone in zones))
            for zone in zones:
                # XXX Bleuch zone.name should be a Name!
                if Name(zone.name) == name:
                    d = _load_all_rrsets(route53, zone.identifier)
                    d.addCallback(
                        lambda rrsets, zone=zone: _ZoneState(
                            zone=zone,
                            rrsets=rrsets,
                        ),
                    )
                    return d
            raise KeyError(name)
        d.addCallback(filter_results)
        return d.addActionFinish()
Example #6
        def api_clean_state(
            name, configuration_method, state_method, delete_method,
        ):
            """
            Clean entities from the cluster.

            :param unicode name: The name of the entities to clean.
            :param configuration_method: The function to obtain the configured
                entities.
            :param state_method: The function to get the current entities.
            :param delete_method: The method to delete an entity.

            :return: A `Deferred` that fires when the entities have been
                deleted.
            """
            context = start_action(
                action_type=u"acceptance:cleanup_" + name,
            )
            with context.context():
                get_items = DeferredContext(configuration_method())

                def delete_items(items):
                    return gather_deferreds(list(
                        delete_method(item)
                        for item in items
                    ))
                get_items.addCallback(delete_items)
                get_items.addCallback(
                    lambda ignored: loop_until(
                        reactor, lambda: state_method().addCallback(
                            lambda result: [] == result
                        )
                    )
                )
                return get_items.addActionFinish()
Example #7
def sample(operation, metric, name):
    """
    Perform sampling of the operation.

    :param IOperation operation: An operation to perform.
    :param IMetric metric: A quantity to measure.
    :param int name: Identifier for individual sample.
    :return: Deferred firing with a sample. A sample is a dictionary
        containing a ``success`` boolean.  If ``success is True``, the
        dictionary also contains a ``value`` for the sample measurement.
        If ``success is False``, the dictionary also contains a
        ``reason`` for failure.
    """
    with start_action(action_type=u'flocker:benchmark:sample', sample=name):
        sampling = DeferredContext(maybeDeferred(operation.get_probe))

        def run_probe(probe):
            probing = metric.measure(probe.run)
            probing.addCallback(
                lambda measurement: dict(success=True, value=measurement)
            )
            probing.addCallback(bypass, probe.cleanup)

            return probing
        sampling.addCallback(run_probe)

        # Convert an error running the probe into a failed sample.
        def convert_to_result(failure):
            return dict(success=False, reason=failure.getTraceback())
        sampling.addErrback(convert_to_result)

        return sampling.addActionFinish()
Example #8
def _get_converge_inputs(config, subscriptions, k8s, aws):
    a = start_action(action_type=u"load-converge-inputs")
    with a.context():
        d = DeferredContext(
            gatherResults([
                get_active_subscriptions(subscriptions),
                get_customer_grid_configmaps(k8s, config.kubernetes_namespace),
                get_customer_grid_deployments(k8s, config.kubernetes_namespace),
                get_customer_grid_replicasets(k8s, config.kubernetes_namespace),
                get_customer_grid_pods(k8s, config.kubernetes_namespace),
                get_customer_grid_service(k8s, config.kubernetes_namespace),
                get_hosted_zone_by_name(aws.get_route53_client(), Name(config.domain)),
            ]),
        )
        d.addCallback(
            lambda state: _State(**dict(
                zip([
                    u"subscriptions",
                    u"configmaps",
                    u"deployments",
                    u"replicasets",
                    u"pods",
                    u"service",
                    u"zone",
                ], state,
                ),
            )),
        )
        return d.addActionFinish()
Example #9
 def check_joined_config(self, client_num, upload_dircap):
     """Tests that our collective directory has the readonly cap of
     our upload directory.
     """
     action = start_action(action_type=u"check-joined-config")
     with action.context():
         collective_readonly_cap = self.get_caps_from_files(client_num)[0]
         d = DeferredContext(
             self.do_cli(
                 "ls", "--json",
                 collective_readonly_cap,
                 client_num=client_num,
             )
         )
     def _done(args):
         (rc, stdout, stderr) = args
         self.failUnlessEqual(rc, 0)
         return (rc, stdout, stderr)
     d.addCallback(_done)
     def test_joined_magic_folder(args):
         (rc, stdout, stderr) = args
         readonly_cap = unicode(uri.from_string(upload_dircap).get_readonly().to_string(), 'utf-8')
         s = re.search(readonly_cap, stdout)
         self.failUnless(s is not None)
         return None
     d.addCallback(test_joined_magic_folder)
     return d.addActionFinish()
Example #10
File: _loop.py Project: LaOnda/flocker
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(self.deployer.hostname)
        d = DeferredContext(self.deployer.discover_state(known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state
                )
            with LOG_SEND_TO_CONTROL_SERVICE(
                    self.fsm.logger, connection=self.client) as context:
                self.client.callRemote(NodeStateCommand,
                                       state_changes=state_changes,
                                       eliot_context=context)
            action = self.deployer.calculate_changes(
                self.configuration, self.cluster_state
            )
            return action.run(self.deployer)
        d.addCallback(got_local_state)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(
            lambda _:
                self.reactor.callLater(
                    1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE
                )
        )
Example #11
    def add_file(self,
                 namex,
                 uploadable,
                 metadata=None,
                 overwrite=True,
                 progress=None):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I return a
        Deferred that fires (with the IFileNode of the uploaded file) when
        the operation completes."""
        with ADD_FILE(name=namex, metadata=metadata,
                      overwrite=overwrite).context():
            name = normalize(namex)
            if self.is_readonly():
                d = DeferredContext(defer.fail(NotWriteableError()))
            else:
                # XXX should pass reactor arg
                d = DeferredContext(
                    self._uploader.upload(uploadable, progress=progress))
                d.addCallback(lambda results: self._create_and_validate_node(
                    results.get_uri(), None, name))
                d.addCallback(lambda node: self.set_node(
                    name, node, metadata, overwrite))

        return d.addActionFinish()
Example #12
    def check_joined_config(self, client_num, upload_dircap):
        """Tests that our collective directory has the readonly cap of
        our upload directory.
        """
        action = start_action(action_type=u"check-joined-config")
        with action.context():
            collective_readonly_cap = self.get_caps_from_files(client_num)[0]
            d = DeferredContext(
                self.do_cli(
                    "ls",
                    "--json",
                    collective_readonly_cap,
                    client_num=client_num,
                ))

        def _done(args):
            (rc, stdout, stderr) = args
            self.assertEqual(rc, 0)
            return (rc, stdout, stderr)

        d.addCallback(_done)

        def test_joined_magic_folder(args):
            (rc, stdout, stderr) = args
            readonly_cap = unicode(
                uri.from_string(upload_dircap).get_readonly().to_string(),
                'utf-8')
            s = re.search(readonly_cap, stdout)
            self.assertTrue(s is not None)
            return None

        d.addCallback(test_joined_magic_folder)
        return d.addActionFinish()
Example #13
            def got_ip_addresses():
                d = self._async_refresh_node(reactor, node)
                d = DeferredContext(d)

                def is_running(updated_node):
                    if updated_node.state is not NodeState.RUNNING:
                        raise Exception("Node failed to run")
                    return updated_node

                def check_addresses(updated_node):
                    """
                    Check if the node has got at least one IPv4 public address
                    and, if requested, an IPv4 private address.  If yes, then
                    return the node object with the addresses, None otherwise.
                    """
                    public_ips = _filter_ipv4(updated_node.public_ips)
                    if len(public_ips) > 0:
                        if self._use_private_addresses:
                            private_ips = _filter_ipv4(
                                updated_node.private_ips
                            )
                            if len(private_ips) == 0:
                                return None
                        return updated_node
                    else:
                        return None

                d.addCallback(is_running)
                d.addCallback(check_addresses)
                return d.result
Example #14
    def do_join(self, client_num, local_dir, invite_code):
        action = start_action(
            action_type=u"join-magic-folder",
            client_num=client_num,
            local_dir=local_dir,
            invite_code=invite_code,
        )
        with action.context():
            precondition(isinstance(local_dir, unicode), local_dir=local_dir)
            precondition(isinstance(invite_code, str), invite_code=invite_code)
            local_dir_arg = unicode_to_argv(local_dir)
            d = DeferredContext(
                self.do_cli(
                    "magic-folder",
                    "join",
                    "--author",
                    "test-dummy",
                    invite_code,
                    local_dir_arg,
                    client_num=client_num,
                ))

        def _done(args):
            (rc, stdout, stderr) = args
            self.assertEqual(rc, 0)
            self.assertEqual(stdout, "")
            self.assertEqual(stderr, "")
            return (rc, stdout, stderr)

        d.addCallback(_done)
        return d.addActionFinish()
Example #15
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return Deferred[Optional[Dict[unicode, Any]]]: Scenario metrics.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. " "Waiting {num_seconds} seconds for them to complete."
            ).format(num_requests=outstanding_requests, num_seconds=self.timeout)
            Message.log(key="outstanding_requests", value=msg)

        with start_action(action_type=u"flocker:benchmark:scenario:stop", scenario="request_load"):

            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor, no_outstanding_requests, repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario = DeferredContext(scenario_stopped)

            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = ("Force stopping the scenario. " "There are {num_requests} outstanding requests").format(
                    num_requests=outstanding_requests
                )
                Message.log(key="force_stop_request", value=msg)

            scenario.addErrback(handle_timeout)

            def scenario_cleanup(ignored):
                """
                Calls the scenario cleanup, and wraps it inside an eliot
                start action, so we can see the logs if something goes
                wrong within the cleanup

                :return Deferred: that will fire once the cleanup has been
                    completed
                """
                with start_action(action_type=u"flocker:benchmark:scenario:cleanup", scenario="request_load"):
                    return self.request.run_cleanup()

            scenario.addBoth(scenario_cleanup)

            def return_metrics(_ignore):
                return self.rate_measurer.get_metrics()

            scenario.addCallback(return_metrics)

            return scenario.addActionFinish()
Example #16
File: _api.py Project: ClusterHQ/flocker
    def volumedriver_list(self):
        """
        Return information about the current state of all volumes.

        :return: Result indicating success.
        """
        listing = DeferredContext(
            self._flocker_client.list_datasets_configuration())

        def got_configured(configured):
            results = []
            for dataset in configured:
                # Datasets without a name can't be used by the Docker plugin:
                if NAME_FIELD not in dataset.metadata:
                    continue
                dataset_name = dataset.metadata[NAME_FIELD]
                d = self._get_path_from_dataset_id(dataset.dataset_id)
                d.addCallback(lambda path, name=dataset_name: (path, name))
                results.append(d)
            return gatherResults(results)

        listing.addCallback(got_configured)

        def got_paths(results):
            return {u"Err": u"",
                    u"Volumes": sorted([
                        {u"Name": name,
                         u"Mountpoint": u"" if path is None else path.path}
                        for (path, name) in results])}
        listing.addCallback(got_paths)
        return listing.result
Example #17
    def volumedriver_list(self):
        """
        Return information about the current state of all volumes.

        :return: Result indicating success.
        """
        listing = DeferredContext(
            self._flocker_client.list_datasets_configuration())

        def got_configured(configured):
            results = []
            for dataset in configured:
                # Datasets without a name can't be used by the Docker plugin:
                if NAME_FIELD not in dataset.metadata:
                    continue
                dataset_name = dataset.metadata[NAME_FIELD]
                d = self._get_path_from_dataset_id(dataset.dataset_id)
                d.addCallback(lambda path, name=dataset_name: (path, name))
                results.append(d)
            return gatherResults(results)

        listing.addCallback(got_configured)

        def got_paths(results):
            return {u"Err": u"",
                    u"Volumes": sorted([
                        {u"Name": name,
                         u"Mountpoint": u"" if path is None else path.path}
                        for (path, name) in results])}
        listing.addCallback(got_paths)
        return listing.result
Example #18
    def _request(self, method, path, body, success_codes, error_codes=None):
        """
        Send a HTTP request to the Flocker API, return decoded JSON body.

        :param bytes method: HTTP method, e.g. PUT.
        :param bytes path: Path to add to base URL.
        :param body: If not ``None``, JSON encode this and send as the
            body of the request.
        :param set success_codes: Expected success response codes.
        :param error_codes: Mapping from HTTP response code to exception to be
            raised if it is present, or ``None`` to set no errors.

        :return: ``Deferred`` firing with decoded JSON.
        """
        url = self._base_url + path
        action = _LOG_HTTP_REQUEST(url=url, method=method, request_body=body)

        if error_codes is None:
            error_codes = {}

        def error(body, code):
            if code in error_codes:
                raise error_codes[code](body)
            raise ResponseError(code, body)

        def got_result(result):
            if result.code in success_codes:
                action.addSuccessFields(response_code=result.code)
                return json_content(result)
            else:
                d = content(result)
                d.addCallback(error, result.code)
                return d

        # Serialize the current task ID so we can trace logging across
        # processes:
        headers = {b"X-Eliot-Task-Id": action.serialize_task_id()}
        data = None
        if body is not None:
            headers["content-type"] = b"application/json"
            data = dumps(body)

        with action.context():
            request = DeferredContext(
                self._treq.request(
                    method,
                    url,
                    data=data,
                    headers=headers,
                    # Keep tests from having dirty reactor problems:
                    persistent=False))
        request.addCallback(got_result)

        def got_body(json_body):
            action.addSuccessFields(response_body=json_body)
            return json_body

        request.addCallback(got_body)
        request.addActionFinish()
        return request.result
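
Example #18 serializes the current eliot task ID into an X-Eliot-Task-Id header so that logging can be traced across processes. For context, a receiving server can continue the same task with eliot's Action.continue_task; the handler below is a hypothetical sketch of that counterpart (only the header name comes from the example, the rest is assumed).

from eliot import Action, Message

def handle_request(headers):
    # Hypothetical server-side handler: pick up the task ID sent by the
    # client and keep logging inside the same eliot task.
    task_id = headers[b"X-Eliot-Task-Id"].decode("utf-8")
    with Action.continue_task(task_id=task_id):
        Message.log(message_type=u"example:request-received")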
Example #19
def logged_run_process(reactor, command):
    """
    Run a child process, and log the output as we get it.

    :param reactor: An ``IReactorProcess`` to spawn the process on.
    :param command: An argument list specifying the child process to run.

    :return: A ``Deferred`` that calls back with ``_ProcessResult`` if the
        process exited successfully, or errbacks with
        ``_CalledProcessError`` otherwise.
    """
    d = Deferred()
    action = TWISTED_CHILD_PROCESS_ACTION(command=command)
    with action.context():
        d2 = DeferredContext(d)
        protocol = _LoggingProcessProtocol(d, action)
        reactor.spawnProcess(protocol, command[0], command)

        def process_ended((reason, output)):
            status = reason.value.status
            if status:
                raise _CalledProcessError(
                    returncode=status, cmd=command, output=output)
            return _ProcessResult(
                command=command,
                status=status,
                output=output,
            )

        d2.addCallback(process_ended)
        d2.addActionFinish()
        return d2.result
Example #20
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(self.deployer.hostname)
        d = DeferredContext(self.deployer.discover_state(known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state)
            with LOG_SEND_TO_CONTROL_SERVICE(
                    self.fsm.logger, connection=self.client) as context:
                self.client.callRemote(NodeStateCommand,
                                       state_changes=state_changes,
                                       eliot_context=context)
            action = self.deployer.calculate_changes(self.configuration,
                                                     self.cluster_state)
            return action.run(self.deployer)

        d.addCallback(got_local_state)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(lambda _: self.reactor.callLater(
            1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE))
Example #21
    def volumedriver_get(self, Name):
        """
        Return information about the current state of a particular volume.

        :param unicode Name: The name of the volume.

        :return: Result indicating success.
        """
        d = DeferredContext(self._dataset_id_for_name(Name))
        d.addCallback(self._get_path_from_dataset_id)

        def got_path(path):
            if path is None:
                path = u""
            else:
                path = path.path
            return {
                u"Err": u"",
                u"Volume": {
                    u"Name": Name,
                    u"Mountpoint": path
                }
            }

        d.addCallback(got_path)
        return d.result
Example #22
File: _aws.py Project: zendad/flocker
    def _async_get_node(self, reactor, instance, metadata):
        """
        Configure the given AWS instance, wait until it's running
        and create an ``AWSNode`` object for it.

        :param reactor: The reactor.
        :param boto.ec2.instance.Instance instance: The instance to set up.
        :param dict metadata: The metadata to set for the instance.
        :return: Deferred that fires when the instance is ready.
        """
        def instance_error(failure):
            Message.log(
                message_type="flocker:provision:aws:async_get_node:failed")
            instance.terminate()
            write_failure(failure)
            return failure

        action = start_action(
            action_type=u"flocker:provision:aws:async_get_node",
            name=metadata['Name'],
            instance_id=instance.id,
        )
        with action.context():
            d = loop_until(
                reactor,
                lambda: maybeDeferred(self._set_metadata, instance, metadata),
                repeat(5, INSTANCE_TIMEOUT),
            )
            d = DeferredContext(d)
            d.addCallback(
                lambda _: _async_wait_until_running(reactor, instance))
            d.addErrback(instance_error)
            d.addActionFinish()
            return d.result
Example #23
def logged_run_process(reactor, command):
    """
    Run a child process, and log the output as we get it.

    :param reactor: An ``IReactorProcess`` to spawn the process on.
    :param command: An argument list specifying the child process to run.

    :return: A ``Deferred`` that calls back with ``_ProcessResult`` if the
        process exited successfully, or errbacks with
        ``_CalledProcessError`` otherwise.
    """
    d = Deferred()
    action = TWISTED_CHILD_PROCESS_ACTION(command=command)
    with action.context():
        d2 = DeferredContext(d)
        protocol = _LoggingProcessProtocol(d, action)
        reactor.spawnProcess(protocol, command[0], command)

        def process_ended((reason, output)):
            status = reason.value.status
            if status:
                raise _CalledProcessError(returncode=status,
                                          cmd=command,
                                          output=output)
            return _ProcessResult(
                command=command,
                status=status,
                output=output,
            )

        d2.addCallback(process_ended)
        d2.addActionFinish()
        return d2.result
Example #24
            def got_ip_addresses():
                d = self._async_refresh_node(reactor, node)
                d = DeferredContext(d)

                def is_running(updated_node):
                    if updated_node.state is not NodeState.RUNNING:
                        raise Exception("Node failed to run")
                    return updated_node

                def check_addresses(updated_node):
                    """
                    Check if the node has got at least one IPv4 public address
                    and, if requested, an IPv4 private address.  If yes, then
                    return the node object with the addresses, None otherwise.
                    """
                    public_ips = _filter_ipv4(updated_node.public_ips)
                    if len(public_ips) > 0:
                        if self._use_private_addresses:
                            private_ips = _filter_ipv4(
                                updated_node.private_ips)
                            if len(private_ips) == 0:
                                return None
                        return updated_node
                    else:
                        return None

                d.addCallback(is_running)
                d.addCallback(check_addresses)
                return d.result
Example #25
def _init_magic_folder(reactor, request, temp_dir, name, web_port):
    """
    Create a new magic-folder-daemon configuration

    :param reactor: The reactor to use to launch the process.
    :param request: The pytest request object to use for cleanup.
    :param temp_dir: The directory in which to find a Tahoe-LAFS node.
    :param name: The alias of the Tahoe-LAFS node.

    :return Deferred[IProcessTransport]: The started process.
    """
    node_dir = join(temp_dir, name)
    config_dir = join(temp_dir, "magic-daemon-{}".format(name))
    # proto = _ProcessExitedProtocol()
    proto = _CollectOutputProtocol()

    coverage = request.config.getoption('coverage')

    def optional(flag, elements):
        if flag:
            return elements
        return []

    args = [
        sys.executable,
        "-m",
    ] + optional(coverage, [
        "coverage",
        "run",
        "-m",
    ]) + [
        "magic_folder",
    ] + optional(coverage, [
        "--coverage",
    ]) + [
        "--config",
        config_dir,
        "init",
        "--node-directory",
        node_dir,
        "--listen-endpoint",
        web_port,
    ]
    Message.log(
        message_type=u"integration:init-magic-folder",
        coverage=coverage,
        args=args,
    )
    transport = reactor.spawnProcess(
        proto,
        sys.executable,
        args,
    )

    request.addfinalizer(partial(_cleanup_tahoe_process, transport,
                                 proto.done))
    with start_action(action_type=u"integration:init-magic-folder").context():
        ctx = DeferredContext(proto.done)
        ctx.addCallback(lambda ignored: transport)
        return ctx.addActionFinish()
Example #26
def sample(operation, metric, name):
    """
    Perform sampling of the operation.

    :param IOperation operation: An operation to perform.
    :param IMetric metric: A quantity to measure.
    :param int name: Identifier for individual sample.
    :return: Deferred firing with a sample. A sample is a dictionary
        containing a ``success`` boolean.  If ``success is True``, the
        dictionary also contains a ``value`` for the sample measurement.
        If ``success is False``, the dictionary also contains a
        ``reason`` for failure.
    """
    with start_action(action_type=u'flocker:benchmark:sample', sample=name):
        sampling = DeferredContext(maybeDeferred(operation.get_probe))

        def run_probe(probe):
            probing = metric.measure(probe.run)
            probing.addCallbacks(
                lambda interval: dict(success=True, value=interval),
                lambda reason: dict(success=False,
                                    reason=reason.getTraceback()),
            )
            probing.addCallback(bypass, probe.cleanup)

            return probing

        sampling.addCallback(run_probe)
        sampling.addActionFinish()
        return sampling.result
Example #27
    def do_create_magic_folder(self, client_num):
        with start_action(action_type=u"create-magic-folder",
                          client_num=client_num).context():
            d = DeferredContext(
                self.do_cli(
                    "magic-folder",
                    "--debug",
                    "create",
                    "magic:",
                    client_num=client_num,
                ))

        def _done(args):
            (rc, stdout, stderr) = args
            self.failUnlessEqual(rc, 0, stdout + stderr)
            self.assertIn("Alias 'magic' created", stdout)
            #            self.failUnlessIn("joined new magic-folder", stdout)
            #            self.failUnlessIn("Successfully created magic-folder", stdout)
            self.failUnlessEqual(stderr, "")
            aliases = get_aliases(self.get_clientdir(i=client_num))
            self.assertIn("magic", aliases)
            self.failUnless(aliases["magic"].startswith("URI:DIR2:"))

        d.addCallback(_done)
        return d.addActionFinish()
Example #28
def get_hosted_zone_by_name(route53, name):
    """
    Get a ``HostedZone`` with a zone name matching ``name``.

    :param route53: A txaws Route53 client.

    :param txaws.route53.model.Name name: The zone name to look for.

    :raise KeyError: If no matching hosted zone is found.

    :return Deferred(HostedZone): The hosted zone with a matching name.
    """
    action = start_action(action_type=u"get-hosted-zone")
    with action.context():
        d = DeferredContext(route53.list_hosted_zones())
        def filter_results(zones):
            Message.log(zone_names=list(zone.name for zone in zones))
            for zone in zones:
                # XXX Bleuch zone.name should be a Name!
                if Name(zone.name) == name:
                    d = route53.list_resource_record_sets(zone_id=zone.identifier)
                    d.addCallback(
                        lambda rrsets, zone=zone: _ZoneState(
                            zone=zone,
                            rrsets=rrsets,
                        ),
                    )
                    return d
            raise KeyError(name)
        d.addCallback(filter_results)
        return d.addActionFinish()
Example #29
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(
            self.fsm.logger, cluster_state=self.cluster_state, desired_configuration=self.configuration
        ).context():
            d = DeferredContext(self.deployer.discover_state(known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(self.cluster_state)
            with LOG_SEND_TO_CONTROL_SERVICE(
                self.fsm.logger, connection=self.client, local_changes=list(state_changes)
            ) as context:
                self.client.callRemote(NodeStateCommand, state_changes=state_changes, eliot_context=context)
            action = self.deployer.calculate_changes(self.configuration, self.cluster_state)
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(self.fsm.logger)
            return run_state_change(action, self.deployer)

        d.addCallback(got_local_state)
        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger, u"")

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(lambda _: self.reactor.callLater(1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE))
        d.addActionFinish()
Example #30
File: _loop.py Project: uedzen/flocker
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(
            self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(self.fsm.logger, cluster_state=self.cluster_state,
                          desired_configuration=self.configuration).context():
            d = DeferredContext(maybeDeferred(
                self.deployer.discover_state, known_local_state))

        def got_local_state(local_state):
            cluster_state_changes = local_state.shared_state_changes()
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            #
            # XXX This somewhat side-steps the whole explicit-state-machine
            # thing we're aiming for here.  It would be better for these state
            # changes to arrive as an input to the state machine.
            for state in cluster_state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state
                )

            # XXX And for this update to be the side-effect of an output
            # resulting.
            sent_state = self._maybe_send_state_to_control_service(
                cluster_state_changes)

            action = self.deployer.calculate_changes(
                self.configuration, self.cluster_state, local_state
            )
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(
                self.fsm.logger)
            ran_state_change = run_state_change(action, self.deployer)
            DeferredContext(ran_state_change).addErrback(
                writeFailure, self.fsm.logger)

            # Wait for the control node to acknowledge the new
            # state, and for the convergence actions to run.
            return gather_deferreds([sent_state, ran_state_change])
        d.addCallback(got_local_state)

        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(
            lambda _:
                self.reactor.callLater(
                    1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE
                )
        )
        d.addActionFinish()
Example #31
    def cleanup(self, res):
        d = DeferredContext(defer.succeed(None))
        def _clean(ign):
            return self.magicfolder.disownServiceParent()

        d.addCallback(_clean)
        d.addCallback(lambda ign: res)
        return d.result
Example #32
 def startService(self):
     with start_action(action_type=u"asyncservice:start"):
         self.running = True
         self._d = self._factory()
         d = DeferredContext(self._d)
         d.addCallback(self._created)
         d.addErrback(self._failed)
         d.addActionFinish()
Example #33
 def test_returns_unfired_deferred(self):
     Message.log(hello="world")
     # @eliot_logged_test automatically gives us an action context but it's
     # still our responsibility to maintain it across stack-busting
     # operations.
     d = DeferredContext(deferLater(reactor, 0.0, lambda: None))
     d.addCallback(lambda ignored: Message.log(goodbye="world"))
     # We didn't start an action.  We're not finishing an action.
     return d.result
Example #34
 def test_returns_unfired_deferred(self):
     Message.log(hello="world")
     # @eliot_logged_test automatically gives us an action context but it's
     # still our responsibility to maintain it across stack-busting
     # operations.
     d = DeferredContext(deferLater(reactor, 0.0, lambda: None))
     d.addCallback(lambda ignored: Message.log(goodbye="world"))
     # We didn't start an action.  We're not finishing an action.
     return d.result
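
Examples #33 and #34 return d.result instead of calling addActionFinish(), because the enclosing action is owned by the test decorator rather than by the function itself. As a rule of thumb across these snippets: a function that calls start_action finishes the action with addActionFinish(); a function that merely runs inside a caller's action preserves the context and returns the plain Deferred. A hypothetical sketch of both shapes:

from eliot import start_action
from eliot.twisted import DeferredContext
from twisted.internet.defer import succeed

def owns_its_action():
    # This function started the action, so it also finishes it.
    action = start_action(action_type=u"example:owns-action")
    with action.context():
        d = DeferredContext(succeed(None))
        return d.addActionFinish()

def runs_in_callers_action():
    # Must be called while some action is already active (for instance from
    # inside owns_its_action); it only preserves that context across the
    # Deferred and hands back the plain Deferred without finishing anything.
    d = DeferredContext(succeed(None))
    d.addCallback(lambda ignored: ignored)
    return d.result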
Example #35
    def cleanup(self, res):
        d = DeferredContext(defer.succeed(None))

        def _clean(ign):
            return self.magicfolder.disownServiceParent()

        d.addCallback(_clean)
        d.addCallback(lambda ign: res)
        return d.result
Example #36
 def run(self, deployer):
     d = DeferredContext(succeed(None))
     for subchange in self.changes:
         d.addCallback(
             lambda _, subchange=subchange: run_state_change(
                 subchange, deployer
             )
         )
     return d.result
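
Examples #36 and #39 chain their sub-changes sequentially and bind the loop variable as a lambda default (subchange=subchange) so each callback captures the value from its own iteration rather than the loop's final value. The idiom in isolation, with hypothetical names:

from twisted.internet.defer import succeed

def run_all(steps):
    # Run the callables in `steps` one after another, each starting only
    # after the Deferred returned by the previous one has fired.
    d = succeed(None)
    for step in steps:
        # `step=step` freezes the current value; without the default
        # argument every callback would late-bind to the final `step`.
        d.addCallback(lambda _, step=step: step())
    return d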
Example #37
def _run_magic_folder(reactor, request, temp_dir, name, web_port):
    """
    Start a magic-folder process.

    :param reactor: The reactor to use to launch the process.
    :param request: The pytest request object to use for cleanup.
    :param temp_dir: The directory in which to find a Tahoe-LAFS node.
    :param name: The alias of the Tahoe-LAFS node.

    :return Deferred[IProcessTransport]: The started process.
    """
    node_dir = join(temp_dir, name)

    magic_text = "Completed initial Magic Folder setup"
    proto = _MagicTextProtocol(magic_text)

    coverage = request.config.getoption('coverage')

    def optional(flag, elements):
        if flag:
            return elements
        return []

    args = [
        sys.executable,
        "-m",
    ] + optional(coverage, [
        "coverage",
        "run",
        "-m",
    ]) + [
        "magic_folder",
    ] + optional(coverage, [
        "--coverage",
    ]) + [
        "--node-directory",
        node_dir,
        "run",
        "--web-port",
        web_port,
    ]
    Message.log(
        message_type=u"integration:run-magic-folder",
        coverage=coverage,
        args=args,
    )
    transport = reactor.spawnProcess(
        proto,
        sys.executable,
        args,
    )
    request.addfinalizer(
        partial(_cleanup_tahoe_process, transport, proto.exited))
    with start_action(action_type=u"integration:run-magic-folder").context():
        ctx = DeferredContext(proto.magic_seen)
        ctx.addCallback(lambda ignored: transport)
        return ctx.addActionFinish()
Example #38
    def _request(self, method, path, body, success_codes, error_codes=None):
        """
        Send a HTTP request to the Flocker API, return decoded JSON body.

        :param bytes method: HTTP method, e.g. PUT.
        :param bytes path: Path to add to base URL.
        :param body: If not ``None``, JSON encode this and send as the
            body of the request.
        :param set success_codes: Expected success response codes.
        :param error_codes: Mapping from HTTP response code to exception to be
            raised if it is present, or ``None`` to set no errors.

        :return: ``Deferred`` firing with decoded JSON.
        """
        url = self._base_url + path
        action = _LOG_HTTP_REQUEST(url=url, method=method, request_body=body)

        if error_codes is None:
            error_codes = {}

        def error(body, code):
            if code in error_codes:
                raise error_codes[code](body)
            raise ResponseError(code, body)

        def got_result(result):
            if result.code in success_codes:
                action.addSuccessFields(response_code=result.code)
                return json_content(result)
            else:
                d = content(result)
                d.addCallback(error, result.code)
                return d

        # Serialize the current task ID so we can trace logging across
        # processes:
        headers = {b"X-Eliot-Task-Id": action.serialize_task_id()}
        data = None
        if body is not None:
            headers["content-type"] = b"application/json"
            data = dumps(body)

        with action.context():
            request = DeferredContext(self._treq.request(
                method, url,
                data=data, headers=headers,
                # Keep tests from having dirty reactor problems:
                persistent=False
                ))
        request.addCallback(got_result)

        def got_body(json_body):
            action.addSuccessFields(response_body=json_body)
            return json_body
        request.addCallback(got_body)
        request.addActionFinish()
        return request.result
Example #39
 def run(self, deployer, state_persister):
     d = DeferredContext(succeed(None))
     for subchange in self.changes:
         d.addCallback(lambda _, sub=subchange: run_state_change(
             sub,
             deployer,
             state_persister=state_persister,
         ))
     return d.result
Example #40
def get_customer_grid_configmaps(k8s, namespace):
    action = start_action(action_type=u"load-configmaps")
    with action.context():
        d = DeferredContext(k8s.get_configmaps(_s4_selector(namespace)))
        def got_configmaps(configmaps):
            configmaps = list(configmaps)
            action.add_success_fields(configmap_count=len(configmaps))
            return configmaps
        d.addCallback(got_configmaps)
        return d.addActionFinish()
Example #41
def get_customer_grid_configmaps(k8s, namespace):
    action = start_action(action_type=u"load-configmaps")
    with action.context():
        d = DeferredContext(k8s.get_configmaps(_s4_selector(namespace)))
        def got_configmaps(configmaps):
            configmaps = list(configmaps)
            action.add_success_fields(configmap_count=len(configmaps))
            return configmaps
        d.addCallback(got_configmaps)
        return d.addActionFinish()
Example #42
 def _check_once(self, k8s, namespace):
     """
     Load the customer grid pods from Kubernetes.
     """
     a = start_action(action_type=u"router-update:check")
     with a.context():
         d = DeferredContext(
             get_customer_grid_pods(KubeClient(k8s=k8s), namespace))
         d.addCallback(self._router.set_pods)
         return d.addActionFinish()
Example #43
def get_customer_grid_replicasets(k8s, namespace):
    action = start_action(action_type=u"load-replicasets")
    with action.context():
        d = DeferredContext(k8s.get_replicasets(_s4_selector(namespace)))
        def got_replicasets(replicasets):
            replicasets = list(replicasets)
            action.add_success_fields(replicaset_count=len(replicasets))
            return replicasets
        d.addCallback(got_replicasets)
        return d.addActionFinish()
Example #44
def get_customer_grid_replicasets(k8s, namespace):
    action = start_action(action_type=u"load-replicasets")
    with action.context():
        d = DeferredContext(k8s.get_replicasets(_s4_selector(namespace)))
        def got_replicasets(replicasets):
            replicasets = list(replicasets)
            action.add_success_fields(replicaset_count=len(replicasets))
            return replicasets
        d.addCallback(got_replicasets)
        return d.addActionFinish()
Example #45
    def stop(self):
        """
        Stop the scenario from being maintained by stopping all the
        loops that may be executing.

        :return Deferred[Optional[Dict[unicode, Any]]]: Scenario metrics.
        """
        self.is_started = False
        if self.monitor_loop.running:
            self.monitor_loop.stop()

        if self.loop.running:
            self.loop.stop()

        outstanding_requests = self.rate_measurer.outstanding()

        if outstanding_requests > 0:
            msg = (
                "There are {num_requests} outstanding requests. "
                "Waiting {num_seconds} seconds for them to complete."
            ).format(
                num_requests=outstanding_requests,
                num_seconds=self.timeout
            )
            Message.log(key='outstanding_requests', value=msg)

        with start_action(
            action_type=u'flocker:benchmark:scenario:stop',
            scenario='request_load'
        ):
            def no_outstanding_requests():
                return self.rate_measurer.outstanding() == 0

            scenario_stopped = loop_until(self.reactor,
                                          no_outstanding_requests,
                                          repeat(1))
            timeout(self.reactor, scenario_stopped, self.timeout)
            scenario = DeferredContext(scenario_stopped)

            def handle_timeout(failure):
                failure.trap(CancelledError)
                msg = (
                    "Force stopping the scenario. "
                    "There are {num_requests} outstanding requests"
                ).format(
                    num_requests=outstanding_requests
                )
                Message.log(key='force_stop_request', value=msg)
            scenario.addErrback(handle_timeout)

            def return_metrics(_ignore):
                return self.rate_measurer.get_metrics()
            scenario.addCallback(return_metrics)

            return scenario.addActionFinish()
Example #46
 def signup(self, *args, **kwargs):
     """
     Provision a subscription and return an ``IClaim`` describing how to
     retrieve the resulting configuration from a magic wormhole server.
     """
     self._start.inc()
     a = start_action(action_type=u"wormhole-signup")
     with a.context():
         d = DeferredContext(self.provisioner.signup(*args, **kwargs))
         d.addCallback(self._details_to_wormhole_code)
         return d.addActionFinish()
Example #47
 def signup(self, *args, **kwargs):
     """
     Provision a subscription and return an ``IClaim`` describing how to
     retrieve the resulting configuration from a magic wormhole server.
     """
     self._start.inc()
     a = start_action(action_type=u"wormhole-signup")
     with a.context():
         d = DeferredContext(self.provisioner.signup(*args, **kwargs))
         d.addCallback(self._details_to_wormhole_code)
         return d.addActionFinish()
Example #48
def get_customer_grid_deployments(k8s, namespace):
    action = start_action(action_type=u"load-deployments")
    with action.context():
        d = DeferredContext(k8s.get_deployments(_s4_selector(namespace)))
        def got_deployments(deployments):
            deployments = list(deployments)
            action.add_success_fields(deployment_count=len(deployments))
            _DEPLOYMENTS.set(len(deployments))
            return deployments
        d.addCallback(got_deployments)
        return d.addActionFinish()
Example #49
File: _loop.py Project: jongiddy/flocker
    def output_CONVERGE(self, context):
        known_local_state = self.cluster_state.get_node(
            self.deployer.node_uuid, hostname=self.deployer.hostname)

        with LOG_CONVERGE(self.fsm.logger,
                          cluster_state=self.cluster_state,
                          desired_configuration=self.configuration).context():
            d = DeferredContext(
                maybeDeferred(self.deployer.discover_state, known_local_state))

        def got_local_state(state_changes):
            # Current cluster state is likely out of date as regards the local
            # state, so update it accordingly.
            #
            # XXX This somewhat side-steps the whole explicit-state-machine
            # thing we're aiming for here.  It would be better for these state
            # changes to arrive as an input to the state machine.
            for state in state_changes:
                self.cluster_state = state.update_cluster_state(
                    self.cluster_state)

            # XXX And for this update to be the side-effect of an output
            # resulting.
            sent_state = self._maybe_send_state_to_control_service(
                state_changes)

            action = self.deployer.calculate_changes(self.configuration,
                                                     self.cluster_state)
            LOG_CALCULATED_ACTIONS(calculated_actions=action).write(
                self.fsm.logger)
            ran_state_change = run_state_change(action, self.deployer)
            DeferredContext(ran_state_change).addErrback(
                writeFailure, self.fsm.logger)

            # Wait for the control node to acknowledge the new
            # state, and for the convergence actions to run.
            return gather_deferreds([sent_state, ran_state_change])

        d.addCallback(got_local_state)

        # If an error occurred we just want to log it and then try
        # converging again; hopefully next time we'll have more success.
        d.addErrback(writeFailure, self.fsm.logger)

        # It would be better to have a "quiet time" state in the FSM and
        # transition to that next, then have a timeout input kick the machine
        # back around to the beginning of the loop in the FSM.  However, we're
        # not going to keep this sleep-for-a-bit solution in the long term.
        # Instead, we'll be more event driven.  So just going with the simple
        # solution and inserting a side-effect-y delay directly here.

        d.addCallback(lambda _: self.reactor.callLater(
            1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE))
        d.addActionFinish()
Example #50
def get_customer_grid_deployments(k8s, namespace):
    action = start_action(action_type=u"load-deployments")
    with action.context():
        d = DeferredContext(k8s.get_deployments(_s4_selector(namespace)))
        def got_deployments(deployments):
            deployments = list(deployments)
            action.add_success_fields(deployment_count=len(deployments))
            _DEPLOYMENTS.set(len(deployments))
            return deployments
        d.addCallback(got_deployments)
        return d.addActionFinish()
Example #51
 def _check_once(self, k8s, namespace):
     """
     Load the customer grid pods from Kubernetes.
     """
     a = start_action(action_type=u"router-update:check")
     with a.context():
         d = DeferredContext(
             get_customer_grid_pods(KubeClient(k8s=k8s), namespace)
         )
         d.addCallback(self._router.set_pods)
         return d.addActionFinish()
Example #52
    def got_welcome(ignored):
        # we're connected to the wormhole server; send our introduction
        # message
        intro = {u"abilities": {u"server-v1": {}}}
        Message.log(server_intro=intro)
        wormhole.send_message(json.dumps(intro))

        # await the client's introduction
        d = DeferredContext(wormhole.get_message())
        d.addCallback(json.loads)
        return d.result
Example #53
    def got_welcome(ignored):
        # we're connected to the wormhole server; send our introduction
        # message
        intro = {u"abilities": {u"server-v1": {}}}
        Message.log(server_intro=intro)
        wormhole.send_message(json.dumps(intro))

        # await the client's introduction
        d = DeferredContext(wormhole.get_message())
        d.addCallback(json.loads)
        return d.result
Example #54
def get_customer_grid_service(k8s, namespace):
    action = start_action(action_type=u"load-services")
    with action.context():
        d = DeferredContext(k8s.get_services(_s4_selector(namespace)))
        def got_services(services):
            services = list(services)
            action.add_success_fields(service_count=len(services))
            if services:
                return services[0]
            return None
        d.addCallback(got_services)
        return d.addActionFinish()
Example #55
File: _keys.py Project: sysuwbs/flocker
def ensure_agent_has_ssh_key(reactor, key):
    """
    Check that the running ssh-agent has the private key corresponding to the
    provided key.

    :param reactor: The reactor to use to connect to the agent.
    :param Key key: The ssh key to check for in the agent.

    :return Deferred: That fires with a successful result if the key is found.
       Otherwise, fails with ``AgentNotFound`` or ``KeyNotFound``.
    """
    try:
        agent_socket = os.environ["SSH_AUTH_SOCK"]
    except KeyError:
        return fail(AgentNotFound())

    if not key.isPublic():
        key = key.public()

    action = start_action(
        action_type="flocker.provision.ssh:check_agent_has_ssh_keys",
        key_fingerprint=key.fingerprint(),
        agent_socket=agent_socket,
    )

    with action.context():

        agent_endpoint = UNIXClientEndpoint(reactor, agent_socket)
        agent_protocol = SSHAgentClient()
        connected = DeferredContext(connectProtocol(agent_endpoint, agent_protocol))
        connected.addCallback(lambda _: agent_protocol.requestIdentities())

        def check_keys(results):
            for key_data, comment in results:
                agent_key = Key.fromString(key_data, type="blob")
                Message.new(
                    message_type="flocker.provision.ssh:agent_key",
                    key_fingerprint=agent_key.fingerprint(),
                    comment=comment,
                ).write()
                if agent_key == key:
                    return True
            raise KeyNotFound(expected_key=key)

        connected.addCallback(check_keys)

        def disconnect(result):
            agent_protocol.transport.loseConnection()
            return result

        connected.addBoth(disconnect)

        return connected.addActionFinish()
Example #56
    def create_stateful_container(self, node, count):
        """
        Configure a stateful container to mount a new dataset, and wait for
        it to be running.
        """
        with start_action(
            action_type=u'flocker:benchmark:create_stateful_container',
            node=unicode(node.uuid),
            count=count
        ):
            d = DeferredContext(
                self.client.create_dataset(
                    primary=node.uuid,
                    maximum_size=self.max_size,
                )
            )

            def start_container(dataset):
                volume = MountedDataset(
                    dataset_id=dataset.dataset_id,
                    mountpoint=self.mountpoint
                )
                d = create_container(
                    self.reactor,
                    control_service=self.client,
                    node_uuid=node.uuid,
                    name=unicode(uuid4()),
                    image=self.image,
                    volumes=[volume],
                    timeout=self.timeout)

                # If container creation fails, delete dataset as well
                def delete_dataset(failure):
                    d = self.client.delete_dataset(dataset.dataset_id)
                    d.addErrback(write_failure)
                    d.addBoth(lambda _ignore: failure)
                    return d
                d.addErrback(delete_dataset)

                return d
            d.addCallback(start_container)

            def update_container_count(container):
                self.container_count += 1

            def update_error_count(failure):
                self.error_count += 1
                failure.printTraceback(sys.stderr)
                write_failure(failure)

            d.addCallbacks(update_container_count, update_error_count)

            return d.addActionFinish()
Example #57
def get_s3_buckets(s3):
    action = start_action(action_type=u"list-buckets")
    with action.context():
        d = DeferredContext(s3.list_buckets())

        def got_buckets(buckets):
            buckets = list(buckets)
            action.add_success_fields(bucket_count=len(buckets))
            return buckets

        d.addCallback(got_buckets)
        return d.addActionFinish()
Example #58
 def wrapper(self, *args, **kwargs):
     context = start_action(
         Logger(),
         action_type=label,
         args=args,
         kwargs=kwargs,
     )
     with context.context():
         d = DeferredContext(function(self, *args, **kwargs))
         d.addCallback(log_result)
         d.addActionFinish()
         return d.result
Example #59
def get_customer_grid_service(k8s, namespace):
    action = start_action(action_type=u"load-services")
    with action.context():
        d = DeferredContext(k8s.get_services(_s4_selector(namespace)))
        def got_services(services):
            services = list(services)
            action.add_success_fields(service_count=len(services))
            if services:
                return services[0]
            return None
        d.addCallback(got_services)
        return d.addActionFinish()
Example #60
def get_customer_grid_pods(k8s, namespace):
    action = start_action(action_type=u"load-pods")
    with action.context():
        d = DeferredContext(k8s.get_pods(_s4_selector(namespace)))
        def got_pods(pods):
            pods = list(pods)
            running = count(pod for pod in pods if pod.status.phase == u"Running")
            action.add_success_fields(pod_count=len(pods), pod_running_count=running)
            _PODS.set(len(pods))
            _PODS_RUNNING.set(running)
            return pods
        d.addCallback(got_pods)
        return d.addActionFinish()