Code example #1
File: test_protocol.py Project: ruo91/flocker
class FakeAgent(object):
    """
    Fake agent for testing.
    """
    logger = Logger()

    def connected(self, client):
        self.is_connected = True
        self.client = client

    def disconnected(self):
        self.is_disconnected = True
        self.client = None

    def cluster_updated(self, configuration, cluster_state):
        self.desired = configuration
        self.actual = cluster_state
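
The fake simply records whatever the protocol hands it, so a test can drive it directly and assert on the recorded attributes. A minimal usage sketch (hypothetical; ``object()`` stands in for a real AMP client):

agent = FakeAgent()
agent.connected(client=object())
assert agent.is_connected and agent.client is not None

agent.cluster_updated(configuration={"desired": 0}, cluster_state={"actual": 0})
assert agent.desired == {"desired": 0}

agent.disconnected()
assert agent.is_disconnected and agent.client is None
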
Code example #2
File: test_fsm.py Project: sashka/machinist
class AnimalWorld(FancyStrMixin, object):
    logger = Logger()

    def __init__(self, animals):
        """
        @param animals: A L{list} to which output animals will be appended.
        """
        self.animals = animals

    def identifier(self):
        """
        Generate a stable, useful identifier for this L{AnimalWorld}.
        """
        return u"<AnimalWorld>"

    def output_AARDVARK(self, context):
        self.animals.append((Output.aardvark, context))
        LOG_ANIMAL().write(self.logger)
Code example #3
File: testtools.py Project: zendad/flocker
class ControllableAction(object):
    """
    ``IStateChange`` whose results can be controlled.
    """
    called = False
    deployer = None
    state_persister = None

    _logger = Logger()

    @property
    def eliot_action(self):
        return CONTROLLABLE_ACTION_TYPE(self._logger)
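
    # NOTE: ``run`` below returns ``self.result``, which this excerpt never
    # defines; the full class presumably lets the test set it up front
    # (hence "controllable").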

    def run(self, deployer, state_persister):
        self.called = True
        self.deployer = deployer
        self.state_persister = state_persister
        return self.result
Code example #4
class MemoryNetwork(object):
    """
    An isolated, in-memory-only implementation of ``INetwork``.

    :ivar set _proxies: A ``set`` of ``Proxy`` instances representing all of
        the proxies supposedly configured on this network.
    """
    logger = Logger()

    def __init__(self, used_ports):
        self._proxies = set()
        self._used_ports = used_ports
        self._open_ports = set()

    def create_proxy_to(self, ip, port):
        proxy = Proxy(ip=ip, port=port)
        self._proxies.add(proxy)
        return proxy

    def delete_proxy(self, proxy):
        self._proxies.remove(proxy)

    def open_port(self, port):
        open_port = OpenPort(port=port)
        self._open_ports.add(open_port)
        return open_port

    def delete_open_port(self, open_port):
        self._open_ports.remove(open_port)

    def enumerate_proxies(self):
        return list(self._proxies)

    def enumerate_open_ports(self):
        return list(self._open_ports)

    def enumerate_used_ports(self):
        proxy_ports = frozenset(proxy.port for proxy in self._proxies)
        open_ports = frozenset(open_port.port
                               for open_port in self._open_ports)
        return proxy_ports | open_ports | self._used_ports
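
Because ``enumerate_used_ports`` unions proxy ports, open ports, and the injected ``used_ports``, a test can seed port collisions explicitly. A hypothetical round trip (assuming ``Proxy`` and ``OpenPort`` accept these keyword arguments, as the methods above suggest):

network = MemoryNetwork(used_ports=frozenset([8080]))
network.create_proxy_to(ip=u"10.0.0.1", port=443)
network.open_port(2376)
assert network.enumerate_used_ports() == frozenset([443, 2376, 8080])
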
Code example #5
def flocker_ca(command, *args, **kwargs):
    """
    Run a flocker-ca command and return the output along with an indicator
    as to whether or not the command succeeded.

    :param str command: The flocker-ca subcommand to execute.
    :param args: Additional parameters to pass to the command.
    :return: A ``tuple`` containing the integer return code and
        string output.
    """
    command = [EXECUTABLE, command] + list(args)
    try:
        result = run_process(command, **kwargs)
        output = result.output
        status = 0
    except CalledProcessError as e:
        Message.new(message_type="flocker.ca.functional:ca_initialize_error",
                    error=str(e)).write(Logger())
        output = e.output
        status = e.returncode
    return (status, output)
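
Callers get a ``(status, output)`` pair back, so success checks read like shell scripting. A hypothetical call (the subcommand and arguments are illustrative):

status, output = flocker_ca("initialize", "mycluster")
if status != 0:
    print("flocker-ca failed with status %d: %s" % (status, output))
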
Code example #6
File: _memory.py Project: zhgwenming/flocker
class MemoryNetwork(object):
    """
    An isolated, in-memory-only implementation of ``INetwork``.

    :ivar set _proxies: A ``set`` of ``Proxy`` instances representing all of
        the proxies supposedly configured on this network.
    """
    logger = Logger()

    def __init__(self):
        self._proxies = set()

    def create_proxy_to(self, ip, port):
        proxy = Proxy(ip=ip, port=port)
        self._proxies.add(proxy)
        return proxy

    def delete_proxy(self, proxy):
        self._proxies.remove(proxy)

    def enumerate_proxies(self):
        return list(self._proxies)
Code example #7
File: _iptables.py Project: zhgwenming/flocker
class HostNetwork(object):
    """
    An ``INetwork`` implementation based on ``iptables``.
    """
    logger = Logger()

    def create_proxy_to(self, ip, port):
        """
        Configure iptables to proxy TCP traffic on the given port.

        :see: :meth:`INetwork.create_proxy_to` for parameter documentation.
        """
        return create_proxy_to(self.logger, ip, port)

    def delete_proxy(self, proxy):
        """
        Remove the iptables configuration which makes the given proxy work.

        :see: :meth:`INetwork.delete_proxy` for parameter documentation.
        """
        return delete_proxy(self.logger, proxy)

    enumerate_proxies = staticmethod(enumerate_proxies)
Code example #8
File: _iptables.py Project: yilab/flocker
class HostNetwork(object):
    """
    An ``INetwork`` implementation based on ``iptables``.
    """
    logger = Logger()

    def create_proxy_to(self, ip, port):
        """
        Configure iptables to proxy TCP traffic on the given port.

        :see: :meth:`INetwork.create_proxy_to` for parameter documentation.
        """
        return create_proxy_to(self.logger, ip, port)

    def delete_proxy(self, proxy):
        """
        Remove the iptables configuration which makes the given proxy work.

        :see: :meth:`INetwork.delete_proxy` for parameter documentation.
        """
        return delete_proxy(self.logger, proxy)

    enumerate_proxies = staticmethod(enumerate_proxies)

    def enumerate_used_ports(self):
        """
        Find all ports that are in use on this node by normal TCP servers or by
        proxies managed by this object.

        :see: :meth:`INetwork.enumerate_used_ports` for parameter
            documentation.
        """
        listening = set(conn.laddr[1] for conn in net_connections(kind='tcp'))
        proxied = set(proxy.port for proxy in self.enumerate_proxies())
        # net_connections won't tell us about ports bound by sockets that
        # haven't entered the TCP state graph yet.
        return frozenset(listening | proxied)
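
The ``net_connections`` call here is presumably ``psutil.net_connections``; a standalone sketch of the same port-collection idea using psutil directly:

import psutil

def listening_tcp_ports():
    # laddr is an (address, port) pair; index 1 is the local port.
    return frozenset(conn.laddr[1]
                     for conn in psutil.net_connections(kind='tcp'))
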
Code example #9
File: blockdevice.py Project: jaggerliu/flocker
class CreateBlockDeviceDataset(PRecord):
    """
    An operation to create a new dataset on a newly created volume with a newly
    initialized filesystem.

    :ivar Dataset dataset: The dataset for which to create a block device.
    :ivar FilePath mountpoint: The path at which to mount the created device.
    :ivar Logger logger: An Eliot ``Logger``.
    """
    dataset = field(mandatory=True, type=Dataset)
    mountpoint = field(mandatory=True, type=FilePath)

    logger = Logger()

    def run(self, deployer):
        """
        Create a block device, attach it to the local host, create an ``ext4``
        filesystem on the device and mount it.

        Operations are performed synchronously.

        See ``IStateChange.run`` for general argument and return type
        documentation.

        :returns: An already fired ``Deferred`` with result ``None``.
        """
        with CREATE_BLOCK_DEVICE_DATASET(self.logger,
                                         dataset=self.dataset,
                                         mountpoint=self.mountpoint) as action:
            api = deployer.block_device_api
            volume = api.create_volume(
                dataset_id=UUID(self.dataset.dataset_id),
                size=self.dataset.maximum_size,
            )

            # This will be factored into a separate IStateChange to support the
            # case where the volume exists but is not attached.  That object
            # will be used by this one to perform this work.  FLOC-1575
            volume = api.attach_volume(volume.blockdevice_id,
                                       deployer.hostname)
            device = api.get_device_path(volume.blockdevice_id)

            # This will be factored into a separate IStateChange to support the
            # case where the volume is attached but has no filesystem.  That
            # object will be used by this one to perform this work. FLOC-1576
            check_output(["mkfs", "-t", "ext4", device.path])

            # This will be factored into a separate IStateChange to support the
            # case where the only state change necessary is mounting.  That
            # object will be used by this one to perform this mount. It will
            # also gracefully handle the case where the mountpoint directory
            # already exists.  FLOC-1498
            self.mountpoint.makedirs()
            check_output(["mount", device.path, self.mountpoint.path])

            action.add_success_fields(
                block_device_path=device,
                block_device_id=volume.blockdevice_id,
                dataset_id=volume.dataset_id,
                block_device_size=volume.size,
                block_device_host=volume.host,
            )
        return succeed(None)
Code example #10
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.

    :ivar dict _current_command: A dictionary containing information about
        connections to which state updates are currently in progress.  The keys
        are protocol instances.  The values are ``_UpdateState`` instances.
    :ivar IReactorTime _reactor: An ``IReactorTime`` provider to be used to
        schedule delays in sending updates.
    :ivar set _connections_pending_update: A ``set`` of connections that are
        currently pending getting an update of state and configuration. An
        empty set indicates that there is no update pending.
    :ivar IDelayedCall _current_pending_update_delayed_call: The
        ``IDelayedCall`` provider for the currently pending call to update
        state/configuration on connected nodes.
    """
    logger = Logger()

    def __init__(self, reactor, cluster_state, configuration_service, endpoint,
                 context_factory):
        """
        :param reactor: See ``ControlServiceLocator.__init__``.
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        :param context_factory: TLS context factory.
        """
        self._connections = set()
        self._reactor = reactor
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._current_command = {}
        self._last_received_generation = defaultdict(
            lambda: _ConfigAndStateGeneration())
        self._configuration_generation_tracker = GenerationTracker(100)
        self._state_generation_tracker = GenerationTracker(100)
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint,
            TLSMemoryBIOFactory(
                context_factory, False,
                ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(self._schedule_broadcast_update)

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        if self._current_pending_update_delayed_call:
            self._current_pending_update_delayed_call.cancel()
            self._current_pending_update_delayed_call = None
        self.endpoint_service.stopService()
        for connection in self._connections:
            connection.transport.loseConnection()
        self._connections = set()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()

        # Connections are separated into three groups to support a scheme which
        # lets us avoid sending certain updates which we know are not
        # necessary.  This reduces traffic and associated costs (CPU, memory).
        #
        # Other schemes are possible and might produce even better performance.
        # See https://clusterhq.atlassian.net/browse/FLOC-3140 for some
        # brainstorming.

        # Collect connections for which there is currently no unacknowledged
        # update.  These can receive a new update right away.
        can_update = []

        # Collect connections for which there is an unacknowledged update.
        # Since something has changed, these should receive another update once
        # that acknowledgement is received.
        delayed_update = []

        # Collect connections which were already set to receive a delayed
        # update and still haven't sent an acknowledgement.  These will still
        # receive a delayed update but we'll also note that we're going to skip
        # sending one intermediate update to them.
        elided_update = []

        for connection in connections:
            try:
                update = self._current_command[connection]
            except KeyError:
                # There's nothing in the tracking state for this connection.
                # That means there's no unacknowledged update.  That means we
                # can send another update right away.
                can_update.append(connection)
            else:
                # These connections do currently have an unacknowledged update
                # outstanding.
                if update.next_scheduled:
                    # And these connections are also already scheduled to
                    # receive another update after the one they're currently
                    # processing.  That update will include the most up-to-date
                    # information so we're effectively skipping an update
                    # that's no longer useful.
                    elided_update.append(connection)
                else:
                    # These don't have another update scheduled yet so we'll
                    # schedule one.
                    delayed_update.append(connection)

        # Make sure to run the logging action inside the caching block.
        # This lets encoding for logging share the cache with encoding for
        # network traffic.
        with LOG_SEND_CLUSTER_STATE() as action:
            if can_update:
                # If there are any protocols that can be updated right now,
                # we also want to see what updates they receive.  Since
                # logging shares the caching context, it shouldn't be any
                # more expensive to serialize this information into the log
                # now.  We specifically avoid logging this information if
                # no protocols are being updated because the serializing is
                # more expensive in that case and at the same time that
                # information isn't actually useful.
                action.add_success_fields(configuration=configuration,
                                          state=state)
            else:
                # Eliot wants those fields though.
                action.add_success_fields(configuration=None, state=None)

            for connection in can_update:
                self._update_connection(connection, configuration, state)

            for connection in elided_update:
                AGENT_UPDATE_ELIDED(agent=connection).write()

            for connection in delayed_update:
                self._delayed_update_connection(connection)

    def _update_connection(self, connection, configuration, state):
        """
        Send the latest cluster configuration and state to ``connection``.

        :param ControlAMP connection: The connection to use to send the
            command.
        """

        # Set the configuration and the state to the latest versions. It is
        # okay to call this even if the latest configuration is the same
        # object.
        self._configuration_generation_tracker.insert_latest(configuration)
        self._state_generation_tracker.insert_latest(state)

        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():

            # Attempt to compute a diff to send to the connection
            last_received_generations = (
                self._last_received_generation[connection])

            config_gen_tracker = self._configuration_generation_tracker
            configuration_diff = (
                config_gen_tracker.get_diff_from_hash_to_latest(
                    last_received_generations.config_hash))

            state_gen_tracker = self._state_generation_tracker
            state_diff = (state_gen_tracker.get_diff_from_hash_to_latest(
                last_received_generations.state_hash))

            if configuration_diff is not None and state_diff is not None:
                # If both diffs were successfully computed, send a command to
                # send the diffs along with before and after hashes so the
                # nodes can verify the application of the diffs.
                d = DeferredContext(
                    maybeDeferred(connection.callRemote,
                                  ClusterStatusDiffCommand,
                                  configuration_diff=configuration_diff,
                                  start_configuration_generation=(
                                      last_received_generations.config_hash),
                                  end_configuration_generation=(
                                      config_gen_tracker.get_latest_hash()),
                                  state_diff=state_diff,
                                  start_state_generation=(
                                      last_received_generations.state_hash),
                                  end_state_generation=(
                                      state_gen_tracker.get_latest_hash()),
                                  eliot_context=action))
                d.addActionFinish()
            else:
                # Otherwise, just send the latest configuration and state to
                # the node.
                configuration = config_gen_tracker.get_latest()
                state = state_gen_tracker.get_latest()
                # Use ``maybeDeferred`` so if an exception happens,
                # it will be wrapped in a ``Failure`` - see FLOC-3221
                d = DeferredContext(
                    maybeDeferred(
                        connection.callRemote,
                        ClusterStatusCommand,
                        configuration=configuration,
                        configuration_generation=(
                            config_gen_tracker.get_latest_hash()),
                        state=state,
                        state_generation=state_gen_tracker.get_latest_hash(),
                        eliot_context=action))
                d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(response):
            del self._current_command[connection]
            if response:
                config_gen = response['current_configuration_generation']
                state_gen = response['current_state_generation']
                self._last_received_generation[connection] = (
                    _ConfigAndStateGeneration(config_hash=config_gen,
                                              state_hash=state_gen))
                #  If the latest hash was not returned, schedule an update.
                if (self._configuration_generation_tracker.get_latest_hash() !=
                        config_gen
                        or self._state_generation_tracker.get_latest_hash() !=
                        state_gen):
                    self._schedule_update([connection])

        update.response.addCallback(finished_update)

    def _delayed_update_connection(self, connection):
        """
        Send a ``ClusterStatusCommand`` to an agent after it has acknowledged
        the last one.

        :param ControlAMP connection: The connection to use to send the
            command.  This connection is expected to have previously been sent
            such a command and to not yet have acknowledged it.  Internal state
            related to this will be used and then updated.
        """
        AGENT_UPDATE_DELAYED(agent=connection).write()
        update = self._current_command[connection]
        update.response.addCallback(
            lambda ignored: self._schedule_update([connection]), )
        self._current_command[connection] = update.set(next_scheduled=True)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        with AGENT_CONNECTED(agent=connection):
            self._connections.add(connection)
            self._schedule_update([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self._connections.remove(connection)
        if connection in self._connections_pending_update:
            self._connections_pending_update.remove(connection)
        if connection in self._last_received_generation:
            del self._last_received_generation[connection]

    def _execute_update_connections(self):
        """
        Actually executes an update to all pending connections.
        """
        connections_to_update = self._connections_pending_update
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._send_state_to_connections(connections_to_update)

    def _schedule_update(self, connections):
        """
        Schedule a call to ``_send_state_to_connections``.

        This function adds a delay in the hopes that additional updates will be
        scheduled and they can all be called at once in a batch.

        :param connections: An iterable of connections that will be passed to
            ``_send_state_to_connections``.
        """
        self._connections_pending_update.update(set(connections))

        # If there is no current pending update and there are connections
        # pending an update, we must schedule the delayed call to update
        # connections.
        if (self._current_pending_update_delayed_call is None
                and self._connections_pending_update):
            self._current_pending_update_delayed_call = (
                self._reactor.callLater(CONTROL_SERVICE_BATCHING_DELAY,
                                        self._execute_update_connections))

    def _schedule_broadcast_update(self):
        """
        Ensure that there is a pending broadcast update call.

        This is called when the state or configuration is updated, to trigger
        a broadcast of the current state and configuration to all nodes.

        In general, it only schedules an update to be broadcast a short
        batching delay (``CONTROL_SERVICE_BATCHING_DELAY``) later, so that if
        we receive multiple updates within that window they are coalesced down
        to a single update.
        """
        self._schedule_update(self._connections)

    def node_changed(self, source, state_changes):
        """
        We've received a node state update from a connected client.

        :param IClusterStateSource source: Representation of where these
            changes were received from.
        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes_from_source(source, state_changes)
        self._schedule_broadcast_update()
Code example #11
class StoragePool(Service):
    """
    A ZFS storage pool.

    Remotely owned filesystems are mounted read-only to prevent changes
    (divergence which would break ``zfs recv``).  This is done by having the
    root dataset be ``readonly=on`` - which is inherited by all child datasets.
    Locally owned datasets have this overridden with an explicit
    ``readonly=off`` property set on them.
    """
    logger = Logger()

    def __init__(self, reactor, name, mount_root):
        """
        :param reactor: A ``IReactorProcess`` provider.
        :param bytes name: The pool's name.
        :param FilePath mount_root: Directory where filesystems should be
            mounted.
        """
        self._reactor = reactor
        self._name = name
        self._mount_root = mount_root

    def startService(self):
        """
        Make sure that the necessary properties are set on the root Flocker zfs
        storage pool.
        """
        Service.startService(self)

        # These next things are logically part of the storage pool creation
        # process.  Since Flocker itself doesn't yet have any involvement with
        # that process, it's difficult to find a better time/place to set these
        # properties than here - ie, "every time we're about to interact with
        # the storage pool".  In the future it would be better if we could do
        # these things one-off - sometime around when the pool is created or
        # when Flocker is first installed, for example.  Then we could get rid
        # of these operations from this method (which eliminates the motivation
        # for StoragePool being an IService implementation).
        # https://clusterhq.atlassian.net/browse/FLOC-635

        # Set the root dataset to be read only; IService.startService
        # doesn't support Deferred results, and in any case startup can be
        # synchronous with no ill effects.
        _sync_command_error_squashed(
            [b"zfs", b"set", b"readonly=on", self._name], self.logger)

        # If the root dataset is read-only then it's not possible to create
        # mountpoints in it for its child datasets.  Avoid mounting it to avoid
        # this problem.  This should be fine since we don't ever intend to put
        # any actual data into the root dataset.
        _sync_command_error_squashed(
            [b"zfs", b"set", b"canmount=off", self._name], self.logger)

    def _check_for_out_of_space(self, reason):
        """
        Translate a ZFS command failure into ``MaximumSizeTooSmall`` if that is
        what the command failure represents.
        """
        # This can't actually check anything.
        # https://clusterhq.atlassian.net/browse/FLOC-992
        return Failure(MaximumSizeTooSmall())

    def create(self, volume):
        filesystem = self.get(volume)
        mount_path = filesystem.get_path().path
        properties = [b"-o", b"mountpoint=" + mount_path]
        if volume.locally_owned():
            properties.extend([b"-o", b"readonly=off"])
        if volume.size.maximum_size is not None:
            properties.extend([
                b"-o", u"refquota={0}".format(
                    volume.size.maximum_size).encode("ascii")
            ])
        d = zfs_command(self._reactor,
                        [b"create"] + properties + [filesystem.name])
        d.addErrback(self._check_for_out_of_space)
        d.addCallback(lambda _: filesystem)
        return d
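
    # For a locally owned volume with, say, a 100 MiB quota, the command built
    # above is roughly ``zfs create -o mountpoint=<path> -o readonly=off
    # -o refquota=104857600 <filesystem.name>`` (illustrative reconstruction).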

    def destroy(self, volume):
        filesystem = self.get(volume)
        d = filesystem.snapshots()

        # It would be better to have snapshot destruction logic as part of
        # IFilesystemSnapshots, but that isn't really necessary yet.
        def got_snapshots(snapshots):
            return gatherResults(list(zfs_command(
                self._reactor,
                [b"destroy", b"%s@%s" % (filesystem.name, snapshot.name)])
                for snapshot in snapshots))
        d.addCallback(got_snapshots)
        d.addCallback(lambda _: zfs_command(
            self._reactor, [b"destroy", filesystem.name]))
        return d

    def set_maximum_size(self, volume):
        filesystem = self.get(volume)
        properties = []
        if volume.size.maximum_size is not None:
            properties.extend([
                u"refquota={0}".format(
                    volume.size.maximum_size).encode("ascii")
            ])
        else:
            properties.extend([u"refquota=none"])
        d = zfs_command(self._reactor,
                        [b"set"] + properties + [filesystem.name])
        d.addErrback(self._check_for_out_of_space)
        d.addCallback(lambda _: filesystem)
        return d

    def clone_to(self, parent, volume):
        parent_filesystem = self.get(parent)
        new_filesystem = self.get(volume)
        zfs_snapshots = ZFSSnapshots(self._reactor, parent_filesystem)
        snapshot_name = bytes(uuid4())
        d = zfs_snapshots.create(snapshot_name)
        clone_command = [b"clone",
                         # Snapshot we're cloning from:
                         b"%s@%s" % (parent_filesystem.name, snapshot_name),
                         # New filesystem we're cloning to:
                         new_filesystem.name,
                         ]
        d.addCallback(lambda _: zfs_command(self._reactor, clone_command))
        self._created(d, volume)
        d.addCallback(lambda _: new_filesystem)
        return d

    def change_owner(self, volume, new_volume):
        old_filesystem = self.get(volume)
        new_filesystem = self.get(new_volume)
        d = zfs_command(self._reactor,
                        [b"rename", old_filesystem.name, new_filesystem.name])
        self._created(d, new_volume)

        def remounted(ignored):
            # Use os.rmdir instead of FilePath.remove since we don't want
            # recursive behavior. If the directory is non-empty, something
            # went wrong (or there is a race) and we don't want to lose data.
            os.rmdir(old_filesystem.get_path().path)
        d.addCallback(remounted)
        d.addCallback(lambda _: new_filesystem)
        return d

    def _created(self, result, new_volume):
        """
        Common post-processing for attempts at creating new volumes from other
        volumes.

        In particular this includes error handling and ensuring read-only
        and mountpoint properties are set correctly.

        :param Deferred result: The result of the creation attempt.

        :param Volume new_volume: Volume we're trying to create.
        """
        new_filesystem = self.get(new_volume)
        new_mount_path = new_filesystem.get_path().path

        def creation_failed(f):
            if f.check(CommandFailed):
                # This isn't the only reason the operation could fail. We
                # should figure out why and report it appropriately.
                # https://clusterhq.atlassian.net/browse/FLOC-199
                raise FilesystemAlreadyExists()
            return f
        result.addErrback(creation_failed)

        def exists(ignored):
            if new_volume.locally_owned():
                result = zfs_command(self._reactor,
                                     [b"set", b"readonly=off",
                                      new_filesystem.name])
            else:
                result = zfs_command(self._reactor,
                                     [b"inherit", b"readonly",
                                      new_filesystem.name])
            result.addCallback(lambda _: zfs_command(self._reactor,
                               [b"set", b"mountpoint=" + new_mount_path,
                                new_filesystem.name]))
            return result
        result.addCallback(exists)

    def get(self, volume):
        dataset = volume_to_dataset(volume)
        mount_path = self._mount_root.child(dataset)
        return Filesystem(
            self._name, dataset, mount_path, volume.size)

    def enumerate(self):
        listing = _list_filesystems(self._reactor, self._name)

        def listed(filesystems):
            result = set()
            for entry in filesystems:
                filesystem = Filesystem(
                    self._name, entry.dataset, FilePath(entry.mountpoint),
                    VolumeSize(maximum_size=entry.refquota))
                result.add(filesystem)
            return result

        return listing.addCallback(listed)
Code example #12
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.

    :ivar dict _current_command: A dictionary containing information about
        connections to which state updates are currently in progress.  The keys
        are protocol instances.  The values are ``_UpdateState`` instances.
    """
    logger = Logger()

    def __init__(self, reactor, cluster_state, configuration_service, endpoint,
                 context_factory):
        """
        :param reactor: See ``ControlServiceLocator.__init__``.
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        :param context_factory: TLS context factory.
        """
        self.connections = set()
        self._current_command = {}
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint,
            TLSMemoryBIOFactory(
                context_factory, False,
                ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()

        # Connections are separated into three groups to support a scheme which
        # lets us avoid sending certain updates which we know are not
        # necessary.  This reduces traffic and associated costs (CPU, memory).
        #
        # Other schemes are possible and might produce even better performance.
        # See https://clusterhq.atlassian.net/browse/FLOC-3140 for some
        # brainstorming.

        # Collect connections for which there is currently no unacknowledged
        # update.  These can receive a new update right away.
        can_update = []

        # Collect connections for which there is an unacknowledged update.
        # Since something has changed, these should receive another update once
        # that acknowledgement is received.
        delayed_update = []

        # Collect connections which were already set to receive a delayed
        # update and still haven't sent an acknowledgement.  These will still
        # receive a delayed update but we'll also note that we're going to skip
        # sending one intermediate update to them.
        elided_update = []

        for connection in connections:
            try:
                update = self._current_command[connection]
            except KeyError:
                # There's nothing in the tracking state for this connection.
                # That means there's no unacknowledged update.  That means we
                # can send another update right away.
                can_update.append(connection)
            else:
                # These connections do currently have an unacknowledged update
                # outstanding.
                if update.next_scheduled:
                    # And these connections are also already scheduled to
                    # receive another update after the one they're currently
                    # processing.  That update will include the most up-to-date
                    # information so we're effectively skipping an update
                    # that's no longer useful.
                    elided_update.append(connection)
                else:
                    # These don't have another update scheduled yet so we'll
                    # schedule one.
                    delayed_update.append(connection)

        # Make sure to run the logging action inside the caching block.
        # This lets encoding for logging share the cache with encoding for
        # network traffic.
        with LOG_SEND_CLUSTER_STATE() as action:
            if can_update:
                # If there are any protocols that can be updated right now,
                # we also want to see what updates they receive.  Since
                # logging shares the caching context, it shouldn't be any
                # more expensive to serialize this information into the log
                # now.  We specifically avoid logging this information if
                # no protocols are being updated because the serializing is
                # more expensive in that case and at the same time that
                # information isn't actually useful.
                action.add_success_fields(configuration=configuration,
                                          state=state)
            else:
                # Eliot wants those fields though.
                action.add_success_fields(configuration=None, state=None)

            for connection in can_update:
                self._update_connection(connection, configuration, state)

            for connection in elided_update:
                AGENT_UPDATE_ELIDED(agent=connection).write()

            for connection in delayed_update:
                self._delayed_update_connection(connection)

    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.

        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(
                maybeDeferred(connection.callRemote,
                              ClusterStatusCommand,
                              configuration=configuration,
                              state=state,
                              eliot_context=action))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]

        update.response.addCallback(finished_update)

    def _delayed_update_connection(self, connection):
        """
        Send a ``ClusterStatusCommand`` to an agent after it has acknowledged
        the last one.

        :param ControlAMP connection: The connection to use to send the
            command.  This connection is expected to have previously been sent
            such a command and to not yet have acknowledged it.  Internal state
            related to this will be used and then updated.
        """
        AGENT_UPDATE_DELAYED(agent=connection).write()
        update = self._current_command[connection]
        update.response.addCallback(
            lambda ignored: self._send_state_to_connections([connection]), )
        self._current_command[connection] = update.set(next_scheduled=True)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        with AGENT_CONNECTED(agent=connection):
            self.connections.add(connection)
            self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, source, state_changes):
        """
        We've received a node state update from a connected client.

        :param IClusterStateSource source: Representation of where these
            changes were received from.
        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes_from_source(source, state_changes)
        self._send_state_to_connections(self.connections)
Code example #13
import time

from eliot import Logger, MessageType, Field, ActionType


def _ascii(s):
    return s.decode("ascii")


F1 = Field.forTypes("integer", [int], "")
F2 = Field("string", _ascii, "")
F3 = Field("string2", _ascii, "")
F4 = Field.forTypes("list", [list], "list of integers")

M = MessageType("system:message", [F1, F2, F3, F4], "description")
A = ActionType("action", [], [], [], "desc")

log = Logger()

N = 100000
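
# Note: this benchmark is Python 2 code (``xrange`` and the ``print``
# statement in ``run`` below).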


def run():
    start = time.time()
    with A(log):
        for i in xrange(N):
            m = M(integer=3,
                  string=b"abcdeft",
                  string2="dgsjdlkgjdsl",
                  list=[1, 2, 3, 4])
            m.write(log)
    end = time.time()
    print "%.6f per message" % ((end - start) / N, )
Code example #14
class ConfigurationPersistenceService(Service):
    """
    Persist configuration to disk, and load it back.

    :ivar Deployment _deployment: The current desired deployment configuration.
    """
    logger = Logger()

    def __init__(self, reactor, path):
        """
        :param reactor: Reactor to use for thread pool.
        :param FilePath path: Directory where desired deployment will be
            persisted.
        """
        self._path = path
        self._change_callbacks = []

    def startService(self):
        if not self._path.exists():
            self._path.makedirs()
        self._config_path = self._path.child(b"current_configuration.v1.json")
        if self._config_path.exists():
            self._deployment = wire_decode(self._config_path.getContent())
        else:
            self._deployment = Deployment(nodes=frozenset())
            self._sync_save(self._deployment)
        _LOG_STARTUP(configuration=self.get()).write(self.logger)

    def register(self, change_callback):
        """
        Register a function to be called whenever the configuration changes.

        :param change_callback: Callable that takes no arguments, will be
            called when configuration changes.
        """
        self._change_callbacks.append(change_callback)

    def _sync_save(self, deployment):
        """
        Save and flush new deployment to disk synchronously.
        """
        self._config_path.setContent(wire_encode(deployment))

    def save(self, deployment):
        """
        Save and flush new deployment to disk.

        :return Deferred: Fires when write is finished.
        """
        with _LOG_SAVE(self.logger, configuration=deployment):
            self._sync_save(deployment)
            self._deployment = deployment
            # At some future point this will likely involve talking to a
            # distributed system (e.g. ZooKeeper or etcd), so the API doesn't
            # guarantee immediate saving of the data.
            for callback in self._change_callbacks:
                try:
                    callback()
                except:
                    # Second argument will be ignored in next Eliot release, so
                    # not bothering with particular value.
                    write_traceback(self.logger, u"")
            return succeed(None)

    def get(self):
        """
        Retrieve current configuration.

        It should not be mutated.

        :return Deployment: The current desired configuration.
        """
        return self._deployment
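
Consumers pair ``register`` with ``save``: the registered callbacks run synchronously inside ``save``, after the new deployment has been flushed to disk. A hypothetical caller (``service`` and ``new_deployment`` are stand-ins):

def on_change():
    print("configuration is now: %r" % (service.get(),))

service.register(on_change)
d = service.save(new_deployment)  # on_change runs before save() returns.
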
Code example #15
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.
    """
    logger = Logger()

    def __init__(self, cluster_state, configuration_service, endpoint):
        """
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        """
        self.connections = set()
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint, ServerFactory.forProtocol(lambda: ControlAMP(self)))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()
        with LOG_SEND_CLUSTER_STATE(self.logger,
                                    configuration=configuration,
                                    state=state):
            for connection in connections:
                with LOG_SEND_TO_AGENT(self.logger,
                                       agent=connection) as action:
                    connection.callRemote(ClusterStatusCommand,
                                          configuration=configuration,
                                          state=state,
                                          eliot_context=action)
                # Handle errors from callRemote by logging them
                # https://clusterhq.atlassian.net/browse/FLOC-1311

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        self.connections.add(connection)
        self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, node_state):
        """
        We've received a node state update from a connected client.

        :param NodeState node_state: The changed state for the node.
        """
        self.cluster_state.update_node_state(node_state)
        self._send_state_to_connections(self.connections)
Code example #16
class ConfigurationPersistenceService(MultiService):
    """
    Persist configuration to disk, and load it back.

    :ivar Deployment _deployment: The current desired deployment configuration.
    """
    logger = Logger()

    def __init__(self, reactor, path):
        """
        :param reactor: Reactor to use for thread pool.
        :param FilePath path: Directory where desired deployment will be
            persisted.
        """
        MultiService.__init__(self)
        self._path = path
        self._config_path = self._path.child(b"current_configuration.json")
        self._change_callbacks = []
        LeaseService(reactor, self).setServiceParent(self)

    def startService(self):
        if not self._path.exists():
            self._path.makedirs()
        self.load_configuration()
        MultiService.startService(self)
        _LOG_STARTUP(configuration=self.get()).write(self.logger)

    def _process_v1_config(self, file_name, archive_name):
        """
        Check if a v1 configuration file exists and upgrade it if necessary.
        After upgrade, the v1 configuration file is retained with an archived
        file name, which ensures the data is not lost but we do not overwrite
        a newer configuration version next time the service starts.

        :param bytes file_name: The expected file name of a version 1
            configuration.
        :param bytes archive_name: The file name to which a version 1
            configuration should be moved after it has been processed.
        """
        v1_config_path = self._path.child(file_name)
        v1_archived_path = self._path.child(archive_name)
        # Check for a v1 config and upgrade to latest if found.
        if v1_config_path.exists():
            v1_json = v1_config_path.getContent()
            with _LOG_UPGRADE(self.logger,
                              configuration=v1_json,
                              source_version=1,
                              target_version=_CONFIG_VERSION):
                updated_json = migrate_configuration(1, _CONFIG_VERSION,
                                                     v1_json,
                                                     ConfigurationMigration)
                self._config_path.setContent(updated_json)
                v1_config_path.moveTo(v1_archived_path)

    def load_configuration(self):
        """
        Load the persisted configuration, upgrading the configuration format
        if an older version is detected.
        """
        # Version 1 configurations are a special case. They do not store
        # any version information in the configuration data itself, rather they
        # can only be identified by the use of the file name
        # current_configuration.v1.json
        # Therefore we check for a version 1 configuration file and if it is
        # found, the config is upgraded, written to current_configuration.json
        # and the old file archived as current_configuration.v1.old.json
        self._process_v1_config(
            file_name=b"current_configuration.v1.json",
            archive_name=b"current_configuration.v1.old.json")

        # We can now safely attempt to detect and process a >v1 configuration
        # file as normal.
        if self._config_path.exists():
            config_json = self._config_path.getContent()
            config_dict = loads(config_json)
            config_version = config_dict['version']
            if config_version < _CONFIG_VERSION:
                with _LOG_UPGRADE(self.logger,
                                  configuration=config_json,
                                  source_version=config_version,
                                  target_version=_CONFIG_VERSION):
                    config_json = migrate_configuration(
                        config_version, _CONFIG_VERSION, config_json,
                        ConfigurationMigration)
            config = wire_decode(config_json)
            self._deployment = config.deployment
            self._sync_save(config.deployment)
        else:
            self._deployment = Deployment()
            self._sync_save(self._deployment)

    def register(self, change_callback):
        """
        Register a function to be called whenever the configuration changes.

        :param change_callback: Callable that takes no arguments, will be
            called when configuration changes.
        """
        self._change_callbacks.append(change_callback)

    def _sync_save(self, deployment):
        """
        Save and flush new configuration to disk synchronously.
        """
        config = Configuration(version=_CONFIG_VERSION, deployment=deployment)
        self._config_path.setContent(wire_encode(config))

    def save(self, deployment):
        """
        Save and flush new deployment to disk.

        :return Deferred: Fires when write is finished.
        """
        with _LOG_SAVE(self.logger, configuration=deployment):
            self._sync_save(deployment)
            self._deployment = deployment
            # At some future point this will likely involve talking to a
            # distributed system (e.g. ZooKeeper or etcd), so the API doesn't
            # guarantee immediate saving of the data.
            for callback in self._change_callbacks:
                try:
                    callback()
                except:
                    # Second argument will be ignored in next Eliot release, so
                    # not bothering with particular value.
                    write_traceback(self.logger, u"")
            return succeed(None)

    def get(self):
        """
        Retrieve current configuration.

        It should not be mutated.

        :return Deployment: The current desired configuration.
        """
        return self._deployment
Code example #17
def constructFiniteStateMachine(inputs,
                                outputs,
                                states,
                                table,
                                initial,
                                richInputs,
                                inputContext,
                                world,
                                logger=Logger()):
    """
    Construct a new finite state machine from a definition of its states.

    @param inputs: Definitions of all input symbols the resulting machine will
        need to handle, as a L{twisted.python.constants.Names} subclass.

    @param outputs: Definitions of all output symbols the resulting machine is
        allowed to emit, as a L{twisted.python.constants.Names} subclass.

    @param states: Definitions of all possible states the resulting machine
        will be capable of inhabiting, as a L{twisted.python.constants.Names}
        subclass.

    @param table: The state transition table, defining which output and next
        state results from the receipt of any and all inputs in any and all
        states.
    @type table: L{TransitionTable}

    @param initial: The state the machine will start in (one of the symbols
        from C{states}).

    @param richInputs: A L{list} of types which correspond to each of the input
        symbols from C{inputs}.
    @type richInputs: L{list} of L{IRichInput} I{providers}

    @param inputContext: A L{dict} mapping output symbols to L{Interface}
        subclasses describing the requirements of the inputs which lead to
        them.

    @param world: An object responsible for turning FSM outputs into observable
        side-effects.
    @type world: L{IOutputExecutor} provider

    @return: An L{IFiniteStateMachine} provider
    """
    table = table.table

    _missingExtraCheck(set(table.keys()), set(states.iterconstants()),
                       ExtraTransitionState, MissingTransitionState)

    _missingExtraCheck(set(i for s in table.values() for i in s),
                       set(inputs.iterconstants()), ExtraTransitionInput,
                       MissingTransitionInput)

    _missingExtraCheck(
        set(output for s in table.values()
            for transition in s.values() for output in transition.output),
        set(outputs.iterconstants()), ExtraTransitionOutput,
        MissingTransitionOutput)

    try:
        _missingExtraCheck(
            set(transition.nextState
                for s in table.values() for transition in s.values()),
            set(states.iterconstants()), ExtraTransitionNextState,
            MissingTransitionNextState)
    except MissingTransitionNextState as e:
        if e.args != ({initial}, ):
            raise

    if initial not in states.iterconstants():
        raise InvalidInitialState(initial)

    extraInputContext = set(inputContext) - set(outputs.iterconstants())
    if extraInputContext:
        raise ExtraInputContext(extraInputContext)

    _checkConsistency(richInputs, table, inputContext)

    fsm = _FiniteStateMachine(inputs, outputs, states, table, initial)
    executor = IOutputExecutor(world)
    return _FiniteStateLogger(
        _FiniteStateInterpreter(tuple(richInputs), inputContext, fsm,
                                executor), logger, executor.identifier())
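
A hypothetical construction call, in the style of machinist's documented turnstile example (all names are illustrative; ``trivialInput`` and ``MethodSuffixOutputer`` are assumed to be the usual machinist helpers):

from twisted.python.constants import Names, NamedConstant

from machinist import (
    MethodSuffixOutputer, TransitionTable, constructFiniteStateMachine,
    trivialInput,
)


class Input(Names):
    POKE = NamedConstant()


class Output(Names):
    SAY = NamedConstant()


class State(Names):
    IDLE = NamedConstant()


Poke = trivialInput(Input.POKE)

table = TransitionTable()
table = table.addTransition(State.IDLE, Input.POKE, [Output.SAY], State.IDLE)


class World(object):
    def identifier(self):
        return u"<World>"

    def output_SAY(self, context):
        print("poked")


fsm = constructFiniteStateMachine(
    inputs=Input, outputs=Output, states=State, table=table,
    initial=State.IDLE, richInputs=[Poke], inputContext={},
    world=MethodSuffixOutputer(World()),
)
fsm.receive(Poke())
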
Code example #18
 def eliot_action(self):
     return start_action(Logger(), u"flocker:tests:run_spy_state_change")
Code example #19
 def main(self, reactor, options):
     logger = Logger()
     Message.new(key=123).write(logger)
     return succeed(None)
Code example #20
File: _container.py Project: maskofG/flocker
    Application,
    AttachedVolume,
    NodeState,
    DockerImage,
    Port,
    Link,
    RestartNever,
    pset_field,
    ip_to_uuid,
)
from ..route import make_host_network, Proxy, OpenPort
from ..common import gather_deferreds

from ._deploy import IDeployer, NodeLocalState

_logger = Logger()

NOOP_SLEEP_TIME = timedelta(seconds=5)


def _eliot_system(part):
    return u"flocker:node:container_deployer:" + part


@implementer(IStateChange)
class StartApplication(PClass):
    """
    Launch the supplied application as a container.

    :ivar Application application: The ``Application`` to create and
        start.
Code example #21
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.
    """
    logger = Logger()

    def __init__(self, cluster_state, configuration_service, endpoint):
        """
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        """
        self.connections = set()
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint, ServerFactory.forProtocol(lambda: ControlAMP(self)))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()
        with LOG_SEND_CLUSTER_STATE(self.logger,
                                    configuration=configuration,
                                    state=state):
            for connection in connections:
                action = LOG_SEND_TO_AGENT(self.logger, agent=connection)
                with action.context():
                    d = DeferredContext(
                        connection.callRemote(ClusterStatusCommand,
                                              configuration=configuration,
                                              state=state,
                                              eliot_context=action))
                    d.addActionFinish()
                    d.result.addErrback(lambda _: None)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        self.connections.add(connection)
        self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, state_changes):
        """
        We've received a node state update from a connected client.

        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes(state_changes)
        self._send_state_to_connections(self.connections)