Example 1
    # ``yield``/``returnValue`` require Twisted's ``inlineCallbacks``
    # decorator, elided from this excerpt.
    @inlineCallbacks
    def start_cluster(self, reactor):
        # Destroy the box to begin, so that we are guaranteed
        # a clean build.
        yield run(reactor, ['vagrant', 'destroy', '-f'],
                  path=self.vagrant_path.path)

        if self.package_source.version:
            env = extend_environ(FLOCKER_BOX_VERSION=vagrant_version(
                self.package_source.version))
        else:
            env = os.environ
        # Boot the VMs
        yield run(reactor, ['vagrant', 'up'],
                  path=self.vagrant_path.path,
                  env=env)

        for node in self.NODE_ADDRESSES:
            yield remove_known_host(reactor, node)

        nodes = pvector(
            ManagedNode(address=address, distribution=self.distribution)
            for address in self.NODE_ADDRESSES)

        certificates = Certificates(self.certificates_path)
        cluster = Cluster(all_nodes=pvector(nodes),
                          control_node=nodes[0],
                          agent_nodes=nodes,
                          dataset_backend=self.dataset_backend,
                          certificates=certificates)

        returnValue(cluster)
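
The decorated method returns a ``Deferred`` that fires with the ``Cluster``,
so a caller chains on it. A minimal sketch (the surrounding runner object and
reactor wiring are assumed, not part of the example):

def use_cluster(runner, reactor):
    # Fires once 'vagrant up' has completed and certificates are loaded.
    d = runner.start_cluster(reactor)
    d.addCallback(lambda cluster: cluster.control_node.address)
    return d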
Example 2
def configured_cluster_for_nodes(reactor, certificates, nodes, dataset_backend,
                                 dataset_backend_configuration):
    """
    Get a ``Cluster`` with Flocker services running on the right nodes.

    :param reactor: The reactor.
    :param Certificates certificates: The certificates to install on the
        cluster.
    :param nodes: The ``ManagedNode``s on which to operate.
    :param NamedConstant dataset_backend: The ``DatasetBackend`` constant
        representing the dataset backend that the nodes will be configured to
        use when they are "started".
    :param dict dataset_backend_configuration: The backend-specific
        configuration the nodes will be given for their dataset backend.

    :returns: A ``Deferred`` which fires with ``Cluster`` when it is
        configured.
    """
    cluster = Cluster(all_nodes=pvector(nodes),
                      control_node=nodes[0],
                      agent_nodes=nodes,
                      dataset_backend=dataset_backend,
                      certificates=certificates)

    configuring = perform(
        make_dispatcher(reactor),
        configure_cluster(cluster, dataset_backend_configuration))
    configuring.addCallback(lambda ignored: cluster)
    return configuring
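
``configured_cluster_for_nodes`` likewise returns a ``Deferred`` that fires
with the configured ``Cluster``. A hypothetical caller (``run_tests_against``
is an assumed name, not part of the example):

d = configured_cluster_for_nodes(
    reactor, certificates, nodes, dataset_backend,
    dataset_backend_configuration)
d.addCallback(run_tests_against)  # assumed consumer of the Cluster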
Example 3
def configured_cluster_for_nodes(
    reactor, certificates, nodes, dataset_backend,
    dataset_backend_configuration, dataset_backend_config_file,
    provider=None
):
    """
    Get a ``Cluster`` with Flocker services running on the right nodes.

    :param reactor: The reactor.
    :param Certificates certificates: The certificates to install on the
        cluster.
    :param nodes: The ``ManagedNode``s on which to operate.
    :param NamedConstant dataset_backend: The ``DatasetBackend`` constant
        representing the dataset backend that the nodes will be configured to
        use when they are "started".
    :param dict dataset_backend_configuration: The backend-specific
        configuration the nodes will be given for their dataset backend.
    :param FilePath dataset_backend_config_file: A ``FilePath`` containing
        the stored dataset backend configuration.
    :param provider: An optional provider name, passed through to
        ``configure_cluster``.

    :returns: A ``Deferred`` which fires with ``Cluster`` when it is
        configured.
    """
    # XXX: There is duplication between the values here and those in
    # f.node.agents.test.blockdevicefactory.MINIMUM_ALLOCATABLE_SIZES. We want
    # the default volume size to be greater than or equal to the minimum
    # allocatable size.
    #
    # Ideally, the minimum allocatable size (and perhaps the default volume
    # size) would be something known by an object that represents the dataset
    # backend. Unfortunately:
    #  1. There is no such object
    #  2. There is existing confusion in the code around 'openstack' and
    #     'rackspace'
    #
    # Here, we special-case Rackspace (presumably) because it has a minimum
    # allocatable size that is different from other OpenStack backends.
    #
    # FLOC-2584 also discusses this.
    default_volume_size = GiB(1)
    if dataset_backend_configuration.get('auth_plugin') == 'rackspace':
        default_volume_size = RACKSPACE_MINIMUM_VOLUME_SIZE

    cluster = Cluster(
        all_nodes=pvector(nodes),
        control_node=nodes[0],
        agent_nodes=nodes,
        dataset_backend=dataset_backend,
        default_volume_size=int(default_volume_size.to_Byte().value),
        certificates=certificates,
        dataset_backend_config_file=dataset_backend_config_file
    )

    configuring = perform(
        make_dispatcher(reactor),
        configure_cluster(cluster, dataset_backend_configuration, provider)
    )
    configuring.addCallback(lambda ignored: cluster)
    return configuring
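
Examples 6 and 8 below call a ``get_default_volume_size`` helper; the inline
Rackspace special case above could plausibly be factored the same way. A
minimal sketch under that assumption (the helper's real body is not shown in
these examples):

def get_default_volume_size(dataset_backend_configuration):
    # Special-case Rackspace, as in Example 3; other backends get 1 GiB.
    size = GiB(1)
    if dataset_backend_configuration.get('auth_plugin') == 'rackspace':
        size = RACKSPACE_MINIMUM_VOLUME_SIZE
    return int(size.to_Byte().value)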
Example 4
def finalize_cluster(cluster):
    # Make node lists immutable.
    return Cluster(all_nodes=pvector(cluster.all_nodes),
                   control_node=cluster.control_node,
                   agent_nodes=pvector(cluster.agent_nodes),
                   dataset_backend=cluster.dataset_backend,
                   default_volume_size=cluster.default_volume_size,
                   certificates=cluster.certificates,
                   dataset_backend_config_file=(
                       cluster.dataset_backend_config_file))
Example 5
def finalize_cluster(cluster):
    """
    :param Cluster cluster: Description of the cluster.
    :return: A ``Cluster`` with immutable node lists.
    """
    # Make node lists immutable.
    return Cluster(all_nodes=pvector(cluster.all_nodes),
                   control_node=cluster.control_node,
                   agent_nodes=pvector(cluster.agent_nodes),
                   dataset_backend=cluster.dataset_backend,
                   default_volume_size=cluster.default_volume_size,
                   certificates=cluster.certificates,
                   dataset_backend_config_file=(
                       cluster.dataset_backend_config_file))
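
Wrapping the node lists in ``pyrsistent.pvector`` is what makes them
immutable: operations on a persistent vector return a new vector and leave
the original untouched. A quick illustration:

from pyrsistent import pvector

nodes = pvector(['node-1', 'node-2'])
more = nodes.append('node-3')               # returns a new vector
assert list(nodes) == ['node-1', 'node-2']  # original is unchanged
assert list(more) == ['node-1', 'node-2', 'node-3']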
Example 6
    def _setup_control_node(self, reactor, node, index):
        print "Selecting node {} for control service".format(node.name)
        certificates = Certificates.generate(
            directory=self.cert_path,
            control_hostname=node.address,
            num_nodes=0,
            cluster_name=self.identity.name,
            cluster_id=self.identity.id,
        )
        dataset_backend_config_file = save_backend_configuration(
            self.dataset_backend, self.dataset_backend_configuration
        )
        cluster = Cluster(
            all_nodes=[node],
            control_node=node,
            agent_nodes=[],
            dataset_backend=self.dataset_backend,
            default_volume_size=get_default_volume_size(
                self.dataset_backend_configuration
            ),
            certificates=certificates,
            dataset_backend_config_file=dataset_backend_config_file
        )
        commands = configure_control_node(
            cluster,
            'libcloud',
            logging_config=self.config.get('logging'),
        )
        d = perform(make_dispatcher(reactor), commands)

        def configure_failed(failure):
            print "Failed to configure control node"
            write_failure(failure)
            return failure

        # It should be sufficient to configure just the control service here,
        # but there is an assumption that the control node is both a control
        # node and an agent node.
        d.addCallbacks(
            lambda _: self._add_node_to_cluster(
                reactor, cluster, node, index
            ),
            errback=configure_failed,
        )
        # Return the cluster.
        d.addCallback(lambda _: cluster)
        return d
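
``save_backend_configuration`` is called here and in Example 8 but not shown.
A plausible minimal shape, assuming it persists the backend name and
configuration as YAML and returns the ``FilePath`` that ``Cluster`` expects
(the file name and layout are guesses):

import tempfile

import yaml
from twisted.python.filepath import FilePath

def save_backend_configuration(dataset_backend,
                               dataset_backend_configuration):
    # Hypothetical sketch: write the agent's dataset backend configuration
    # somewhere the cluster setup code can read it back from.
    directory = FilePath(tempfile.mkdtemp())
    config_file = directory.child('dataset-backend.yml')
    config_file.setContent(yaml.safe_dump({
        'dataset': dict(dataset_backend_configuration,
                        backend=dataset_backend.name),
    }))
    return config_file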
Example 7
    # Requires Twisted's ``inlineCallbacks`` decorator (elided here), as in
    # Example 1.
    @inlineCallbacks
    def start_cluster(self, reactor):
        # Destroy the box to begin, so that we are guaranteed
        # a clean build.
        yield run(
            reactor,
            ['vagrant', 'destroy', '-f'],
            path=self.vagrant_path.path)

        if self.package_source.version:
            env = extend_environ(
                FLOCKER_BOX_VERSION=vagrant_version(
                    self.package_source.version))
        else:
            env = os.environ
        # Boot the VMs
        yield run(
            reactor,
            ['vagrant', 'up'],
            path=self.vagrant_path.path,
            env=env)

        for node in self.NODE_ADDRESSES:
            yield remove_known_host(reactor, node)

        nodes = pvector(
            ManagedNode(address=address, distribution=self.distribution)
            for address in self.NODE_ADDRESSES
        )

        certificates = Certificates(self.certificates_path)
        # Default volume size is meaningless here, as Vagrant only uses ZFS,
        # not a block device backend.
        # XXX Change ``Cluster`` to not require default_volume_size
        default_volume_size = int(GiB(1).to_Byte().value)
        cluster = Cluster(
            all_nodes=pvector(nodes),
            control_node=nodes[0],
            agent_nodes=nodes,
            dataset_backend=self.dataset_backend,
            certificates=certificates,
            default_volume_size=default_volume_size,
        )

        returnValue(cluster)
Example 8
# Uses ``yield`` throughout, so Twisted's ``inlineCallbacks`` decorator is
# assumed here.
@inlineCallbacks
def main(reactor, args, base_path, top_level):
    """
    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    configure_eliot_logging_for_acceptance()
    options = RunOptions(top_level=top_level)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        sys.stderr.write("\n")
        sys.stderr.write(str(options))
        raise SystemExit(1)

    # Existing nodes must be described in a managed section
    # of the configuration.
    existing_nodes = make_managed_nodes(
        options['config']['managed']['addresses'],
        options['distribution'],
    )
    # The following code assumes that one of the managed nodes
    # is both a control node and an agent node.
    [control_node] = [
        node for node in existing_nodes
        if node.address == options['control-node']
    ]
    dataset_backend_config_file = save_backend_configuration(
        options.dataset_backend(),
        options.dataset_backend_configuration(),
    )
    cluster = Cluster(
        all_nodes=list(existing_nodes),
        control_node=control_node,
        agent_nodes=list(existing_nodes),
        dataset_backend=options.dataset_backend(),
        default_volume_size=get_default_volume_size(
            options.dataset_backend_configuration()),
        certificates=Certificates(options['cert-directory']),
        dataset_backend_config_file=dataset_backend_config_file,
    )

    flocker_client = make_client(reactor, cluster)
    existing_count = len(existing_nodes)
    yield wait_for_nodes(reactor, flocker_client, existing_count)
    if options['starting-index'] is None:
        options['starting-index'] = existing_count

    print(
        "Adding {} node(s) to the cluster of {} nodes "
        "starting at index {}".format(
            options['number-of-nodes'],
            existing_count,
            options['starting-index'],
        ))

    runner = options.runner
    cleanup_id = reactor.addSystemEventTrigger('before', 'shutdown',
                                               runner.stop_cluster, reactor)

    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger('before', 'shutdown', log_writer.stopService)

    control_node = options['control-node']
    if options['distribution'] in ('centos-7', ):
        remote_logs_file = open("remote_logs.log", "a")
        capture_journal(reactor, control_node, remote_logs_file)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        capture_upstart(reactor, control_node, remote_logs_file)

    yield runner.ensure_keys(reactor)

    deferreds = runner.extend_cluster(
        reactor,
        cluster,
        options['number-of-nodes'],
        options['tag'],
        options['starting-index'],
    )
    results = yield DeferredList(deferreds)

    failed_count = 0
    for (success, _) in results:
        if not success:
            failed_count += 1
    if failed_count:
        print "Failed to create {} nodes, see logs.".format(failed_count)

    yield wait_for_nodes(
        reactor,
        flocker_client,
        len(cluster.agent_nodes),
    )

    save_managed_config(options['cert-directory'], options['config'], cluster)
    save_environment(options['cert-directory'], cluster,
                     options.package_source())
    reactor.removeSystemEventTrigger(cleanup_id)
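
A script like this is typically driven by ``twisted.internet.task.react``,
which passes the reactor in and waits for the returned ``Deferred``. A rough,
hypothetical driver (the real Flocker tooling wires ``base_path`` and
``top_level`` differently):

if __name__ == '__main__':
    import sys
    from twisted.internet.task import react
    from twisted.python.filepath import FilePath

    script = FilePath(sys.argv[0])
    react(main, (sys.argv[1:], script, script.parent().parent()))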