def start_cluster(self, reactor):
    """
    Destroy any existing Vagrant boxes, boot fresh VMs, and describe the
    resulting cluster.

    Twisted ``inlineCallbacks``-style generator: fires with a ``Cluster``
    built from ``self.NODE_ADDRESSES`` via ``returnValue``.
    """
    # Destroy the box to begin, so that we are guaranteed a clean build.
    yield run(reactor, ['vagrant', 'destroy', '-f'],
              path=self.vagrant_path.path)

    # Pin the box version for Vagrant only when a package version was
    # requested; otherwise inherit the caller's environment unchanged.
    if self.package_source.version:
        env = extend_environ(
            FLOCKER_BOX_VERSION=vagrant_version(self.package_source.version))
    else:
        env = os.environ

    # Boot the VMs
    yield run(reactor, ['vagrant', 'up'],
              path=self.vagrant_path.path, env=env)

    # Stale SSH host keys from a previous run would make connections fail.
    for address in self.NODE_ADDRESSES:
        yield remove_known_host(reactor, address)

    nodes = pvector(
        ManagedNode(address=address, distribution=self.distribution)
        for address in self.NODE_ADDRESSES
    )
    returnValue(
        Cluster(
            all_nodes=pvector(nodes),
            control_node=nodes[0],
            agent_nodes=nodes,
            dataset_backend=self.dataset_backend,
            certificates=Certificates(self.certificates_path),
        )
    )
def generate_certificates(nodes):
    """
    Generate a new set of certificates for the given nodes.

    :return: A ``Certificates`` instance referring to the newly generated
        certificates.
    """
    # Certificates are written to a fresh temporary directory.
    output_directory = FilePath(mkdtemp())
    print("Generating certificates in: {}".format(output_directory.path))
    return Certificates.generate(
        output_directory, nodes[0].address, len(nodes))
def generate_certificates(nodes):
    """
    Generate a new set of certificates for the given nodes.

    :return: A ``Certificates`` instance referring to the newly generated
        certificates.
    """
    ca_directory = FilePath(mkdtemp())
    print("Generating certificates in: {}".format(ca_directory.path))
    # The control service runs on the first node; one node certificate is
    # generated per cluster node.
    generated = Certificates.generate(
        ca_directory,
        nodes[0].address,
        len(nodes),
    )
    return generated
def generate_certificates(cluster_id, nodes):
    """
    Generate a new set of certificates for the given nodes.

    :param UUID cluster_id: The unique identifier of the cluster for which
        to generate the certificates.
    :param list nodes: The ``INode`` providers that make up the cluster.

    :return: A ``Certificates`` instance referring to the newly generated
        certificates.
    """
    destination = FilePath(mkdtemp())
    print("Generating certificates in: {}".format(destination.path))
    return Certificates.generate(
        destination, nodes[0].address, len(nodes), cluster_id=cluster_id)
def _setup_control_node(self, reactor, node, index):
    """
    Configure ``node`` as the cluster's control service node and then add
    it to the cluster as an agent node as well.

    :param reactor: The reactor used to perform the configuration commands.
    :param node: The provisioned node to turn into the control node.
    :param int index: The node's index, forwarded to
        ``_add_node_to_cluster``.
    :return: ``Deferred`` firing with the newly built ``Cluster`` once the
        node is configured, or failing if configuration fails.
    """
    # print() function form (single argument) behaves identically on
    # Python 2 and keeps this module consistent with the other helpers
    # that already use the function form.
    print("Selecting node {} for control service".format(node.name))
    certificates = Certificates.generate(
        directory=self.cert_path,
        control_hostname=node.address,
        num_nodes=0,
        cluster_name=self.identity.name,
        cluster_id=self.identity.id,
    )
    dataset_backend_config_file = save_backend_configuration(
        self.dataset_backend, self.dataset_backend_configuration
    )
    # The cluster starts with only the control node; agents are added
    # afterwards, one at a time.
    cluster = Cluster(
        all_nodes=[node],
        control_node=node,
        agent_nodes=[],
        dataset_backend=self.dataset_backend,
        default_volume_size=get_default_volume_size(
            self.dataset_backend_configuration
        ),
        certificates=certificates,
        dataset_backend_config_file=dataset_backend_config_file
    )
    commands = configure_control_node(
        cluster,
        'libcloud',
        logging_config=self.config.get('logging'),
    )
    d = perform(make_dispatcher(reactor), commands)

    def configure_failed(failure):
        print("Failed to configure control node")
        write_failure(failure)
        return failure

    # It should be sufficient to configure just the control service here,
    # but there is an assumption that the control node is both a control
    # node and an agent node.
    d.addCallbacks(
        lambda _: self._add_node_to_cluster(
            reactor, cluster, node, index
        ),
        errback=configure_failed,
    )
    # Return the cluster.
    d.addCallback(lambda _: cluster)
    return d
def start_cluster(self, reactor):
    """
    Rebuild the Vagrant-backed cluster from a clean slate and fire with a
    ``Cluster`` describing it (``inlineCallbacks`` style, via
    ``returnValue``).
    """
    # Destroy the box to begin, so that we are guaranteed a clean build.
    yield run(
        reactor, ['vagrant', 'destroy', '-f'], path=self.vagrant_path.path)

    # Only override the box version when one was explicitly requested.
    if self.package_source.version:
        env = extend_environ(
            FLOCKER_BOX_VERSION=vagrant_version(self.package_source.version))
    else:
        env = os.environ

    # Boot the VMs
    yield run(
        reactor, ['vagrant', 'up'], path=self.vagrant_path.path, env=env)

    # Drop stale SSH host keys left over from earlier boxes.
    for address in self.NODE_ADDRESSES:
        yield remove_known_host(reactor, address)

    nodes = pvector(
        ManagedNode(address=address, distribution=self.distribution)
        for address in self.NODE_ADDRESSES
    )

    # Default volume size is meaningless here as Vagrant only uses ZFS, and
    # not a block device backend.
    # XXX Change ``Cluster`` to not require default_volume_size
    default_volume_size = int(GiB(1).to_Byte().value)

    returnValue(
        Cluster(
            all_nodes=pvector(nodes),
            control_node=nodes[0],
            agent_nodes=nodes,
            dataset_backend=self.dataset_backend,
            certificates=Certificates(self.certificates_path),
            default_volume_size=default_volume_size,
        )
    )
def generate_certificates(cluster_id, nodes):
    """
    Generate a new set of certificates for the given nodes.

    :param UUID cluster_id: The unique identifier of the cluster for which
        to generate the certificates.
    :param list nodes: The ``INode`` providers that make up the cluster.

    :return: A ``Certificates`` instance referring to the newly generated
        certificates.
    """
    # Write the credentials into a fresh temporary directory.
    target = FilePath(mkdtemp())
    print("Generating certificates in: {}".format(target.path))
    return Certificates.generate(
        target, nodes[0].address, len(nodes), cluster_id=cluster_id)
def generate_certificates(cluster_name, cluster_id, nodes, cert_path):
    """
    Generate a new set of certificates for the given nodes.

    :param bytes cluster_name: The name of the cluster.
    :param UUID cluster_id: The unique identifier of the cluster for which
        to generate the certificates.  If ``None`` then a new random
        identifier is generated.
    :param list nodes: The ``INode`` providers that make up the cluster.
    :param FilePath cert_path: The directory where the generated
        certificate files are to be placed.

    :return: A ``Certificates`` instance referring to the newly generated
        certificates.
    """
    print("Generating certificates in: {}".format(cert_path.path))
    generated = Certificates.generate(
        cert_path,
        nodes[0].address,
        len(nodes),
        cluster_name=cluster_name,
        cluster_id=cluster_id,
    )
    return generated
def main(reactor, args, base_path, top_level):
    """
    Extend an existing managed cluster with additional nodes.

    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    configure_eliot_logging_for_acceptance()
    options = RunOptions(top_level=top_level)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        sys.stderr.write("\n")
        sys.stderr.write(str(options))
        raise SystemExit(1)

    # Existing nodes must be described in a managed section
    # of the configuration.
    existing_nodes = make_managed_nodes(
        options['config']['managed']['addresses'],
        options['distribution'],
    )

    # The following code assumes that one of the managed nodes
    # is both a control node and an agent node.
    [control_node] = [
        node for node in existing_nodes
        if node.address == options['control-node']
    ]
    dataset_backend_config_file = save_backend_configuration(
        options.dataset_backend(),
        options.dataset_backend_configuration(),
    )
    cluster = Cluster(
        all_nodes=list(existing_nodes),
        control_node=control_node,
        agent_nodes=list(existing_nodes),
        dataset_backend=options.dataset_backend(),
        default_volume_size=get_default_volume_size(
            options.dataset_backend_configuration()),
        certificates=Certificates(options['cert-directory']),
        dataset_backend_config_file=dataset_backend_config_file,
    )
    flocker_client = make_client(reactor, cluster)
    existing_count = len(existing_nodes)
    # Make sure the pre-existing nodes are actually up before extending.
    yield wait_for_nodes(reactor, flocker_client, existing_count)
    if options['starting-index'] is None:
        # By default new nodes are appended after the existing ones.
        options['starting-index'] = existing_count
    print(
        "Adding {} node(s) to the cluster of {} nodes "
        "starting at index {}".format(
            options['number-of-nodes'],
            existing_count,
            options['starting-index'],
        ))

    runner = options.runner
    # Ensure partially-created nodes are destroyed if the reactor shuts
    # down mid-run; the trigger is removed on successful completion below.
    cleanup_id = reactor.addSystemEventTrigger(
        'before', 'shutdown', runner.stop_cluster, reactor)

    # Local import kept as in the original module to avoid import-time
    # side effects before logging is configured.
    from flocker.common.script import eliot_logging_service
    log_writer = eliot_logging_service(
        destination=FileDestination(
            file=open("%s.log" % (base_path.basename(),), "a")),
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)

    control_node = options['control-node']
    # Capture the control node's service logs using the mechanism
    # appropriate for the distribution's init system.
    if options['distribution'] in ('centos-7',):
        remote_logs_file = open("remote_logs.log", "a")
        capture_journal(reactor, control_node, remote_logs_file)
    elif options['distribution'] in ('ubuntu-14.04', 'ubuntu-15.10'):
        remote_logs_file = open("remote_logs.log", "a")
        capture_upstart(reactor, control_node, remote_logs_file)

    yield runner.ensure_keys(reactor)
    deferreds = runner.extend_cluster(
        reactor,
        cluster,
        options['number-of-nodes'],
        options['tag'],
        options['starting-index'],
    )
    results = yield DeferredList(deferreds)

    # DeferredList yields (success, result) pairs; count the failures.
    failed_count = sum(1 for (success, _) in results if not success)
    if failed_count:
        print("Failed to create {} nodes, see logs.".format(failed_count))

    yield wait_for_nodes(
        reactor,
        flocker_client,
        len(cluster.agent_nodes),
    )
    save_managed_config(
        options['cert-directory'], options['config'], cluster)
    save_environment(
        options['cert-directory'], cluster, options.package_source())
    reactor.removeSystemEventTrigger(cleanup_id)
#!/usr/bin/env python
# Copyright ClusterHQ Inc.  See LICENSE file for details.

# Generates a set of cluster, node and user certificates and keys for
# use with the tutorial Vagrant box.

from twisted.python.filepath import FilePath

from flocker.provision._ca import Certificates

# Credentials are written next to this script; the tutorial box has two
# nodes with the control service at the fixed address below.
credentials_directory = FilePath(__file__).sibling('credentials')
Certificates.generate(credentials_directory, '172.16.255.250', 2)
def main(reactor, args, base_path, top_level):
    """
    Provision nodes, run the acceptance tests against them, and clean up.

    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)
    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)

    runner = options.runner

    try:
        nodes = yield runner.start_nodes(reactor)

        ca_directory = FilePath(mkdtemp())
        print("Generating certificates in: {}".format(ca_directory.path))
        certificates = Certificates.generate(
            ca_directory, nodes[0].address, len(nodes))

        # Pre-pull the Docker images on every node in parallel so the
        # tests don't pay that cost (or time out) later.
        yield perform(
            make_dispatcher(reactor),
            parallel([
                run_remotely(
                    username='******',
                    address=node.address,
                    commands=task_pull_docker_images()
                ) for node in nodes
            ]),
        )

        control_node = nodes[0]
        dataset_backend = options.dataset_backend

        yield perform(
            make_dispatcher(reactor),
            configure_cluster(
                control_node=control_node,
                agent_nodes=nodes,
                certificates=certificates,
                dataset_backend=dataset_backend))

        result = yield run_tests(
            reactor=reactor,
            nodes=nodes,
            control_node=control_node,
            agent_nodes=nodes,
            dataset_backend=dataset_backend,
            trial_args=options['trial-args'],
            certificates_path=ca_directory)
    except:
        # Bare except is deliberate: ``result`` must be bound before the
        # ``finally`` block reads it, even for KeyboardInterrupt/SystemExit.
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not (result != 0 and options['keep']):
            runner.stop_nodes(reactor)
        elif options['keep']:
            print("--keep specified, not destroying nodes.")
            print("To run acceptance tests against these nodes, "
                  "set the following environment variables: ")
            environment_variables = {
                'FLOCKER_ACCEPTANCE_NODES':
                    ':'.join(node.address for node in nodes),
                'FLOCKER_ACCEPTANCE_CONTROL_NODE': control_node.address,
                'FLOCKER_ACCEPTANCE_AGENT_NODES':
                    ':'.join(node.address for node in nodes),
                'FLOCKER_ACCEPTANCE_VOLUME_BACKEND': dataset_backend.name,
                'FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH': ca_directory.path,
            }
            for name, value in environment_variables.items():
                print("export {name}={value};".format(
                    name=name, value=value))

    raise SystemExit(result)