def from_options(cls, reactor, options):
    """
    Create a cluster container deployment object from the options
    given through the command line.

    :param reactor: Twisted reactor used by the ``FlockerClient``.
    :param options: ``ContainerOptions`` containing the parsed options
        given to the script.
    :raise SystemExit: if a required option is missing or malformed.
    :return: A new instance of ``cls`` wired to a ``FlockerClient``.
    """
    try:
        image = DockerImage(repository=options['image'])
        # ``max-size`` is given in GiB on the command line; the REST
        # API expects bytes.
        max_size = int(GiB(options['max-size']).to_Byte().value)
        mountpoint = unicode(options['mountpoint'])
        control_node_address = options['control-node']
        timeout = options['wait']
    except Exception as e:
        # Report the problem once, then the synopsis and usage, and
        # exit with a non-zero status.  (The previous version wrote the
        # exception text a second time via ``e.args[0]``, which both
        # duplicated the message and could itself raise IndexError or
        # TypeError for exceptions without printable args.)
        sys.stderr.write("%s: %s\n" % ("Missing or wrong arguments", e))
        sys.stderr.write('\n')
        sys.stderr.write(options.getSynopsis())
        sys.stderr.write('\n')
        sys.stderr.write(options.getUsage())
        raise SystemExit(1)

    certificates_path = FilePath(options['cert-directory'])
    cluster_cert = certificates_path.child(b"cluster.crt")
    user_cert = certificates_path.child(b"user.crt")
    user_key = certificates_path.child(b"user.key")

    # Initialise client talking to the control service REST API.
    client = FlockerClient(reactor, control_node_address, REST_API_PORT,
                           cluster_cert, user_cert, user_key)

    return cls(reactor, image, max_size, mountpoint,
               control_node_address, timeout,
               cluster_cert, user_cert, user_key, client)
def __init__(
    self, reactor, cluster, image=u'clusterhq/mongodb',
    volume_size=None, mountpoint=u'/data'
):
    """
    :param reactor: Twisted reactor passed on to the control service.
    :param cluster: Cluster object providing ``get_control_service``
        and ``default_volume_size``.
    :param image: Docker image repository for the container.
    :param volume_size: Size for the attached volume, or ``None`` to
        use the cluster's default volume size.
    :param mountpoint: Path inside the container where the volume is
        mounted.
    """
    self.reactor = reactor
    self.control_service = cluster.get_control_service(reactor)
    self.image = DockerImage(repository=image)
    # Fall back to the cluster-wide default when no explicit size
    # was requested.
    self.volume_size = (
        cluster.default_volume_size() if volume_size is None
        else volume_size
    )
    self.mountpoint = mountpoint
# ``pg8000`` is an optional dependency: PostgreSQL-backed tests are
# expected to be skipped when it is not installed.
try:
    from pg8000 import connect, InterfaceError, ProgrammingError
    PG8000_INSTALLED = True
except ImportError:
    PG8000_INSTALLED = False

# Connection details for the example PostgreSQL application.  Internal
# and external ports are the PostgreSQL default.
POSTGRES_INTERNAL_PORT = 5432
POSTGRES_EXTERNAL_PORT = 5432
POSTGRES_APPLICATION_NAME = u"postgres-volume-example"
POSTGRES_IMAGE = u"postgres"
# Data directory used by the official ``postgres`` Docker image.
POSTGRES_VOLUME_MOUNTPOINT = u'/var/lib/postgresql/data'

# Model of the example PostgreSQL application: one published port and
# an attached volume with a bounded maximum size.  A fresh dataset id
# is generated at import time.
POSTGRES_APPLICATION = Application(
    name=POSTGRES_APPLICATION_NAME,
    image=DockerImage.from_string(POSTGRES_IMAGE + u':latest'),
    ports=frozenset([
        Port(internal_port=POSTGRES_INTERNAL_PORT,
             external_port=POSTGRES_EXTERNAL_PORT),
    ]),
    volume=AttachedVolume(
        manifestation=Manifestation(
            dataset=Dataset(
                dataset_id=unicode(uuid4()),
                metadata=pmap({"name": POSTGRES_APPLICATION_NAME}),
                maximum_size=REALISTIC_BLOCKDEVICE_SIZE),
            primary=True),
        mountpoint=FilePath(POSTGRES_VOLUME_MOUNTPOINT),
    ),
)
    PYMYSQL_INSTALLED = True
# NOTE(review): the ``try:`` / ``import pymysql`` lines that open this
# optional-dependency guard fall before this chunk — presumably the
# same pattern as the pg8000 guard; confirm against the full file.
except ImportError:
    PYMYSQL_INSTALLED = False

# Connection details for the example MySQL application.  Internal and
# external ports are the MySQL default.
MYSQL_INTERNAL_PORT = 3306
MYSQL_EXTERNAL_PORT = 3306
MYSQL_PASSWORD = u"clusterhq"
MYSQL_APPLICATION_NAME = u"mysql-volume-example"
MYSQL_IMAGE = u"mysql:5.6.17"
# The official ``mysql`` image requires the root password to be set
# via this environment variable.
MYSQL_ENVIRONMENT = {"MYSQL_ROOT_PASSWORD": MYSQL_PASSWORD}
# Data directory used by the official ``mysql`` Docker image.
MYSQL_VOLUME_MOUNTPOINT = u'/var/lib/mysql'

# Model of the example MySQL application: one published port and an
# attached volume.  A fresh dataset id is generated at import time.
MYSQL_APPLICATION = Application(
    name=MYSQL_APPLICATION_NAME,
    image=DockerImage.from_string(MYSQL_IMAGE),
    environment=MYSQL_ENVIRONMENT,
    ports=frozenset([
        Port(internal_port=MYSQL_INTERNAL_PORT,
             external_port=MYSQL_EXTERNAL_PORT),
    ]),
    volume=AttachedVolume(
        manifestation=Manifestation(
            dataset=Dataset(
                dataset_id=unicode(uuid4()),
                metadata=pmap({"name": MYSQL_APPLICATION_NAME})),
            primary=True),
        mountpoint=FilePath(MYSQL_VOLUME_MOUNTPOINT),
    ),
)
# ``pg8000`` is an optional dependency: PostgreSQL-backed tests are
# expected to be skipped when it is not installed.
try:
    from pg8000 import connect, InterfaceError, ProgrammingError
    PG8000_INSTALLED = True
except ImportError:
    PG8000_INSTALLED = False

# Connection details for the example PostgreSQL application.  Internal
# and external ports are the PostgreSQL default.
POSTGRES_INTERNAL_PORT = 5432
POSTGRES_EXTERNAL_PORT = 5432
POSTGRES_APPLICATION_NAME = u"postgres-volume-example"
POSTGRES_IMAGE = u"postgres"
# Data directory used by the official ``postgres`` Docker image.
POSTGRES_VOLUME_MOUNTPOINT = u'/var/lib/postgresql/data'

# Model of the example PostgreSQL application: one published port and
# an attached volume (no maximum size here, unlike the variant that
# sets ``maximum_size``).  A fresh dataset id is generated at import
# time.
POSTGRES_APPLICATION = Application(
    name=POSTGRES_APPLICATION_NAME,
    image=DockerImage.from_string(POSTGRES_IMAGE + u':latest'),
    ports=frozenset([
        Port(internal_port=POSTGRES_INTERNAL_PORT,
             external_port=POSTGRES_EXTERNAL_PORT),
    ]),
    volume=AttachedVolume(
        manifestation=Manifestation(
            dataset=Dataset(
                dataset_id=unicode(uuid4()),
                metadata=pmap({"name": POSTGRES_APPLICATION_NAME})),
            primary=True),
        mountpoint=FilePath(POSTGRES_VOLUME_MOUNTPOINT),
    ),
)
def setUp(self, cluster):
    """
    Deploy PostgreSQL to a node.

    Records the cluster and its two nodes, builds the application and
    deployment configurations (including a "moved" deployment and a
    variant with a different external port), then starts the initial
    deployment of PostgreSQL onto the first node.
    """
    self.cluster = cluster
    self.node_1, self.node_2 = cluster.nodes

    # Generate a fresh dataset id per test run.
    dataset_uuid = unicode(uuid4())

    self.POSTGRES_APPLICATION = Application(
        name=POSTGRES_APPLICATION_NAME,
        image=DockerImage.from_string(POSTGRES_IMAGE + u':latest'),
        ports=frozenset([
            Port(internal_port=POSTGRES_INTERNAL_PORT,
                 external_port=POSTGRES_EXTERNAL_PORT),
        ]),
        volume=AttachedVolume(
            manifestation=Manifestation(
                dataset=Dataset(
                    dataset_id=dataset_uuid,
                    metadata=pmap({"name": POSTGRES_APPLICATION_NAME}),
                    maximum_size=REALISTIC_BLOCKDEVICE_SIZE),
                primary=True),
            mountpoint=FilePath(POSTGRES_VOLUME_MOUNTPOINT),
        ),
    )

    host_1 = self.node_1.reported_hostname
    host_2 = self.node_2.reported_hostname
    # Initial placement on node 1, and the same deployment after the
    # application has been moved to node 2.
    self.postgres_deployment = {
        u"version": 1,
        u"nodes": {
            host_1: [POSTGRES_APPLICATION_NAME],
            host_2: [],
        },
    }
    self.postgres_deployment_moved = {
        u"version": 1,
        u"nodes": {
            host_1: [],
            host_2: [POSTGRES_APPLICATION_NAME],
        },
    }

    self.postgres_application = {
        u"version": 1,
        u"applications": {
            POSTGRES_APPLICATION_NAME: {
                u"image": POSTGRES_IMAGE,
                u"ports": [{
                    u"internal": POSTGRES_INTERNAL_PORT,
                    u"external": POSTGRES_EXTERNAL_PORT,
                }],
                u"volume": {
                    u"dataset_id": dataset_uuid,
                    # The location within the container where the data
                    # volume will be mounted; see:
                    # https://github.com/docker-library/postgres/blob/
                    # docker/Dockerfile.template
                    u"mountpoint": POSTGRES_VOLUME_MOUNTPOINT,
                    u"maximum_size": "%d" % (REALISTIC_BLOCKDEVICE_SIZE,),
                },
            },
        },
    }

    # Same application config, with the external port bumped by one.
    port_path = [u"applications", POSTGRES_APPLICATION_NAME,
                 u"ports", 0, u"external"]
    self.postgres_application_different_port = thaw(
        freeze(self.postgres_application).transform(
            port_path, POSTGRES_EXTERNAL_PORT + 1))

    return self.cluster.flocker_deploy(
        self, self.postgres_deployment, self.postgres_application
    )
    Dataset, Manifestation, Link)
# NOTE(review): the ``from ... import (`` line that opens the import
# continuation above falls before this chunk; confirm against the
# full file.
from flocker.testtools import loop_until

from .testtools import (assert_expected_deployment, flocker_deploy,
                        get_nodes, require_flocker_cli)

# Connection details for the example Elasticsearch application.
ELASTICSEARCH_INTERNAL_PORT = 9200
ELASTICSEARCH_EXTERNAL_PORT = 9200
ELASTICSEARCH_APPLICATION_NAME = u"elasticsearch"
ELASTICSEARCH_IMAGE = u"clusterhq/elasticsearch"
ELASTICSEARCH_VOLUME_MOUNTPOINT = u'/var/lib/elasticsearch'

# Model of the example Elasticsearch application: one published port
# and an attached volume.  A fresh dataset id is generated at import
# time.
ELASTICSEARCH_APPLICATION = Application(
    name=ELASTICSEARCH_APPLICATION_NAME,
    image=DockerImage.from_string(ELASTICSEARCH_IMAGE),
    ports=frozenset([
        Port(internal_port=ELASTICSEARCH_INTERNAL_PORT,
             external_port=ELASTICSEARCH_EXTERNAL_PORT),
    ]),
    volume=AttachedVolume(
        manifestation=Manifestation(dataset=Dataset(
            dataset_id=unicode(uuid4()),
            metadata=pmap({"name": ELASTICSEARCH_APPLICATION_NAME})),
            primary=True),
        mountpoint=FilePath(ELASTICSEARCH_VOLUME_MOUNTPOINT),
    ),
)

# Ports for the Logstash application in the ELK stack.
LOGSTASH_INTERNAL_PORT = 5000
LOGSTASH_EXTERNAL_PORT = 5000
def setUp(self, cluster):
    """
    Deploy PostgreSQL to a node.

    Records the cluster and its two nodes, builds the application and
    deployment configurations (including a "moved" deployment and a
    variant with a different external port), starts the initial
    deployment of PostgreSQL onto the first node, and waits until the
    deployment is reflected on both nodes before the test proceeds.
    """
    self.cluster = cluster
    self.node_1, self.node_2 = cluster.nodes
    # Fresh dataset id per test run; shared between the model object
    # and the YAML-style application config below.
    new_dataset_id = unicode(uuid4())
    self.POSTGRES_APPLICATION = Application(
        name=POSTGRES_APPLICATION_NAME,
        image=DockerImage.from_string(POSTGRES_IMAGE + u':latest'),
        ports=frozenset([
            Port(internal_port=POSTGRES_INTERNAL_PORT,
                 external_port=POSTGRES_EXTERNAL_PORT),
        ]),
        volume=AttachedVolume(
            manifestation=Manifestation(
                dataset=Dataset(
                    dataset_id=new_dataset_id,
                    metadata=pmap({"name": POSTGRES_APPLICATION_NAME}),
                    maximum_size=REALISTIC_BLOCKDEVICE_SIZE),
                primary=True),
            mountpoint=FilePath(POSTGRES_VOLUME_MOUNTPOINT),
        ),
    )
    # Initial placement on node 1.
    self.postgres_deployment = {
        u"version": 1,
        u"nodes": {
            self.node_1.reported_hostname: [POSTGRES_APPLICATION_NAME],
            self.node_2.reported_hostname: [],
        },
    }
    # The same deployment after the application is moved to node 2.
    self.postgres_deployment_moved = {
        u"version": 1,
        u"nodes": {
            self.node_1.reported_hostname: [],
            self.node_2.reported_hostname: [POSTGRES_APPLICATION_NAME],
        },
    }
    self.postgres_application = {
        u"version": 1,
        u"applications": {
            POSTGRES_APPLICATION_NAME: {
                u"image": POSTGRES_IMAGE,
                u"ports": [{
                    u"internal": POSTGRES_INTERNAL_PORT,
                    u"external": POSTGRES_EXTERNAL_PORT,
                }],
                u"volume": {
                    u"dataset_id": new_dataset_id,
                    # The location within the container where the data
                    # volume will be mounted; see:
                    # https://github.com/docker-library/postgres/blob/
                    # docker/Dockerfile.template
                    u"mountpoint": POSTGRES_VOLUME_MOUNTPOINT,
                    u"maximum_size": "%d" % (REALISTIC_BLOCKDEVICE_SIZE,),
                },
            },
        },
    }
    # Same application config with the external port bumped by one,
    # derived immutably via freeze/transform and converted back.
    self.postgres_application_different_port = thaw(freeze(
        self.postgres_application).transform(
            [u"applications", POSTGRES_APPLICATION_NAME, u"ports", 0,
             u"external"], POSTGRES_EXTERNAL_PORT + 1))

    cluster.flocker_deploy(
        self, self.postgres_deployment, self.postgres_application
    )

    # We're only testing movement if we actually wait for Postgres to
    # be running before proceeding with test:
    return self.cluster.assert_expected_deployment(self, {
        self.node_1.reported_hostname:
            set([self.POSTGRES_APPLICATION]),
        self.node_2.reported_hostname: set([]),
    })
    Manifestation, Link)
# NOTE(review): the ``from ... import (`` line that opens the import
# continuation above falls before this chunk; confirm against the
# full file.
from flocker.testtools import loop_until

from .testtools import (assert_expected_deployment, flocker_deploy,
                        get_nodes, require_flocker_cli)

# Connection details for the example Elasticsearch application.
ELASTICSEARCH_INTERNAL_PORT = 9200
ELASTICSEARCH_EXTERNAL_PORT = 9200
ELASTICSEARCH_APPLICATION_NAME = u"elasticsearch"
ELASTICSEARCH_IMAGE = u"clusterhq/elasticsearch"
ELASTICSEARCH_VOLUME_MOUNTPOINT = u'/var/lib/elasticsearch'

# Model of the example Elasticsearch application: one published port
# and an attached volume.  A fresh dataset id is generated at import
# time.
ELASTICSEARCH_APPLICATION = Application(
    name=ELASTICSEARCH_APPLICATION_NAME,
    image=DockerImage.from_string(ELASTICSEARCH_IMAGE),
    ports=frozenset([
        Port(internal_port=ELASTICSEARCH_INTERNAL_PORT,
             external_port=ELASTICSEARCH_EXTERNAL_PORT),
    ]),
    volume=AttachedVolume(
        manifestation=Manifestation(
            dataset=Dataset(
                dataset_id=unicode(uuid4()),
                metadata=pmap({"name": ELASTICSEARCH_APPLICATION_NAME})),
            primary=True),
        mountpoint=FilePath(ELASTICSEARCH_VOLUME_MOUNTPOINT),
    ),
)

# Internal port for the Logstash application in the ELK stack.
LOGSTASH_INTERNAL_PORT = 5000