def test_validate_instance_flavors(self, create_nova_cli_mock):
    """Flavor validation should build one nova client per distinct region."""
    patch.object(create_nova_cli_mock.return_value, 'flavors',
                 new_callable=PropertyMock(return_value=Mock()))
    flavor_mock = create_nova_cli_mock.return_value.flavors.get.return_value
    flavor_mock.ephemeral = 0
    instance_defs = [
        {'flavor_id': 1, 'volume_size': 10},
        {'flavor_id': 1, 'volume_size': 1.5, 'region_name': 'home'},
        {'flavor_id': 2, 'volume_size': 3, 'region_name': 'work'},
    ]
    models.validate_instance_flavors(Mock(), instance_defs, True, True)
    expected_calls = [call(ANY, None), call(ANY, 'home'), call(ANY, 'work')]
    create_nova_cli_mock.assert_has_calls(expected_calls)
    # With volume support disabled, a flavor without ephemeral storage
    # must be rejected.
    self.assertRaises(exception.LocalStorageNotSpecified,
                      models.validate_instance_flavors,
                      Mock(), instance_defs, False, True)
    flavor_mock.ephemeral = 1
    models.validate_instance_flavors(Mock(), instance_defs, False, True)
def _create_cluster_instances(
        cls, context, cluster_id, cluster_name, datastore,
        datastore_version, instances, extended_properties, locality,
        configuration_id):
    """Create the member instances for a (Cassandra-style) cluster.

    Validates flavors against the datastore's volume/ephemeral support,
    checks quota for the new instances and their total volume, then
    creates one member instance per entry in ``instances``.

    :param instances: list of dicts with keys such as 'flavor_id',
                      'volume_size', and optional 'name',
                      'availability_zone', 'nics', 'volume_type',
                      'modules', 'region_name'.
    :returns: list of the newly created instance model objects.
    """
    LOG.debug("Processing a request for new cluster instances.")

    cassandra_conf = CONF.get(datastore_version.manager)
    eph_enabled = cassandra_conf.device_path
    vol_enabled = cassandra_conf.volume_support

    # Validate instance flavors.
    models.validate_instance_flavors(context, instances,
                                     vol_enabled, eph_enabled)

    # Compute the total volume allocation.
    req_volume_size = models.get_required_volume_size(instances,
                                                      vol_enabled)

    # Check requirements against quota.
    num_new_instances = len(instances)
    deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    # Creating member instances.
    # Existing node count is used to continue the member numbering when
    # growing an existing cluster.
    num_instances = len(
        CassandraClusterTasks.find_cluster_node_ids(cluster_id))
    new_instances = []
    for instance_idx, instance in enumerate(instances, num_instances + 1):
        instance_az = instance.get('availability_zone', None)

        # The availability zone doubles as the Cassandra rack when given.
        member_config = {"id": cluster_id,
                         "instance_type": "member",
                         "dc": cls.DEFAULT_DATA_CENTER,
                         "rack": instance_az or cls.DEFAULT_RACK}

        instance_name = instance.get('name')
        if not instance_name:
            instance_name = cls._build_instance_name(
                cluster_name, member_config['dc'], member_config['rack'],
                instance_idx)

        new_instance = inst_models.Instance.create(
            context, instance_name,
            instance['flavor_id'],
            datastore_version.image_id,
            [], [],
            datastore, datastore_version,
            instance['volume_size'], None,
            nics=instance.get('nics', None),
            availability_zone=instance_az,
            configuration_id=configuration_id,
            cluster_config=member_config,
            volume_type=instance.get('volume_type', None),
            modules=instance.get('modules'),
            locality=locality,
            region_name=instance.get('region_name'))

        new_instances.append(new_instance)

    return new_instances
def test_validate_instance_flavors(self, create_nove_cli_mock):
    """A nova client should be requested once for each region used."""
    patch.object(
        create_nove_cli_mock.return_value, 'flavors',
        new_callable=PropertyMock(return_value=Mock()))
    flavor = create_nove_cli_mock.return_value.flavors.get.return_value
    flavor.ephemeral = 0
    defs = [
        {'flavor_id': 1, 'volume_size': 10},
        {'flavor_id': 1, 'volume_size': 1.5, 'region_name': 'home'},
        {'flavor_id': 2, 'volume_size': 3, 'region_name': 'work'},
    ]
    models.validate_instance_flavors(Mock(), defs, True, True)
    create_nove_cli_mock.assert_has_calls(
        [call(ANY, None), call(ANY, 'home'), call(ANY, 'work')])
    # Volume-less deployments require the flavor to carry ephemeral disk.
    self.assertRaises(exception.LocalStorageNotSpecified,
                      models.validate_instance_flavors,
                      Mock(), defs, False, True)
    flavor.ephemeral = 1
    models.validate_instance_flavors(Mock(), defs, False, True)
def grow(self, instances):
    """Grow Cluster API endpoint.

    main function to grow a cluster is here.
    https://github.com/openstack/trove/blob/master/trove/cluster/service.py#L60-L86
    https://github.com/openstack/trove/blob/master/trove/cluster/models.py#L305
    """
    LOG.debug("Growing cluster. %s", "{}".format(instances))

    # 1. validates args
    if not instances:
        LOG.error("no instances")
        return False
    models.assert_homogeneous_cluster(instances)
    conf = CONF.get(self.datastore_version.manager)
    models.validate_instance_flavors(
        self.context, instances, conf.volume_support, conf.device_path)
    self.validate_cluster_available()

    # 2. updates the cluster status
    self.db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)

    # 3. creates new instances by using self._create_insts
    placement_hint = srv_grp.ServerGroup.convert_to_hint(self.server_group)
    created = self._create_insts(
        self.context, self.db_info.id, self.db_info.name, self.ds,
        self.ds_version, instances, placement_hint,
        self.db_info.configuration_id)

    # 4. calls the taskmanager's grow_cluster endpoint
    task_api.load(self.context, self.ds_version.manager).grow_cluster(
        self.db_info.id, [inst.id for inst in created])
    return True
def test_validate_instance_flavors(self, create_nove_cli_mock):
    """With no region names supplied, only one nova client is created."""
    patch.object(create_nove_cli_mock.return_value, 'flavors',
                 new_callable=PropertyMock(return_value=Mock()))
    flavor = create_nove_cli_mock.return_value.flavors.get.return_value
    flavor.ephemeral = 0
    defs = [
        {'flavor_id': 1, 'volume_size': 10},
        {'flavor_id': 1, 'volume_size': 1.5},
        {'flavor_id': 2, 'volume_size': 3},
    ]
    models.validate_instance_flavors(Mock(), defs, True, True)
    create_nove_cli_mock.assert_called_once_with(ANY)
    # Without volume support, a zero-ephemeral flavor must be rejected.
    self.assertRaises(exception.LocalStorageNotSpecified,
                      models.validate_instance_flavors,
                      Mock(), defs, False, True)
    flavor.ephemeral = 1
    models.validate_instance_flavors(Mock(), defs, False, True)
def _create_cluster_instances(
        cls, context, cluster_id, cluster_name, datastore,
        datastore_version, instances, extended_properties, locality):
    """Create the member instances for a (Cassandra-style) cluster.

    Validates flavors, checks instance/volume quota, then creates one
    member instance per entry in ``instances``.  Unlike the newer
    variant, no configuration group is attached
    (``configuration_id=None``).

    :returns: list of the newly created instance model objects.
    """
    LOG.debug("Processing a request for new cluster instances.")

    cassandra_conf = CONF.get(datastore_version.manager)
    eph_enabled = cassandra_conf.device_path
    vol_enabled = cassandra_conf.volume_support

    # Validate instance flavors.
    models.validate_instance_flavors(context, instances,
                                     vol_enabled, eph_enabled)

    # Compute the total volume allocation.
    req_volume_size = models.get_required_volume_size(instances,
                                                      vol_enabled)

    # Check requirements against quota.
    num_new_instances = len(instances)
    deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    # Creating member instances.
    # Continue numbering from the existing node count (cluster grow).
    num_instances = len(
        CassandraClusterTasks.find_cluster_node_ids(cluster_id))
    new_instances = []
    for instance_idx, instance in enumerate(instances, num_instances + 1):
        instance_az = instance.get('availability_zone', None)

        # Availability zone doubles as the rack name when provided.
        member_config = {"id": cluster_id,
                         "instance_type": "member",
                         "dc": cls.DEFAULT_DATA_CENTER,
                         "rack": instance_az or cls.DEFAULT_RACK}

        instance_name = instance.get('name')
        if not instance_name:
            instance_name = cls._build_instance_name(
                cluster_name, member_config['dc'], member_config['rack'],
                instance_idx)

        new_instance = inst_models.Instance.create(
            context, instance_name,
            instance['flavor_id'],
            datastore_version.image_id,
            [], [],
            datastore, datastore_version,
            instance['volume_size'], None,
            nics=instance.get('nics', None),
            availability_zone=instance_az,
            configuration_id=None,
            cluster_config=member_config,
            modules=instance.get('modules'),
            region_name=instance.get('region_name'),
            locality=locality)

        new_instances.append(new_instance)

    return new_instances
def _create_instances(context, db_info, datastore, datastore_version,
                      instances, extended_properties, locality):
    """Create the member instances for a (Redis-style) cluster.

    Validates flavors, homogeneity and NICs, assigns default member
    names where missing, checks quota, and creates one instance per
    entry in ``instances``.

    :returns: list of the newly created instance model objects.
    """
    redis_conf = CONF.get(datastore_version.manager)
    ephemeral_enabled = redis_conf.device_path
    volume_enabled = redis_conf.volume_support

    num_instances = len(instances)
    models.validate_instance_flavors(context, instances, volume_enabled,
                                     ephemeral_enabled)
    total_volume_allocation = models.get_required_volume_size(
        instances, volume_enabled)
    models.assert_homogeneous_cluster(instances)
    models.validate_instance_nics(context, instances)

    # Fill in default names of the form <cluster>-member-<n>.
    # NOTE(review): the counter advances only for auto-named members,
    # so explicit names do not consume an index.
    name_index = 1
    for instance in instances:
        if not instance.get('name'):
            instance['name'] = "%s-member-%s" % (db_info.name, name_index)
            name_index += 1

    # Check quotas
    quota_request = {
        'instances': num_instances,
        'volumes': total_volume_allocation
    }
    check_quotas(context.project_id, quota_request)

    # Creating member instances
    return [
        inst_models.Instance.create(
            context, instance['name'], instance['flavor_id'],
            datastore_version.image_id, [], [], datastore,
            datastore_version,
            instance.get('volume_size'), None,
            instance.get('availability_zone', None),
            instance.get('nics', None),
            configuration_id=None,
            cluster_config={
                "id": db_info.id,
                "instance_type": "member"
            },
            volume_type=instance.get('volume_type', None),
            modules=instance.get('modules'),
            locality=locality,
            region_name=instance.get('region_name'))
        for instance in instances
    ]
def create(cls, context, name, datastore, datastore_version,
           instances, extended_properties, locality, configuration
           ):  # pylint: disable=too-many-arguments, too-many-locals
    """Create Clusters API endpoint.

    main function to create a cluster is here
    https://github.com/openstack/trove/blob/master/trove/cluster/service.py#L162-L234

    Validates arguments, records the cluster in the database, creates
    the member instances, then hands off to the taskmanager.

    :returns: a cluster object for the HTTP response, or None when a
              required argument is missing.
    """
    # 1. validates args
    # NOTE(review): missing arguments are logged and reported via a
    # None return instead of an exception — callers must check for it.
    if context is None:
        LOG.error("no context")
        return None
    if name is None:
        LOG.error("no name")
        return None
    if datastore is None:
        LOG.error("no datastore")
        return None
    if datastore_version is None:
        LOG.error("no datastore_version")
        return None
    if instances is None:
        LOG.error("no instances")
        return None
    models.assert_homogeneous_cluster(instances)
    manager_conf = CONF.get(datastore_version.manager)
    models.validate_instance_flavors(context, instances,
                                     manager_conf.volume_support,
                                     manager_conf.device_path)
    models.validate_instance_nics(context, instances)

    # 2. Insert a cluster data to clusters table
    db_info = models.DBCluster.create(
        name=name, tenant_id=context.project_id,
        datastore_version_id=datastore_version.id,
        configuration_id=configuration,
        task_status=ClusterTasks.BUILDING_INITIAL)

    # 3. Create instances in OpenStack
    cls._create_insts(context, db_info.id, db_info.name, datastore,
                      datastore_version, instances, locality,
                      configuration)

    # 4. Calling taskmanager to further proceed for cluster-configuration
    LOG.debug(
        "Calling taskmanager to further proceed for "
        "cluster-configuration of %s", db_info.id)
    task_api.load(context,
                  datastore_version.manager).create_cluster(db_info.id)

    # 5. Returns cluster instance to render HTTP response.
    return K2hdkcCluster(context, db_info, datastore, datastore_version)
def _create_instances(context, db_info, datastore, datastore_version,
                      instances, extended_properties, locality,
                      new_cluster=True):
    """Create the member instances for a (Vertica-style) cluster.

    For a new cluster the instance count must equal the configured
    ``cluster_member_count`` and the first instance becomes the master;
    when growing, all new instances are plain members and numbering
    continues after the existing nodes.

    :returns: list of the newly created instance model objects.
    :raises: exception.ClusterNumInstancesNotSupported when a new
             cluster's size does not match the configured count.
    """
    vertica_conf = CONF.get(datastore_version.manager)
    num_instances = len(instances)
    existing = inst_models.DBInstance.find_all(cluster_id=db_info.id).all()
    num_existing = len(existing)

    # Matching number of instances with configured cluster_member_count
    if (new_cluster and
            num_instances != vertica_conf.cluster_member_count):
        raise exception.ClusterNumInstancesNotSupported(
            num_instances=vertica_conf.cluster_member_count)

    models.validate_instance_flavors(
        context, instances, vertica_conf.volume_support,
        vertica_conf.device_path)

    req_volume_size = models.get_required_volume_size(
        instances, vertica_conf.volume_support)

    models.assert_homogeneous_cluster(instances)

    deltas = {'instances': num_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    # The cluster is homogeneous, so flavor/volume come from the first
    # instance; NICs and AZs may still differ per instance.
    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)
    nics = [instance.get('nics', None) for instance in instances]
    azs = [instance.get('availability_zone', None)
           for instance in instances]

    # Creating member instances
    minstances = []
    for i in range(0, num_instances):
        # Only the very first instance of a brand new cluster is master.
        if i == 0 and new_cluster:
            member_config = {"id": db_info.id, "instance_type": "master"}
        else:
            member_config = {"id": db_info.id, "instance_type": "member"}
        instance_name = "%s-member-%s" % (db_info.name,
                                          str(i + num_existing + 1))
        minstances.append(
            inst_models.Instance.create(
                context, instance_name, flavor_id,
                datastore_version.image_id, [], [],
                datastore, datastore_version, volume_size, None,
                nics=nics[i], availability_zone=azs[i],
                configuration_id=None, cluster_config=member_config,
                locality=locality,
                modules=instances[i].get('modules'))
        )
    return minstances
def _validate_cluster_instances(context, instances, datastore,
                                datastore_version):
    """Validate the flavor and volume.

    Also enforces the minimum cluster size, checks quota, and verifies
    that every instance uses the same (existing) network.
    """
    ds_conf = CONF.get(datastore_version.manager)
    num_instances = len(instances)

    # Check number of instances is at least min_cluster_member_count
    if num_instances < ds_conf.min_cluster_member_count:
        raise exception.ClusterNumInstancesNotLargeEnough(
            num_instances=ds_conf.min_cluster_member_count)

    # Checking volumes and get delta for quota check
    cluster_models.validate_instance_flavors(context, instances,
                                             ds_conf.volume_support,
                                             ds_conf.device_path)
    req_volume_size = cluster_models.get_required_volume_size(
        instances, ds_conf.volume_support)
    cluster_models.assert_homogeneous_cluster(instances)
    deltas = {'instances': num_instances, 'volumes': req_volume_size}

    # quota check
    check_quotas(context.tenant, deltas)

    # Checking networks are same for the cluster
    # Only the first NIC of each instance is compared.
    instance_nics = []
    for instance in instances:
        nics = instance.get('nics')
        if nics:
            instance_nics.append(nics[0].get('net-id'))
    if len(set(instance_nics)) > 1:
        raise exception.ClusterNetworksNotEqual()
    if not instance_nics:
        # No NICs specified anywhere; nothing further to verify.
        return
    instance_nic = instance_nics[0]
    try:
        # Verify the shared network actually exists in nova.
        nova_client = remote.create_nova_client(context)
        nova_client.networks.get(instance_nic)
    except nova_exceptions.NotFound:
        raise exception.NetworkNotFound(uuid=instance_nic)
def _validate_cluster_instances(context, instances, datastore,
                                datastore_version):
    """Validate the flavor and volume.

    Also enforces the minimum member count, checks quota, and confirms
    all instances share one existing network.
    """
    ds_conf = CONF.get(datastore_version.manager)
    num_instances = len(instances)

    # Check number of instances is at least min_cluster_member_count
    if num_instances < ds_conf.min_cluster_member_count:
        raise exception.ClusterNumInstancesNotLargeEnough(
            num_instances=ds_conf.min_cluster_member_count)

    # Checking volumes and get delta for quota check
    cluster_models.validate_instance_flavors(
        context, instances, ds_conf.volume_support, ds_conf.device_path)
    req_volume_size = cluster_models.get_required_volume_size(
        instances, ds_conf.volume_support)
    cluster_models.assert_homogeneous_cluster(instances)
    deltas = {'instances': num_instances, 'volumes': req_volume_size}

    # quota check
    check_quotas(context.tenant, deltas)

    # Checking networks are same for the cluster
    # Only each instance's first NIC participates in the comparison.
    instance_nics = []
    for instance in instances:
        nics = instance.get('nics')
        if nics:
            instance_nics.append(nics[0].get('net-id'))
    if len(set(instance_nics)) > 1:
        raise exception.ClusterNetworksNotEqual()
    if not instance_nics:
        # No NICs given at all; nothing further to check.
        return
    instance_nic = instance_nics[0]
    try:
        # Confirm the shared network exists in nova.
        nova_client = remote.create_nova_client(context)
        nova_client.networks.get(instance_nic)
    except nova_exceptions.NotFound:
        raise exception.NetworkNotFound(uuid=instance_nic)
def _validate_cluster_instances(context, instances, datastore,
                                datastore_version):
    """Validate the flavor and volume"""
    conf = CONF.get(datastore_version.manager)

    # Flavors must exist and match the datastore's volume/ephemeral
    # support; the cluster must be homogeneous.
    cluster_models.validate_instance_flavors(
        context, instances, conf.volume_support, conf.device_path)
    volume_delta = cluster_models.get_required_volume_size(
        instances, conf.volume_support)
    cluster_models.assert_homogeneous_cluster(instances)

    # Verify the request fits inside the tenant's quota.
    check_quotas(context.tenant,
                 {'instances': len(instances), 'volumes': volume_delta})

    # Every member must be on the same network.
    cluster_models.validate_instance_nics(context, instances)
def _create_cluster_instances(
        cls, context, cluster_id, cluster_name, datastore,
        datastore_version, instances, extended_properties, locality):
    """Create the member instances for a (Couchbase-style) cluster.

    For a grow operation the flavor/volume requirements and the admin
    password are taken from the existing coordinator node; for a new
    cluster a fresh password is generated.

    :returns: list of the newly created instance model objects.
    """
    LOG.debug("Processing a request for new cluster instances.")

    cluster_node_ids = CouchbaseClusterTasks.find_cluster_node_ids(
        cluster_id)

    cluster_password = None

    # Couchbase imposes cluster wide quota on the memory that get
    # evenly distributed between node services.
    # All nodes (including future nodes) need to be able to accommodate
    # this quota.
    # We therefore require the cluster to be homogeneous.
    # Load the flavor and volume information from the existing instances
    # if any.
    # Generate the administrative password for a new cluster or reuse the
    # one from an existing cluster.
    required_instance_flavor = None
    required_volume_size = None
    for_grow = False
    if cluster_node_ids:
        cluster_nodes = CouchbaseClusterTasks.load_cluster_nodes(
            context, cluster_node_ids)
        coordinator = cluster_nodes[0]
        required_instance_flavor = coordinator['instance'].flavor_id
        required_volume_size = coordinator['instance'].volume_size
        cluster_password = coordinator['guest'].get_cluster_password()
        for_grow = True
    else:
        pwd_len = min(cls.MAX_PASSWORD_LEN, CONF.default_password_length)
        cluster_password = utils.generate_random_password(pwd_len)

    models.assert_homogeneous_cluster(
        instances,
        required_flavor=required_instance_flavor,
        required_volume_size=required_volume_size)

    couchbase_conf = CONF.get(datastore_version.manager)
    eph_enabled = couchbase_conf.device_path
    vol_enabled = couchbase_conf.volume_support

    # Validate instance flavors.
    models.validate_instance_flavors(context, instances,
                                     vol_enabled, eph_enabled)

    # Compute the total volume allocation.
    req_volume_size = models.get_required_volume_size(instances,
                                                      vol_enabled)

    # Check requirements against quota.
    num_new_instances = len(instances)
    deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    instance_types = cls.get_instance_types(
        instances, datastore_version.manager, for_grow)

    # Creating member instances.
    # Numbering continues after any existing nodes.
    num_instances = len(cluster_node_ids)
    new_instances = []
    for new_inst_idx, instance in enumerate(instances):
        instance_idx = new_inst_idx + num_instances + 1
        instance_az = instance.get('availability_zone', None)
        instance_type = instance_types[new_inst_idx]
        cluster_config = {"id": cluster_id,
                          "instance_type": instance_type,
                          "cluster_password": cluster_password}

        instance_name = instance.get('name')
        if not instance_name:
            instance_name = cls._build_instance_name(
                cluster_name, sorted(instance_type), instance_az,
                instance_idx)

        new_instance = inst_models.Instance.create(
            context, instance_name,
            instance['flavor_id'],
            datastore_version.image_id,
            [], [],
            datastore, datastore_version,
            instance.get('volume_size'), None,
            nics=instance.get('nics', None),
            availability_zone=instance_az,
            configuration_id=None,
            cluster_config=cluster_config,
            region_name=instance.get('region_name'),
            locality=locality)

        new_instances.append(new_instance)

    return new_instances
def create(cls, context, name, datastore, datastore_version,
           instances, extended_properties, locality, configuration):
    """Create a TiDB cluster: TiKV members plus PD and TiDB servers.

    Validates the request, records the cluster in the database, creates
    all server instances, then hands off to the taskmanager.

    :returns: a TiDbCluster object for the HTTP response.
    :raises: exception.ConfigurationNotSupported,
             exception.ClusterNumInstancesNotSupported
    """
    if configuration:
        raise exception.ConfigurationNotSupported()

    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    tidb_conf = CONF.get(datastore_version.manager)
    num_tidbsvr = int(extended_properties.get(
        'num_tidbsvr', tidb_conf.num_tidb_servers_per_cluster))
    num_pdsvr = int(extended_properties.get(
        'num_pdsvr', tidb_conf.num_pd_servers_per_cluster))
    # NOTE(review): computed but never used — a check_quotas() call
    # appears to be missing here; TODO confirm against the other
    # cluster strategies before adding one.
    delta_instances = num_instances + num_tidbsvr + num_pdsvr

    models.validate_instance_flavors(
        context, instances, tidb_conf.volume_support,
        tidb_conf.device_path)

    models.assert_homogeneous_cluster(instances)

    # Homogeneous cluster: flavor/volume/NICs come from the first entry.
    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)
    volume_type = instances[0].get('volume_type', None)
    nics = instances[0].get('nics', None)
    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"

    tikv_config = {"id": db_info.id,
                   "shard_id": utils.generate_uuid(),
                   "instance_type": "tikv",
                   "replica_set_name": replica_set_name}
    tidbsvr_config = {"id": db_info.id,
                      "instance_type": "tidb_server"}
    pdsvr_config = {"id": db_info.id,
                    "instance_type": "pd_server"}

    # PD servers.
    # BUG FIX: the loop bound was `range(1, pdsvr_config + 1)` — adding
    # 1 to a dict raises TypeError; the requested count is num_pdsvr.
    for i in range(1, num_pdsvr + 1):
        # NOTE(review): the "tidb" label for PD servers looks like a
        # copy/paste slip inherited from the MongoDB strategy — TODO
        # confirm before renaming (names are user-visible).
        instance_name = "%s-%s-%s" % (name, "tidb", str(i))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=nics,
                                    configuration_id=None,
                                    cluster_config=pdsvr_config,
                                    volume_type=volume_type,
                                    locality=locality,
                                    region_name=regions[i % num_instances]
                                    )

    # TiDB servers.
    for i in range(1, num_tidbsvr + 1):
        # NOTE(review): "configsvr" likewise looks inherited from the
        # MongoDB strategy — TODO confirm.
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=nics,
                                    configuration_id=None,
                                    cluster_config=tidbsvr_config,
                                    volume_type=volume_type,
                                    locality=locality,
                                    region_name=regions[i % num_instances]
                                    )

    # TiKV members, one per requested instance.
    for i in range(0, num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nics,
                                    configuration_id=None,
                                    cluster_config=tikv_config,
                                    volume_type=volume_type,
                                    modules=instances[i].get('modules'),
                                    locality=locality,
                                    region_name=regions[i])

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)

    return TiDbCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version,
           instances, extended_properties, locality, configuration):
    """Create a sharded MongoDB cluster: members, config servers, mongos.

    Validates the request, checks quota for all node types, records the
    cluster in the database, creates every instance, then hands off to
    the taskmanager.

    :returns: a MongoDbCluster object for the HTTP response.
    :raises: exception.ConfigurationNotSupported,
             exception.ClusterNumInstancesNotSupported
    """
    if configuration:
        raise exception.ConfigurationNotSupported()

    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    mongo_conf = CONF.get(datastore_version.manager)
    num_configsvr = mongo_conf.num_config_servers_per_cluster
    num_mongos = mongo_conf.num_query_routers_per_cluster
    delta_instances = num_instances + num_configsvr + num_mongos

    models.validate_instance_flavors(context, instances,
                                     mongo_conf.volume_support,
                                     mongo_conf.device_path)
    models.assert_homogeneous_cluster(instances)

    req_volume_size = models.get_required_volume_size(
        instances, mongo_conf.volume_support)

    deltas = {'instances': delta_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    # Homogeneous cluster: flavor/volume from the first entry; NICs,
    # AZs and regions may differ per member.
    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)
    nics = [instance.get('nics', None) for instance in instances]
    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"

    member_config = {
        "id": db_info.id,
        "shard_id": utils.generate_uuid(),
        "instance_type": "member",
        "replica_set_name": replica_set_name
    }
    configsvr_config = {"id": db_info.id, "instance_type": "config_server"}
    mongos_config = {"id": db_info.id, "instance_type": "query_router"}
    if mongo_conf.cluster_secure:
        # All node types share one keyfile secret.
        cluster_key = utils.generate_random_password()
        member_config['key'] = cluster_key
        configsvr_config['key'] = cluster_key
        mongos_config['key'] = cluster_key

    # Replica-set members.
    for i in range(0, num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nics[i],
                                    configuration_id=None,
                                    cluster_config=member_config,
                                    modules=instances[i].get('modules'),
                                    locality=locality,
                                    region_name=regions[i])

    # Config servers.
    # BUG FIX: this loop is 1-based, so `regions[i]` overran the list
    # (IndexError) whenever num_configsvr >= num_instances (the common
    # default is 3); wrap the index across the member regions instead.
    for i in range(1, num_configsvr + 1):
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=configsvr_config,
                                    locality=locality,
                                    region_name=regions[
                                        (i - 1) % num_instances])

    # Query routers (mongos).
    # Same off-by-one region indexing fix as the config servers above.
    for i in range(1, num_mongos + 1):
        instance_name = "%s-%s-%s" % (name, "mongos", str(i))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=mongos_config,
                                    locality=locality,
                                    region_name=regions[
                                        (i - 1) % num_instances])

    task_api.load(context,
                  datastore_version.manager).create_cluster(db_info.id)

    return MongoDbCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version,
           instances, extended_properties, locality, configuration):
    """Create a sharded MongoDB cluster: members, config servers, mongos.

    Validates the request (including that all members share one NIC),
    checks quota, records the cluster, creates every instance, then
    hands off to the taskmanager.

    :returns: a MongoDbCluster object for the HTTP response.
    :raises: exception.ConfigurationNotSupported,
             exception.ClusterNumInstancesNotSupported, ValueError
    """
    if configuration:
        raise exception.ConfigurationNotSupported()

    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    mongo_conf = CONF.get(datastore_version.manager)
    # Config-server count is clamped to either 1 or 3.
    num_configsvr = (1 if mongo_conf.num_config_servers_per_cluster == 1
                     else 3)
    num_mongos = mongo_conf.num_query_routers_per_cluster
    delta_instances = num_instances + num_configsvr + num_mongos

    models.validate_instance_flavors(
        context, instances, mongo_conf.volume_support,
        mongo_conf.device_path)
    models.assert_homogeneous_cluster(instances)

    req_volume_size = models.get_required_volume_size(
        instances, mongo_conf.volume_support)

    deltas = {'instances': delta_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    # Homogeneous cluster: flavor/volume from the first entry.
    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)
    nics = [instance.get('nics', None) for instance in instances]

    # Every member must request the identical NIC specification.
    nic = nics[0]
    for n in nics[1:]:
        if n != nic:
            raise ValueError(_('All cluster nics must be the same. '
                               '%(nic)s != %(n)s')
                             % {'nic': nic, 'n': n})

    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"

    member_config = {"id": db_info.id,
                     "shard_id": utils.generate_uuid(),
                     "instance_type": "member",
                     "replica_set_name": replica_set_name}
    configsvr_config = {"id": db_info.id,
                        "instance_type": "config_server"}
    mongos_config = {"id": db_info.id,
                     "instance_type": "query_router"}
    if mongo_conf.cluster_secure:
        # One shared, base64-encoded keyfile secret for all node types.
        cluster_key = base64.b64encode(utils.generate_random_password())
        member_config['key'] = cluster_key
        configsvr_config['key'] = cluster_key
        mongos_config['key'] = cluster_key

    # Replica-set members.
    for i in range(num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=member_config,
                                    modules=instances[i].get('modules'),
                                    locality=locality,
                                    region_name=regions[i])

    # Config servers, spread across the member AZs.
    for i in range(num_configsvr):
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i + 1))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[
                                        i % num_instances],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=configsvr_config,
                                    locality=locality,
                                    region_name=regions[i])

    # Query routers (mongos), spread across the member AZs.
    for i in range(num_mongos):
        instance_name = "%s-%s-%s" % (name, "mongos", str(i + 1))
        inst_models.Instance.create(context, instance_name,
                                    flavor_id,
                                    datastore_version.image_id,
                                    [], [], datastore,
                                    datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[
                                        i % num_instances],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=mongos_config,
                                    locality=locality,
                                    region_name=regions[i])

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)

    return MongoDbCluster(context, db_info, datastore, datastore_version)
def _create_cluster_instances(cls, context, cluster_id, cluster_name,
                              datastore, datastore_version,
                              instances, extended_properties, locality):
    """Create the member instances for a (Couchbase-style) cluster.

    For a grow operation the flavor/volume requirements and the admin
    password come from the existing coordinator node; for a new cluster
    a fresh password is generated.

    :returns: list of the newly created instance model objects.
    """
    LOG.debug("Processing a request for new cluster instances.")

    cluster_node_ids = CouchbaseClusterTasks.find_cluster_node_ids(
        cluster_id)

    cluster_password = None

    # Couchbase imposes cluster wide quota on the memory that get
    # evenly distributed between node services.
    # All nodes (including future nodes) need to be able to accommodate
    # this quota.
    # We therefore require the cluster to be homogeneous.
    # Load the flavor and volume information from the existing instances
    # if any.
    # Generate the administrative password for a new cluster or reuse the
    # one from an existing cluster.
    required_instance_flavor = None
    required_volume_size = None
    if cluster_node_ids:
        cluster_nodes = CouchbaseClusterTasks.load_cluster_nodes(
            context, cluster_node_ids)
        coordinator = cluster_nodes[0]
        required_instance_flavor = coordinator['instance'].flavor_id
        required_volume_size = coordinator['instance'].volume_size
        cluster_password = coordinator['guest'].get_cluster_password()
    else:
        pwd_len = min(cls.MAX_PASSWORD_LEN, CONF.default_password_length)
        cluster_password = utils.generate_random_password(pwd_len)

    models.assert_homogeneous_cluster(
        instances,
        required_flavor=required_instance_flavor,
        required_volume_size=required_volume_size)

    couchbase_conf = CONF.get(datastore_version.manager)
    eph_enabled = couchbase_conf.device_path
    vol_enabled = couchbase_conf.volume_support

    # Validate instance flavors.
    models.validate_instance_flavors(context, instances,
                                     vol_enabled, eph_enabled)

    # Compute the total volume allocation.
    req_volume_size = models.get_required_volume_size(
        instances, vol_enabled)

    # Check requirements against quota.
    num_new_instances = len(instances)
    deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    # Creating member instances.
    # Numbering continues after any existing nodes.
    num_instances = len(cluster_node_ids)
    new_instances = []
    for instance_idx, instance in enumerate(instances, num_instances + 1):
        instance_az = instance.get('availability_zone', None)

        member_config = {
            "id": cluster_id,
            "instance_type": "member",
            "cluster_password": cluster_password
        }

        instance_name = instance.get('name')
        if not instance_name:
            instance_name = cls._build_instance_name(
                cluster_name, cls.DEFAULT_SERVICES, instance_az,
                instance_idx)

        new_instance = inst_models.Instance.create(
            context, instance_name,
            instance['flavor_id'],
            datastore_version.image_id,
            [], [],
            datastore, datastore_version,
            instance['volume_size'], None,
            nics=instance.get('nics', None),
            availability_zone=instance_az,
            configuration_id=None,
            cluster_config=member_config,
            region_name=instance.get('region_name'),
            locality=locality)

        new_instances.append(new_instance)

    return new_instances