def test_load_api(self, get_client_mock):
    context = Mock()
    manager = 'mongodb'
    self.assertIsInstance(task_api.load(context), task_api.API)
    self.assertIsInstance(task_api.load(context, manager),
                          MongoDbTaskManagerAPI)

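# The tests above and below pin down the dispatch contract of
# task_api.load(): with no manager argument it returns the generic
# task_api.API, while a datastore manager name selects the
# datastore-specific subclass (e.g. MongoDbTaskManagerAPI for 'mongodb').
# A minimal sketch of a loader satisfying that contract follows; the
# strategy lookup is an assumed mechanism, not necessarily the exact
# upstream implementation.
def load(context, manager=None):
    if manager:
        # Resolve the datastore-specific task-manager API class.
        api_class = (strategy.load_taskmanager_strategy(manager)
                     .task_manager_api_class)
    else:
        api_class = API
    return api_class(context)
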
def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality):
    LOG.debug("Initiating cluster creation.")
    vertica_conf = CONF.get(datastore_version.manager)
    num_instances = len(instances)

    # Matching number of instances with configured cluster_member_count
    if num_instances != vertica_conf.cluster_member_count:
        raise exception.ClusterNumInstancesNotSupported(
            num_instances=vertica_conf.cluster_member_count)

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    cls._create_instances(context, db_info, datastore, datastore_version,
                          instances, extended_properties, locality,
                          new_cluster=True)

    # Calling taskmanager to further proceed for cluster-configuration
    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return VerticaCluster(context, db_info, datastore, datastore_version)

def test_load_api(self):
    context = Mock()
    manager = 'mongodb'
    self.assertTrue(isinstance(task_api.load(context), task_api.API))
    self.assertTrue(isinstance(task_api.load(context, manager),
                               MongoDbTaskManagerAPI))

def shrink(self, instance_ids):
    self.validate_cluster_available()
    context = self.context
    db_info = self.db_info
    datastore_version = self.ds_version

    for db_instance in self.db_instances:
        if db_instance.type == 'master':
            if db_instance.id in instance_ids:
                raise exception.ClusterShrinkInstanceInUse(
                    id=db_instance.id,
                    reason="Cannot remove master node."
                )

    all_instance_ids = [db_instance.id
                        for db_instance in self.db_instances]
    left_instances = [instance_id for instance_id in all_instance_ids
                      if instance_id not in instance_ids]

    k = self.k_safety(len(left_instances))
    vertica_conf = CONF.get(datastore_version.manager)
    if k < vertica_conf.min_ksafety:
        raise exception.ClusterNumInstancesBelowSafetyThreshold()

    db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
    task_api.load(context, datastore_version.manager).shrink_cluster(
        self.db_info.id, instance_ids)
    return VerticaCluster(self.context, db_info, self.ds, self.ds_version)

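# The k_safety() helper called above maps a surviving node count to a
# Vertica K-safety level; its body is not shown in this section. The
# sketch below is a hypothetical implementation based on Vertica's
# documented rule that K-safety level K requires at least 2K + 1 nodes
# and is capped at K = 2.
def k_safety(self, num_nodes):
    # Highest K-safety sustainable by num_nodes nodes: min((n-1)//2, 2).
    return min((num_nodes - 1) // 2, 2)
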
def grow(self, instances): LOG.debug("Growing cluster %s." % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) try: locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) configuration_id = self.db_info.configuration_id new_instances = self._create_instances( context, db_info, datastore, datastore_version, instances, None, locality, configuration_id) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) except Exception: db_info.update(task_status=ClusterTasks.NONE) raise return self.__class__(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Growing cluster %s." % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) try: # Get the network of the existing cluster instances. interface_ids = self._get_cluster_network_interfaces() for instance in instances: instance["nics"] = interface_ids locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) new_instances = self._create_instances( context, db_info, datastore, datastore_version, instances, None, locality) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) except Exception: db_info.update(task_status=ClusterTasks.NONE) return self.__class__(context, db_info, datastore, datastore_version)
def shrink(self, removal_ids): LOG.debug("Processing a request for shrinking cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version # we have to make sure that there are nodes with all the required # services left after the shrink remaining_instance_services = self._get_remaining_instance_services( db_info.id, removal_ids) try: self.validate_instance_types( remaining_instance_services, datastore_version.manager) except exception.ClusterInstanceTypeMissing as ex: raise exception.TroveError( _("The remaining instances would not be valid: %s") % str(ex)) db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(context, datastore_version.manager).shrink_cluster( db_info.id, removal_ids) return CouchbaseCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): LOG.debug("Processing a request for creating a new cluster.") if configuration: raise exception.ConfigurationNotSupported() cls.validate_instance_types(instances, datastore_version.manager, for_grow=False) # Updating Cluster Task. db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, extended_properties, locality) # Calling taskmanager to further proceed for cluster-configuration. task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return CouchbaseCluster(context, db_info, datastore, datastore_version)
def test_load_api(self, get_client_mock):
    context = trove_testtools.TroveTestContext(self)
    manager = 'mongodb'
    self.assertTrue(isinstance(task_api.load(context), task_api.API))
    self.assertTrue(isinstance(task_api.load(context, manager),
                               MongoDbTaskManagerAPI))

def rolling_restart(self):
    self.validate_cluster_available()
    self.db_info.update(task_status=ClusterTasks.RESTARTING_CLUSTER)
    try:
        cluster_id = self.db_info.id
        task_api.load(self.context,
                      self.ds_version.manager).restart_cluster(cluster_id)
    except Exception:
        self.db_info.update(task_status=ClusterTasks.NONE)
        raise
    return self.__class__(self.context, self.db_info, self.ds,
                          self.ds_version)

def add_shard(self): if self.db_info.task_status != ClusterTasks.NONE: current_task = self.db_info.task_status.name msg = _("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") % current_task LOG.error(msg) raise exception.UnprocessableEntity(msg) db_insts = inst_models.DBInstance.find_all(cluster_id=self.id, type='member').all() num_unique_shards = len(set([db_inst.shard_id for db_inst in db_insts])) arbitrary_shard_id = db_insts[0].shard_id members_in_shard = [db_inst for db_inst in db_insts if db_inst.shard_id == arbitrary_shard_id] num_members_per_shard = len(members_in_shard) a_member = inst_models.load_any_instance(self.context, members_in_shard[0].id) deltas = {'instances': num_members_per_shard} volume_size = a_member.volume_size if volume_size: deltas['volumes'] = volume_size * num_members_per_shard check_quotas(self.context.tenant, deltas) new_replica_set_name = "rs" + str(num_unique_shards + 1) new_shard_id = utils.generate_uuid() member_config = {"id": self.id, "shard_id": new_shard_id, "instance_type": "member", "replica_set_name": new_replica_set_name} for i in range(1, num_members_per_shard + 1): instance_name = "%s-%s-%s" % (self.name, new_replica_set_name, str(i)) inst_models.Instance.create(self.context, instance_name, a_member.flavor_id, a_member.datastore_version.image_id, [], [], a_member.datastore, a_member.datastore_version, volume_size, None, availability_zone=None, nics=None, configuration_id=None, cluster_config=member_config) self.update_db(task_status=ClusterTasks.ADDING_SHARD) manager = (datastore_models.DatastoreVersion. load_by_uuid(db_insts[0].datastore_version_id).manager) task_api.load(self.context, manager).mongodb_add_shard_cluster( self.id, new_shard_id, new_replica_set_name)
def shrink(self, instances): """Removes instances from a cluster.""" LOG.debug("Shrinking cluster %s." % self.id) self.validate_cluster_available() removal_instances = [Instance.load(self.context, inst_id) for inst_id in instances] db_instances = DBInstance.find_all(cluster_id=self.db_info.id).all() if len(db_instances) - len(removal_instances) < 1: raise exception.ClusterShrinkMustNotLeaveClusterEmpty() self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(self.context, self.ds_version.manager).shrink_cluster( self.db_info.id, [instance.id for instance in removal_instances] ) return PXCCluster(self.context, self.db_info, self.ds, self.ds_version)
def shrink(self, removal_ids): LOG.debug("Processing a request for shrinking cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(context, datastore_version.manager).shrink_cluster( db_info.id, removal_ids) return CouchbaseCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Initiating PXC cluster creation.") cls._validate_cluster_instances(context, instances, datastore, datastore_version) # Updating Cluster Task db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL, ) cls._create_instances(context, db_info, datastore, datastore_version, instances) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster(db_info.id) return PXCCluster(context, db_info, datastore, datastore_version)
def rolling_upgrade(self, datastore_version):
    """Upgrades a cluster to a new datastore version."""
    LOG.debug("Upgrading cluster %s." % self.id)
    self.validate_cluster_available()
    self.db_info.update(task_status=ClusterTasks.UPGRADING_CLUSTER)
    try:
        cluster_id = self.db_info.id
        ds_ver_id = datastore_version.id
        task_api.load(self.context,
                      self.ds_version.manager).upgrade_cluster(cluster_id,
                                                               ds_ver_id)
    except Exception:
        self.db_info.update(task_status=ClusterTasks.NONE)
        raise
    return self.__class__(self.context, self.db_info, self.ds,
                          self.ds_version)

def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Processing a request for creating a new cluster.") # Updating Cluster Task. db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, extended_properties) # Calling taskmanager to further proceed for cluster-configuration. task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return CouchbaseCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality): LOG.debug("Processing a request for creating a new cluster.") # Updating Cluster Task. db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_cluster_instances(context, db_info.id, db_info.name, datastore, datastore_version, instances, extended_properties, locality) # Calling taskmanager to further proceed for cluster-configuration. task_api.load(context, datastore_version.manager).create_cluster(db_info.id) return CassandraCluster(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Growing cluster.") self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_instances(context, db_info, datastore, datastore_version, instances, new_cluster=False) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return VerticaCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality): LOG.debug("Initiating Galera cluster creation.") cls._validate_cluster_instances(context, instances, datastore, datastore_version) # Updating Cluster Task db_info = cluster_models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster(db_info.id) return cls(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Processing a request for growing cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return CouchbaseCluster(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Processing a request for growing cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_cluster_instances(context, db_info.id, db_info.name, datastore, datastore_version, instances) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return CassandraCluster(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Growing cluster.") self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) new_instances = self._create_instances(context, db_info, datastore, datastore_version, instances, None, locality) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return RedisCluster(context, db_info, datastore, datastore_version)
def shrink(self, instances): """Removes instances from a cluster.""" LOG.debug("Shrinking cluster %s." % self.id) self.validate_cluster_available() removal_instances = [Instance.load(self.context, inst_id) for inst_id in instances] db_instances = DBInstance.find_all(cluster_id=self.db_info.id).all() if len(db_instances) - len(removal_instances) < 1: raise exception.ClusterShrinkMustNotLeaveClusterEmpty() self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) try: task_api.load(self.context, self.ds_version.manager ).shrink_cluster(self.db_info.id, [instance.id for instance in removal_instances]) except Exception: self.db_info.update(task_status=ClusterTasks.NONE) return self.__class__(self.context, self.db_info, self.ds, self.ds_version)
def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): LOG.debug("Initiating cluster creation.") if configuration: raise exception.ConfigurationNotSupported() # Updating Cluster Task db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return RedisCluster(context, db_info, datastore, datastore_version)
def shrink(self, removal_ids):  # pylint: disable=arguments-differ
    """Shrink Cluster API endpoint.

    The main logic for shrinking a cluster lives here:
    https://github.com/openstack/trove/blob/master/trove/cluster/service.py#L60-L86
    https://github.com/openstack/trove/blob/master/trove/cluster/models.py#L305
    """
    LOG.debug("Shrinking cluster {} {}".format(self.id, removal_ids))

    # 1. Validate args.
    if not removal_ids:
        LOG.error("no removal_ids")
        return False
    self.validate_cluster_available()

    # 2. Update the cluster status.
    self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)

    # 3. Call the taskmanager's shrink_cluster endpoint.
    task_api.load(self.context, self.ds_version.manager).shrink_cluster(
        self.db_info.id, removal_ids)
    return True

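# For reference, a shrink request reaches an implementation like the one
# above through the cluster action endpoint. The payload shape below
# follows the upstream Trove cluster API linked in the docstring; the
# instance ids are placeholders.
#
#     POST /v1.0/{tenant_id}/clusters/{cluster_id}
#     {
#         "shrink": [
#             {"id": "416b0b16-ba55-4302-bbd3-ff566032e1c1"},
#             {"id": "76d87773-18bb-4a63-beb1-e2a7e3f6d730"}
#         ]
#     }
#
# The service layer extracts the list of ids and passes it to shrink()
# as removal_ids.
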
def grow(self, instances): LOG.debug("Processing a request for growing cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) configuration_id = self.db_info.configuration_id new_instances = self._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, None, locality, configuration_id) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return CassandraCluster(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Growing cluster %s." % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version # Get the network of the existing cluster instances. interface_ids = self._get_cluster_network_interfaces() for instance in instances: instance["nics"] = interface_ids db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) new_instances = self._create_instances(context, db_info, datastore, datastore_version, instances) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances] ) return PXCCluster(context, db_info, datastore, datastore_version)
def grow(self, instances): LOG.debug("Processing a request for growing cluster: %s" % self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version self.validate_instance_types(instances, datastore_version.manager, for_grow=True) db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) new_instances = self._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, None, locality) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return CouchbaseCluster(context, db_info, datastore, datastore_version)
def _prep_resize(self):
    """Get information about the cluster's current state."""
    if self.db_info.task_status != ClusterTasks.NONE:
        current_task = self.db_info.task_status.name
        msg = _("This action cannot be performed on the cluster while "
                "the current cluster task is '%s'.") % current_task
        LOG.error(msg)
        raise exception.UnprocessableEntity(msg)

    def _instances_of_type(instance_type):
        return [db_inst for db_inst in self.db_instances
                if db_inst.type == instance_type]

    self.config_svrs = _instances_of_type('config_server')
    self.query_routers = _instances_of_type('query_router')
    self.members = _instances_of_type('member')
    self.shard_ids = set([member.shard_id for member in self.members])
    self.arbitrary_query_router = inst_models.load_any_instance(
        self.context, self.query_routers[0].id)
    self.manager = task_api.load(self.context,
                                 self.datastore_version.manager)

def add_shard(self): if self.db_info.task_status != ClusterTasks.NONE: current_task = self.db_info.task_status.name msg = _("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") % current_task LOG.error(msg) raise exception.UnprocessableEntity(msg) db_insts = inst_models.DBInstance.find_all(cluster_id=self.id, type='member').all() num_unique_shards = len(set([db_inst.shard_id for db_inst in db_insts])) if num_unique_shards == 0: msg = _("This action cannot be performed on the cluster as no " "reference shard exists.") LOG.error(msg) raise exception.UnprocessableEntity(msg) arbitrary_shard_id = db_insts[0].shard_id members_in_shard = [ db_inst for db_inst in db_insts if db_inst.shard_id == arbitrary_shard_id ] num_members_per_shard = len(members_in_shard) a_member = inst_models.load_any_instance(self.context, members_in_shard[0].id) deltas = {'instances': num_members_per_shard} volume_size = a_member.volume_size if volume_size: deltas['volumes'] = volume_size * num_members_per_shard check_quotas(self.context.tenant, deltas) new_replica_set_name = "rs" + str(num_unique_shards + 1) new_shard_id = utils.generate_uuid() dsv_manager = (datastore_models.DatastoreVersion.load_by_uuid( db_insts[0].datastore_version_id).manager) manager = task_api.load(self.context, dsv_manager) key = manager.get_key(a_member) member_config = { "id": self.id, "shard_id": new_shard_id, "instance_type": "member", "replica_set_name": new_replica_set_name, "key": key } locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) for i in range(1, num_members_per_shard + 1): instance_name = "%s-%s-%s" % (self.name, new_replica_set_name, str(i)) inst_models.Instance.create(self.context, instance_name, a_member.flavor_id, a_member.datastore_version.image_id, [], [], a_member.datastore, a_member.datastore_version, volume_size, None, availability_zone=None, nics=None, configuration_id=None, cluster_config=member_config, locality=locality) self.update_db(task_status=ClusterTasks.ADDING_SHARD) manager.mongodb_add_shard_cluster(self.id, new_shard_id, new_replica_set_name)
def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Initiating cluster creation.") vertica_conf = CONF.get(datastore_version.manager) num_instances = len(instances) # Matching number of instances with configured cluster_member_count if num_instances != vertica_conf.cluster_member_count: raise exception.ClusterNumInstancesNotSupported( num_instances=vertica_conf.cluster_member_count) # Checking flavors flavor_ids = [instance['flavor_id'] for instance in instances] if len(set(flavor_ids)) != 1: raise exception.ClusterFlavorsNotEqual() flavor_id = flavor_ids[0] nova_client = remote.create_nova_client(context) try: flavor = nova_client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) deltas = {'instances': num_instances} # Checking volumes volume_sizes = [ instance['volume_size'] for instance in instances if instance.get('volume_size', None) ] volume_size = None if vertica_conf.volume_support: if len(volume_sizes) != num_instances: raise exception.ClusterVolumeSizeRequired() if len(set(volume_sizes)) != 1: raise exception.ClusterVolumeSizesNotEqual() volume_size = volume_sizes[0] models.validate_volume_size(volume_size) deltas['volumes'] = volume_size * num_instances else: if len(volume_sizes) > 0: raise exception.VolumeNotSupported() ephemeral_support = vertica_conf.device_path if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) check_quotas(context.tenant, deltas) nics = [instance.get('nics', None) for instance in instances] azs = [ instance.get('availability_zone', None) for instance in instances ] # Updating Cluster Task db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) # Creating member instances for i in range(0, num_instances): if i == 0: member_config = {"id": db_info.id, "instance_type": "master"} else: member_config = {"id": db_info.id, "instance_type": "member"} instance_name = "%s-member-%s" % (name, str(i + 1)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, nics=nics[i], availability_zone=azs[i], configuration_id=None, cluster_config=member_config) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster(db_info.id) return VerticaCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality, configuration):
    if configuration:
        raise exception.ConfigurationNotSupported()
    nova_client = remote.create_nova_client(context)
    network_driver = (importutils.import_class(
        CONF.network_driver))(context, None)
    ds_conf = CONF.get(datastore_version.manager)
    num_instances = len(instances)

    # Run checks first
    if not network_driver.subnet_support:
        raise exception.TroveError(_(
            "The configured network driver does not support subnet "
            "management. This is required for Oracle RAC clusters."))
    quota.check_quotas(context.tenant, {'instances': num_instances})
    for instance in instances:
        if not instance.get('flavor_id'):
            raise exception.BadRequest(_("Missing required flavor_id."))
        try:
            nova_client.flavors.get(instance['flavor_id'])
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=instance['flavor_id'])
        if instance.get('volume_size'):
            raise exception.VolumeNotSupported()
        if instance.get('region_name'):
            raise exception.BadRequest(_("Instance region_name option "
                                         "not supported."))
    database = extended_properties.get('database')
    if not database:
        raise exception.BadRequest(_("Missing database name."))
    if len(database) > 8:
        raise exception.BadValue(_("Database name greater than 8 chars."))
    storage_info = check_storage_info(extended_properties)
    subnet, subnetpool, network = check_public_network_info(
        ds_conf, network_driver, num_instances, extended_properties)

    ssh_pem, ssh_pub = crypto_utils.generate_ssh_keys()
    sys_password = utils.generate_random_password(
        datastore=datastore.name)
    admin_password = utils.generate_random_password(
        datastore=datastore.name)

    # Create the cluster
    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=tasks.ClusterTasks.BUILDING_INITIAL)

    if not subnet:
        LOG.debug("Creating RAC public subnet on network {net} from "
                  "pool {pool}".format(net=network['id'],
                                       pool=subnetpool['id']))
        subnet = create_public_subnet_from_pool(
            ds_conf, network_driver, db_info.id, subnetpool, network,
            extended_properties.get('router'),
            extended_properties.get('prefixlen'))
        LOG.debug("Created subnet {sub} with CIDR {cidr}".format(
            sub=subnet['id'], cidr=subnet['cidr']))

    interconnect_network, interconnect_subnet = create_interconnect(
        ds_conf, network_driver, db_info.id)
    LOG.debug("Created interconnect network {net} with subnet "
              "{sub}".format(net=interconnect_network['id'],
                             sub=interconnect_subnet['id']))

    public_subnet_manager = rac_utils.RACPublicSubnetManager(
        subnet['cidr'])
    interconnect_subnet_manager = rac_utils.CommonSubnetManager(
        interconnect_subnet['cidr'])

    subnet = configure_public_subnet(
        ds_conf, network_driver, db_info.id, subnet,
        public_subnet_manager.allocation_pool)
    LOG.debug("RAC public subnet ({sub_id}) info: name='{name}', scans="
              "{scans}".format(sub_id=subnet['id'], name=subnet['name'],
                               scans=public_subnet_manager.scan_list))

    cluster_config = {
        'id': db_info.id,
        'instance_type': 'node',
        'storage': storage_info,
        'ssh_pem': ssh_pem,
        'ssh_pub': ssh_pub,
        'database': database,
        'sys_password': sys_password,
        'admin_password': admin_password}

    vips = (public_subnet_manager.scan_list +
            [public_subnet_manager.instance_vip(i)
             for i in range(len(instances))])
    for i, instance in enumerate(instances):
        instance_name = rac_utils.make_instance_hostname(name, i)
        nics = instance.get('nics') or []
        public_port_name = rac_utils.make_object_name(
            ds_conf, ['public', 'port', str(i + 1)], db_info.id)
        public_port = create_port(
            network_driver, public_port_name, i, subnet,
            public_subnet_manager, vips=vips)
        interconnect_port_name = rac_utils.make_object_name(
            ds_conf, ['interconnect', 'port', str(i + 1)], db_info.id)
        interconnect_port = create_port(
            network_driver, interconnect_port_name, i,
            interconnect_subnet, interconnect_subnet_manager)
        nics.append({'port-id': public_port['id']})
        nics.append({'port-id': interconnect_port['id']})
        LOG.debug("Creating instance {name} with public ip {pub} and "
                  "interconnect ip {int}".format(
                      name=instance_name,
                      pub=public_port['fixed_ips'][0]['ip_address'],
                      int=interconnect_port['fixed_ips'][0]['ip_address']))
        inst_models.Instance.create(
            context, instance_name, instance['flavor_id'],
            datastore_version.image_id, [], [], datastore,
            datastore_version, None, None,
            availability_zone=instance.get('availability_zone'),
            nics=nics, cluster_config=cluster_config,
            modules=instance.get('modules'), locality=locality)

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return OracleRACCluster(context, db_info, datastore, datastore_version)

def create(cls, context, name, datastore, datastore_version, instances):
    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    flavor_ids = [instance['flavor_id'] for instance in instances]
    if len(set(flavor_ids)) != 1:
        raise exception.ClusterFlavorsNotEqual()
    flavor_id = flavor_ids[0]
    nova_client = remote.create_nova_client(context)
    try:
        flavor = nova_client.flavors.get(flavor_id)
    except nova_exceptions.NotFound:
        raise exception.FlavorNotFound(uuid=flavor_id)

    mongo_conf = CONF.get(datastore_version.manager)
    num_configsvr = mongo_conf.num_config_servers_per_cluster
    num_mongos = mongo_conf.num_query_routers_per_cluster
    delta_instances = num_instances + num_configsvr + num_mongos
    deltas = {'instances': delta_instances}

    volume_sizes = [instance['volume_size'] for instance in instances
                    if instance.get('volume_size', None)]
    volume_size = None
    if mongo_conf.volume_support:
        if len(volume_sizes) != num_instances:
            raise exception.ClusterVolumeSizeRequired()
        if len(set(volume_sizes)) != 1:
            raise exception.ClusterVolumeSizesNotEqual()
        volume_size = volume_sizes[0]
        models.validate_volume_size(volume_size)
        # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
        deltas['volumes'] = volume_size * delta_instances
    else:
        # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
        if len(volume_sizes) > 0:
            raise exception.VolumeNotSupported()
        ephemeral_support = mongo_conf.device_path
        if ephemeral_support and flavor.ephemeral == 0:
            raise exception.LocalStorageNotSpecified(flavor=flavor_id)

    check_quotas(context.tenant, deltas)

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"
    member_config = {"id": db_info.id,
                     "shard_id": utils.generate_uuid(),
                     "instance_type": "member",
                     "replica_set_name": replica_set_name}
    for i in range(1, num_instances + 1):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=member_config)

    configsvr_config = {"id": db_info.id,
                        "instance_type": "config_server"}
    for i in range(1, num_configsvr + 1):
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=configsvr_config)

    mongos_config = {"id": db_info.id, "instance_type": "query_router"}
    for i in range(1, num_mongos + 1):
        instance_name = "%s-%s-%s" % (name, "mongos", str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=mongos_config)

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return MongoDbCluster(context, db_info, datastore, datastore_version)

def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality, configuration):
    if configuration:
        raise exception.ConfigurationNotSupported()

    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    mongo_conf = CONF.get(datastore_version.manager)
    num_configsvr = (1 if mongo_conf.num_config_servers_per_cluster == 1
                     else 3)
    num_mongos = mongo_conf.num_query_routers_per_cluster
    delta_instances = num_instances + num_configsvr + num_mongos

    models.validate_instance_flavors(
        context, instances, mongo_conf.volume_support,
        mongo_conf.device_path)
    models.assert_homogeneous_cluster(instances)
    req_volume_size = models.get_required_volume_size(
        instances, mongo_conf.volume_support)

    deltas = {'instances': delta_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)

    nics = [instance.get('nics', None) for instance in instances]
    nic = nics[0]
    for n in nics[1:]:
        if n != nic:
            raise ValueError(_('All cluster nics must be the same. '
                               '%(nic)s != %(n)s')
                             % {'nic': nic, 'n': n})
    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"
    member_config = {"id": db_info.id,
                     "shard_id": utils.generate_uuid(),
                     "instance_type": "member",
                     "replica_set_name": replica_set_name}
    configsvr_config = {"id": db_info.id,
                        "instance_type": "config_server"}
    mongos_config = {"id": db_info.id, "instance_type": "query_router"}
    if mongo_conf.cluster_secure:
        cluster_key = base64.b64encode(utils.generate_random_password())
        member_config['key'] = cluster_key
        configsvr_config['key'] = cluster_key
        mongos_config['key'] = cluster_key

    for i in range(num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=member_config,
                                    modules=instances[i].get('modules'),
                                    locality=locality,
                                    region_name=regions[i])
    # Wrap the az/region index so server counts larger than the member
    # list cannot overrun it.
    for i in range(num_configsvr):
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[
                                        i % num_instances],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=configsvr_config,
                                    locality=locality,
                                    region_name=regions[
                                        i % num_instances])
    for i in range(num_mongos):
        instance_name = "%s-%s-%s" % (name, "mongos", str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[
                                        i % num_instances],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=mongos_config,
                                    locality=locality,
                                    region_name=regions[
                                        i % num_instances])

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return MongoDbCluster(context, db_info, datastore, datastore_version)

def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality, configuration):
    if configuration:
        raise exception.ConfigurationNotSupported()

    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    tidb_conf = CONF.get(datastore_version.manager)
    num_tidbsvr = int(extended_properties.get(
        'num_tidbsvr', tidb_conf.num_tidb_servers_per_cluster))
    num_pdsvr = int(extended_properties.get(
        'num_pdsvr', tidb_conf.num_pd_servers_per_cluster))
    delta_instances = num_instances + num_tidbsvr + num_pdsvr

    models.validate_instance_flavors(
        context, instances, tidb_conf.volume_support,
        tidb_conf.device_path)
    models.assert_homogeneous_cluster(instances)

    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)
    volume_type = instances[0].get('volume_type', None)
    nics = instances[0].get('nics', None)
    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"
    tikv_config = {"id": db_info.id,
                   "shard_id": utils.generate_uuid(),
                   "instance_type": "tikv",
                   "replica_set_name": replica_set_name}
    tidbsvr_config = {"id": db_info.id, "instance_type": "tidb_server"}
    pdsvr_config = {"id": db_info.id, "instance_type": "pd_server"}

    # Create the PD servers.
    for i in range(1, num_pdsvr + 1):
        instance_name = "%s-%s-%s" % (name, "pdsvr", str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=nics,
                                    configuration_id=None,
                                    cluster_config=pdsvr_config,
                                    volume_type=volume_type,
                                    locality=locality,
                                    region_name=regions[
                                        i % num_instances])
    # Create the TiDB servers.
    for i in range(1, num_tidbsvr + 1):
        instance_name = "%s-%s-%s" % (name, "tidbsvr", str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=nics,
                                    configuration_id=None,
                                    cluster_config=tidbsvr_config,
                                    volume_type=volume_type,
                                    locality=locality,
                                    region_name=regions[
                                        i % num_instances])
    # Create the TiKV members.
    for i in range(0, num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nics,
                                    configuration_id=None,
                                    cluster_config=tikv_config,
                                    volume_type=volume_type,
                                    modules=instances[i].get('modules'),
                                    locality=locality,
                                    region_name=regions[i])

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return TiDbCluster(context, db_info, datastore, datastore_version)

def test_load_api(self, get_client_mock): context = Mock() manager = "mongodb" self.assertTrue(isinstance(task_api.load(context), task_api.API)) self.assertTrue(isinstance(task_api.load(context, manager), MongoDbTaskManagerAPI))
def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality):
    nova_client = remote.create_nova_client(context)
    network_driver = (importutils.import_class(
        CONF.network_driver))(context, None)
    ds_conf = CONF.get(datastore_version.manager)
    num_instances = len(instances)

    # Run checks first
    if not network_driver.subnet_support:
        raise exception.TroveError(_(
            "The configured network driver does not support subnet "
            "management. This is required for Oracle RAC clusters."))
    quota.check_quotas(context.tenant, {'instances': num_instances})
    for instance in instances:
        if not instance.get('flavor_id'):
            raise exception.BadRequest(_("Missing required flavor_id."))
        try:
            nova_client.flavors.get(instance['flavor_id'])
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=instance['flavor_id'])
        if instance.get('volume_size'):
            raise exception.VolumeNotSupported()
        if instance.get('region_name'):
            raise exception.BadRequest(_("Instance region_name option "
                                         "not supported."))
    database = extended_properties.get('database')
    if not database:
        raise exception.BadRequest(_("Missing database name."))
    if len(database) > 8:
        raise exception.BadValue(_("Database name greater than 8 chars."))
    storage_info = check_storage_info(extended_properties)
    subnet, subnetpool, network = check_public_network_info(
        ds_conf, network_driver, num_instances, extended_properties)

    ssh_pem, ssh_pub = crypto_utils.generate_ssh_keys()
    sys_password = utils.generate_random_password(
        datastore=datastore.name)
    admin_password = utils.generate_random_password(
        datastore=datastore.name)

    # Create the cluster
    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=tasks.ClusterTasks.BUILDING_INITIAL)

    if not subnet:
        LOG.debug("Creating RAC public subnet on network {net} from "
                  "pool {pool}".format(net=network['id'],
                                       pool=subnetpool['id']))
        subnet = create_public_subnet_from_pool(
            ds_conf, network_driver, db_info.id, subnetpool, network,
            extended_properties.get('router'),
            extended_properties.get('prefixlen'))
        LOG.debug("Created subnet {sub} with CIDR {cidr}".format(
            sub=subnet['id'], cidr=subnet['cidr']))

    interconnect_network, interconnect_subnet = create_interconnect(
        ds_conf, network_driver, db_info.id)
    LOG.debug("Created interconnect network {net} with subnet "
              "{sub}".format(net=interconnect_network['id'],
                             sub=interconnect_subnet['id']))

    public_subnet_manager = rac_utils.RACPublicSubnetManager(
        subnet['cidr'])
    interconnect_subnet_manager = rac_utils.CommonSubnetManager(
        interconnect_subnet['cidr'])

    subnet = configure_public_subnet(
        ds_conf, network_driver, db_info.id, subnet,
        public_subnet_manager.allocation_pool)
    LOG.debug("RAC public subnet ({sub_id}) info: name='{name}', scans="
              "{scans}".format(sub_id=subnet['id'], name=subnet['name'],
                               scans=public_subnet_manager.scan_list))

    cluster_config = {
        'id': db_info.id,
        'instance_type': 'node',
        'storage': storage_info,
        'ssh_pem': ssh_pem,
        'ssh_pub': ssh_pub,
        'database': database,
        'sys_password': sys_password,
        'admin_password': admin_password}

    vips = (public_subnet_manager.scan_list +
            [public_subnet_manager.instance_vip(i)
             for i in range(len(instances))])
    for i, instance in enumerate(instances):
        instance_name = rac_utils.make_instance_hostname(name, i)
        nics = instance.get('nics') or []
        public_port_name = rac_utils.make_object_name(
            ds_conf, ['public', 'port', str(i + 1)], db_info.id)
        public_port = create_port(
            network_driver, public_port_name, i, subnet,
            public_subnet_manager, vips=vips)
        interconnect_port_name = rac_utils.make_object_name(
            ds_conf, ['interconnect', 'port', str(i + 1)], db_info.id)
        interconnect_port = create_port(
            network_driver, interconnect_port_name, i,
            interconnect_subnet, interconnect_subnet_manager)
        nics.append({'port-id': public_port['id']})
        nics.append({'port-id': interconnect_port['id']})
        LOG.debug("Creating instance {name} with public ip {pub} and "
                  "interconnect ip {int}".format(
                      name=instance_name,
                      pub=public_port['fixed_ips'][0]['ip_address'],
                      int=interconnect_port['fixed_ips'][0]['ip_address']))
        inst_models.Instance.create(
            context, instance_name, instance['flavor_id'],
            datastore_version.image_id, [], [], datastore,
            datastore_version, None, None,
            availability_zone=instance.get('availability_zone'),
            nics=nics, cluster_config=cluster_config,
            modules=instance.get('modules'), locality=locality)

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return OracleRACCluster(context, db_info, datastore, datastore_version)

def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality, configuration):
    if configuration:
        raise exception.ConfigurationNotSupported()

    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    mongo_conf = CONF.get(datastore_version.manager)
    num_configsvr = mongo_conf.num_config_servers_per_cluster
    num_mongos = mongo_conf.num_query_routers_per_cluster
    delta_instances = num_instances + num_configsvr + num_mongos

    models.validate_instance_flavors(context, instances,
                                     mongo_conf.volume_support,
                                     mongo_conf.device_path)
    models.assert_homogeneous_cluster(instances)
    req_volume_size = models.get_required_volume_size(
        instances, mongo_conf.volume_support)

    deltas = {'instances': delta_instances, 'volumes': req_volume_size}
    check_quotas(context.tenant, deltas)

    flavor_id = instances[0]['flavor_id']
    volume_size = instances[0].get('volume_size', None)
    nics = [instance.get('nics', None) for instance in instances]
    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"
    member_config = {"id": db_info.id,
                     "shard_id": utils.generate_uuid(),
                     "instance_type": "member",
                     "replica_set_name": replica_set_name}
    configsvr_config = {"id": db_info.id,
                        "instance_type": "config_server"}
    mongos_config = {"id": db_info.id, "instance_type": "query_router"}
    if mongo_conf.cluster_secure:
        cluster_key = utils.generate_random_password()
        member_config['key'] = cluster_key
        configsvr_config['key'] = cluster_key
        mongos_config['key'] = cluster_key

    for i in range(0, num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nics[i],
                                    configuration_id=None,
                                    cluster_config=member_config,
                                    modules=instances[i].get('modules'),
                                    locality=locality,
                                    region_name=regions[i])
    # Wrap the region index: these loops are 1-based, and regions only
    # has num_instances entries, so an unwrapped regions[i] overruns.
    for i in range(1, num_configsvr + 1):
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=configsvr_config,
                                    locality=locality,
                                    region_name=regions[
                                        (i - 1) % num_instances])
    for i in range(1, num_mongos + 1):
        instance_name = "%s-%s-%s" % (name, "mongos", str(i))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=None,
                                    nics=None,
                                    configuration_id=None,
                                    cluster_config=mongos_config,
                                    locality=locality,
                                    region_name=regions[
                                        (i - 1) % num_instances])

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return MongoDbCluster(context, db_info, datastore, datastore_version)

def create(cls, context, name, datastore, datastore_version, instances, extended_properties): LOG.debug("Initiating cluster creation.") vertica_conf = CONF.get(datastore_version.manager) num_instances = len(instances) # Matching number of instances with configured cluster_member_count if num_instances != vertica_conf.cluster_member_count: raise exception.ClusterNumInstancesNotSupported(num_instances=vertica_conf.cluster_member_count) # Checking flavors flavor_ids = [instance["flavor_id"] for instance in instances] if len(set(flavor_ids)) != 1: raise exception.ClusterFlavorsNotEqual() flavor_id = flavor_ids[0] nova_client = remote.create_nova_client(context) try: flavor = nova_client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) deltas = {"instances": num_instances} # Checking volumes volume_sizes = [instance["volume_size"] for instance in instances if instance.get("volume_size", None)] volume_size = None if vertica_conf.volume_support: if len(volume_sizes) != num_instances: raise exception.ClusterVolumeSizeRequired() if len(set(volume_sizes)) != 1: raise exception.ClusterVolumeSizesNotEqual() volume_size = volume_sizes[0] models.validate_volume_size(volume_size) deltas["volumes"] = volume_size * num_instances else: if len(volume_sizes) > 0: raise exception.VolumeNotSupported() ephemeral_support = vertica_conf.device_path if ephemeral_support and flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) check_quotas(context.tenant, deltas) nics = [instance.get("nics", None) for instance in instances] azs = [instance.get("availability_zone", None) for instance in instances] # Updating Cluster Task db_info = models.DBCluster.create( name=name, tenant_id=context.tenant, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL, ) # Creating member instances for i in range(0, num_instances): if i == 0: member_config = {"id": db_info.id, "instance_type": "master"} else: member_config = {"id": db_info.id, "instance_type": "member"} instance_name = "%s-member-%s" % (name, str(i + 1)) inst_models.Instance.create( context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, nics=nics[i], availability_zone=azs[i], configuration_id=None, cluster_config=member_config, ) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster(db_info.id) return VerticaCluster(context, db_info, datastore, datastore_version)
def create(cls, context, name, datastore, datastore_version, instances,
           extended_properties, locality):
    # TODO(amcreynolds): consider moving into CONF and even supporting
    # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
    # TODO(amcreynolds): or introduce a min/max num_instances and set
    # TODO(amcreynolds): both to 3
    num_instances = len(instances)
    if num_instances != 3:
        raise exception.ClusterNumInstancesNotSupported(num_instances=3)

    flavor_ids = [instance['flavor_id'] for instance in instances]
    if len(set(flavor_ids)) != 1:
        raise exception.ClusterFlavorsNotEqual()
    flavor_id = flavor_ids[0]
    nova_client = remote.create_nova_client(context)
    try:
        flavor = nova_client.flavors.get(flavor_id)
    except nova_exceptions.NotFound:
        raise exception.FlavorNotFound(uuid=flavor_id)

    mongo_conf = CONF.get(datastore_version.manager)
    num_configsvr = (1 if mongo_conf.num_config_servers_per_cluster == 1
                     else 3)
    num_mongos = mongo_conf.num_query_routers_per_cluster
    delta_instances = num_instances + num_configsvr + num_mongos
    deltas = {'instances': delta_instances}

    volume_sizes = [instance['volume_size'] for instance in instances
                    if instance.get('volume_size', None)]
    volume_size = None
    if mongo_conf.volume_support:
        if len(volume_sizes) != num_instances:
            raise exception.ClusterVolumeSizeRequired()
        if len(set(volume_sizes)) != 1:
            raise exception.ClusterVolumeSizesNotEqual()
        volume_size = volume_sizes[0]
        models.validate_volume_size(volume_size)
        # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
        deltas['volumes'] = volume_size * delta_instances
    else:
        # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
        if len(volume_sizes) > 0:
            raise exception.VolumeNotSupported()
        ephemeral_support = mongo_conf.device_path
        if ephemeral_support and flavor.ephemeral == 0:
            raise exception.LocalStorageNotSpecified(flavor=flavor_id)

    check_quotas(context.tenant, deltas)

    nics = [instance.get('nics', None) for instance in instances]
    nic = nics[0]
    for n in nics[1:]:
        if n != nic:
            raise ValueError(_('All cluster nics must be the same. '
                               '%(nic)s != %(n)s')
                             % {'nic': nic, 'n': n})
    azs = [instance.get('availability_zone', None)
           for instance in instances]
    regions = [instance.get('region_name', None)
               for instance in instances]

    db_info = models.DBCluster.create(
        name=name, tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL)

    replica_set_name = "rs1"
    member_config = {"id": db_info.id,
                     "shard_id": utils.generate_uuid(),
                     "instance_type": "member",
                     "replica_set_name": replica_set_name}
    configsvr_config = {"id": db_info.id,
                        "instance_type": "config_server"}
    mongos_config = {"id": db_info.id, "instance_type": "query_router"}
    if mongo_conf.cluster_secure:
        cluster_key = base64.b64encode(utils.generate_random_password())
        member_config['key'] = cluster_key
        configsvr_config['key'] = cluster_key
        mongos_config['key'] = cluster_key

    for i in range(num_instances):
        instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[i],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=member_config,
                                    modules=instances[i].get('modules'),
                                    region_name=regions[i],
                                    locality=locality)
    for i in range(num_configsvr):
        instance_name = "%s-%s-%s" % (name, "configsvr", str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[
                                        i % num_instances],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=configsvr_config,
                                    region_name=regions[
                                        i % num_instances],
                                    locality=locality)
    for i in range(num_mongos):
        instance_name = "%s-%s-%s" % (name, "mongos", str(i + 1))
        inst_models.Instance.create(context, instance_name, flavor_id,
                                    datastore_version.image_id, [], [],
                                    datastore, datastore_version,
                                    volume_size, None,
                                    availability_zone=azs[
                                        i % num_instances],
                                    nics=nic,
                                    configuration_id=None,
                                    cluster_config=mongos_config,
                                    region_name=regions[
                                        i % num_instances],
                                    locality=locality)

    task_api.load(context, datastore_version.manager).create_cluster(
        db_info.id)
    return MongoDbCluster(context, db_info, datastore, datastore_version)