Example #1
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances, extended_properties, locality):
        Redis_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)
        total_volume_allocation = 0

        # Validate and Cache flavors
        nova_client = remote.create_nova_client(context)
        unique_flavors = set(map(lambda i: i['flavor_id'], instances))
        flavor_cache = {}
        for fid in unique_flavors:
            try:
                flavor_cache.update({fid: nova_client.flavors.get(fid)})
            except nova_exceptions.NotFound:
                raise exception.FlavorNotFound(uuid=fid)

        # Checking volumes
        name_index = 1
        for instance in instances:
            if not instance.get('name'):
                instance['name'] = "%s-member-%s" % (db_info.name, name_index)
                name_index += 1
            volume_size = instance.get('volume_size')
            if Redis_conf.volume_support:
                models.validate_volume_size(volume_size)
                total_volume_allocation += volume_size
            else:
                if volume_size:
                    raise exception.VolumeNotSupported()
                ephemeral_support = Redis_conf.device_path
                flavor_id = instance['flavor_id']
                flavor = flavor_cache[flavor_id]
                if ephemeral_support and flavor.ephemeral == 0:
                    raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        # Check quotas
        quota_request = {'instances': num_instances,
                         'volumes': total_volume_allocation}
        check_quotas(context.tenant, quota_request)

        # Creating member instances
        return map(lambda instance:
                   inst_models.Instance.create(context,
                                               instance['name'],
                                               instance['flavor_id'],
                                               datastore_version.image_id,
                                               [], [],
                                               datastore, datastore_version,
                                               instance.get('volume_size'),
                                               None,
                                               instance.get(
                                                   'availability_zone', None),
                                               instance.get('nics', None),
                                               configuration_id=None,
                                               cluster_config={
                                                   "id": db_info.id,
                                                   "instance_type": "member"},
                                               locality=locality
                                               ),
                   instances)
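
All of the examples on this page reduce to the same two-step pattern: build a
deltas dict describing what the request would consume ('instances' plus, when
the datastore supports volumes, the total 'volumes' allocation), then call
check_quotas(tenant, deltas) before any member instance is created. The stub
below is a minimal, self-contained sketch of that pattern; QUOTA_LIMITS,
QuotaExceeded and this check_quotas body are illustrative stand-ins, not
Trove's actual quota engine.

# Illustrative stand-ins only -- not Trove's real quota engine.
QUOTA_LIMITS = {'instances': 10, 'volumes': 100}

class QuotaExceeded(Exception):
    pass

def check_quotas(tenant, deltas, usage=None):
    """Reject the request if any resource delta would exceed its limit."""
    usage = usage or {}
    for resource, requested in deltas.items():
        if usage.get(resource, 0) + requested > QUOTA_LIMITS.get(resource, 0):
            raise QuotaExceeded("%s quota exceeded for tenant %s"
                                % (resource, tenant))

# The same accounting the cluster APIs above perform before creating members.
instances = [{'flavor_id': '7', 'volume_size': 2} for _ in range(3)]
deltas = {'instances': len(instances),
          'volumes': sum(inst['volume_size'] for inst in instances)}
check_quotas('some-tenant-id', deltas)
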
Example #2
    def _validate_cluster_instances(context, instances, datastore,
                                    datastore_version):
        """Validate the flavor and volume"""
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Check number of instances is at least min_cluster_member_count
        if num_instances < ds_conf.min_cluster_member_count:
            raise exception.ClusterNumInstancesNotLargeEnough(
                num_instances=ds_conf.min_cluster_member_count)

        # Checking flavors and get delta for quota check
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        deltas = {'instances': num_instances}

        # Checking volumes and get delta for quota check
        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        volume_size = None
        if ds_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            cluster_models.validate_volume_size(volume_size)
            deltas['volumes'] = volume_size * num_instances
        else:
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = ds_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        # quota check
        check_quotas(context.tenant, deltas)

        # Checking networks are same for the cluster
        instance_nics = []
        for instance in instances:
            nics = instance.get('nics')
            if nics:
                instance_nics.append(nics[0].get('net-id'))
        if len(set(instance_nics)) > 1:
            raise exception.ClusterNetworksNotEqual()
        if not instance_nics:
            return
        instance_nic = instance_nics[0]
        try:
            nova_client.networks.get(instance_nic)
        except nova_exceptions.NotFound:
            raise exception.NetworkNotFound(uuid=instance_nic)
Example #3
File: api.py Project: fabian4/trove
    def add_shard(self):

        if self.db_info.task_status != ClusterTasks.NONE:
            current_task = self.db_info.task_status.name
            msg = _("This action cannot be performed on the cluster while "
                    "the current cluster task is '%s'.") % current_task
            LOG.error(msg)
            raise exception.UnprocessableEntity(msg)

        db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
                                                   type='member').all()
        num_unique_shards = len(set([db_inst.shard_id for db_inst
                                     in db_insts]))
        if num_unique_shards == 0:
            msg = _("This action cannot be performed on the cluster as no "
                    "reference shard exists.")
            LOG.error(msg)
            raise exception.UnprocessableEntity(msg)

        arbitrary_shard_id = db_insts[0].shard_id
        members_in_shard = [db_inst for db_inst in db_insts
                            if db_inst.shard_id == arbitrary_shard_id]
        num_members_per_shard = len(members_in_shard)
        a_member = inst_models.load_any_instance(self.context,
                                                 members_in_shard[0].id)
        deltas = {'instances': num_members_per_shard}
        volume_size = a_member.volume_size
        if volume_size:
            deltas['volumes'] = volume_size * num_members_per_shard
        check_quotas(self.context.tenant, deltas)
        new_replica_set_name = "rs" + str(num_unique_shards + 1)
        new_shard_id = utils.generate_uuid()
        dsv_manager = (datastore_models.DatastoreVersion.
                       load_by_uuid(db_insts[0].datastore_version_id).manager)
        manager = task_api.load(self.context, dsv_manager)
        key = manager.get_key(a_member)
        member_config = {"id": self.id,
                         "shard_id": new_shard_id,
                         "instance_type": "member",
                         "replica_set_name": new_replica_set_name,
                         "key": key}
        for i in range(1, num_members_per_shard + 1):
            instance_name = "%s-%s-%s" % (self.name, new_replica_set_name,
                                          str(i))
            inst_models.Instance.create(self.context, instance_name,
                                        a_member.flavor_id,
                                        a_member.datastore_version.image_id,
                                        [], [], a_member.datastore,
                                        a_member.datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=member_config)

        self.update_db(task_status=ClusterTasks.ADDING_SHARD)
        manager.mongodb_add_shard_cluster(
            self.id,
            new_shard_id,
            new_replica_set_name)
Example #4
    def _validate_cluster_instances(context, instances, datastore,
                                    datastore_version):
        """Validate the flavor and volume"""
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Check number of instances is at least min_cluster_member_count
        if num_instances < ds_conf.min_cluster_member_count:
            raise exception.ClusterNumInstancesNotLargeEnough(
                num_instances=ds_conf.min_cluster_member_count)

        # Checking flavors and get delta for quota check
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        deltas = {'instances': num_instances}

        # Checking volumes and get delta for quota check
        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        volume_size = None
        if ds_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            cluster_models.validate_volume_size(volume_size)
            deltas['volumes'] = volume_size * num_instances
        else:
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = ds_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        # quota check
        check_quotas(context.tenant, deltas)

        # Checking networks are same for the cluster
        instance_nics = []
        for instance in instances:
            nics = instance.get('nics')
            if nics:
                instance_nics.append(nics[0].get('net-id'))
        if len(set(instance_nics)) > 1:
            raise exception.ClusterNetworksNotEqual()
        if not instance_nics:
            return
        instance_nic = instance_nics[0]
        try:
            nova_client.networks.get(instance_nic)
        except nova_exceptions.NotFound:
            raise exception.NetworkNotFound(uuid=instance_nic)
Example #5
    def _create_cluster_instances(cls, context, cluster_id, cluster_name,
                                  datastore, datastore_version, instances,
                                  extended_properties, locality):
        LOG.debug("Processing a request for new cluster instances.")

        cassandra_conf = CONF.get(datastore_version.manager)
        eph_enabled = cassandra_conf.device_path
        vol_enabled = cassandra_conf.volume_support

        # Validate instance flavors.
        models.get_flavors_from_instance_defs(context, instances, vol_enabled,
                                              eph_enabled)

        # Compute the total volume allocation.
        req_volume_size = models.get_required_volume_size(
            instances, vol_enabled)

        # Check requirements against quota.
        num_new_instances = len(instances)
        deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
        check_quotas(context.tenant, deltas)

        # Creating member instances.
        num_instances = len(
            CassandraClusterTasks.find_cluster_node_ids(cluster_id))
        new_instances = []
        for instance_idx, instance in enumerate(instances, num_instances + 1):
            instance_az = instance.get('availability_zone', None)

            member_config = {
                "id": cluster_id,
                "instance_type": "member",
                "dc": cls.DEFAULT_DATA_CENTER,
                "rack": instance_az or cls.DEFAULT_RACK
            }

            instance_name = instance.get('name')
            if not instance_name:
                instance_name = cls._build_instance_name(
                    cluster_name, member_config['dc'], member_config['rack'],
                    instance_idx)

            new_instance = inst_models.Instance.create(
                context,
                instance_name,
                instance['flavor_id'],
                datastore_version.image_id, [], [],
                datastore,
                datastore_version,
                instance['volume_size'],
                None,
                nics=instance.get('nics', None),
                availability_zone=instance_az,
                configuration_id=None,
                cluster_config=member_config,
                locality=locality)

            new_instances.append(new_instance)

        return new_instances
Example #6
    def add_shard(self):

        if self.db_info.task_status != ClusterTasks.NONE:
            current_task = self.db_info.task_status.name
            msg = _("This action cannot be performed on the cluster while "
                    "the current cluster task is '%s'.") % current_task
            LOG.error(msg)
            raise exception.UnprocessableEntity(msg)

        db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
                                                   type='member').all()
        num_unique_shards = len(set([db_inst.shard_id for db_inst
                                     in db_insts]))
        if num_unique_shards == 0:
            msg = _("This action cannot be performed on the cluster as no "
                    "reference shard exists.")
            LOG.error(msg)
            raise exception.UnprocessableEntity(msg)

        arbitrary_shard_id = db_insts[0].shard_id
        members_in_shard = [db_inst for db_inst in db_insts
                            if db_inst.shard_id == arbitrary_shard_id]
        num_members_per_shard = len(members_in_shard)
        a_member = inst_models.load_any_instance(self.context,
                                                 members_in_shard[0].id)
        deltas = {'instances': num_members_per_shard}
        volume_size = a_member.volume_size
        if volume_size:
            deltas['volumes'] = volume_size * num_members_per_shard
        check_quotas(self.context.tenant, deltas)
        new_replica_set_name = "rs" + str(num_unique_shards + 1)
        new_shard_id = utils.generate_uuid()
        dsv_manager = (datastore_models.DatastoreVersion.
                       load_by_uuid(db_insts[0].datastore_version_id).manager)
        manager = task_api.load(self.context, dsv_manager)
        key = manager.get_key(a_member)
        member_config = {"id": self.id,
                         "shard_id": new_shard_id,
                         "instance_type": "member",
                         "replica_set_name": new_replica_set_name,
                         "key": key}
        for i in range(1, num_members_per_shard + 1):
            instance_name = "%s-%s-%s" % (self.name, new_replica_set_name,
                                          str(i))
            inst_models.Instance.create(self.context, instance_name,
                                        a_member.flavor_id,
                                        a_member.datastore_version.image_id,
                                        [], [], a_member.datastore,
                                        a_member.datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=member_config)

        self.update_db(task_status=ClusterTasks.ADDING_SHARD)
        manager.mongodb_add_shard_cluster(
            self.id,
            new_shard_id,
            new_replica_set_name)
Example #7
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances):
        Redis_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)
        total_volume_allocation = 0

        # Validate and Cache flavors
        nova_client = remote.create_nova_client(context)
        unique_flavors = set(map(lambda i: i['flavor_id'], instances))
        flavor_cache = {}
        for fid in unique_flavors:
            try:
                flavor_cache.update({fid: nova_client.flavors.get(fid)})
            except nova_exceptions.NotFound:
                raise exception.FlavorNotFound(uuid=fid)

        # Checking volumes
        name_index = 1
        for instance in instances:
            if not instance.get('name'):
                instance['name'] = "%s-member-%s" % (db_info.name, name_index)
                name_index += 1
            volume_size = instance.get('volume_size')
            if Redis_conf.volume_support:
                models.validate_volume_size(volume_size)
                total_volume_allocation += volume_size
            else:
                if volume_size:
                    raise exception.VolumeNotSupported()
                ephemeral_support = Redis_conf.device_path
                flavor_id = instance['flavor_id']
                flavor = flavor_cache[flavor_id]
                if ephemeral_support and flavor.ephemeral == 0:
                    raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        # Check quotas
        quota_request = {
            'instances': num_instances,
            'volumes': total_volume_allocation
        }
        check_quotas(context.tenant, quota_request)

        # Creating member instances
        return map(
            lambda instance: inst_models.Instance.create(
                context,
                instance['name'],
                instance['flavor_id'],
                datastore_version.image_id, [], [],
                datastore,
                datastore_version,
                instance.get('volume_size'),
                None,
                instance.get('availability_zone', None),
                instance.get('nics', None),
                configuration_id=None,
                cluster_config={
                    "id": db_info.id,
                    "instance_type": "member"
                }), instances)
Example #8
    def _create_cluster_instances(
            cls, context, cluster_id, cluster_name,
            datastore, datastore_version, instances, extended_properties,
            locality):
        LOG.debug("Processing a request for new cluster instances.")

        cassandra_conf = CONF.get(datastore_version.manager)
        eph_enabled = cassandra_conf.device_path
        vol_enabled = cassandra_conf.volume_support

        # Validate instance flavors.
        models.validate_instance_flavors(context, instances,
                                         vol_enabled, eph_enabled)

        # Compute the total volume allocation.
        req_volume_size = models.get_required_volume_size(instances,
                                                          vol_enabled)

        # Check requirements against quota.
        num_new_instances = len(instances)
        deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
        check_quotas(context.tenant, deltas)

        # Creating member instances.
        num_instances = len(
            CassandraClusterTasks.find_cluster_node_ids(cluster_id))
        new_instances = []
        for instance_idx, instance in enumerate(instances, num_instances + 1):
            instance_az = instance.get('availability_zone', None)

            member_config = {"id": cluster_id,
                             "instance_type": "member",
                             "dc": cls.DEFAULT_DATA_CENTER,
                             "rack": instance_az or cls.DEFAULT_RACK}

            instance_name = instance.get('name')
            if not instance_name:
                instance_name = cls._build_instance_name(
                    cluster_name, member_config['dc'], member_config['rack'],
                    instance_idx)

            new_instance = inst_models.Instance.create(
                context, instance_name,
                instance['flavor_id'],
                datastore_version.image_id,
                [], [],
                datastore, datastore_version,
                instance['volume_size'], None,
                nics=instance.get('nics', None),
                availability_zone=instance_az,
                configuration_id=None,
                cluster_config=member_config,
                modules=instance.get('modules'),
                region_name=instance.get('region_name'),
                locality=locality)

            new_instances.append(new_instance)

        return new_instances
Example #9
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances, extended_properties, locality):
        redis_conf = CONF.get(datastore_version.manager)
        ephemeral_enabled = redis_conf.device_path
        volume_enabled = redis_conf.volume_support

        num_instances = len(instances)

        models.validate_instance_flavors(context, instances, volume_enabled,
                                         ephemeral_enabled)

        total_volume_allocation = models.get_required_volume_size(
            instances, volume_enabled)

        models.assert_homogeneous_cluster(instances)

        models.validate_instance_nics(context, instances)

        name_index = 1
        for instance in instances:
            if not instance.get('name'):
                instance['name'] = "%s-member-%s" % (db_info.name, name_index)
                name_index += 1

        # Check quotas
        quota_request = {
            'instances': num_instances,
            'volumes': total_volume_allocation
        }
        check_quotas(context.project_id, quota_request)

        # Creating member instances
        return [
            inst_models.Instance.create(
                context,
                instance['name'],
                instance['flavor_id'],
                datastore_version.image_id, [], [],
                datastore,
                datastore_version,
                instance.get('volume_size'),
                None,
                instance.get('availability_zone', None),
                instance.get('nics', None),
                configuration_id=None,
                cluster_config={
                    "id": db_info.id,
                    "instance_type": "member"
                },
                volume_type=instance.get('volume_type', None),
                modules=instance.get('modules'),
                locality=locality,
                region_name=instance.get('region_name'))
            for instance in instances
        ]
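
Example #9 and the later examples delegate the per-field equality checks that
Examples #2 and #15 spell out by hand to helpers such as
assert_homogeneous_cluster and validate_instance_nics. The sketch below shows
the kind of check such a helper performs; it is inferred from the explicit
code above and is not Trove's actual implementation.

def assert_homogeneous_cluster(instances):
    """Require identical flavor and volume size across all members.

    Inferred from the explicit checks in Examples #2 and #15; the real
    Trove helper may differ in detail and in the exceptions it raises.
    """
    flavor_ids = {inst['flavor_id'] for inst in instances}
    if len(flavor_ids) != 1:
        raise ValueError("all cluster members must use the same flavor")
    volume_sizes = {inst.get('volume_size') for inst in instances}
    if len(volume_sizes) != 1:
        raise ValueError("all cluster members must use the same volume size")
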
Example #10
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances, extended_properties, locality,
                          new_cluster=True):
        vertica_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        existing = inst_models.DBInstance.find_all(cluster_id=db_info.id).all()
        num_existing = len(existing)

        # Matching number of instances with configured cluster_member_count
        if (new_cluster and
                num_instances != vertica_conf.cluster_member_count):
            raise exception.ClusterNumInstancesNotSupported(
                num_instances=vertica_conf.cluster_member_count)

        models.validate_instance_flavors(
            context, instances, vertica_conf.volume_support,
            vertica_conf.device_path)

        req_volume_size = models.get_required_volume_size(
            instances, vertica_conf.volume_support)
        models.assert_homogeneous_cluster(instances)

        deltas = {'instances': num_instances, 'volumes': req_volume_size}

        check_quotas(context.tenant, deltas)

        flavor_id = instances[0]['flavor_id']
        volume_size = instances[0].get('volume_size', None)

        nics = [instance.get('nics', None) for instance in instances]

        azs = [instance.get('availability_zone', None)
               for instance in instances]

        # Creating member instances
        minstances = []
        for i in range(0, num_instances):
            if i == 0 and new_cluster:
                member_config = {"id": db_info.id, "instance_type": "master"}
            else:
                member_config = {"id": db_info.id, "instance_type": "member"}
            instance_name = "%s-member-%s" % (db_info.name,
                                              str(i + num_existing + 1))
            minstances.append(
                inst_models.Instance.create(
                    context, instance_name, flavor_id,
                    datastore_version.image_id, [], [], datastore,
                    datastore_version, volume_size, None,
                    nics=nics[i], availability_zone=azs[i],
                    configuration_id=None, cluster_config=member_config,
                    locality=locality, modules=instances[i].get('modules'))
            )
        return minstances
Example #11
    def _create_insts(context, cluster_id, cluster_name, datastore,
                      datastore_version, instances, locality, configuration_id
                     ):  # pylint: disable=too-many-arguments, too-many-locals

        # 1. Check quotas
        num = len(instances)
        manager_conf = CONF.get(datastore_version.manager)
        total_volume_allocation = models.get_required_volume_size(
            instances, manager_conf.volume_support)
        quota_request = {'instances': num, 'volumes': total_volume_allocation}
        check_quotas(context.project_id, quota_request)

        # 2. Name new instances
        alls = inst_models.DBInstance.find_all(cluster_id=cluster_id).all()
        index = 1
        if alls:
            index += len(alls)

        # 3. Create instances
        new_insts = []
        member_config = {"id": cluster_id, "instance_type": "member"}
        for instance in instances:
            if not instance.get('name'):
                instance['name'] = "%s-member-%s" % (cluster_name, index)
                index += 1
            instance_name = instance.get('name')
            instance_az = instance.get('availability_zone', None)
            LOG.debug("new instance_name=%s instance_az=%s", instance_name,
                      instance_az)
            new_inst = inst_models.Instance.create(
                context,
                instance_name,
                instance['flavor_id'],
                datastore_version.image_id, [], [],
                datastore,
                datastore_version,
                instance['volume_size'],
                None,
                nics=instance.get('nics', None),
                availability_zone=instance_az,
                configuration_id=configuration_id,
                cluster_config=member_config,
                volume_type=instance.get('volume_type', None),
                modules=instance.get('modules'),
                locality=locality,
                region_name=instance.get('region_name'))
            new_insts.append(new_inst)
        return new_insts
Example #12
File: api.py Project: no2a/trove
    def _validate_cluster_instances(context, instances, datastore,
                                    datastore_version):
        """Validate the flavor and volume"""
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Check number of instances is at least min_cluster_member_count
        if num_instances < ds_conf.min_cluster_member_count:
            raise exception.ClusterNumInstancesNotLargeEnough(
                num_instances=ds_conf.min_cluster_member_count)

        # Checking volumes and get delta for quota check
        cluster_models.validate_instance_flavors(context, instances,
                                                 ds_conf.volume_support,
                                                 ds_conf.device_path)

        req_volume_size = cluster_models.get_required_volume_size(
            instances, ds_conf.volume_support)

        cluster_models.assert_homogeneous_cluster(instances)

        deltas = {'instances': num_instances, 'volumes': req_volume_size}

        # quota check
        check_quotas(context.tenant, deltas)

        # Checking networks are same for the cluster
        instance_nics = []
        for instance in instances:
            nics = instance.get('nics')
            if nics:
                instance_nics.append(nics[0].get('net-id'))
        if len(set(instance_nics)) > 1:
            raise exception.ClusterNetworksNotEqual()
        if not instance_nics:
            return
        instance_nic = instance_nics[0]
        try:
            nova_client = remote.create_nova_client(context)
            nova_client.networks.get(instance_nic)
        except nova_exceptions.NotFound:
            raise exception.NetworkNotFound(uuid=instance_nic)
Example #13
    def _validate_cluster_instances(context, instances, datastore,
                                    datastore_version):
        """Validate the flavor and volume"""
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Check number of instances is at least min_cluster_member_count
        if num_instances < ds_conf.min_cluster_member_count:
            raise exception.ClusterNumInstancesNotLargeEnough(
                num_instances=ds_conf.min_cluster_member_count)

        # Checking volumes and get delta for quota check
        cluster_models.validate_instance_flavors(
            context, instances, ds_conf.volume_support, ds_conf.device_path)

        req_volume_size = cluster_models.get_required_volume_size(
            instances, ds_conf.volume_support)

        cluster_models.assert_homogeneous_cluster(instances)

        deltas = {'instances': num_instances, 'volumes': req_volume_size}

        # quota check
        check_quotas(context.tenant, deltas)

        # Checking networks are same for the cluster
        instance_nics = []
        for instance in instances:
            nics = instance.get('nics')
            if nics:
                instance_nics.append(nics[0].get('net-id'))
        if len(set(instance_nics)) > 1:
            raise exception.ClusterNetworksNotEqual()
        if not instance_nics:
            return
        instance_nic = instance_nics[0]
        try:
            nova_client = remote.create_nova_client(context)
            nova_client.networks.get(instance_nic)
        except nova_exceptions.NotFound:
            raise exception.NetworkNotFound(uuid=instance_nic)
Example #14
    def _validate_cluster_instances(context, instances, datastore,
                                    datastore_version):
        """Validate the flavor and volume"""
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Checking volumes and get delta for quota check
        cluster_models.validate_instance_flavors(context, instances,
                                                 ds_conf.volume_support,
                                                 ds_conf.device_path)

        req_volume_size = cluster_models.get_required_volume_size(
            instances, ds_conf.volume_support)

        cluster_models.assert_homogeneous_cluster(instances)

        deltas = {'instances': num_instances, 'volumes': req_volume_size}

        # quota check
        check_quotas(context.tenant, deltas)

        # Checking networks are same for the cluster
        cluster_models.validate_instance_nics(context, instances)
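
Example #14 is the most condensed form: flavor validation, volume accounting,
homogeneity and NIC checks are all delegated to cluster_models helpers,
leaving only the quota call inline. For reference, here is one plausible
reading of get_required_volume_size, reconstructed from the manual volume
accounting in Examples #1 and #15; the real helper may differ.

def get_required_volume_size(instances, volume_enabled):
    """Return the total volume allocation implied by the request.

    Reconstructed from the manual checks in Examples #1 and #15;
    illustrative only.
    """
    volume_sizes = [inst['volume_size'] for inst in instances
                    if inst.get('volume_size')]
    if volume_enabled:
        if len(volume_sizes) != len(instances):
            raise ValueError("every instance needs a volume_size")
        return sum(volume_sizes)
    if volume_sizes:
        raise ValueError("volumes are not supported by this datastore")
    return 0
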
Example #15
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances, new_cluster):
        vertica_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        existing = inst_models.DBInstance.find_all(cluster_id=db_info.id).all()
        num_existing = len(existing)

        # Matching number of instances with configured cluster_member_count
        if new_cluster \
                and num_instances != vertica_conf.cluster_member_count:
            raise exception.ClusterNumInstancesNotSupported(
                num_instances=vertica_conf.cluster_member_count)

        # Checking flavors
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        deltas = {'instances': num_instances}

        # Checking volumes
        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        volume_size = None
        if vertica_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            deltas['volumes'] = volume_size * num_instances
        else:
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = vertica_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        nics = [instance.get('nics', None) for instance in instances]

        azs = [instance.get('availability_zone', None)
               for instance in instances]

        # Creating member instances
        minstances = []
        for i in range(0, num_instances):
            if i == 0 and new_cluster:
                member_config = {"id": db_info.id, "instance_type": "master"}
            else:
                member_config = {"id": db_info.id, "instance_type": "member"}
            instance_name = "%s-member-%s" % (db_info.name,
                                              str(i + num_existing + 1))
            minstances.append(
                inst_models.Instance.create(context, instance_name,
                                            flavor_id,
                                            datastore_version.image_id,
                                            [], [], datastore,
                                            datastore_version,
                                            volume_size, None,
                                            nics=nics[i],
                                            availability_zone=azs[i],
                                            configuration_id=None,
                                            cluster_config=member_config)
            )
        return minstances
Example #16
    def create(cls, context, name, datastore, datastore_version, instances,
               extended_properties, locality, configuration):

        if configuration:
            raise exception.ConfigurationNotSupported()

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
        # TODO(amcreynolds): or introduce a min/max num_instances and set
        # TODO(amcreynolds): both to 3
        num_instances = len(instances)
        if num_instances != 3:
            raise exception.ClusterNumInstancesNotSupported(num_instances=3)

        mongo_conf = CONF.get(datastore_version.manager)
        num_configsvr = mongo_conf.num_config_servers_per_cluster
        num_mongos = mongo_conf.num_query_routers_per_cluster
        delta_instances = num_instances + num_configsvr + num_mongos

        models.validate_instance_flavors(context, instances,
                                         mongo_conf.volume_support,
                                         mongo_conf.device_path)
        models.assert_homogeneous_cluster(instances)

        req_volume_size = models.get_required_volume_size(
            instances, mongo_conf.volume_support)

        deltas = {'instances': delta_instances, 'volumes': req_volume_size}

        check_quotas(context.tenant, deltas)

        flavor_id = instances[0]['flavor_id']
        volume_size = instances[0].get('volume_size', None)

        nics = [instance.get('nics', None) for instance in instances]

        azs = [
            instance.get('availability_zone', None) for instance in instances
        ]

        regions = [instance.get('region_name', None) for instance in instances]

        db_info = models.DBCluster.create(
            name=name,
            tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        replica_set_name = "rs1"

        member_config = {
            "id": db_info.id,
            "shard_id": utils.generate_uuid(),
            "instance_type": "member",
            "replica_set_name": replica_set_name
        }

        configsvr_config = {"id": db_info.id, "instance_type": "config_server"}

        mongos_config = {"id": db_info.id, "instance_type": "query_router"}

        if mongo_conf.cluster_secure:
            cluster_key = utils.generate_random_password()
            member_config['key'] = cluster_key
            configsvr_config['key'] = cluster_key
            mongos_config['key'] = cluster_key

        for i in range(0, num_instances):
            instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        availability_zone=azs[i],
                                        nics=nics[i],
                                        configuration_id=None,
                                        cluster_config=member_config,
                                        modules=instances[i].get('modules'),
                                        locality=locality,
                                        region_name=regions[i])

        for i in range(1, num_configsvr + 1):
            instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=configsvr_config,
                                        locality=locality,
                                        region_name=regions[i])

        for i in range(1, num_mongos + 1):
            instance_name = "%s-%s-%s" % (name, "mongos", str(i))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=mongos_config,
                                        locality=locality,
                                        region_name=regions[i])

        task_api.load(context,
                      datastore_version.manager).create_cluster(db_info.id)

        return MongoDbCluster(context, db_info, datastore, datastore_version)
Example #17
 def _check_quotas(context, instances):
     deltas = {
         'instances': len(instances),
         'volumes': sum([instance['volume_size'] for instance in instances])
     }
     check_quotas(context.tenant, deltas)
Example #18
 def _check_quotas(context, instances):
     deltas = {'instances': len(instances),
               'volumes': sum([instance['volume_size']
                               for instance in instances])}
     check_quotas(context.tenant, deltas)
Example #19
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality, configuration):

        if configuration:
            raise exception.ConfigurationNotSupported()

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
        # TODO(amcreynolds): or introduce a min/max num_instances and set
        # TODO(amcreynolds): both to 3
        num_instances = len(instances)
        if num_instances != 3:
            raise exception.ClusterNumInstancesNotSupported(num_instances=3)

        mongo_conf = CONF.get(datastore_version.manager)
        num_configsvr = (1 if mongo_conf.num_config_servers_per_cluster == 1
                         else 3)
        num_mongos = mongo_conf.num_query_routers_per_cluster
        delta_instances = num_instances + num_configsvr + num_mongos

        models.validate_instance_flavors(
            context, instances, mongo_conf.volume_support,
            mongo_conf.device_path)
        models.assert_homogeneous_cluster(instances)

        req_volume_size = models.get_required_volume_size(
            instances, mongo_conf.volume_support)

        deltas = {'instances': delta_instances, 'volumes': req_volume_size}

        check_quotas(context.tenant, deltas)

        flavor_id = instances[0]['flavor_id']
        volume_size = instances[0].get('volume_size', None)

        nics = [instance.get('nics', None) for instance in instances]
        nic = nics[0]
        for n in nics[1:]:
            if n != nic:
                raise ValueError(_('All cluster nics must be the same. '
                                   '%(nic)s != %(n)s')
                                 % {'nic': nic, 'n': n})

        azs = [instance.get('availability_zone', None)
               for instance in instances]

        regions = [instance.get('region_name', None)
                   for instance in instances]

        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        replica_set_name = "rs1"

        member_config = {"id": db_info.id,
                         "shard_id": utils.generate_uuid(),
                         "instance_type": "member",
                         "replica_set_name": replica_set_name}

        configsvr_config = {"id": db_info.id,
                            "instance_type": "config_server"}

        mongos_config = {"id": db_info.id,
                         "instance_type": "query_router"}

        if mongo_conf.cluster_secure:
            cluster_key = base64.b64encode(utils.generate_random_password())
            member_config['key'] = cluster_key
            configsvr_config['key'] = cluster_key
            mongos_config['key'] = cluster_key

        for i in range(num_instances):
            instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=azs[i],
                                        nics=nic,
                                        configuration_id=None,
                                        cluster_config=member_config,
                                        modules=instances[i].get('modules'),
                                        locality=locality,
                                        region_name=regions[i])

        for i in range(num_configsvr):
            instance_name = "%s-%s-%s" % (name, "configsvr", str(i + 1))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=azs[i %
                                                              num_instances],
                                        nics=nic,
                                        configuration_id=None,
                                        cluster_config=configsvr_config,
                                        locality=locality,
                                        region_name=regions[i])

        for i in range(num_mongos):
            instance_name = "%s-%s-%s" % (name, "mongos", str(i + 1))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=azs[i %
                                                              num_instances],
                                        nics=nic,
                                        configuration_id=None,
                                        cluster_config=mongos_config,
                                        locality=locality,
                                        region_name=regions[i])

        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return MongoDbCluster(context, db_info, datastore, datastore_version)
Example #20
File: api.py Project: flg77/trove
    def create(cls, context, name, datastore, datastore_version, instances):

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
        # TODO(amcreynolds): or introduce a min/max num_instances and set
        # TODO(amcreynolds): both to 3
        num_instances = len(instances)
        if num_instances != 3:
            raise exception.ClusterNumInstancesNotSupported(num_instances=3)

        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        mongo_conf = CONF.get(datastore_version.manager)
        num_configsvr = mongo_conf.num_config_servers_per_cluster
        num_mongos = mongo_conf.num_query_routers_per_cluster
        delta_instances = num_instances + num_configsvr + num_mongos
        deltas = {'instances': delta_instances}

        volume_sizes = [
            instance['volume_size'] for instance in instances
            if instance.get('volume_size', None)
        ]
        volume_size = None
        if mongo_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
            deltas['volumes'] = volume_size * delta_instances
        else:
            # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = mongo_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        db_info = models.DBCluster.create(
            name=name,
            tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        replica_set_name = "rs1"

        member_config = {
            "id": db_info.id,
            "shard_id": utils.generate_uuid(),
            "instance_type": "member",
            "replica_set_name": replica_set_name
        }
        for i in range(1, num_instances + 1):
            instance_name = "%s-%s-%s" % (name, replica_set_name, str(i))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=member_config)

        configsvr_config = {"id": db_info.id, "instance_type": "config_server"}
        for i in range(1, num_configsvr + 1):
            instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=configsvr_config)

        mongos_config = {"id": db_info.id, "instance_type": "query_router"}
        for i in range(1, num_mongos + 1):
            instance_name = "%s-%s-%s" % (name, "mongos", str(i))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=mongos_config)

        task_api.load(context,
                      datastore_version.manager).create_cluster(db_info.id)

        return MongoDbCluster(context, db_info, datastore, datastore_version)
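
In the MongoDB variants the quota delta covers more than the requested
members: config servers and query routers are added to the instance count,
and, as the TODO notes, they are assumed to use the same flavor and disk, so
the volume delta is volume_size * delta_instances. A quick worked example
with assumed CONF values:

# Assumed values, for illustration only.
num_instances = 3        # requested cluster members
num_configsvr = 3        # mongo_conf.num_config_servers_per_cluster
num_mongos = 2           # mongo_conf.num_query_routers_per_cluster
volume_size = 2          # GB per instance, shared by every node type

delta_instances = num_instances + num_configsvr + num_mongos   # 8
deltas = {'instances': delta_instances,
          'volumes': volume_size * delta_instances}            # 16 GB
# check_quotas(context.tenant, deltas) is then called with this request.
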
Example #21
 def _check_quotas(context, instances):
     deltas = {"instances": len(instances), "volumes": sum([instance["volume_size"] for instance in instances])}
     check_quotas(context.tenant, deltas)
Example #22
    def create(cls, context, name, datastore, datastore_version, instances,
               extended_properties):
        LOG.debug("Initiating cluster creation.")
        vertica_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Matching number of instances with configured cluster_member_count
        if num_instances != vertica_conf.cluster_member_count:
            raise exception.ClusterNumInstancesNotSupported(
                num_instances=vertica_conf.cluster_member_count)

        # Checking flavors
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        deltas = {'instances': num_instances}

        # Checking volumes
        volume_sizes = [
            instance['volume_size'] for instance in instances
            if instance.get('volume_size', None)
        ]
        volume_size = None
        if vertica_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            deltas['volumes'] = volume_size * num_instances
        else:
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = vertica_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        nics = [instance.get('nics', None) for instance in instances]

        azs = [
            instance.get('availability_zone', None) for instance in instances
        ]

        # Updating Cluster Task
        db_info = models.DBCluster.create(
            name=name,
            tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        # Creating member instances
        for i in range(0, num_instances):
            if i == 0:
                member_config = {"id": db_info.id, "instance_type": "master"}
            else:
                member_config = {"id": db_info.id, "instance_type": "member"}
            instance_name = "%s-member-%s" % (name, str(i + 1))
            inst_models.Instance.create(context,
                                        instance_name,
                                        flavor_id,
                                        datastore_version.image_id, [], [],
                                        datastore,
                                        datastore_version,
                                        volume_size,
                                        None,
                                        nics=nics[i],
                                        availability_zone=azs[i],
                                        configuration_id=None,
                                        cluster_config=member_config)

        # Hand off to the taskmanager to continue cluster configuration
        task_api.load(context,
                      datastore_version.manager).create_cluster(db_info.id)

        return VerticaCluster(context, db_info, datastore, datastore_version)
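
The flavor and volume checks above reduce to "every member must be identical". A standalone sketch of that validation, assuming the volume_support path (hypothetical helper, not part of Trove):

def validate_homogeneous(instances, expected_count):
    # Illustrative restatement of the flavor/volume checks in create() above.
    if len(instances) != expected_count:
        raise ValueError("cluster requires exactly %d instances" % expected_count)
    flavor_ids = {inst["flavor_id"] for inst in instances}
    if len(flavor_ids) != 1:
        raise ValueError("all cluster members must use the same flavor")
    volume_sizes = {inst.get("volume_size") for inst in instances}
    if None in volume_sizes or len(volume_sizes) != 1:
        raise ValueError("all cluster members must request the same volume size")
    return flavor_ids.pop(), volume_sizes.pop()

flavor_id, volume_size = validate_homogeneous(
    [{"flavor_id": "7", "volume_size": 10}] * 3, expected_count=3)
assert (flavor_id, volume_size) == ("7", 10)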
Exemplo n.º 23
0
    def _create_instances(context,
                          db_info,
                          datastore,
                          datastore_version,
                          instances,
                          extended_properties,
                          locality,
                          new_cluster=True):
        vertica_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        existing = inst_models.DBInstance.find_all(cluster_id=db_info.id).all()
        num_existing = len(existing)

        # Instance count must match the configured cluster_member_count
        if (new_cluster
                and num_instances != vertica_conf.cluster_member_count):
            raise exception.ClusterNumInstancesNotSupported(
                num_instances=vertica_conf.cluster_member_count)

        # Checking flavors
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        deltas = {'instances': num_instances}

        # Checking volumes
        volume_sizes = [
            instance['volume_size'] for instance in instances
            if instance.get('volume_size', None)
        ]
        volume_size = None
        if vertica_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            deltas['volumes'] = volume_size * num_instances
        else:
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = vertica_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        nics = [instance.get('nics', None) for instance in instances]

        azs = [
            instance.get('availability_zone', None) for instance in instances
        ]

        # Creating member instances
        minstances = []
        for i in range(0, num_instances):
            if i == 0 and new_cluster:
                member_config = {"id": db_info.id, "instance_type": "master"}
            else:
                member_config = {"id": db_info.id, "instance_type": "member"}
            instance_name = "%s-member-%s" % (db_info.name,
                                              str(i + num_existing + 1))
            minstances.append(
                inst_models.Instance.create(context,
                                            instance_name,
                                            flavor_id,
                                            datastore_version.image_id, [], [],
                                            datastore,
                                            datastore_version,
                                            volume_size,
                                            None,
                                            nics=nics[i],
                                            availability_zone=azs[i],
                                            configuration_id=None,
                                            cluster_config=member_config,
                                            locality=locality))
        return minstances
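
When growing an existing cluster (new_cluster=False), new member names continue the numbering from the rows already attached to the cluster, which is what the num_existing offset does. A quick standalone illustration of that naming scheme (values made up):

def member_name(cluster_name, num_existing, batch_index):
    # batch_index is 0-based within the new batch; names are 1-based overall.
    return "%s-member-%s" % (cluster_name, batch_index + num_existing + 1)

assert member_name("prod-vertica", 3, 0) == "prod-vertica-member-4"
assert member_name("prod-vertica", 3, 1) == "prod-vertica-member-5"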
Exemplo n.º 24
0
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
        nova_client = remote.create_nova_client(context)
        network_driver = (importutils.import_class(
            CONF.network_driver))(context, None)
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Run checks first
        if not network_driver.subnet_support:
            raise exception.TroveError(_(
                "The configured network driver does not support subnet "
                "management. This is required for Oracle RAC clusters."))

        quota.check_quotas(context.tenant, {'instances': num_instances})
        for instance in instances:
            if not instance.get('flavor_id'):
                raise exception.BadRequest(_("Missing required flavor_id."))
            try:
                nova_client.flavors.get(instance['flavor_id'])
            except nova_exceptions.NotFound:
                raise exception.FlavorNotFound(uuid=instance['flavor_id'])
            if instance.get('volume_size'):
                raise exception.VolumeNotSupported()
            if instance.get('region_name'):
                raise exception.BadRequest(_("Instance region_name option not "
                                             "supported."))

        database = extended_properties.get('database')
        if not database:
            raise exception.BadRequest(_("Missing database name."))
        if len(database) > 8:
            raise exception.BadValue(_("Database name greater than 8 chars."))
        storage_info = check_storage_info(extended_properties)
        subnet, subnetpool, network = check_public_network_info(
            ds_conf, network_driver, num_instances, extended_properties)

        ssh_pem, ssh_pub = crypto_utils.generate_ssh_keys()

        sys_password = utils.generate_random_password(
            datastore=datastore.name)
        admin_password = utils.generate_random_password(
            datastore=datastore.name)

        # Create the cluster
        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=tasks.ClusterTasks.BUILDING_INITIAL)

        if not subnet:
            LOG.debug("Creating RAC public subnet on network {net} from "
                      "pool {pool}".format(net=network['id'],
                                           pool=subnetpool['id']))
            subnet = create_public_subnet_from_pool(
                ds_conf, network_driver, db_info.id, subnetpool, network,
                extended_properties.get('router'),
                extended_properties.get('prefixlen'))
            LOG.debug("Created subnet {sub} with CIDR {cidr}".format(
                sub=subnet['id'], cidr=subnet['cidr']))

        interconnect_network, interconnect_subnet = create_interconnect(
            ds_conf, network_driver, db_info.id)
        LOG.debug("Created interconnect network {net} with subnet "
                  "{sub}".format(net=interconnect_network['id'],
                                 sub=interconnect_subnet['id']))

        public_subnet_manager = rac_utils.RACPublicSubnetManager(
            subnet['cidr'])
        interconnect_subnet_manager = rac_utils.CommonSubnetManager(
            interconnect_subnet['cidr'])

        subnet = configure_public_subnet(
            ds_conf, network_driver, db_info.id, subnet,
            public_subnet_manager.allocation_pool)
        LOG.debug("RAC public subnet ({sub_id}) info: name='{name}', scans="
                  "{scans}".format(sub_id=subnet['id'], name=subnet['name'],
                                   scans=public_subnet_manager.scan_list))

        cluster_config = {
            'id': db_info.id,
            'instance_type': 'node',
            'storage': storage_info,
            'ssh_pem': ssh_pem,
            'ssh_pub': ssh_pub,
            'database': database,
            'sys_password': sys_password,
            'admin_password': admin_password}

        vips = (public_subnet_manager.scan_list +
                [public_subnet_manager.instance_vip(i)
                 for i in range(len(instances))])

        for i, instance in enumerate(instances):
            instance_name = rac_utils.make_instance_hostname(name, i)
            nics = instance.get('nics') or []
            public_port_name = rac_utils.make_object_name(
                ds_conf, ['public', 'port', str(i + 1)], db_info.id)
            public_port = create_port(
                network_driver, public_port_name, i,
                subnet, public_subnet_manager, vips=vips)
            interconnect_port_name = rac_utils.make_object_name(
                ds_conf, ['interconnect', 'port', str(i + 1)], db_info.id)
            interconnect_port = create_port(
                network_driver, interconnect_port_name, i,
                interconnect_subnet, interconnect_subnet_manager)
            nics.append({'port-id': public_port['id']})
            nics.append({'port-id': interconnect_port['id']})
            LOG.debug("Creating instance {name} with public ip {pub} and "
                      "interconnect ip {int}".format(
                        name=instance_name,
                        pub=public_port['fixed_ips'][0]['ip_address'],
                        int=interconnect_port['fixed_ips'][0]['ip_address']))
            inst_models.Instance.create(
                context,
                instance_name,
                instance['flavor_id'],
                datastore_version.image_id,
                [], [], datastore,
                datastore_version,
                None, None,
                availability_zone=instance.get('availability_zone'),
                nics=nics,
                cluster_config=cluster_config,
                modules=instance.get('modules'),
                locality=locality)

        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return OracleRACCluster(context, db_info, datastore, datastore_version)
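
Each RAC node ends up with its user-supplied NICs plus two cluster-managed ports, one on the public subnet and one on the interconnect. A minimal sketch of that per-node NIC assembly, with fabricated port IDs standing in for the Neutron ports created above:

def assemble_nics(user_nics, public_port_id, interconnect_port_id):
    # Illustrative only: mirrors the nics.append(...) calls in create() above.
    nics = list(user_nics or [])
    nics.append({"port-id": public_port_id})
    nics.append({"port-id": interconnect_port_id})
    return nics

assert assemble_nics(None, "pub-1", "ic-1") == [
    {"port-id": "pub-1"}, {"port-id": "ic-1"}]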
Exemplo n.º 25
0
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality, configuration):

        if configuration:
            raise exception.ConfigurationNotSupported()

        nova_client = remote.create_nova_client(context)
        network_driver = (importutils.import_class(
            CONF.network_driver))(context, None)
        ds_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Run checks first
        if not network_driver.subnet_support:
            raise exception.TroveError(_(
                "The configured network driver does not support subnet "
                "management. This is required for Oracle RAC clusters."))

        quota.check_quotas(context.tenant, {'instances': num_instances})
        for instance in instances:
            if not instance.get('flavor_id'):
                raise exception.BadRequest(_("Missing required flavor_id."))
            try:
                nova_client.flavors.get(instance['flavor_id'])
            except nova_exceptions.NotFound:
                raise exception.FlavorNotFound(uuid=instance['flavor_id'])
            if instance.get('volume_size'):
                raise exception.VolumeNotSupported()
            if instance.get('region_name'):
                raise exception.BadRequest(_("Instance region_name option not "
                                             "supported."))

        database = extended_properties.get('database')
        if not database:
            raise exception.BadRequest(_("Missing database name."))
        if len(database) > 8:
            raise exception.BadValue(_("Database name greater than 8 chars."))
        storage_info = check_storage_info(extended_properties)
        subnet, subnetpool, network = check_public_network_info(
            ds_conf, network_driver, num_instances, extended_properties)

        ssh_pem, ssh_pub = crypto_utils.generate_ssh_keys()

        sys_password = utils.generate_random_password(
            datastore=datastore.name)
        admin_password = utils.generate_random_password(
            datastore=datastore.name)

        # Create the cluster
        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=tasks.ClusterTasks.BUILDING_INITIAL)

        if not subnet:
            LOG.debug("Creating RAC public subnet on network {net} from "
                      "pool {pool}".format(net=network['id'],
                                           pool=subnetpool['id']))
            subnet = create_public_subnet_from_pool(
                ds_conf, network_driver, db_info.id, subnetpool, network,
                extended_properties.get('router'),
                extended_properties.get('prefixlen'))
            LOG.debug("Created subnet {sub} with CIDR {cidr}".format(
                sub=subnet['id'], cidr=subnet['cidr']))

        interconnect_network, interconnect_subnet = create_interconnect(
            ds_conf, network_driver, db_info.id)
        LOG.debug("Created interconnect network {net} with subnet "
                  "{sub}".format(net=interconnect_network['id'],
                                 sub=interconnect_subnet['id']))

        public_subnet_manager = rac_utils.RACPublicSubnetManager(
            subnet['cidr'])
        interconnect_subnet_manager = rac_utils.CommonSubnetManager(
            interconnect_subnet['cidr'])

        subnet = configure_public_subnet(
            ds_conf, network_driver, db_info.id, subnet,
            public_subnet_manager.allocation_pool)
        LOG.debug("RAC public subnet ({sub_id}) info: name='{name}', scans="
                  "{scans}".format(sub_id=subnet['id'], name=subnet['name'],
                                   scans=public_subnet_manager.scan_list))

        cluster_config = {
            'id': db_info.id,
            'instance_type': 'node',
            'storage': storage_info,
            'ssh_pem': ssh_pem,
            'ssh_pub': ssh_pub,
            'database': database,
            'sys_password': sys_password,
            'admin_password': admin_password}

        vips = (public_subnet_manager.scan_list +
                [public_subnet_manager.instance_vip(i)
                 for i in range(len(instances))])

        for i, instance in enumerate(instances):
            instance_name = rac_utils.make_instance_hostname(name, i)
            nics = instance.get('nics') or []
            public_port_name = rac_utils.make_object_name(
                ds_conf, ['public', 'port', str(i + 1)], db_info.id)
            public_port = create_port(
                network_driver, public_port_name, i,
                subnet, public_subnet_manager, vips=vips)
            interconnect_port_name = rac_utils.make_object_name(
                ds_conf, ['interconnect', 'port', str(i + 1)], db_info.id)
            interconnect_port = create_port(
                network_driver, interconnect_port_name, i,
                interconnect_subnet, interconnect_subnet_manager)
            nics.append({'port-id': public_port['id']})
            nics.append({'port-id': interconnect_port['id']})
            LOG.debug("Creating instance {name} with public ip {pub} and "
                      "interconnect ip {int}".format(
                        name=instance_name,
                        pub=public_port['fixed_ips'][0]['ip_address'],
                        int=interconnect_port['fixed_ips'][0]['ip_address']))
            inst_models.Instance.create(
                context,
                instance_name,
                instance['flavor_id'],
                datastore_version.image_id,
                [], [], datastore,
                datastore_version,
                None, None,
                availability_zone=instance.get('availability_zone'),
                nics=nics,
                cluster_config=cluster_config,
                modules=instance.get('modules'),
                locality=locality)

        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return OracleRACCluster(context, db_info, datastore, datastore_version)
Exemplo n.º 26
0
    def create(cls, context, name, datastore, datastore_version, instances):

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
        # TODO(amcreynolds): or introduce a min/max num_instances and set
        # TODO(amcreynolds): both to 3
        num_instances = len(instances)
        if num_instances != 3:
            raise exception.ClusterNumInstancesNotSupported(num_instances=3)

        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        mongo_conf = CONF.get(datastore_version.manager)
        num_configsvr = mongo_conf.num_config_servers_per_cluster
        num_mongos = mongo_conf.num_query_routers_per_cluster
        delta_instances = num_instances + num_configsvr + num_mongos
        deltas = {'instances': delta_instances}

        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        volume_size = None
        if mongo_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
            deltas['volumes'] = volume_size * delta_instances
        else:
            # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = mongo_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        replica_set_name = "rs1"

        member_config = {"id": db_info.id,
                         "shard_id": utils.generate_uuid(),
                         "instance_type": "member",
                         "replica_set_name": replica_set_name}
        for i in range(1, num_instances + 1):
            instance_name = "%s-%s-%s" % (name, replica_set_name, str(i))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=member_config)

        configsvr_config = {"id": db_info.id,
                            "instance_type": "config_server"}
        for i in range(1, num_configsvr + 1):
            instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=configsvr_config)

        mongos_config = {"id": db_info.id,
                         "instance_type": "query_router"}
        for i in range(1, num_mongos + 1):
            instance_name = "%s-%s-%s" % (name, "mongos", str(i))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=mongos_config)

        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return MongoDbCluster(context, db_info, datastore, datastore_version)
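
The quota request covers more than the user-visible members: the config servers and query routers are created with the same flavor and volume size. A worked example of the delta math, assuming num_config_servers_per_cluster=3 and num_query_routers_per_cluster=1 (illustrative values, not necessarily the CONF defaults):

num_members = 3      # fixed by the num_instances != 3 check above
num_configsvr = 3    # assumed num_config_servers_per_cluster
num_mongos = 1       # assumed num_query_routers_per_cluster
volume_size = 2      # GB, taken from the homogeneous request

delta_instances = num_members + num_configsvr + num_mongos
deltas = {"instances": delta_instances,
          "volumes": volume_size * delta_instances}
assert deltas == {"instances": 7, "volumes": 14}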
Exemplo n.º 27
0
    def _create_cluster_instances(
            cls, context, cluster_id, cluster_name,
            datastore, datastore_version, instances, extended_properties=None):
        LOG.debug("Processing a request for new cluster instances.")

        cluster_node_ids = CouchbaseClusterTasks.find_cluster_node_ids(
            cluster_id)

        cluster_password = None

        # Couchbase imposes a cluster-wide quota on the memory that gets
        # evenly distributed between node services.
        # All nodes (including future nodes) need to be able to accommodate
        # this quota.
        # We therefore require the cluster to be homogeneous.

        # Load the flavor and volume information from the existing instances
        # if any.
        # Generate the administrative password for a new cluster or reuse the
        # one from an existing cluster.
        required_instance_flavor = None
        required_volume_size = None
        if cluster_node_ids:
            cluster_nodes = CouchbaseClusterTasks.load_cluster_nodes(
                context,
                cluster_node_ids)
            coordinator = cluster_nodes[0]
            required_instance_flavor = coordinator['instance'].flavor_id
            required_volume_size = coordinator['instance'].volume_size

            cluster_password = coordinator['guest'].get_cluster_password()
        else:
            cluster_password = utils.generate_random_password()

        models.assert_homogeneous_cluster(
            instances,
            required_flavor=required_instance_flavor,
            required_volume_size=required_volume_size)

        couchbase_conf = CONF.get(datastore_version.manager)
        eph_enabled = couchbase_conf.device_path
        vol_enabled = couchbase_conf.volume_support

        # Validate instance flavors.
        models.get_flavors_from_instance_defs(context, instances,
                                              vol_enabled, eph_enabled)

        # Compute the total volume allocation.
        req_volume_size = models.get_required_volume_size(instances,
                                                          vol_enabled)

        # Check requirements against quota.
        num_new_instances = len(instances)
        deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
        check_quotas(context.tenant, deltas)

        # Creating member instances.
        num_instances = len(cluster_node_ids)
        new_instances = []
        for instance_idx, instance in enumerate(instances, num_instances + 1):
            instance_az = instance.get('availability_zone', None)

            member_config = {"id": cluster_id,
                             "instance_type": "member",
                             "cluster_password": cluster_password}

            instance_name = instance.get('name')
            if not instance_name:
                instance_name = cls._build_instance_name(
                    cluster_name, cls.DEFAULT_SERVICES, instance_az,
                    instance_idx)

            new_instance = inst_models.Instance.create(
                context, instance_name,
                instance['flavor_id'],
                datastore_version.image_id,
                [], [],
                datastore, datastore_version,
                instance['volume_size'], None,
                nics=instance.get('nics', None),
                availability_zone=instance_az,
                configuration_id=None,
                cluster_config=member_config)

            new_instances.append(new_instance)

        return new_instances
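
New Couchbase nodes are numbered after the nodes that already exist, which is why enumerate() starts at num_instances + 1. A standalone illustration with a hypothetical default name builder (not the real _build_instance_name):

existing_nodes = 2
new_requests = [{"name": None}, {"name": "custom-node"}]

for idx, request in enumerate(new_requests, existing_nodes + 1):
    name = request["name"] or "cluster-node-%d" % idx
    print(name)
# cluster-node-3
# custom-node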
Exemplo n.º 28
0
    def _create_cluster_instances(cls, context, cluster_id, cluster_name,
                                  datastore, datastore_version, instances,
                                  extended_properties, locality):
        LOG.debug("Processing a request for new cluster instances.")

        cluster_node_ids = CouchbaseClusterTasks.find_cluster_node_ids(
            cluster_id)

        cluster_password = None

        # Couchbase imposes a cluster-wide quota on the memory that gets
        # evenly distributed between node services.
        # All nodes (including future nodes) need to be able to accommodate
        # this quota.
        # We therefore require the cluster to be homogeneous.

        # Load the flavor and volume information from the existing instances
        # if any.
        # Generate the administrative password for a new cluster or reuse the
        # one from an existing cluster.
        required_instance_flavor = None
        required_volume_size = None
        if cluster_node_ids:
            cluster_nodes = CouchbaseClusterTasks.load_cluster_nodes(
                context, cluster_node_ids)
            coordinator = cluster_nodes[0]
            required_instance_flavor = coordinator['instance'].flavor_id
            required_volume_size = coordinator['instance'].volume_size

            cluster_password = coordinator['guest'].get_cluster_password()
        else:
            pwd_len = min(cls.MAX_PASSWORD_LEN, CONF.default_password_length)
            cluster_password = utils.generate_random_password(pwd_len)

        models.assert_homogeneous_cluster(
            instances,
            required_flavor=required_instance_flavor,
            required_volume_size=required_volume_size)

        couchbase_conf = CONF.get(datastore_version.manager)
        eph_enabled = couchbase_conf.device_path
        vol_enabled = couchbase_conf.volume_support

        # Validate instance flavors.
        models.validate_instance_flavors(context, instances, vol_enabled,
                                         eph_enabled)

        # Compute the total volume allocation.
        req_volume_size = models.get_required_volume_size(
            instances, vol_enabled)

        # Check requirements against quota.
        num_new_instances = len(instances)
        deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
        check_quotas(context.tenant, deltas)

        # Creating member instances.
        num_instances = len(cluster_node_ids)
        new_instances = []
        for instance_idx, instance in enumerate(instances, num_instances + 1):
            instance_az = instance.get('availability_zone', None)

            member_config = {
                "id": cluster_id,
                "instance_type": "member",
                "cluster_password": cluster_password
            }

            instance_name = instance.get('name')
            if not instance_name:
                instance_name = cls._build_instance_name(
                    cluster_name, cls.DEFAULT_SERVICES, instance_az,
                    instance_idx)

            new_instance = inst_models.Instance.create(
                context,
                instance_name,
                instance['flavor_id'],
                datastore_version.image_id, [], [],
                datastore,
                datastore_version,
                instance['volume_size'],
                None,
                nics=instance.get('nics', None),
                availability_zone=instance_az,
                configuration_id=None,
                cluster_config=member_config,
                region_name=instance.get('region_name'),
                locality=locality)

            new_instances.append(new_instance)

        return new_instances
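
The notable difference from the previous example is that the generated cluster password is capped at MAX_PASSWORD_LEN. A small self-contained sketch of that cap (assumed constant values; the real generator lives in Trove's utils module):

import secrets
import string

MAX_PASSWORD_LEN = 24          # assumed class constant
default_password_length = 36   # assumed CONF.default_password_length

def generate_random_password(length):
    # Stand-in for Trove's password generator, for illustration only.
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

pwd_len = min(MAX_PASSWORD_LEN, default_password_length)
cluster_password = generate_random_password(pwd_len)
assert len(cluster_password) == MAX_PASSWORD_LEN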
Exemplo n.º 29
0
    def create(cls, context, name, datastore, datastore_version, instances, extended_properties):
        LOG.debug("Initiating cluster creation.")
        vertica_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

        # Instance count must match the configured cluster_member_count
        if num_instances != vertica_conf.cluster_member_count:
            raise exception.ClusterNumInstancesNotSupported(num_instances=vertica_conf.cluster_member_count)

        # Checking flavors
        flavor_ids = [instance["flavor_id"] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        deltas = {"instances": num_instances}

        # Checking volumes
        volume_sizes = [instance["volume_size"] for instance in instances if instance.get("volume_size", None)]
        volume_size = None
        if vertica_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            deltas["volumes"] = volume_size * num_instances
        else:
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = vertica_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        nics = [instance.get("nics", None) for instance in instances]

        azs = [instance.get("availability_zone", None) for instance in instances]

        # Create the cluster record with an initial BUILDING task status
        db_info = models.DBCluster.create(
            name=name,
            tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL,
        )

        # Creating member instances
        for i in range(0, num_instances):
            if i == 0:
                member_config = {"id": db_info.id, "instance_type": "master"}
            else:
                member_config = {"id": db_info.id, "instance_type": "member"}
            instance_name = "%s-member-%s" % (name, str(i + 1))
            inst_models.Instance.create(
                context,
                instance_name,
                flavor_id,
                datastore_version.image_id,
                [],
                [],
                datastore,
                datastore_version,
                volume_size,
                None,
                nics=nics[i],
                availability_zone=azs[i],
                configuration_id=None,
                cluster_config=member_config,
            )

        # Hand off to the taskmanager to continue cluster configuration
        task_api.load(context, datastore_version.manager).create_cluster(db_info.id)

        return VerticaCluster(context, db_info, datastore, datastore_version)
Exemplo n.º 30
0
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
        # TODO(amcreynolds): or introduce a min/max num_instances and set
        # TODO(amcreynolds): both to 3
        num_instances = len(instances)
        if num_instances != 3:
            raise exception.ClusterNumInstancesNotSupported(num_instances=3)

        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        mongo_conf = CONF.get(datastore_version.manager)
        num_configsvr = (1 if mongo_conf.num_config_servers_per_cluster == 1
                         else 3)
        num_mongos = mongo_conf.num_query_routers_per_cluster
        delta_instances = num_instances + num_configsvr + num_mongos
        deltas = {'instances': delta_instances}

        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        volume_size = None
        if mongo_conf.volume_support:
            if len(volume_sizes) != num_instances:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
            deltas['volumes'] = volume_size * delta_instances
        else:
            # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = mongo_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        nics = [instance.get('nics', None) for instance in instances]
        nic = nics[0]
        for n in nics[1:]:
            if n != nic:
                raise ValueError(_('All cluster nics must be the same. '
                                   '%(nic)s != %(n)s')
                                 % {'nic': nic, 'n': n})

        azs = [instance.get('availability_zone', None)
               for instance in instances]

        regions = [instance.get('region_name', None)
                   for instance in instances]

        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        replica_set_name = "rs1"

        member_config = {"id": db_info.id,
                         "shard_id": utils.generate_uuid(),
                         "instance_type": "member",
                         "replica_set_name": replica_set_name}

        configsvr_config = {"id": db_info.id,
                            "instance_type": "config_server"}

        mongos_config = {"id": db_info.id,
                         "instance_type": "query_router"}

        if mongo_conf.cluster_secure:
            cluster_key = base64.b64encode(utils.generate_random_password())
            member_config['key'] = cluster_key
            configsvr_config['key'] = cluster_key
            mongos_config['key'] = cluster_key

        for i in range(num_instances):
            instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=azs[i],
                                        nics=nic,
                                        configuration_id=None,
                                        cluster_config=member_config,
                                        modules=instances[i].get('modules'),
                                        region_name=regions[i],
                                        locality=locality)

        for i in range(num_configsvr):
            instance_name = "%s-%s-%s" % (name, "configsvr", str(i + 1))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=azs[i %
                                                              num_instances],
                                        nics=nic,
                                        configuration_id=None,
                                        cluster_config=configsvr_config,
                                        region_name=regions[i % num_instances],
                                        locality=locality)

        for i in range(num_mongos):
            instance_name = "%s-%s-%s" % (name, "mongos", str(i + 1))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=azs[i %
                                                              num_instances],
                                        nics=nic,
                                        configuration_id=None,
                                        cluster_config=mongos_config,
                                        region_name=regions[i % num_instances],
                                        locality=locality)

        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return MongoDbCluster(context, db_info, datastore, datastore_version)
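
When cluster_secure is enabled, one shared key is generated and handed to every member, config server, and query router so the nodes can authenticate to each other. A standalone sketch of that key generation; note that on Python 3, base64.b64encode expects bytes, so the encode/decode round-trip shown here would be needed around the password string:

import base64
import secrets
import string

def generate_random_password(length=36):
    # Stand-in for Trove's password generator, for illustration only.
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

# Shared by all roles so the cluster members trust each other.
cluster_key = base64.b64encode(generate_random_password().encode()).decode()

member_config = {"instance_type": "member", "key": cluster_key}
configsvr_config = {"instance_type": "config_server", "key": cluster_key}
mongos_config = {"instance_type": "query_router", "key": cluster_key}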