def test_quota_no_mem_no_cpu(self):
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.nocpu'))
    self.assertEqual(num_instances, 2)
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.nomem'))
    self.assertEqual(num_instances, 2)
def test_quota_overrides(self):
    """Make sure overriding a project's quotas works."""
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project_id, 'instances', 10)
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 4)
    db.quota_create(self.context, self.project_id, 'cores', 100)
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 10)
    db.quota_create(self.context, self.project_id, 'ram', 3 * 2048)
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 3)

    # metadata_items
    too_many_items = FLAGS.quota_metadata_items + 1000
    num_metadata_items = quota.allowed_metadata_items(self.context,
                                                      too_many_items)
    self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
    db.quota_create(self.context, self.project_id, 'metadata_items', 5)
    num_metadata_items = quota.allowed_metadata_items(self.context,
                                                      too_many_items)
    self.assertEqual(num_metadata_items, 5)

    # Cleanup
    db.quota_destroy_all_by_project(self.context, self.project_id)
def test_quota_overrides(self):
    """Make sure overriding a project's quotas works."""
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, {'project_id': self.project.id,
                                   'instances': 10})
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 4)
    db.quota_update(self.context, self.project.id, {'cores': 100})
    num_instances = quota.allowed_instances(self.context, 100,
                                        self._get_instance_type('m1.small'))
    self.assertEqual(num_instances, 10)

    # metadata_items
    too_many_items = FLAGS.quota_metadata_items + 1000
    num_metadata_items = quota.allowed_metadata_items(self.context,
                                                      too_many_items)
    self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
    db.quota_update(self.context, self.project.id, {'metadata_items': 5})
    num_metadata_items = quota.allowed_metadata_items(self.context,
                                                      too_many_items)
    self.assertEqual(num_metadata_items, 5)

    # Cleanup
    db.quota_destroy(self.context, self.project.id)
def test_unlimited_cores(self):
    self.flags(quota_instances=-1, quota_ram=-1, quota_cores=2)
    instance_type = self._get_instance_type("m1.small")
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project_id, "cores", None)
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 100)
    num_instances = quota.allowed_instances(self.context, 101, instance_type)
    self.assertEqual(num_instances, 101)
def test_unlimited_cores(self):
    FLAGS.quota_instances = 1000
    FLAGS.quota_cores = 2
    instance_type = self._get_instance_type("m1.small")
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project.id, "cores", None)
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 100)
    num_instances = quota.allowed_instances(self.context, 101, instance_type)
    self.assertEqual(num_instances, 101)
def test_unlimited_cores(self):
    self.flags(quota_instances=-1, quota_ram=-1, quota_cores=2)
    instance_type = self._get_instance_type('m1.small')
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project_id, 'cores', None)
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 100)
    num_instances = quota.allowed_instances(self.context, 101, instance_type)
    self.assertEqual(num_instances, 101)
def test_unlimited_ram(self):
    self.flags(quota_instances=-1, quota_ram=2 * 2048, quota_cores=-1)
    instance_type = self._get_instance_type('m1.small')
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project_id, 'ram', -1)
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 100)
    num_instances = quota.allowed_instances(self.context, 101, instance_type)
    self.assertEqual(num_instances, 101)
def test_quota_overrides(self):
    """Make sure overriding a project's quotas works."""
    num_instances = quota.allowed_instances(self.context, 100,
                                    instance_types.INSTANCE_TYPES['m1.small'])
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, {'project_id': self.project.id,
                                   'instances': 10})
    num_instances = quota.allowed_instances(self.context, 100,
                                    instance_types.INSTANCE_TYPES['m1.small'])
    self.assertEqual(num_instances, 4)
    db.quota_update(self.context, self.project.id, {'cores': 100})
    num_instances = quota.allowed_instances(self.context, 100,
                                    instance_types.INSTANCE_TYPES['m1.small'])
    self.assertEqual(num_instances, 10)
    db.quota_destroy(self.context, self.project.id)
def test_unlimited_ram(self):
    FLAGS.quota_instances = -1
    FLAGS.quota_ram = 2 * 2048
    FLAGS.quota_cores = -1
    instance_type = self._get_instance_type('m1.small')
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project_id, 'ram', None)
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 100)
    num_instances = quota.allowed_instances(self.context, 101, instance_type)
    self.assertEqual(num_instances, 101)
def test_unlimited_cores(self):
    FLAGS.quota_instances = -1
    FLAGS.quota_ram = -1
    FLAGS.quota_cores = 2
    instance_type = self._get_instance_type('m1.small')
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 2)
    db.quota_create(self.context, self.project_id, 'cores', None)
    num_instances = quota.allowed_instances(self.context, 100, instance_type)
    self.assertEqual(num_instances, 100)
    num_instances = quota.allowed_instances(self.context, 101, instance_type)
    self.assertEqual(num_instances, 101)
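# NOTE: The quota tests above depend on a _get_instance_type() helper that is
# not included in this excerpt. The sketch below is a hypothetical stand-in,
# not the original fixture: the flavor records and their values are
# assumptions chosen only to stay consistent with the expectations in the
# tests (e.g. m1.small as 1 vcpu / 2048 MB, so quota_cores=2 or
# quota_ram=2 * 2048 each allow exactly 2 instances).
def _get_instance_type(self, name):
    """Return a minimal instance-type record for the given flavor name."""
    fixtures = {
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20),
        'm1.nocpu': dict(memory_mb=2048, vcpus=0, local_gb=20),
        'm1.nomem': dict(memory_mb=0, vcpus=1, local_gb=20),
    }
    return fixtures[name]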
def _check_quota(self, context, instance_uuid):
    # Check the quota to see if we can launch a new instance.
    instance = self.get(context, instance_uuid)
    instance_type = instance['instance_type']
    metadata = instance['metadata']

    # Check the quota to see if we can launch a single instance.
    num_instances = quota.allowed_instances(context, 1, instance_type)
    if num_instances < 1:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s,"
                   " tried to launch an instance") % locals())
        if num_instances <= 0:
            message = _("Instance quota exceeded. You cannot run any "
                        "more instances of this type.")
        else:
            message = _("Instance quota exceeded. You can only run %s "
                        "more instances of this type.") % num_instances
        raise novaexc.QuotaError(code="InstanceLimitExceeded")

    # Check against the metadata quota.
    metadata = self.db.instance_metadata_get(context, instance['id'])
    self.compute_api._check_metadata_properties_quota(context, metadata)
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name="",
           display_description="", key_name=None, key_data=None,
           security_group="default", availability_zone=None, user_data=None,
           metadata={}, injected_files=None, admin_password=None):
    """Create the number and type of instances requested.

    Verifies that quota and other arguments are valid.
    """
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()

    num_instances = quota.allowed_instances(context, max_count, instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        if num_instances <= 0:
            message = _("Instance quota exceeded. You cannot run any "
                        "more instances of this type.")
        else:
            message = (_("Instance quota exceeded. You can only run %s "
                         "more instances of this type.") % num_instances)
        raise quota.QuotaError(message, "InstanceLimitExceeded")

    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    image = self.image_service.show(context, image_id)

    os_type = None
    if "properties" in image and "os_type" in image["properties"]:
        os_type = image["properties"]["os_type"]

    if kernel_id is None:
        kernel_id = image["properties"].get("kernel_id", None)
    if ramdisk_id is None:
        ramdisk_id = image["properties"].get("ramdisk_id", None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ["default"]
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context, context.project_id,
                                              security_group_name)
        security_groups.append(group["id"])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair["public_key"]

    base_options = {
        "reservation_id": utils.generate_uid("r"),
        "image_id": image_id,
        "kernel_id": kernel_id or "",
        "ramdisk_id": ramdisk_id or "",
        "state": 0,
        "state_description": "scheduling",
        "user_id": context.user_id,
        "project_id": context.project_id,
        "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "instance_type_id": instance_type["id"],
        "memory_mb": instance_type["memory_mb"],
        "vcpus": instance_type["vcpus"],
        "local_gb": instance_type["local_gb"],
        "display_name": display_name,
        "display_description": display_description,
        "user_data": user_data or "",
        "key_name": key_name,
        "key_data": key_data,
        "locked": False,
        "metadata": metadata,
        "availability_zone": availability_zone,
        "os_type": os_type,
    }
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance["id"]

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, "display_name") or
                instance.display_name is None):
            updates["display_name"] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        # NOTE(sandy): For now we're just going to pass in the
        # instance_type record to the scheduler. In a later phase
        # we'll be ripping this whole for-loop out and deferring the
        # creation of the Instance record. At that point all this will
        # change.
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "instance_type": instance_type,
                           "availability_zone": availability_zone,
                           "injected_files": injected_files,
                           "admin_password": admin_password}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def _check_create_parameters(self, context, instance_type,
                             image_href, kernel_id=None, ramdisk_id=None,
                             min_count=None, max_count=None,
                             display_name='', display_description='',
                             key_name=None, key_data=None,
                             security_group='default',
                             availability_zone=None, user_data=None,
                             metadata={}, injected_files=None,
                             admin_password=None, zone_blob=None,
                             reservation_id=None):
    """Verify all the input parameters regardless of the provisioning
    strategy being performed."""
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()
    if not min_count:
        min_count = 1
    if not max_count:
        max_count = min_count

    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        if num_instances <= 0:
            message = _("Instance quota exceeded. You cannot run any "
                        "more instances of this type.")
        else:
            message = _("Instance quota exceeded. You can only run %s "
                        "more instances of this type.") % num_instances
        raise quota.QuotaError(message, "InstanceLimitExceeded")

    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    (image_service, image_id) = nova.image.get_image_service(image_href)
    image = image_service.show(context, image_id)

    os_type = None
    if 'properties' in image and 'os_type' in image['properties']:
        os_type = image['properties']['os_type']
    architecture = None
    if 'properties' in image and 'arch' in image['properties']:
        architecture = image['properties']['arch']
    vm_mode = None
    if 'properties' in image and 'vm_mode' in image['properties']:
        vm_mode = image['properties']['vm_mode']

    if kernel_id is None:
        kernel_id = image['properties'].get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image['properties'].get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        image_service.show(context, kernel_id)
    if ramdisk_id:
        image_service.show(context, ramdisk_id)

    self.ensure_default_security_group(context)

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    if reservation_id is None:
        reservation_id = utils.generate_uid('r')

    root_device_name = ec2utils.properties_root_device_name(
        image['properties'])

    base_options = {
        'reservation_id': reservation_id,
        'image_ref': image_href,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state': 0,
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type_id': instance_type['id'],
        'memory_mb': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'local_gb': instance_type['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone,
        'os_type': os_type,
        'architecture': architecture,
        'vm_mode': vm_mode,
        'root_device_name': root_device_name}

    return (num_instances, base_options, image)
def create(self, context, instance_type,
           image_id, kernel_id=None, ramdisk_id=None,
           min_count=1, max_count=1,
           display_name='', display_description='',
           key_name=None, key_data=None, security_group='default',
           availability_zone=None, user_data=None, metadata={},
           injected_files=None):
    """Create the number and type of instances requested.

    Verifies that quota and other arguments are valid.
    """
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()

    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances,
                               "InstanceLimitExceeded")

    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    image = self.image_service.show(context, image_id)

    os_type = None
    if 'properties' in image and 'os_type' in image['properties']:
        os_type = image['properties']['os_type']

    if kernel_id is None:
        kernel_id = image['properties'].get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image['properties'].get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ['default']
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group['id'])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    base_options = {
        'reservation_id': utils.generate_uid('r'),
        'image_id': image_id,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state': 0,
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type_id': instance_type['id'],
        'memory_mb': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'local_gb': instance_type['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone,
        'os_type': os_type}
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "injected_files": injected_files}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def create(self, context, instance_type, image_id, kernel_id=None,
           ramdisk_id=None, min_count=1, max_count=1, display_name="",
           display_description="", key_name=None, key_data=None,
           security_group="default", availability_zone=None, user_data=None,
           onset_files=None):
    """Create the number of instances requested if quota and
    other arguments check out ok.
    """
    type_data = instance_types.INSTANCE_TYPES[instance_type]
    num_instances = quota.allowed_instances(context, max_count, type_data)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(
            _("Instance quota exceeded. You can only "
              "run %s more instances of this type.") % num_instances,
            "InstanceLimitExceeded")

    image = self.image_service.show(context, image_id)
    if kernel_id is None:
        kernel_id = image.get("kernel_id", None)
    if ramdisk_id is None:
        ramdisk_id = image.get("ramdisk_id", None)
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ["default"]
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context, context.project_id,
                                              security_group_name)
        security_groups.append(group["id"])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair["public_key"]

    base_options = {
        "reservation_id": utils.generate_uid("r"),
        "image_id": image_id,
        "kernel_id": kernel_id or "",
        "ramdisk_id": ramdisk_id or "",
        "state_description": "scheduling",
        "user_id": context.user_id,
        "project_id": context.project_id,
        "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "instance_type": instance_type,
        "memory_mb": type_data["memory_mb"],
        "vcpus": type_data["vcpus"],
        "local_gb": type_data["local_gb"],
        "display_name": display_name,
        "display_description": display_description,
        "user_data": user_data or "",
        "key_name": key_name,
        "key_data": key_data,
        "locked": False,
        "availability_zone": availability_zone,
    }
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance["id"]

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, "display_name") or
                instance.display_name is None):
            updates["display_name"] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "onset_files": onset_files}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]
def create(self, context, instance_type,
           image_id, kernel_id=None, ramdisk_id=None,
           min_count=1, max_count=1,
           display_name='', display_description='',
           key_name=None, key_data=None, security_group='default',
           availability_zone=None, user_data=None,
           metadata=[], onset_files=None):
    """Create the number of instances requested if quota and
    other arguments check out ok."""
    type_data = instance_types.get_instance_type(instance_type)
    num_instances = quota.allowed_instances(context, max_count, type_data)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances,
                               "InstanceLimitExceeded")

    num_metadata = len(metadata)
    quota_metadata = quota.allowed_metadata_items(context, num_metadata)
    if quota_metadata < num_metadata:
        pid = context.project_id
        msg = (_("Quota exceeded for %(pid)s,"
                 " tried to set %(num_metadata)s metadata properties")
               % locals())
        LOG.warn(msg)
        raise quota.QuotaError(msg, "MetadataLimitExceeded")

    # Because metadata is stored in the DB, we hard-code the size limits
    # In future, we may support more variable length strings, so we act
    # as if this is quota-controlled for forwards compatibility
    for metadata_item in metadata:
        k = metadata_item['key']
        v = metadata_item['value']
        if len(k) > 255 or len(v) > 255:
            pid = context.project_id
            msg = (_("Quota exceeded for %(pid)s,"
                     " metadata property key or value too long")
                   % locals())
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

    image = self.image_service.show(context, image_id)
    if kernel_id is None:
        kernel_id = image.get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image.get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ['default']
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group['id'])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    base_options = {
        'reservation_id': utils.generate_uid('r'),
        'image_id': image_id,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type': instance_type,
        'memory_mb': type_data['memory_mb'],
        'vcpus': type_data['vcpus'],
        'local_gb': type_data['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone}
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "onset_files": onset_files}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]