Example #1
    def create(self, context, size, name, description):
        if quota.allowed_volumes(context, 1, size) < 1:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
                    " %(size)sG volume") % locals())
            raise quota.QuotaError(_("Volume quota exceeded. You cannot "
                                     "create a volume of size %sG") % size)

        options = {
            'size': size,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'availability_zone': FLAGS.storage_availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': name,
            'display_description': description}

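        # Persist the volume record, then cast to the scheduler so the actual
        # creation is handled asynchronously.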
        volume = self.db.volume_create(context, options)
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "create_volume",
                  "args": {"topic": FLAGS.volume_topic,
                           "volume_id": volume['id']}})
        return volume
Example #2
    def create(self,
               context,
               size,
               snapshot_id,
               name,
               description,
               volume_type=None,
               metadata=None,
               availability_zone=None):
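        # When creating from a snapshot, the snapshot must be available;
        # if no size was given, inherit the snapshot's volume size.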
        if snapshot_id is not None:
            snapshot = self.get_snapshot(context, snapshot_id)
            if snapshot['status'] != "available":
                raise exception.ApiError(
                    _("Snapshot status must be available"))
            if not size:
                size = snapshot['volume_size']

        if quota.allowed_volumes(context, 1, size) < 1:
            pid = context.project_id
            LOG.warn(
                _("Quota exceeded for %(pid)s, tried to create"
                  " %(size)sG volume") % locals())
            raise quota.QuotaError(
                _("Volume quota exceeded. You cannot "
                  "create a volume of size %sG") % size)

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        if volume_type is None:
            volume_type_id = None
        else:
            volume_type_id = volume_type.get('id', None)

        options = {
            'size': size,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': snapshot_id,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': name,
            'display_description': description,
            'volume_type_id': volume_type_id,
            'metadata': metadata,
        }

        volume = self.db.volume_create(context, options)
        rpc.cast(
            context, FLAGS.scheduler_topic, {
                "method": "create_volume",
                "args": {
                    "topic": FLAGS.volume_topic,
                    "volume_id": volume['id'],
                    "snapshot_id": snapshot_id
                }
            })
        return volume
Example #3
    def _check_injected_file_quota(self, context, injected_files):
        """Enforce quota limits on injected files.

        Raises a QuotaError if any limit is exceeded.

        """
        if injected_files is None:
            return
        limit = quota.allowed_injected_files(context)
        if len(injected_files) > limit:
            raise quota.QuotaError(code="OnsetFileLimitExceeded")
        path_limit = quota.allowed_injected_file_path_bytes(context)
        content_limit = quota.allowed_injected_file_content_bytes(context)
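        # Check each injected file's path length and content size against the
        # per-file limits.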
        for path, content in injected_files:
            if len(path) > path_limit:
                raise quota.QuotaError(code="OnsetFilePathLimitExceeded")
            if len(content) > content_limit:
                raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
Example #4
    def allocate_floating_ip(self, context, project_id):
        """Gets a floating ip from the pool."""
        # NOTE(tr3buchet): all network hosts in zone now use the same pool
        LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1))
        if quota.allowed_floating_ips(context, 1) < 1:
            LOG.warn(_('Quota exceeded for %s, tried to allocate '
                       'address'), context.project_id)
            raise quota.QuotaError(
                _('Address quota exceeded. You cannot '
                  'allocate any more addresses'))
        # TODO(vish): add floating ips through manage command
        return self.db.floating_ip_allocate_address(context, project_id)
Example #5
    def _check_metadata_properties_quota(self, context, metadata={}):
        """Enforce quota limits on metadata properties."""
        num_metadata = len(metadata)
        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
        if quota_metadata < num_metadata:
            pid = context.project_id
            msg = _("Quota exceeeded for %(pid)s, tried to set "
                    "%(num_metadata)s metadata properties") % locals()
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        #  as if this is quota-controlled for forwards compatibility
        for k, v in metadata.iteritems():
            if len(k) > 255 or len(v) > 255:
                pid = context.project_id
                msg = _("Quota exceeeded for %(pid)s, metadata property "
                        "key or value too long") % locals()
                LOG.warn(msg)
                raise quota.QuotaError(msg, "MetadataLimitExceeded")
Example #6
    def allocate_floating_ip(self, context):
        if quota.allowed_floating_ips(context, 1) < 1:
            LOG.warn(_("Quota exceeded for %s, tried to allocate "
                       "address"), context.project_id)
            raise quota.QuotaError(
                _("Address quota exceeded. You cannot "
                  "allocate any more addresses"))
        # NOTE(vish): We don't know which network host should get the ip
        #             when we allocate, so just send it to any one.  This
        #             will probably need to move into a network supervisor
        #             at some point.
        return rpc.call(
            context, FLAGS.network_topic, {
                "method": "allocate_floating_ip",
                "args": {
                    "project_id": context.project_id
                }
            })
Example #7
    def create(self,
               context,
               instance_type,
               image_id,
               kernel_id=None,
               ramdisk_id=None,
               min_count=1,
               max_count=1,
               display_name='',
               display_description='',
               key_name=None,
               key_data=None,
               security_group='default',
               availability_zone=None,
               user_data=None,
               metadata={},
               injected_files=None):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid.

        """
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

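        # allowed_instances() returns how many instances of this type the
        # project may still launch, capped at max_count.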
        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(
                _("Quota exceeeded for %(pid)s,"
                  " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(
                _("Instance quota exceeded. You can only "
                  "run %s more instances of this type.") % num_instances,
                "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not isinstance(security_group, list):
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

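        # If only a key name was supplied, look up its public key.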
        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

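        # Options common to every instance created by this request.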
        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'local_gb': instance_type['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name')
                    or instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(
                _("Casting to scheduler for %(pid)s/%(uid)s's"
                  " instance %(instance_id)s") % locals())
            rpc.cast(
                context, FLAGS.scheduler_topic, {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "availability_zone": availability_zone,
                        "injected_files": injected_files
                    }
                })

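        # Trigger a refresh for each security group whose membership changed.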
        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]