Example #1
    def direct_snapshot(self, snapshot_name, image_format, image_id):
        deletion_marker = '_to_be_deleted_by_glance'
        if image_format != 'raw':
            reason = _('only raw format is supported')
            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
        if not self.driver.supports_layering():
            reason = _('librbd is too old')
            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
        rbd_snap_name = snapshot_name + deletion_marker
        self.driver.create_snapshot(self.rbd_name, rbd_snap_name)
        fsid = self.driver.get_fsid()
        location = {'url': 'rbd://{fsid}/{pool}/{image}/{snap}'.format(
            fsid=fsid,
            pool=self.pool,
            image=self.rbd_name,
            snap=rbd_snap_name)}
        clone_name = self.rbd_name + '_clone_' + snapshot_name
        clone_snap = 'snap'
        self.driver.clone(location, clone_name)
        self.driver.create_snapshot(clone_name, clone_snap)
        return 'rbd://{fsid}/{pool}/{image}/{snap}'.format(
            fsid=fsid,
            pool=self.pool,
            image=clone_name,
            snap=clone_snap)
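
The string handed back to Glance is just a formatted rbd:// URL naming the cluster fsid, pool, image and snapshot. A minimal standalone sketch of that formatting, using placeholder values rather than anything from the snippet above:

def rbd_location(fsid, pool, image, snap):
    # Compose the rbd:// location string in the same shape as the code above.
    return 'rbd://{fsid}/{pool}/{image}/{snap}'.format(
        fsid=fsid, pool=pool, image=image, snap=snap)

# rbd_location('some-fsid', 'images', 'vm-disk', 'snap1_to_be_deleted_by_glance')
# -> 'rbd://some-fsid/images/vm-disk/snap1_to_be_deleted_by_glance'
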
Example #2
def limited(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
                    GET variables. 'offset' is where to start in the list,
                    and 'limit' is the maximum number of items to return. If
                    'limit' is not specified, 0, or > max_limit, we default
                    to max_limit. Negative values for either offset or limit
                    will cause exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        msg = _('offset param must be an integer')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        msg = _('limit param must be an integer')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if limit < 0:
        msg = _('limit param must be positive')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    if offset < 0:
        msg = _('offset param must be positive')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    limit = min(max_limit, limit or max_limit)
    range_end = offset + limit
    return items[offset:range_end]
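
A self-contained sketch of the same offset/limit slicing, with a plain dict standing in for request.GET and the error handling reduced to ValueError; only the names mirror the function above:

def limited_sketch(items, params, max_limit=1000):
    # Parse offset/limit, default limit to max_limit and cap it there.
    offset = int(params.get('offset', 0))
    limit = int(params.get('limit', max_limit))
    if offset < 0 or limit < 0:
        raise ValueError('offset and limit must be non-negative')
    limit = min(max_limit, limit or max_limit)
    return items[offset:offset + limit]

# limited_sketch(list(range(10)), {'offset': '2', 'limit': '3'}) == [2, 3, 4]
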
Example #3
    def _wait_for_active(self, ironicclient, instance):
        """Wait for the node to be marked as ACTIVE in Ironic."""
        instance.refresh()
        if (instance.task_state == task_states.DELETING or
            instance.vm_state in (vm_states.ERROR, vm_states.DELETED)):
            raise exception.InstanceDeployFailure(
                _("Instance %s provisioning was aborted") % instance.uuid)

        node = _validate_instance_and_node(ironicclient, instance)
        if node.provision_state == ironic_states.ACTIVE:
            # job is done
            LOG.debug("Ironic node %(node)s is now ACTIVE",
                      dict(node=node.uuid), instance=instance)
            raise loopingcall.LoopingCallDone()

        if node.target_provision_state in (ironic_states.DELETED,
                                           ironic_states.AVAILABLE):
            # ironic is trying to delete it now
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        if node.provision_state in (ironic_states.NOSTATE,
                                    ironic_states.AVAILABLE):
            # ironic already deleted it
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        if node.provision_state == ironic_states.DEPLOYFAIL:
            # ironic failed to deploy
            msg = (_("Failed to provision instance %(inst)s: %(reason)s")
                   % {'inst': instance.uuid, 'reason': node.last_error})
            raise exception.InstanceDeployFailure(msg)

        _log_ironic_polling('become ACTIVE', node, instance)
Example #4
    def _check_prerequisites(self):
        """Sanity checks before attempting to mount SOFS."""

        # config is mandatory
        config = CONF.libvirt.scality_sofs_config
        if not config:
            msg = _("Value required for 'scality_sofs_config'")
            LOG.warn(msg)
            raise exception.NovaException(msg)

        # config can be a file path or a URL, check it
        if urlparse.urlparse(config).scheme == '':
            # turn local path into URL
            config = 'file://%s' % config
        try:
            urllib.request.urlopen(config, timeout=5).close()
        except urllib.error.URLError as e:
            msg = _("Cannot access 'scality_sofs_config': %s") % e
            LOG.warn(msg)
            raise exception.NovaException(msg)

        # mount.sofs must be installed
        if not os.access('/sbin/mount.sofs', os.X_OK):
            msg = _("Cannot execute /sbin/mount.sofs")
            LOG.warn(msg)
            raise exception.NovaException(msg)
Example #5
    def _validate(self, bdm_dict):
        """Basic data format validations."""
        dict_fields = set(key for key, _ in six.iteritems(bdm_dict))

        # Check that there are no bogus fields
        if not (dict_fields <=
                (self._fields | self._db_only_fields)):
            raise exception.InvalidBDMFormat(
                details=_("Some fields are invalid."))

        if bdm_dict.get('no_device'):
            return

        # Check that all required fields are there
        if (self._required_fields and
                not ((dict_fields & self._required_fields) ==
                      self._required_fields)):
            raise exception.InvalidBDMFormat(
                details=_("Some required fields are missing"))

        if 'delete_on_termination' in bdm_dict:
            bdm_dict['delete_on_termination'] = strutils.bool_from_string(
                bdm_dict['delete_on_termination'])

        if bdm_dict.get('device_name') is not None:
            validate_device_name(bdm_dict['device_name'])

        validate_and_default_volume_size(bdm_dict)

        if bdm_dict.get('boot_index'):
            try:
                bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
            except ValueError:
                raise exception.InvalidBDMFormat(
                    details=_("Boot index is invalid."))
Example #6
    def _attach_volume_iscsi(self, connection_info, instance, mountpoint):
        """Attach iscsi volume storage to VM instance."""
        instance_name = instance['name']
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        # Attach Volume to VM
        LOG.debug("Attach_volume: %(connection_info)s, %(instance_name)s, "
                  "%(mountpoint)s",
                  {'connection_info': connection_info,
                   'instance_name': instance_name,
                   'mountpoint': mountpoint},
                  instance=instance)

        data = connection_info['data']

        # Discover iSCSI Target
        device_name = self._iscsi_discover_target(data)[0]
        if device_name is None:
            raise exception.StorageError(
                reason=_("Unable to find iSCSI Target"))

        # Get the vmdk file name that the VM is pointing to
        hardware_devices = self._session._call_method(vim_util,
                        "get_dynamic_property", vm_ref,
                        "VirtualMachine", "config.hardware.device")
        (vmdk_file_path, adapter_type,
         disk_type) = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)

        self.attach_disk_to_vm(vm_ref, instance,
                               adapter_type, 'rdmp',
                               device_name=device_name)
        LOG.info(_("Mountpoint %(mountpoint)s attached to "
                   "instance %(instance_name)s"),
                 {'mountpoint': mountpoint, 'instance_name': instance_name},
                 instance=instance)
Example #7
    def _detach_volume_iscsi(self, connection_info, instance, mountpoint):
        """Detach volume storage to VM instance."""
        instance_name = instance['name']
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        # Detach Volume from VM
        LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
                  {'mountpoint': mountpoint, 'instance_name': instance_name},
                  instance=instance)
        data = connection_info['data']

        # Discover iSCSI Target
        device_name, uuid = self._iscsi_get_target(data)
        if device_name is None:
            raise exception.StorageError(
                reason=_("Unable to find iSCSI Target"))

        # Get the vmdk file name that the VM is pointing to
        hardware_devices = self._session._call_method(vim_util,
                        "get_dynamic_property", vm_ref,
                        "VirtualMachine", "config.hardware.device")
        device = vm_util.get_rdm_disk(hardware_devices, uuid)
        if device is None:
            raise exception.StorageError(reason=_("Unable to find volume"))
        self.detach_disk_from_vm(vm_ref, instance, device, destroy_disk=True)
        LOG.info(_("Mountpoint %(mountpoint)s detached from "
                   "instance %(instance_name)s"),
                 {'mountpoint': mountpoint, 'instance_name': instance_name},
                 instance=instance)
Example #8
    def _reset_state(self, req, id, body):
        """Permit admins to reset the state of a server."""
        context = req.environ["nova.context"]
        authorize(context, 'resetState')

        # Identify the desired state from the body
        try:
            state = state_map[body["os-resetState"]["state"]]
        except (TypeError, KeyError):
            msg = _("Desired state must be specified.  Valid states "
                    "are: %s") % ', '.join(sorted(state_map.keys()))
            raise exc.HTTPBadRequest(explanation=msg)

        instance = common.get_instance(self.compute_api, context, id)
        try:
            instance.vm_state = state
            instance.task_state = None
            instance.save(admin_state_reset=True)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::resetState %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)
Example #9
File: vm.py Project: Juniper/nova
    def _spp_pool_id(self, pool_name):
        """Returns the shared proc pool id for a given pool name.

        :param pool_name: The shared proc pool name.
        :return: The internal API id for the shared proc pool.
        """
        if (pool_name is None or
                pool_name == pvm_spp.DEFAULT_POOL_DISPLAY_NAME):
            # The default pool is 0
            return 0

        # Search for the pool with this name
        pool_wraps = pvm_spp.SharedProcPool.search(
            self.adapter, name=pool_name, parent=self.host_w)

        # Check to make sure there is a pool with the name, and only one pool.
        if len(pool_wraps) > 1:
            msg = (_('Multiple Shared Processing Pools with name %(pool)s.') %
                   {'pool': pool_name})
            raise exc.ValidationError(msg)
        elif len(pool_wraps) == 0:
            msg = (_('Unable to find Shared Processing Pool %(pool)s') %
                   {'pool': pool_name})
            raise exc.ValidationError(msg)

        # Return the singular pool id.
        return pool_wraps[0].id
Example #10
def check_string_length(value, name=None, min_length=0, max_length=None):
    """Check the length of specified string
    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    """
    if not isinstance(value, six.string_types):
        if name is None:
            msg = _("The input is not a string or unicode")
        else:
            msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if name is None:
        name = value

    if len(value) < min_length:
        msg = _("%(name)s has a minimum character requirement of "
                "%(min_length)s.") % {'name': name, 'min_length': min_length}
        raise exception.InvalidInput(message=msg)

    if max_length and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {'name': name, 'max_length': max_length}
        raise exception.InvalidInput(message=msg)
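
A dependency-free sketch of the same bounds check, raising built-in exceptions instead of Nova's InvalidInput; parameter names mirror the helper above:

def check_string_length_sketch(value, name, min_length=0, max_length=None):
    if not isinstance(value, str):
        raise TypeError('%s is not a string' % name)
    if len(value) < min_length:
        raise ValueError('%s needs at least %d characters' % (name, min_length))
    if max_length and len(value) > max_length:
        raise ValueError('%s has more than %d characters' % (name, max_length))

# check_string_length_sketch('web01', 'metadata.key', min_length=1, max_length=255)
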
Example #11
    def _setup_instance_group(self, context, filter_properties):
        update_group_hosts = False
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group_hint = scheduler_hints.get('group', None)
        if group_hint:
            group = objects.InstanceGroup.get_by_hint(context, group_hint)
            policies = set(('anti-affinity', 'affinity'))
            if any((policy in policies) for policy in group.policies):
                if ('affinity' in group.policies and
                        not self._supports_affinity):
                    msg = _("ServerGroupAffinityFilter not configured")
                    LOG.error(msg)
                    raise exception.NoValidHost(reason=msg)
                if ('anti-affinity' in group.policies and
                        not self._supports_anti_affinity):
                    msg = _("ServerGroupAntiAffinityFilter not configured")
                    LOG.error(msg)
                    raise exception.NoValidHost(reason=msg)
                update_group_hosts = True
                filter_properties.setdefault('group_hosts', set())
                user_hosts = set(filter_properties['group_hosts'])
                group_hosts = set(group.get_hosts(context))
                filter_properties['group_hosts'] = user_hosts | group_hosts
                filter_properties['group_policies'] = group.policies
        return update_group_hosts
Example #12
    def _action_reboot(self, req, id, body):
        if 'reboot' in body and 'type' in body['reboot']:
            if not isinstance(body['reboot']['type'], six.string_types):
                msg = _("Argument 'type' for reboot must be a string")
                LOG.error(msg)
                raise exc.HTTPBadRequest(explanation=msg)
            valid_reboot_types = ['HARD', 'SOFT']
            reboot_type = body['reboot']['type'].upper()
            if reboot_type not in valid_reboot_types:
                msg = _("Argument 'type' for reboot is not HARD or SOFT")
                LOG.error(msg)
                raise exc.HTTPBadRequest(explanation=msg)
        else:
            msg = _("Missing argument 'type' for reboot")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

        context = req.environ['nova.context']
        instance = self._get_server(context, req, id)

        try:
            self.compute_api.reboot(context, instance, reboot_type)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'reboot', id)
Example #13
def check_shadow_table(migrate_engine, table_name):
    """This method checks that table with ``table_name`` and
    corresponding shadow table have same columns.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
                         autoload=True)

    columns = {c.name: c for c in table.columns}
    shadow_columns = {c.name: c for c in shadow_table.columns}

    for name, column in columns.iteritems():
        if name not in shadow_columns:
            raise exception.NovaException(
                _("Missing column %(table)s.%(column)s in shadow table")
                        % {'column': name, 'table': shadow_table.name})
        shadow_column = shadow_columns[name]

        if not isinstance(shadow_column.type, type(column.type)):
            raise exception.NovaException(
                _("Different types in %(table)s.%(column)s and shadow table: "
                  "%(c_type)s %(shadow_c_type)s")
                        % {'column': name, 'table': table.name,
                           'c_type': column.type,
                           'shadow_c_type': shadow_column.type})

    for name, column in shadow_columns.iteritems():
        if name not in columns:
            raise exception.NovaException(
                _("Extra column %(table)s.%(column)s in shadow table")
                        % {'column': name, 'table': shadow_table.name})
    return True
Example #14
    def _report_final_resource_view(self, resources):
        """Report final calculate of physical memory, used virtual memory,
        disk, usable vCPUs, used virtual CPUs and PCI devices,
        including instance calculations and in-progress resource claims. These
        values will be exposed via the compute node table to the scheduler.
        """
        LOG.audit(_("Total physical ram (MB): %(pram)s, "
                    "total allocated virtual ram (MB): %(vram)s"),
                    {'pram': resources['memory_mb'],
                     'vram': resources['memory_mb_used']})
        LOG.audit(_("Total physical disk (GB): %(pdisk)s, "
                    "total allocated virtual disk (GB): %(vdisk)s"),
                  {'pdisk': resources['local_gb'],
                   'vdisk': resources['local_gb_used']})

        vcpus = resources['vcpus']
        if vcpus:
            LOG.audit(_("Total usable vcpus: %(tcpu)s, "
                        "total allocated vcpus: %(ucpu)s"),
                        {'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
        else:
            LOG.audit(_("Free VCPU information unavailable"))

        if 'pci_stats' in resources:
            LOG.audit(_("PCI stats: %s"), resources['pci_stats'])
Example #15
    def update(self, req, id, body):
        """Update server then pass on to version-specific controller."""

        ctxt = req.environ['nova.context']
        update_dict = {}

        if 'name' in body['server']:
            update_dict['display_name'] = body['server']['name']

        # TODO(oomichi): The following host_id validation code can be removed
        # when setting "'additionalProperties': False" in base_update schema.
        if 'host_id' in body['server']:
            msg = _("host_id cannot be updated.")
            raise exc.HTTPBadRequest(explanation=msg)

        if list(self.update_extension_manager):
            self.update_extension_manager.map(self._update_extension_point,
                                              body['server'], update_dict)

        instance = common.get_instance(self.compute_api, ctxt, id,
                                       want_objects=True,
                                       expected_attrs=['pci_devices'])
        try:
            # NOTE(mikal): this try block needs to stay because save() still
            # might throw an exception.
            req.cache_db_instance(instance)
            policy.enforce(ctxt, 'compute:update', instance)
            instance.update(update_dict)
            instance.save()
            return self._view_builder.show(req, instance)
        except exception.NotFound:
            msg = _("Instance could not be found")
            raise exc.HTTPNotFound(explanation=msg)
Example #16
    def create(self, req, body=None):
        context = req.environ["nova.context"]
        authorize(context)

        pool = None
        if body and "pool" in body:
            pool = body["pool"]
        try:
            address = self.network_api.allocate_floating_ip(context, pool)
            ip = self.network_api.get_floating_ip_by_address(context, address)
        except exception.NoMoreFloatingIps:
            if pool:
                msg = _("No more floating ips in pool %s.") % pool
            else:
                msg = _("No more floating ips available.")
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.FloatingIpLimitExceeded:
            if pool:
                msg = _("IP allocation over quota in pool %s.") % pool
            else:
                msg = _("IP allocation over quota.")
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.FloatingIpPoolNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        return _translate_floating_ip_view(ip)
Example #17
    def update_available_resource(self, context):
        """Override in-memory calculations of compute node resource usage based
        on data audited from the hypervisor layer.

        Add in resource claims in progress to account for operations that have
        declared a need for resources, but not necessarily retrieved them from
        the hypervisor layer yet.
        """
        LOG.audit(_("Auditing locally available compute resources"))
        resources = self.driver.get_available_resource(self.nodename)

        if not resources:
            # The virt driver does not support this function
            LOG.audit(_("Virt driver does not support "
                 "'get_available_resource'  Compute tracking is disabled."))
            self.compute_node = None
            return
        resources['host_ip'] = CONF.my_ip

        # TODO(berrange): remove this once all virt drivers are updated
        # to report topology
        if "numa_topology" not in resources:
            resources["numa_topology"] = None

        self._verify_resources(resources)

        self._report_hypervisor_resource_view(resources)

        return self._update_available_resource(context, resources)
Example #18
    def plug(self, instance, vif):
        vif_type = vif['type']

        LOG.debug('plug vif_type=%(vif_type)s instance=%(instance)s '
                  'vif=%(vif)s',
                  {'vif_type': vif_type, 'instance': instance,
                   'vif': vif})

        if vif_type is None:
            raise exception.NovaException(
                _("vif_type parameter must be present "
                  "for this vif_driver implementation"))

        if vif_type == network_model.VIF_TYPE_BRIDGE:
            self.plug_bridge(instance, vif)
        elif vif_type == network_model.VIF_TYPE_OVS:
            if self.ovs_hybrid_required(vif):
                self.plug_ovs_hybrid(instance, vif)
            else:
                self.plug_ovs(instance, vif)
        elif vif_type == network_model.VIF_TYPE_MIDONET:
            self.plug_midonet(instance, vif)
        else:
            raise exception.NovaException(
                _("Unexpected vif_type=%s") % vif_type)
Example #19
    def _sync_compute_node(self, context, resources):
        """Create or update the compute node DB record."""
        if not self.compute_node:
            # we need a copy of the ComputeNode record:
            service = self._get_service(context)
            if not service:
                # no service record, disable resource
                return

            compute_node_refs = service['compute_node']
            if compute_node_refs:
                for cn in compute_node_refs:
                    if cn.get('hypervisor_hostname') == self.nodename:
                        self.compute_node = cn
                        if self.pci_tracker:
                            self.pci_tracker.set_compute_node_id(cn['id'])
                        break

        if not self.compute_node:
            # Need to create the ComputeNode record:
            resources['service_id'] = service['id']
            self._create(context, resources)
            if self.pci_tracker:
                self.pci_tracker.set_compute_node_id(self.compute_node['id'])
            LOG.info(_('Compute_service record created for %(host)s:%(node)s')
                    % {'host': self.host, 'node': self.nodename})

        else:
            # just update the record:
            self._update(context, resources)
            LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
                    % {'host': self.host, 'node': self.nodename})
Example #20
    def _pull_missing_image(self, context, image_meta, instance):
        msg = 'Image name "%s" does not exist, fetching it...'
        LOG.debug(msg, image_meta['name'])

        # TODO(imain): It would be nice to do this with file like object
        # passing but that seems a bit complex right now.
        snapshot_directory = CONF.docker.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, uuid.uuid4().hex)

                images.fetch(context, image_meta['id'], out_path,
                             instance['user_id'], instance['project_id'])
                self.docker.load_repository_file(
                    self._encode_utf8(image_meta['name']),
                    out_path
                )
            except Exception as e:
                LOG.warning(_('Cannot load repository file: %s'),
                            e, instance=instance, exc_info=True)
                msg = _('Cannot load repository file: {0}')
                raise exception.NovaException(msg.format(e),
                                              instance_id=image_meta['name'])

        return self.docker.inspect_image(self._encode_utf8(image_meta['name']))
Example #21
    def _test(self, type_, unit, total, used, requested, limit):
        """Test if the given type of resource needed for a claim can be safely
        allocated.
        """
        LOG.audit(_('Total %(type)s: %(total)d %(unit)s, used: %(used).02f '
                    '%(unit)s'),
                  {'type': type_, 'total': total, 'unit': unit, 'used': used},
                  instance=self.instance)

        if limit is None:
            # treat resource as unlimited:
            LOG.audit(_('%(type)s limit not specified, defaulting to '
                        'unlimited'), {'type': type_}, instance=self.instance)
            return

        free = limit - used

        # Oversubscribed resource policy info:
        LOG.audit(_('%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f '
                    '%(unit)s'),
                  {'type': type_, 'limit': limit, 'free': free, 'unit': unit},
                  instance=self.instance)

        if requested > free:
            return (_('Free %(type)s %(free).02f '
                      '%(unit)s < requested %(requested)d %(unit)s') %
                      {'type': type_, 'free': free, 'unit': unit,
                       'requested': requested})
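
The claim test reduces to: with no limit the resource is treated as unlimited, otherwise the claim fails only when the requested amount exceeds limit - used. A standalone sketch with example numbers, not taken from any real deployment:

def test_resource(used, requested, limit=None):
    # Return None on success, or a failure message mirroring the logic above.
    if limit is None:
        return None                      # unlimited resource, claim succeeds
    free = limit - used
    if requested > free:
        return 'Free %.2f < requested %d' % (free, requested)
    return None

# test_resource(used=30000, requested=4096, limit=32768)
# -> 'Free 2768.00 < requested 4096'
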
Example #22
    def setup_os_root(self, root):
        LOG.debug("Inspecting guest OS root filesystem %s", root)
        mounts = self.handle.inspect_get_mountpoints(root)

        if len(mounts) == 0:
            raise exception.NovaException(
                _("No mount points found in %(root)s of %(imgfile)s") %
                {'root': root, 'imgfile': self.imgfile})

        # the root directory must be mounted first
        mounts.sort(key=lambda mount: mount[0])

        root_mounted = False
        for mount in mounts:
            LOG.debug("Mounting %(dev)s at %(dir)s",
                      {'dev': mount[1], 'dir': mount[0]})
            try:
                self.handle.mount_options("", mount[1], mount[0])
                root_mounted = True
            except RuntimeError as e:
                msg = _("Error mounting %(device)s to %(dir)s in image"
                        " %(imgfile)s with libguestfs (%(e)s)") % \
                      {'imgfile': self.imgfile, 'device': mount[1],
                       'dir': mount[0], 'e': e}
                if root_mounted:
                    LOG.debug(msg)
                else:
                    raise exception.NovaException(msg)
Example #23
    def from_api(cls, api_dict, image_uuid_specified):
        """Transform the API format of data to the internally used one.

        Only validate if the source_type field makes sense.
        """
        if not api_dict.get('no_device'):

            source_type = api_dict.get('source_type')
            device_uuid = api_dict.get('uuid')
            destination_type = api_dict.get('destination_type')

            if source_type not in ('volume', 'image', 'snapshot', 'blank'):
                raise exception.InvalidBDMFormat(
                    details=_("Invalid source_type field."))
            elif source_type == 'blank' and device_uuid:
                raise exception.InvalidBDMFormat(
                    details=_("Invalid device UUID."))
            elif source_type != 'blank':
                if not device_uuid:
                    raise exception.InvalidBDMFormat(
                        details=_("Missing device UUID."))
                api_dict[source_type + '_id'] = device_uuid
            if source_type == 'image' and destination_type == 'local':
                boot_index = api_dict.get('boot_index', -1)

                # If this bdm is generated from --image, then
                # source_type = image and destination_type = local is allowed.
                if not (image_uuid_specified and boot_index == 0):
                    raise exception.InvalidBDMFormat(
                        details=_("Mapping image to local is not supported."))

        api_dict.pop('uuid', None)
        return cls(api_dict)
Example #24
    def __call__(self, req):
        util = nova.api.openstack.placement.util
        try:
            microversion = extract_version(req.headers)
        except ValueError as exc:
            raise webob.exc.HTTPNotAcceptable(
                _('Invalid microversion: %(error)s') % {'error': exc},
                json_formatter=util.json_error_formatter)
        except TypeError as exc:
            raise webob.exc.HTTPBadRequest(
                _('Invalid microversion: %(error)s') % {'error': exc},
                json_formatter=util.json_error_formatter)

        req.environ[MICROVERSION_ENVIRON] = microversion
        microversion_header = '%s %s' % (SERVICE_TYPE, microversion)

        try:
            response = req.get_response(self.application)
        except webob.exc.HTTPError as exc:
            # If there was an error in the application we still need
            # to send the microversion header, so add the header and
            # re-raise the exception.
            exc.headers.add(Version.HEADER, microversion_header)
            raise exc

        response.headers.add(Version.HEADER, microversion_header)
        response.headers.add('vary', Version.HEADER)
        return response
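
extract_version() is defined elsewhere in the placement code; as a rough illustration of the kind of parsing it performs, here is a hypothetical helper that turns a 'major.minor' header value into a tuple. The name and error behaviour are assumptions, not the real implementation:

def parse_microversion(value):
    # Hypothetical: split 'major.minor' and convert both parts to int.
    try:
        major, minor = value.split('.')
        return int(major), int(minor)
    except (ValueError, AttributeError):
        raise ValueError('Invalid microversion: %r' % value)

# parse_microversion('1.4') -> (1, 4)
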
Example #25
    def _handle_instance_id_request(self, req):
        instance_id = req.headers.get('X-Instance-ID')
        tenant_id = req.headers.get('X-Tenant-ID')
        signature = req.headers.get('X-Instance-ID-Signature')
        remote_address = req.headers.get('X-Forwarded-For')

        # Ensure that only one header was passed

        if instance_id is None:
            msg = _('X-Instance-ID header is missing from request.')
        elif signature is None:
            msg = _('X-Instance-ID-Signature header is missing from request.')
        elif tenant_id is None:
            msg = _('X-Tenant-ID header is missing from request.')
        elif not isinstance(instance_id, six.string_types):
            msg = _('Multiple X-Instance-ID headers found within request.')
        elif not isinstance(tenant_id, six.string_types):
            msg = _('Multiple X-Tenant-ID headers found within request.')
        else:
            msg = None

        if msg:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        self._validate_shared_secret(instance_id, signature,
                                     remote_address)

        return self._get_meta_by_instance_id(instance_id, tenant_id,
                                             remote_address)
Example #26
        def wrapped(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as exc:
                if isinstance(exc, webob.exc.WSGIHTTPException):
                    if isinstance(errors, int):
                        t_errors = (errors,)
                    else:
                        t_errors = errors
                    if exc.code in t_errors:
                        raise
                elif isinstance(exc, exception.PolicyNotAuthorized):
                    # Note(cyeoh): Special case to handle
                    # PolicyNotAuthorized exceptions so every
                    # extension method does not need to wrap authorize
                    # calls. ResourceExceptionHandler silently
                    # converts NotAuthorized to HTTPForbidden
                    raise
                elif isinstance(exc, exception.ValidationError):
                    # Note(oomichi): Handle a validation error, which
                    # happens due to invalid API parameters, as an
                    # expected error.
                    raise

                LOG.exception(_("Unexpected exception in API method"))
                msg = _('Unexpected API Error. Please report this at '
                    'http://bugs.launchpad.net/nova/ and attach the Nova '
                    'API log if possible.\n%s') % type(exc)
                raise webob.exc.HTTPInternalServerError(explanation=msg)
Example #27
def qemu_img_info(path, format=None):
    """Return an object containing the parsed output from qemu-img info."""
    # TODO(mikal): this code should not be referring to a libvirt specific
    # flag.
    # NOTE(sirp): The config option import must go here to avoid an import
    # cycle
    CONF.import_opt('images_type', 'nova.virt.libvirt.imagebackend',
                    group='libvirt')
    if not os.path.exists(path) and CONF.libvirt.images_type != 'rbd':
        raise exception.DiskNotFound(location=path)

    try:
        cmd = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path)
        if format is not None:
            cmd = cmd + ('-f', format)
        out, err = utils.execute(*cmd)
    except processutils.ProcessExecutionError as exp:
        msg = (_("qemu-img failed to execute on %(path)s : %(exp)s") %
                {'path': path, 'exp': exp})
        raise exception.InvalidDiskInfo(reason=msg)

    if not out:
        msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") %
               {'path': path, 'error': err})
        raise exception.InvalidDiskInfo(reason=msg)

    return imageutils.QemuImgInfo(out)
Example #28
    def teardown(self):
        LOG.debug("Tearing down appliance")

        try:
            try:
                self.handle.aug_close()
            except RuntimeError as e:
                LOG.warn(_("Failed to close augeas %s"), e)

            try:
                self.handle.shutdown()
            except AttributeError:
                # Older libguestfs versions don't have an explicit shutdown
                pass
            except RuntimeError as e:
                LOG.warn(_("Failed to shutdown appliance %s"), e)

            try:
                self.handle.close()
            except AttributeError:
                # Older libguestfs versions don't have an explicit close
                pass
            except RuntimeError as e:
                LOG.warn(_("Failed to close guest handle %s"), e)
        finally:
            # dereference object and implicitly close()
            self.handle = None
Example #29
    def _set_metadata(self, req, id, body):
        """Replaces the aggregate's existing metadata with new metadata."""
        context = _get_context(req)
        authorize(context)

        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            metadata = body["metadata"]
        except KeyError:
            raise exc.HTTPBadRequest()

        # The metadata should be a dict
        if not isinstance(metadata, dict):
            msg = _('The value of metadata must be a dict')
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            for key, value in metadata.items():
                utils.check_string_length(key, "metadata.key", 1, 255)
                if value is not None:
                    utils.check_string_length(value, "metadata.value", 0, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.update_aggregate_metadata(context,
                                                           id, metadata)
        except exception.AggregateNotFound:
            msg = _('Cannot set metadata %(metadata)s in aggregate'
                    ' %(id)s') % {'metadata': metadata, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        return self._marshall_aggregate(aggregate)
Example #30
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']

        authorize(context)
        authorize(context, action="sync_instances")

        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        if body:
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if isinstance(deleted, six.string_types):
            try:
                deleted = strutils.bool_from_string(deleted, strict=True)
            except ValueError as err:
                raise exc.HTTPBadRequest(explanation=six.text_type(err))
        if updated_since:
            try:
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
Example #31
    def update(self, req, id, body):
        """Perform service update

        Starting with microversion 2.53, the service uuid is passed in on the
        path of the request to uniquely identify the service record on which to
        perform a given update, which is defined in the body of the request.
        """
        service_id = id
        # Validate that the service ID is a UUID.
        if not uuidutils.is_uuid_like(service_id):
            msg = _('Invalid uuid %s') % service_id
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Validate the request context against the policy.
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)

        # Get the service by uuid.
        try:
            service = self.host_api.service_get_by_id(context, service_id)
            # At this point the context is targeted to the cell that the
            # service was found in so we don't need to do any explicit cell
            # targeting below.
        except exception.ServiceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        # Return 400 if service.binary is not nova-compute.
        # Before the earlier PUT handlers were made cells-aware, you could
        # technically disable a nova-scheduler service, although that doesn't
        # really do anything within Nova and is just confusing. Now trying to
        # do that will fail as a nova-scheduler service won't have a host
        # mapping so you'll get a 404. In this new microversion, we close that
        # old gap and make sure you can only enable/disable and set forced_down
        # on nova-compute services since those are the only ones that make
        # sense to update for those operations.
        if service.binary != 'nova-compute':
            msg = (_('Updating a %(binary)s service is not supported. Only '
                     'nova-compute services can be updated.') % {
                         'binary': service.binary
                     })
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Now determine the update to perform based on the body. We are
        # intentionally not using _perform_action or the other old-style
        # action functions.
        if 'status' in body:
            # This is a status update for either enabled or disabled.
            if body['status'] == 'enabled':

                # Fail if 'disabled_reason' was requested when enabling the
                # service since those two combined don't make sense.
                if body.get('disabled_reason'):
                    msg = _("Specifying 'disabled_reason' with status "
                            "'enabled' is invalid.")
                    raise webob.exc.HTTPBadRequest(explanation=msg)

                service.disabled = False
                service.disabled_reason = None
            elif body['status'] == 'disabled':
                service.disabled = True
                # The disabled reason is optional.
                service.disabled_reason = body.get('disabled_reason')

        # This is intentionally not an elif, i.e. it's in addition to the
        # status update.
        if 'forced_down' in body:
            service.forced_down = strutils.bool_from_string(
                body['forced_down'], strict=True)

        # Check to see if anything was actually updated since the schema does
        # not define any required fields.
        if not service.obj_what_changed():
            msg = _("No updates were requested. Fields 'status' or "
                    "'forced_down' should be specified.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Now save our updates to the service record in the database.
        service.save()

        # Return the full service record details.
        additional_fields = ['forced_down']
        return {
            'service': self._get_service_detail(service, additional_fields,
                                                req)
        }
Example #32
    def create_image(self, prepare_template, base, size, *args, **kwargs):
        def encrypt_lvm_image():
            dmcrypt.create_volume(
                self.path.rpartition('/')[2], self.lv_path,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size, key)

        filename = self._get_lock_name(base)

        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def create_lvm_image(base, size):
            base_size = disk.get_disk_size(base)
            self.verify_base_size(base, size, base_size=base_size)
            resize = size > base_size if size else False
            size = size if resize else base_size
            lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse)
            if self.ephemeral_key_uuid is not None:
                encrypt_lvm_image()
            # NOTE: by calling convert_image_unsafe here we're
            # telling qemu-img convert to do format detection on the input,
            # because we don't know what the format is. For example,
            # we might have downloaded a qcow2 image, or created an
            # ephemeral filesystem locally, we just don't know here. Having
            # audited this, all current sources have been sanity checked,
            # either because they're locally generated, or because they have
            # come from images.fetch_to_raw. However, this is major code smell.
            images.convert_image_unsafe(base,
                                        self.path,
                                        self.driver_format,
                                        run_as_root=True)
            if resize:
                disk.resize2fs(self.path, run_as_root=True)

        generated = 'ephemeral_size' in kwargs
        if self.ephemeral_key_uuid is not None:
            if 'context' in kwargs:
                try:
                    # NOTE(dgenin): Key manager corresponding to the
                    # specific backend catches and reraises an
                    # exception if key retrieval fails.
                    key = self.key_manager.get(
                        kwargs['context'],
                        self.ephemeral_key_uuid).get_encoded()
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Failed to retrieve ephemeral "
                                  "encryption key")
            else:
                raise exception.InternalError(
                    _("Instance disk to be encrypted but no context provided"))
        # Generate images with specified size right on volume
        if generated and size:
            lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse)
            with self.remove_volume_on_error(self.path):
                if self.ephemeral_key_uuid is not None:
                    encrypt_lvm_image()
                prepare_template(target=self.path, *args, **kwargs)
        else:
            if not os.path.exists(base):
                prepare_template(target=base, *args, **kwargs)
            with self.remove_volume_on_error(self.path):
                create_lvm_image(base, size)
Example #33
def create(name,
           memory,
           vcpus,
           root_gb,
           ephemeral_gb=0,
           flavorid=None,
           swap=0,
           rxtx_factor=1.0,
           is_public=True,
           description=None):
    """Creates flavors."""
    if not flavorid:
        flavorid = uuidutils.generate_uuid()

    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
        'description': description
    }

    if isinstance(name, str):
        name = name.strip()

    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    #             in through json as an integer, so we convert it here.
    flavorid = str(flavorid)

    # NOTE(wangbo): validate attributes of the creating flavor.
    # ram and vcpus should be positive ( > 0) integers.
    # disk, ephemeral and swap should be non-negative ( >= 0) integers.
    flavor_attributes = {
        'memory_mb': ('ram', 1),
        'vcpus': ('vcpus', 1),
        'root_gb': ('disk', 0),
        'ephemeral_gb': ('ephemeral', 0),
        'swap': ('swap', 0)
    }

    for key, value in flavor_attributes.items():
        kwargs[key] = utils.validate_integer(kwargs[key], value[0], value[1],
                                             db_const.MAX_INT)

    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        if (kwargs['rxtx_factor'] <= 0
                or kwargs['rxtx_factor'] > db_const.SQL_SP_FLOAT_MAX):
            raise ValueError()
    except ValueError:
        msg = _("'rxtx_factor' argument must be a float between 0 and %g")
        raise exception.InvalidInput(reason=msg % db_const.SQL_SP_FLOAT_MAX)

    kwargs['name'] = name
    kwargs['flavorid'] = flavorid
    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))

    flavor = objects.Flavor(context=context.get_admin_context(), **kwargs)
    flavor.create()
    return flavor
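
The rxtx_factor check above accepts any float in (0, SQL_SP_FLOAT_MAX]. A standalone sketch of just that bound check; the constant below is a stand-in, not the real db_const value:

SQL_SP_FLOAT_MAX_SKETCH = 3.402823e+38   # placeholder upper bound

def validate_rxtx_factor(value):
    try:
        value = float(value)
        if value <= 0 or value > SQL_SP_FLOAT_MAX_SKETCH:
            raise ValueError()
    except ValueError:
        raise ValueError("'rxtx_factor' must be a float between 0 and %g"
                         % SQL_SP_FLOAT_MAX_SKETCH)
    return value

# validate_rxtx_factor('2.0') -> 2.0
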
Example #34
def validate_extra_spec_keys(key_names_list):
    for key_name in key_names_list:
        if not VALID_EXTRASPEC_NAME_REGEX.match(key_name):
            expl = _('Key Names can only contain alphanumeric characters, '
                     'periods, dashes, underscores, colons and spaces.')
            raise exception.InvalidInput(message=expl)
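
VALID_EXTRASPEC_NAME_REGEX is defined elsewhere in Nova; an illustrative pattern matching the constraint spelled out in the error message (alphanumeric characters, periods, dashes, underscores, colons and spaces) could look like the following. This is an assumption, not the actual regex:

import re

EXTRASPEC_NAME_RE = re.compile(r'\A[\w\. :-]+\Z')  # \w covers alphanumerics and _

def validate_extra_spec_keys_sketch(key_names_list):
    for key_name in key_names_list:
        if not EXTRASPEC_NAME_RE.match(key_name):
            raise ValueError('Invalid extra spec key: %r' % key_name)

# validate_extra_spec_keys_sketch(['hw:cpu_policy', 'quota:disk_read_bytes_sec'])
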
Example #35
def set_inventories(req):
    """PUT to set all inventory for a resource provider.

    Create, update and delete inventory as required to reset all
    the inventory.

    If the resource generation is out of sync, return a 409.
    If an inventory to be deleted is in use, return a 409.
    If any inventory to be created or updated has settings which are
    invalid (for example reserved exceeds capacity), return a 400.

    On success return a 200 with an application/json body representing
    the inventories.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid)

    data = _extract_inventories(req.body, schema.PUT_INVENTORY_SCHEMA)
    if data['resource_provider_generation'] != resource_provider.generation:
        raise webob.exc.HTTPConflict(
            _('resource provider generation conflict'))

    inv_list = []
    for res_class, inventory_data in data['inventories'].items():
        inventory = _make_inventory_object(resource_provider, res_class,
                                           **inventory_data)
        inv_list.append(inventory)
    inventories = rp_obj.InventoryList(objects=inv_list)

    try:
        resource_provider.set_inventory(inventories)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unknown resource class in inventory for resource provider '
              '%(rp_uuid)s: %(error)s') % {
                  'rp_uuid': resource_provider.uuid,
                  'error': exc
              })
    except exception.InventoryWithResourceClassNotFound as exc:
        raise webob.exc.HTTPConflict(
            _('Race condition detected when setting inventory. No inventory '
              'record with resource class for resource provider '
              '%(rp_uuid)s: %(error)s') % {
                  'rp_uuid': resource_provider.uuid,
                  'error': exc
              })
    except (exception.ConcurrentUpdateDetected,
            db_exc.DBDuplicateEntry) as exc:
        raise webob.exc.HTTPConflict(
            _('update conflict: %(error)s') % {'error': exc})
    except exception.InventoryInUse as exc:
        raise webob.exc.HTTPConflict(_('update conflict: %(error)s') %
                                     {'error': exc},
                                     comment=errors.INVENTORY_INUSE)
    except exception.InvalidInventoryCapacity as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to update inventory for resource provider '
              '%(rp_uuid)s: %(error)s') % {
                  'rp_uuid': resource_provider.uuid,
                  'error': exc
              })

    return _send_inventories(req, resource_provider, inventories)
Example #36
        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            context.update_store()
            log_vars = {'image_location': image_location,
                        'image_path': image_path}

            def _update_image_state(context, image_uuid, image_state):
                metadata = {'properties': {'image_state': image_state}}
                self.service.update(context, image_uuid, metadata,
                                    purge_props=False)

            def _update_image_data(context, image_uuid, image_data):
                metadata = {}
                self.service.update(context, image_uuid, metadata, image_data,
                                    purge_props=False)

            try:
                _update_image_state(context, image_uuid, 'downloading')

                try:
                    parts = []
                    elements = manifest.find('image').getiterator('filename')
                    for fn_element in elements:
                        part = self._download_file(bucket,
                                                   fn_element.text,
                                                   image_path)
                        parts.append(part)

                    # NOTE(vish): this may be suboptimal, should we use cat?
                    enc_filename = os.path.join(image_path, 'image.encrypted')
                    with open(enc_filename, 'w') as combined:
                        for filename in parts:
                            with open(filename) as part:
                                shutil.copyfileobj(part, combined)

                except Exception:
                    LOG.exception(_LE("Failed to download %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_download')
                    return

                _update_image_state(context, image_uuid, 'decrypting')

                try:
                    hex_key = manifest.find('image/ec2_encrypted_key').text
                    encrypted_key = binascii.a2b_hex(hex_key)
                    hex_iv = manifest.find('image/ec2_encrypted_iv').text
                    encrypted_iv = binascii.a2b_hex(hex_iv)

                    dec_filename = os.path.join(image_path, 'image.tar.gz')
                    self._decrypt_image(context, enc_filename, encrypted_key,
                                        encrypted_iv, dec_filename)
                except Exception:
                    LOG.exception(_LE("Failed to decrypt %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_decrypt')
                    return

                _update_image_state(context, image_uuid, 'untarring')

                try:
                    unz_filename = self._untarzip_image(image_path,
                                                        dec_filename)
                except Exception:
                    LOG.exception(_LE("Failed to untar %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_untar')
                    return

                _update_image_state(context, image_uuid, 'uploading')
                try:
                    with open(unz_filename) as image_file:
                        _update_image_data(context, image_uuid, image_file)
                except Exception:
                    LOG.exception(_LE("Failed to upload %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_upload')
                    return

                metadata = {'status': 'active',
                            'properties': {'image_state': 'available'}}
                self.service.update(context, image_uuid, metadata,
                        purge_props=False)

                shutil.rmtree(image_path)
            except exception.ImageNotFound:
                LOG.info(_("Image %s was deleted underneath us"), image_uuid)
                return
Example #37
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              allocations,
              network_info=None,
              block_device_info=None,
              power_on=True,
              accel_info=None):

        LOG.info("Spawning new instance %s on zVM hypervisor",
                 instance.name,
                 instance=instance)

        if self._hypervisor.guest_exists(instance):
            raise exception.InstanceExists(name=instance.name)

        os_distro = image_meta.properties.get('os_distro')
        if os_distro is None or len(os_distro) == 0:
            reason = _("The `os_distro` image metadata property is required")
            raise exception.InvalidInput(reason=reason)

        try:
            spawn_start = time.time()

            transportfiles = zvmutils.generate_configdrive(
                context, instance, injected_files, network_info,
                admin_password)

            spawn_image_name = self._get_image_info(context, image_meta.id,
                                                    os_distro)
            disk_list, eph_list = self._set_disk_list(instance,
                                                      spawn_image_name,
                                                      block_device_info)

            # Create the guest vm
            self._hypervisor.guest_create(instance.name, instance.vcpus,
                                          instance.memory_mb, disk_list)

            # Deploy image to the guest vm
            self._hypervisor.guest_deploy(instance.name,
                                          spawn_image_name,
                                          transportfiles=transportfiles)

            # Handle ephemeral disks
            if eph_list:
                self._hypervisor.guest_config_minidisks(
                    instance.name, eph_list)
            # Setup network for z/VM instance
            self._wait_vif_plug_events(instance.name, os_distro, network_info,
                                       instance)

            self._hypervisor.guest_start(instance.name)
            spawn_time = time.time() - spawn_start
            LOG.info("Instance spawned successfully in %s seconds",
                     spawn_time,
                     instance=instance)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    "Deploy instance %(instance)s "
                    "failed with reason: %(err)s", {
                        'instance': instance.name,
                        'err': err
                    },
                    instance=instance)
                try:
                    self.destroy(context, instance, network_info,
                                 block_device_info)
                except Exception:
                    LOG.exception("Failed to destroy instance",
                                  instance=instance)
Exemplo n.º 38
0
class ConsoleTypeInvalid(Invalid):
    msg_fmt = _("Invalid console type %(console_type)s")
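The exception snippets in this and the following examples all share one pattern: msg_fmt is a translatable template whose %(name)s placeholders are filled from the keyword arguments passed when the exception is raised. A simplified, illustrative sketch of that mechanism (not the actual nova.exception base class, which also handles error codes and formatting failures):

class NovaExceptionSketch(Exception):
    msg_fmt = "An unknown exception occurred."

    def __init__(self, message=None, **kwargs):
        if message is None:
            # Fill the %(placeholder)s fields from the keyword arguments.
            message = self.msg_fmt % kwargs
        super().__init__(message)


class ConsoleTypeInvalidSketch(NovaExceptionSketch):
    msg_fmt = "Invalid console type %(console_type)s"


# raise ConsoleTypeInvalidSketch(console_type='bogus')
# -> ConsoleTypeInvalidSketch: Invalid console type bogus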
Exemplo n.º 39
0
class ConsoleNotFoundForInstance(ConsoleNotFound):
    msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
Exemplo n.º 40
0
    def _add_floating_ip(self, req, id, body):
        """Associate floating_ip to an instance."""
        context = req.environ['nova.context']
        context.can(fi_policies.BASE_POLICY_NAME)

        address = body['addFloatingIp']['address']

        instance = common.get_instance(self.compute_api,
                                       context,
                                       id,
                                       expected_attrs=['flavor'])
        cached_nwinfo = instance.get_network_info()
        if not cached_nwinfo:
            LOG.warning(
                'Info cache is %r during associate with no nw_info cache',
                instance.info_cache,
                instance=instance)
            msg = _('Instance network is not ready yet')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        fixed_ips = cached_nwinfo.fixed_ips()
        if not fixed_ips:
            msg = _('No fixed IPs associated to instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        fixed_address = None
        if 'fixed_address' in body['addFloatingIp']:
            fixed_address = body['addFloatingIp']['fixed_address']
            for fixed in fixed_ips:
                if fixed['address'] == fixed_address:
                    break
            else:
                msg = _('Specified fixed address not assigned to instance')
                raise webob.exc.HTTPBadRequest(explanation=msg)

        if not fixed_address:
            try:
                fixed_address = next(ip['address'] for ip in fixed_ips
                                     if netutils.is_valid_ipv4(ip['address']))
            except StopIteration:
                msg = _('Unable to associate floating IP %(address)s '
                        'to any fixed IPs for instance %(id)s. '
                        'Instance has no fixed IPv4 addresses to '
                        'associate.') % ({
                            'address': address,
                            'id': id
                        })
                raise webob.exc.HTTPBadRequest(explanation=msg)
            if len(fixed_ips) > 1:
                LOG.warning(
                    'multiple fixed_ips exist, using the first '
                    'IPv4 fixed_ip: %s', fixed_address)

        try:
            self.network_api.associate_floating_ip(context,
                                                   instance,
                                                   floating_address=address,
                                                   fixed_address=fixed_address)
        except exception.FloatingIpAssociated:
            msg = _('floating IP is already associated')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.FloatingIpAssociateFailed as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        except exception.NoFloatingIpInterface:
            msg = _('l3driver call to add floating IP failed')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.FloatingIpNotFoundForAddress:
            msg = _('floating IP not found')
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.format_message())
        except Exception as e:
            msg = _('Unable to associate floating IP %(address)s to '
                    'fixed IP %(fixed_address)s for instance %(id)s. '
                    'Error: %(error)s') % ({
                        'address': address,
                        'fixed_address': fixed_address,
                        'id': id,
                        'error': e
                    })
            LOG.exception(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return webob.Response(status_int=202)
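For context, the handler above is invoked through the server actions API; the request body it expects has roughly this shape (addresses here are placeholders):

# POST /servers/{server_id}/action
body = {
    "addFloatingIp": {
        "address": "172.24.4.10",        # floating IP to associate
        "fixed_address": "192.168.1.5",  # optional; auto-selected if omitted
    }
}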
Exemplo n.º 41
0
class ConsolePoolNotFoundForHostType(NotFound):
    msg_fmt = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")
Exemplo n.º 42
0
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    msg_fmt = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")
Exemplo n.º 43
0
class ConsolePoolNotFound(NotFound):
    msg_fmt = _("Console pool %(pool_id)s could not be found.")
Exemplo n.º 44
0
class ConsoleNotFound(NotFound):
    msg_fmt = _("Console %(console_id)s could not be found.")
Exemplo n.º 45
0
class MigrationNotFound(NotFound):
    msg_fmt = _("Migration %(migration_id)s could not be found.")
Exemplo n.º 46
0
class ConsolePoolExists(NovaException):
    msg_fmt = _("Console pool with host %(host)s, console_type "
                "%(console_type)s and compute_host %(compute_host)s "
                "already exists.")
Exemplo n.º 47
0
class SecurityGroupRuleExists(Invalid):
    ec2_code = 'InvalidPermission.Duplicate'
    msg_fmt = _("Rule already exists in group: %(rule)s")
Exemplo n.º 48
0
class MigrationNotFoundByStatus(MigrationNotFound):
    msg_fmt = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")
Exemplo n.º 49
0
class SecurityGroupDefaultRuleNotFound(Invalid):
    msg_fmt = _("Security group default rule (%rule_id)s not found.")
Exemplo n.º 50
0
class NoUniqueMatch(NovaException):
    msg_fmt = _("No Unique Match Found.")
    code = 409
Exemplo n.º 51
0
class SecurityGroupExistsForInstance(Invalid):
    msg_fmt = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")
Exemplo n.º 52
0
class SecurityGroupCannotBeApplied(Invalid):
    msg_fmt = _("Network requires port_security_enabled and subnet associated"
                " in order to apply security groups.")
Exemplo n.º 53
0
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    msg_fmt = _("Security group with rule %(rule_id)s not found.")
Exemplo n.º 54
0
class SecurityGroupNotExistsForInstance(Invalid):
    msg_fmt = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")
Exemplo n.º 55
0
class SecurityGroupNotFound(NotFound):
    msg_fmt = _("Security group %(security_group_id)s not found.")
Exemplo n.º 56
0
class SecurityGroupExists(Invalid):
    ec2_code = 'InvalidGroup.Duplicate'
    msg_fmt = _("Security group %(security_group_name)s already exists "
                "for project %(project_id)s.")
Exemplo n.º 57
0
class ReservationNotFound(QuotaNotFound):
    msg_fmt = _("Quota reservation %(uuid)s could not be found.")
Exemplo n.º 58
0
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    msg_fmt = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")
Exemplo n.º 59
0
class QuotaUsageNotFound(QuotaNotFound):
    msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
Exemplo n.º 60
0
class OverQuota(NovaException):
    msg_fmt = _("Quota exceeded for resources: %(overs)s")