Example 1
 def get(self, context, name=None, id=None, map_exception=False):
     neutron = neutronapi.get_client(context)
     try:
         if not id and name:
             # NOTE(flwang): The project id should be honoured so that a
             # user with the admin role but a non-admin project gets the
             # correct security group when querying by name, and to avoid
             # returning duplicate records with the same name.
             id = neutronv20.find_resourceid_by_name_or_id(
                 neutron, 'security_group', name, context.project_id)
         group = neutron.show_security_group(id).get('security_group')
         return self._convert_to_nova_security_group_format(group)
     except n_exc.NeutronClientNoUniqueMatch as e:
         raise exception.NoUniqueMatch(six.text_type(e))
     except n_exc.NeutronClientException as e:
         exc_info = sys.exc_info()
         if e.status_code == 404:
             LOG.debug("Neutron security group %s not found", name)
             raise exception.SecurityGroupNotFound(six.text_type(e))
         else:
             LOG.error(_LE("Neutron Error: %s"), e)
             six.reraise(*exc_info)
     except TypeError as e:
         LOG.error(_LE("Neutron Error: %s"), e)
         msg = _("Invalid security group name: %(name)s.") % {"name": name}
         raise exception.SecurityGroupNotFound(six.text_type(msg))
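A note on the re-raise pattern above: `six.reraise(*exc_info)` re-raises the exception captured with `sys.exc_info()` together with its original traceback, and works on both Python 2 and Python 3, whereas the old `raise type, value, tb` form is Python 2 only. A minimal, self-contained sketch of the pattern:

    import sys

    import six

    def risky():
        raise ValueError('boom')

    try:
        risky()
    except ValueError:
        exc_info = sys.exc_info()
        # Logging or cleanup can run here and may itself raise;
        # the saved tuple still lets us re-raise the original
        # error with its original traceback afterwards.
        six.reraise(*exc_info)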
Example 2
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance, the
                # group should be present on every port it was added to,
                # since that is how the nova API attaches it. If it is
                # missing from this port, move on; a 404 is raised only
                # if the group is found on none of the instance's ports.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance.uuid})
            self.raise_not_found(msg)
Example 3
    def _run(self, name, method_type, args, kwargs, func=None):
        if method_type not in ('pre', 'post'):
            msg = _("Wrong type of hook method. "
                    "Only 'pre' and 'post' type allowed")
            raise ValueError(msg)

        for e in self.extensions:
            obj = e.obj
            hook_method = getattr(obj, method_type, None)
            if hook_method:
                LOG.debug("Running %(name)s %(type)s-hook: %(obj)s",
                          {'name': name, 'type': method_type, 'obj': obj})
                try:
                    if func:
                        hook_method(func, *args, **kwargs)
                    else:
                        hook_method(*args, **kwargs)
                except FatalHookException:
                    msg = _LE("Fatal Exception running %(name)s "
                              "%(type)s-hook: %(obj)s")
                    LOG.exception(msg, {'name': name, 'type': method_type,
                                        'obj': obj})
                    raise
                except Exception:
                    msg = _LE("Exception running %(name)s "
                              "%(type)s-hook: %(obj)s")
                    LOG.exception(msg, {'name': name, 'type': method_type,
                                        'obj': obj})
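The `_run` loop only requires that each extension wrap an object exposing optional `pre` and `post` callables: a hook failure is logged and swallowed unless it is a `FatalHookException`, which propagates. A minimal sketch of a compatible hook object (the class name is hypothetical):

    class AuditHook(object):
        """Hypothetical hook object consumable by _run() above."""

        def pre(self, *args, **kwargs):
            # Runs before the hooked call. Raising FatalHookException
            # here aborts the call; any other exception is logged by
            # _run() and execution continues.
            print('pre-hook: %r %r' % (args, kwargs))

        def post(self, *args, **kwargs):
            # Runs after the hooked call completes.
            print('post-hook: %r %r' % (args, kwargs))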
Example 4
 def initialize_connection(self, context, volume_id, connector):
     try:
         connection_info = cinderclient(
             context).volumes.initialize_connection(volume_id, connector)
         connection_info['connector'] = connector
         return connection_info
     except cinder_exception.ClientException as ex:
         with excutils.save_and_reraise_exception():
             LOG.error(_LE('Initialize connection failed for volume '
                           '%(vol)s on host %(host)s. Error: %(msg)s '
                           'Code: %(code)s. Attempting to terminate '
                           'connection.'),
                       {'vol': volume_id,
                        'host': connector.get('host'),
                        'msg': six.text_type(ex),
                        'code': ex.code})
             try:
                 self.terminate_connection(context, volume_id, connector)
             except Exception as exc:
                 LOG.error(_LE('Connection between volume %(vol)s and host '
                               '%(host)s might have succeeded, but attempt '
                               'to terminate connection has failed. '
                               'Validate the connection and determine if '
                               'manual cleanup is needed. Error: %(msg)s '
                               'Code: %(code)s.'),
                           {'vol': volume_id,
                            'host': connector.get('host'),
                            'msg': six.text_type(exc),
                            'code': getattr(exc, 'code', None)})
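The key piece here is `excutils.save_and_reraise_exception`: the body of the `with` block can log and attempt cleanup, and the originally caught exception is re-raised with its traceback when the block exits. A small sketch of that behavior, assuming only `oslo.utils` is installed:

    from oslo_utils import excutils

    def connect():
        raise RuntimeError('backend failure')

    try:
        connect()
    except RuntimeError:
        with excutils.save_and_reraise_exception():
            # This body runs first; the RuntimeError is re-raised when
            # the block exits. Setting the context manager's .reraise
            # attribute to False inside the block would suppress the
            # re-raise instead.
            print('logging and cleaning up before re-raising')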
Example 5
    def container_start(self, context, instance, image_meta, injected_files,
                        admin_password, network_info=None, block_device_info=None,
                        flavor=None):
        LOG.info(_LI('Spawning new instance'), instance=instance)
        if self.client.container_defined(instance.uuid):
            raise exception.InstanceExists(name=instance.uuid)

        try:
            LOG.debug('Fetching image from Glance.')
            self.image.fetch_image(context, instance, image_meta)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create image for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        try:
            LOG.debug('Setting up container profiles')
            self.setup_container(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to setup container for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        try:
            LOG.debug('Setup Networking')
            self._start_network(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to setup network for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        try:
            LOG.debug('Start container')
            self._start_container(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to start container for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        def _wait_for_boot():
            state = self.container_info(instance)
            if state == power_state.RUNNING:
                LOG.info(_LI("Instance spawned successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
        timer.start(interval=0.6).wait()
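The boot wait relies on `FixedIntervalLoopingCall`: the callback runs every 0.6 seconds until it raises `LoopingCallDone`, whose value becomes the result of `wait()`. A generic sketch of the polling pattern, assuming `oslo.service` (older trees shipped the same class as `nova.openstack.common.loopingcall`):

    from oslo_service import loopingcall

    state = {'attempts': 0}

    def _poll():
        state['attempts'] += 1
        if state['attempts'] >= 3:
            # Stops the loop; the value passed here is what
            # wait() below returns.
            raise loopingcall.LoopingCallDone(True)

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    ready = timer.start(interval=0.1).wait()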
Example 6
 def _determine_version_cap(self, target):
     global LAST_VERSION
     if LAST_VERSION:
         return LAST_VERSION
      service_version = objects.Service.get_minimum_version(
          context.get_admin_context(), "nova-compute")
     history = service_obj.SERVICE_VERSION_HISTORY
     try:
         version_cap = history[service_version]["compute_rpc"]
     except IndexError:
         LOG.error(
             _LE(
                 "Failed to extract compute RPC version from "
                 "service history because I am too "
                 "old (minimum version is now %(version)i)"
             ),
             {"version": service_version},
         )
          raise exception.ServiceTooOld(
              thisver=service_obj.SERVICE_VERSION, minver=service_version)
     except KeyError:
         LOG.error(
             _LE("Failed to extract compute RPC version from " "service history for version %(version)i"),
             {"version": service_version},
         )
         return target.version
     LAST_VERSION = version_cap
     LOG.info(
         _LI("Automatically selected compute RPC version %(rpc)s " "from minimum service version %(service)i"),
         {"rpc": version_cap, "service": service_version},
     )
     return version_cap
Example 7
 def _determine_version_cap(self, target):
     global LAST_VERSION
     if LAST_VERSION:
         return LAST_VERSION
     service_version = objects.Service.get_minimum_version(
         context.get_admin_context(), 'nova-compute')
     history = service_obj.SERVICE_VERSION_HISTORY
     try:
         version_cap = history[service_version]['compute_rpc']
     except IndexError:
         LOG.error(_LE('Failed to extract compute RPC version from '
                       'service history because I am too '
                       'old (minimum version is now %(version)i)'),
                   {'version': service_version})
         raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION,
                                       minver=service_version)
     except KeyError:
         LOG.error(_LE('Failed to extract compute RPC version from '
                       'service history for version %(version)i'),
                   {'version': service_version})
         return target.version
     LAST_VERSION = version_cap
     LOG.info(_LI('Automatically selected compute RPC version %(rpc)s '
                  'from minimum service version %(service)i'),
              {'rpc': version_cap,
               'service': service_version})
     return version_cap
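`SERVICE_VERSION_HISTORY` here is a list indexed by the integer service version, which is why both handlers exist: `IndexError` means the deployment's minimum version is newer than this node understands, while `KeyError` means the entry carries no `compute_rpc` cap. A tiny sketch of the table's shape (the version values are hypothetical):

    # The list index is the service version; each entry maps an RPC
    # API name to the version cap that should be used with it.
    SERVICE_VERSION_HISTORY = [
        {'compute_rpc': '4.0'},  # service version 0
        {'compute_rpc': '4.4'},  # service version 1
    ]

    def cap_for(service_version):
        return SERVICE_VERSION_HISTORY[service_version]['compute_rpc']

    print(cap_for(1))  # '4.4'; an unknown version raises IndexError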
Example 8
    def add_rules(self, context, id, name, vals):
        """Add security group rule(s) to security group.

        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both. Multiple rules are
        installed to a security group in neutron using bulk support.
        """

        neutron = neutronapi.get_client(context)
        body = self._make_neutron_security_group_rules_list(vals)
        try:
            rules = neutron.create_security_group_rule(
                body).get('security_group_rules')
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.exception(_LE("Neutron Error getting security group %s"),
                              name)
                self.raise_not_found(six.text_type(e))
            elif e.status_code == 409:
                LOG.exception(_LE("Neutron Error adding rules to security "
                                  "group %s"), name)
                self.raise_over_quota(six.text_type(e))
            elif e.status_code == 400:
                LOG.exception(_LE("Neutron Error: %s"), six.text_type(e))
                self.raise_invalid_property(six.text_type(e))
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        converted_rules = []
        for rule in rules:
            converted_rules.append(
                self._convert_to_nova_security_group_rule_format(rule))
        return converted_rules
Example 9
def set_allocations(req):
    context = req.environ['placement.context']
    consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
    data = _extract_allocations(req.body, ALLOCATION_SCHEMA)
    allocation_data = data['allocations']

    # If the body includes an allocation for a resource provider
    # that does not exist, raise a 400.
    allocation_objects = []
    for allocation in allocation_data:
        resource_provider_uuid = allocation['resource_provider']['uuid']

        try:
            resource_provider = objects.ResourceProvider.get_by_uuid(
                context, resource_provider_uuid)
        except exception.NotFound:
            raise webob.exc.HTTPBadRequest(
                _("Allocation for resource provider '%(rp_uuid)s' "
                  "that does not exist.") %
                {'rp_uuid': resource_provider_uuid},
                json_formatter=util.json_error_formatter)

        resources = allocation['resources']
        for resource_class in resources:
            allocation_obj = objects.Allocation(
                resource_provider=resource_provider,
                consumer_id=consumer_uuid,
                resource_class=resource_class,
                used=resources[resource_class])
            allocation_objects.append(allocation_obj)

    allocations = objects.AllocationList(context, objects=allocation_objects)

    try:
        allocations.create_all()
        LOG.debug("Successfully wrote allocations %s", allocations)
    except exception.NotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _("Unable to allocate inventory for resource provider "
              "%(rp_uuid)s: %(error)s") %
            {'rp_uuid': resource_provider_uuid, 'error': exc},
            json_formatter=util.json_error_formatter)
    # InvalidInventory is a parent for several exceptions that
    # indicate either that Inventory is not present, or that
    # capacity limits have been exceeded.
    except exception.InvalidInventory as exc:
        LOG.exception(_LE("Bad inventory"))
        raise webob.exc.HTTPConflict(
            _('Unable to allocate inventory: %(error)s') % {'error': exc},
            json_formatter=util.json_error_formatter)
    except exception.ConcurrentUpdateDetected as exc:
        LOG.exception(_LE("Concurrent Update"))
        raise webob.exc.HTTPConflict(
            _('Inventory changed while attempting to allocate: %(error)s') %
            {'error': exc},
            json_formatter=util.json_error_formatter)

    req.response.status = 204
    req.response.content_type = None
    return req.response
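The request body shape this handler expects can be read directly from the loop above; a sketch with hypothetical values:

    payload = {
        'allocations': [
            {
                'resource_provider': {
                    'uuid': '30fd2a04-98e4-4428-9b4a-6e8a56e2a0c0',
                },
                'resources': {'VCPU': 2, 'MEMORY_MB': 1024},
            },
        ],
    }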
Example 10
    def unshelve_instance(self, context, instance):
        sys_meta = instance.system_metadata

        def safe_image_show(ctx, image_id):
            if image_id:
                return self.image_api.get(ctx, image_id, show_deleted=False)
            else:
                raise exception.ImageNotFound(image_id="")

        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            image = None
            image_id = sys_meta.get("shelved_image_id")
            # No need to check for image if image_id is None as
            # "shelved_image_id" key is not set for volume backed
            # instance during the shelve process
            if image_id:
                with compute_utils.EventReporter(context, "get_image_info", instance.uuid):
                    try:
                        image = safe_image_show(context, image_id)
                    except exception.ImageNotFound:
                        instance.vm_state = vm_states.ERROR
                        instance.save()

                        reason = _("Unshelve attempted but the image %s " "cannot be found.") % image_id

                        LOG.error(reason, instance=instance)
                        raise exception.UnshelveException(instance_id=instance.uuid, reason=reason)

            try:
                with compute_utils.EventReporter(context, "schedule_instances", instance.uuid):
                    filter_properties = {}
                    scheduler_utils.populate_retry(filter_properties, instance.uuid)
                    request_spec = scheduler_utils.build_request_spec(context, image, [instance])
                    hosts = self._schedule_instances(context, request_spec, filter_properties)
                    host_state = hosts[0]
                    scheduler_utils.populate_filter_properties(filter_properties, host_state)
                    (host, node) = (host_state["host"], host_state["nodename"])
                    self.compute_rpcapi.unshelve_instance(
                        context, instance, host, image=image, filter_properties=filter_properties, node=node
                    )
            except (exception.NoValidHost, exception.UnsupportedPolicyException):
                instance.task_state = None
                instance.save()
                LOG.warning(_LW("No valid host found for unshelve instance"), instance=instance)
                return
            except Exception:
                with excutils.save_and_reraise_exception():
                    instance.task_state = None
                    instance.save()
                    LOG.error(_LE("Unshelve attempted but an error " "has occurred"), instance=instance)
        else:
            LOG.error(_LE("Unshelve attempted but vm_state not SHELVED or " "SHELVED_OFFLOADED"), instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
Example 11
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(_LW("Cannot add security group %(name)s to "
                                "%(instance)s since the port %(port_id)s "
                                "does not meet security requirements"),
                            {'name': security_group_name,
                             'instance': instance.uuid,
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
Example 12
def umount_volume(mnt_base):
    """Wraps execute calls for unmouting a Quobyte volume"""
    try:
        utils.execute('umount.quobyte', mnt_base)
    except processutils.ProcessExecutionError as exc:
        if 'Device or resource busy' in exc.message:
            LOG.error(_LE("The Quobyte volume at %s is still in use."),
                      mnt_base)
        else:
            LOG.exception(_LE("Couldn't unmount the Quobyte Volume at %s"),
                          mnt_base)
Example 13
    def execute(self, lpar_wrap):
        LOG.info(_LI('Plugging the Network Interfaces to instance %s'),
                 self.instance.name)

        # Get the current adapters on the system
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Trim the VIFs down to the ones that haven't yet been created.
        crt_vifs = []
        for vif in self.network_info:
            for cna_w in cna_w_list:
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    break
            else:
                crt_vifs.append(vif)

        # If there are no vifs to create, then just exit immediately.
        if len(crt_vifs) == 0:
            return []

        # Check to see if the LPAR is OK to add VIFs to.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s.  The '
                          'VM was in a state where VIF plugging is not '
                          'acceptable.  The reason from the system is: '
                          '%(reason)s'),
                      {'sys': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # For the VIFs, run the creates (and wait for the events back)
        try:
            with self.virt_api.wait_for_instance_event(
                    self.instance, self._get_vif_events(),
                    deadline=CONF.vif_plugging_timeout,
                    error_callback=self._vif_callback_failed):
                for vif in crt_vifs:
                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
                                 '%(sys)s'),
                             {'mac': vif['address'],
                              'sys': self.instance.name},
                             instance=self.instance)
                    vm.crt_vif(self.adapter, self.instance, self.host_uuid,
                               vif)
        except eventlet.timeout.Timeout:
            LOG.error(_LE('Error waiting for VIF to be created for instance '
                          '%(sys)s'), {'sys': self.instance.name},
                      instance=self.instance)
            raise exception.VirtualInterfaceCreateException()

        # Return the list of created VIFs.
        return cna_w_list
Example 14
def get_available_attach_point(instance, controller_name):
    storage_info = get_controllers(instance)
    controller = storage_info.get(controller_name)
    if not controller:
        details = _("Controller %(controller)s does not exist!")
        raise vbox_exc.VBoxException(details % {"controller": controller_name})

    for attach_point, disk in controller.items():
        if not disk["uuid"]:
            return attach_point

    raise vbox_exc.VBoxException(_("Exceeded the maximum number of slots"))
Example 15
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

        * DEBUG: expected errors
        * ERROR: unexpected errors

    All expected errors are treated as client errors and 4xx HTTP
    status codes are always returned for them.

    Unexpected 5xx errors may contain sensitive information, so their
    messages are suppressed for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    status = getattr(ex, 'code', None)
    if not status:
        status = 500

    if unexpected:
        log_fun = LOG.error
        log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
    else:
        log_fun = LOG.debug
        log_msg = "%(ex_name)s raised: %(ex_str)s"
        # NOTE(jruzicka): For compatibility with EC2 API, treat expected
        # exceptions as client (4xx) errors. The exception error code is 500
        # by default and most exceptions inherit this from NovaException even
        # though they are actually client errors in most cases.
        if status >= 500:
            status = 400

    context = req.environ['nova.context']
    request_id = context.request_id
    log_msg_args = {
        'ex_name': type(ex).__name__,
        'ex_str': ex
    }
    log_fun(log_msg, log_msg_args, context=context)

    if ex.args and not message and (not unexpected or status < 500):
        message = six.text_type(ex.args[0])
    if unexpected:
        # Log filtered environment for unexpected errors.
        env = req.environ.copy()
        for k in list(env):
            if not isinstance(env[k], six.string_types):
                env.pop(k)
        log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occurred.')
    return faults.ec2_error_response(request_id, code, message, status=status)
Example 16
 def refresh_auth(self):
     flags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
     try:
         (unused, vc) = kerberos.authGSSClientInit(self.service, flags)
     except kerberos.GSSError as e:
         LOG.error(_LE("caught kerberos exception %r") % e)
         raise IPAAuthError(str(e))
     try:
         kerberos.authGSSClientStep(vc, "")
     except kerberos.GSSError as e:
         LOG.error(_LE("caught kerberos exception %r") % e)
         raise IPAAuthError(str(e))
     self.token = kerberos.authGSSClientResponse(vc)
Example 17
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
    """Introduce VDI in the host."""
    try:
        vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
        if vdi_ref is None:
            greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait)
            session.call_xenapi("SR.scan", sr_ref)
            vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to introduce VDI on SR'))
        raise exception.StorageError(
                reason=_('Unable to introduce VDI on SR %s') % sr_ref)

    if not vdi_ref:
        raise exception.StorageError(
                reason=_('VDI not found on SR %(sr)s (vdi_uuid '
                         '%(vdi_uuid)s, target_lun %(target_lun)s)') %
                            {'sr': sr_ref, 'vdi_uuid': vdi_uuid,
                             'target_lun': target_lun})

    try:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        LOG.debug(vdi_rec)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to get record of VDI'))
        raise exception.StorageError(
                reason=_('Unable to get record of VDI %s') % vdi_ref)

    if vdi_rec['managed']:
        # We do not need to introduce the vdi
        return vdi_ref

    try:
        return session.call_xenapi("VDI.introduce",
                                    vdi_rec['uuid'],
                                    vdi_rec['name_label'],
                                    vdi_rec['name_description'],
                                    vdi_rec['SR'],
                                    vdi_rec['type'],
                                    vdi_rec['sharable'],
                                    vdi_rec['read_only'],
                                    vdi_rec['other_config'],
                                    vdi_rec['location'],
                                    vdi_rec['xenstore_data'],
                                    vdi_rec['sm_config'])
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to introduce VDI for SR'))
        raise exception.StorageError(
                reason=_('Unable to introduce VDI for SR %s') % sr_ref)
Example 18
    def execute(self, lpar_wrap):
        LOG.info(_LI('Unplugging the Network Interfaces from instance %s'),
                 self.instance.name)

        # If the state is not in an OK state for deleting, then throw an
        # error up front.
        modifiable, reason = lpar_wrap.can_modify_io()
        if not modifiable:
            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
                          'because the system is not in a correct state.  '
                          'The reason reported by the system is: %(reason)s'),
                      {'inst': self.instance.name, 'reason': reason},
                      instance=self.instance)
            raise VirtualInterfaceUnplugException()

        # Get all the current Client Network Adapters (CNA) on the VM itself.
        cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)

        # Walk through the VIFs and delete the corresponding CNA on the VM.
        for vif in self.network_info:
            for cna_w in cna_w_list:
                # If the MAC address matched, attempt the delete.
                if vm.norm_mac(cna_w.mac) == vif['address']:
                    LOG.info(_LI('Deleting VIF with mac %(mac)s for instance '
                                 '%(inst)s.'), {'mac': vif['address'],
                                                'inst': self.instance.name},
                             instance=self.instance)
                    try:
                        cna_w.delete()
                    except Exception as e:
                        LOG.error(_LE('Unable to unplug VIF with mac %(mac)s '
                                      'for instance %(inst)s.'),
                                  {'mac': vif['address'],
                                   'inst': self.instance.name},
                                  instance=self.instance)
                        LOG.error(e)
                        raise VirtualInterfaceUnplugException()

                    # Break from the loop as we had a successful unplug.
                    # This prevents falling through to the 'else' clause.
                    break
            else:
                LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
                                'instance %(inst)s.  The VIF was not found '
                                'on the instance.'),
                            {'mac': vif['address'],
                             'inst': self.instance.name},
                            instance=self.instance)
        return cna_w_list
Example 19
def create_shadow_table(migrate_engine, table_name=None, table=None,
                        **col_name_col_instance):
    """This method create shadow table for table with name ``table_name``
    or table instance ``table``.
    :param table_name: Autoload table with this name and create shadow table
    :param table: Autoloaded table, so just create corresponding shadow table.
    :param col_name_col_instance:   contains pair column_name=column_instance.
    column_instance is instance of Column. These params are required only for
    columns that have unsupported types by sqlite. For example BigInteger.
    :returns: The created shadow_table object.
    """
    meta = MetaData(bind=migrate_engine)

    if table_name is None and table is None:
        raise exception.NovaException(_("Specify `table_name` or `table` "
                                        "param"))
    if not (table_name is None or table is None):
        raise exception.NovaException(_("Specify only one of `table_name` "
                                        "and `table`"))

    if table is None:
        table = Table(table_name, meta, autoload=True)

    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            new_column = oslodbutils._get_not_supported_column(
                col_name_col_instance, column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())

    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
    shadow_table = Table(shadow_table_name, meta, *columns,
                         mysql_engine='InnoDB')
    try:
        shadow_table.create()
        return shadow_table
    except (db_exc.DBError, OperationalError):
        # NOTE(ekudryashova): At the moment there is a case in oslo.db
        # code which raises an unwrapped OperationalError, so we should
        # catch it until oslo.db wraps all such exceptions.
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
        raise exception.ShadowTableExists(name=shadow_table_name)
    except Exception:
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
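A usage sketch for the helper above, assuming a connected SQLAlchemy engine `migrate_engine` and a hypothetical `instances` table with a column whose type sqlite does not support:

    from sqlalchemy import BigInteger, Column

    shadow = create_shadow_table(
        migrate_engine, table_name='instances',
        memory_mb=Column('memory_mb', BigInteger))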
Example 20
 def _refresh_default_networks(self):
     self._default_networks = []
     if CONF.use_neutron_default_nets == "True":
         try:
             self._default_networks = self._get_default_networks()
         except Exception:
             LOG.exception(_LE("Failed to get default networks"))
Example 21
    def _error(self, inner, req):
        LOG.exception(_LE("Caught error: %s"), unicode(inner))

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            user_locale = req.best_match_language()
            inner_msg = translate(inner.message, user_locale)
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            inner_msg)

        notifications.send_api_fault(req.url, status, inner)
        return wsgi.Fault(outer)
Example 22
        def do_associate():
            # associate floating ip
            floating = objects.FloatingIP.associate(context, floating_address,
                                                    fixed_address, self.host)
            fixed = floating.fixed_ip
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                        interface, fixed['network'])
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    try:
                        objects.FloatingIP.disassociate(context,
                                                        floating_address)
                    except Exception:
                        LOG.warning(_LW('Failed to disassociate floating '
                                        'address: %s'), floating_address)
                    if "Cannot find device" in six.text_type(e):
                        try:
                            LOG.error(_LE('Interface %s not found'), interface)
                        except Exception:
                            pass
                        raise exception.NoFloatingIpInterface(
                                interface=interface)

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            self.notifier.info(context,
                               'network.floating_ip.associate', payload)
Example 23
    def verify_base_size(base, size, base_size=0):
        """Check that the base image is not larger than size.
           Since images can't be generally shrunk, enforce this
           constraint taking account of virtual image size.
        """

        # Note(pbrady): The size and min_disk parameters of a glance
        #  image are checked against the instance size before the image
        #  is even downloaded from glance, but currently min_disk is
        #  adjustable and doesn't currently account for virtual disk size,
        #  so we need this extra check here.
        # NOTE(cfb): Having a flavor that sets the root size to 0 and having
        #  nova effectively ignore that size and use the size of the
        #  image is considered a feature at this time, not a bug.

        if size is None:
            return

        if size and not base_size:
            base_size = disk.get_disk_size(base)

        if size < base_size:
            msg = _LE('%(base)s virtual size %(base_size)s is '
                      'larger than flavor root disk size %(size)s')
            LOG.error(msg, {'base': base,
                            'base_size': base_size,
                            'size': size})
            raise exception.FlavorDiskTooSmall()
Example 24
    def __exit__(self, ex_type, ex_value, ex_traceback):
        if not ex_value:
            return True

        if isinstance(ex_value, exception.Forbidden):
            raise Fault(webob.exc.HTTPForbidden(
                    explanation=ex_value.format_message()))
        elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            raise
        elif isinstance(ex_value, exception.Invalid):
            raise Fault(exception.ConvertedException(
                    code=ex_value.code,
                    explanation=ex_value.format_message()))
        elif isinstance(ex_value, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_LE('Exception handling resource: %s'), ex_value,
                      exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), ex_value)
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
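Because `__exit__` returns False for unrecognized exception types, an instance of this class acts as a context manager that converts known errors into Faults and lets everything else propagate. A sketch of the intended use, assuming the method belongs to a class named `ResourceExceptionHandler` (the name and the dispatched call are illustrative):

    with ResourceExceptionHandler():
        # Known nova exceptions raised here are re-raised as Faults
        # with safe HTTP responses; anything unrecognized propagates
        # unchanged because __exit__ returns False.
        action_result = method(req=request, **action_args)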
Example 25
    def _reset_state(self, req, id, body):
        """Permit admins to reset the state of a server."""
        context = req.environ["nova.context"]
        authorize(context, 'resetState')

        # Identify the desired state from the body
        try:
            state = state_map[body["os-resetState"]["state"]]
        except (TypeError, KeyError):
            msg = _("Desired state must be specified.  Valid states "
                    "are: %s") % ', '.join(sorted(state_map.keys()))
            raise exc.HTTPBadRequest(explanation=msg)

        instance = common.get_instance(self.compute_api, context, id)
        try:
            instance.vm_state = state
            instance.task_state = None
            instance.save(admin_state_reset=True)
        except exception.InstanceNotFound:
            msg = _("Server not found")
            raise exc.HTTPNotFound(explanation=msg)
        except Exception:
            readable = traceback.format_exc()
            LOG.exception(_LE("Compute.api::resetState %s"), readable)
            raise exc.HTTPUnprocessableEntity()
        return webob.Response(status_int=202)
Example 26
    def destroy(self, instance, network_info=None, block_device_info=None,
                destroy_disks=True):
        instance_name = instance.name
        LOG.info(_LI("Got request to destroy instance"), instance=instance)
        try:
            if self._vmutils.vm_exists(instance_name):

                # Stop the VM first.
                self._vmutils.stop_vm_jobs(instance_name)
                self.power_off(instance)

                if network_info:
                    for vif in network_info:
                        self._vif_driver.unplug(instance, vif)

                self._vmutils.destroy_vm(instance_name)
                self._volumeops.disconnect_volumes(block_device_info)
            else:
                LOG.debug("Instance not found", instance=instance)

            if destroy_disks:
                self._delete_disk_files(instance_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to destroy instance: %s'),
                              instance_name)
Example 27
    def create_secret(self, usage_type, usage_id, password=None):
        """Create a secret.

        :param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
                           'rbd' will be converted to 'ceph'.
        :param usage_id: name of resource in secret
        :param password: optional secret value to set
        """
        secret_conf = vconfig.LibvirtConfigSecret()
        secret_conf.ephemeral = False
        secret_conf.private = False
        secret_conf.usage_id = usage_id
        if usage_type in ('rbd', 'ceph'):
            secret_conf.usage_type = 'ceph'
        elif usage_type == 'iscsi':
            secret_conf.usage_type = 'iscsi'
        elif usage_type == 'volume':
            secret_conf.usage_type = 'volume'
        else:
            msg = _("Invalid usage_type: %s")
            raise exception.NovaException(msg % usage_type)

        xml = secret_conf.to_xml()
        try:
            LOG.debug('Secret XML: %s', xml)
            conn = self.get_connection()
            secret = conn.secretDefineXML(xml)
            if password is not None:
                secret.setValue(password)
            return secret
        except libvirt.libvirtError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error defining a secret with XML: %s'), xml)
Example 28
def parse_server_string(server_str):
    """Parses the given server_string and returns a tuple of host and port.
    If it is not a combination of host and port, the port element is an
    empty string. If the input is an invalid expression, a tuple of two
    empty strings is returned.
    """
    try:
        # First of all, exclude pure IPv6 address (w/o port).
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')

        # Next, check if this is IPv6 address with a port number combination.
        if server_str.find("]:") != -1:
            (address, port) = server_str.replace('[', '', 1).split(']:')
            return (address, port)

        # Third, check if this is a combination of an address and a port
        if server_str.find(':') == -1:
            return (server_str, '')

        # This must be a combination of an address and a port
        (address, port) = server_str.split(':')
        return (address, port)

    except (ValueError, netaddr.AddrFormatError):
        LOG.error(_LE('Invalid server_string: %s'), server_str)
        return ('', '')
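For reference, the behavior of `parse_server_string` for each accepted input form, read straight from the branches above:

    print(parse_server_string('192.168.1.5:8080'))    # ('192.168.1.5', '8080')
    print(parse_server_string('[2001:db8::1]:8080'))  # ('2001:db8::1', '8080')
    print(parse_server_string('2001:db8::1'))         # ('2001:db8::1', '')
    print(parse_server_string('www.example.com'))     # ('www.example.com', '')
    print(parse_server_string('a:b:c'))               # ('', '') - invalid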
Example 29
    def consume_requests(self, pci_requests):
        alloc_devices = []
        for request in pci_requests:
            count = request.count
            spec = request.spec
            # For now, keep the same algorithm as during scheduling:
            # a spec may be able to match multiple pools.
            pools = self._filter_pools_for_spec(self.pools, spec)
            # Failed to allocate the required number of devices
            # Return the devices already allocated back to their pools
            if sum([pool['count'] for pool in pools]) < count:
                LOG.error(_LE("Failed to allocate PCI devices for instance."
                          " Unassigning devices back to pools."
                          " This should not happen, since the scheduler"
                          " should have accurate information, and allocation"
                          " during claims is controlled via a hold"
                          " on the compute node semaphore"))
                while alloc_devices:
                    self.add_device(alloc_devices.pop())
                raise exception.PciDeviceRequestFailed(requests=pci_requests)

            for pool in pools:
                if pool['count'] >= count:
                    num_alloc = count
                else:
                    num_alloc = pool['count']
                count -= num_alloc
                pool['count'] -= num_alloc
                for d in range(num_alloc):
                    pci_dev = pool['devices'].pop()
                    pci_dev.request_id = request.request_id
                    alloc_devices.append(pci_dev)
                if count == 0:
                    break
        return alloc_devices
Example 30
    def delete(self, req, id):
        context = req.environ['nova.context']
        authorize(context)
        reservation = None
        try:
            if CONF.enable_network_quota:
                reservation = QUOTAS.reserve(context, networks=-1)
        except Exception:
            reservation = None
            LOG.exception(_LE("Failed to update usages deallocating "
                              "network."))

        def _rollback_quota(reservation):
            if CONF.enable_network_quota and reservation:
                QUOTAS.rollback(context, reservation)

        try:
            self.network_api.delete(context, id)
        except exception.PolicyNotAuthorized as e:
            _rollback_quota(reservation)
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        except exception.NetworkInUse as e:
            _rollback_quota(reservation)
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.NetworkNotFound:
            _rollback_quota(reservation)
            msg = _("Network not found")
            raise exc.HTTPNotFound(explanation=msg)

        if CONF.enable_network_quota and reservation:
            QUOTAS.commit(context, reservation)
        response = webob.Response(status_int=202)

        return response
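The quota handling above follows a reserve/rollback/commit protocol: reserve the usage delta up front, roll it back on any failure, and commit only once the operation succeeds. A condensed, runnable sketch of the same flow with a hypothetical stand-in driver:

    class FakeQuotas(object):
        """Hypothetical stand-in for the QUOTAS driver used above."""

        def reserve(self, context, **deltas):
            return 'reservation-1'

        def rollback(self, context, reservation):
            print('rolled back %s' % reservation)

        def commit(self, context, reservation):
            print('committed %s' % reservation)

    quotas = FakeQuotas()
    reservation = quotas.reserve(None, networks=-1)
    try:
        pass  # the delete itself would happen here
    except Exception:
        quotas.rollback(None, reservation)
        raise
    quotas.commit(None, reservation)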
Example 31
    def download(self, context, image_id, data=None, dst_path=None):
        """Calls out to Glance for data and writes data."""
        if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
            image = self.show(context, image_id, include_locations=True)
            for entry in image.get('locations', []):
                loc_url = entry['url']
                loc_meta = entry['metadata']
                o = urlparse.urlparse(loc_url)
                xfer_mod = self._get_transfer_module(o.scheme)
                if xfer_mod:
                    try:
                        xfer_mod.download(context, o, dst_path, loc_meta)
                        LOG.info(_LI("Successfully transferred "
                                     "using %s"), o.scheme)
                        return
                    except Exception:
                        LOG.exception(_LE("Download image error"))

        try:
            image_chunks = self._client.call(context, 1, 'data', image_id)
        except Exception:
            _reraise_translated_image_exception(image_id)

        # Retrieve properties for verification of Glance image signature
        verifier = None
        if CONF.glance.verify_glance_signatures:
            image_meta_dict = self.show(context,
                                        image_id,
                                        include_locations=False)
            image_meta = objects.ImageMeta.from_dict(image_meta_dict)
            img_signature = image_meta.properties.get('img_signature')
            img_sig_hash_method = image_meta.properties.get(
                'img_signature_hash_method')
            img_sig_cert_uuid = image_meta.properties.get(
                'img_signature_certificate_uuid')
            img_sig_key_type = image_meta.properties.get(
                'img_signature_key_type')
            try:
                verifier = signature_utils.get_verifier(
                    context, img_sig_cert_uuid, img_sig_hash_method,
                    img_signature, img_sig_key_type)
            except exception.SignatureVerificationError:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE('Image signature verification failed '
                            'for image: %s'), image_id)

        close_file = False
        if data is None and dst_path:
            data = open(dst_path, 'wb')
            close_file = True

        if data is None:

            # Perform image signature verification
            if verifier:
                try:
                    for chunk in image_chunks:
                        verifier.update(chunk)
                    verifier.verify()

                    LOG.info(
                        _LI('Image signature verification succeeded '
                            'for image: %s'), image_id)

                except cryptography.exceptions.InvalidSignature:
                    with excutils.save_and_reraise_exception():
                        LOG.error(
                            _LE('Image signature verification failed '
                                'for image: %s'), image_id)
            return image_chunks
        else:
            try:
                for chunk in image_chunks:
                    if verifier:
                        verifier.update(chunk)
                    data.write(chunk)
                if verifier:
                    verifier.verify()
                    LOG.info(
                        _LI('Image signature verification succeeded '
                            'for image %s'), image_id)
            except cryptography.exceptions.InvalidSignature:
                data.truncate(0)
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE('Image signature verification failed '
                            'for image: %s'), image_id)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Error writing to %(path)s: %(exception)s"),
                              {
                                  'path': dst_path,
                                  'exception': ex
                              })
            finally:
                if close_file:
                    data.close()
Example 32
    def unshelve_instance(self, context, instance, request_spec=None):
        sys_meta = instance.system_metadata

        def safe_image_show(ctx, image_id):
            if image_id:
                return self.image_api.get(ctx, image_id, show_deleted=False)
            else:
                raise exception.ImageNotFound(image_id='')

        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            image = None
            image_id = sys_meta.get('shelved_image_id')
            # No need to check for image if image_id is None as
            # "shelved_image_id" key is not set for volume backed
            # instance during the shelve process
            if image_id:
                with compute_utils.EventReporter(
                    context, 'get_image_info', instance.uuid):
                    try:
                        image = safe_image_show(context, image_id)
                    except exception.ImageNotFound:
                        instance.vm_state = vm_states.ERROR
                        instance.save()

                        reason = _('Unshelve attempted but the image %s '
                                   'cannot be found.') % image_id

                        LOG.error(reason, instance=instance)
                        raise exception.UnshelveException(
                            instance_id=instance.uuid, reason=reason)

            try:
                with compute_utils.EventReporter(context, 'schedule_instances',
                                                 instance.uuid):
                    if not request_spec:
                        # NOTE(sbauza): We were unable to find an original
                        # RequestSpec object - probably because the instance is
                        # old. We need to mock that the old way
                        filter_properties = {}
                        request_spec = scheduler_utils.build_request_spec(
                            context, image, [instance])
                    else:
                        # NOTE(sbauza): Force_hosts/nodes needs to be reset
                        # if we want to make sure that the next destination
                        # is not forced to be the original host
                        request_spec.reset_forced_destinations()
                        # TODO(sbauza): Provide directly the RequestSpec object
                        # when _schedule_instances(),
                        # populate_filter_properties and populate_retry()
                        # accept it
                        filter_properties = request_spec.\
                            to_legacy_filter_properties_dict()
                        request_spec = request_spec.\
                            to_legacy_request_spec_dict()
                    scheduler_utils.populate_retry(filter_properties,
                                                   instance.uuid)
                    hosts = self._schedule_instances(
                            context, request_spec, filter_properties)
                    host_state = hosts[0]
                    scheduler_utils.populate_filter_properties(
                            filter_properties, host_state)
                    (host, node) = (host_state['host'], host_state['nodename'])
                    self.compute_rpcapi.unshelve_instance(
                            context, instance, host, image=image,
                            filter_properties=filter_properties, node=node)
            except (exception.NoValidHost,
                    exception.UnsupportedPolicyException):
                instance.task_state = None
                instance.save()
                LOG.warning(_LW("No valid host found for unshelve instance"),
                            instance=instance)
                return
            except Exception:
                with excutils.save_and_reraise_exception():
                    instance.task_state = None
                    instance.save()
                    LOG.error(_LE("Unshelve attempted but an error "
                                  "has occurred"), instance=instance)
        else:
            LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
                          'SHELVED_OFFLOADED'), instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
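
A note on the error handling above: the broad except path leans on oslo's excutils.save_and_reraise_exception to run cleanup (resetting task_state) without losing the original traceback. Below is a minimal Python 3 sketch of that pattern using only the standard library; it is an approximation, not the real oslo.utils implementation.

import contextlib
import sys

@contextlib.contextmanager
def save_and_reraise():
    # Capture the exception currently being handled in the caller's
    # except block, run the cleanup body, then re-raise the original.
    _, exc_value, exc_tb = sys.exc_info()
    try:
        yield
    finally:
        if exc_value is not None:
            raise exc_value.with_traceback(exc_tb)

# Usage mirrors the unshelve error path: reset state, then propagate.
try:
    try:
        raise RuntimeError("scheduling failed")
    except RuntimeError:
        with save_and_reraise():
            print("resetting instance.task_state to None")
except RuntimeError as exc:
    print("original error propagated: %s" % exc)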
Example no. 33
    def schedule_and_build_instances(self, context, build_requests,
                                     request_specs, image,
                                     admin_password, injected_files,
                                     requested_networks, block_device_mapping):
        legacy_spec = request_specs[0].to_legacy_request_spec_dict()
        try:
            hosts = self._schedule_instances(context, legacy_spec,
                        request_specs[0].to_legacy_filter_properties_dict())
        except Exception as exc:
            LOG.exception(_LE('Failed to schedule instances'))
            self._bury_in_cell0(context, request_specs[0], exc,
                                build_requests=build_requests)
            return

        host_mapping_cache = {}

        for (build_request, request_spec, host) in six.moves.zip(
                build_requests, request_specs, hosts):
            filter_props = request_spec.to_legacy_filter_properties_dict()
            scheduler_utils.populate_filter_properties(filter_props,
                                                       host)
            instance = build_request.get_new_instance(context)

            # Convert host from the scheduler into a cell record
            if host['host'] not in host_mapping_cache:
                try:
                    host_mapping = objects.HostMapping.get_by_host(
                        context, host['host'])
                    host_mapping_cache[host['host']] = host_mapping
                except exception.HostMappingNotFound as exc:
                    LOG.error(_LE('No host-to-cell mapping found for selected '
                                  'host %(host)s. Setup is incomplete.'),
                              {'host': host['host']})
                    self._bury_in_cell0(context, request_spec, exc,
                                        build_requests=[build_request],
                                        instances=[instance])
                    continue
            else:
                host_mapping = host_mapping_cache[host['host']]

            cell = host_mapping.cell_mapping

            with obj_target_cell(instance, cell):
                instance.create()

            # send a state update notification for the initial create to
            # show it going from non-existent to BUILDING
            notifications.send_update_with_states(context, instance, None,
                    vm_states.BUILDING, None, None, service="conductor")

            objects.InstanceAction.action_start(
                context, instance.uuid, instance_actions.CREATE,
                want_result=False)

            with obj_target_cell(instance, cell):
                instance_bdms = self._create_block_device_mapping(
                    instance.flavor, instance.uuid, block_device_mapping)

            # Update mapping for instance. Normally this check is guarded by
            # a try/except but if we're here we know that a newer nova-api
            # handled the build process and would have created the mapping
            inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
                context, instance.uuid)
            inst_mapping.cell_mapping = cell
            inst_mapping.save()

            if not self._delete_build_request(
                    build_request, instance, cell, instance_bdms):
                # The build request was deleted before/during scheduling so
                # the instance is gone and we don't have anything to build for
                # this one.
                continue

            # NOTE(danms): Compute RPC expects security group names or ids
            # not objects, so convert this to a list of names until we can
            # pass the objects.
            legacy_secgroups = [s.identifier
                                for s in request_spec.security_groups]

            with obj_target_cell(instance, cell):
                self.compute_rpcapi.build_and_run_instance(
                    context, instance=instance, image=image,
                    request_spec=request_spec,
                    filter_properties=filter_props,
                    admin_password=admin_password,
                    injected_files=injected_files,
                    requested_networks=requested_networks,
                    security_groups=legacy_secgroups,
                    block_device_mapping=instance_bdms,
                    host=host['host'], node=host['nodename'],
                    limits=host['limits'])
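
One detail worth calling out: host_mapping_cache above memoizes the host-to-cell lookup, so N instances scheduled onto the same host cost one database query rather than N. The same shape in isolation; get_by_host here is a hypothetical stand-in for objects.HostMapping.get_by_host.

def resolve_hosts(hosts, get_by_host):
    """Resolve each host name once, reusing earlier results for repeats."""
    cache = {}
    resolved = []
    for host in hosts:
        if host not in cache:
            cache[host] = get_by_host(host)  # one lookup per distinct host
        resolved.append(cache[host])
    return resolved

# Three instances land on two hosts -> only two lookups happen.
mappings = resolve_hosts(
    ['compute1', 'compute2', 'compute1'],
    lambda h: {'host': h, 'cell': 'cell-for-%s' % h})
print(mappings[0] is mappings[2])  # True: cached object reused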
Example no. 34
        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            context.update_store()
            log_vars = {'image_location': image_location,
                        'image_path': image_path}

            def _update_image_state(context, image_uuid, image_state):
                metadata = {'properties': {'image_state': image_state}}
                self.service.update(context, image_uuid, metadata,
                                    purge_props=False)

            def _update_image_data(context, image_uuid, image_data):
                metadata = {}
                self.service.update(context, image_uuid, metadata, image_data,
                                    purge_props=False)

            try:
                _update_image_state(context, image_uuid, 'downloading')

                try:
                    parts = []
                    elements = manifest.find('image').iter('filename')
                    for fn_element in elements:
                        part = self._download_file(bucket,
                                                   fn_element.text,
                                                   image_path)
                        parts.append(part)

                    # NOTE(vish): this may be suboptimal, should we use cat?
                    enc_filename = os.path.join(image_path, 'image.encrypted')
                    with open(enc_filename, 'wb') as combined:
                        for filename in parts:
                            with open(filename, 'rb') as part:
                                shutil.copyfileobj(part, combined)

                except Exception:
                    LOG.exception(_LE("Failed to download %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_download')
                    return

                _update_image_state(context, image_uuid, 'decrypting')

                try:
                    hex_key = manifest.find('image/ec2_encrypted_key').text
                    encrypted_key = binascii.a2b_hex(hex_key)
                    hex_iv = manifest.find('image/ec2_encrypted_iv').text
                    encrypted_iv = binascii.a2b_hex(hex_iv)

                    dec_filename = os.path.join(image_path, 'image.tar.gz')
                    self._decrypt_image(context, enc_filename, encrypted_key,
                                        encrypted_iv, dec_filename)
                except Exception:
                    LOG.exception(_LE("Failed to decrypt %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_decrypt')
                    return

                _update_image_state(context, image_uuid, 'untarring')

                try:
                    unz_filename = self._untarzip_image(image_path,
                                                        dec_filename)
                except Exception:
                    LOG.exception(_LE("Failed to untar %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_untar')
                    return

                _update_image_state(context, image_uuid, 'uploading')
                try:
                    with open(unz_filename, 'rb') as image_file:
                        _update_image_data(context, image_uuid, image_file)
                except Exception:
                    LOG.exception(_LE("Failed to upload %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_upload')
                    return

                metadata = {'status': 'active',
                            'properties': {'image_state': 'available'}}
                self.service.update(context, image_uuid, metadata,
                        purge_props=False)

                shutil.rmtree(image_path)
            except exception.ImageNotFound:
                LOG.info(_("Image %s was deleted underneath us"), image_uuid)
                return
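
The download step above stitches the manifest's part files into one image.encrypted by streaming each part through shutil.copyfileobj, never holding a whole part in memory. That step, reduced to a self-contained sketch with throwaway files standing in for the downloaded parts:

import os
import shutil
import tempfile

workdir = tempfile.mkdtemp()
parts = []
for i in range(3):
    name = os.path.join(workdir, 'part-%d' % i)
    with open(name, 'wb') as f:
        f.write(b'chunk-%d;' % i)
    parts.append(name)

combined_path = os.path.join(workdir, 'image.encrypted')
with open(combined_path, 'wb') as combined:
    for filename in parts:
        with open(filename, 'rb') as part:
            shutil.copyfileobj(part, combined)  # streamed, block by block

with open(combined_path, 'rb') as f:
    print(f.read())  # b'chunk-0;chunk-1;chunk-2;'
shutil.rmtree(workdir)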
Example no. 35
def _call_agent(session,
                instance,
                vm_ref,
                method,
                addl_args=None,
                timeout=None,
                success_codes=None):
    """Abstracts out the interaction with the agent xenapi plugin."""
    if addl_args is None:
        addl_args = {}
    if timeout is None:
        timeout = CONF.xenserver.agent_timeout
    if success_codes is None:
        success_codes = ['0']

    # always fetch domid because VM may have rebooted
    dom_id = session.VM.get_domid(vm_ref)

    args = {
        'id': str(uuid.uuid4()),
        'dom_id': str(dom_id),
        'timeout': str(timeout),
    }
    args.update(addl_args)

    try:
        ret = session.call_plugin('agent', method, args)
    except session.XenAPI.Failure as e:
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
            LOG.error(_LE('TIMEOUT: The call to %(method)s timed out. '
                          'args=%(args)r'), {
                              'method': method,
                              'args': args
                          },
                      instance=instance)
            raise exception.AgentTimeout(method=method)
        elif 'REBOOT:' in err_msg:
            LOG.debug(
                'REBOOT: The call to %(method)s detected a reboot. '
                'args=%(args)r', {
                    'method': method,
                    'args': args
                },
                instance=instance)
            _wait_for_new_dom_id(session, vm_ref, dom_id, method)
            return _call_agent(session, instance, vm_ref, method, addl_args,
                               timeout, success_codes)
        elif 'NOT IMPLEMENTED:' in err_msg:
            LOG.error(_LE('NOT IMPLEMENTED: The call to %(method)s is not '
                          'supported by the agent. args=%(args)r'), {
                              'method': method,
                              'args': args
                          },
                      instance=instance)
            raise exception.AgentNotImplemented(method=method)
        else:
            LOG.error(_LE('The call to %(method)s returned an error: %(e)s. '
                          'args=%(args)r'), {
                              'method': method,
                              'args': args,
                              'e': e
                          },
                      instance=instance)
            raise exception.AgentError(method=method)

    if not isinstance(ret, dict):
        try:
            ret = jsonutils.loads(ret)
        except TypeError:
            LOG.error(_LE('The agent call to %(method)s returned an invalid '
                          'response: %(ret)r. args=%(args)r'), {
                              'method': method,
                              'ret': ret,
                              'args': args
                          },
                      instance=instance)
            raise exception.AgentError(method=method)

    if ret['returncode'] not in success_codes:
        LOG.error(_LE('The agent call to %(method)s returned '
                      'an error: %(ret)r. args=%(args)r'), {
                          'method': method,
                          'ret': ret,
                          'args': args
                      },
                  instance=instance)
        raise exception.AgentError(method=method)

    LOG.debug(
        'The agent call to %(method)s was successful: '
        '%(ret)r. args=%(args)r', {
            'method': method,
            'ret': ret,
            'args': args
        },
        instance=instance)

    # Some old versions of the Windows agent have a trailing \\r\\n
    # (i.e. an escaped CRLF) for some reason. Strip that off.
    return ret['message'].replace('\\r\\n', '')
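
The tail of _call_agent is response normalization: coerce a JSON string to a dict, reject bad return codes, and strip the escaped CRLF that some old Windows agents append. That logic in isolation, with a plain exception standing in for Nova's AgentError and the stdlib json module in place of jsonutils:

import json

SUCCESS_CODES = ['0']

class AgentError(Exception):
    pass

def normalize_agent_response(ret):
    if not isinstance(ret, dict):
        try:
            ret = json.loads(ret)
        except (TypeError, ValueError):
            raise AgentError('invalid response: %r' % (ret,))
    if ret.get('returncode') not in SUCCESS_CODES:
        raise AgentError('agent returned an error: %r' % (ret,))
    # Old Windows agents append a literal backslash-escaped CRLF.
    return ret['message'].replace('\\r\\n', '')

print(normalize_agent_response(
    {'returncode': '0', 'message': '1.0\\r\\n'}))  # prints: 1.0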
Example no. 36
    def attach(self,
               context,
               instance,
               volume_api,
               virt_driver,
               do_driver_attach=False,
               **kwargs):
        volume = volume_api.get(context, self.volume_id)
        volume_api.check_availability_zone(context, volume, instance=instance)

        volume_id = volume['id']
        context = context.elevated()

        connector = virt_driver.get_volume_connector(instance)
        connection_info = volume_api.initialize_connection(
            context, volume_id, connector)
        if 'serial' not in connection_info:
            connection_info['serial'] = self.volume_id
        self._preserve_multipath_id(connection_info)

        # If do_driver_attach is False, the volume is attached at boot time
        # by the instance creation code, so no driver attach is done here.
        if do_driver_attach:
            encryption = encryptors.get_encryption_metadata(
                context, volume_api, volume_id, connection_info)

            try:
                virt_driver.attach_volume(context,
                                          connection_info,
                                          instance,
                                          self['mount_device'],
                                          disk_bus=self['disk_bus'],
                                          device_type=self['device_type'],
                                          encryption=encryption)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Driver failed to attach volume "
                                      "%(volume_id)s at %(mountpoint)s"), {
                                          'volume_id': volume_id,
                                          'mountpoint': self['mount_device']
                                      },
                                  instance=instance)
                    volume_api.terminate_connection(context, volume_id,
                                                    connector)
        self['connection_info'] = connection_info
        if self.volume_size is None:
            self.volume_size = volume.get('size')

        mode = 'rw'
        if 'data' in connection_info:
            mode = connection_info['data'].get('access_mode', 'rw')
        if volume['attach_status'] == "detached":
            # NOTE(mriedem): save our current state so connection_info is in
            # the database before the volume status goes to 'in-use' because
            # after that we can detach and connection_info is required for
            # detach.
            self.save()
            try:
                volume_api.attach(context,
                                  volume_id,
                                  instance.uuid,
                                  self['mount_device'],
                                  mode=mode)
            except Exception:
                with excutils.save_and_reraise_exception():
                    if do_driver_attach:
                        try:
                            virt_driver.detach_volume(connection_info,
                                                      instance,
                                                      self['mount_device'],
                                                      encryption=encryption)
                        except Exception:
                            LOG.warning(_LW(
                                "Driver failed to detach volume "
                                "%(volume_id)s at %(mount_point)s."), {
                                    'volume_id': volume_id,
                                    'mount_point': self['mount_device']
                                },
                                        exc_info=True,
                                        instance=instance)
                    volume_api.terminate_connection(context, volume_id,
                                                    connector)

                    # Cinder-volume might have completed volume attach. So
                    # we should detach the volume. If the attach did not
                    # happen, the detach request will be ignored.
                    volume_api.detach(context, volume_id)
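
The attach path above acquires resources in order (initialize the connection, driver attach, Cinder attach) and, on failure, unwinds whatever already succeeded in reverse. That unwind discipline is what contextlib.ExitStack expresses; a hedged stdlib sketch of the pattern, not how BlockDeviceMapping actually implements it:

import contextlib

def run_with_rollback(steps):
    """steps: (do, undo) pairs; on failure, undos run in reverse order."""
    with contextlib.ExitStack() as stack:
        for do, undo in steps:
            do()
            stack.callback(undo)
        stack.pop_all()  # success: detach the undo callbacks, keep resources

run_with_rollback([
    (lambda: print('initialize_connection'),
     lambda: print('terminate_connection')),
    (lambda: print('driver attach'),
     lambda: print('driver detach')),
])  # success path: no undo output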
Example no. 37
def fetch_to_raw(context, image_href, path, user_id, project_id, max_size=0):
    path_tmp = "%s.part" % path
    fetch(context,
          image_href,
          path_tmp,
          user_id,
          project_id,
          max_size=max_size)

    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") % {
                    'fmt': fmt,
                    'backing_file': backing_file
                }))

        # We can't generally shrink incoming images, so disallow
        # images > size of the flavor we're booting.  Checking here avoids
        # an immediate DoS where we convert large qcow images to raw
        # (which may compress well but not be sparse).
        # TODO(p-draigbrady): loop through all flavor sizes, so that
        # we might continue here and not discard the download.
        # If we did that we'd have to do the higher level size checks
        # irrespective of whether the base image was prepared or not.
        disk_size = data.virtual_size
        if max_size and max_size < disk_size:
            LOG.error(
                _LE('%(base)s virtual size %(disk_size)s '
                    'larger than flavor root disk size %(size)s'), {
                        'base': path,
                        'disk_size': disk_size,
                        'size': max_size
                    })
            raise exception.FlavorDiskTooSmall()

        if fmt != "raw" and CONF.force_raw_images:
            staged = "%s.converted" % path
            LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
            with fileutils.remove_path_on_error(staged):
                convert_image(path_tmp, staged, 'raw')
                os.unlink(path_tmp)

                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is now %s") %
                        data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
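
The qemu_img_info helper above wraps qemu-img's introspection. The same probe can be done directly with qemu-img's JSON output mode; a sketch assuming a qemu-img binary on PATH and a hypothetical local image file (this is not Nova's actual helper):

import json
import subprocess

def qemu_img_info(path):
    """Ask qemu-img for an image's format, virtual size and backing file."""
    out = subprocess.check_output(
        ['qemu-img', 'info', '--output=json', path])
    return json.loads(out.decode('utf-8'))

info = qemu_img_info('/tmp/disk.img')  # hypothetical local image
if info.get('backing-filename'):
    # Mirror fetch_to_raw's policy: images with backing files are rejected.
    raise ValueError('backing file not allowed: %s'
                     % info['backing-filename'])
print(info['format'], info['virtual-size'])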
Example no. 38
    def create_image(self, prepare_template, base, size, *args, **kwargs):
        def encrypt_lvm_image():
            dmcrypt.create_volume(self.path.rpartition('/')[2],
                                  self.lv_path,
                                  CONF.ephemeral_storage_encryption.cipher,
                                  CONF.ephemeral_storage_encryption.key_size,
                                  key)

        filename = self._get_lock_name(base)

        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def create_lvm_image(base, size):
            base_size = disk.get_disk_size(base)
            self.verify_base_size(base, size, base_size=base_size)
            resize = size > base_size
            size = size if resize else base_size
            lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse)
            if self.ephemeral_key_uuid is not None:
                encrypt_lvm_image()
            # NOTE: by calling convert_image_unsafe here we're
            # telling qemu-img convert to do format detection on the input,
            # because we don't know what the format is. For example,
            # we might have downloaded a qcow2 image, or created an
            # ephemeral filesystem locally, we just don't know here. Having
            # audited this, all current sources have been sanity checked,
            # either because they're locally generated, or because they have
            # come from images.fetch_to_raw. However, this is a major code
            # smell.
            images.convert_image_unsafe(base, self.path, self.driver_format,
                                        run_as_root=True)
            if resize:
                disk.resize2fs(self.path, run_as_root=True)

        generated = 'ephemeral_size' in kwargs
        if self.ephemeral_key_uuid is not None:
            if 'context' in kwargs:
                try:
                    # NOTE(dgenin): Key manager corresponding to the
                    # specific backend catches and reraises an exception
                    # if key retrieval fails.
                    key = self.key_manager.get_key(kwargs['context'],
                            self.ephemeral_key_uuid).get_encoded()
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Failed to retrieve ephemeral encryption"
                                      " key"))
            else:
                raise exception.NovaException(
                    _("Instance disk to be encrypted but no context provided"))
        # Generate images with specified size right on volume
        if generated and size:
        lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse)
            with self.remove_volume_on_error(self.path):
                if self.ephemeral_key_uuid is not None:
                    encrypt_lvm_image()
                prepare_template(target=self.path, *args, **kwargs)
        else:
            if not os.path.exists(base):
                prepare_template(target=base, max_size=size, *args, **kwargs)
            with self.remove_volume_on_error(self.path):
                create_lvm_image(base, size)
Example no. 39
    def _migrate(self, req, id, body):
        """Permit admins to migrate a server to a new host."""

        param_dict = body.get('migrate')
        no_sys_vol = param_dict.get('no_sys_vol', False)
        az = param_dict.get('az')
        boot_system_volume = not no_sys_vol
        context = req.environ['nova.context']
        authorize(context, 'migrate')
        instance = common.get_instance(self.compute_api,
                                       context,
                                       id,
                                       want_objects=True)
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance['uuid'])
        if az is not None:
            availability_zone = instance.availability_zone
            check_result = self._check_migrate_conditions(
                context, az, instance, boot_system_volume)
            if check_result is False:
                if 'vcloud' in az:
                    msg = _("The VM can't be migrated to the target "
                            "availability zone")
                    raise exc.HTTPBadRequest(explanation=msg)
                if 'aws' in az:
                    msg = _("The VM can only migrate its data volumes to "
                            "the target availability zone")
                    raise exc.HTTPBadRequest(explanation=msg)
                if 'aws' in availability_zone:
                    msg = _("The VM can only migrate its data volumes from "
                            "the current availability zone")
                    raise exc.HTTPBadRequest(explanation=msg)
            if az == availability_zone:
                msg = _("The target availability zone can't be the same as "
                        "the current one.")
                raise exc.HTTPBadRequest(explanation=msg)

            migrate_thread = MigrateThread(context, instance, az,
                                           boot_system_volume)
            migrate_thread.start()

        else:
            host = None
            if self.ext_mgr.is_loaded('os-migrate-host'):
                migrate_body = body.get('migrate')
                host = migrate_body.get('host') if migrate_body else None
            LOG.debug("Going to try to cold migrate %(uuid)s to %(host)s", {
                "uuid": instance["uuid"],
                "host": (host or "another host")
            })
            try:
                self.compute_api.resize(req.environ['nova.context'], instance)
            except exception.QuotaError as error:
                raise exc.HTTPForbidden(explanation=error.format_message())
            except exception.InstanceIsLocked as e:
                raise exc.HTTPConflict(explanation=e.format_message())
            except exception.InstanceInvalidState as state_error:
                common.raise_http_conflict_for_instance_invalid_state(
                    state_error, 'migrate')
            except exception.InstanceNotFound as e:
                raise exc.HTTPNotFound(explanation=e.format_message())
            except exception.NoValidHost as e:
                raise exc.HTTPBadRequest(explanation=e.format_message())
            except Exception as e:
                LOG.exception(_LE("Error in migrate %s"), e)
                raise exc.HTTPBadRequest()
            return webob.Response(status_int=202)
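
The except ladder at the end of _migrate is really a table mapping API-layer exceptions onto HTTP status codes. Written as data, the same policy looks like this; the exception classes below are local stand-ins for Nova's, and the mapping is a sketch:

class QuotaError(Exception): pass
class InstanceIsLocked(Exception): pass
class InstanceNotFound(Exception): pass
class NoValidHost(Exception): pass

HTTP_STATUS_FOR = [
    (QuotaError, 403),        # Forbidden
    (InstanceIsLocked, 409),  # Conflict
    (InstanceNotFound, 404),  # Not Found
    (NoValidHost, 400),       # Bad Request
]

def status_for(exc, default=400):
    for klass, status in HTTP_STATUS_FOR:
        if isinstance(exc, klass):
            return status
    return default  # unknown errors fall back to 400, as above

print(status_for(InstanceIsLocked()))  # 409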
Example no. 40
    def _delete_inventory(self, rp_uuid):
        """Deletes all inventory records for a resource provider with the
        supplied UUID.
        """
        curr = self._get_inventory_and_update_provider_generation(rp_uuid)

        # Check to see if we need to update placement's view
        if not curr.get('inventories', {}):
            msg = "No inventory to delete from resource provider %s."
            LOG.debug(msg, rp_uuid)
            return

        msg = _LI("Compute node %s reported no inventory but previous "
                  "inventory was detected. Deleting existing inventory "
                  "records.")
        LOG.info(msg, rp_uuid)

        url = '/resource_providers/%s/inventories' % rp_uuid
        cur_rp_gen = self._resource_providers[rp_uuid].generation
        payload = {
            'resource_provider_generation': cur_rp_gen,
            'inventories': {},
        }
        r = self.put(url, payload)
        placement_req_id = get_placement_request_id(r)
        if r.status_code == 200:
            # Update our view of the generation for next time
            updated_inv = r.json()
            new_gen = updated_inv['resource_provider_generation']

            self._resource_providers[rp_uuid].generation = new_gen
            msg_args = {
                'rp_uuid': rp_uuid,
                'generation': new_gen,
                'placement_req_id': placement_req_id,
            }
            LOG.info(
                _LI('[%(placement_req_id)s] Deleted all inventory for '
                    'resource provider %(rp_uuid)s at generation '
                    '%(generation)i'), msg_args)
            return
        elif r.status_code == 409:
            rc_str = _extract_inventory_in_use(r.text)
            if rc_str is not None:
                msg = _LW("[%(placement_req_id)s] We cannot delete inventory "
                          "%(rc_str)s for resource provider %(rp_uuid)s "
                          "because the inventory is in use.")
                msg_args = {
                    'rp_uuid': rp_uuid,
                    'rc_str': rc_str,
                    'placement_req_id': placement_req_id,
                }
                LOG.warning(msg, msg_args)
                return

        msg = _LE("[%(placement_req_id)s] Failed to delete inventory for "
                  "resource provider %(rp_uuid)s. Got error response: %(err)s")
        msg_args = {
            'rp_uuid': rp_uuid,
            'err': r.text,
            'placement_req_id': placement_req_id,
        }
        LOG.error(msg, msg_args)
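
The PUT above is a generation-guarded write: the payload echoes the provider generation we last saw, and placement answers 409 if anyone moved the provider forward in the meantime. The request shape, sketched with the requests library against a hypothetical placement endpoint:

import requests

def delete_all_inventory(base_url, rp_uuid, generation):
    url = '%s/resource_providers/%s/inventories' % (base_url, rp_uuid)
    payload = {
        'resource_provider_generation': generation,  # optimistic lock
        'inventories': {},                           # empty == delete all
    }
    r = requests.put(url, json=payload)
    if r.status_code == 200:
        # The server bumped the generation; cache it for the next write.
        return r.json()['resource_provider_generation']
    if r.status_code == 409:
        raise RuntimeError('stale generation or inventory in use')
    r.raise_for_status()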
Example no. 41
    def _handle_instance_id_request(self, req):
        instance_id = req.headers.get('X-Instance-ID')
        tenant_id = req.headers.get('X-Tenant-ID')
        signature = req.headers.get('X-Instance-ID-Signature')
        remote_address = req.headers.get('X-Forwarded-For')

        # Ensure the required headers are present and each was passed
        # only once.

        if instance_id is None:
            msg = _('X-Instance-ID header is missing from request.')
        elif tenant_id is None:
            msg = _('X-Tenant-ID header is missing from request.')
        elif not isinstance(instance_id, six.string_types):
            msg = _('Multiple X-Instance-ID headers found within request.')
        elif not isinstance(tenant_id, six.string_types):
            msg = _('Multiple X-Tenant-ID headers found within request.')
        else:
            msg = None

        if msg:
            raise webob.exc.HTTPBadRequest(explanation=msg)

        expected_signature = hmac.new(
            CONF.neutron.metadata_proxy_shared_secret,
            instance_id,
            hashlib.sha256).hexdigest()

        if not utils.constant_time_compare(expected_signature, signature):
            if instance_id:
                LOG.warning(_LW('X-Instance-ID-Signature: %(signature)s '
                                'does not match the expected value: '
                                '%(expected_signature)s for id: '
                                '%(instance_id)s. Request From: '
                                '%(remote_address)s'),
                            {'signature': signature,
                             'expected_signature': expected_signature,
                             'instance_id': instance_id,
                             'remote_address': remote_address})

            msg = _('Invalid proxy request signature.')
            raise webob.exc.HTTPForbidden(explanation=msg)

        try:
            meta_data = self.get_metadata_by_instance_id(instance_id,
                                                         remote_address)
        except Exception:
            LOG.exception(_('Failed to get metadata for instance id: %s'),
                          instance_id)
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))

        if meta_data is None:
            LOG.error(_LE('Failed to get metadata for instance id: %s'),
                      instance_id)
        elif meta_data.instance['project_id'] != tenant_id:
            LOG.warn(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
                         "of instance %(instance_id)s."),
                     {'tenant_id': tenant_id, 'instance_id': instance_id})
            # causes a 404 to be raised
            meta_data = None

        return meta_data
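
The signature check above is HMAC-SHA256 over the instance id with a shared secret, compared in constant time so timing does not reveal how many leading characters matched. The same check with only the stdlib; hmac.compare_digest plays the role of utils.constant_time_compare, and the secret and id below are made up:

import hashlib
import hmac

shared_secret = b'metadata-proxy-secret'  # hypothetical shared secret
instance_id = b'8e4ec5c2-2f3a-4c6b-9a51-0123456789ab'  # hypothetical uuid

expected_signature = hmac.new(
    shared_secret, instance_id, hashlib.sha256).hexdigest()

def signature_valid(presented):
    # Constant-time comparison; never use == for signatures.
    return hmac.compare_digest(expected_signature, presented)

print(signature_valid(expected_signature))  # True
print(signature_valid('0' * 64))            # False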
Example no. 42
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info=None,
              block_device_info=None,
              instance_type=None):
        """Deploy an instance.

        :param context: The security context.
        :param instance: The instance object.
        :param image_meta: Image dict returned by nova.image.glance
            that defines the image from which to boot this instance.
        :param injected_files: User files to inject into instance. Ignored
            by this driver.
        :param admin_password: Administrator password to set in
            instance. Ignored by this driver.
        :param network_info: Instance network information.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        :param instance_type: The instance_type for the instance to be spawned.
        """
        # The compute manager is meant to know the node uuid, so missing uuid
        # is a significant issue. It may mean we've been passed the wrong data.
        node_uuid = instance.get('node')
        if not node_uuid:
            raise ironic.exc.BadRequest(
                _("Ironic node uuid not supplied to "
                  "driver for instance %s.") % instance['uuid'])

        ironicclient = client_wrapper.IronicClientWrapper()
        node = ironicclient.call("node.get", node_uuid)
        flavor = objects.Flavor.get_by_id(context,
                                          instance['instance_type_id'])

        self._add_driver_fields(node, instance, image_meta, flavor)

        # NOTE(Shrews): The default ephemeral device needs to be set for
        # services (like cloud-init) that depend on it being returned by the
        # metadata server. Addresses bug https://launchpad.net/bugs/1324286.
        if flavor['ephemeral_gb']:
            instance.default_ephemeral_device = '/dev/sda1'
            instance.save()

        # validate we are ready to do the deploy
        validate_chk = ironicclient.call("node.validate", node_uuid)
        if not validate_chk.deploy or not validate_chk.power:
            # something is wrong. undo what we have done
            self._cleanup_deploy(context,
                                 node,
                                 instance,
                                 network_info,
                                 flavor=flavor)
            raise exception.ValidationError(
                _("Ironic node: %(id)s failed to validate."
                  " (deploy: %(deploy)s, power: %(power)s)") % {
                      'id': node.uuid,
                      'deploy': validate_chk.deploy,
                      'power': validate_chk.power
                  })

        # prepare for the deploy
        try:
            self._plug_vifs(node, instance, network_info)
            self._start_firewall(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error preparing deploy for instance "
                        "%(instance)s on baremetal node %(node)s."), {
                            'instance': instance['uuid'],
                            'node': node_uuid
                        })
                self._cleanup_deploy(context,
                                     node,
                                     instance,
                                     network_info,
                                     flavor=flavor)

        # trigger the node deploy
        try:
            ironicclient.call("node.set_provision_state", node_uuid,
                              ironic_states.ACTIVE)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_LE("Failed to request Ironic to provision instance "
                           "%(inst)s: %(reason)s"), {
                               'inst': instance['uuid'],
                               'reason': six.text_type(e)
                           })
                LOG.error(msg)
                self._cleanup_deploy(context,
                                     node,
                                     instance,
                                     network_info,
                                     flavor=flavor)

        timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                     ironicclient, instance)
        try:
            timer.start(interval=CONF.ironic.api_retry_interval).wait()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error deploying instance %(instance)s on "
                        "baremetal node %(node)s."), {
                            'instance': instance['uuid'],
                            'node': node_uuid
                        })
                self.destroy(context, instance, network_info)
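
FixedIntervalLoopingCall at the bottom polls _wait_for_active until the Ironic node goes active. Without eventlet, the control flow is just a sleep-and-retry loop where the polled function raises a sentinel to stop; a stdlib sketch under that assumption:

import time

class LoopingCallDone(Exception):
    """Raised by the polled function to end the loop."""

def fixed_interval_loop(poll_fn, interval):
    while True:
        try:
            poll_fn()
        except LoopingCallDone:
            return
        time.sleep(interval)

state = {'polls': 0}

def wait_for_active():
    state['polls'] += 1
    print('poll attempt %d' % state['polls'])
    if state['polls'] >= 3:  # pretend the node reached ACTIVE
        raise LoopingCallDone()

fixed_interval_loop(wait_for_active, interval=0.01)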
Example no. 43
def create(name,
           memory,
           vcpus,
           root_gb,
           ephemeral_gb=0,
           flavorid=None,
           swap=0,
           rxtx_factor=1.0,
           is_public=True):
    """Creates flavors."""
    if not flavorid:
        flavorid = uuid.uuid4()

    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
    }

    if isinstance(name, six.string_types):
        name = name.strip()
    # ensure name does not exceed 255 characters
    utils.check_string_length(name, 'name', min_length=1, max_length=255)

    # ensure name does not contain any special characters
    valid_name = VALID_NAME_REGEX.search(name)
    if not valid_name:
        msg = _("Flavor names can only contain alphanumeric characters, "
                "periods, dashes, underscores and spaces.")
        raise exception.InvalidInput(reason=msg)

    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    #             in through json as an integer, so we convert it here.
    flavorid = six.text_type(flavorid)

    # ensure leading/trailing whitespace is not present.
    if flavorid.strip() != flavorid:
        msg = _("id cannot contain leading and/or trailing whitespace(s)")
        raise exception.InvalidInput(reason=msg)

    # ensure flavor id does not exceed 255 characters
    utils.check_string_length(flavorid, 'id', min_length=1, max_length=255)

    # ensure flavor id does not contain any special characters
    valid_flavor_id = VALID_ID_REGEX.search(flavorid)
    if not valid_flavor_id:
        msg = _("Flavor id can only contain letters from A-Z (both cases), "
                "periods, dashes, underscores and spaces.")
        raise exception.InvalidInput(reason=msg)

    # NOTE(wangbo): validate attributes of the flavor being created.
    # ram and vcpus should be positive ( > 0) integers.
    # disk, ephemeral and swap should be non-negative ( >= 0) integers.
    flavor_attributes = {
        'memory_mb': ('ram', 1),
        'vcpus': ('vcpus', 1),
        'root_gb': ('disk', 0),
        'ephemeral_gb': ('ephemeral', 0),
        'swap': ('swap', 0)
    }

    for key, value in flavor_attributes.items():
        kwargs[key] = utils.validate_integer(kwargs[key], value[0], value[1],
                                             db.MAX_INT)

    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        if (kwargs['rxtx_factor'] <= 0
                or kwargs['rxtx_factor'] > SQL_SP_FLOAT_MAX):
            raise ValueError()
    except ValueError:
        msg = (_("'rxtx_factor' argument must be a float between 0 and %g") %
               SQL_SP_FLOAT_MAX)
        raise exception.InvalidInput(reason=msg)

    kwargs['name'] = name
    kwargs['flavorid'] = flavorid
    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))

    try:
        return db.flavor_create(context.get_admin_context(), kwargs)
    except db_exc.DBError as e:
        LOG.exception(_LE('DB error: %s'), e)
        raise exception.FlavorCreateFailed()
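
Most of create() is defensive validation before the single db.flavor_create call. The integer checks all reduce to one helper; a stdlib sketch of what utils.validate_integer presumably enforces (the bound and messages are illustrative):

MAX_INT = 0x7FFFFFFF  # illustrative upper bound, like db.MAX_INT

def validate_integer(value, name, min_value, max_value=MAX_INT):
    try:
        # Round-trip through str so 1.5 is rejected instead of truncated.
        value = int(str(value))
    except (TypeError, ValueError):
        raise ValueError('%s must be an integer' % name)
    if not min_value <= value <= max_value:
        raise ValueError('%s must be between %d and %d'
                         % (name, min_value, max_value))
    return value

print(validate_integer('4', 'vcpus', 1))  # 4
print(validate_integer(0, 'swap', 0))     # 0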
Example no. 44
    def unshelve_instance(self, context, instance):
        sys_meta = instance.system_metadata

        def safe_image_show(ctx, image_id):
            if image_id:
                return self.image_api.get(ctx, image_id, show_deleted=False)
            else:
                raise exception.ImageNotFound(image_id='')

        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
            snapshot_id = sys_meta.get('shelved_image_id')
            if snapshot_id:
                self._delete_image(context, snapshot_id)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            image_id = sys_meta.get('shelved_image_id')
            with compute_utils.EventReporter(context, 'get_image_info',
                                             instance.uuid):
                try:
                    image = safe_image_show(context, image_id)
                except exception.ImageNotFound:
                    instance.vm_state = vm_states.ERROR
                    instance.save()

                    if image_id:
                        reason = _('Unshelve attempted but the image %s '
                                   'cannot be found.') % image_id
                    else:
                        reason = _('Unshelve attempted but the image_id is '
                                   'not provided')

                    LOG.error(reason, instance=instance)
                    raise exception.UnshelveException(
                        instance_id=instance.uuid, reason=reason)

            try:
                with compute_utils.EventReporter(context, 'schedule_instances',
                                                 instance.uuid):
                    filter_properties = {}
                    hosts = self._schedule_instances(context, image,
                                                     filter_properties,
                                                     instance)
                    host_state = hosts[0]
                    scheduler_utils.populate_filter_properties(
                        filter_properties, host_state)
                    (host, node) = (host_state['host'], host_state['nodename'])
                    self.compute_rpcapi.unshelve_instance(
                        context,
                        instance,
                        host,
                        image=image,
                        filter_properties=filter_properties,
                        node=node)
            except exception.NoValidHost:
                instance.task_state = None
                instance.save()
                LOG.warning(_("No valid host found for unshelve instance"),
                            instance=instance)
                return
        else:
            LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
                          'SHELVED_OFFLOADED'),
                      instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return

        for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
            if key in sys_meta:
                del sys_meta[key]
        instance.system_metadata = sys_meta
        instance.save()
Example no. 45
    def __call__(self, req):
        request_id = common_context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)

        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')

        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}

        o = urlparse.urlparse(CONF.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=response.status)
        result = jsonutils.loads(data)
        conn.close()

        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [
                role['name'] for role in result['access']['user']['roles']
            ]
        except (AttributeError, KeyError) as e:
            LOG.error(_LE("Keystone failure: %s"), e)
            msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)

        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)

        req.environ['nova.context'] = ctxt

        return self.application
Example no. 46
    def save(self,
             context,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param context: Security context
        :param expected_task_state: Optional tuple of valid task states
            for the instance to be in
        :param expected_vm_state: Optional tuple of valid vm states
            for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
            of task_state/vm_state

        """

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()

        for field in self.fields:
            if (self.obj_attr_is_set(field)
                    and isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'),
                                  field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            if (self.VERSION == '1.9'
                    and expected_task_state == 'image_snapshot'):
                # NOTE(danms): Icehouse introduced a pending state which
                # Havana doesn't know about. If we're an old instance,
                # tolerate the pending state as well
                expected_task_state = [
                    expected_task_state, 'image_snapshot_pending'
                ]
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context,
                             self,
                             inst_ref,
                             expected_attrs=expected_attrs)
        notifications.send_update(context, old_ref, self)
        self.obj_reset_changes()
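
save() only writes fields reported by obj_what_changed(), which is dirty-attribute tracking. A toy sketch of that bookkeeping, nothing like the real oslo.versionedobjects machinery:

class Tracked(object):
    """Remember which attributes changed since the last reset."""

    def __init__(self, **fields):
        object.__setattr__(self, '_changed', set())
        for key, value in fields.items():
            object.__setattr__(self, key, value)

    def __setattr__(self, name, value):
        self._changed.add(name)
        object.__setattr__(self, name, value)

    def what_changed(self):
        return set(self._changed)

    def reset_changes(self):
        self._changed.clear()

inst = Tracked(vm_state='active', task_state=None)
inst.task_state = 'powering-on'
print(inst.what_changed())  # {'task_state'}
inst.reset_changes()
print(inst.what_changed())  # set()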
Example no. 47
    def __call__(self, req):
        # NOTE(alevine) We need to calculate the hash here because
        # subsequent access to request modifies the req.body so the hash
        # calculation will yield invalid results.
        body_hash = hashlib.sha256(req.body).hexdigest()

        request_id = common_context.generate_request_id()
        signature = self._get_signature(req)
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)
        access = self._get_access(req)
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)

        if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
            auth_params = {}
        else:
            # Make a copy of args for authentication and signature verification
            auth_params = dict(req.params)
            # Not part of authentication args
            auth_params.pop('Signature', None)

        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
            'headers': req.headers,
            'body_hash': body_hash
        }
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}

        verify = not CONF.keystone_ec2_insecure
        if verify and CONF.ssl.ca_file:
            verify = CONF.ssl.ca_file

        cert = None
        if CONF.ssl.cert_file and CONF.ssl.key_file:
            cert = (CONF.ssl.cert_file, CONF.ssl.key_file)
        elif CONF.ssl.cert_file:
            cert = CONF.ssl.cert_file

        response = requests.request('POST',
                                    CONF.keystone_ec2_url,
                                    data=creds_json,
                                    headers=headers,
                                    verify=verify,
                                    cert=cert)
        status_code = response.status_code
        if status_code != 200:
            msg = response.reason
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=status_code)
        result = response.json()

        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [
                role['name'] for role in result['access']['user']['roles']
            ]
        except (AttributeError, KeyError) as e:
            LOG.error(_LE("Keystone failure: %s"), e)
            msg = _("Failure parsing response from keystone: %s") % e
            return faults.ec2_error_response(request_id,
                                             "AuthFailure",
                                             msg,
                                             status=400)

        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)

        req.environ['nova.context'] = ctxt

        return self.application
Example no. 48
    def detach(self,
               context,
               instance,
               volume_api,
               virt_driver,
               attachment_id=None,
               destroy_bdm=False):

        connector = virt_driver.get_volume_connector(instance)
        connection_info = self['connection_info']
        volume_id = self.volume_id

        # Only attempt to detach and disconnect from the volume if the instance
        # is currently associated with the local compute host.
        if CONF.host == instance.host:
            self.driver_detach(context, instance, volume_api, virt_driver)
        elif not destroy_bdm:
            LOG.debug("Skipping driver_detach during remote rebuild.",
                      instance=instance)
        elif destroy_bdm:
            LOG.error(_LE("Unable to call for a driver detach of volume "
                          "%(vol_id)s due to the instance being "
                          "registered to the remote host %(inst_host)s."), {
                              'vol_id': volume_id,
                              'inst_host': instance.host
                          },
                      instance=instance)

        if connection_info and not destroy_bdm and (connector.get('host') !=
                                                    instance.host):
            # If the volume is attached to another host (evacuate) then
            # this connector is for the wrong host. Use the connector that
            # was stored in connection_info instead (if we have one, and it
            # is for the expected host).
            stashed_connector = connection_info.get('connector')
            if not stashed_connector:
                # Volume was attached before we began stashing connectors
                LOG.warning(
                    _LW("Host mismatch detected, but stashed "
                        "volume connector not found. Instance host is "
                        "%(ihost)s, but volume connector host is "
                        "%(chost)s."), {
                            'ihost': instance.host,
                            'chost': connector.get('host')
                        })
            elif stashed_connector.get('host') != instance.host:
                # Unexpected error. The stashed connector also does not
                # match the needed instance host.
                LOG.error(
                    _LE("Host mismatch detected in stashed volume "
                        "connector. Will use local volume connector. "
                        "Instance host is %(ihost)s. Local volume "
                        "connector host is %(chost)s. Stashed volume "
                        "connector host is %(schost)s."), {
                            'ihost': instance.host,
                            'chost': connector.get('host'),
                            'schost': stashed_connector.get('host')
                        })
            else:
                # Fix found. Use stashed connector.
                LOG.debug(
                    "Host mismatch detected. Found usable stashed "
                    "volume connector. Instance host is %(ihost)s. "
                    "Local volume connector host was %(chost)s. "
                    "Stashed volume connector host is %(schost)s.", {
                        'ihost': instance.host,
                        'chost': connector.get('host'),
                        'schost': stashed_connector.get('host')
                    })
                connector = stashed_connector

        # NOTE(jdg): For now we need to actually inspect the bdm for an
        # attachment_id as opposed to relying on what may have been passed
        # in; we want to force usage of the old detach flow for now and only
        # use the new flow when we explicitly used it for the attach.
        if not self['attachment_id']:
            volume_api.terminate_connection(context, volume_id, connector)
            volume_api.detach(context.elevated(), volume_id, instance.uuid,
                              attachment_id)
        else:
            volume_api.attachment_delete(context, self['attachment_id'])
Example no. 49
 def __call__(self, *args, **kwargs):
     stacktrace = "".join(traceback.format_stack())
     LOG = logging.getLogger('nova.compute')
     LOG.error(_LE('No db access allowed in nova-compute: %s'),
               stacktrace)
     raise exception.DBNotAllowed('nova-compute')
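A minimal sketch of how a guard like the one above is typically installed (the wiring below is an assumption for illustration, not part of this snippet): every attribute lookup on the guard returns the guard itself, so any attempted DB call ends up in __call__ and raises.

import traceback

from oslo_log import log as logging

from nova.db import api as db_api
from nova import exception

LOG = logging.getLogger('nova.compute')


class _NoDBGuard(object):
    def __getattr__(self, name):
        # Chained lookups such as db_api.IMPL.instance_get(...) all
        # resolve back to the guard, so __call__ is what finally runs.
        return self

    def __call__(self, *args, **kwargs):
        stacktrace = "".join(traceback.format_stack())
        LOG.error('No db access allowed in nova-compute: %s', stacktrace)
        raise exception.DBNotAllowed('nova-compute')


db_api.IMPL = _NoDBGuard()  # hypothetical wiring for this service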
Example no. 50
 def __call__(self, req):
     try:
         return req.get_response(self.application)
     except Exception:
         LOG.exception(_LE("FaultWrapper error"))
         return faults.Fault(webob.exc.HTTPInternalServerError())
Example no. 51
 def _neutron_failed_callback(self, event_name, instance):
     LOG.error(_LE('Neutron reported failure on event %s'),
               event_name,
               instance=instance)
     if CONF.vif_plugging_is_fatal:
         raise exception.VirtualInterfaceCreateException()
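For context, error callbacks with this (event_name, instance) signature are typically passed to nova's event-waiting helper; the following is a sketch, assuming self.virtapi, network_info, and the usual CONF options are in scope as they are in the compute manager:

events = [('network-vif-plugged', vif['id']) for vif in network_info]
with self.virtapi.wait_for_instance_event(
        instance, events, deadline=CONF.vif_plugging_timeout,
        error_callback=self._neutron_failed_callback):
    pass  # plug VIFs and start the guest inside this block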
Example no. 52
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VM evacuation.
        """
        if not mode:
            return 'off_maintenance'
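        # Entering maintenance: live-migrate every guest VM on this host to
        # another member of the pool, tracking how many actually moved.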
        host_list = [
            host_ref for host_ref in self._session.host.get_all()
            if host_ref != self._session.host_ref
        ]
        migrations_counter = vm_counter = 0
        ctxt = context.get_admin_context()
        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
            for host_ref in host_list:
                try:
                    # Ensure only guest instances are migrated
                    uuid = vm_rec['other_config'].get('nova_uuid')
                    if not uuid:
                        name = vm_rec['name_label']
                        uuid = _uuid_find(ctxt, host, name)
                        if not uuid:
                            LOG.info(
                                _LI('Instance %(name)s running on '
                                    '%(host)s could not be found in '
                                    'the database: assuming it is a '
                                    'worker VM and skipping migration '
                                    'to a new host'), {
                                        'name': name,
                                        'host': host
                                    })
                            continue
                    instance = objects.Instance.get_by_uuid(ctxt, uuid)
                    vm_counter = vm_counter + 1

                    aggregate = objects.AggregateList.get_by_host(
                        ctxt, host, key=pool_states.POOL_FLAG)
                    if not aggregate:
                        msg = _('Aggregate for host %(host)s could not be'
                                ' found.') % dict(host=host)
                        raise exception.NotFound(msg)

                    dest = _host_find(ctxt, self._session, aggregate[0],
                                      host_ref)
                    instance.host = dest
                    instance.task_state = task_states.MIGRATING
                    instance.save()

                    self._session.VM.pool_migrate(vm_ref, host_ref,
                                                  {"live": "true"})
                    migrations_counter = migrations_counter + 1

                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

                    break
                except self._session.XenAPI.Failure:
                    LOG.exception(
                        _LE('Unable to migrate VM %(vm_ref)s '
                            'from %(host)s'), {
                                'vm_ref': vm_ref,
                                'host': host
                            })
                    instance.host = host
                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

        if vm_counter == migrations_counter:
            return 'on_maintenance'
        else:
            raise exception.NoValidHost(reason='Unable to find suitable '
                                        'host for VMs evacuation')
Example no. 53
    def unshelve_instance(self, context, instance):
        sys_meta = instance.system_metadata

        def safe_image_show(ctx, image_id):
            if image_id:
                return self.image_api.get(ctx, image_id, show_deleted=False)
            else:
                raise exception.ImageNotFound(image_id='')

        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            image = None
            image_id = sys_meta.get('shelved_image_id')
            # No need to check for the image if image_id is None, as the
            # "shelved_image_id" key is not set for volume-backed
            # instances during the shelve process
            if image_id:
                with compute_utils.EventReporter(
                    context, 'get_image_info', instance.uuid):
                    try:
                        image = safe_image_show(context, image_id)
                    except exception.ImageNotFound:
                        instance.vm_state = vm_states.ERROR
                        instance.save()

                        reason = _('Unshelve attempted but the image %s '
                                   'cannot be found.') % image_id

                        LOG.error(reason, instance=instance)
                        raise exception.UnshelveException(
                            instance_id=instance.uuid, reason=reason)

            try:
                with compute_utils.EventReporter(context, 'schedule_instances',
                                                 instance.uuid):
                    filter_properties = {}
                    scheduler_utils.populate_retry(filter_properties,
                                                   instance.uuid)
                    request_spec = scheduler_utils.build_request_spec(
                            context, image, [instance])
                    hosts = self._schedule_instances(
                            context, request_spec, filter_properties)
                    host_state = hosts[0]
                    scheduler_utils.populate_filter_properties(
                            filter_properties, host_state)
                    (host, node) = (host_state['host'], host_state['nodename'])
                    self.compute_rpcapi.unshelve_instance(
                            context, instance, host, image=image,
                            filter_properties=filter_properties, node=node)
            except (exception.NoValidHost,
                    exception.UnsupportedPolicyException):
                instance.task_state = None
                instance.save()
                LOG.warning(_LW("No valid host found for unshelve instance"),
                            instance=instance)
                return
            except Exception:
                with excutils.save_and_reraise_exception():
                    instance.task_state = None
                    instance.save()
                    LOG.error(_LE("Unshelve attempted but an error "
                                  "has occurred"), instance=instance)
        else:
            LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
                          'SHELVED_OFFLOADED'), instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
Example no. 54
    def _live_migrate(self, context, instance, scheduler_hint,
                      block_migration, disk_over_commit):
        destination = scheduler_hint.get("host")

        def _set_vm_state(context, instance, ex, vm_state=None,
                          task_state=None):
            request_spec = {
                'instance_properties': {'uuid': instance.uuid},
            }
            scheduler_utils.set_vm_state_and_notify(context,
                instance.uuid,
                'compute_task', 'migrate_server',
                dict(vm_state=vm_state,
                     task_state=task_state,
                     expected_task_state=task_states.MIGRATING,),
                ex, request_spec, self.db)

        migration = objects.Migration(context=context.elevated())
        migration.dest_compute = destination
        migration.status = 'accepted'
        migration.instance_uuid = instance.uuid
        migration.source_compute = instance.host
        migration.migration_type = 'live-migration'
        if instance.obj_attr_is_set('flavor'):
            migration.old_instance_type_id = instance.flavor.id
            migration.new_instance_type_id = instance.flavor.id
        else:
            migration.old_instance_type_id = instance.instance_type_id
            migration.new_instance_type_id = instance.instance_type_id
        migration.create()

        task = self._build_live_migrate_task(context, instance, destination,
                                             block_migration, disk_over_commit,
                                             migration)
        try:
            task.execute()
        except (exception.NoValidHost,
                exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType,
                exception.InvalidCPUInfo,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage,
                exception.InvalidSharedStorage,
                exception.HypervisorUnavailable,
                exception.InstanceInvalidState,
                exception.MigrationPreCheckError,
                exception.LiveMigrationWithOldNovaNotSafe) as ex:
            with excutils.save_and_reraise_exception():
                # TODO(johngarbutt) - eventually need instance actions here
                _set_vm_state(context, instance, ex, instance.vm_state)
                migration.status = 'error'
                migration.save()
        except Exception as ex:
            LOG.error(_LE('Migration of instance %(instance_id)s to host'
                          ' %(dest)s unexpectedly failed.'),
                      {'instance_id': instance.uuid, 'dest': destination},
                      exc_info=True)
            _set_vm_state(context, instance, ex, vm_states.ERROR,
                          instance.task_state)
            migration.status = 'failed'
            migration.save()
            raise exception.MigrationError(reason=six.text_type(ex))
Example no. 55
 def _get_os_obj(self, os_name):
     try:
         return _OsInfoDatabase.get_instance().get_os(os_name)
     except exception.NovaException as e:
         LOG.error(_LE("Cannot find OS information - Reason: (%s)"), e)
Example no. 56
    def _delete_inventory(self, rp_uuid):
        """Deletes all inventory records for a resource provider with the
        supplied UUID.

        First attempt to DELETE the inventory using microversion 1.5. If
        this results in a 406, fail over to a PUT.
        """
        curr = self._get_inventory_and_update_provider_generation(rp_uuid)

        # Check to see if we need to update placement's view
        if not curr.get('inventories', {}):
            msg = "No inventory to delete from resource provider %s."
            LOG.debug(msg, rp_uuid)
            return

        msg = _LI("Compute node %s reported no inventory but previous "
                  "inventory was detected. Deleting existing inventory "
                  "records.")
        LOG.info(msg, rp_uuid)

        url = '/resource_providers/%s/inventories' % rp_uuid
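        # The version kwarg is sent as a placement microversion request
        # header; the 406 handled below means the server does not
        # support 1.5.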
        r = self.delete(url, version="1.5")
        placement_req_id = get_placement_request_id(r)
        cur_rp_gen = self._resource_providers[rp_uuid]['generation']
        msg_args = {
            'rp_uuid': rp_uuid,
            'placement_req_id': placement_req_id,
        }
        if r.status_code == 406:
            # microversion 1.5 not available so try the earlier way
            # TODO(cdent): When we're happy that all placement
            # servers support microversion 1.5 we can remove this
            # call and the associated code.
            LOG.debug('Falling back to placement API microversion 1.0 '
                      'for deleting all inventory for a resource provider.')
            payload = {
                'resource_provider_generation': cur_rp_gen,
                'inventories': {},
            }
            r = self.put(url, payload)
            placement_req_id = get_placement_request_id(r)
            msg_args['placement_req_id'] = placement_req_id
            if r.status_code == 200:
                # Update our view of the generation for next time
                updated_inv = r.json()
                new_gen = updated_inv['resource_provider_generation']

                self._resource_providers[rp_uuid]['generation'] = new_gen
                msg_args['generation'] = new_gen
                LOG.info(
                    _LI("[%(placement_req_id)s] Deleted all inventory "
                        "for resource provider %(rp_uuid)s at generation "
                        "%(generation)i."), msg_args)
                return

        if r.status_code == 204:
            self._resource_providers[rp_uuid]['generation'] = cur_rp_gen + 1
            LOG.info(
                _LI("[%(placement_req_id)s] Deleted all inventory for "
                    "resource provider %(rp_uuid)s."), msg_args)
            return
        elif r.status_code == 404:
            # This can occur if another thread has already deleted the
            # inventory and the resource provider.
            LOG.debug(
                "[%(placement_req_id)s] Resource provider %(rp_uuid)s "
                "deleted by another thread when trying to delete "
                "inventory. Ignoring.", msg_args)
            self._resource_providers.pop(rp_uuid, None)
            self._provider_aggregate_map.pop(rp_uuid, None)
            return
        elif r.status_code == 409:
            rc_str = _extract_inventory_in_use(r.text)
            if rc_str is not None:
                msg = _LW("[%(placement_req_id)s] We cannot delete inventory "
                          "%(rc_str)s for resource provider %(rp_uuid)s "
                          "because the inventory is in use.")
                msg_args['rc_str'] = rc_str
                LOG.warning(msg, msg_args)
                return

        msg = _LE("[%(placement_req_id)s] Failed to delete inventory for "
                  "resource provider %(rp_uuid)s. Got error response: "
                  "%(err)s.")
        msg_args['err'] = r.text
        LOG.error(msg, msg_args)
Example no. 57
import libvirt  # needed for libvirt.libvirtError; psutil, LOG and
                # LibvirtDriver are assumed to be in scope, as before

if __name__ == "__main__":
    try:
        physical_cpu_count = psutil.cpu_count(logical=False)
        # libvirt expects a cpumap as a tuple of booleans, one entry per
        # physical CPU that the vCPU may run on.
        cpumap = (True,) * physical_cpu_count
        virt_doms = LibvirtDriver()._list_instance_domains_fast()
        for dom in virt_doms:
            try:
                # dom.vcpus() returns (vcpu_info, vcpu_maps); each
                # vcpu_info entry is (number, state, cpu_time, real_cpu).
                for info in dom.vcpus()[0]:
                    virtual_cpu, real_cpu = info[0], info[-1]
                    LOG.info(_LI('---zztest pin cpu start---- '
                                 'virtual_cpu=%(virtual_cpu)s '
                                 'real_cpu=%(real_cpu)s '
                                 'cpumap=%(cpumap)s'),
                             {'virtual_cpu': virtual_cpu,
                              'real_cpu': real_cpu,
                              'cpumap': cpumap})
                    # Pin the vCPU to all physical CPUs; if pinning fails,
                    # fall back to capping its scheduler quota instead.
                    if dom.pinVcpu(virtual_cpu, cpumap) != 0:
                        dom.setSchedulerParameters({'vcpu_quota': 50000})
            except libvirt.libvirtError as e:
                LOG.error(_LE('zztest failed to pin and schedule cpu of '
                              '%(instance)s: %(e)s'),
                          {'instance': dom.name(), 'e': e})
    except libvirt.libvirtError as e:
        LOG.error(_LE('zztest failed to pin and schedule cpus: %s'), e)
Example no. 58
 def __call__(self, *args, **kwargs):
     stacktrace = "".join(traceback.format_stack())
     LOG.error(_LE('No db access allowed in nova-dhcpbridge: %s'),
               stacktrace)
     raise exception.DBNotAllowed('nova-dhcpbridge')