Example #1
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronv2.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_("Neutron Error:"))
                raise exc_info[0], exc_info[1], exc_info[2]
        params = {'device_id': instance['uuid']}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance['uuid'])
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance, the
                # security group should be on both ports, since it was added
                # that way if done through the nova api. In case it is not,
                # a 404 is only raised if the security group is not found on
                # any of the ports on the instance.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_("Adding security group %(security_group_id)s to "
                           "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Neutron Error:"))
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance['uuid']})
            self.raise_not_found(msg)
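
Every example in this list shares one idiom: cleanup or logging code runs inside excutils.save_and_reraise_exception(), a context manager entered from within an except clause. It captures the exception currently being handled, runs its body, and then re-raises the original exception with its traceback intact. A simplified Python 2-era sketch of the idea (not the exact oslo implementation, which also logs the saved exception if the body itself raises):

    import sys
    from contextlib import contextmanager

    @contextmanager
    def save_and_reraise_exception():
        # Must be entered from inside an 'except' clause, so that
        # sys.exc_info() still refers to the exception being handled.
        type_, value, tb = sys.exc_info()
        # Run the caller's cleanup/logging block.
        yield
        # The cleanup succeeded: re-raise the original exception with its
        # traceback (Python 2 three-argument raise). If the block raised
        # instead, that new exception propagates and the saved one is lost.
        raise type_, value, tb

The helper matters because cleanup code may itself raise and handle exceptions, which in Python 2 clobbers sys.exc_info() and would make a later bare raise re-raise the wrong error.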
Example #2
File: manager.py Project: nickguthrie/nova
 def _live_migrate(self, context, instance, scheduler_hint,
                   block_migration, disk_over_commit):
     destination = scheduler_hint.get("host")
     try:
         live_migrate.execute(context, instance, destination,
                          block_migration, disk_over_commit)
     except (exception.NoValidHost,
             exception.ComputeServiceUnavailable,
             exception.InvalidHypervisorType,
             exception.UnableToMigrateToSelf,
             exception.DestinationHypervisorTooOld,
             exception.InvalidLocalStorage,
             exception.InvalidSharedStorage,
             exception.MigrationPreCheckError) as ex:
         with excutils.save_and_reraise_exception():
             # TODO(johngarbutt) - eventually need instance actions here
             request_spec = {'instance_properties': {
                 'uuid': instance['uuid'], },
             }
             scheduler_utils.set_vm_state_and_notify(context,
                     'compute_task', 'migrate_server',
                     dict(vm_state=instance['vm_state'],
                          task_state=None,
                          expected_task_state=task_states.MIGRATING,),
                     ex, request_spec, self.db)
     except Exception as ex:
         with excutils.save_and_reraise_exception():
             request_spec = {'instance_properties': {
                 'uuid': instance['uuid'], },
             }
             scheduler_utils.set_vm_state_and_notify(context,
                     'compute_task', 'migrate_server',
                     {'vm_state': vm_states.ERROR},
                     ex, request_spec, self.db)
Example #3
File: manager.py Project: gukai/su-nova
 def live_migration(self, context, instance, dest,
                    block_migration, disk_over_commit):
     try:
         self._schedule_live_migration(context, instance, dest,
                 block_migration, disk_over_commit)
     except (exception.NoValidHost,
             exception.ComputeServiceUnavailable,
             exception.InvalidHypervisorType,
             exception.UnableToMigrateToSelf,
             exception.DestinationHypervisorTooOld,
             exception.InvalidLocalStorage,
             exception.InvalidSharedStorage,
             exception.MigrationPreCheckError) as ex:
         request_spec = {'instance_properties': {
             'uuid': instance['uuid'], },
         }
         with excutils.save_and_reraise_exception():
             self._set_vm_state_and_notify('live_migration',
                         dict(vm_state=instance['vm_state'],
                              task_state=None,
                              expected_task_state=task_states.MIGRATING,),
                                           context, ex, request_spec)
     except Exception as ex:
         request_spec = {'instance_properties': {
             'uuid': instance['uuid'], },
         }
         with excutils.save_and_reraise_exception():
             self._set_vm_state_and_notify('live_migration',
                                          {'vm_state': vm_states.ERROR},
                                          context, ex, request_spec)
Example #4
File: impl_zmq.py Project: CiscoAS/nova
    def consume_in_thread(self):
        """Runs the ZmqProxy service"""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        if not os.path.isdir(ipc_dir):
            try:
                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
                              ipc_dir, run_as_root=True)
                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
            except utils.ProcessExecutionError:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Could not create IPC directory %s") %
                              (ipc_dir, ))

        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL,
                          out_bind=True)
        except zmq.ZMQError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #5
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #6
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronv2.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(neutron, "security_group", security_group_name)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = _("Security group %(name)s is not found for " "project %(project)s") % {
                    "name": security_group_name,
                    "project": context.project_id,
                }
                self.raise_not_found(msg)
            else:
                LOG.exception(_("Neutron Error:"))
                raise exc_info[0], exc_info[1], exc_info[2]
        params = {"device_id": instance["uuid"]}
        try:
            ports = neutron.list_ports(**params).get("ports")
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Neutron Error:"))

        if not ports:
            msg = _("instance_id %s could not be found as device id on" " any ports") % instance["uuid"]
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warn(
                    _(
                        "Cannot add security group %(name)s to %(instance)s"
                        " since the port %(port_id)s does not meet security"
                        " requirements"
                    ),
                    {"name": security_group_name, "instance": instance["uuid"], "port_id": port["id"]},
                )
                raise exception.SecurityGroupCannotBeApplied()
            if "security_groups" not in port:
                port["security_groups"] = []
            port["security_groups"].append(security_group_id)
            updated_port = {"security_groups": port["security_groups"]}
            try:
                LOG.info(
                    _("Adding security group %(security_group_id)s to " "port %(port_id)s"),
                    {"security_group_id": security_group_id, "port_id": port["id"]},
                )
                neutron.update_port(port["id"], {"port": updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Neutron Error:"))
Example #7
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronv2.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', security_group_name)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = ("Security group %s is not found for project %s" %
                       (security_group_name, context.project_id))
                self.raise_not_found(msg)
            else:
                LOG.exception(_("Neutron Error:"))
                raise exc_info[0], exc_info[1], exc_info[2]
        params = {'device_id': instance['uuid']}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Neutron Error:"))

        if not ports:
            msg = ("instance_id %s could not be found as device id on"
                   " any ports" % instance['uuid'])
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warn(_("Cannot add security group %(name)s to %(instance)s"
                           " since the port %(port_id)s does not meet security"
                           " requirements"), {'name': security_group_name,
                         'instance': instance['uuid'], 'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_("Adding security group %(security_group_id)s to "
                           "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Neutron Error:"))
Example #8
File: api.py Project: OpenStack-Kha/nova
 def allocate_for_instance(self, context, instance, **kwargs):
     """Allocate all network resources for the instance."""
     LOG.debug(_('allocate_for_instance() for %s'),
               instance['display_name'])
     search_opts = {}
     if instance['project_id']:
         search_opts.update({"tenant_id": instance['project_id']})
     else:
         msg = _('empty project id for instance %s')
         raise exception.InvalidInput(
             reason=msg % instance['display_name'])
     data = quantumv2.get_client(context).list_networks(**search_opts)
     nets = data.get('networks', [])
     created_port_ids = []
     for network in nets:
         port_req_body = {'port': {'network_id': network['id'],
                                   'admin_state_up': True,
                                   'device_id': instance['uuid'],
                                   'tenant_id': instance['project_id']},
         }
         try:
             created_port_ids.append(
                 quantumv2.get_client(context).create_port(
                     port_req_body)['port']['id'])
         except Exception:
             with excutils.save_and_reraise_exception():
                 for port_id in created_port_ids:
                     try:
                         quantumv2.get_client(context).delete_port(port_id)
                     except Exception as ex:
                         msg = _("Fail to delete port %(portid)s with"
                                 " failure: %(exception)s")
                         LOG.debug(msg, {'portid': port_id,
                                         'exception': ex})
     return self.get_instance_nw_info(context, instance, networks=nets)
Example #9
        def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(args=args, exception=e)
                        payload.update(kw)

                        # Use temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR

                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__

                        context = get_context_from_function_and_args(f,
                                                                     args,
                                                                     kw)

                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)
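
The snippet above is only the innermost closure of a notification decorator; the enclosing factory that binds f, notifier, publisher_id, level, and event_type is not shown. A plausible shape for that factory (hypothetical name, same parameter names as the closure assumes):

    import functools

    def notify_on_error(publisher_id, notifier, level=None, event_type=None):
        def decorator(f):
            @functools.wraps(f)  # keeps f.__name__ intact for event_type
            def wrapped(*args, **kw):
                # ... error-notification body exactly as in the example ...
                return f(*args, **kw)
            return wrapped
        return decorator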
Example #10
File: manager.py Project: gukai/su-nova
    def run_instance(self, context, request_spec, admin_password,
            injected_files, requested_networks, is_first_time,
            filter_properties, legacy_bdm_in_spec=True):
        """Tries to call schedule_run_instance on the driver.
        Sets instance vm_state to ERROR on exceptions
        """
        instance_uuids = request_spec['instance_uuids']
        with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                         'schedule', *instance_uuids):
            try:
                return self.driver.schedule_run_instance(context,
                        request_spec, admin_password, injected_files,
                        requested_networks, is_first_time, filter_properties,
                        legacy_bdm_in_spec)

            except exception.NoValidHost as ex:
                # don't re-raise
                self._set_vm_state_and_notify('run_instance',
                                              {'vm_state': vm_states.ERROR,
                                              'task_state': None},
                                              context, ex, request_spec)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify('run_instance',
                                                  {'vm_state': vm_states.ERROR,
                                                  'task_state': None},
                                                  context, ex, request_spec)
Example #11
    def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
        base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
                                          os.path.basename(base_vhd_path))
        try:
            LOG.debug(_('Copying base disk %(base_vhd_path)s to '
                        '%(base_vhd_copy_path)s'),
                      {'base_vhd_path': base_vhd_path,
                       'base_vhd_copy_path': base_vhd_copy_path})
            self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)

            LOG.debug(_("Reconnecting copied base VHD "
                        "%(base_vhd_copy_path)s and diff "
                        "VHD %(diff_vhd_path)s"),
                      {'base_vhd_copy_path': base_vhd_copy_path,
                       'diff_vhd_path': diff_vhd_path})
            self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
                                                base_vhd_copy_path)

            LOG.debug(_("Merging base disk %(base_vhd_copy_path)s and "
                        "diff disk %(diff_vhd_path)s"),
                      {'base_vhd_copy_path': base_vhd_copy_path,
                       'diff_vhd_path': diff_vhd_path})
            self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)

            # Replace the differential VHD with the merged one
            self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
        except Exception:
            with excutils.save_and_reraise_exception():
                if self._pathutils.exists(base_vhd_copy_path):
                    self._pathutils.remove(base_vhd_copy_path)
Example #12
File: exception.py Project: djipko/nova
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(exception=e)
                        call_dict = safe_utils.getcallargs(f, *args, **kw)
                        cleansed = _cleanse_dict(call_dict)
                        payload.update({'args': cleansed})

                        # Use temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR

                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__

                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)
Example #13
    def destroy(self, context, instance, network_info, block_device_info=None):
        context = nova_context.get_admin_context()

        try:
            node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
        except exception.InstanceNotFound:
            LOG.warning(_("Destroy called on non-existing instance %s")
                    % instance['uuid'])
            return

        try:
            self.driver.deactivate_node(context, node, instance)
            self.power_off(instance, node)
            self.driver.deactivate_bootloader(context, node, instance)
            self.driver.destroy_images(context, node, instance)

            self._detach_block_devices(instance, block_device_info)
            self._stop_firewall(instance, network_info)
            self._unplug_vifs(instance, network_info)

            _update_state(context, node, None, baremetal_states.DELETED)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                try:
                    LOG.error(_("Error from baremetal driver "
                                "during destroy: %s") % e)
                    _update_state(context, node, instance,
                                  baremetal_states.ERROR)
                except Exception:
                    LOG.error(_("Error while recording destroy failure in "
                                "baremetal database: %s") % e)
Example #14
File: driver.py Project: jtfrey/xcat-core
    def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None):
        """Reboot the specified instance.

        After this is called successfully, the instance's state
        goes back to power_state.RUNNING. The virtualization
        platform should ensure that the reboot action has completed
        successfully even in cases in which the underlying domain/vm
        is paused or halted/stopped.

        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param reboot_type: Either a HARD or SOFT reboot
        :param block_device_info: Info pertaining to attached volumes
        :param bad_volumes_callback: Function to handle any bad volumes
            encountered
        """
        try:
            node = bm_driver._get_baremetal_node_by_instance_uuid(instance["uuid"])
            macs = self.macs_for_instance(instance)
            nodename = self.xcat.get_xcat_node_name(macs)
            self.xcat.reboot_node(nodename)
            bm_driver._update_state(context, node, instance, baremetal_states.RUNNING)
        except xcat_exception.xCATCommandError as e:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Error occured while rebooting instance %(instance)s " "on baremetal node %(node)s: %(error)s")
                    % {"instance": instance["uuid"], "node": node["uuid"], "error": str(e)}
                )
                bm_driver._update_state(context, node, instance, baremetal_states.ERROR)
Example #15
File: manager.py Project: tr3buchet/nova
 def live_migration(self, context, instance, dest, block_migration, disk_over_commit):
     try:
         return self.driver.schedule_live_migration(context, instance, dest, block_migration, disk_over_commit)
     except exception.ComputeServiceUnavailable as ex:
         request_spec = {"instance_properties": {"uuid": instance["uuid"]}}
         with excutils.save_and_reraise_exception():
             self._set_vm_state_and_notify(
                 "live_migration",
                 dict(vm_state=instance["vm_state"], task_state=None, expected_task_state=task_states.MIGRATING),
                 context,
                 ex,
                 request_spec,
             )
     except Exception as ex:
         with excutils.save_and_reraise_exception():
             self._set_vm_state_and_notify("live_migration", {"vm_state": vm_states.ERROR}, context, ex, {})
Example #16
File: crypto.py Project: RibeiroAna/nova
def _sign_csr(csr_text, ca_folder):
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, 'inbound.csr')
        outbound = os.path.join(tmpdir, 'outbound.csr')

        try:
            with open(inbound, 'w') as csrfile:
                csrfile.write(csr_text)
        except IOError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_('Failed to write inbound.csr'))

        LOG.debug('Flags path: %s', ca_folder)
        start = os.getcwd()

        # Change working dir to CA
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
                      './openssl.cnf', '-infiles', inbound)
        out, _err = utils.execute('openssl', 'x509', '-in', outbound,
                                  '-serial', '-noout')
        serial = string.strip(out.rpartition('=')[2])
        os.chdir(start)

        with open(outbound, 'r') as crtfile:
            return (serial, crtfile.read())
Example #17
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
           root_mb, swap_mb, ephemeral_mb, preserve_ephemeral=False):
    """All-in-one function to deploy a node.

    :param preserve_ephemeral: If True, no filesystem is written to the
        ephemeral block device, preserving whatever content it had (if the
        partition table has not changed).
    """
    dev = get_dev(address, port, iqn, lun)
    image_mb = get_image_mb(image_path)
    if image_mb > root_mb:
        root_mb = image_mb
    discovery(address, port)
    login_iscsi(address, port, iqn)
    try:
        root_uuid = work_on_disk(dev, root_mb, swap_mb, ephemeral_mb,
                image_path, preserve_ephemeral)
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            # Log output if there was an error
            LOG.error(_("Cmd     : %s"), err.cmd)
            LOG.error(_("StdOut  : %r"), err.stdout)
            LOG.error(_("StdErr  : %r"), err.stderr)
    finally:
        logout_iscsi(address, port, iqn)
    switch_pxe_config(pxe_config_path, root_uuid)
    # Ensure the node started netcat on the port after POST the request.
    time.sleep(3)
    notify(address, 10000)
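
Note the control flow when work_on_disk() fails here: the except block logs first, save_and_reraise_exception() re-raises on exit, and the finally clause still logs out of the iSCSI session before the error reaches the caller. A tiny self-contained demo of that ordering (a plain raise stands in for the context manager):

    def deploy_like_flow():
        try:
            raise RuntimeError('work_on_disk failed')
        except RuntimeError:
            print('1. error details logged in the except block')
            raise  # save_and_reraise_exception() re-raises here
        finally:
            print('2. finally: logout_iscsi() still runs')

    try:
        deploy_like_flow()
    except RuntimeError:
        print('3. the caller sees the original exception')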
Example #18
File: manager.py Project: xww/nova-old
 def prep_resize(self, context, image, request_spec, filter_properties,
                 instance, instance_type, reservations,
                 keep_ephemeral=True):
     """Tries to call schedule_prep_resize on the driver.
     Sets instance vm_state to ACTIVE on NoHostFound
     Sets vm_state to ERROR on other exceptions
     """
     try:
         kwargs = {
             'context': context,
             'image': image,
             'request_spec': request_spec,
             'filter_properties': filter_properties,
             'instance': instance,
             'instance_type': instance_type,
             'reservations': reservations,
             'keep_ephemeral': keep_ephemeral
         }
         return self.driver.schedule_prep_resize(**kwargs)
     except exception.NoValidHost as ex:
         self._set_vm_state_and_notify('prep_resize',
                                      {'vm_state': vm_states.ACTIVE,
                                       'task_state': None},
                                      context, ex, request_spec)
         if reservations:
             QUOTAS.rollback(context, reservations)
     except Exception as ex:
         with excutils.save_and_reraise_exception():
             self._set_vm_state_and_notify('prep_resize',
                                          {'vm_state': vm_states.ERROR,
                                           'task_state': None},
                                          context, ex, request_spec)
             if reservations:
                 QUOTAS.rollback(context, reservations)
Example #19
File: manager.py Project: artofwar/stack
 def run_instance(
     self,
     context,
     request_spec,
     admin_password,
     injected_files,
     requested_networks,
     is_first_time,
     filter_properties,
 ):
     """Tries to call schedule_run_instance on the driver.
     Sets instance vm_state to ERROR on exceptions
     """
     try:
         return self.driver.schedule_run_instance(
             context,
             request_spec,
             admin_password,
             injected_files,
             requested_networks,
             is_first_time,
             filter_properties,
         )
     except exception.NoValidHost as ex:
         # don't re-raise
         self._set_vm_state_and_notify(
             "run_instance", {"vm_state": vm_states.ERROR, "task_state": None}, context, ex, request_spec
         )
     except Exception as ex:
         with excutils.save_and_reraise_exception():
             self._set_vm_state_and_notify(
                 "run_instance", {"vm_state": vm_states.ERROR, "task_state": None}, context, ex, request_spec
             )
Example #20
File: manager.py Project: dlq84/nova
 def _live_migrate(self, context, instance, scheduler_hint,
                   block_migration, disk_over_commit):
     destination = scheduler_hint.get("host")
     try:
         live_migrate.execute(context, instance, destination,
                          block_migration, disk_over_commit)
     except (exception.NoValidHost,
             exception.ComputeServiceUnavailable,
             exception.InvalidHypervisorType,
             exception.InvalidCPUInfo,
             exception.UnableToMigrateToSelf,
             exception.DestinationHypervisorTooOld,
             exception.InvalidLocalStorage,
             exception.InvalidSharedStorage,
             exception.HypervisorUnavailable,
             exception.InstanceNotRunning,
             exception.MigrationPreCheckError) as ex:
         with excutils.save_and_reraise_exception():
             # TODO(johngarbutt) - eventually need instance actions here
             request_spec = {'instance_properties': {
                 'uuid': instance['uuid'], },
             }
             scheduler_utils.set_vm_state_and_notify(context,
                     'compute_task', 'migrate_server',
                     dict(vm_state=instance['vm_state'],
                          task_state=None,
                          expected_task_state=task_states.MIGRATING,),
                     ex, request_spec, self.db)
     except Exception as ex:
         LOG.error(_('Migration of instance %(instance_id)s to host'
                    ' %(dest)s unexpectedly failed.'),
                    {'instance_id': instance['uuid'], 'dest': destination},
                    exc_info=True)
         raise exception.MigrationError(reason=ex)
Example #21
    def _create_root_vhd(self, context, instance):
        base_vhd_path = self._imagecache.get_cached_image(context, instance)
        root_vhd_path = self._pathutils.get_vhd_path(instance["name"])

        try:
            if CONF.use_cow_images:
                LOG.debug(
                    _("Creating differencing VHD. Parent: " "%(base_vhd_path)s, Target: %(root_vhd_path)s") % locals()
                )
                self._vhdutils.create_differencing_vhd(root_vhd_path, base_vhd_path)
            else:
                LOG.debug(_("Copying VHD image %(base_vhd_path)s to target: " "%(root_vhd_path)s") % locals())
                self._pathutils.copyfile(base_vhd_path, root_vhd_path)

                base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
                base_vhd_size = base_vhd_info["MaxInternalSize"]
                root_vhd_size = instance["root_gb"] * 1024 ** 3

                if root_vhd_size < base_vhd_size:
                    raise vmutils.HyperVException(_("Cannot resize a VHD to a " "smaller size"))
                elif root_vhd_size > base_vhd_size:
                    LOG.debug(_("Resizing VHD %(root_vhd_path)s to new " "size %(root_vhd_size)s") % locals())
                    self._vhdutils.resize_vhd(root_vhd_path, root_vhd_size)
        except Exception:
            with excutils.save_and_reraise_exception():
                if self._pathutils.exists(root_vhd_path):
                    self._pathutils.remove(root_vhd_path)

        return root_vhd_path
Example #22
File: manager.py Project: migue/nova
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])

        try:
            snap_name = snapshot_ref['name']
            LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error'})

        self.db.snapshot_update(context,
                                snapshot_ref['id'], {'status': 'available',
                                                     'progress': '100%'})
        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
        return snapshot_id
Example #23
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_("Spawning new instance"), instance=instance)

        instance_name = instance['name']
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password)
                self.attach_config_drive(instance, configdrive_path)

            self.power_on(instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #24
def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, image_path):
    """Creates partitions and write an image to the root partition."""
    if ephemeral_mb:
        ephemeral_part = "%s-part1" % dev
        swap_part = "%s-part2" % dev
        root_part = "%s-part3" % dev
    else:
        root_part = "%s-part1" % dev
        swap_part = "%s-part2" % dev

    if not is_block_device(dev):
        LOG.warn(_("parent device '%s' not found"), dev)
        return
    make_partitions(dev, root_mb, swap_mb, ephemeral_mb)
    if not is_block_device(root_part):
        LOG.warn(_("root device '%s' not found"), root_part)
        return
    if not is_block_device(swap_part):
        LOG.warn(_("swap device '%s' not found"), swap_part)
        return
    dd(image_path, root_part)
    mkswap(swap_part)
    if ephemeral_mb and not is_block_device(ephemeral_part):
        LOG.warn(_("ephemeral device '%s' not found"), ephemeral_part)
    elif ephemeral_mb:
        mkfs_ephemeral(ephemeral_part)

    try:
        root_uuid = block_uuid(root_part)
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Failed to detect root device UUID."))
    return root_uuid
Example #25
File: floating_ips.py Project: devoid/nova
    def allocate_floating_ip(self, context, project_id, auto_assigned=False,
                             pool=None):
        """Gets a floating ip from the pool."""
        # NOTE(tr3buchet): all network hosts in zone now use the same pool
        pool = pool or CONF.default_floating_pool
        use_quota = not auto_assigned

        # Check the quota; can't put this in the API because we get
        # called into from other places
        try:
            if use_quota:
                reservations = QUOTAS.reserve(context, floating_ips=1,
                                              project_id=project_id)
        except exception.OverQuota:
            LOG.warn(_("Quota exceeded for %s, tried to allocate "
                       "floating IP"), context.project_id)
            raise exception.FloatingIpLimitExceeded()

        try:
            floating_ip = floating_ip_obj.FloatingIP.allocate_address(
                context, project_id, pool, auto_assigned=auto_assigned)
            payload = dict(project_id=project_id, floating_ip=floating_ip)
            self.notifier.info(context,
                               'network.floating_ip.allocate', payload)

            # Commit the reservations
            if use_quota:
                QUOTAS.commit(context, reservations, project_id=project_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                if use_quota:
                    QUOTAS.rollback(context, reservations,
                                    project_id=project_id)

        return floating_ip
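
The quota handling above is a two-phase pattern: reserve first, do the work, then commit on success or roll back on failure, with the rollback guarded by save_and_reraise_exception() so the original error still propagates. A self-contained toy version of the flow (FakeQuotas and allocate() are stand-ins, not nova APIs):

    class FakeQuotas(object):
        def reserve(self, **kwargs):
            print('reserved %s' % kwargs)
            return 'reservation-token'

        def commit(self, token):
            print('committed %s' % token)

        def rollback(self, token):
            print('rolled back %s' % token)

    def allocate(quotas, fail=True):
        token = quotas.reserve(floating_ips=1)
        try:
            if fail:
                raise RuntimeError('address pool is empty')
            quotas.commit(token)
        except Exception:
            # In nova this block runs inside save_and_reraise_exception().
            quotas.rollback(token)
            raise

    try:
        allocate(FakeQuotas())
    except RuntimeError:
        print('caller still sees the original error')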
Example #26
File: floating_ips.py Project: devoid/nova
        def do_associate():
            # associate floating ip
            fixed = self.db.floating_ip_fixed_ip_associate(context,
                                                           floating_address,
                                                           fixed_address,
                                                           self.host)
            if not fixed:
                # NOTE(vish): ip was already associated
                return
            try:
                # gogo driver time
                self.l3driver.add_floating_ip(floating_address, fixed_address,
                        interface, fixed['network'])
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception() as exc_ctxt:
                    try:
                        self.db.floating_ip_disassociate(context,
                                floating_address)
                    except Exception:
                        LOG.warn(_('Failed to disassociate floating '
                                   'address: %s'), floating_address)
                        pass
                    if "Cannot find device" in str(e):
                        try:
                            LOG.error(_('Interface %s not found'), interface)
                        except Exception:
                            pass
                        raise exception.NoFloatingIpInterface(
                                interface=interface)

            payload = dict(project_id=context.project_id,
                           instance_id=instance_uuid,
                           floating_ip=floating_address)
            self.notifier.info(context,
                               'network.floating_ip.associate', payload)
Example #27
File: manager.py Project: tr3buchet/nova
 def prep_resize(self, context, image, request_spec, filter_properties, instance, instance_type, reservations):
     """Tries to call schedule_prep_resize on the driver.
     Sets instance vm_state to ACTIVE on NoHostFound
     Sets vm_state to ERROR on other exceptions
     """
     instance_uuid = instance["uuid"]
     with compute_utils.EventReporter(context, conductor_api.LocalAPI(), "schedule", instance_uuid):
         try:
             kwargs = {
                 "context": context,
                 "image": image,
                 "request_spec": request_spec,
                 "filter_properties": filter_properties,
                 "instance": instance,
                 "instance_type": instance_type,
                 "reservations": reservations,
             }
             return self.driver.schedule_prep_resize(**kwargs)
         except exception.NoValidHost as ex:
             self._set_vm_state_and_notify(
                 "prep_resize", {"vm_state": vm_states.ACTIVE, "task_state": None}, context, ex, request_spec
             )
             if reservations:
                 QUOTAS.rollback(context, reservations)
         except Exception as ex:
             with excutils.save_and_reraise_exception():
                 self._set_vm_state_and_notify(
                     "prep_resize", {"vm_state": vm_states.ERROR, "task_state": None}, context, ex, request_spec
                 )
                 if reservations:
                     QUOTAS.rollback(context, reservations)
Example #28
File: manager.py Project: dlzh/nova
 def prep_resize(self, context, image, request_spec, filter_properties,
                 instance, instance_type, reservations):
     """Tries to call schedule_prep_resize on the driver.
     Sets instance vm_state to ACTIVE on NoHostFound
     Sets vm_state to ERROR on other exceptions
     """
     instance_uuid = instance['uuid']
     with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                      'schedule', instance_uuid):
         try:
             kwargs = {
                 'context': context,
                 'image': image,
                 'request_spec': request_spec,
                 'filter_properties': filter_properties,
                 'instance': instance,
                 'instance_type': instance_type,
                 'reservations': reservations,
             }
             return self.driver.schedule_prep_resize(**kwargs)
         except exception.NoValidHost as ex:
             self._set_vm_state_and_notify('prep_resize',
                                          {'vm_state': vm_states.ACTIVE,
                                           'task_state': None},
                                          context, ex, request_spec)
             if reservations:
                 QUOTAS.rollback(context, reservations)
         except Exception as ex:
             with excutils.save_and_reraise_exception():
                 self._set_vm_state_and_notify('prep_resize',
                                              {'vm_state': vm_states.ERROR,
                                               'task_state': None},
                                              context, ex, request_spec)
                 if reservations:
                     QUOTAS.rollback(context, reservations)
Example #29
    def _create_config_drive(self, instance, injected_files, admin_password):
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.HyperVException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)

        LOG.info(_('Using config drive for instance: %s'), instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md)

        instance_path = self._pathutils.get_instance_dir(
            instance['name'])
        configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
        LOG.info(_('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso}, instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except exception.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_('Creating config drive failed with error: %s'),
                              e, instance=instance)
Example #30
File: driver.py Project: CrazyTeaFs/nova
    def _spawn(self, node, context, instance, image_meta, injected_files,
            admin_password, network_info=None, block_device_info=None):
        try:
            self._plug_vifs(instance, network_info, context=context)
            self._attach_block_devices(instance, block_device_info)
            self._start_firewall(instance, network_info)

            # Caching images is both CPU and I/O expensive. When running many
            # machines from a single nova-compute server, deploys of multiple
            # machines can easily thrash the nova-compute server - unlike a
            # virt hypervisor which is limited by CPU for VMs, baremetal only
            # uses CPU and I/O when deploying. By only downloading one image
            # at a time we serialise rather than thrashing, which leads to a
            # lower average time-to-complete during overload situations, and
            # a (relatively) insignificant delay for compute servers which
            # have sufficient IOPS to handle multiple concurrent image
            # conversions.
            with lockutils.lock('nova-baremetal-cache-images', external=True):
                self.driver.cache_images(
                            context, node, instance,
                            admin_password=admin_password,
                            image_meta=image_meta,
                            injected_files=injected_files,
                            network_info=network_info,
                        )
            self.driver.activate_bootloader(context, node, instance,
                                            network_info=network_info)
            # NOTE(deva): ensure node is really off before we turn it on
            #             fixes bug https://code.launchpad.net/bugs/1178919
            self.power_off(instance, node)
            self.power_on(context, instance, network_info, block_device_info,
                          node)
            _update_state(context, node, instance, baremetal_states.PREPARED)

            self.driver.activate_node(context, node, instance)
            _update_state(context, node, instance, baremetal_states.ACTIVE)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error deploying instance %(instance)s "
                            "on baremetal node %(node)s.") %
                            {'instance': instance['uuid'],
                             'node': node['uuid']})

                # Do not set instance=None yet. This prevents another
                # spawn() while we are cleaning up.
                _update_state(context, node, instance, baremetal_states.ERROR)

                self.driver.deactivate_node(context, node, instance)
                self.power_off(instance, node)
                self.driver.deactivate_bootloader(context, node, instance)
                self.driver.destroy_images(context, node, instance)

                self._detach_block_devices(instance, block_device_info)
                self._stop_firewall(instance, network_info)
                self._unplug_vifs(instance, network_info)

                _update_state(context, node, None, baremetal_states.DELETED)
        else:
            # We no longer need the image since we successfully deployed.
            self.driver.destroy_images(context, node, instance)
Example #31
File: api.py Project: stuartbyma/nova
    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate all network resources for the instance."""
        quantum = quantumv2.get_client(context)
        LOG.debug(_('allocate_for_instance() for %s'),
                  instance['display_name'])
        if not instance['project_id']:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance['display_name'])
        requested_networks = kwargs.get('requested_networks')
        ports = {}
        fixed_ips = {}
        net_ids = []
        if requested_networks:
            for network_id, fixed_ip, port_id in requested_networks:
                if port_id:
                    port = quantum.show_port(port_id).get('port')
                    network_id = port['network_id']
                    ports[network_id] = port
                elif fixed_ip:
                    fixed_ips[network_id] = fixed_ip
                net_ids.append(network_id)

        nets = self._get_available_networks(context, instance['project_id'],
                                            net_ids)

        touched_port_ids = []
        created_port_ids = []
        for network in nets:
            network_id = network['id']
            zone = 'compute:%s' % FLAGS.node_availability_zone
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            try:
                port = ports.get(network_id)
                if port:
                    quantum.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                else:
                    if fixed_ips.get(network_id):
                        port_req_body['port']['fixed_ip'] = \
                            fixed_ips[network_id]
                    port_req_body['port']['network_id'] = network_id
                    port_req_body['port']['admin_state_up'] = True
                    port_req_body['port']['tenant_id'] = instance['project_id']
                    created_port_ids.append(
                        quantum.create_port(port_req_body)['port']['id'])
            except Exception:
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        port_in_server = quantum.show_port(port_id).get('port')
                        if not port_in_server:
                            raise Exception('Port has already been lost')
                        port_req_body = {'port': {'device_id': None}}
                        quantum.update_port(port_id, port_req_body)

                    for port_id in created_port_ids:
                        try:
                            quantum.delete_port(port_id)
                        except Exception as ex:
                            msg = _("Fail to delete port %(portid)s with"
                                    " failure: %(exception)s")
                            LOG.debug(msg, {'portid': port_id,
                                            'exception': ex})

        self.trigger_security_group_members_refresh(context, instance)

        return self.get_instance_nw_info(context, instance, networks=nets)
Example #32
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info=None,
              block_device_info=None):
        """
        Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
                         This function should use the data there to guide
                         the creation of the new instance.
        :param image_meta: image object returned by nova.image.glance that
                           defines the image from which to boot this instance
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        node_uuid = self._require_node(instance)
        node = db.bm_node_associate_and_update(
            context, node_uuid, {
                'instance_uuid': instance['uuid'],
                'instance_name': instance['hostname'],
                'task_state': baremetal_states.BUILDING
            })

        try:
            self._plug_vifs(instance, network_info, context=context)
            self._attach_block_devices(instance, block_device_info)
            self._start_firewall(instance, network_info)

            macs = self.macs_for_instance(instance)
            nodename = self.xcat.get_xcat_node_name(macs)
            imagename = self._get_xCAT_image_name(image_meta)
            hostname = instance.get('hostname')

            # get the network information for the new node
            interfaces = bm_utils.map_network_interfaces(
                network_info, CONF.use_ipv6)
            if CONF.use_ipv6:
                fixed_ip = interfaces[0].get('address_v6')
                netmask = interfaces[0].get('netmask_v6')
                gateway = interfaces[0].get('gateway_v6')
            else:
                fixed_ip = interfaces[0].get('address')
                netmask = interfaces[0].get('netmask')
                gateway = interfaces[0].get('gateway')
            # convert netmask from IPAddress to unicode string
            if netmask:
                netmask = unicode(netmask)

            # let xCAT install it
            bm_driver._update_state(context, node, instance,
                                    baremetal_states.DEPLOYING)
            self.xcat.deploy_node(nodename, imagename, hostname, fixed_ip,
                                  netmask, gateway)
            bm_driver._update_state(context, node, instance,
                                    baremetal_states.ACTIVE)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Error occured while deploying instance %(instance)s "
                      "on baremetal node %(node)s: %(error)s") % {
                          'instance': instance['uuid'],
                          'node': node['uuid'],
                          'error': str(e)
                      })
                bm_driver._update_state(context, node, instance,
                                        baremetal_states.ERROR)
Example #33
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info=None,
              block_device_info=None):
        # The compute manager is meant to know the node uuid, so missing uuid
        # is a significant issue. It may mean we've been passed the wrong data.
        node_uuid = instance.get('node')
        if not node_uuid:
            raise exception.NovaException(
                _("Ironic node uuid not supplied to "
                  "driver for instance %s.") % instance['uuid'])
        icli = client_wrapper.IronicClientWrapper()
        node = icli.call("node.get", node_uuid)

        flavor = flavor_obj.Flavor.get_by_id(context,
                                             instance['instance_type_id'])
        self._add_driver_fields(node, instance, image_meta, flavor)

        # Validate that we are ready to do the deploy
        validate_chk = icli.call("node.validate", node_uuid)
        if not validate_chk.deploy or not validate_chk.power:
            # Something is wrong; undo what we have done
            self._cleanup_deploy(node, instance, network_info)
            raise exception.ValidationError(
                _("Ironic node: %(id)s failed to validate."
                  " (deploy: %(deploy)s, power: %(power)s)") % {
                      'id': node.uuid,
                      'deploy': validate_chk.deploy,
                      'power': validate_chk.power
                  })

        # prepare for the deploy
        try:
            self._plug_vifs(node, instance, network_info)
            self._start_firewall(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Error preparing deploy for instance %(instance)s "
                      "on baremetal node %(node)s.") % {
                          'instance': instance['uuid'],
                          'node': node_uuid
                      })
                self._cleanup_deploy(node, instance, network_info)

        # trigger the node deploy
        try:
            icli.call("node.set_provision_state", node_uuid,
                      ironic_states.ACTIVE)
        except (
                exception.NovaException,  # Retry failed
                ironic_exception.HTTPInternalServerError,  # Validations
                ironic_exception.HTTPBadRequest) as e:  # Maintenance
            msg = (_("Failed to request Ironic to provision instance "
                     "%(inst)s: %(reason)s") % {
                         'inst': instance['uuid'],
                         'reason': str(e)
                     })
            LOG.error(msg)
            self._cleanup_deploy(node, instance, network_info)
            raise exception.InstanceDeployFailure(msg)

        # wait for the node to be marked as ACTIVE in Ironic
        def _wait_for_active():
            try:
                node = icli.call("node.get_by_instance_uuid", instance['uuid'])
            except ironic_exception.HTTPNotFound:
                raise exception.InstanceNotFound(instance_id=instance['uuid'])

            if node.provision_state == ironic_states.ACTIVE:
                # job is done
                raise loopingcall.LoopingCallDone()

            if node.target_provision_state == ironic_states.DELETED:
                # ironic is trying to delete it now
                raise exception.InstanceNotFound(instance_id=instance['uuid'])

            if node.provision_state == ironic_states.NOSTATE:
                # ironic already deleted it
                raise exception.InstanceNotFound(instance_id=instance['uuid'])

            if node.provision_state == ironic_states.DEPLOYFAIL:
                # ironic failed to deploy
                msg = (_("Failed to provision instance %(inst)s: %(reason)s") %
                       {
                           'inst': instance['uuid'],
                           'reason': node.last_error
                       })
                LOG.error(msg)
                raise exception.InstanceDeployFailure(msg)

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_active)
        # TODO(lucasagomes): Make the time configurable
        timer.start(interval=10).wait()
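
The _wait_for_active helper is driven by nova's loopingcall.FixedIntervalLoopingCall, which re-invokes the function every interval seconds until it raises LoopingCallDone. A dependency-free sketch of that polling contract:

import time

class LoopingCallDone(Exception):
    """Raised by the polled function to signal completion."""

def poll_fixed_interval(func, interval=10):
    # Call func repeatedly until it raises LoopingCallDone; any other
    # exception (e.g. InstanceDeployFailure above) propagates.
    while True:
        try:
            func()
        except LoopingCallDone:
            return
        time.sleep(interval)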
Example #34
    def _spawn(self,
               node,
               context,
               instance,
               image_meta,
               injected_files,
               admin_password,
               network_info=None,
               block_device_info=None):
        try:
            self._plug_vifs(instance, network_info, context=context)
            self._attach_block_devices(instance, block_device_info)
            self._start_firewall(instance, network_info)

            self.driver.cache_images(
                context,
                node,
                instance,
                admin_password=admin_password,
                image_meta=image_meta,
                injected_files=injected_files,
                network_info=network_info,
            )
            self.driver.activate_bootloader(context,
                                            node,
                                            instance,
                                            network_info=network_info)
            # NOTE(deva): ensure node is really off before we turn it on
            #             fixes bug https://code.launchpad.net/bugs/1178919
            self.power_off(instance, node)
            self.power_on(context, instance, network_info, block_device_info,
                          node)
            _update_state(context, node, instance, baremetal_states.PREPARED)

            self.driver.activate_node(context, node, instance)
            _update_state(context, node, instance, baremetal_states.ACTIVE)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Error deploying instance %(instance)s "
                      "on baremetal node %(node)s.") % {
                          'instance': instance['uuid'],
                          'node': node['uuid']
                      })

                # Do not set instance=None yet. This prevents another
                # spawn() while we are cleaning up.
                _update_state(context, node, instance, baremetal_states.ERROR)

                self.driver.deactivate_node(context, node, instance)
                self.power_off(instance, node)
                self.driver.deactivate_bootloader(context, node, instance)
                self.driver.destroy_images(context, node, instance)

                self._detach_block_devices(instance, block_device_info)
                self._stop_firewall(instance, network_info)
                self._unplug_vifs(instance, network_info)

                _update_state(context, node, None, baremetal_states.DELETED)
        else:
            # We no longer need the image since we successfully deployed.
            self.driver.destroy_images(context, node, instance)
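
Note that the error path above unwinds the setup steps in roughly the reverse of the order they were performed. That pattern generalizes to a stack of undo callbacks; a sketch:

def run_with_rollback(steps):
    # steps is a list of (do, undo) callable pairs. On failure, run
    # the undo callbacks for the completed steps in reverse order,
    # then re-raise the original error.
    undo_stack = []
    try:
        for do, undo in steps:
            do()
            undo_stack.append(undo)
    except Exception:
        while undo_stack:
            undo_stack.pop()()
        raise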
Example #35
class PowerVMOperator(object):
    """PowerVM main operator.

    The PowerVMOperator is intended to wrap all operations
    from the driver and handle either IVM or HMC managed systems.
    """
    def __init__(self):
        self._operator = get_powervm_operator()
        self._disk_adapter = get_powervm_disk_adapter()
        self._host_stats = {}
        self._update_host_stats()

    def get_info(self, instance_name):
        """Get the current status of an LPAR instance.

        Returns a dict containing:

        :state:           the running state, one of the power_state codes
        :max_mem:         (int) the maximum memory in KBytes allowed
        :mem:             (int) the memory in KBytes used by the domain
        :num_cpu:         (int) the number of virtual CPUs for the domain
        :cpu_time:        (int) the CPU time used in nanoseconds

        :raises: PowerVMLPARInstanceNotFound
        """
        lpar_instance = self._get_instance(instance_name)

        state = constants.POWERVM_POWER_STATE.get(lpar_instance['state'],
                                                  power_state.NOSTATE)
        return {
            'state': state,
            'max_mem': lpar_instance['max_mem'],
            'mem': lpar_instance['desired_mem'],
            'num_cpu': lpar_instance['max_procs'],
            'cpu_time': lpar_instance['uptime']
        }

    def instance_exists(self, instance_name):
        lpar_instance = self._operator.get_lpar(instance_name)
        return bool(lpar_instance)

    def _get_instance(self, instance_name):
        """Check whether or not the LPAR instance exists and return it."""
        lpar_instance = self._operator.get_lpar(instance_name)

        if lpar_instance is None:
            LOG.error(_("LPAR instance '%s' not found") % instance_name)
            raise exception.PowerVMLPARInstanceNotFound(
                instance_name=instance_name)
        return lpar_instance

    def list_instances(self):
        """
        Return the names of all the instances known to the virtualization
        layer, as a list.
        """
        lpar_instances = self._operator.list_lpar_instances()
        return lpar_instances

    def get_available_resource(self):
        """Retrieve resource info.

        :returns: dictionary containing resource info
        """
        data = self.get_host_stats()
        # Memory data is in MB already.
        memory_mb_used = data['host_memory_total'] - data['host_memory_free']

        # Convert to GB
        local_gb = data['disk_total'] / 1024
        local_gb_used = data['disk_used'] / 1024

        dic = {
            'vcpus': data['vcpus'],
            'memory_mb': data['host_memory_total'],
            'local_gb': local_gb,
            'vcpus_used': data['vcpus_used'],
            'memory_mb_used': memory_mb_used,
            'local_gb_used': local_gb_used,
            'hypervisor_type': data['hypervisor_type'],
            'hypervisor_version': data['hypervisor_version'],
            'hypervisor_hostname': self._operator.get_hostname(),
            'cpu_info': ','.join(data['cpu_info']),
            'disk_available_least': data['disk_total']
        }
        return dic

    def get_host_stats(self, refresh=False):
        """Return currently known host stats."""
        if refresh:
            self._update_host_stats()
        return self._host_stats

    def _update_host_stats(self):
        memory_info = self._operator.get_memory_info()
        cpu_info = self._operator.get_cpu_info()

        # Note: the available-disk figure is not accurate. It is a sum
        # across all Volume Groups, which can overstate what is really
        # usable: with two 10G VGs the available disk reports 20G, yet
        # a 15G image does not fit in either VG. This can be improved
        # later on.
        disk_info = self._operator.get_disk_info()

        data = {}
        data['vcpus'] = cpu_info['total_procs']
        data['vcpus_used'] = cpu_info['total_procs'] - cpu_info['avail_procs']
        data['cpu_info'] = constants.POWERVM_CPU_INFO
        data['disk_total'] = disk_info['disk_total']
        data['disk_used'] = disk_info['disk_used']
        data['disk_available'] = disk_info['disk_avail']
        data['host_memory_total'] = memory_info['total_mem']
        data['host_memory_free'] = memory_info['avail_mem']
        data['hypervisor_type'] = constants.POWERVM_HYPERVISOR_TYPE
        data['hypervisor_version'] = constants.POWERVM_HYPERVISOR_VERSION
        data['hypervisor_hostname'] = self._operator.get_hostname()
        data['supported_instances'] = constants.POWERVM_SUPPORTED_INSTANCES
        data['extres'] = ''

        self._host_stats = data

    def spawn(self, context, instance, image_id, network_info):
        def _create_image(context, instance, image_id):
            """Fetch image from glance and copy it to the remote system."""
            try:
                root_volume = self._disk_adapter.create_volume_from_image(
                    context, instance, image_id)

                self._disk_adapter.attach_volume_to_host(root_volume)

                lpar_id = self._operator.get_lpar(instance['name'])['lpar_id']
                vhost = self._operator.get_vhost_by_instance_id(lpar_id)
                self._operator.attach_disk_to_vhost(root_volume['device_name'],
                                                    vhost)
            except Exception as e:
                LOG.exception(_("PowerVM image creation failed: %s") % str(e))
                raise exception.PowerVMImageCreationFailed()

        spawn_start = time.time()

        try:
            try:
                host_stats = self.get_host_stats(refresh=True)
                lpar_inst = self._create_lpar_instance(instance, network_info,
                                                       host_stats)
                #TODO(mjfork): capture and handle the error when the
                #              MAC prefix already exists on the
                #              system (1 in 2^28)
                LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
                self._operator.create_lpar(lpar_inst)
            except nova_exception.ProcessExecutionError:
                LOG.exception(
                    _("LPAR instance '%s' creation failed") % instance['name'])
                raise exception.PowerVMLPARCreationFailed()

            _create_image(context, instance, image_id)
            LOG.debug(
                _("Activating the LPAR instance '%s'") % instance['name'])
            self._operator.start_lpar(instance['name'])

            # TODO(mrodden): probably do this a better way
            #                that actually relies on the time module
            #                and nonblocking threading
            # Wait for boot
            timeout_count = range(10)
            while timeout_count:
                state = self.get_info(instance['name'])['state']
                if state == power_state.RUNNING:
                    LOG.info(_("Instance spawned successfully."),
                             instance=instance)
                    break
                timeout_count.pop()
                if len(timeout_count) == 0:
                    LOG.error(
                        _("Instance '%s' failed to boot") % instance['name'])
                    self._cleanup(instance['name'])
                    break
                time.sleep(1)

        except exception.PowerVMImageCreationFailed:
            with excutils.save_and_reraise_exception():
                # log errors in cleanup
                try:
                    self._cleanup(instance['name'])
                except Exception:
                    LOG.exception(
                        _('Error while attempting to '
                          'clean up failed instance launch.'))

        spawn_time = time.time() - spawn_start
        LOG.info(_("Instance spawned in %s seconds") % spawn_time,
                 instance=instance)
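
The boot wait above counts down by popping from a range(10) list, an old idiom the TODO already flags. A sketch of the same wait written against a wall-clock deadline, with get_state and cleanup as stand-ins for the get_info/_cleanup helpers:

import time

def wait_for_running(get_state, cleanup, timeout=10, interval=1):
    # Poll until the instance reports running or the deadline passes.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_state() == 'running':
            return True
        time.sleep(interval)
    cleanup()  # The boot never completed; tear the instance down.
    return False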
Example #36
    def destroy(self,
                instance,
                network_info,
                block_device_info=None,
                context=None):
        """Destroy (shutdown and delete) the specified instance.

        If the instance is not found (for example if networking failed), this
        function should still succeed.  It's probably a good idea to log a
        warning in that case.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        """
        context = nova_context.get_admin_context()
        try:
            node = bm_driver._get_baremetal_node_by_instance_uuid(
                instance['uuid'])

        except exception.InstanceNotFound:
            LOG.warning(
                _("Destroy function called on a non-existing instance %s") %
                instance['uuid'])
            return

        try:
            macs = self.macs_for_instance(instance)
            nodename = self.xcat.get_xcat_node_name(macs)
            interfaces = bm_utils.map_network_interfaces(
                network_info, CONF.use_ipv6)
            fixed_ip = None
            if interfaces and interfaces[0]:
                if CONF.use_ipv6:
                    fixed_ip = interfaces[0].get('address_v6')
                else:
                    fixed_ip = interfaces[0].get('address')
            if fixed_ip:
                self.xcat.cleanup_node(nodename, fixed_ip)
            else:
                self.xcat.cleanup_node(nodename)
        except Exception as e:
            # Just log it and move on.
            LOG.warning(_("Destroy called with xCAT error: %s") % str(e))

        try:
            self._detach_block_devices(instance, block_device_info)
            self._stop_firewall(instance, network_info)
            self._unplug_vifs(instance, network_info)

            bm_driver._update_state(context, node, None,
                                    baremetal_states.DELETED)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _("Error occurred while destroying instance %s: %s") %
                    (instance['uuid'], str(e)))
                bm_driver._update_state(context, node, instance,
                                        baremetal_states.ERROR)
Example #37
    def unshelve_instance(self, context, instance):
        sys_meta = instance.system_metadata

        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
            snapshot_id = sys_meta.get('shelved_image_id')
            if snapshot_id:
                self._delete_image(context, snapshot_id)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            try:
                with compute_utils.EventReporter(context, self.db,
                                                 'get_image_info',
                                                 instance.uuid):
                    image = self._get_image(context,
                                            sys_meta['shelved_image_id'])
            except exception.ImageNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(_('Unshelve attempted but the image %s '
                                'cannot be found.') %
                              sys_meta['shelved_image_id'],
                              instance=instance)
                    instance.vm_state = vm_states.ERROR
                    instance.save()

            try:
                with compute_utils.EventReporter(context, self.db,
                                                 'schedule_instances',
                                                 instance.uuid):
                    filter_properties = {}
                    hosts = self._schedule_instances(context, image,
                                                     filter_properties,
                                                     instance)
                    host_state = hosts[0]
                    scheduler_utils.populate_filter_properties(
                        filter_properties, host_state)
                    (host, node) = (host_state['host'], host_state['nodename'])
                    self.compute_rpcapi.unshelve_instance(
                        context,
                        instance,
                        host,
                        image=image,
                        filter_properties=filter_properties,
                        node=node)
            except exception.NoValidHost:
                instance.task_state = None
                instance.save()
                LOG.warning(_("No valid host found for unshelve instance"),
                            instance=instance)
                return
        else:
            LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
                        'SHELVED_OFFLOADED'),
                      instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return

        for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
            if key in sys_meta:
                del sys_meta[key]
        instance.system_metadata = sys_meta
        instance.save()
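
The key removal above can also be written with dict.pop, which tolerates missing keys and drops the explicit membership test:

for key in ('shelved_at', 'shelved_image_id', 'shelved_host'):
    sys_meta.pop(key, None)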
Example #38
class SchedulerManager(manager.Manager):
    """Chooses a host to run instances on."""

    RPC_API_VERSION = '1.0'

    def __init__(self, scheduler_driver=None, *args, **kwargs):
        if not scheduler_driver:
            scheduler_driver = FLAGS.scheduler_driver
        self.driver = importutils.import_object(scheduler_driver)
        super(SchedulerManager, self).__init__(*args, **kwargs)

    def __getattr__(self, key):
        """Converts all method calls to use the schedule method"""
        # NOTE(russellb) Because of what this is doing, we must be careful
        # when changing the API of the scheduler drivers, as that changes
        # the rpc API as well, and the version should be updated accordingly.
        return functools.partial(self._schedule, key)

    def get_host_list(self, context):
        """Get a list of hosts from the HostManager."""
        return self.driver.get_host_list()

    def get_service_capabilities(self, context):
        """Get the normalized set of capabilities for this zone."""
        return self.driver.get_service_capabilities()

    def update_service_capabilities(self,
                                    context,
                                    service_name=None,
                                    host=None,
                                    capabilities=None,
                                    **kwargs):
        """Process a capability update from a service node."""
        if capabilities is None:
            capabilities = {}
        self.driver.update_service_capabilities(service_name, host,
                                                capabilities)

    def _schedule(self, method, context, topic, *args, **kwargs):
        """Tries to call schedule_* method on the driver to retrieve host.
        Falls back to schedule(context, topic) if method doesn't exist.
        """
        driver_method_name = 'schedule_%s' % method
        try:
            driver_method = getattr(self.driver, driver_method_name)
            args = (context, ) + args
        except AttributeError as e:
            LOG.warning(
                _("Driver Method %(driver_method_name)s missing: "
                  "%(e)s. Reverting to schedule()") % locals())
            driver_method = self.driver.schedule
            args = (context, topic, method) + args

        # Scheduler methods are responsible for casting.
        try:
            return driver_method(*args, **kwargs)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                self._set_vm_state_and_notify(method,
                                              {'vm_state': vm_states.ERROR},
                                              context, ex, *args, **kwargs)
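
The __getattr__ trick above turns any unknown method call on the manager into a dispatch through _schedule. A self-contained sketch of the same mechanism, with FakeDriver standing in for a real scheduler driver:

import functools

class FakeDriver(object):
    def schedule_run_instance(self, *args):
        return ('specialised', args)

    def schedule(self, method, *args):
        return ('generic', method, args)

class Dispatcher(object):
    def __init__(self, driver):
        self.driver = driver

    def _schedule(self, method, *args, **kwargs):
        # Prefer a specialised schedule_<method> on the driver; fall
        # back to the driver's generic schedule().
        try:
            driver_method = getattr(self.driver, 'schedule_%s' % method)
        except AttributeError:
            return self.driver.schedule(method, *args, **kwargs)
        return driver_method(*args, **kwargs)

    def __getattr__(self, key):
        # Reached only for names not found by normal lookup; the
        # missing attribute becomes a partially-applied _schedule call.
        return functools.partial(self._schedule, key)

# Dispatcher(FakeDriver()).run_instance('vm') -> ('specialised', ('vm',))
# Dispatcher(FakeDriver()).resize('vm')       -> ('generic', 'resize', ('vm',))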
Example #39
    def live_migration(self,
                       context,
                       instance_ref,
                       dest,
                       post_method,
                       recover_method,
                       block_migration=False):
        LOG.debug(_("live_migration called"), instance=instance_ref)
        instance_name = instance_ref["name"]

        try:
            self._check_live_migration_config()

            vm_name = self._vmutils.lookup(self._conn, instance_name)
            if vm_name is None:
                raise exception.InstanceNotFound(instance=instance_name)
            vm = self._conn_v2.Msvm_ComputerSystem(
                ElementName=instance_name)[0]
            vm_settings = vm.associators(
                wmi_association_class='Msvm_SettingsDefineState',
                wmi_result_class='Msvm_VirtualSystemSettingData')[0]

            new_resource_setting_data = []
            sasds = vm_settings.associators(
                wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
                wmi_result_class='Msvm_StorageAllocationSettingData')
            for sasd in sasds:
                if (sasd.ResourceType == 31 and
                        sasd.ResourceSubType ==
                        "Microsoft:Hyper-V:Virtual Hard Disk"):
                    new_resource_setting_data.append(sasd.GetText_(1))

            LOG.debug(
                _("Getting live migration networks for remote "
                  "host: %s"), dest)
            _conn_v2_remote = wmi.WMI(moniker='//' + dest +
                                      '/root/virtualization/v2')
            migration_svc_remote = \
                _conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
            remote_ip_address_list = \
                migration_svc_remote.MigrationServiceListenerIPAddressList

            # VirtualSystemAndStorage
            vsmsd = self._conn_v2.query(
                "select * from "
                "Msvm_VirtualSystemMigrationSettingData "
                "where MigrationType = 32771")[0]
            vsmsd.DestinationIPAddressList = remote_ip_address_list
            migration_setting_data = vsmsd.GetText_(1)

            migration_svc = \
                self._conn_v2.Msvm_VirtualSystemMigrationService()[0]

            LOG.debug(_("Starting live migration for instance: %s"),
                      instance_name)
            (job_path, ret_val) = migration_svc.MigrateVirtualSystemToHost(
                ComputerSystem=vm.path_(),
                DestinationHost=dest,
                MigrationSettingData=migration_setting_data,
                NewResourceSettingData=new_resource_setting_data)
            if ret_val == constants.WMI_JOB_STATUS_STARTED:
                success = self._vmutils.check_job_status(job_path)
            else:
                success = (ret_val == 0)
            if not success:
                raise vmutils.HyperVException(
                    _('Failed to live migrate VM %s') % instance_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(
                    _("Calling live migration recover_method "
                      "for instance: %s"), instance_name)
                recover_method(context, instance_ref, dest, block_migration)

        LOG.debug(_("Calling live migration post_method for instance: %s"),
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)
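
Hyper-V WMI methods either complete synchronously (return value 0) or hand back a job to poll, signalled here by WMI_JOB_STATUS_STARTED (conventionally 4096). The success test above, isolated as a sketch:

def wmi_call_succeeded(ret_val, job_path, check_job_status,
                       started_code=4096):
    # A started job must be polled to completion; anything else
    # succeeded only if the call returned 0.
    if ret_val == started_code:
        return check_job_status(job_path)
    return ret_val == 0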
Example #40
File: operator.py Project: whitekid/nova
class PowerVMOperator(object):
    """PowerVM main operator.

    The PowerVMOperator is intended to wrap all operations
    from the driver and handle either IVM or HMC managed systems.
    """
    def __init__(self):
        self._operator = get_powervm_operator()
        self._host_stats = {}
        self._update_host_stats()

    def get_info(self, instance_name):
        """Get the current status of an LPAR instance.

        Returns a dict containing:

        :state:           the running state, one of the power_state codes
        :max_mem:         (int) the maximum memory in KBytes allowed
        :mem:             (int) the memory in KBytes used by the domain
        :num_cpu:         (int) the number of virtual CPUs for the domain
        :cpu_time:        (int) the CPU time used in nanoseconds

        :raises: PowerVMLPARInstanceNotFound
        """
        lpar_instance = self._get_instance(instance_name)

        state = constants.POWERVM_POWER_STATE.get(lpar_instance['state'],
                                                  power_state.NOSTATE)
        return {
            'state': state,
            'max_mem': lpar_instance['max_mem'],
            'mem': lpar_instance['desired_mem'],
            'num_cpu': lpar_instance['max_procs'],
            'cpu_time': lpar_instance['uptime']
        }

    def instance_exists(self, instance_name):
        lpar_instance = self._operator.get_lpar(instance_name)
        return bool(lpar_instance)

    def _get_instance(self, instance_name):
        """Check whether or not the LPAR instance exists and return it."""
        lpar_instance = self._operator.get_lpar(instance_name)

        if lpar_instance is None:
            LOG.error(_("LPAR instance '%s' not found") % instance_name)
            raise exception.PowerVMLPARInstanceNotFound(
                instance_name=instance_name)
        return lpar_instance

    def list_instances(self):
        """
        Return the names of all the instances known to the virtualization
        layer, as a list.
        """
        lpar_instances = self._operator.list_lpar_instances()
        return lpar_instances

    def get_available_resource(self):
        """Retrieve resource info.

        :returns: dictionary containing resource info
        """
        data = self.get_host_stats()
        # Memory data is in MB already.
        memory_mb_used = data['host_memory_total'] - data['host_memory_free']

        # Convert to GB
        local_gb = data['disk_total'] / 1024
        local_gb_used = data['disk_used'] / 1024

        dic = {
            'vcpus': data['vcpus'],
            'memory_mb': data['host_memory_total'],
            'local_gb': local_gb,
            'vcpus_used': data['vcpus_used'],
            'memory_mb_used': memory_mb_used,
            'local_gb_used': local_gb_used,
            'hypervisor_type': data['hypervisor_type'],
            'hypervisor_version': data['hypervisor_version'],
            'hypervisor_hostname': self._operator.get_hostname(),
            'cpu_info': ','.join(data['cpu_info']),
            'disk_available_least': data['disk_total']
        }
        return dic

    def get_host_stats(self, refresh=False):
        """Return currently known host stats"""
        if refresh:
            self._update_host_stats()
        return self._host_stats

    def _update_host_stats(self):
        memory_info = self._operator.get_memory_info()
        cpu_info = self._operator.get_cpu_info()

        # Note: the available-disk figure is not accurate. It is a sum
        # across all Volume Groups, which can overstate what is really
        # usable: with two 10G VGs the available disk reports 20G, yet
        # a 15G image does not fit in either VG. This can be improved
        # later on.
        disk_info = self._operator.get_disk_info()

        data = {}
        data['vcpus'] = cpu_info['total_procs']
        data['vcpus_used'] = cpu_info['total_procs'] - cpu_info['avail_procs']
        data['cpu_info'] = constants.POWERVM_CPU_INFO
        data['disk_total'] = disk_info['disk_total']
        data['disk_used'] = disk_info['disk_used']
        data['disk_available'] = disk_info['disk_avail']
        data['host_memory_total'] = memory_info['total_mem']
        data['host_memory_free'] = memory_info['avail_mem']
        data['hypervisor_type'] = constants.POWERVM_HYPERVISOR_TYPE
        data['hypervisor_version'] = constants.POWERVM_HYPERVISOR_VERSION
        data['hypervisor_hostname'] = self._operator.get_hostname()
        data['extres'] = ''

        self._host_stats = data

    def spawn(self, context, instance, image_id):
        def _create_lpar_instance(instance):
            host_stats = self.get_host_stats(refresh=True)
            inst_name = instance['name']

            # CPU/Memory min and max can be configurable. Let's assume
            # some default values for now.

            # Memory
            mem = instance['memory_mb']
            if mem > host_stats['host_memory_free']:
                LOG.error(_('Not enough free memory in the host'))
                raise exception.PowerVMInsufficientFreeMemory(
                    instance_name=instance['name'])
            mem_min = min(mem, constants.POWERVM_MIN_MEM)
            mem_max = mem + constants.POWERVM_MAX_MEM

            # CPU
            cpus = instance['vcpus']
            avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
            if cpus > avail_cpus:
                LOG.error(_('Insufficient available CPU on PowerVM'))
                raise exception.PowerVMInsufficientCPU(
                    instance_name=instance['name'])
            cpus_min = min(cpus, constants.POWERVM_MIN_CPUS)
            cpus_max = cpus + constants.POWERVM_MAX_CPUS
            cpus_units_min = decimal.Decimal(cpus_min) / decimal.Decimal(10)
            cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)

            try:
                # Network
                eth_id = self._operator.get_virtual_eth_adapter_id()

                # LPAR configuration data
                lpar_inst = LPAR.LPAR(name=inst_name,
                                      lpar_env='aixlinux',
                                      min_mem=mem_min,
                                      desired_mem=mem,
                                      max_mem=mem_max,
                                      proc_mode='shared',
                                      sharing_mode='uncap',
                                      min_procs=cpus_min,
                                      desired_procs=cpus,
                                      max_procs=cpus_max,
                                      min_proc_units=cpus_units_min,
                                      desired_proc_units=cpus_units,
                                      max_proc_units=cpus_max,
                                      virtual_eth_adapters='4/0/%s//0/0' %
                                      eth_id)

                LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
                self._operator.create_lpar(lpar_inst)
            except nova_exception.ProcessExecutionError:
                LOG.exception(
                    _("LPAR instance '%s' creation failed") % instance['name'])
                raise exception.PowerVMLPARCreationFailed()

        def _create_image(context, instance, image_id):
            """Fetch image from glance and copy it to the remote system."""
            try:
                file_name = '.'.join([image_id, 'gz'])
                file_path = os.path.join(CONF.powervm_img_local_path,
                                         file_name)
                LOG.debug(_("Fetching image '%s' from glance") % image_id)
                images.fetch_to_raw(context,
                                    image_id,
                                    file_path,
                                    instance['user_id'],
                                    project_id=instance['project_id'])
                LOG.debug(_("Copying image '%s' to IVM") % file_path)
                remote_path = CONF.powervm_img_remote_path
                remote_file_name, size = self._operator.copy_image_file(
                    file_path, remote_path)
                # Logical volume
                LOG.debug(_("Creating logical volume"))
                lpar_id = self._operator.get_lpar(instance['name'])['lpar_id']
                vhost = self._operator.get_vhost_by_instance_id(lpar_id)
                disk_name = self._operator.create_logical_volume(size)
                self._operator.attach_disk_to_vhost(disk_name, vhost)
                LOG.debug(_("Copying image to the device '%s'") % disk_name)
                self._operator.copy_file_to_device(remote_file_name, disk_name)
            except Exception as e:
                LOG.exception(_("PowerVM image creation failed: %s") % str(e))
                raise exception.PowerVMImageCreationFailed()

        try:
            _create_lpar_instance(instance)
            _create_image(context, instance, image_id)
            LOG.debug(
                _("Activating the LPAR instance '%s'") % instance['name'])
            self._operator.start_lpar(instance['name'])

            # Wait for boot
            timeout_count = range(10)
            while timeout_count:
                state = self.get_info(instance['name'])['state']
                if state == power_state.RUNNING:
                    LOG.info(_("Instance spawned successfully."),
                             instance=instance)
                    break
                timeout_count.pop()
                if len(timeout_count) == 0:
                    LOG.error(
                        _("Instance '%s' failed to boot") % instance['name'])
                    self._cleanup(instance['name'])
                    break
                time.sleep(1)

        except exception.PowerVMImageCreationFailed:
            with excutils.save_and_reraise_exception():
                # log errors in cleanup
                try:
                    self._cleanup(instance['name'])
                except Exception:
                    LOG.exception(
                        _('Error while attempting to '
                          'clean up failed instance launch.'))
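
The shared-processor sizing in _create_lpar_instance derives the desired processing units as one tenth of the vCPU count, using decimal.Decimal to avoid float noise. Just that arithmetic, isolated:

import decimal

def proc_units(cpus):
    # e.g. proc_units(4) == Decimal('0.4')
    return decimal.Decimal(cpus) / decimal.Decimal(10)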
Example #41
    def _copy_image_file(self, source_path, remote_path, decompress=False):
        """Copy file to VIOS, decompress it, and return its new size and name.

        :param source_path: source file path
        :param remote_path: remote file path
        :param decompress: if True, decompresses the file after copying;
                           if False (default), just copies the file
        """
        # Calculate source image checksum
        source_cksum = self._checksum_local_file(source_path)

        comp_path = os.path.join(remote_path, os.path.basename(source_path))
        if comp_path.endswith(".gz"):
            uncomp_path = os.path.splitext(comp_path)[0]
        else:
            uncomp_path = comp_path
        if not decompress:
            final_path = comp_path
        else:
            final_path = uncomp_path

        # Check whether the image is already on IVM
        output = self.run_vios_command("ls %s" % final_path,
                                       check_exit_code=False)

        # If the image does not exist already
        if not output:
            try:
                # Copy file to IVM
                common.ftp_put_command(self.connection_data, source_path,
                                       remote_path)
            except exception.PowerVMFTPTransferFailed:
                with excutils.save_and_reraise_exception():
                    cmd = "/usr/bin/rm -f %s" % final_path
                    self.run_vios_command_as_root(cmd)

            # Verify image file checksums match
            output = self._md5sum_remote_file(final_path)
            if not output:
                LOG.error(_("Unable to get checksum"))
                # Cleanup inconsistent remote file
                cmd = "/usr/bin/rm -f %s" % final_path
                self.run_vios_command_as_root(cmd)

                raise exception.PowerVMFileTransferFailed(file_path=final_path)
            if source_cksum != output.split(' ')[0]:
                LOG.error(_("Image checksums do not match"))
                # Cleanup inconsistent remote file
                cmd = "/usr/bin/rm -f %s" % final_path
                self.run_vios_command_as_root(cmd)

                raise exception.PowerVMFileTransferFailed(file_path=final_path)

            if decompress:
                # Unzip the image
                cmd = "/usr/bin/gunzip %s" % comp_path
                output = self.run_vios_command_as_root(cmd)

                # Remove existing image file
                cmd = "/usr/bin/rm -f %s.*" % uncomp_path
                output = self.run_vios_command_as_root(cmd)

                # Rename unzipped image
                cmd = "/usr/bin/mv %s %s" % (uncomp_path, final_path)
                output = self.run_vios_command_as_root(cmd)

                # Remove compressed image file
                cmd = "/usr/bin/rm -f %s" % comp_path
                output = self.run_vios_command_as_root(cmd)

        else:
            LOG.debug(_("Image found on host at '%s'") % final_path)

        # Calculate file size in multiples of 512 bytes
        output = self.run_vios_command("ls -o %s|awk '{print $4}'" %
                                       final_path,
                                       check_exit_code=False)
        if output:
            size = int(output[0])
        else:
            LOG.error(_("Uncompressed image file not found"))
            raise exception.PowerVMFileTransferFailed()
        if size % 512 != 0:
            size = (int(size / 512) + 1) * 512

        return final_path, size
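
The final adjustment rounds the byte count up to the next multiple of 512. Isolated as a small helper:

def round_up_to_512(size):
    # Round a byte count up to the next 512-byte boundary.
    if size % 512 != 0:
        size = (size // 512 + 1) * 512
    return size

# round_up_to_512(513) == 1024; round_up_to_512(1024) == 1024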
Example #42
File: api.py Project: HybridCloud-dew/hws
    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate network resources for the instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param requested_networks: optional value containing
            network_id, fixed_ip, and port_id
        :param security_groups: security groups to allocate for instance
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
            to arbitrary networks, which requires openflow switches to
            function correctly if more than one network is being used with
            the bare metal hypervisor (which is the only one known to limit
            MAC addresses).
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, e.g. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        """
        hypervisor_macs = kwargs.get('macs', None)
        available_macs = None
        if hypervisor_macs is not None:
            # Make a copy we can mutate: records macs that have not been used
            # to create a port on a network. If we find a mac with a
            # pre-allocated port we also remove it from this set.
            available_macs = set(hypervisor_macs)
        neutron = neutronv2.get_client(context)
        LOG.debug('allocate_for_instance()', instance=instance)
        if not instance.project_id:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(reason=msg % instance.uuid)
        requested_networks = kwargs.get('requested_networks')
        dhcp_opts = kwargs.get('dhcp_options', None)
        ports = {}
        net_ids = []
        ordered_networks = []
        if requested_networks:
            for request in requested_networks:
                if request.port_id:
                    try:
                        port = neutron.show_port(request.port_id)['port']
                    except neutron_client_exc.PortNotFoundClient:
                        raise exception.PortNotFound(port_id=request.port_id)
                    if port['tenant_id'] != instance.project_id:
                        raise exception.PortNotUsable(port_id=request.port_id,
                                                      instance=instance.uuid)
                    if (port.get('device_id')
                            and port.get('device_id') != instance['uuid']):
                        raise exception.PortInUse(port_id=request.port_id)
                    if hypervisor_macs is not None:
                        if port['mac_address'] not in hypervisor_macs:
                            raise exception.PortNotUsable(
                                port_id=request.port_id,
                                instance=instance.uuid)
                        else:
                            # Don't try to use this MAC if we need to create a
                            # port on the fly later. Identical MACs may be
                            # configured by users into multiple ports so we
                            # discard rather than popping.
                            available_macs.discard(port['mac_address'])
                    request.network_id = port['network_id']
                    ports[request.port_id] = port
                if request.network_id:
                    net_ids.append(request.network_id)
                    ordered_networks.append(request)

        nets = self._get_available_networks(context, instance.project_id,
                                            net_ids)
        if not nets:
            LOG.warn(_LW("No network configured!"), instance=instance)
            return network_model.NetworkInfo([])

        # If this function was called directly without a requested_networks
        # argument, or indirectly through allocate_port_for_instance() with
        # all-None parameters (network_id=None, requested_ip=None,
        # port_id=None, pci_request_id=None):
        if (not requested_networks
                or requested_networks.is_single_unspecified):
            # bug/1267723 - if no network is requested and more
            # than one is available then raise NetworkAmbiguous Exception
            if len(nets) > 1:
                msg = _("Multiple possible networks found, use a Network "
                        "ID to be more specific.")
                raise exception.NetworkAmbiguous(msg)
            ordered_networks.append(
                objects.NetworkRequest(network_id=nets[0]['id']))
            db_req_networks = list()
            db_obj = huawei_instance_extra.HuaweiInstanceExtra(
                instance_uuid=instance.uuid)
            db_instance = db_obj.get_by_instance_uuid(
                context, instance_uuid=instance.uuid)
            if db_instance.request_network:
                db_req_networks = jsonutils.loads(db_instance.request_network)
            db_req_networks.append([nets[0]['id'], None, None])
            db_obj.request_network = jsonutils.dumps(db_req_networks)
            db_obj.create(context)

        # NOTE(): check external net attach permission after the
        #                check for ambiguity, there could be another
        #                available net which is permitted bug/1364344
        self._check_external_network_attach(context, nets)

        security_groups = kwargs.get('security_groups', [])
        security_group_ids = []

        # TODO() Should optimize more to do direct query for security
        # group if len(security_groups) == 1
        if security_groups:
            search_opts = {'tenant_id': instance.project_id}
            user_security_groups = neutron.list_security_groups(
                **search_opts).get('security_groups')

        for security_group in security_groups:
            name_match = None
            uuid_match = None
            for user_security_group in user_security_groups:
                if user_security_group['name'] == security_group:
                    if name_match:
                        raise exception.NoUniqueMatch(
                            _("Multiple security groups found matching"
                              " '%s'. Use an ID to be more specific.") %
                            security_group)

                    name_match = user_security_group['id']
                if user_security_group['id'] == security_group:
                    uuid_match = user_security_group['id']

            # If a user names the security group the same as
            # another's security groups uuid, the name takes priority.
            if not name_match and not uuid_match:
                raise exception.SecurityGroupNotFound(
                    security_group_id=security_group)
            elif name_match:
                security_group_ids.append(name_match)
            elif uuid_match:
                security_group_ids.append(uuid_match)

        touched_port_ids = []
        created_port_ids = []
        ports_in_requested_order = []
        nets_in_requested_order = []
        for request in ordered_networks:
            # Network lookup for available network_id
            network = None
            for net in nets:
                if net['id'] == request.network_id:
                    network = net
                    break
            # If the requested network_id was not returned by
            # _get_available_networks(), skip it rather than continue
            # with a None network.
            else:
                continue

            nets_in_requested_order.append(network)
            # If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
            # implement the port-security extension which requires
            # 'port_security_enabled' to be True for security groups.
            # That is why True is returned if 'port_security_enabled'
            # is not found.
            if (security_groups
                    and not (network['subnets']
                             and network.get('port_security_enabled', True))):
                # Roll back: delete any ports created so far.
                self._delete_ports(neutron, instance, created_port_ids)
                raise exception.SecurityGroupCannotBeApplied()
            request.network_id = network['id']
            zone = 'compute:%s' % instance.availability_zone
            port_req_body = {
                'port': {
                    'device_id': instance.uuid,
                    'device_owner': zone
                }
            }
            try:
                self._populate_neutron_extension_values(
                    context, instance, request.pci_request_id, port_req_body)
                # Requires admin creds to set port bindings
                port_client = (neutron
                               if not self._has_port_binding_extension(context)
                               else neutronv2.get_client(context, admin=True))
                if request.port_id:
                    port = ports[request.port_id]
                    port_client.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                    ports_in_requested_order.append(port['id'])
                else:
                    created_port = self._create_port(port_client, instance,
                                                     request.network_id,
                                                     port_req_body,
                                                     request.address,
                                                     security_group_ids,
                                                     available_macs, dhcp_opts)
                    created_port_ids.append(created_port)
                    ports_in_requested_order.append(created_port)
            except Exception:
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        try:
                            port_req_body = {'port': {'device_id': ''}}
                            # Requires admin creds to set port bindings
                            if self._has_port_binding_extension(context):
                                port_req_body['port']['binding:host_id'] = None
                                port_client = neutronv2.get_client(context,
                                                                   admin=True)
                            else:
                                port_client = neutron
                            port_client.update_port(port_id, port_req_body)
                        except Exception:
                            msg = _LE("Failed to update port %s")
                            LOG.exception(msg, port_id)

                    self._delete_ports(neutron, instance, created_port_ids)

        pci_list = kwargs.get('pci_list', [])
        nw_info = self.get_instance_nw_info(context,
                                            instance,
                                            networks=nets_in_requested_order,
                                            port_ids=ports_in_requested_order,
                                            pci_list=pci_list)
        # NOTE(): Only return info about ports we created in this run.
        # In the initial allocation case, this will be everything we created,
        # and in later runs will only be what was created that time. Thus,
        # this only affects the attach case, not the original use for this
        # method.
        return network_model.NetworkInfo([
            vif for vif in nw_info
            if vif['id'] in created_port_ids + touched_port_ids
        ])
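
On failure, the rollback above distinguishes ports that were merely touched (their device_id is reset so the port survives) from ports this call created (deleted outright). A stripped-down sketch of that split, assuming a neutron-like client object:

def rollback_ports(client, touched_port_ids, created_port_ids):
    # Detach pre-existing ports; we only updated them.
    for port_id in touched_port_ids:
        client.update_port(port_id, {'port': {'device_id': ''}})
    # Remove ports this allocation created.
    for port_id in created_port_ids:
        client.delete_port(port_id)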
Example #43
    def create_volume(self,
                      context,
                      volume_id,
                      snapshot_id=None,
                      image_id=None,
                      reservations=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume_ref, "create.start")
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context, volume_id, {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        status = 'available'
        model_update = False

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(
                _("volume %(vol_name)s: creating lv of"
                  " size %(vol_size)sG") % locals())
            if snapshot_id is None and image_id is None:
                model_update = self.driver.create_volume(volume_ref)
            elif snapshot_id is not None:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref, snapshot_ref)
            else:
                # Create the volume from an image.
                image_service, image_id = glance.get_remote_image_service(
                    context, image_id)
                image_location = image_service.get_location(context, image_id)
                cloned = self.driver.clone_image(volume_ref, image_location)
                if not cloned:
                    model_update = self.driver.create_volume(volume_ref)
                    status = 'downloading'

            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_ref['id'],
                                      {'status': 'error'})

        now = timeutils.utcnow()
        volume_ref = self.db.volume_update(context, volume_ref['id'], {
            'status': status,
            'launched_at': now
        })
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._reset_stats()
        self._notify_about_volume_usage(context, volume_ref, "create.end")

        if image_id and not cloned:
            # Copy the image onto the volume.
            self._copy_image_to_volume(context, volume_ref, image_id)
        return volume_id
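
The create.start / create.end notifications bracket the whole operation for metering consumers; when the body raises, the save_and_reraise block marks the volume as error and no end event is emitted. A sketch of that bracket, with notify standing in for _notify_about_volume_usage:

def with_usage_notifications(notify, resource, action, work):
    # Emit <action>.start, run the work, then emit <action>.end.
    # If work() raises, the end event is deliberately skipped.
    notify(resource, '%s.start' % action)
    result = work()
    notify(resource, '%s.end' % action)
    return result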