def test_power_on_revert(self, mock_pwroff, mock_pwron):
    """Reverting a successful PowerOn powers off; a failed one does not."""
    flow = tf_lf.Flow('revert_power_on')
    pwron = tf_vm.PowerOn(self.apt, self.instance)
    flow.add(pwron)

    # Dummy Task that fails, triggering flow revert
    def failure(*a, **k):
        raise ValueError()
    flow.add(tf_tsk.FunctorTask(failure))

    # When PowerOn.execute doesn't fail, revert calls power_off
    self.assertRaises(ValueError, tf_eng.run, flow)
    mock_pwron.assert_called_once_with(self.apt, self.instance)
    mock_pwroff.assert_called_once_with(self.apt, self.instance,
                                        force_immediate=True)

    mock_pwron.reset_mock()
    mock_pwroff.reset_mock()

    # When PowerOn.execute fails, revert doesn't call power_off
    mock_pwron.side_effect = exception.NovaException()
    self.assertRaises(exception.NovaException, tf_eng.run, flow)
    mock_pwron.assert_called_once_with(self.apt, self.instance)
    mock_pwroff.assert_not_called()
def get_dev_prefix_for_disk_bus(disk_bus):
    """Determine the dev prefix for a disk bus.

    Determine the dev prefix to be combined with a disk number to fix a
    disk_dev. eg 'hd' for 'ide' bus can be used to form a disk dev 'hda'

    Returns the dev prefix or raises an exception if the disk bus
    is unknown.
    """
    # An operator-configured prefix always takes precedence.
    if CONF.libvirt.disk_prefix:
        return CONF.libvirt.disk_prefix

    # Two possible mappings exist for Xen, xvda or sda, which are
    # interchangeable, so we pick sda.  'lxc' legitimately has no
    # prefix at all, hence the explicit None entry.
    prefix_by_bus = {
        "ide": "hd",
        "virtio": "vd",
        "xen": "sd",
        "scsi": "sd",
        "usb": "sd",
        "fdc": "fd",
        "uml": "ubd",
        "lxc": None,
    }
    try:
        return prefix_by_bus[disk_bus]
    except KeyError:
        raise exception.NovaException(
            _("Unable to determine disk prefix for %s") % disk_bus)
def setup_container(image, container_dir):
    """Set up the LXC container root filesystem.

    Mounts the loopback image at the container directory so it can serve
    as the root filesystem for the container.

    :param image: instance of nova.virt.image.model.Image
    :param container_dir: directory to mount the image at
    :return: path of the image device mounted at the container directory
    :raises: NovaException when the image cannot be mounted
    """
    disk = _DiskImage(image=image, mount_dir=container_dir)
    device = disk.mount()
    if device is not None:
        return device

    LOG.error("Failed to mount container filesystem '%(image)s' "
              "on '%(target)s': %(errors)s",
              {"image": disk, "target": container_dir,
               "errors": disk.errors})
    raise exception.NovaException(disk.errors)
def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
                          mock_fg_bid, mock_set_pstate):
    """Rebuild wraps driver-level errors in InstanceDeployFailure.

    Any of NovaException, BadRequest or InternalServerError raised while
    setting the node's provision state must surface to the caller as
    InstanceDeployFailure.
    """
    node_uuid = uuidutils.generate_uuid()
    instance_uuid = uuidutils.generate_uuid()
    node = ironic_utils.get_test_node(uuid=node_uuid,
                                      instance_uuid=instance_uuid,
                                      instance_type_id=5)
    mock_get.return_value = node

    image_meta = ironic_utils.get_test_image_meta()
    flavor_id = 5
    flavor = {'id': flavor_id, 'name': 'baremetal'}
    mock_fg_bid.return_value = flavor
    instance = fake_instance.fake_instance_obj(self.ctx,
                                               uuid=instance_uuid,
                                               node=node_uuid,
                                               instance_type_id=flavor_id)

    exceptions = [
        exception.NovaException(),
        ironic_exception.BadRequest(),
        ironic_exception.InternalServerError(),
    ]
    for e in exceptions:
        # Each failure mode must be translated uniformly.
        mock_set_pstate.side_effect = e
        self.assertRaises(exception.InstanceDeployFailure,
                          self.driver.rebuild,
                          context=self.ctx, instance=instance,
                          image_meta=image_meta, injected_files=None,
                          admin_password=None, bdms=None,
                          detach_block_devices=None,
                          attach_block_devices=None)
def create_alias(self, alias, instance):
    """Create an alias for a given image.

    :param alias: The alias to be created
    :param instance: The nova instance
    :return: True if the alias is created, False otherwise
    """
    LOG.debug('create_alias called for instance', instance=instance)
    try:
        client = self.get_session()
        return client.alias_create(alias)
    except lxd_exceptions.APIError as ex:
        # LXD answered but reported an API-level failure; surface it as
        # a NovaException so callers see a uniform error type.
        msg = _('Failed to communicate with LXD API %(instance)s:'
                ' %(reason)s') % {'instance': instance.image_ref,
                                  'reason': ex}
        LOG.error(msg)
        raise exception.NovaException(msg)
    except Exception as e:
        # Unexpected errors are logged and re-raised unchanged.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error from LXD during create alias'
                          '%(instance)s: %(reason)s'),
                      {'instance': instance.image_ref, 'reason': e},
                      instance=instance)
def container_publish(self, image, instance):
    """Publish a container to the local LXD image store.

    :param image: LXD fingerprint
    :param instance: nova instance object
    :return: True if published, False otherwise
    """
    LOG.debug('container_publish called for instance', instance=instance)
    try:
        return self.get_session().container_publish(image)
    except lxd_exceptions.APIError as api_err:
        # LXD responded with an API failure; normalize the error type.
        msg = _('Failed to communicate with LXD API %(instance)s:'
                ' %(reason)s') % {'instance': instance.name,
                                  'reason': api_err}
        raise exception.NovaException(msg)
    except Exception as err:
        # Anything else is logged, then re-raised untouched.
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE('Failed to publish container %(instance)s: '
                    '%(reason)s'),
                {'instance': instance.name, 'reason': err},
                instance=instance)
def container_destroy(self, instance_name, instance):
    """Destroy a LXD container.

    Stops the container first, then deletes it via the LXD API.
    A container that does not exist is silently ignored.

    :param instance_name: container name
    :param instance: nova instance object
    """
    LOG.debug('container_destroy for instance', instance=instance)
    try:
        # Nothing to do if the container was never created.
        if not self.container_defined(instance_name, instance):
            return

        LOG.info(_LI('Destroying instance %(instance)s with'
                     ' %(image)s'),
                 {'instance': instance.name,
                  'image': instance.image_ref})

        # Destroying container
        self.container_stop(instance_name, instance)

        client = self.get_session()
        (state, data) = client.container_destroy(instance_name)
        # Destruction is asynchronous; block until the operation ends.
        self.operation_wait(data.get('operation'), instance)

        LOG.info(_LI('Successfully destroyed instance %(instance)s with'
                     ' %(image)s'),
                 {'instance': instance.name,
                  'image': instance.image_ref})
    except lxd_exceptions.APIError as ex:
        msg = _('Failed to communicate with LXD API %(instance)s:'
                ' %(reason)s') % {'instance': instance.name,
                                  'reason': ex}
        raise exception.NovaException(msg)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Failed to destroy container %(instance)s: '
                          '%(reason)s'),
                      {'instance': instance_name,
                       'reason': ex})
def container_unpause(self, instance_name, instance):
    """Unpause a LXD container.

    :param instance_name: container name
    :param instance: nova instance object
    :raises: InstanceNotFound when the container does not exist
    :raises: NovaException when the LXD API reports a failure
    """
    LOG.debug('container_unpause called for instance', instance=instance)
    try:
        if not self.container_defined(instance_name, instance):
            msg = _('Instance is not found %s') % instance_name
            raise exception.InstanceNotFound(msg)

        LOG.info(_LI('Unpausing instance %(instance)s with'
                     ' %(image)s'),
                 {'instance': instance.name,
                  'image': instance.image_ref})

        client = self.get_session()
        (state, data) = client.container_resume(instance_name,
                                                CONF.lxd.timeout)
        # The resume is asynchronous; wait for the LXD operation.
        self.operation_wait(data.get('operation'), instance)

        LOG.info(_LI('Successfully unpaused instance %(instance)s with'
                     ' %(image)s'),
                 {'instance': instance.name,
                  'image': instance.image_ref})
    except lxd_exceptions.APIError as ex:
        msg = _('Failed to communicate with LXD API %(instance)s:'
                ' %(reason)s') % {'instance': instance.name,
                                  'reason': ex}
        raise exception.NovaException(msg)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE('Failed to unpause container %(instance)s: '
                    '%(reason)s'),
                {'instance': instance_name,
                 'reason': ex})
def get_tftp_image_info(instance, flavor):
    """Generate the paths for tftp files for this instance.

    Raises NovaException if
    - instance does not contain kernel_id or ramdisk_id
    - deploy_kernel_id or deploy_ramdisk_id can not be read from
      flavor['extra_specs'] and defaults are not set
    """
    # Each entry maps label -> [image uuid, tftp path].
    image_info = {
        'kernel': [None, None],
        'ramdisk': [None, None],
        'deploy_kernel': [None, None],
        'deploy_ramdisk': [None, None],
    }
    try:
        # NOTE: a KeyError on an early lookup leaves the later entries
        # as None too; all missing ids are then reported collectively.
        image_info['kernel'][0] = str(instance['kernel_id'])
        image_info['ramdisk'][0] = str(instance['ramdisk_id'])
        image_info['deploy_kernel'][0] = get_deploy_aki_id(flavor)
        image_info['deploy_ramdisk'][0] = get_deploy_ari_id(flavor)
    except KeyError:
        pass

    missing_labels = []
    for label in image_info.keys():
        (uuid, path) = image_info[label]
        if not uuid:
            missing_labels.append(label)
        else:
            # Images live under <tftp_root>/<instance uuid>/<label>.
            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
                                                instance['uuid'], label)
    if missing_labels:
        raise exception.NovaException(
            _("Can not activate PXE bootloader. The following boot parameters "
              "were not passed to baremetal driver: %s") % missing_labels)
    return image_info
def container_defined(self, instance_name, instance):
    """Determine if the container exists.

    :param instance_name: container name
    :param instance: nova instance object
    :return: True if it exists, otherwise False
    """
    LOG.debug('container_defined for instance', instance=instance)
    try:
        return self.get_session().container_defined(instance_name)
    except lxd_exceptions.APIError as api_err:
        # A 404 simply means the container is absent.
        if api_err.status_code == 404:
            return False
        msg = _('Failed to get container status: %s') % api_err
        raise exception.NovaException(msg)
    except Exception as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error from LXD during container_defined'
                          '%(instance)s: %(reason)s'),
                      {'instance': instance.name, 'reason': err},
                      instance=instance)
def setup_container(image, container_dir, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.  The first partition
    of the image is used.

    :param image: image to mount (consumed by _DiskImage -- confirm the
        exact type against _DiskImage's contract)
    :param container_dir: directory to mount the image at
    :param use_cow: whether the image is copy-on-write backed

    Returns path of image device which is mounted to the container
    directory.
    """
    img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir,
                     partition=1)
    dev = img.mount()
    if dev is None:
        LOG.error(
            _LE("Failed to mount container filesystem '%(image)s' "
                "on '%(target)s': %(errors)s"),
            {"image": img, "target": container_dir,
             "errors": img.errors})
        raise exception.NovaException(img.errors)
    return dev
def install_policy(self, context, policy_ini_string, wait):
    """Push a policy definition to every cobalt host.

    Hosts are called synchronously until one call succeeds (or always,
    when ``wait`` is set); once validated, remaining hosts receive the
    policy via asynchronous cast.

    :param context: request context
    :param policy_ini_string: policy file contents to install
    :param wait: when True, every host is called synchronously and
        failures are accumulated in ``faults`` instead of raising
    :raises: NovaException on the first synchronous failure when
        ``wait`` is False
    """
    validated = False
    faults = []
    for host in self._list_cobalt_hosts(context):
        queue = rpc.queue_get_for(context, CONF.cobalt_topic, host)
        args = {
            "method": "install_policy",
            "args": {"policy_ini_string": policy_ini_string},
        }
        if (not validated) or wait:
            try:
                rpc.call(context, queue, args)
                validated = True
            except Exception as ex:
                # NOTE: 'except Exception, ex' was Python-2-only syntax;
                # 'as' works on both 2.6+ and 3.x.
                faults.append((host, str(ex)))
                if not wait:
                    # Interpolate outside _() so the message catalogue
                    # keys on the unformatted template.
                    raise exception.NovaException(
                        _("Failed to install policy on host %s: %s")
                        % (host, str(ex)))
        else:
            rpc.cast(context, queue, args)
def _remove_destroy(self, name, project):
    """Remove the LUN from the dataset, also destroying it.

    Remove the LUN from the dataset and destroy the actual LUN on the
    storage system.

    :param name: volume name used to look up the LUN
    :param project: project the volume belongs to
    :raises: NovaException when the dataset edit fails (after rolling
        back the edit lock)
    """
    lun = self._lookup_lun_for_volume(name, project)
    member = self.client.factory.create('DatasetMemberParameter')
    member.ObjectNameOrId = lun.id
    members = self.client.factory.create('ArrayOfDatasetMemberParameter')
    members.DatasetMemberParameter = [member]

    server = self.client.service
    # Dataset edits are transactional: begin, mutate, commit -- roll
    # back on any failure so the edit lock is released.
    lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id)
    try:
        server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True,
                                   DatasetMemberParameters=members)
        server.DatasetEditCommit(EditLockId=lock_id,
                                 AssumeConfirmation=True)
    except Exception:
        # NOTE: the original 'except (suds.WebFault, Exception)' was
        # redundant -- Exception already subsumes WebFault.
        server.DatasetEditRollback(EditLockId=lock_id)
        msg = _('Failed to remove and delete dataset member')
        raise exception.NovaException(msg)
def _parse_pci_device_string(dev_string):
    """Extract slot (BDF), vendor id and product id from a device string.

    The string is as follows:
    "Slot:\tBDF\nClass:\txxxx\nVendor:\txxxx\nDevice:\txxxx\n..."

    Return a dictionary with information about the device.
    """
    # _compile_hex presumably expands the literal "hex" into a hex-digit
    # character class -- TODO confirm against its definition.
    slot_regex = _compile_hex(r"Slot:\t"
                              r"((?:hex{4}:)?"  # Domain: (optional)
                              r"hex{2}:"        # Bus:
                              r"hex{2}\."       # Device.
                              r"hex{1})")       # Function
    vendor_regex = _compile_hex(r"\nVendor:\t(hex+)")
    product_regex = _compile_hex(r"\nDevice:\t(hex+)")

    slot_id = slot_regex.findall(dev_string)
    vendor_id = vendor_regex.findall(dev_string)
    product_id = product_regex.findall(dev_string)

    if not slot_id or not vendor_id or not product_id:
        raise exception.NovaException(
            _("Failed to parse information about"
              " a pci device for passthrough"))

    # NOTE(review): 'self' is not defined in this free function -- this
    # call would raise NameError at runtime; the function likely belongs
    # on a class (or the plugin call should be made by the caller).
    # Confirm against the enclosing module.
    type_pci = self._session.call_plugin_serialized(
        'xenhost', 'get_pci_type', slot_id[0])

    return {'label': '_'.join(['label',
                               vendor_id[0],
                               product_id[0]]),
            'vendor_id': vendor_id[0],
            'product_id': product_id[0],
            'address': slot_id[0],
            'dev_id': '_'.join(['pci', slot_id[0]]),
            'dev_type': type_pci,
            'status': 'available'}
def _plug_vifs(self, node, instance, network_info):
    """Bind each VIF in network_info to one of the Ironic node's ports.

    The neutron port id of every VIF is written into the matching
    physical port's '/extra/vif_port_id' field.

    :param node: ironic node object
    :param instance: nova instance (only 'uuid' is read)
    :param network_info: nova network info model
    :raises: NovaException if there are more VIFs than physical ports
    """
    # NOTE(PhilDay): Accessing network_info will block if the thread
    # it wraps hasn't finished, so do this ahead of time so that we
    # don't block while holding the logging lock.
    network_info_str = str(network_info)
    LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
              {'uuid': instance['uuid'],
               'network_info': network_info_str})
    # start by ensuring the ports are clear
    self._unplug_vifs(node, instance, network_info)

    icli = client_wrapper.IronicClientWrapper()
    ports = icli.call("node.list_ports", node.uuid)

    if len(network_info) > len(ports):
        # NOTE: fixed user-facing typo ("missmatch" -> "mismatch").
        raise exception.NovaException(
            _("Ironic node: %(id)s virtual to physical interface count"
              " mismatch"
              " (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
            % {'id': node.uuid,
               'vif_count': len(network_info),
               'pif_count': len(ports)})

    if len(network_info) > 0:
        # not needed if no vif are defined
        for vif, pif in zip(network_info, ports):
            # attach what neutron needs directly to the port
            port_id = unicode(vif['id'])
            patch = [{'op': 'add',
                      'path': '/extra/vif_port_id',
                      'value': port_id}]
            icli.call("port.update", pif.uuid, patch)
def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
                partition=None, use_cow=False):
    """Inject an ssh key and optionally net data into a disk image.

    When ``partition`` is given the image is mounted as a fully
    partitioned disk and injection targets that partition number;
    otherwise the image is mounted as a single partition.

    Raises NovaException when the image cannot be mounted.
    """
    disk = _DiskImage(image=image, partition=partition, use_cow=use_cow)
    if not disk.mount():
        raise exception.NovaException(disk.errors)
    try:
        inject_data_into_fs(disk.mount_dir, key, net, metadata,
                            admin_password, utils.execute)
    finally:
        # Always unmount, even when injection fails part-way.
        disk.umount()
def connect_volume(self, connection_info, disk_info):
    """Attach the volume to instance_name.

    Builds candidate /dev/disk/by-path device names for every FC HBA and
    target wwn, waits (with periodic rescans) for the first node to
    appear, then prefers the multipath device when one exists.

    :param connection_info: dict whose 'data' member holds the Fibre
        Channel target properties; 'devices'/'multipath_id' are written
        back into it
    :param disk_info: dict with the target device name under 'dev'
    :return: libvirt volume config with source set to the found device
    :raises: NovaException when no FC HBAs exist or the device never
        appears within CONF.num_iscsi_scan_tries
    """
    fc_properties = connection_info['data']
    mount_device = disk_info["dev"]

    ports = fc_properties['target_wwn']
    wwns = []
    # we support a list of wwns or a single wwn
    if isinstance(ports, list):
        for wwn in ports:
            wwns.append(wwn)
    elif isinstance(ports, str):
        # NOTE(review): a unicode wwn would be silently ignored here on
        # Python 2 -- confirm callers always pass str or list.
        wwns.append(ports)

    # We need to look for wwns on every hba
    # because we don't know ahead of time
    # where they will show up.
    hbas = virtutils.get_fc_hbas_info()
    host_devices = []
    for hba in hbas:
        pci_num = self._get_pci_num(hba)
        if pci_num is not None:
            for wwn in wwns:
                target_wwn = "0x%s" % wwn.lower()
                host_device = (
                    "/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                    (pci_num, target_wwn,
                     fc_properties.get('target_lun', 0)))
                host_devices.append(host_device)

    if len(host_devices) == 0:
        # this is empty because we don't have any FC HBAs
        msg = _("We are unable to locate any Fibre Channel devices")
        raise exception.NovaException(msg)

    # The /dev/disk/by-path/... node is not always present immediately
    # We only need to find the first device.  Once we see the first device
    # multipath will have any others.
    def _wait_for_device_discovery(host_devices, mount_device):
        # Runs in a FixedIntervalLoopingCall; state is carried on self.
        tries = self.tries
        for device in host_devices:
            LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
                      {'device': device})
            if os.path.exists(device):
                self.host_device = device
                # get the /dev/sdX device. This is used
                # to find the multipath device.
                self.device_name = os.path.realpath(device)
                raise loopingcall.LoopingCallDone()

        if self.tries >= CONF.num_iscsi_scan_tries:
            msg = _("Fibre Channel device not found.")
            raise exception.NovaException(msg)

        LOG.warn(_("Fibre volume not yet found at: %(mount_device)s. "
                   "Will rescan & retry.  Try number: %(tries)s"),
                 {'mount_device': mount_device,
                  'tries': tries})

        linuxscsi.rescan_hosts(hbas)
        self.tries = self.tries + 1

    self.host_device = None
    self.device_name = None
    self.tries = 0
    timer = loopingcall.FixedIntervalLoopingCall(
        _wait_for_device_discovery, host_devices, mount_device)
    timer.start(interval=2).wait()

    tries = self.tries
    if self.host_device is not None and self.device_name is not None:
        LOG.debug(_("Found Fibre Channel volume %(mount_device)s "
                    "(after %(tries)s rescans)"),
                  {'mount_device': mount_device,
                   'tries': tries})

    # see if the new drive is part of a multipath
    # device.  If so, we'll use the multipath device.
    mdev_info = linuxscsi.find_multipath_device(self.device_name)
    if mdev_info is not None:
        LOG.debug(_("Multipath device discovered %(device)s")
                  % {'device': mdev_info['device']})
        device_path = mdev_info['device']
        connection_info['data']['devices'] = mdev_info['devices']
        connection_info['data']['multipath_id'] = mdev_info['id']
    else:
        # we didn't find a multipath device.
        # so we assume the kernel only sees 1 device
        device_path = self.host_device
        device_info = linuxscsi.get_device_info(self.device_name)
        connection_info['data']['devices'] = [device_info]

    conf = super(LibvirtFibreChannelVolumeDriver,
                 self).connect_volume(connection_info, disk_info)

    conf.source_type = "block"
    conf.source_path = device_path
    return conf
def legacy(self):
    """Return the legacy network_info representation of self.

    Produces a list of (network_dict, info_dict) tuples, one per VIF.
    The legacy format supports only one v4 and one v6 subnet per
    network; a VIF without a v4 subnet raises NovaException.
    """
    def get_ip(ip):
        """Return the address of the IP object, or None when absent."""
        if not ip:
            return None
        return ip['address']

    def fixed_ip_dict(ip, subnet):
        """Build the legacy fixed-ip dict for one IP in a subnet."""
        netmask = get_netmask(ip, subnet)
        return {'ip': ip['address'],
                'enabled': '1',
                'netmask': netmask,
                'gateway': get_ip(subnet['gateway'])}

    def convert_routes(routes):
        """Convert route objects to legacy route dicts."""
        routes_list = []
        for route in routes:
            r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
                 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
                 'gateway': get_ip(route['gateway'])}
            routes_list.append(r)
        return routes_list

    network_info = []
    for vif in self:
        # if vif doesn't have network or that network has no subnets, quit
        if not vif['network'] or not vif['network']['subnets']:
            continue
        network = vif['network']

        # NOTE(jkoelker) The legacy format only supports one subnet per
        #                network, so we only use the 1st one of each type
        # NOTE(tr3buchet): o.O
        v4_subnets = []
        v6_subnets = []
        for subnet in vif['network']['subnets']:
            if subnet['version'] == 4:
                v4_subnets.append(subnet)
            else:
                v6_subnets.append(subnet)

        subnet_v4 = None
        subnet_v6 = None

        if v4_subnets:
            subnet_v4 = v4_subnets[0]

        if v6_subnets:
            subnet_v6 = v6_subnets[0]

        if not subnet_v4:
            msg = _('v4 subnets are required for legacy nw_info')
            raise exception.NovaException(message=msg)

        routes = convert_routes(subnet_v4['routes'])
        should_create_bridge = network.get_meta('should_create_bridge',
                                                False)
        should_create_vlan = network.get_meta('should_create_vlan', False)
        gateway = get_ip(subnet_v4['gateway'])
        dhcp_server = subnet_v4.get_meta('dhcp_server', gateway)

        network_dict = {
            'bridge': network['bridge'],
            'id': network['id'],
            'cidr': subnet_v4['cidr'],
            'cidr_v6': subnet_v6['cidr'] if subnet_v6 else None,
            'vlan': network.get_meta('vlan'),
            'injected': network.get_meta('injected', False),
            'multi_host': network.get_meta('multi_host', False),
            'bridge_interface': network.get_meta('bridge_interface')}
        # NOTE(tr3buchet): 'ips' bit here is tricky, we support a single
        #                  subnet but we want all the IPs to be there
        #                  so use the v4_subnets[0] and its IPs are first
        #                  so that eth0 will be from subnet_v4, the rest of
        #                  the IPs will be aliased eth0:1 etc and the
        #                  gateways from their subnets will not be used
        info_dict = {'label': network['label'],
                     'broadcast': str(subnet_v4.as_netaddr().broadcast),
                     'mac': vif['address'],
                     'vif_type': vif['type'],
                     'vif_devname': vif.get('devname'),
                     'vif_uuid': vif['id'],
                     'ovs_interfaceid': vif.get('ovs_interfaceid'),
                     'qbh_params': vif.get('qbh_params'),
                     'qbg_params': vif.get('qbg_params'),
                     'rxtx_cap': vif.get_meta('rxtx_cap', 0),
                     'dns': [get_ip(ip) for ip in subnet_v4['dns']],
                     'gateway': gateway,
                     'ips': [fixed_ip_dict(ip, subnet)
                             for subnet in v4_subnets
                             for ip in subnet['ips']],
                     'should_create_bridge': should_create_bridge,
                     'should_create_vlan': should_create_vlan,
                     'dhcp_server': dhcp_server}
        if routes:
            info_dict['routes'] = routes

        if v6_subnets:
            info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
            # NOTE(tr3buchet): only supporting single v6 subnet here
            info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
                                 for ip in subnet_v6['ips']]

        network_info.append((network_dict, info_dict))
    return network_info
def connect_volume(self, connection_info, disk_info):
    """Attach the volume to instance_name.

    Logs into the iSCSI portal (all discovered portals when multipath is
    enabled), waits for the device node to appear, and returns a libvirt
    config pointing at the block device (or its multipath device).

    :raises: NovaException when the device never appears within
        CONF.num_iscsi_scan_tries rescans
    """
    conf = super(LibvirtISCSIVolumeDriver,
                 self).connect_volume(connection_info, disk_info)

    iscsi_properties = connection_info['data']

    libvirt_iscsi_use_multipath = CONF.libvirt_iscsi_use_multipath

    if libvirt_iscsi_use_multipath:
        # multipath installed, discovering other targets if available
        # multipath should be configured on the nova-compute node,
        # in order to fit storage vendor
        out = self._run_iscsiadm_bare(
            ['-m', 'discovery', '-t', 'sendtargets',
             '-p', iscsi_properties['target_portal']],
            check_exit_code=[0, 255])[0] or ""

        # Log into every portal that advertises this target.
        for ip in self._get_target_portals_from_iscsiadm_output(out):
            props = iscsi_properties.copy()
            props['target_portal'] = ip
            self._connect_to_iscsi_portal(props)

        self._rescan_iscsi()
    else:
        self._connect_to_iscsi_portal(iscsi_properties)

    host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
                   (iscsi_properties['target_portal'],
                    iscsi_properties['target_iqn'],
                    iscsi_properties.get('target_lun', 0)))

    # The /dev/disk/by-path/... node is not always present immediately
    # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
    tries = 0
    disk_dev = disk_info['dev']
    while not os.path.exists(host_device):
        if tries >= CONF.num_iscsi_scan_tries:
            raise exception.NovaException(_("iSCSI device not found at %s")
                                          % (host_device))

        LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. "
                   "Will rescan & retry.  Try number: %(tries)s"),
                 {'disk_dev': disk_dev,
                  'tries': tries})

        # The rescan isn't documented as being necessary(?), but it helps
        self._run_iscsiadm(iscsi_properties, ("--rescan",))

        tries = tries + 1
        if not os.path.exists(host_device):
            # Quadratic back-off before looking again.
            time.sleep(tries ** 2)

    if tries != 0:
        LOG.debug(_("Found iSCSI node %(disk_dev)s "
                    "(after %(tries)s rescans)"),
                  {'disk_dev': disk_dev,
                   'tries': tries})

    if libvirt_iscsi_use_multipath:
        # we use the multipath device instead of the single path device
        self._rescan_multipath()
        multipath_device = self._get_multipath_device_name(host_device)
        if multipath_device is not None:
            host_device = multipath_device

    conf.source_type = "block"
    conf.source_path = host_device
    return conf
def _connect_auth_cb(creds, opaque):
    """Libvirt auth callback: succeed only when nothing is requested.

    Returns 0 when libvirt asks for no credentials; any actual
    credential request is unsupported and raises NovaException.
    """
    if creds:
        raise exception.NovaException(
            _("Can not handle authentication request for %d credentials")
            % len(creds))
    return 0
def test_force_complete_unexpected_error(self):
    """An unexpected NovaException maps to HTTP 500."""
    self._test_force_complete_failed_with_exception(
        exception.NovaException(), webob.exc.HTTPInternalServerError)
def _get_default_deleted_value(table):
    """Return the 'deleted' sentinel matching the table's id column type.

    Integer ids use 0 and string ids use the empty string; any other
    column type is unsupported and raises NovaException.
    """
    id_type = table.c.id.type
    if isinstance(id_type, Integer):
        return 0
    if isinstance(id_type, String):
        return ""
    raise exception.NovaException(_("Unsupported id columns type"))
def check_for_session_leaks():
    """Raise NovaException if any fake sessions were left open."""
    leaked = _db_content['session']
    if leaked:
        raise exception.NovaException('Sessions have leaked: %s' % leaked)
def _get_client(self, retry_on_conflict=True):
    """Build (or return the cached) authenticated Ironic client.

    :param retry_on_conflict: when True, the client retries Conflict
        responses per CONF.ironic settings and the constructed client
        is cached on self for reuse; when False, no retries are
        performed and nothing is cached.
    :return: an ironic client instance
    :raises: NovaException when authentication against Ironic fails
    """
    max_retries = CONF.ironic.api_max_retries if retry_on_conflict else 1
    retry_interval = (CONF.ironic.api_retry_interval
                      if retry_on_conflict else 0)

    # If we've already constructed a valid, authed client, just return
    # that.
    if retry_on_conflict and self._cached_client is not None:
        return self._cached_client

    auth_plugin = self._get_auth_plugin()

    sess = ks_loading.load_session_from_conf_options(CONF,
                                                     IRONIC_GROUP.name,
                                                     auth=auth_plugin)

    # Retries for Conflict exception
    kwargs = {}
    kwargs['max_retries'] = max_retries
    kwargs['retry_interval'] = retry_interval
    # NOTE(TheJulia): The ability for a list of available versions to be
    # accepted was added in python-ironicclient 2.2.0. The highest
    # available version will be utilized by the client for the lifetime
    # of the client.
    kwargs['os_ironic_api_version'] = [
        '%d.%d' % IRONIC_API_VERSION, '%d.%d' % PRIOR_IRONIC_API_VERSION]

    ironic_conf = CONF[IRONIC_GROUP.name]
    # valid_interfaces is a list. ironicclient passes this kwarg through to
    # ksa, which is set up to handle 'interface' as either a list or a
    # single value.
    kwargs['interface'] = ironic_conf.valid_interfaces

    # NOTE(clenimar/efried): by default, the endpoint is taken from the
    # service catalog. Use `endpoint_override` if you want to override it.
    if CONF.ironic.api_endpoint:
        # NOTE(efried): `api_endpoint` still overrides service catalog and
        # `endpoint_override` conf options. This will be removed in a
        # future release.
        ironic_url = CONF.ironic.api_endpoint
    else:
        try:
            ksa_adap = utils.get_ksa_adapter(
                nova.conf.ironic.DEFAULT_SERVICE_TYPE,
                ksa_auth=auth_plugin, ksa_session=sess,
                min_version=(IRONIC_API_VERSION[0], 0),
                max_version=(IRONIC_API_VERSION[0], ks_disc.LATEST))
            ironic_url = ksa_adap.get_endpoint()
            ironic_url_none_reason = 'returned None'
        except exception.ServiceNotFound:
            # NOTE(efried): No reason to believe service catalog lookup
            # won't also fail in ironic client init, but this way will
            # yield the expected exception/behavior.
            ironic_url = None
            ironic_url_none_reason = 'raised ServiceNotFound'

        if ironic_url is None:
            LOG.warning("Could not discover ironic_url via keystoneauth1: "
                        "Adapter.get_endpoint %s", ironic_url_none_reason)
            # NOTE(eandersson): We pass in region here to make sure
            # that the Ironic client can make an educated decision when
            # we don't have a valid endpoint to pass on.
            kwargs['region_name'] = ironic_conf.region_name

    try:
        cli = ironic.client.get_client(IRONIC_API_VERSION[0],
                                       endpoint=ironic_url,
                                       session=sess, **kwargs)
        # Cache the client so we don't have to reconstruct and
        # reauthenticate it every time we need it.
        if retry_on_conflict:
            self._cached_client = cli

    except ironic.exc.Unauthorized:
        msg = _("Unable to authenticate Ironic client.")
        LOG.error(msg)
        raise exception.NovaException(msg)

    return cli
def test_error_msg(self):
    """unicode() of a NovaException returns the message passed in."""
    # NOTE: assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(unicode(exception.NovaException('test')), 'test')
def delete_volume_for_sm(self, vdi_uuid):
    """Destroy the VDI identified by ``vdi_uuid``.

    :param vdi_uuid: uuid of the storage-manager volume's VDI
    :raises: NovaException when no VDI ref matches the uuid
    """
    vdi_ref = self._session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
    if vdi_ref is None:
        raise exception.NovaException(_('Could not find VDI ref'))

    vm_utils.destroy_vdi(self._session, vdi_ref)
def new_websocket_client(self):
    """Called after a new WebSocket connection has been established.

    Extracts the console token (query string or cookie), validates the
    Origin header against allowed hosts, connects to the console target,
    performs the optional CONNECT handshake and security-proxy
    negotiation, then proxies traffic until either side closes.
    """
    # Reopen the eventlet hub to make sure we don't share an epoll
    # fd with parent and/or siblings, which would be bad
    from eventlet import hubs
    hubs.use_hub()

    # The nova expected behavior is to have token
    # passed to the method GET of the request
    parse = urlparse.urlparse(self.path)
    if parse.scheme not in ('http', 'https'):
        # From a bug in urlparse in Python < 2.7.4 we cannot support
        # special schemes (cf: http://bugs.python.org/issue9374)
        if sys.version_info < (2, 7, 4):
            raise exception.NovaException(
                _("We do not support scheme '%s' under Python < 2.7.4, "
                  "please use http or https") % parse.scheme)

    query = parse.query
    token = urlparse.parse_qs(query).get("token", [""]).pop()
    if not token:
        # NoVNC uses it's own convention that forward token
        # from the request to a cookie header, we should check
        # also for this behavior
        hcookie = self.headers.get('cookie')
        if hcookie:
            cookie = Cookie.SimpleCookie()
            for hcookie_part in hcookie.split(';'):
                hcookie_part = hcookie_part.lstrip()
                try:
                    cookie.load(hcookie_part)
                except Cookie.CookieError:
                    # NOTE(stgleb): Do not print out cookie content
                    # for security reasons.
                    LOG.warning('Found malformed cookie')
                else:
                    if 'token' in cookie:
                        token = cookie['token'].value

    ctxt = context.get_admin_context()
    connect_info = self._get_connect_info(ctxt, token)

    # Verify Origin
    expected_origin_hostname = self.headers.get('Host')
    if ':' in expected_origin_hostname:
        e = expected_origin_hostname
        if '[' in e and ']' in e:
            # Bracketed IPv6 literal: strip brackets and port.
            expected_origin_hostname = e.split(']')[0][1:]
        else:
            expected_origin_hostname = e.split(':')[0]
    # NOTE(review): append() mutates the CONF-provided list in place, so
    # repeated connections may keep growing allowed_origins -- confirm
    # whether a copy should be taken first.
    expected_origin_hostnames = CONF.console.allowed_origins
    expected_origin_hostnames.append(expected_origin_hostname)
    origin_url = self.headers.get('Origin')
    # missing origin header indicates non-browser client which is OK
    if origin_url is not None:
        origin = urlparse.urlparse(origin_url)
        origin_hostname = origin.hostname
        origin_scheme = origin.scheme
        # If the console connection was forwarded by a proxy (example:
        # haproxy), the original protocol could be contained in the
        # X-Forwarded-Proto header instead of the Origin header. Prefer the
        # forwarded protocol if it is present.
        forwarded_proto = self.headers.get('X-Forwarded-Proto')
        if forwarded_proto is not None:
            origin_scheme = forwarded_proto
        if origin_hostname == '' or origin_scheme == '':
            detail = _("Origin header not valid.")
            raise exception.ValidationError(detail=detail)
        if origin_hostname not in expected_origin_hostnames:
            detail = _("Origin header does not match this host.")
            raise exception.ValidationError(detail=detail)
        if not self.verify_origin_proto(connect_info, origin_scheme):
            detail = _("Origin header protocol does not match this host.")
            raise exception.ValidationError(detail=detail)

    self.msg(_('connect info: %s'), str(connect_info))
    host = connect_info.host
    port = connect_info.port

    # Connect to the target
    self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
                                                      'port': port})
    tsock = self.socket(host, port, connect=True)

    # Handshake as necessary
    if 'internal_access_path' in connect_info:
        path = connect_info.internal_access_path
        if path:
            tsock.send(encodeutils.safe_encode(
                'CONNECT %s HTTP/1.1\r\n\r\n' % path))
            end_token = "\r\n\r\n"
            while True:
                # Peek so the proxied stream after the headers is kept.
                data = tsock.recv(4096, socket.MSG_PEEK)
                token_loc = data.find(end_token)
                if token_loc != -1:
                    if data.split("\r\n")[0].find("200") == -1:
                        raise exception.InvalidConnectionInfo()
                    # remove the response from recv buffer
                    tsock.recv(token_loc + len(end_token))
                    break

    if self.server.security_proxy is not None:
        tenant_sock = TenantSock(self)

        try:
            tsock = self.server.security_proxy.connect(tenant_sock, tsock)
        except exception.SecurityProxyNegotiationFailed:
            LOG.exception("Unable to perform security proxying, shutting "
                          "down connection")
            tenant_sock.close()
            tsock.shutdown(socket.SHUT_RDWR)
            tsock.close()
            raise

        tenant_sock.finish_up()

    # Start proxying
    try:
        self.do_proxy(tsock)
    except Exception:
        if tsock:
            tsock.shutdown(socket.SHUT_RDWR)
            tsock.close()
            self.vmsg(_("%(host)s:%(port)s: "
                        "Websocket client or target closed") %
                      {'host': host, 'port': port})
        raise
def setup(self, mount=True):
    """Launch the libguestfs appliance for self.image.

    :param mount: when True, also mount the guest OS filesystems and
        initialise Augeas.
    :raises: UnsupportedImageModel for image types other than local
        files and RBD
    :raises: NovaException when libguestfs fails to mount the image
    """
    LOG.debug("Setting up appliance for %(image)s",
              {'image': self.image})
    try:
        self.handle = tpool.Proxy(
            guestfs.GuestFS(python_return_dict=False,
                            close_on_exit=False))
    except TypeError as e:
        if 'close_on_exit' in str(e) or 'python_return_dict' in str(e):
            # NOTE(russellb) In case we're not using a version of
            # libguestfs new enough to support parameters close_on_exit
            # and python_return_dict which were added in libguestfs 1.20.
            self.handle = tpool.Proxy(guestfs.GuestFS())
        else:
            raise

    if CONF.guestfs.debug:
        self.configure_debug()

    try:
        if forceTCG:
            # TODO(mriedem): Should we be using set_backend_setting
            # instead to just set the single force_tcg setting? Because
            # according to the guestfs docs, set_backend_settings will
            # overwrite all backend settings. The question is, what would
            # the value be? True? "set_backend_setting" is available
            # starting in 1.27.2 which should be new enough at this point
            # on modern distributions.
            ret = self.handle.set_backend_settings(["force_tcg"])
            if ret != 0:
                LOG.warning('Failed to force guestfs TCG mode. '
                            'guestfs_set_backend_settings returned: %s',
                            ret)
    except AttributeError as ex:
        # set_backend_settings method doesn't exist in older
        # libguestfs versions, so nothing we can do but ignore
        LOG.warning("Unable to force TCG mode, "
                    "libguestfs too old? %s", ex)
        pass

    try:
        if isinstance(self.image, imgmodel.LocalImage):
            self.handle.add_drive_opts(self.image.path,
                                       format=self.image.format)
        elif isinstance(self.image, imgmodel.RBDImage):
            self.handle.add_drive_opts("%s/%s" % (self.image.pool,
                                                  self.image.name),
                                       protocol="rbd",
                                       format=imgmodel.FORMAT_RAW,
                                       server=self.image.servers,
                                       username=self.image.user,
                                       secret=self.image.password)
        else:
            raise exception.UnsupportedImageModel(
                self.image.__class__.__name__)

        self.handle.launch()

        if mount:
            self.setup_os()
            self.handle.aug_init("/", 0)
            self.mount = True
    except RuntimeError as e:
        # explicitly teardown instead of implicit close()
        # to prevent orphaned VMs in cases when an implicit
        # close() is not enough
        self.teardown()
        raise exception.NovaException(
            _("Error mounting %(image)s with libguestfs (%(e)s)") %
            {'image': self.image, 'e': e})
    except Exception:
        # explicitly teardown instead of implicit close()
        # to prevent orphaned VMs in cases when an implicit
        # close() is not enough
        self.teardown()
        raise
def connect_volume(self, connection_info, disk_info):
    """Attach the volume to instance_name.

    Registers the iSCSI target with iscsiadm (creating the record and
    setting auth as needed), logs in, then waits for the device node to
    appear before returning the libvirt config for it.

    :raises: NovaException when the device never appears within
        CONF.num_iscsi_scan_tries rescans
    """
    conf = super(LibvirtISCSIVolumeDriver,
                 self).connect_volume(connection_info, disk_info)

    iscsi_properties = connection_info['data']

    # NOTE(vish): If we are on the same host as nova volume, the
    #             discovery makes the target so we don't need to
    #             run --op new. Therefore, we check to see if the
    #             target exists, and if we get 255 (Not Found), then
    #             we run --op new. This will also happen if another
    #             volume is using the same target.
    try:
        self._run_iscsiadm(iscsi_properties, ())
    except exception.ProcessExecutionError as exc:
        # iscsiadm returns 21 for "No records found" after version 2.0-871
        if exc.exit_code in [21, 255]:
            self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
        else:
            raise

    if iscsi_properties.get('auth_method'):
        self._iscsiadm_update(iscsi_properties,
                              "node.session.auth.authmethod",
                              iscsi_properties['auth_method'])
        self._iscsiadm_update(iscsi_properties,
                              "node.session.auth.username",
                              iscsi_properties['auth_username'])
        self._iscsiadm_update(iscsi_properties,
                              "node.session.auth.password",
                              iscsi_properties['auth_password'])

    # NOTE(vish): If we have another lun on the same target, we may
    #             have a duplicate login
    self._run_iscsiadm(iscsi_properties, ("--login",),
                       check_exit_code=[0, 255])

    self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")

    host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
                   (iscsi_properties['target_portal'],
                    iscsi_properties['target_iqn'],
                    iscsi_properties.get('target_lun', 0)))

    # The /dev/disk/by-path/... node is not always present immediately
    # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
    tries = 0
    disk_dev = disk_info['dev']
    while not os.path.exists(host_device):
        if tries >= CONF.num_iscsi_scan_tries:
            raise exception.NovaException(_("iSCSI device not found at %s")
                                          % (host_device))

        LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. "
                   "Will rescan & retry.  Try number: %(tries)s") %
                 locals())

        # The rescan isn't documented as being necessary(?), but it helps
        self._run_iscsiadm(iscsi_properties, ("--rescan",))

        tries = tries + 1
        if not os.path.exists(host_device):
            # Quadratic back-off before the next existence check.
            time.sleep(tries ** 2)

    if tries != 0:
        LOG.debug(_("Found iSCSI node %(disk_dev)s "
                    "(after %(tries)s rescans)") % locals())

    conf.source_type = "block"
    conf.source_path = host_device
    return conf
def init_host(self, host):
    """Verify the Docker daemon is reachable before accepting work."""
    daemon_running = self._is_daemon_running()
    # Keep the exact 'is False' comparison: only an explicit False from
    # the check triggers the failure.
    if daemon_running is False:
        raise exception.NovaException(
            _('Docker daemon is not running or is not reachable'
              ' (check the rights on /var/run/docker.sock)'))