def get(self, context, name=None, id=None, map_exception=False):
    """Return one security group looked up by id or name.

    :param context: request context; its project_id scopes name lookups
    :param name: security group name to resolve when no id is given
    :param id: security group id; takes precedence over name
    :param map_exception: unused here; kept for interface compatibility
    :returns: the group in patron (nova-style) security group dict format
    :raises exception.NoUniqueMatch: several groups share the name
    :raises exception.SecurityGroupNotFound: no such group exists
    """
    neutron = neutronapi.get_client(context)
    try:
        if not id and name:
            # NOTE(flwang): The project id should be honoured so as to get
            # the correct security group id when user(with admin role but
            # non-admin project) try to query by name, so as to avoid
            # getting more than duplicated records with the same name.
            id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', name, context.project_id)
        group = neutron.show_security_group(id).get('security_group')
        return self._convert_to_patron_security_group_format(group)
    except n_exc.NeutronClientNoUniqueMatch as e:
        raise exception.NoUniqueMatch(six.text_type(e))
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            LOG.debug("Neutron security group %s not found", name)
            raise exception.SecurityGroupNotFound(six.text_type(e))
        else:
            LOG.error(_LE("Neutron Error: %s"), e)
            # six.reraise preserves the original traceback and works on
            # both Python 2 and 3, unlike the py2-only
            # ``raise t, v, tb`` form used before.
            six.reraise(*exc_info)
    except TypeError as e:
        LOG.error(_LE("Neutron Error: %s"), e)
        msg = _("Invalid security group name: %(name)s.") % {"name": name}
        raise exception.SecurityGroupNotFound(six.text_type(msg))
def add_to_instance(self, context, instance, security_group_name):
    """Add security group to the instance.

    :param context: request context
    :param instance: instance object whose ports get the group attached
    :param security_group_name: name (or id) of the security group
    :raises exception.NoUniqueMatch: several groups share the name
    :raises exception.SecurityGroupCannotBeApplied: a port does not meet
        security requirements (no port_security / no subnet)
    """
    neutron = neutronapi.get_client(context)
    try:
        security_group_id = neutronv20.find_resourceid_by_name_or_id(
            neutron, 'security_group',
            security_group_name,
            context.project_id)
    except n_exc.NeutronClientNoUniqueMatch as e:
        raise exception.NoUniqueMatch(six.text_type(e))
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            msg = (_("Security group %(name)s is not found for "
                     "project %(project)s") %
                   {'name': security_group_name,
                    'project': context.project_id})
            self.raise_not_found(msg)
        else:
            LOG.exception(_LE("Neutron Error:"))
            # six.reraise keeps the traceback and is valid on both
            # Python 2 and 3 (the old 3-expression raise is py2-only).
            six.reraise(*exc_info)
    params = {'device_id': instance.uuid}
    try:
        ports = neutron.list_ports(**params).get('ports')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error:"))
    if not ports:
        msg = (_("instance_id %s could not be found as device id on"
                 " any ports") % instance.uuid)
        self.raise_not_found(msg)
    for port in ports:
        if not self._has_security_group_requirements(port):
            LOG.warning(_LW("Cannot add security group %(name)s to "
                            "%(instance)s since the port %(port_id)s "
                            "does not meet security requirements"),
                        {'name': security_group_name,
                         'instance': instance.uuid,
                         'port_id': port['id']})
            raise exception.SecurityGroupCannotBeApplied()
        if 'security_groups' not in port:
            port['security_groups'] = []
        port['security_groups'].append(security_group_id)
        updated_port = {'security_groups': port['security_groups']}
        try:
            LOG.info(_LI("Adding security group %(security_group_id)s to "
                         "port %(port_id)s"),
                     {'security_group_id': security_group_id,
                      'port_id': port['id']})
            neutron.update_port(port['id'], {'port': updated_port})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))
def get_image_vm_generation(self, root_vhd_path, image_meta):
    """Resolve and validate the Hyper-V VM generation for an image.

    Falls back to the host's default generation when the image does
    not request one, and rejects unsupported generations as well as
    plain VHD root disks on generation-2 VMs.

    :returns: the validated VM generation constant
    :raises vmutils.HyperVException: on unsupported generation or on a
        VHD (non-VHDX) root disk for a gen-2 VM
    """
    requested_gen = image_meta['properties'].get(
        constants.IMAGE_PROP_VM_GEN,
        self._hostutils.get_default_vm_generation())
    if requested_gen not in self._hostutils.get_supported_vm_types():
        LOG.error(_LE('Requested VM Generation %s is not supported on '
                      ' this OS.'), requested_gen)
        raise vmutils.HyperVException(
            _('Requested VM Generation %s is not supported on this '
              'OS.') % requested_gen)
    vm_gen = VM_GENERATIONS[requested_gen]
    # Generation 2 VMs cannot boot from plain VHD images; require VHDX.
    is_vhd = (root_vhd_path and
              self._vhdutils.get_vhd_format(root_vhd_path) ==
              constants.DISK_FORMAT_VHD)
    if vm_gen != constants.VM_GEN_1 and is_vhd:
        LOG.error(_LE('Requested VM Generation %s, but provided VHD '
                      'instead of VHDX.'), vm_gen)
        raise vmutils.HyperVException(
            _('Requested VM Generation %s, but provided VHD instead of '
              'VHDX.') % vm_gen)
    return vm_gen
def add_rules(self, context, id, name, vals):
    """Add security group rule(s) to security group.

    Note: the Nova security group API doesn't support adding multiple
    security group rules at once but the EC2 one does. Therefore,
    this function is written to support both. Multiple rules are
    installed to a security group in neutron using bulk support.

    :param context: request context
    :param id: security group id (unused here; rules carry their group)
    :param name: group name, used only in log messages
    :param vals: list of rule dicts to install
    :returns: the created rules in patron security-group-rule format
    """
    neutron = neutronapi.get_client(context)
    body = self._make_neutron_security_group_rules_list(vals)
    try:
        rules = neutron.create_security_group_rule(
            body).get('security_group_rules')
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            LOG.exception(_LE("Neutron Error getting security group %s"),
                          name)
            self.raise_not_found(six.text_type(e))
        elif e.status_code == 409:
            LOG.exception(_LE("Neutron Error adding rules to security "
                              "group %s"), name)
            self.raise_over_quota(six.text_type(e))
        elif e.status_code == 400:
            LOG.exception(_LE("Neutron Error: %s"), six.text_type(e))
            self.raise_invalid_property(six.text_type(e))
        else:
            LOG.exception(_LE("Neutron Error:"))
            # py2/py3-compatible re-raise preserving the traceback,
            # replacing the py2-only 3-expression raise.
            six.reraise(*exc_info)
    return [self._convert_to_patron_security_group_rule_format(rule)
            for rule in rules]
def remove_from_instance(self, context, instance, security_group_name):
    """Remove the security group associated with the instance.

    :param context: request context
    :param instance: instance object whose ports the group is removed from
    :param security_group_name: name (or id) of the security group
    :raises: ``self.raise_not_found`` when the group, the ports, or the
        group-port association cannot be found
    """
    neutron = neutronapi.get_client(context)
    try:
        security_group_id = neutronv20.find_resourceid_by_name_or_id(
            neutron, 'security_group',
            security_group_name,
            context.project_id)
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            msg = (_("Security group %(name)s is not found for "
                     "project %(project)s") %
                   {'name': security_group_name,
                    'project': context.project_id})
            self.raise_not_found(msg)
        else:
            LOG.exception(_LE("Neutron Error:"))
            # py2/py3-compatible re-raise preserving the traceback,
            # replacing the py2-only 3-expression raise.
            six.reraise(*exc_info)
    params = {'device_id': instance.uuid}
    try:
        ports = neutron.list_ports(**params).get('ports')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error:"))
    if not ports:
        msg = (_("instance_id %s could not be found as device id on"
                 " any ports") % instance.uuid)
        self.raise_not_found(msg)
    found_security_group = False
    for port in ports:
        try:
            port.get('security_groups', []).remove(security_group_id)
        except ValueError:
            # When removing a security group from an instance the security
            # group should be on both ports since it was added this way if
            # done through the patron api. In case it is not a 404 is only
            # raised if the security group is not found on any of the
            # ports on the instance.
            continue
        updated_port = {'security_groups': port['security_groups']}
        try:
            # Fixed log text: this path detaches the group from the port
            # (the old message wrongly said "Adding ... to port").
            LOG.info(_LI("Removing security group %(security_group_id)s "
                         "from port %(port_id)s"),
                     {'security_group_id': security_group_id,
                      'port_id': port['id']})
            neutron.update_port(port['id'], {'port': updated_port})
            found_security_group = True
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))
    if not found_security_group:
        msg = (_("Security group %(security_group_name)s not associated "
                 "with the instance %(instance)s") %
               {'security_group_name': security_group_name,
                'instance': instance.uuid})
        self.raise_not_found(msg)
def _run(self, name, method_type, args, kwargs, func=None):
    """Invoke the named pre/post hook on every loaded extension.

    Each extension object may define a ``pre`` or ``post`` method; a
    FatalHookException aborts the run, any other hook failure is logged
    and skipped.

    :param name: hook name, used only for logging
    :param method_type: 'pre' or 'post'
    :param args: positional args forwarded to the hook
    :param kwargs: keyword args forwarded to the hook
    :param func: optional wrapped function passed as first hook argument
    :raises ValueError: on a method_type other than 'pre'/'post'
    :raises FatalHookException: re-raised from a hook
    """
    if method_type not in ('pre', 'post'):
        msg = _("Wrong type of hook method. "
                "Only 'pre' and 'post' type allowed")
        raise ValueError(msg)
    for extension in self.extensions:
        hook_obj = extension.obj
        hook_method = getattr(hook_obj, method_type, None)
        if not hook_method:
            continue
        LOG.debug("Running %(name)s %(type)s-hook: %(obj)s",
                  {'name': name, 'type': method_type, 'obj': hook_obj})
        try:
            if func:
                hook_method(func, *args, **kwargs)
            else:
                hook_method(*args, **kwargs)
        except FatalHookException:
            msg = _LE("Fatal Exception running %(name)s "
                      "%(type)s-hook: %(obj)s")
            LOG.exception(msg, {'name': name, 'type': method_type,
                                'obj': hook_obj})
            raise
        except Exception:
            msg = _LE("Exception running %(name)s "
                      "%(type)s-hook: %(obj)s")
            LOG.exception(msg, {'name': name, 'type': method_type,
                                'obj': hook_obj})
def add_to_instance(self, context, instance, security_group_name):
    """Add security group to the instance.

    :param context: request context
    :param instance: instance object whose ports get the group attached
    :param security_group_name: name (or id) of the security group
    :raises exception.NoUniqueMatch: several groups share the name
    :raises exception.SecurityGroupCannotBeApplied: a port does not meet
        security requirements
    """
    neutron = neutronapi.get_client(context)
    try:
        security_group_id = neutronv20.find_resourceid_by_name_or_id(
            neutron, 'security_group',
            security_group_name,
            context.project_id)
    except n_exc.NeutronClientNoUniqueMatch as e:
        raise exception.NoUniqueMatch(six.text_type(e))
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            msg = (_("Security group %(name)s is not found for "
                     "project %(project)s") %
                   {'name': security_group_name,
                    'project': context.project_id})
            self.raise_not_found(msg)
        else:
            LOG.exception(_LE("Neutron Error:"))
            # py2/py3-compatible re-raise preserving the traceback,
            # replacing the py2-only 3-expression raise.
            six.reraise(*exc_info)
    params = {'device_id': instance.uuid}
    try:
        ports = neutron.list_ports(**params).get('ports')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error:"))
    if not ports:
        msg = (_("instance_id %s could not be found as device id on"
                 " any ports") % instance.uuid)
        self.raise_not_found(msg)
    for port in ports:
        if not self._has_security_group_requirements(port):
            LOG.warning(_LW("Cannot add security group %(name)s to "
                            "%(instance)s since the port %(port_id)s "
                            "does not meet security requirements"),
                        {'name': security_group_name,
                         'instance': instance.uuid,
                         'port_id': port['id']})
            raise exception.SecurityGroupCannotBeApplied()
        if 'security_groups' not in port:
            port['security_groups'] = []
        port['security_groups'].append(security_group_id)
        updated_port = {'security_groups': port['security_groups']}
        try:
            LOG.info(_LI("Adding security group %(security_group_id)s to "
                         "port %(port_id)s"),
                     {'security_group_id': security_group_id,
                      'port_id': port['id']})
            neutron.update_port(port['id'], {'port': updated_port})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))
def create_shadow_table(migrate_engine, table_name=None, table=None,
                        **col_name_col_instance):
    """This method create shadow table for table with name ``table_name``
    or table instance ``table``.

    :param table_name: Autoload table with this name and create shadow table
    :param table: Autoloaded table, so just create corresponding shadow table.
    :param col_name_col_instance: contains pair column_name=column_instance.
        column_instance is instance of Column. These params are required only
        for columns that have unsupported types by sqlite. For example
        BigInteger.
    :returns: The created shadow_table object.

    NOTE(review): on an unexpected creation failure the final ``except
    Exception`` below only logs and falls through, so the function then
    returns ``None`` instead of the promised table object — confirm
    whether callers rely on this best-effort behavior before changing it.
    """
    meta = MetaData(bind=migrate_engine)
    # Exactly one of table_name / table must be supplied.
    if table_name is None and table is None:
        raise exception.PatronException(_("Specify `table_name` or `table` "
                                          "param"))
    if not (table_name is None or table is None):
        raise exception.PatronException(_("Specify only one param `table_name` "
                                          "`table`"))
    if table is None:
        table = Table(table_name, meta, autoload=True)
    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            # sqlite could not represent this column's type; substitute
            # the caller-provided replacement column instead.
            new_column = oslodbutils._get_not_supported_column(
                col_name_col_instance, column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())
    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
    shadow_table = Table(shadow_table_name, meta, *columns,
                         mysql_engine='InnoDB')
    try:
        shadow_table.create()
        return shadow_table
    except (db_exc.DBError, OperationalError):
        # NOTE(ekudryashova): At the moment there is a case in oslo.db code,
        # which raises unwrapped OperationalError, so we should catch it until
        # oslo.db would wraps all such exceptions
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
        raise exception.ShadowTableExists(name=shadow_table_name)
    except Exception:
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
def umount_volume(mnt_base):
    """Wraps execute calls for unmounting a Quobyte volume.

    :param mnt_base: mount point of the Quobyte volume
    """
    try:
        utils.execute('umount.quobyte', mnt_base)
    except processutils.ProcessExecutionError as exc:
        # ``exc.message`` is deprecated and absent on Python 3; render
        # the exception text portably instead.
        if 'Device or resource busy' in six.text_type(exc):
            LOG.error(_LE("The Quobyte volume at %s is still in use."),
                      mnt_base)
        else:
            LOG.exception(_LE("Couldn't unmount the Quobyte Volume at %s"),
                          mnt_base)
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

        * DEBUG: expected errors
        * ERROR: unexpected errors

    All expected errors are treated as client errors and 4xx HTTP
    status codes are always returned for them.

    Unexpected 5xx errors may contain sensitive information,
    suppress their messages for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    status = getattr(ex, 'code', None)
    if not status:
        status = 500
    if unexpected:
        log_fun = LOG.error
        log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
    else:
        log_fun = LOG.debug
        log_msg = "%(ex_name)s raised: %(ex_str)s"
    # NOTE(jruzicka): For compatibility with EC2 API, treat expected
    # exceptions as client (4xx) errors. The exception error code is 500
    # by default and most exceptions inherit this from PatronException even
    # though they are actually client errors in most cases.
    if status >= 500:
        status = 400
    context = req.environ['patron.context']
    request_id = context.request_id
    log_msg_args = {'ex_name': type(ex).__name__, 'ex_str': ex}
    log_fun(log_msg, log_msg_args, context=context)
    if ex.args and not message and (not unexpected or status < 500):
        # six.text_type works on Python 2 and 3; the ``unicode`` builtin
        # used before does not exist on Python 3.
        message = six.text_type(ex.args[0])
    if unexpected:
        # Log filtered environment for unexpected errors. Build a new
        # dict instead of popping from the dict being iterated: mutating
        # a dict during iteration breaks on Python 3.
        env = dict((k, v) for k, v in six.iteritems(req.environ)
                   if isinstance(v, six.string_types))
        log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occurred.')
    return faults.ec2_error_response(request_id, code, message,
                                     status=status)
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

        * DEBUG: expected errors
        * ERROR: unexpected errors

    All expected errors are treated as client errors and 4xx HTTP
    status codes are always returned for them.

    Unexpected 5xx errors may contain sensitive information,
    suppress their messages for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    status = getattr(ex, 'code', None)
    if not status:
        status = 500
    if unexpected:
        log_fun = LOG.error
        log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
    else:
        log_fun = LOG.debug
        log_msg = "%(ex_name)s raised: %(ex_str)s"
    # NOTE(jruzicka): For compatibility with EC2 API, treat expected
    # exceptions as client (4xx) errors. The exception error code is 500
    # by default and most exceptions inherit this from PatronException even
    # though they are actually client errors in most cases.
    if status >= 500:
        status = 400
    context = req.environ['patron.context']
    request_id = context.request_id
    log_msg_args = {'ex_name': type(ex).__name__, 'ex_str': ex}
    log_fun(log_msg, log_msg_args, context=context)
    if ex.args and not message and (not unexpected or status < 500):
        # six.text_type works on Python 2 and 3; the ``unicode`` builtin
        # used before does not exist on Python 3.
        message = six.text_type(ex.args[0])
    if unexpected:
        # Log filtered environment for unexpected errors. Build a new
        # dict instead of popping from the dict being iterated: mutating
        # a dict during iteration breaks on Python 3.
        env = dict((k, v) for k, v in six.iteritems(req.environ)
                   if isinstance(v, six.string_types))
        log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occurred.')
    return faults.ec2_error_response(request_id, code, message,
                                     status=status)
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
    """Introduce VDI in the host.

    Look the VDI up on the given SR (by uuid or LUN), rescanning the SR
    once if it is not visible yet, and ask XenAPI to "introduce" it so
    the host starts managing it. Already-managed VDIs are returned as-is.

    :param session: XenAPI session wrapper
    :param sr_ref: reference of the SR expected to hold the VDI
    :param vdi_uuid: uuid of the VDI to find (optional)
    :param target_lun: LUN of the VDI to find (optional)
    :returns: a VDI reference
    :raises exception.StorageError: when the VDI cannot be found, read
        or introduced
    """
    try:
        vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
        if vdi_ref is None:
            # The VDI may not be visible yet: wait a little, rescan the
            # SR and retry the lookup once.
            greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait)
            session.call_xenapi("SR.scan", sr_ref)
            vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to introduce VDI on SR'))
        raise exception.StorageError(
            reason=_('Unable to introduce VDI on SR %s') % sr_ref)
    if not vdi_ref:
        raise exception.StorageError(
            reason=_('VDI not found on SR %(sr)s (vdi_uuid '
                     '%(vdi_uuid)s, target_lun %(target_lun)s)') %
            {'sr': sr_ref, 'vdi_uuid': vdi_uuid,
             'target_lun': target_lun})
    try:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        LOG.debug(vdi_rec)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to get record of VDI'))
        raise exception.StorageError(
            reason=_('Unable to get record of VDI %s on') % vdi_ref)
    if vdi_rec['managed']:
        # We do not need to introduce the vdi
        return vdi_ref
    try:
        # Re-register the VDI with all of its existing metadata so the
        # host takes over management of it.
        return session.call_xenapi("VDI.introduce",
                                   vdi_rec['uuid'],
                                   vdi_rec['name_label'],
                                   vdi_rec['name_description'],
                                   vdi_rec['SR'],
                                   vdi_rec['type'],
                                   vdi_rec['sharable'],
                                   vdi_rec['read_only'],
                                   vdi_rec['other_config'],
                                   vdi_rec['location'],
                                   vdi_rec['xenstore_data'],
                                   vdi_rec['sm_config'])
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to introduce VDI for SR'))
        raise exception.StorageError(
            reason=_('Unable to introduce VDI for SR %s') % sr_ref)
def get_volume_encryptor(connection_info, **kwargs):
    """Creates a VolumeEncryptor used to encrypt the specified volume.

    Defaults to a no-op encryptor; a real one is instantiated only for
    front-end encryption with a known provider.

    :param: the connection information used to attach the volume
    :returns VolumeEncryptor: the VolumeEncryptor for the volume
    """
    encryptor = nop.NoOpEncryptor(connection_info, **kwargs)
    location = kwargs.get('control_location', None)
    if location and location.lower() == 'front-end':  # case insensitive
        provider = kwargs.get('provider')
        # Expand short provider aliases to their full module paths.
        module_prefixes = {
            'LuksEncryptor': 'patron.volume.encryptors.luks.',
            'CryptsetupEncryptor': 'patron.volume.encryptors.cryptsetup.',
            'NoOpEncryptor': 'patron.volume.encryptors.nop.',
        }
        prefix = module_prefixes.get(provider)
        if prefix:
            provider = prefix + provider
        try:
            encryptor = importutils.import_object(provider,
                                                  connection_info,
                                                  **kwargs)
        except Exception as e:
            LOG.error(_LE("Error instantiating %(provider)s: "
                          "%(exception)s"),
                      {'provider': provider, 'exception': e})
            raise
    return encryptor
def wrapped(*args, **kwargs):
    """Invoke *f*; pass through expected errors, map the rest to 500."""
    try:
        return f(*args, **kwargs)
    except Exception as exc:
        propagate = False
        if isinstance(exc, webob.exc.WSGIHTTPException):
            t_errors = (errors,) if isinstance(errors, int) else errors
            propagate = exc.code in t_errors
        elif isinstance(exc, exception.PolicyNotAuthorized):
            # Note(cyeoh): Special case to handle
            # PolicyNotAuthorized exceptions so every
            # extension method does not need to wrap authorize
            # calls. ResourceExceptionHandler silently
            # converts NotAuthorized to HTTPForbidden
            propagate = True
        elif isinstance(exc, exception.ValidationError):
            # Note(oomichi): Handle a validation error, which
            # happens due to invalid API parameters, as an
            # expected error.
            propagate = True
        if propagate:
            raise
        LOG.exception(_LE("Unexpected exception in API method"))
        msg = _('Unexpected API Error. Please report this at '
                'http://bugs.launchpad.net/patron/ and attach the Nova '
                'API log if possible.\n%s') % type(exc)
        raise webob.exc.HTTPInternalServerError(explanation=msg)
def unplug_ovs_hybrid(self, instance, vif):
    """UnPlug using hybrid strategy

    Unhook port from OVS, unhook port from bridge, delete
    bridge, and delete both veth devices.
    """
    try:
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])
        if linux_net.device_exists(br_name):
            # Detach the veth, take the bridge down, then remove it.
            for cmd in (('brctl', 'delif', br_name, v1_name),
                        ('ip', 'link', 'set', br_name, 'down'),
                        ('brctl', 'delbr', br_name)):
                utils.execute(*cmd, run_as_root=True)
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                      v2_name)
    except processutils.ProcessExecutionError:
        LOG.exception(_LE("Failed while unplugging vif"),
                      instance=instance)
def plug_iovisor(self, instance, vif):
    """Plug using PLUMgrid IO Visor Driver

    Connect a network device to their respective
    Virtual Domain in PLUMgrid Platform.
    """
    dev = self.get_vif_devname(vif)
    linux_net.create_tap_dev(dev)
    port_label = vif['network']['label'] + "_" + vif['id']
    try:
        utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
                      run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
                      'access_vm', port_label,
                      vif['address'],
                      'pgtag2=%s' % vif['network']['id'],
                      'pgtag1=%s' % instance.project_id,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.exception(_LE("Failed while plugging vif"),
                      instance=instance)
def join(self, member, group, service=None):
    """Add a new member to a service group.

    :param member: the joined member ID/name
    :param group: the group ID/name, of the joined member
    :param service: a `patron.service.Service` object
    """
    process_id = str(os.getpid())
    LOG.debug('ZooKeeperDriver: join new member %(id)s(%(pid)s) to the '
              '%(gr)s group, service=%(sr)s',
              {'id': member, 'pid': process_id,
               'gr': group, 'sr': service})
    # Do not clobber the ``member`` argument with the cache lookup:
    # previously the parameter was overwritten, so on a cache miss the
    # znode path was built with ``None`` instead of the member name.
    cached = self._memberships.get((group, member), None)
    if cached is None:
        # the first time to join. Generate a new object
        path = "%s/%s/%s" % (CONF.zookeeper.sg_prefix, group, member)
        try:
            zk_member = membership.Membership(self._session, path,
                                              process_id)
        except RuntimeError:
            LOG.exception(_LE("Unable to join. It is possible that either"
                              " another node exists with the same name, or"
                              " this node just restarted. We will try "
                              "again in a short while to make sure."))
            eventlet.sleep(CONF.zookeeper.sg_retry_interval)
            # Retry with the same payload (process_id) as the first
            # attempt; the old code wrongly passed the clobbered
            # ``member`` here.
            zk_member = membership.Membership(self._session, path,
                                              process_id)
        self._memberships[(group, member)] = zk_member
def _error(self, inner, req):
    """Translate the exception *inner* into a wsgi Fault response.

    :param inner: the exception caught while handling *req*
    :param req: the webob request being served
    :returns: a wsgi.Fault wrapping the appropriate HTTP error
    """
    # six.text_type replaces the py2-only ``unicode`` builtin.
    LOG.exception(_LE("Caught error: %s"), six.text_type(inner))
    safe = getattr(inner, 'safe', False)
    headers = getattr(inner, 'headers', None)
    status = getattr(inner, 'code', 500)
    if status is None:
        status = 500
    msg_dict = dict(url=req.url, status=status)
    LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
    outer = self.status_to_type(status)
    if headers:
        outer.headers = headers
    # NOTE(johannes): We leave the explanation empty here on
    # purpose. It could possibly have sensitive information
    # that should not be returned back to the user. See
    # bugs 868360 and 874472
    # NOTE(eglynn): However, it would be over-conservative and
    # inconsistent with the EC2 API to hide every exception,
    # including those that are safe to expose, see bug 1021373
    if safe:
        user_locale = req.best_match_language()
        # NOTE(review): ``inner.message`` is deprecated on modern
        # exceptions — presumably kept for oslo translation support;
        # confirm before replacing it with six.text_type(inner).
        inner_msg = translate(inner.message, user_locale)
        outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                        inner_msg)
    notifications.send_api_fault(req.url, status, inner)
    return wsgi.Fault(outer)
def create_key(self, ctxt, expiration=None, name='Nova Compute Key',
               payload_content_type='application/octet-stream', mode='CBC',
               algorithm='AES', length=256):
    """Creates a key.

    :param ctxt: contains information of the user and the environment
                 for the request (patron/context.py)
    :param expiration: the date the key will expire
    :param name: a friendly name for the secret
    :param payload_content_type: the format/type of the secret data
    :param mode: the algorithm mode (e.g. CBC or CTR mode)
    :param algorithm: the algorithm associated with the secret
    :param length: the bit length of the secret
    :return: the UUID of the new key
    :raises Exception: if key creation fails
    """
    barbican_client = self._get_barbican_client(ctxt)
    try:
        # Submit the key order and look it up again to obtain the
        # secret reference of the generated key.
        order_ref = barbican_client.orders.create_key(
            name, algorithm, length, mode, payload_content_type,
            expiration).submit()
        order = barbican_client.orders.get(order_ref)
        return self._retrieve_secret_uuid(order.secret_ref)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error creating key: %s"), e)
def delete(self, req, id):
    """Delete a network, committing its quota reservation on success."""
    context = req.environ['patron.context']
    authorize(context)
    reservation = None
    try:
        if CONF.enable_network_quota:
            reservation = QUOTAS.reserve(context, networks=-1)
    except Exception:
        # Best effort: proceed with the delete even when the quota
        # reservation cannot be made.
        reservation = None
        LOG.exception(_LE("Failed to update usages deallocating "
                          "network."))

    def _undo_reservation(res):
        # Roll back the reservation when the delete cannot proceed.
        if CONF.enable_network_quota and res:
            QUOTAS.rollback(context, res)

    try:
        self.network_api.delete(context, id)
    except exception.PolicyNotAuthorized as e:
        _undo_reservation(reservation)
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.NetworkInUse as e:
        _undo_reservation(reservation)
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.NetworkNotFound:
        _undo_reservation(reservation)
        raise exc.HTTPNotFound(explanation=_("Network not found"))

    if CONF.enable_network_quota and reservation:
        QUOTAS.commit(context, reservation)
    return webob.Response(status_int=202)
def create_secret(self, usage_type, usage_id, password=None):
    """Create a libvirt secret.

    usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
               'rbd' will be converted to 'ceph'.
    usage_id: name of resource in secret

    :param password: optional secret value to set on the new secret
    :returns: the defined libvirt secret object
    :raises exception.PatronException: on an unknown usage_type
    """
    secret_conf = vconfig.LibvirtConfigSecret()
    secret_conf.ephemeral = False
    secret_conf.private = False
    secret_conf.usage_id = usage_id
    if usage_type in ('rbd', 'ceph'):
        secret_conf.usage_type = 'ceph'
    elif usage_type == 'iscsi':
        secret_conf.usage_type = 'iscsi'
    elif usage_type == 'volume':
        secret_conf.usage_type = 'volume'
    else:
        msg = _("Invalid usage_type: %s")
        raise exception.PatronException(msg % usage_type)
    xml = secret_conf.to_xml()
    try:
        # Lazy %-style logging args: formatting is skipped when the
        # level is disabled and cannot itself crash the log call.
        LOG.debug('Secret XML: %s', xml)
        conn = self.get_connection()
        secret = conn.secretDefineXML(xml)
        if password is not None:
            secret.setValue(password)
        return secret
    except libvirt.libvirtError:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error defining a secret with XML: %s'), xml)
def get_key(self, ctxt, key_id,
            payload_content_type='application/octet-stream'):
    """Retrieves the specified key.

    :param ctxt: contains information of the user and the environment
                 for the request (patron/context.py)
    :param key_id: the UUID of the key to retrieve
    :param payload_content_type: The format/type of the secret data
    :return: SymmetricKey representation of the key
    :raises Exception: if key retrieval fails
    """
    try:
        secret = self._get_secret(ctxt, key_id)
        secret_data = self._get_secret_data(secret, payload_content_type)
        if payload_content_type == 'application/octet-stream':
            # Octet streams arrive base64 encoded; expose the payload
            # as a list of per-byte unsigned ints.
            key_data = list(bytearray(base64.b64decode(secret_data)))
        else:
            key_data = secret_data
        return keymgr_key.SymmetricKey(secret.algorithm, key_data)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error getting key: %s"), e)
def main():
    """Parse environment and arguments and call the appropriate action."""
    # CONFIG_FILE carries a JSON-encoded list of config files to load.
    config.parse_args(sys.argv,
        default_config_files=jsonutils.loads(os.environ['CONFIG_FILE']))
    logging.setup(CONF, "patron")
    global LOG
    LOG = logging.getLogger('patron.dhcpbridge')
    objects.register_all()
    if not CONF.conductor.use_local:
        # Direct database access is not allowed from this process;
        # route object calls through the conductor service instead.
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    if CONF.action.name in ['add', 'del', 'old']:
        # dnsmasq invoked us for a lease event: dispatch to the handler.
        LOG.debug("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'",
                  {"action": CONF.action.name,
                   "mac": CONF.action.mac,
                   "ip": CONF.action.ip})
        CONF.action.func(CONF.action.mac, CONF.action.ip)
    else:
        # "init" mode: print the current leases for the given network.
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except TypeError:
            # os.environ.get returned None (variable unset).
            LOG.error(_LE("Environment variable 'NETWORK_ID' must be set."))
            return(1)
        print(init_leases(network_id))
    rpc.cleanup()
def teardown_container(container_dir, container_root_device=None):
    """Teardown the container rootfs mounting once it is spawned.

    It will umount the container that is mounted, and delete any
    linked devices.
    """
    try:
        _DiskImage(image=None, mount_dir=container_dir).teardown()
        # Make sure container_root_device is released when teardown
        # container.
        if not container_root_device:
            return
        if 'loop' in container_root_device:
            LOG.debug("Release loop device %s", container_root_device)
            utils.execute('losetup', '--detach', container_root_device,
                          run_as_root=True, attempts=3)
        else:
            LOG.debug('Release nbd device %s', container_root_device)
            utils.execute('qemu-nbd', '-d', container_root_device,
                          run_as_root=True)
    except Exception:
        LOG.exception(_LE('Failed to teardown container filesystem'))
def __exit__(self, ex_type, ex_value, ex_traceback):
    """Translate exceptions raised in the managed block into Faults."""
    if not ex_value:
        return True

    if isinstance(ex_value, exception.Forbidden):
        raise Fault(webob.exc.HTTPForbidden(
            explanation=ex_value.format_message()))
    if isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
        raise
    if isinstance(ex_value, exception.Invalid):
        raise Fault(exception.ConvertedException(
            code=ex_value.code,
            explanation=ex_value.format_message()))
    if isinstance(ex_value, TypeError):
        # Programming error inside the handler: log the full traceback
        # but answer the client with a generic 400.
        exc_info = (ex_type, ex_value, ex_traceback)
        LOG.error(_LE('Exception handling resource: %s'), ex_value,
                  exc_info=exc_info)
        raise Fault(webob.exc.HTTPBadRequest())
    if isinstance(ex_value, Fault):
        LOG.info(_LI("Fault thrown: %s"), ex_value)
        raise ex_value
    if isinstance(ex_value, webob.exc.HTTPException):
        LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
        raise Fault(ex_value)

    # We didn't handle the exception
    return False
def obj_class_from_name(cls, objname, objver):
    """Returns a class from the registry based on a name and version."""
    if objname not in cls._obj_classes:
        LOG.error(_LE('Unable to instantiate unregistered object type '
                      '%(objtype)s'), dict(objtype=objname))
        raise exception.UnsupportedObjectError(objtype=objname)

    registered = cls._obj_classes[objname]
    # NOTE(comstud): If there's not an exact match, return the highest
    # compatible version. ``registered`` is sorted newest-first, so the
    # first compatible candidate encountered is the best one.
    best_compatible = None
    for candidate in registered:
        if candidate.VERSION == objver:
            return candidate
        if (best_compatible is None and
                versionutils.is_compatible(objver, candidate.VERSION)):
            best_compatible = candidate
    if best_compatible is not None:
        return best_compatible

    # Newest version is always first in the registry list.
    latest_ver = registered[0].VERSION
    raise exception.IncompatibleObjectVersion(objname=objname,
                                              objver=objver,
                                              supported=latest_ver)
def unplug_iovisor(self, instance, vif):
    """Unplug using PLUMgrid IO Visor Driver

    Delete network device and to their respective
    connection to the Virtual Domain in PLUMgrid Platform.
    """
    dev = self.get_vif_devname(vif)
    port_label = vif['network']['label'] + "_" + vif['id']
    try:
        utils.execute('ifc_ctl', 'gateway', 'ifdown',
                      dev, 'access_vm', port_label,
                      vif['address'], run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
                      run_as_root=True)
        linux_net.delete_net_dev(dev)
    except processutils.ProcessExecutionError:
        LOG.exception(_LE("Failed while unplugging vif"),
                      instance=instance)
def _refresh_default_networks(self):
    """Reload the cached default networks; empty the cache on failure."""
    self._default_networks = []
    # NOTE(review): compared against the string "True" — presumably the
    # option is declared as a StrOpt; confirm before switching to a
    # boolean comparison.
    if CONF.use_neutron_default_nets != "True":
        return
    try:
        self._default_networks = self._get_default_networks()
    except Exception:
        LOG.exception(_LE("Failed to get default networks"))
def unplug_ivs_ethernet(self, instance, vif):
    """Unplug the VIF by deleting the port from the bridge."""
    try:
        dev = self.get_vif_devname(vif)
        linux_net.delete_ivs_vif_port(dev)
    except processutils.ProcessExecutionError:
        LOG.exception(_LE("Failed while unplugging vif"),
                      instance=instance)
def _reset_state(self, req, id, body):
    """Permit admins to reset the state of a server."""
    context = req.environ["patron.context"]
    authorize(context, 'resetState')

    # Map the requested state name onto a vm_state value.
    try:
        target_state = state_map[body["os-resetState"]["state"]]
    except (TypeError, KeyError):
        msg = _("Desired state must be specified. Valid states "
                "are: %s") % ', '.join(sorted(state_map.keys()))
        raise exc.HTTPBadRequest(explanation=msg)

    instance = common.get_instance(self.compute_api, context, id)
    try:
        instance.vm_state = target_state
        instance.task_state = None
        # admin_state_reset bypasses the normal state-transition checks.
        instance.save(admin_state_reset=True)
    except exception.InstanceNotFound:
        raise exc.HTTPNotFound(explanation=_("Server not found"))
    except Exception:
        trace = traceback.format_exc()
        LOG.exception(_LE("Compute.api::resetState %s"), trace)
        raise exc.HTTPUnprocessableEntity()
    return webob.Response(status_int=202)
def _load_file(self, handle):
    """Parse *handle* as JSON; log and return {} on malformed input.

    Broken out for testing.
    """
    try:
        parsed = jsonutils.load(handle)
    except ValueError:
        LOG.exception(_LE("Could not decode scheduler options"))
        return {}
    return parsed
def list(self, context, names=None, ids=None, project=None,
         search_opts=None):
    """Return the security groups visible to the tenant.

    Despite the old docstring, this returns security *groups* (in
    patron format), not security group rules.

    :param names: filter results by these group names
    :param ids: filter results by these group ids
    :param project: filter results by this tenant id
    :param search_opts: currently ignored; kept for interface
        compatibility — TODO confirm whether any caller expects it to
        be honored before wiring it through
    :returns: list of security groups in patron dict format
    """
    neutron = neutronapi.get_client(context)
    params = {}
    if names:
        params['name'] = names
    if ids:
        params['id'] = ids
    if project:
        params['tenant_id'] = project
    try:
        security_groups = neutron.list_security_groups(
            **params).get('security_groups')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error getting security groups"))
    # Renamed from the misleading ``converted_rules``: these are groups.
    return [self._convert_to_patron_security_group_format(group)
            for group in security_groups]
def _sign_csr(csr_text, ca_folder):
    """Sign a CSR with the CA living in ``ca_folder``.

    Writes the CSR text to a temp dir, runs openssl from the CA folder
    (which reads ./openssl.cnf), and returns a tuple of
    (certificate serial, signed certificate text).

    :param csr_text: PEM text of the certificate signing request
    :param ca_folder: directory containing the CA config/keys
    :raises IOError: (re-raised) if the CSR cannot be written
    """
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, 'inbound.csr')
        outbound = os.path.join(tmpdir, 'outbound.csr')

        try:
            with open(inbound, 'w') as csrfile:
                csrfile.write(csr_text)
        except IOError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to write inbound.csr'))

        LOG.debug('Flags path: %s', ca_folder)
        start = os.getcwd()

        # Change working dir to CA; openssl resolves ./openssl.cnf
        # relative to it.
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        try:
            utils.execute('openssl', 'ca', '-batch', '-out', outbound,
                          '-config', './openssl.cnf', '-infiles', inbound)
            out, _err = utils.execute('openssl', 'x509', '-in', outbound,
                                      '-serial', '-noout')
        finally:
            # Bug fix: the original never restored the working directory
            # when an openssl call failed, leaving the process chdir'd.
            os.chdir(start)

        # str.strip() replaces the deprecated string.strip() helper.
        serial = out.rpartition('=')[2].strip()
        with open(outbound, 'r') as crtfile:
            return (serial, crtfile.read())
def copy_key(self, ctxt, key_id):
    """Copies (i.e., clones) a key stored by barbican.

    :param ctxt: contains information of the user and the environment
                 for the request (patron/context.py)
    :param key_id: the UUID of the key to copy
    :return: the UUID of the key copy
    :raises Exception: if key copying fails
    """
    try:
        original = self._get_secret(ctxt, key_id)
        content_type = original.content_types['default']
        payload = self._get_secret_data(original,
                                        payload_content_type=content_type)
        clone = keymgr_key.SymmetricKey(original.algorithm, payload)
        # Store the clone, carrying over all of the original secret's
        # metadata, and hand back the new UUID.
        return self.store_key(ctxt, clone, original.expiration,
                              original.name, content_type, 'base64',
                              original.algorithm, original.bit_length,
                              original.mode, True)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error copying key: %s"), e)
def wrapped(*args, **kwargs):
    """Invoke f, translating unexpected failures into HTTP 500."""
    try:
        return f(*args, **kwargs)
    except Exception as exc:
        if isinstance(exc, webob.exc.WSGIHTTPException):
            # Only re-raise HTTP errors the caller declared as expected.
            t_errors = (errors,) if isinstance(errors, int) else errors
            if exc.code in t_errors:
                raise
        elif isinstance(exc, (exception.PolicyNotAuthorized,
                              exception.ValidationError)):
            # Note(cyeoh): PolicyNotAuthorized is special-cased so every
            # extension method does not need to wrap authorize calls;
            # ResourceExceptionHandler silently converts NotAuthorized
            # to HTTPForbidden.
            # Note(oomichi): ValidationError (invalid API parameters)
            # is an expected error, not a server fault.
            raise
        LOG.exception(_LE("Unexpected exception in API method"))
        msg = _(
            'Unexpected API Error. Please report this at '
            'http://bugs.launchpad.net/patron/ and attach the Nova '
            'API log if possible.\n%s') % type(exc)
        raise webob.exc.HTTPInternalServerError(explanation=msg)
def do_associate():
    # Record the association in the DB, then program it on the driver.
    # Closure: context, floating_address, fixed_address, interface,
    # instance_uuid and self come from the enclosing scope.
    # associate floating ip
    floating = objects.FloatingIP.associate(context, floating_address,
                                            fixed_address, self.host)
    fixed = floating.fixed_ip
    if not fixed:
        # NOTE(vish): ip was already associated
        return
    try:
        # gogo driver time
        self.l3driver.add_floating_ip(floating_address, fixed_address,
                                      interface, fixed['network'])
    except processutils.ProcessExecutionError as e:
        # Driver failed: roll back the DB association (best effort),
        # then re-raise the original error via save_and_reraise.
        with excutils.save_and_reraise_exception():
            try:
                objects.FloatingIP.disassociate(context,
                                                floating_address)
            except Exception:
                LOG.warning(_LW('Failed to disassociated floating '
                                'address: %s'), floating_address)
                pass
            if "Cannot find device" in six.text_type(e):
                try:
                    LOG.error(_LE('Interface %s not found'), interface)
                except Exception:
                    pass
                # Replace the raw process error with a clearer one.
                raise exception.NoFloatingIpInterface(
                    interface=interface)
    payload = dict(project_id=context.project_id,
                   instance_id=instance_uuid,
                   floating_ip=floating_address)
    # Emit the notification only after a successful association.
    self.notifier.info(context,
                       'network.floating_ip.associate', payload)
def verify_base_size(self, base, size, base_size=0):
    """Check that the base image is not larger than size.

    Since images can't be generally shrunk, enforce this constraint
    taking account of virtual image size.

    :param base: path to the backing/base image
    :param size: flavor root disk size in bytes, or None to skip check
    :param base_size: known virtual size of the base image; looked up
        via self.get_disk_size(base) when not supplied
    :raises exception.FlavorDiskTooSmall: if the base image is larger
    """
    # Note(pbrady): The size and min_disk parameters of a glance
    # image are checked against the instance size before the image
    # is even downloaded from glance, but currently min_disk is
    # adjustable and doesn't currently account for virtual disk size,
    # so we need this extra check here.
    # NOTE(cfb): Having a flavor that sets the root size to 0 and having
    # patron effectively ignore that size and use the size of the
    # image is considered a feature at this time, not a bug.
    if size is None:
        return

    if size and not base_size:
        base_size = self.get_disk_size(base)

    if size < base_size:
        # Fix: pass the format args to LOG.error so interpolation is
        # done lazily by the logger (was eager "msg % {...}").
        LOG.error(_LE('%(base)s virtual size %(base_size)s '
                      'larger than flavor root disk size %(size)s'),
                  {'base': base, 'base_size': base_size, 'size': size})
        raise exception.FlavorDiskTooSmall()
def _cleanup_deploy(self, context, node, instance, network_info,
                    flavor=None):
    """Undo a deployment: unassociate the ironic node from the
    instance and tear down VIFs and firewall state.
    """
    if flavor is None:
        flavor = instance.flavor
    cleanup_patch = patcher.create(node).get_cleanup_patch(
        instance, network_info, flavor)
    # Unassociate the node
    cleanup_patch.append({'op': 'remove', 'path': '/instance_uuid'})
    try:
        self.ironicclient.call('node.update', node.uuid, cleanup_patch)
    except ironic.exc.BadRequest:
        LOG.error(
            _LE("Failed to clean up the parameters on node %(node)s "
                "when unprovisioning the instance %(instance)s"),
            {'node': node.uuid, 'instance': instance.uuid})
        reason = (_("Fail to clean up node %s parameters") % node.uuid)
        raise exception.InstanceTerminationFailure(reason=reason)

    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
def delete(self, req, id):
    """Delete a server group and return its quota to the creator."""
    context = _authorize_context(req)
    try:
        server_group = objects.InstanceGroup.get_by_uuid(context, id)
    except patron.exception.InstanceGroupNotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())

    # Reserve a -1 delta so the quota goes back to the user that
    # created the group; on failure, proceed without touching quotas.
    quotas = objects.Quotas(context=context)
    project_id, user_id = objects.quotas.ids_from_server_group(
        context, server_group)
    try:
        quotas.reserve(project_id=project_id, user_id=user_id,
                       server_groups=-1)
    except Exception:
        quotas = None
        LOG.exception(_LE("Failed to update usages deallocating "
                          "server group"))

    try:
        server_group.destroy()
    except patron.exception.InstanceGroupNotFound as e:
        if quotas:
            quotas.rollback()
        raise webob.exc.HTTPNotFound(explanation=e.format_message())

    if quotas:
        quotas.commit()
def obj_class_from_name(cls, objname, objver):
    """Returns a class from the registry based on a name and version."""
    if objname not in cls._obj_classes:
        LOG.error(
            _LE('Unable to instantiate unregistered object type '
                '%(objtype)s'), dict(objtype=objname))
        raise exception.UnsupportedObjectError(objtype=objname)

    candidates = cls._obj_classes[objname]
    # NOTE(comstud): Registry entries are sorted highest-version-first,
    # so return an exact match immediately and remember only the first
    # (i.e. highest) compatible version seen.
    compatible = None
    for candidate in candidates:
        if candidate.VERSION == objver:
            return candidate
        if (compatible is None and
                versionutils.is_compatible(objver, candidate.VERSION)):
            compatible = candidate

    if compatible is not None:
        return compatible

    # Latest version heads the sorted candidate list.
    latest_ver = candidates[0].VERSION
    raise exception.IncompatibleObjectVersion(objname=objname,
                                              objver=objver,
                                              supported=latest_ver)
def __init__(self, cell_state_cls=None):
    """Set up cell state, syncing from the DB with retries."""
    super(CellStateManager, self).__init__()
    self.cell_state_cls = cell_state_cls or CellState
    self.my_cell_state = self.cell_state_cls(CONF.cells.name, is_me=True)
    self.parent_cells = {}
    self.child_cells = {}
    self.last_cell_db_check = datetime.datetime.min

    # Retry the initial sync on DB errors for up to 120 attempts
    # (30s apart) before giving up.
    attempts = 0
    while True:
        try:
            self._cell_data_sync(force=True)
            break
        except db_exc.DBError:
            attempts += 1
            if attempts > 120:
                raise
            LOG.exception(_LE('DB error'))
            time.sleep(30)

    # Parse "name=v1;v2" capability strings into name -> set of values.
    # set(value.split(';')) yields set([value]) when there is no ';'.
    my_cell_capabs = {}
    for cap in CONF.cells.capabilities:
        name, value = cap.split('=', 1)
        my_cell_capabs[name] = set(value.split(';'))
    self.my_cell_state.update_capabilities(my_cell_capabs)
def delete(self, req, id):
    """Delete a network, managing the network-quota reservation."""
    context = req.environ['patron.context']
    authorize(context)

    reservation = None
    try:
        if CONF.enable_network_quota:
            reservation = QUOTAS.reserve(context, networks=-1)
    except Exception:
        reservation = None
        LOG.exception(
            _LE("Failed to update usages deallocating "
                "network."))

    def _rollback_quota(reservation):
        # Only roll back when quotas are enabled and a reservation
        # was actually made.
        if CONF.enable_network_quota and reservation:
            QUOTAS.rollback(context, reservation)

    try:
        self.network_api.delete(context, id)
    except exception.PolicyNotAuthorized as e:
        _rollback_quota(reservation)
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.NetworkInUse as e:
        _rollback_quota(reservation)
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.NetworkNotFound:
        _rollback_quota(reservation)
        raise exc.HTTPNotFound(explanation=_("Network not found"))

    if CONF.enable_network_quota and reservation:
        QUOTAS.commit(context, reservation)
    return webob.Response(status_int=202)
def _load_config():
    """Populate PATRON_VENDOR/PRODUCT/PACKAGE from the "release" file.

    Runs at most once (guarded by the module-level ``loaded`` flag).
    Silently returns when no release file is found; any parse failure
    is logged, never raised.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import ConfigParser

    from oslo_config import cfg

    import logging

    global loaded, PATRON_VENDOR, PATRON_PRODUCT, PATRON_PACKAGE
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        # Fix: use a distinct name for the parser so it no longer
        # shadows the ``cfg`` (oslo_config) module imported above.
        parser = ConfigParser.RawConfigParser()
        parser.read(cfgfile)

        if parser.has_option("Patron", "vendor"):
            PATRON_VENDOR = parser.get("Patron", "vendor")

        if parser.has_option("Patron", "product"):
            PATRON_PRODUCT = parser.get("Patron", "product")

        if parser.has_option("Patron", "package"):
            PATRON_PACKAGE = parser.get("Patron", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error(_LE("Failed to load %(cfgfile)s: %(ex)s"),
                  {"cfgfile": cfgfile, "ex": ex})
def __init__(self, driver, name, pool=None, snapshot=None,
             read_only=False):
    """Open an rbd image, disconnecting from rados if the open fails."""
    client, ioctx = driver._connect_to_rados(pool)
    # rbd wants byte strings for the image and snapshot names.
    image_name = name.encode('utf8')
    snap_name = snapshot.encode('utf8') if snapshot else None
    try:
        self.volume = rbd.Image(ioctx, image_name,
                                snapshot=snap_name,
                                read_only=read_only)
    except rbd.ImageNotFound:
        with excutils.save_and_reraise_exception():
            LOG.debug("rbd image %s does not exist", name)
            driver._disconnect_from_rados(client, ioctx)
    except rbd.Error:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("error opening rbd image %s"), name)
            driver._disconnect_from_rados(client, ioctx)
    self.driver = driver
    self.client = client
    self.ioctx = ioctx
def parse_server_string(server_str):
    """Split ``server_str`` into a (host, port) tuple.

    The port element is an empty string when the input carries no
    port; both elements are empty strings when the input is invalid.
    """
    try:
        # Bare IPv6 address with no port.
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')

        # Bracketed IPv6 address plus port: [addr]:port
        if server_str.find("]:") != -1:
            address, port = server_str.replace('[', '', 1).split(']:')
            return (address, port)

        # Plain host with no port part at all.
        if server_str.find(':') == -1:
            return (server_str, '')

        # Remaining case: host:port.
        address, port = server_str.split(':')
        return (address, port)
    except (ValueError, netaddr.AddrFormatError):
        LOG.error(_LE('Invalid server_string: %s'), server_str)
        return ('', '')
def verify_base_size(self, base, size, base_size=0):
    """Check that the base image is not larger than size.

    Since images can't be generally shrunk, enforce this constraint
    taking account of virtual image size.

    :param base: path to the backing/base image
    :param size: flavor root disk size in bytes, or None to skip check
    :param base_size: known virtual size of the base image; looked up
        via self.get_disk_size(base) when not supplied
    :raises exception.FlavorDiskTooSmall: if the base image is larger
    """
    # Note(pbrady): The size and min_disk parameters of a glance
    # image are checked against the instance size before the image
    # is even downloaded from glance, but currently min_disk is
    # adjustable and doesn't currently account for virtual disk size,
    # so we need this extra check here.
    # NOTE(cfb): Having a flavor that sets the root size to 0 and having
    # patron effectively ignore that size and use the size of the
    # image is considered a feature at this time, not a bug.
    if size is None:
        return

    if size and not base_size:
        base_size = self.get_disk_size(base)

    if size < base_size:
        # Fix: pass the format args to LOG.error so interpolation is
        # done lazily by the logger (was eager "msg % {...}").
        LOG.error(_LE('%(base)s virtual size %(base_size)s '
                      'larger than flavor root disk size %(size)s'),
                  {'base': base, 'base_size': base_size, 'size': size})
        raise exception.FlavorDiskTooSmall()
def _write_conf(self, config):
    """Write the xvp configuration text to CONF.console_xvp_conf."""
    conf_path = CONF.console_xvp_conf
    try:
        # NOTE(review): this is logged before the write actually
        # happens (original behavior, preserved).
        LOG.debug('Re-wrote %s', conf_path)
        with open(conf_path, 'w') as cfile:
            cfile.write(config)
    except IOError:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to write configuration file"))
def run(self):
    """Copy console output until the pipe closes.

    EINVAL means the vm console pipe was closed (probably the vm was
    stopped), so the worker simply stops; any other IOError is logged.
    """
    try:
        self._copy()
    except IOError as err:
        # Invalid argument error means that the vm console pipe was
        # closed, probably the vm was stopped. The worker can stop
        # it's execution.
        if err.errno != errno.EINVAL:
            # Fix: let the logger interpolate lazily instead of eager
            # "%"-formatting the translated message.
            LOG.error(_LE("Error writing vm console log file from "
                          "serial console pipe. Error: %s"), err)