def check_string_length(value, name=None, min_length=0, max_length=None):
    """Check the length of specified string.

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    """
    if not isinstance(value, six.string_types):
        if name is None:
            msg = _("The input is not a string or unicode")
        else:
            msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if name is None:
        name = value

    if len(value) < min_length:
        msg = _("%(name)s has a minimum character requirement of "
                "%(min_length)s.") % {'name': name, 'min_length': min_length}
        raise exception.InvalidInput(message=msg)

    if max_length and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {'name': name, 'max_length': max_length}
        raise exception.InvalidInput(message=msg)

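# Usage sketch (hypothetical caller, for illustration only): an API layer
# might use check_string_length to enforce non-empty, bounded display names
# before passing them on; exception.InvalidInput is raised on any violation.
def _validate_display_name(display_name):
    check_string_length(display_name, name='display_name',
                        min_length=1, max_length=255)
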
def create_entry(self, name, address, type, domain):
    if name is None:
        raise exception.InvalidInput(_("Invalid name"))

    if type.lower() != 'a':
        raise exception.InvalidInput(_("This driver only supports "
                                       "type 'a'"))

    if self.get_entries_by_name(name, domain):
        raise exception.FloatingIpDNSExists(name=name, domain=domain)

    outfile = open(self.filename, 'a+')
    outfile.write("%s %s %s\n" %
                  (address, self.qualify(name, domain), type))
    outfile.close()

def create_entry(self, name, address, type, domain):
    if type.lower() != 'a':
        raise exception.InvalidInput(_("This driver only supports "
                                       "type 'a' entries."))

    dEntry = DomainEntry(self.lobj, domain)
    dEntry.add_entry(name, address)

def _verify_resources(self, resources):
    resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
                     "vcpus_used", "memory_mb_used", "local_gb_used",
                     "numa_topology"]

    missing_keys = [k for k in resource_keys if k not in resources]
    if missing_keys:
        reason = _("Missing keys: %s") % missing_keys
        raise exception.InvalidInput(reason=reason)

def _address_to_hosts(self, addresses):
    """Iterate over hosts within an address range.

    If an explicit range specifier is missing, the parameter is
    interpreted as a specific individual address.
    """
    try:
        return [netaddr.IPAddress(addresses)]
    except ValueError:
        net = netaddr.IPNetwork(addresses)
        if net.size < 4:
            reason = _("/%s should be specified as single address(es) "
                       "not in cidr format") % net.prefixlen
            raise exception.InvalidInput(reason=reason)
        else:
            return net.iter_hosts()
    except netaddr.AddrFormatError as exc:
        raise exception.InvalidInput(reason=six.text_type(exc))

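# Behavior sketch (hypothetical calls, for illustration only): a plain address
# yields a single-element list, a large enough CIDR yields an iterator of
# usable hosts, and a /31 or /32 written in CIDR form (or a malformed string)
# raises exception.InvalidInput.
# self._address_to_hosts('192.168.0.10')          # -> [IPAddress('192.168.0.10')]
# list(self._address_to_hosts('192.168.0.0/24'))  # -> 254 usable host addresses
# self._address_to_hosts('192.168.0.0/32')        # -> raises InvalidInput
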
def validate_integer(value, name, min_value=None, max_value=None):
    """Make sure that value is a valid integer, potentially within range."""
    try:
        value = int(str(value))
    except (ValueError, UnicodeEncodeError):
        msg = _('%(value_name)s must be an integer')
        raise exception.InvalidInput(reason=(
            msg % {'value_name': name}))

    if min_value is not None:
        if value < min_value:
            msg = _('%(value_name)s must be >= %(min_value)d')
            raise exception.InvalidInput(
                reason=(msg % {'value_name': name,
                               'min_value': min_value}))
    if max_value is not None:
        if value > max_value:
            msg = _('%(value_name)s must be <= %(max_value)d')
            raise exception.InvalidInput(
                reason=(msg % {'value_name': name,
                               'max_value': max_value}))
    return value

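# Usage sketch (assumed calling code in the same module, for illustration):
# validate_integer accepts anything int(str(value)) can parse and enforces the
# optional bounds, raising exception.InvalidInput otherwise.
def _parse_port(raw_port):
    # "8080" -> 8080; "70000" or "abc" -> exception.InvalidInput
    return validate_integer(raw_port, 'port', min_value=1, max_value=65535)
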
def delete_entry(self, name, domain):
    if name is None:
        raise exception.InvalidInput(_("Invalid name"))

    deleted = False
    infile = open(self.filename, 'r')
    outfile = tempfile.NamedTemporaryFile('w', delete=False)
    for line in infile:
        entry = self.parse_line(line)
        if (not entry) or entry['name'] != self.qualify(name, domain):
            outfile.write(line)
        else:
            deleted = True
    infile.close()
    outfile.close()
    shutil.move(outfile.name, self.filename)
    if not deleted:
        LOG.warning(_LW('Cannot delete entry |%s|'),
                    self.qualify(name, domain))
        raise exception.NotFound

def create(self, context, size, name, description, snapshot=None,
           image_id=None, volume_type=None, metadata=None,
           availability_zone=None):
    client = cinderclient(context)

    if snapshot is not None:
        snapshot_id = snapshot['id']
    else:
        snapshot_id = None

    kwargs = dict(snapshot_id=snapshot_id,
                  volume_type=volume_type,
                  user_id=context.user_id,
                  project_id=context.project_id,
                  availability_zone=availability_zone,
                  metadata=metadata,
                  imageRef=image_id)

    if isinstance(client, v1_client.Client):
        kwargs['display_name'] = name
        kwargs['display_description'] = description
    else:
        kwargs['name'] = name
        kwargs['description'] = description

    try:
        item = client.volumes.create(size, **kwargs)
        return _untranslate_volume_summary_view(context, item)
    except cinder_exception.OverLimit:
        raise exception.OverQuota(overs='volumes')
    except (cinder_exception.BadRequest,
            keystone_exception.BadRequest) as e:
        raise exception.InvalidInput(reason=e)

def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param loader: Loads the WSGI application using the given name.
    :returns: None
    """
    self.name = name
    self.manager = self._get_manager()
    self.loader = loader or wsgi.Loader()
    self.app = self.loader.load_app(name)
    # inherit all compute_api worker counts from osapi_compute
    if name.startswith('openstack_compute_api'):
        wname = 'osapi_compute'
    else:
        wname = name
    self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
    self.port = getattr(CONF, '%s_listen_port' % name, 0)
    self.workers = (getattr(CONF, '%s_workers' % wname, None) or
                    processutils.get_worker_count())
    if self.workers and self.workers < 1:
        worker_name = '%s_workers' % name
        msg = (_("%(worker_name)s value of %(workers)s is invalid, "
                 "must be greater than 0") %
               {'worker_name': worker_name,
                'workers': str(self.workers)})
        raise exception.InvalidInput(msg)
    self.use_ssl = use_ssl
    self.server = wsgi.Server(name,
                              self.app,
                              host=self.host,
                              port=self.port,
                              use_ssl=self.use_ssl,
                              max_url_len=max_url_len)
    # Pull back actual port used
    self.port = self.server.port
    self.backdoor_port = None

def wrapper(self, ctx, volume_id, *args, **kwargs):
    try:
        res = method(self, ctx, volume_id, *args, **kwargs)
    except (cinder_exception.ClientException,
            keystone_exception.ClientException):
        exc_type, exc_value, exc_trace = sys.exc_info()
        if isinstance(exc_value, (keystone_exception.NotFound,
                                  cinder_exception.NotFound)):
            exc_value = exception.VolumeNotFound(volume_id=volume_id)
        elif isinstance(exc_value, (keystone_exception.BadRequest,
                                    cinder_exception.BadRequest)):
            exc_value = exception.InvalidInput(
                reason=six.text_type(exc_value))
        raise exc_value, None, exc_trace
    except (cinder_exception.ConnectionError,
            keystone_exception.ConnectionError):
        exc_type, exc_value, exc_trace = sys.exc_info()
        exc_value = exception.CinderConnectionFailed(
            reason=six.text_type(exc_value))
        raise exc_value, None, exc_trace
    return res

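# Context sketch (assumed, not taken from the original file): `method` above is
# a closure variable, so this wrapper is typically produced by a decorator
# factory, e.g. a hypothetical translate_volume_exception(method) that applies
# functools.wraps(method) and returns `wrapper`. The three-argument raise
# preserves the original traceback and is Python 2 syntax.
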
def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
           swap=0, rxtx_factor=1.0, is_public=True):
    """Creates flavors."""
    if not flavorid:
        flavorid = uuid.uuid4()

    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
    }

    if isinstance(name, six.string_types):
        name = name.strip()
    # ensure name does not exceed 255 characters
    utils.check_string_length(name, 'name', min_length=1, max_length=255)
    # ensure name does not contain any special characters
    valid_name = VALID_NAME_REGEX.search(name)
    if not valid_name:
        msg = _("Flavor names can only contain printable characters "
                "and horizontal spaces.")
        raise exception.InvalidInput(reason=msg)

    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    #             in through json as an integer, so we convert it here.
    flavorid = unicode(flavorid)

    # ensure leading/trailing whitespace is not present
    if flavorid.strip() != flavorid:
        msg = _("id cannot contain leading and/or trailing whitespace(s)")
        raise exception.InvalidInput(reason=msg)

    # ensure flavor id does not exceed 255 characters
    utils.check_string_length(flavorid, 'id', min_length=1, max_length=255)
    # ensure flavor id does not contain any special characters
    valid_flavor_id = VALID_ID_REGEX.search(flavorid)
    if not valid_flavor_id:
        msg = _("Flavor id can only contain letters from A-Z (both cases), "
                "periods, dashes, underscores and spaces.")
        raise exception.InvalidInput(reason=msg)

    # NOTE(wangbo): validate attributes of the creating flavor.
    # ram and vcpus should be positive ( > 0) integers.
    # disk, ephemeral and swap should be non-negative ( >= 0) integers.
    flavor_attributes = {
        'memory_mb': ('ram', 1),
        'vcpus': ('vcpus', 1),
        'root_gb': ('disk', 0),
        'ephemeral_gb': ('ephemeral', 0),
        'swap': ('swap', 0)
    }

    for key, value in flavor_attributes.items():
        kwargs[key] = utils.validate_integer(kwargs[key], value[0], value[1],
                                             db.MAX_INT)

    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        if (kwargs['rxtx_factor'] <= 0 or
                kwargs['rxtx_factor'] > SQL_SP_FLOAT_MAX):
            raise ValueError()
    except ValueError:
        msg = (_("'rxtx_factor' argument must be a float between 0 and %g") %
               SQL_SP_FLOAT_MAX)
        raise exception.InvalidInput(reason=msg)

    kwargs['name'] = name
    kwargs['flavorid'] = flavorid
    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(
            is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))

    flavor = objects.Flavor(context=context.get_admin_context(), **kwargs)
    flavor.create()
    return flavor

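# Usage sketch (illustrative values only, not from the original source):
# create() strips and length-checks the name, normalizes flavorid to a string,
# range-checks the numeric attributes with utils.validate_integer, validates
# rxtx_factor as a positive float, and coerces is_public to a boolean before
# persisting the Flavor object.
# flavor = create('m1.tiny', memory=512, vcpus=1, root_gb=1)
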
def fake_volume_create(self, context, size, name, description, snapshot,
                       **param):
    raise exception.InvalidInput(reason="bad request data")

def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""

    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['patron.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    search_opts.pop('status', None)
    if 'status' in req.GET.keys():
        statuses = req.GET.getall('status')
        states = common.task_and_vm_state_from_status(statuses)
        vm_state, task_state = states
        if not vm_state and not task_state:
            return {'servers': []}

        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes-since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes-since'])
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes-since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes-since' is specified, because 'changes-since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes-since' not in search_opts:
            # No 'changes-since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPForbidden(explanation=msg)

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(six.text_type(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    # Sorting by multiple keys and directions is conditionally enabled
    sort_keys, sort_dirs = None, None
    if self.ext_mgr.is_loaded('os-server-sort-keys'):
        sort_keys, sort_dirs = common.get_sort_params(req.params)

    try:
        instance_list = self.compute_api.get_all(context,
                                                 search_opts=search_opts,
                                                 limit=limit,
                                                 marker=marker,
                                                 want_objects=True,
                                                 sort_keys=sort_keys,
                                                 sort_dirs=sort_dirs)
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
        instance_list = objects.InstanceList()

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)

    req.cache_db_instances(instance_list)
    return response

def _new_ingress_rule(ip_protocol, from_port, to_port,
                      group_id=None, cidr=None):
    values = {}

    if group_id:
        values['group_id'] = group_id
        # Open everything if an explicit port range or type/code are not
        # specified, but only if a source group was specified.
        ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
        if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
            from_port = -1
            to_port = -1
        elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
              and to_port is None):
            from_port = 1
            to_port = 65535
    elif cidr:
        values['cidr'] = cidr

    if ip_protocol and from_port is not None and to_port is not None:
        ip_protocol = str(ip_protocol)
        try:
            # Verify integer conversions
            from_port = int(from_port)
            to_port = int(to_port)
        except ValueError:
            if ip_protocol.upper() == 'ICMP':
                raise exception.InvalidInput(reason=_(
                    "Type and Code must be integers for ICMP protocol type"))
            else:
                raise exception.InvalidInput(reason=_(
                    "To and From ports must be integers"))

        if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
            raise exception.InvalidIpProtocol(protocol=ip_protocol)

        # Verify that from_port must always be less than
        # or equal to to_port
        if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port > to_port)):
            raise exception.InvalidPortRange(
                from_port=from_port, to_port=to_port,
                msg="Former value cannot be greater than the latter")

        # Verify valid TCP, UDP port ranges
        if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
            raise exception.InvalidPortRange(
                from_port=from_port, to_port=to_port,
                msg="Valid TCP ports should be between 1-65535")

        # Verify ICMP type and code
        if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                 to_port < -1 or to_port > 255)):
            raise exception.InvalidPortRange(
                from_port=from_port, to_port=to_port,
                msg="For ICMP, the type:code must be valid")

        values['protocol'] = ip_protocol
        values['from_port'] = from_port
        values['to_port'] = to_port
    else:
        # If cidr based filtering, protocol and ports are mandatory
        if cidr:
            return None

    return values

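# Usage sketch (hypothetical values, for illustration only): with a source
# group and no ports, the rule is opened to the protocol's full range; with a
# CIDR, the protocol and both ports are mandatory and are validated before
# being returned.
# _new_ingress_rule('tcp', None, None, group_id=42)
#     -> {'group_id': 42, 'protocol': 'tcp', 'from_port': 1, 'to_port': 65535}
# _new_ingress_rule('tcp', 80, 80, cidr='0.0.0.0/0')
#     -> {'cidr': '0.0.0.0/0', 'protocol': 'tcp', 'from_port': 80, 'to_port': 80}
# _new_ingress_rule(None, None, None, cidr='0.0.0.0/0')
#     -> None (protocol and ports are required for CIDR-based rules)
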
def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128,
             use_ssl=False, max_url_len=None):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param backlog: Maximum number of queued connections.
    :param max_url_len: Maximum length of permitted URLs.
    :returns: None
    :raises: patron.exception.InvalidInput
    """
    # Allow operators to customize http requests max header line size.
    eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self.pool_size = pool_size or self.default_pool_size
    self._pool = eventlet.GreenPool(self.pool_size)
    self._logger = logging.getLogger("patron.%s.wsgi.server" % self.name)
    self._wsgi_logger = loggers.WritableLogger(self._logger)
    self._use_ssl = use_ssl
    self._max_url_len = max_url_len
    self.client_socket_timeout = CONF.client_socket_timeout or None

    if backlog < 1:
        raise exception.InvalidInput(
            reason='The backlog must be more than 1')

    bind_addr = (host, port)
    # TODO(dims): eventlet's green dns/socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        family = socket.AF_INET

    try:
        self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
    except EnvironmentError:
        LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                  {'host': host, 'port': port})
        raise

    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info(_LI("%(name)s listening on %(host)s:%(port)s"),
             {'name': self.name, 'host': self.host, 'port': self.port})

def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified."""

    search_opts = {}
    search_opts.update(req.GET)

    context = req.environ['patron.context']
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    search_opts.pop('status', None)
    if 'status' in req.GET.keys():
        statuses = req.GET.getall('status')
        states = common.task_and_vm_state_from_status(statuses)
        vm_state, task_state = states
        if not vm_state and not task_state:
            return {'servers': []}

        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes-since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes-since'])
        except ValueError:
            msg = _('Invalid changes-since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes-since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes-since' is specified, because 'changes-since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes-since' not in search_opts:
            # No 'changes-since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPForbidden(explanation=msg)

    # If tenant_id is passed as a search parameter this should
    # imply that all_tenants is also enabled unless explicitly
    # disabled. Note that the tenant_id parameter is filtered out
    # by remove_invalid_options above unless the requestor is an
    # admin.
    # TODO(gmann): 'all_tenants' flag should not be required while
    # searching with 'tenant_id'. Ref bug# 1185290
    # +microversions to achieve above mentioned behavior by
    # uncommenting below code.

    # if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        # if context.project_id != search_opts.get('tenant_id'):
        #     search_opts['all_tenants'] = 1

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(six.text_type(err))

    if 'all_tenants' in search_opts:
        if is_detail:
            authorize(context, action="detail:get_all_tenants")
        else:
            authorize(context, action="index:get_all_tenants")
        del search_opts['all_tenants']
    else:
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    sort_keys, sort_dirs = common.get_sort_params(req.params)

    try:
        instance_list = self.compute_api.get_all(
            context, search_opts=search_opts, limit=limit, marker=marker,
            want_objects=True, expected_attrs=['pci_devices'],
            sort_keys=sort_keys, sort_dirs=sort_dirs)
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        LOG.debug("Flavor '%s' could not be found ",
                  search_opts['flavor'])
        instance_list = objects.InstanceList()

    if is_detail:
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)

    req.cache_db_instances(instance_list)
    return response

def validate_extra_spec_keys(key_names_list):
    for key_name in key_names_list:
        if not VALID_EXTRASPEC_NAME_REGEX.match(key_name):
            expl = _('Key Names can only contain alphanumeric characters, '
                     'periods, dashes, underscores, colons and spaces.')
            raise exception.InvalidInput(message=expl)

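# Usage sketch (key semantics inferred from the error message above, for
# illustration only): keys built from alphanumerics, periods, dashes,
# underscores, colons and spaces pass; anything else raises
# exception.InvalidInput.
# validate_extra_spec_keys(['hw:cpu_policy', 'quota:disk_read_bytes_sec'])  # ok
# validate_extra_spec_keys(['bad/key'])  # raises InvalidInput
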
def __init__(self, virtapi, scheme="https"):
    super(VMwareVCDriver, self).__init__(virtapi)

    if (CONF.vmware.host_ip is None or
            CONF.vmware.host_username is None or
            CONF.vmware.host_password is None):
        raise Exception(_("Must specify host_ip, host_username and "
                          "host_password to use vmwareapi.VMwareVCDriver"))

    self._datastore_regex = None
    if CONF.vmware.datastore_regex:
        try:
            self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
        except re.error:
            raise exception.InvalidInput(reason=_(
                "Invalid Regular Expression %s")
                % CONF.vmware.datastore_regex)

    self._session = VMwareAPISession(scheme=scheme)

    # Update the PBM location if necessary
    if CONF.vmware.pbm_enabled:
        self._update_pbm_location()

    self._validate_configuration()

    # Get the list of clusters to be used
    self._cluster_names = CONF.vmware.cluster_name
    if len(self._cluster_names) > 1:
        versionutils.report_deprecated_feature(
            LOG,
            _LW('The "cluster_name" setting should have only one '
                'cluster name. The capability of allowing '
                'multiple clusters may be dropped in the '
                'Liberty release.'))

    self.dict_mors = vm_util.get_all_cluster_refs_by_name(
        self._session, self._cluster_names)
    if not self.dict_mors:
        raise exception.NotFound(_("All clusters specified %s were not"
                                   " found in the vCenter")
                                 % self._cluster_names)

    # Check for clusters that were specified in patron.conf but are not
    # present in the vCenter, and log a warning for any that are missing.
    clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
    missing_clusters = set(self._cluster_names) - set(clusters_found)
    if missing_clusters:
        LOG.warning(_LW("The following clusters could not be found in "
                        "the vCenter %s"), list(missing_clusters))

    # The _resources is used to maintain the vmops, volumeops and vcstate
    # objects per cluster
    self._resources = {}
    self._resource_keys = set()
    self._virtapi = virtapi
    self._update_resources()

    # The following initialization is necessary since the base class does
    # not use VC state.
    first_cluster = self._resources.keys()[0]
    self._vmops = self._resources.get(first_cluster).get('vmops')
    self._volumeops = self._resources.get(first_cluster).get('volumeops')
    self._vc_state = self._resources.get(first_cluster).get('vcstate')

    # Register the OpenStack extension
    self._register_openstack_extension()