def urlsafe_encrypt(key, plaintext, blocksize=16):
    """
    Encrypts plaintext. Resulting ciphertext will contain URL-safe
    characters. If plaintext is Unicode, encode it to UTF-8 before
    encryption.

    :param key: AES secret key
    :param plaintext: Input text to be encrypted
    :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)

    :returns: Resulting ciphertext
    """
    def pad(text):
        """
        Pads text to be encrypted
        """
        pad_length = (blocksize - len(text) % blocksize)
        # Use a cryptographically strong RNG for the pad bytes; pad
        # values are 1..0xFF so the 0x00 delimiter below stays unique.
        sr = random.StrongRandom()
        pad = b''.join(six.int2byte(sr.randint(1, 0xFF))
                       for i in range(pad_length - 1))
        # We use chr(0) as a delimiter between text and padding
        return text + b'\0' + pad

    plaintext = encodeutils.to_utf8(plaintext)
    key = encodeutils.to_utf8(key)
    # random initial 16 bytes for CBC
    init_vector = Random.get_random_bytes(16)
    cypher = AES.new(key, AES.MODE_CBC, init_vector)
    padded = cypher.encrypt(pad(six.binary_type(plaintext)))
    # The IV is prepended so decryption can recover it from the blob.
    encoded = base64.urlsafe_b64encode(init_vector + padded)
    if six.PY3:
        encoded = encoded.decode('ascii')
    return encoded
def encrypt_data(data, key, iv_bit_count=IV_BIT_COUNT):
    """Encrypt ``data`` with AES-CBC using a key derived from ``key``.

    :param data: plaintext to encrypt (str or bytes; encoded to UTF-8)
    :param key: secret key material (str or bytes; encoded to UTF-8)
    :param iv_bit_count: length in bytes of the random IV that is
        prepended to the ciphertext
    :returns: bytes: the IV followed by the ciphertext
    """
    data = encodeutils.to_utf8(data)
    key = encodeutils.to_utf8(key)
    # NOTE(security): MD5 here only stretches the key material into a
    # fixed-length AES key; kept for compatibility with decrypt_data,
    # which derives the key the same way.
    md5_key = hashlib.md5(key).hexdigest()
    # Random.new().read() already returns exactly iv_bit_count bytes,
    # so the former redundant truncation (iv = iv[:iv_bit_count]) is
    # removed.
    iv = Random.new().read(iv_bit_count)
    aes = AES.new(md5_key, AES.MODE_CBC, iv)
    data = pad_for_encryption(data, iv_bit_count)
    encrypted = aes.encrypt(data)
    return iv + encrypted
def test_to_utf8(self):
    """to_utf8 passes bytes through, encodes text, and rejects others."""
    self.assertEqual(encodeutils.to_utf8(b'a\xe9\xff'),            # bytes
                     b'a\xe9\xff')
    self.assertEqual(encodeutils.to_utf8(u'a\xe9\xff\u20ac'),      # Unicode
                     b'a\xc3\xa9\xc3\xbf\xe2\x82\xac')
    self.assertRaises(TypeError, encodeutils.to_utf8, 123)         # invalid

    # oslo.i18n Message objects should also be accepted for convenience.
    # It works because Message is a subclass of six.text_type. Use the
    # lazy translation to get a Message instance of oslo_i18n.
    msg = oslo_i18n.fixture.Translation().lazy("test")
    self.assertEqual(encodeutils.to_utf8(msg), b'test')
def _read_pyca_private_key(private_key, private_key_passphrase=None):
    """Load a PEM private key via pyca/cryptography.

    :param private_key: PEM-encoded private key (str or bytes)
    :param private_key_passphrase: optional passphrase (str or bytes)
    :returns: the deserialized private key object
    :raises exceptions.NeedsPassphrase: if the key is encrypted and the
        passphrase is missing or not usable
    """
    # "password" already defaults to None; only override when a
    # passphrase was actually supplied (the old else branch was
    # redundant).
    kw = {"password": None, "backend": backends.default_backend()}
    if private_key_passphrase is not None:
        kw["password"] = encodeutils.to_utf8(private_key_passphrase)
    private_key = encodeutils.to_utf8(private_key)
    try:
        return serialization.load_pem_private_key(private_key, **kw)
    except TypeError as ex:
        # cryptography signals a missing/unexpected password with a
        # TypeError whose message starts with "Password".
        if len(ex.args) > 0 and ex.args[0].startswith("Password"):
            raise exceptions.NeedsPassphrase
        # Any other TypeError is a genuine programming error; the
        # original code fell through here and silently returned None.
        raise
def _send_inventories(response, resource_provider, inventories):
    """Send a JSON representation of a list of inventories."""
    payload = _serialize_inventories(
        inventories, resource_provider.generation)
    response.status = 200
    response.content_type = 'application/json'
    response.body = encodeutils.to_utf8(jsonutils.dumps(payload))
    return response
def list_traits(req):
    """GET a list of traits, optionally filtered by name and association.

    On success return a 200 with an application/json body representing
    the collection of traits.
    """
    context = req.environ['placement.context']
    filters = {}
    try:
        jsonschema.validate(dict(req.GET), LIST_TRAIT_SCHEMA,
                            format_checker=jsonschema.FormatChecker())
    except jsonschema.ValidationError as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid query string parameters: %(exc)s') %
            {'exc': exc})
    if 'name' in req.GET:
        filters = _normalize_traits_qs_param(req.GET['name'])
    if 'associated' in req.GET:
        # Only the literal strings "true"/"false" (any case) are valid.
        if req.GET['associated'].lower() not in ['true', 'false']:
            raise webob.exc.HTTPBadRequest(
                explanation=_('The query parameter "associated" only accepts '
                              '"true" or "false"'))
        filters['associated'] = (
            True if req.GET['associated'].lower() == 'true' else False)
    traits = objects.TraitList.get_all(context, filters)
    req.response.status = 200
    req.response.body = encodeutils.to_utf8(
        jsonutils.dumps(_serialize_traits(traits)))
    req.response.content_type = 'application/json'
    return req.response
def list_traits(req):
    """GET a list of traits.

    On success return a 200 with an application/json body representing
    the (possibly filtered) collection of traits.
    """
    context = req.environ['placement.context']
    context.can(policies.TRAITS_LIST)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    filters = {}
    util.validate_query_params(req, schema.LIST_TRAIT_SCHEMA)
    if 'name' in req.GET:
        filters = _normalize_traits_qs_param(req.GET['name'])
    if 'associated' in req.GET:
        if req.GET['associated'].lower() not in ['true', 'false']:
            raise webob.exc.HTTPBadRequest(
                _('The query parameter "associated" only accepts '
                  '"true" or "false"'))
        filters['associated'] = (
            True if req.GET['associated'].lower() == 'true' else False)
    traits = rp_obj.TraitList.get_all(context, filters)
    req.response.status = 200
    output, last_modified = _serialize_traits(traits, want_version)
    if want_version.matches((1, 15)):
        # Cache headers were added in microversion 1.15.
        req.response.last_modified = last_modified
        req.response.cache_control = 'no-cache'
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(output))
    req.response.content_type = 'application/json'
    return req.response
def update_resource_class(req):
    """PUT to update a single resource class.

    On success return a 200 response with a representation of the updated
    resource class.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['placement.context']

    data = util.extract_json(req.body, PUT_RC_SCHEMA_V1_2)

    # The containing application will catch a not found here.
    rc = objects.ResourceClass.get_by_name(context, name)

    rc.name = data['name']
    try:
        rc.save()
    except exception.ResourceClassExists:
        # Renaming to a name already in use -> 409.
        raise webob.exc.HTTPConflict(
            _('Resource class already exists: %(name)s') %
            {'name': rc.name})
    except exception.ResourceClassCannotUpdateStandard:
        # Standard (non CUSTOM_*) classes are immutable -> 400.
        raise webob.exc.HTTPBadRequest(
            _('Cannot update standard resource class %(rp_name)s') %
            {'rp_name': name})

    req.response.body = encodeutils.to_utf8(jsonutils.dumps(
        _serialize_resource_class(req.environ, rc))
    )
    req.response.status = 200
    req.response.content_type = 'application/json'
    return req.response
def list_traits_for_resource_provider(req):
    """GET the traits of the resource provider identified in the URL.

    Returns 404 if the resource provider does not exist.
    """
    context = req.environ['placement.context']
    context.can(policies.RP_TRAIT_LIST)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    uuid = util.wsgi_path_item(req.environ, 'uuid')

    # Resource provider object is needed for two things: If it is
    # NotFound we'll get a 404 here, which needs to happen because
    # get_all_by_resource_provider can return an empty list.
    # It is also needed for the generation, used in the outgoing
    # representation.
    try:
        rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("No resource provider with uuid %(uuid)s found: %(error)s") %
            {'uuid': uuid, 'error': exc})

    traits = rp_obj.TraitList.get_all_by_resource_provider(context, rp)
    response_body, last_modified = _serialize_traits(traits, want_version)
    response_body["resource_provider_generation"] = rp.generation

    if want_version.matches((1, 15)):
        # Cache headers were added in microversion 1.15.
        req.response.last_modified = last_modified
        req.response.cache_control = 'no-cache'

    req.response.status = 200
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
    req.response.content_type = 'application/json'
    return req.response
def get_resource_class(req):
    """Get a single resource class.

    On success return a 200 with an application/json body representing
    the resource class.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['placement.context']
    context.can(policies.SHOW)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    # The containing application will catch a not found here.
    rc = rp_obj.ResourceClass.get_by_name(context, name)

    req.response.body = encodeutils.to_utf8(jsonutils.dumps(
        _serialize_resource_class(req.environ, rc))
    )
    req.response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        req.response.cache_control = 'no-cache'
        # Non-custom resource classes will return None from
        # pick_last_modified, so the 'or' causes utcnow to be used.
        last_modified = util.pick_last_modified(None, rc) or timeutils.utcnow(
            with_timezone=True)
        req.response.last_modified = last_modified
    return req.response
def _send_inventory(response, resource_provider, inventory, status=200):
    """Send a JSON representation of one single inventory."""
    serialized = _serialize_inventory(
        inventory, generation=resource_provider.generation)
    response.status = status
    response.content_type = 'application/json'
    response.body = encodeutils.to_utf8(jsonutils.dumps(serialized))
    return response
def list_for_consumer(req):
    """List allocations associated with a consumer."""
    context = req.environ['placement.context']
    context.can(policies.ALLOC_LIST)
    consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # NOTE(cdent): There is no way for a 404 to be returned here,
    # only an empty result. We do not have a way to validate a
    # consumer id.
    allocations = rp_obj.AllocationList.get_all_by_consumer_id(
        context, consumer_id)

    output = _serialize_allocations_for_consumer(allocations, want_version)
    last_modified = _last_modified_from_allocations(allocations, want_version)
    allocations_json = jsonutils.dumps(output)

    response = req.response
    response.status = 200
    response.body = encodeutils.to_utf8(allocations_json)
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        # Cache headers were added in microversion 1.15.
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response
def update_resource_provider(req):
    """PUT to update a single resource provider.

    On success return a 200 response with a representation of the updated
    resource provider.
    """
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    context = req.environ['placement.context']

    # The containing application will catch a not found here.
    resource_provider = objects.ResourceProvider.get_by_uuid(
        context, uuid)

    data = util.extract_json(req.body, PUT_RESOURCE_PROVIDER_SCHEMA)

    resource_provider.name = data['name']

    try:
        resource_provider.save()
    except db_exc.DBDuplicateEntry as exc:
        # Another provider already holds the requested name -> 409.
        raise webob.exc.HTTPConflict(
            _('Conflicting resource provider %(name)s already exists.') %
            {'name': data['name']})
    except exception.ObjectActionError as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to save resource provider %(rp_uuid)s: %(error)s') %
            {'rp_uuid': uuid, 'error': exc})

    req.response.body = encodeutils.to_utf8(jsonutils.dumps(
        _serialize_provider(req.environ, resource_provider)))
    req.response.status = 200
    req.response.content_type = 'application/json'
    return req.response
def list_for_resource_provider(req): """List allocations associated with a resource provider.""" # TODO(cdent): On a shared resource provider (for example a # giant disk farm) this list could get very long. At the moment # we have no facility for limiting the output. Given that we are # using a dict of dicts for the output we are potentially limiting # ourselves in terms of sorting and filtering. context = req.environ['placement.context'] uuid = util.wsgi_path_item(req.environ, 'uuid') # confirm existence of resource provider so we get a reasonable # 404 instead of empty list try: resource_provider = objects.ResourceProvider.get_by_uuid( context, uuid) except exception.NotFound as exc: raise webob.exc.HTTPNotFound( _("Resource provider '%(rp_uuid)s' not found: %(error)s") % {'rp_uuid': uuid, 'error': exc}) allocations = objects.AllocationList.get_all_by_resource_provider_uuid( context, uuid) allocations_json = jsonutils.dumps( _serialize_allocations_for_resource_provider( allocations, resource_provider)) req.response.status = 200 req.response.body = encodeutils.to_utf8(allocations_json) req.response.content_type = 'application/json' return req.response
def get_tunnel_name_full(cls, network_type, local_ip, remote_ip):
    """Build a tunnel device name from the network type and endpoint IPs.

    Returns None when the remote IP cannot be rendered in hex. IP hex
    strings are hashed and truncated so the result fits within the
    device-name length limit.
    """
    network_type = network_type[:3]
    remote_ip_hex = cls.get_ip_in_hex(remote_ip)
    if not remote_ip_hex:
        return None
    # Remove length of network_type and two dashes
    hashlen = (n_const.DEVICE_NAME_MAX_LEN - len(network_type) - 2) // 2

    def _hashed(ip_hex):
        # sha1 needs bytes; truncate the digest to the per-side budget.
        return hashlib.sha1(
            encodeutils.to_utf8(ip_hex)).hexdigest()[:hashlen]

    local_ip_hex = cls.get_ip_in_hex(local_ip)
    return '%s-%s-%s' % (network_type,
                         _hashed(local_ip_hex),
                         _hashed(remote_ip_hex))
def update_traits_for_resource_provider(req):
    """PUT a collection of traits onto a resource provider.

    Returns 409 if the client's generation is stale and 400 if any
    named trait does not exist.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    data = util.extract_json(req.body, SET_TRAITS_FOR_RP_SCHEMA)
    rp_gen = data['resource_provider_generation']
    traits = data['traits']
    resource_provider = objects.ResourceProvider.get_by_uuid(
        context, uuid)
    # Optimistic-concurrency check: the client must echo the current
    # generation back or the write is rejected.
    if resource_provider.generation != rp_gen:
        raise webob.exc.HTTPConflict(
            _("Resource provider's generation already changed. Please update "
              "the generation and try again."),
            json_formatter=util.json_error_formatter)

    trait_objs = objects.TraitList.get_all(
        context, filters={'name_in': traits})
    traits_name = set([obj.name for obj in trait_objs])
    non_existed_trait = set(traits) - set(traits_name)
    if non_existed_trait:
        raise webob.exc.HTTPBadRequest(
            _("No such trait %s") % ', '.join(non_existed_trait))

    resource_provider.set_traits(trait_objs)

    response_body = _serialize_traits(trait_objs)
    response_body[
        'resource_provider_generation'] = resource_provider.generation
    req.response.status = 200
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
    req.response.content_type = 'application/json'
    return req.response
def list_usages(req):
    """GET a dictionary of resource provider usage by resource class.

    If the resource provider does not exist return a 404.

    On success return a 200 with an application/json representation of
    the usage dictionary.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')

    # The resource provider is fetched up front so a missing provider
    # yields a 404 (the usage query alone would just return an empty
    # list), and so its generation can be placed in the output.
    try:
        resource_provider = objects.ResourceProvider.get_by_uuid(
            context, uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("No resource provider with uuid %(uuid)s found: %(error)s") %
            {'uuid': uuid, 'error': exc})

    usage = objects.UsageList.get_all_by_resource_provider_uuid(
        context, uuid)

    serialized = _serialize_usages(resource_provider, usage)
    response = req.response
    response.content_type = 'application/json'
    response.body = encodeutils.to_utf8(jsonutils.dumps(serialized))
    return response
def home(req):
    """Return version discovery information for the service root."""
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    min_version = microversion.min_version_string()
    max_version = microversion.max_version_string()
    # NOTE(cdent): As sections of the api are added, links can be
    # added to this output to align with the guidelines at
    # http://specs.openstack.org/openstack/api-wg/guidelines/microversion_specification.html#version-discovery
    version_data = {
        'id': 'v%s' % min_version,
        'max_version': max_version,
        'min_version': min_version,
        # for now there is only ever one version, so it must be CURRENT
        'status': 'CURRENT',
        'links': [{
            # Point back to this same URL as the root of this version.
            # NOTE(cdent): We explicitly want this to be a relative-URL
            # representation of "this same URL", otherwise placement needs
            # to keep track of proxy addresses and the like, which we have
            # avoided thus far, in order to construct full URLs. Placement
            # is much easier to scale if we never track that stuff.
            'rel': 'self',
            'href': '',
        }],
    }
    version_json = jsonutils.dumps({'versions': [version_data]})
    req.response.body = encodeutils.to_utf8(version_json)
    req.response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        # Cache headers were added in microversion 1.15.
        req.response.cache_control = 'no-cache'
        req.response.last_modified = timeutils.utcnow(with_timezone=True)
    return req.response
def decrypt_data(data, key, iv_bit_count=IV_BIT_COUNT):
    """Inverse of encrypt_data: split off the IV, decrypt, unpad."""
    key = encodeutils.to_utf8(key)
    md5_key = hashlib.md5(key).hexdigest()
    # The IV was prepended to the ciphertext at encryption time.
    iv, ciphertext = data[:iv_bit_count], data[iv_bit_count:]
    cipher = AES.new(md5_key, AES.MODE_CBC, bytes(iv))
    return unpad_after_decryption(cipher.decrypt(bytes(ciphertext)))
def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN):
    """Construct an interface name based on the prefix and name.

    The interface name can not exceed the maximum length passed in. Longer
    names are hashed to help ensure uniqueness.
    """
    requested_name = prefix + name
    if len(requested_name) <= max_len:
        return requested_name

    # We can't just truncate because interfaces may be distinguished
    # by an ident at the end. A hash over the name should be unique.
    # Leave part of the interface name on for easier identification
    if (len(prefix) + INTERFACE_HASH_LEN) > max_len:
        raise ValueError(_("Too long prefix provided. New name would exceed "
                           "given length for an interface name."))

    namelen = max_len - len(prefix) - INTERFACE_HASH_LEN
    digest = hashlib.sha1(encodeutils.to_utf8(name)).hexdigest()
    new_name = '%(prefix)s%(truncated)s%(hash)s' % {
        'prefix': prefix,
        'truncated': name[:namelen],
        'hash': digest[:INTERFACE_HASH_LEN],
    }
    LOG.info(_LI("The requested interface name %(requested_name)s exceeds the "
                 "%(limit)d character limitation. It was shortened to "
                 "%(new_name)s to fit."),
             {'requested_name': requested_name,
              'limit': max_len,
              'new_name': new_name})
    return new_name
def list_allocation_candidates(req):
    """GET a JSON object with a list of allocation requests and a JSON object
    of provider summary objects

    On success return a 200 and an application/json body representing
    a collection of allocation requests and provider summaries
    """
    context = req.environ['placement.context']
    schema = _GET_SCHEMA_1_10
    util.validate_query_params(req, schema)

    # 'resources' is required by the schema, so direct access is safe.
    resources = util.normalize_resources_qs_param(req.GET['resources'])
    filters = {
        'resources': resources,
    }

    try:
        cands = rp_obj.AllocationCandidates.get_by_filters(context, filters)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid resource class in resources parameter: %(error)s') %
            {'error': exc})

    response = req.response
    trx_cands = _transform_allocation_candidates(cands)
    json_data = jsonutils.dumps(trx_cands)
    response.body = encodeutils.to_utf8(json_data)
    response.content_type = 'application/json'
    return response
def generate_identifier(result):
    """
    Returns a fixed length identifier based on a hash of a combined set of
    playbook/task values which are as close as we can guess to unique for
    each task.
    """
    task = result.task

    # Determine the playbook file path to use for the ID
    playbook_file = (task.playbook.path
                     if (task.playbook and task.playbook.path) else '')
    play_path = u'%s.%s' % (playbook_file, task.play.name)

    # Determine the task file path to use for the ID
    task_file = task.file.path if (task.file and task.file.path) else ''
    task_path = u'%s.%s' % (task_file, task.name)

    # Combine both of the above for a full path, then hash it to get a
    # fixed-length identifier.
    identifier_path = u'%s.%s' % (play_path, task_path)
    return hashlib.sha1(encodeutils.to_utf8(identifier_path)).hexdigest()
def list_resource_providers(req):
    """GET a list of resource providers.

    On success return a 200 and an application/json body representing
    a collection of resource providers.
    """
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # Pick the query-string schema matching the requested microversion.
    schema = GET_RPS_SCHEMA_1_0
    if want_version == (1, 3):
        schema = GET_RPS_SCHEMA_1_3
    if want_version >= (1, 4):
        schema = GET_RPS_SCHEMA_1_4
    try:
        jsonschema.validate(dict(req.GET), schema,
                            format_checker=jsonschema.FormatChecker())
    except jsonschema.ValidationError as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid query string parameters: %(exc)s') %
            {'exc': exc})

    filters = {}
    for attr in ['uuid', 'name', 'member_of']:
        if attr in req.GET:
            value = req.GET[attr]
            # special case member_of to always make its value a
            # list, either by accepting the single value, or if it
            # starts with 'in:' splitting on ','.
            # NOTE(cdent): This will all change when we start using
            # JSONSchema validation of query params.
            if attr == 'member_of':
                if value.startswith('in:'):
                    value = value[3:].split(',')
                else:
                    value = [value]
                # Make sure the values are actually UUIDs.
                for aggr_uuid in value:
                    if not uuidutils.is_uuid_like(aggr_uuid):
                        raise webob.exc.HTTPBadRequest(
                            _('Invalid uuid value: %(uuid)s') %
                            {'uuid': aggr_uuid})
            filters[attr] = value
    if 'resources' in req.GET:
        resources = _normalize_resources_qs_param(req.GET['resources'])
        filters['resources'] = resources
    try:
        resource_providers = objects.ResourceProviderList.get_all_by_filters(
            context, filters)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid resource class in resources parameter: %(error)s') %
            {'error': exc})

    response = req.response
    response.body = encodeutils.to_utf8(
        jsonutils.dumps(_serialize_providers(req.environ, resource_providers)))
    response.content_type = 'application/json'
    return response
def list_allocation_candidates(req):
    """GET a JSON object with a list of allocation requests and a JSON object
    of provider summary objects

    On success return a 200 and an application/json body representing
    a collection of allocation requests and provider summaries
    """
    context = req.environ['placement.context']
    context.can(policies.LIST)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    # Pick the newest query-string schema the requested microversion
    # supports.
    get_schema = schema.GET_SCHEMA_1_10
    if want_version.matches((1, 25)):
        get_schema = schema.GET_SCHEMA_1_25
    elif want_version.matches((1, 21)):
        get_schema = schema.GET_SCHEMA_1_21
    elif want_version.matches((1, 17)):
        get_schema = schema.GET_SCHEMA_1_17
    elif want_version.matches((1, 16)):
        get_schema = schema.GET_SCHEMA_1_16
    util.validate_query_params(req, get_schema)

    requests = util.parse_qs_request_groups(req)
    limit = req.GET.getall('limit')
    # JSONschema has already confirmed that limit has the form
    # of an integer.
    if limit:
        limit = int(limit[0])

    group_policy = req.GET.getall('group_policy') or None
    # Schema ensures we get either "none" or "isolate"
    if group_policy:
        group_policy = group_policy[0]
    else:
        # group_policy is required if more than one numbered request group was
        # specified.
        if len([rg for rg in requests.values() if rg.use_same_provider]) > 1:
            raise webob.exc.HTTPBadRequest(
                _('The "group_policy" parameter is required when specifying '
                  'more than one "resources{N}" parameter.'))

    try:
        cands = rp_obj.AllocationCandidates.get_by_requests(
            context, requests, limit=limit, group_policy=group_policy)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid resource class in resources parameter: %(error)s') %
            {'error': exc})
    except exception.TraitNotFound as exc:
        raise webob.exc.HTTPBadRequest(six.text_type(exc))

    response = req.response
    trx_cands = _transform_allocation_candidates(cands, requests, want_version)
    json_data = jsonutils.dumps(trx_cands)
    response.body = encodeutils.to_utf8(json_data)
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        # Cache headers were added in microversion 1.15.
        response.cache_control = 'no-cache'
        response.last_modified = timeutils.utcnow(with_timezone=True)
    return response
def get_interface_mac(interface):
    """Return the MAC address of ``interface`` as 'aa:bb:cc:dd:ee:ff'.

    :param interface: device name; truncated to DEVICE_NAME_MAX_LEN
    :returns: colon-separated lowercase hex MAC string
    """
    MAC_START = 18
    MAC_END = 24
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        dev = interface[:constants.DEVICE_NAME_MAX_LEN]
        dev = encodeutils.to_utf8(dev)
        # 0x8927 is SIOCGIFHWADDR; the hardware address sits at bytes
        # [MAC_START:MAC_END] of the returned ifreq struct.
        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', dev))
    finally:
        # The socket exists only to supply an fd for the ioctl; close
        # it so each call does not leak a file descriptor.
        s.close()
    return ':'.join(["%02x" % b for b in iterbytes(info[MAC_START:MAC_END])])
def get_interface_mac(interface):
    """Return the MAC address of ``interface`` as 'aa:bb:cc:dd:ee:ff'.

    :param interface: device name; truncated to DEVICE_NAME_MAX_LEN
    :returns: colon-separated lowercase hex MAC string
    """
    MAC_START = 18
    MAC_END = 24
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        dev = interface[:constants.DEVICE_NAME_MAX_LEN]
        dev = encodeutils.to_utf8(dev)
        # 0x8927 is SIOCGIFHWADDR; the hardware address sits at bytes
        # [MAC_START:MAC_END] of the returned ifreq struct.
        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', dev))
    finally:
        # Close the fd-supplying socket instead of leaking one per call.
        s.close()
    # bytearray yields ints on both Python 2 and 3; the previous
    # ord(char) broke on Python 3 where iterating bytes already yields
    # ints (ord() then raises TypeError).
    return ':'.join('%02x' % octet
                    for octet in bytearray(info[MAC_START:MAC_END]))
def create_resource_provider(req):
    """POST to create a resource provider.

    On success return a 201 response with an empty body
    (microversions 1.0 - 1.19) or a 200 response with a
    payload representing the newly created resource provider
    (microversions 1.20 - latest), and a location header
    pointing to the resource provider.
    """
    context = req.environ['placement.context']
    context.can(policies.CREATE)
    schema = rp_schema.POST_RESOURCE_PROVIDER_SCHEMA
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    if want_version.matches((1, 14)):
        schema = rp_schema.POST_RP_SCHEMA_V1_14
    data = util.extract_json(req.body, schema)

    try:
        if data.get('uuid'):
            # Normalize UUID with no proper dashes into dashed one
            # with format {8}-{4}-{4}-{4}-{12}
            data['uuid'] = str(uuidlib.UUID(data['uuid']))
        else:
            data['uuid'] = uuidutils.generate_uuid()

        resource_provider = rp_obj.ResourceProvider(context, **data)
        resource_provider.create()
    except db_exc.DBDuplicateEntry as exc:
        # Whether exc.columns has one or two entries (in the event
        # of both fields being duplicates) appears to be database
        # dependent, so going with the complete solution here.
        duplicate = ', '.join(['%s: %s' % (column, data[column])
                               for column in exc.columns])
        raise webob.exc.HTTPConflict(
            _('Conflicting resource provider %(duplicate)s already exists.') %
            {'duplicate': duplicate},
            comment=errors.DUPLICATE_NAME)
    except exception.ObjectActionError as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to create resource provider "%(name)s", %(rp_uuid)s: '
              '%(error)s') %
            {'name': data['name'], 'rp_uuid': data['uuid'], 'error': exc})

    req.response.location = util.resource_provider_url(
        req.environ, resource_provider)
    if want_version.matches(min_version=(1, 20)):
        # 1.20+ returns the created provider in the body with cache
        # headers set.
        req.response.body = encodeutils.to_utf8(jsonutils.dumps(
            _serialize_provider(req.environ, resource_provider,
                                want_version)))
        req.response.content_type = 'application/json'
        modified = util.pick_last_modified(None, resource_provider)
        req.response.last_modified = modified
        req.response.cache_control = 'no-cache'
    else:
        req.response.status = 201
        req.response.content_type = None
    return req.response
def _read_privatekey(privatekey_pem, passphrase=None):
    """Deserialize a PEM private key, optionally passphrase protected.

    :param privatekey_pem: PEM text of the key (expected to be a str;
        it is ASCII-encoded before parsing, so bytes input would fail)
    :param passphrase: optional passphrase (str or bytes)
    :returns: the loaded private key object
    :raises exceptions.NeedsPassphrase: on any load failure
    """
    if passphrase is not None:
        passphrase = encodeutils.to_utf8(passphrase)
    privatekey_pem = privatekey_pem.encode('ascii')

    try:
        return serialization.load_pem_private_key(privatekey_pem, passphrase,
                                                  backends.default_backend())
    except Exception:
        # NOTE(review): every load failure (malformed PEM, wrong
        # passphrase, unsupported key) is reported as NeedsPassphrase —
        # confirm this blanket mapping is intentional.
        raise exceptions.NeedsPassphrase
def fake_request(self, method, url, body, headers):
    """Dispatch a request through self.api and wrap the WSGI result."""
    req = webob.Request.blank(url.path)
    req.body = encodeutils.to_utf8(body)
    req.method = method
    webob_res = req.get_response(self.api)
    return test_utils.FakeHTTPResponse(status=webob_res.status_int,
                                       headers=webob_res.headers,
                                       data=webob_res.body)
def __call__(self, req):
    """WSGI entry point for the EC2-style metadata service."""
    if os.path.normpath(req.path_info) == "/":
        # Root path: emit the list of supported metadata versions.
        resp = base.ec2_md_print(base.VERSIONS + ["latest"])
        req.response.body = encodeutils.to_utf8(resp)
        req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
        return req.response

    # Convert webob.headers.EnvironHeaders to a dict and mask any sensitive
    # details from the logs.
    if CONF.debug:
        headers = {k: req.headers[k] for k in req.headers}
        LOG.debug('Metadata request headers: %s',
                  strutils.mask_dict_password(headers))
    if CONF.neutron.service_metadata_proxy:
        if req.headers.get('X-Metadata-Provider'):
            meta_data = self._handle_instance_id_request_from_lb(req)
        else:
            meta_data = self._handle_instance_id_request(req)
    else:
        if req.headers.get('X-Instance-ID'):
            LOG.warning(
                "X-Instance-ID present in request headers. The "
                "'service_metadata_proxy' option must be "
                "enabled to process this header.")
        meta_data = self._handle_remote_ip_request(req)

    if meta_data is None:
        raise webob.exc.HTTPNotFound()

    try:
        data = meta_data.lookup(req.path_info)
    except base.InvalidMetadataPath:
        raise webob.exc.HTTPNotFound()

    # Some metadata entries are handlers themselves; delegate to them.
    if callable(data):
        return data(req, meta_data)

    resp = base.ec2_md_print(data)
    req.response.body = encodeutils.to_utf8(resp)

    req.response.content_type = meta_data.get_mimetype()
    return req.response
def get_total_usages(req):
    """GET the sum of usages for a project or a project/user.

    On success return a 200 and an application/json body representing the
    sum/total of usages.
    Return 404 Not Found if the wanted microversion does not match.
    """
    context = req.environ['placement.context']
    # TODO(mriedem): When we support non-admins to use GET /usages we
    # should pass the project_id (and user_id?) from the query parameters
    # into context.can() for the target.
    context.can(policies.TOTAL_USAGES)

    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    util.validate_query_params(req, schema.GET_USAGES_SCHEMA_1_9)

    # TODO(edleafe): Ensure that these are UUIDs, and not integer IDs
    project_id = req.GET.get('project_id')
    user_id = req.GET.get('user_id')

    usages = usage_obj.get_all_by_project_user(context, project_id,
                                               user_id=user_id)

    response = req.response
    usages_dict = {
        'usages': {resource.resource_class: resource.usage
                   for resource in usages}
    }
    response.body = encodeutils.to_utf8(jsonutils.dumps(usages_dict))
    req.response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        req.response.cache_control = 'no-cache'
        # While it would be possible to generate a last-modified time
        # based on the collection of allocations that result in a usage
        # value (with some spelunking in the SQL) that doesn't align with
        # the question that is being asked in a request for usages: What
        # is the usage, now? So the last-modified time is set to utcnow.
        req.response.last_modified = timeutils.utcnow(with_timezone=True)
    return req.response
def list_usages(req):
    """GET a dictionary of resource provider usage by resource class.

    If the resource provider does not exist return a 404.

    On success return a 200 with an application/json representation of
    the usage dictionary.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # Resource provider object needed for two things: If it is
    # NotFound we'll get a 404 here, which needs to happen because
    # get_all_by_resource_provider_uuid can return an empty list.
    # It is also needed for the generation, used in the outgoing
    # representation.
    try:
        resource_provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("No resource provider with uuid %(uuid)s found: %(error)s") % {
                'uuid': uuid,
                'error': exc
            })

    usage = rp_obj.UsageList.get_all_by_resource_provider_uuid(context, uuid)

    response = req.response
    response.body = encodeutils.to_utf8(
        jsonutils.dumps(_serialize_usages(resource_provider, usage)))
    req.response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        req.response.cache_control = 'no-cache'
        # While it would be possible to generate a last-modified time
        # based on the collection of allocations that result in a usage
        # value (with some spelunking in the SQL) that doesn't align with
        # the question that is being asked in a request for usages: What
        # is the usage, now? So the last-modified time is set to utcnow.
        req.response.last_modified = timeutils.utcnow(with_timezone=True)
    return req.response
def test_migrate_v3_unicode(self):
    """A unicode destination XML must reach libvirt UTF-8 encoded."""
    dest_xml_template = "<domain type='qemu'><name>%s</name></domain>"
    name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
    dest_xml = dest_xml_template % name
    expect_dest_xml = dest_xml_template % encodeutils.to_utf8(name)

    self.guest.migrate('an-uri', flags=1, migrate_uri='dest-uri',
                       migrate_disks=[u"disk1", u"disk2"],
                       destination_xml=dest_xml,
                       bandwidth=2)
    # Both destination_xml and persistent_xml are expected to carry the
    # encoded document.
    self.domain.migrateToURI3.assert_called_once_with(
        'an-uri', flags=1, params={
            'migrate_uri': 'dest-uri',
            'migrate_disks': ['disk1', 'disk2'],
            'destination_xml': expect_dest_xml,
            'persistent_xml': expect_dest_xml,
            'bandwidth': 2
        })
def publish(self, topic, messages, key=None):
    """Takes messages and puts them on the supplied kafka topic

    :param topic: kafka topic name
    :param messages: a single message or a list of messages
    :param key: optional partitioning key; defaults to the current time
        in milliseconds
    :raises Exception: re-raises the send error after one metadata-reset
        retry has also failed
    """
    if not isinstance(messages, list):
        messages = [messages]
    first = True
    success = False
    if key is None:
        key = int(time.time() * 1000)
    messages = [encodeutils.to_utf8(m) for m in messages]
    key = bytes(str(key), 'utf-8') if PY3 else str(key)
    while not success:
        try:
            self._producer.send_messages(topic, key, *messages)
            success = True
        except Exception:
            if first:
                # This is a warning because of all the other warning and
                # error messages that are logged in this case. This way
                # someone looking at the log file can see the retry.
                # NOTE: warning() replaces the deprecated warn() alias.
                log.warning(
                    "Failed send on topic {}, clear metadata and retry".
                    format(topic))
                # If Kafka is running in Kubernetes, the cached metadata
                # contains the IP Address of the Kafka pod. If the Kafka
                # pod has restarted, the IP Address will have changed
                # which would have caused the first publish to fail. So,
                # clear the cached metadata and retry the publish
                self._kafka.reset_topic_metadata(topic)
                first = False
                continue
            log.exception('Error publishing to {} topic.'.format(topic))
            raise
def add_bgp_peer(self, speaker_as, peer_ip, peer_as, auth_type='none',
                 password=None):
    """Add a BGP peer to the speaker running for ``speaker_as``."""
    speaker = self.cache.get_bgp_speaker(speaker_as)
    if not speaker:
        raise bgp_driver_exc.BgpSpeakerNotAdded(local_as=speaker_as,
                                                rtid=self.routerid)

    # Reject bad peer parameters before touching the speaker.
    utils.validate_as_num('remote_as', peer_as)
    utils.validate_string(peer_ip)
    utils.validate_auth(auth_type, password)

    encoded_password = (encodeutils.to_utf8(password)
                        if password is not None else None)

    # Notify Ryu about the BGP peer addition.
    speaker.neighbor_add(address=peer_ip,
                         remote_as=peer_as,
                         password=encoded_password,
                         connect_mode=CONNECT_MODE_ACTIVE)
    LOG.info(_LI('Added BGP Peer %(peer)s for remote_as=%(as)d to '
                 'BGP Speaker running for local_as=%(local_as)d.'),
             {'peer': peer_ip, 'as': peer_as, 'local_as': speaker_as})
def list_for_resource_provider(req):
    """List allocations associated with a resource provider."""
    # TODO(cdent): On a shared resource provider (for example a
    # giant disk farm) this list could get very long. At the moment
    # we have no facility for limiting the output. Given that we are
    # using a dict of dicts for the output we are potentially limiting
    # ourselves in terms of sorting and filtering.
    context = req.environ['placement.context']
    context.can(policies.RP_ALLOC_LIST)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    uuid = util.wsgi_path_item(req.environ, 'uuid')

    # Confirm the provider exists so the caller gets a reasonable 404
    # instead of an empty list.
    try:
        rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("Resource provider '%(rp_uuid)s' not found: %(error)s") %
            {'rp_uuid': uuid, 'error': exc})

    allocs = rp_obj.AllocationList.get_all_by_resource_provider(context, rp)
    output = _serialize_allocations_for_resource_provider(
        allocs, rp, want_version)
    last_modified = _last_modified_from_allocations(allocs, want_version)

    response = req.response
    response.status = 200
    response.body = encodeutils.to_utf8(jsonutils.dumps(output))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response
def list_allocation_candidates(req):
    """GET a JSON object with a list of allocation requests and a JSON
    object of provider summary objects

    On success return a 200 and an application/json body representing
    a collection of allocation requests and provider summaries
    """
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # Pick the query schema matching the requested microversion.
    get_schema = (schema.GET_SCHEMA_1_16 if want_version.matches((1, 16))
                  else schema.GET_SCHEMA_1_10)
    util.validate_query_params(req, get_schema)

    requests = util.parse_qs_request_groups(req.GET)
    limit = req.GET.getall('limit')
    # JSONschema has already confirmed that limit has the form
    # of an integer.
    if limit:
        limit = int(limit[0])

    try:
        cands = rp_obj.AllocationCandidates.get_by_requests(
            context, requests, limit)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid resource class in resources parameter: %(error)s') %
            {'error': exc})

    response = req.response
    trx_cands = _transform_allocation_candidates(cands, want_version)
    response.body = encodeutils.to_utf8(jsonutils.dumps(trx_cands))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.cache_control = 'no-cache'
        response.last_modified = timeutils.utcnow(with_timezone=True)
    return response
def update_traits_for_resource_provider(req):
    """PUT a collection of traits to replace those on a resource provider.

    On success return a 200 with the refreshed set of traits and the
    provider's generation; 409 on a generation conflict; 400 when a
    named trait does not exist.
    """
    context = req.environ['placement.context']
    context.can(policies.RP_TRAIT_UPDATE)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    data = util.extract_json(req.body, schema.SET_TRAITS_FOR_RP_SCHEMA)
    rp_gen = data['resource_provider_generation']
    traits = data['traits']
    resource_provider = rp_obj.ResourceProvider.get_by_uuid(
        context, uuid)

    # A stale generation means the client raced with a concurrent
    # update; it must re-read and retry.
    if resource_provider.generation != rp_gen:
        raise webob.exc.HTTPConflict(
            _("Resource provider's generation already changed. Please update "
              "the generation and try again."),
            json_formatter=util.json_error_formatter,
            comment=errors.CONCURRENT_UPDATE)

    trait_objs = rp_obj.TraitList.get_all(
        context, filters={'name_in': traits})
    # Use a set comprehension directly; the original wrapped a list
    # comprehension in set() and then re-wrapped the result in another
    # redundant set() for the difference below.
    traits_name = {obj.name for obj in trait_objs}
    non_existed_trait = set(traits) - traits_name
    if non_existed_trait:
        raise webob.exc.HTTPBadRequest(
            _("No such trait %s") % ', '.join(non_existed_trait))

    resource_provider.set_traits(trait_objs)

    response_body, last_modified = _serialize_traits(trait_objs, want_version)
    response_body[
        'resource_provider_generation'] = resource_provider.generation
    if want_version.matches((1, 15)):
        req.response.last_modified = last_modified
        req.response.cache_control = 'no-cache'
    req.response.status = 200
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
    req.response.content_type = 'application/json'
    return req.response
def get_total_usages(req):
    """GET the sum of usages for a project or a project/user.

    On success return a 200 and an application/json body representing
    the sum/total of usages. Return 404 Not Found if the wanted
    microversion does not match.
    """
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    util.validate_query_params(req, GET_USAGES_SCHEMA_1_9)

    project_id = req.GET.get('project_id')
    user_id = req.GET.get('user_id')

    usages = rp_obj.UsageList.get_all_by_project_user(context, project_id,
                                                      user_id=user_id)

    usages_dict = {'usages': {resource.resource_class: resource.usage
                              for resource in usages}}

    response = req.response
    response.body = encodeutils.to_utf8(jsonutils.dumps(usages_dict))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.cache_control = 'no-cache'
        # Usage answers "what is the usage, now?", so the last-modified
        # time is simply the current time rather than anything derived
        # from the underlying allocations.
        response.last_modified = timeutils.utcnow(with_timezone=True)
    return response
def get_resource_provider(req):
    """Get a single resource provider.

    On success return a 200 with an application/json body representing
    the resource provider.
    """
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    # The containing application will catch a not found here.
    context = req.environ['placement.context']
    provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid)

    serialized = _serialize_provider(req.environ, provider, want_version)
    response = req.response
    response.body = encodeutils.to_utf8(jsonutils.dumps(serialized))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.last_modified = util.pick_last_modified(None, provider)
        response.cache_control = 'no-cache'
    return response
def get_resource_class(req):
    """Get a single resource class.

    On success return a 200 with an application/json body representing
    the resource class.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    # The containing application will catch a not found here.
    rc = rp_obj.ResourceClass.get_by_name(context, name)

    response = req.response
    response.body = encodeutils.to_utf8(
        jsonutils.dumps(_serialize_resource_class(req.environ, rc)))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.cache_control = 'no-cache'
        # Non-custom resource classes will return None from
        # pick_last_modified, so the 'or' causes utcnow to be used.
        response.last_modified = (
            util.pick_last_modified(None, rc) or
            timeutils.utcnow(with_timezone=True))
    return response
def execute(cmd, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False,
            log_fail_as_error=True, extra_ok_codes=None,
            run_as_root=False, do_decode=True):
    """Run a command in a subprocess and return its decoded output.

    :param cmd: the command to run; rewritten by create_process
    :param process_input: text fed to the child's stdin (UTF-8 encoded
        before writing); None sends nothing
    :param addl_env: extra environment variables for the child
    :param check_exit_code: when True, raise ProcessExecutionError on a
        non-zero (and not otherwise excused) exit code
    :param return_stderr: when True return (stdout, stderr) instead of
        stdout only
    :param log_fail_as_error: log failures at ERROR instead of DEBUG
    :param extra_ok_codes: exit codes, besides 0, treated as success
    :param run_as_root: NOTE(review): accepted but not referenced in
        this body — presumably consumed by create_process via cmd
        rewriting; confirm against create_process
    :param do_decode: NOTE(review): accepted but not referenced in this
        body; output is always passed through safe_decode_utf8 here
    :raises exceptions.ProcessExecutionError: on failing exit code when
        check_exit_code is set
    """
    if process_input is not None:
        # Child stdin must be bytes.
        _process_input = encodeutils.to_utf8(process_input)
    else:
        _process_input = None
    obj, cmd = create_process(cmd, addl_env=addl_env, tpool_proxy=False)
    _stdout, _stderr = avoid_blocking_call(obj.communicate, _process_input)
    obj.stdin.close()
    _stdout = helpers.safe_decode_utf8(_stdout)
    _stderr = helpers.safe_decode_utf8(_stderr)

    # Full multi-line report used both for logging and for the
    # exception message.
    m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n"
          "Stdout: %(stdout)s\nStderr: %(stderr)s") % \
        {'cmd': cmd,
         'code': obj.returncode,
         'stdin': process_input or '',
         'stdout': _stdout,
         'stderr': _stderr}

    extra_ok_codes = extra_ok_codes or []
    if obj.returncode and obj.returncode in extra_ok_codes:
        # An excused exit code is erased entirely so the checks below
        # treat the run as a success.
        obj.returncode = None

    log_msg = m.strip().replace('\n', '; ')
    if obj.returncode and log_fail_as_error:
        LOG.error(log_msg)
    else:
        LOG.debug(log_msg)

    if obj.returncode and check_exit_code:
        raise exceptions.ProcessExecutionError(m, returncode=obj.returncode)

    return (_stdout, _stderr) if return_stderr else _stdout
def list_for_consumer(req):
    """List allocations associated with a consumer."""
    context = req.environ['placement.context']
    context.can(policies.ALLOC_LIST)
    consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # NOTE(cdent): There is no way for a 404 to be returned here,
    # only an empty result. We do not have a way to validate a
    # consumer id.
    allocations = alloc_obj.get_all_by_consumer_id(context, consumer_id)

    output = _serialize_allocations_for_consumer(allocations, want_version)
    last_modified = _last_modified_from_allocations(allocations, want_version)

    response = req.response
    response.status = 200
    response.body = encodeutils.to_utf8(jsonutils.dumps(output))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response
def update_resource_provider(req):
    """PUT to update a single resource provider.

    On success return a 200 response with a representation of the
    updated resource provider.
    """
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    context = req.environ['placement.context']

    # The containing application will catch a not found here.
    resource_provider = objects.ResourceProvider.get_by_uuid(context, uuid)

    data = util.extract_json(req.body, PUT_RESOURCE_PROVIDER_SCHEMA)
    resource_provider.name = data['name']

    try:
        resource_provider.save()
    except db_exc.DBDuplicateEntry as exc:
        # Another provider already holds the requested name.
        raise webob.exc.HTTPConflict(
            _('Conflicting resource provider already exists: %(error)s') %
            {'error': exc},
            json_formatter=util.json_error_formatter)
    except exception.ObjectActionError as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to save resource provider %(rp_uuid)s: %(error)s') %
            {'rp_uuid': uuid, 'error': exc},
            json_formatter=util.json_error_formatter)

    response = req.response
    response.body = encodeutils.to_utf8(
        jsonutils.dumps(_serialize_provider(req.environ, resource_provider)))
    response.status = 200
    response.content_type = 'application/json'
    return response
def list_traits_for_resource_provider(req):
    """List the traits of the resource provider identified in the path."""
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')

    # The provider lookup serves two purposes: a missing provider 404s
    # here (get_all_by_resource_provider would just return an empty
    # list), and the provider supplies the generation placed in the
    # outgoing representation.
    try:
        rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("No resource provider with uuid %(uuid)s found: %(error)s") %
            {'uuid': uuid, 'error': exc})

    traits = rp_obj.TraitList.get_all_by_resource_provider(context, rp)
    response_body = _serialize_traits(traits)
    response_body["resource_provider_generation"] = rp.generation

    response = req.response
    response.status = 200
    response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
    response.content_type = 'application/json'
    return response
def list_resource_providers(req):
    """GET a list of resource providers.

    On success return a 200 and an application/json body representing
    a collection of resource providers.
    """
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # Later microversions accept more query parameters.
    schema = GET_RPS_SCHEMA_1_0
    if want_version == (1, 3):
        schema = GET_RPS_SCHEMA_1_3
    if want_version >= (1, 4):
        schema = GET_RPS_SCHEMA_1_4

    try:
        jsonschema.validate(dict(req.GET), schema,
                            format_checker=jsonschema.FormatChecker())
    except jsonschema.ValidationError as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid query string parameters: %(exc)s') % {'exc': exc},
            json_formatter=util.json_error_formatter)

    filters = {}
    for attr in ['uuid', 'name', 'member_of']:
        if attr not in req.GET:
            continue
        value = req.GET[attr]
        if attr == 'member_of':
            # member_of is always normalized to a list: either the one
            # provided value, or — with an 'in:' prefix — a
            # comma-separated set of values.
            # NOTE(cdent): This will all change when we start using
            # JSONSchema validation of query params.
            if value.startswith('in:'):
                value = value[3:].split(',')
            else:
                value = [value]
            # Make sure the values are actually UUIDs.
            for aggr_uuid in value:
                if not uuidutils.is_uuid_like(aggr_uuid):
                    raise webob.exc.HTTPBadRequest(
                        _('Invalid uuid value: %(uuid)s') %
                        {'uuid': aggr_uuid},
                        json_formatter=util.json_error_formatter)
        filters[attr] = value

    if 'resources' in req.GET:
        filters['resources'] = _normalize_resources_qs_param(
            req.GET['resources'])

    try:
        resource_providers = objects.ResourceProviderList.get_all_by_filters(
            context, filters)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid resource class in resources parameter: %(error)s') %
            {'error': exc},
            json_formatter=util.json_error_formatter)

    response = req.response
    response.body = encodeutils.to_utf8(
        jsonutils.dumps(_serialize_providers(req.environ,
                                             resource_providers)))
    response.content_type = 'application/json'
    return response
def migrate(self, destination, migrate_uri=None, migrate_disks=None,
            destination_xml=None, flags=0, bandwidth=0):
    """Migrate guest object from its current host to the destination

    :param destination: URI of host destination where guest will be
                        migrate
    :param migrate_uri: URI for invoking the migration
    :param migrate_disks: List of disks to be migrated
    :param destination_xml: The guest XML to be used on the target host
    :param flags: May be one of more of the following:
       VIR_MIGRATE_LIVE Do not pause the VM during migration
       VIR_MIGRATE_PEER2PEER Direct connection between source &
                             destination hosts
       VIR_MIGRATE_TUNNELLED Tunnel migration data over the
                             libvirt RPC channel
       VIR_MIGRATE_PERSIST_DEST If the migration is successful,
                                persist the domain on the
                                destination host.
       VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
                                   undefine the domain on the
                                   source host.
       VIR_MIGRATE_NON_SHARED_INC Migration with non-shared
                                  storage with incremental disk
                                  copy
       VIR_MIGRATE_AUTO_CONVERGE Slow down domain to make sure it does
                                 not change its memory faster than a
                                 hypervisor can transfer the changed
                                 memory to the destination host
       VIR_MIGRATE_POSTCOPY Tell libvirt to enable post-copy migration
       VIR_MIGRATE_TLS Use QEMU-native TLS
    :param bandwidth: The maximum bandwidth in MiB/s
    """
    # migrateToURI3 pulls these values out of the `params` dict rather
    # than taking them as positional arguments.
    params = {'bandwidth': bandwidth}
    if destination_xml:
        params['destination_xml'] = destination_xml
    if migrate_disks:
        params['migrate_disks'] = migrate_disks
    if migrate_uri:
        params['migrate_uri'] = migrate_uri

    # Due to a quirk in the libvirt python bindings,
    # VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
    # interpreted as "block migrate all writable disks" rather than
    # "don't block migrate any disks", which would include attached
    # volumes and could corrupt data on them. Drop the flag when there
    # is nothing to block migrate.
    if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
            not params.get('migrate_disks')):
        flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC

    # The Python2 libvirt bindings reject unicode strings in params.
    if six.PY2:
        params = {key: encodeutils.to_utf8(value)
                  if isinstance(value, six.text_type) else value
                  for key, value in params.items()}

    self._domain.migrateToURI3(destination, params=params, flags=flags)
def encode_body(body):
    """Encode unicode body.

    WebOb requires to encode unicode body used to update response body.
    """
    encoded = encodeutils.to_utf8(body)
    return encoded
def _get_schema_sha1(schema_raw):
    """Return the hex SHA-1 digest of the UTF-8 encoded schema text."""
    encoded = encodeutils.to_utf8(schema_raw)
    return hashlib.sha1(encoded).hexdigest()
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param cwd: Set the current working directory
    :type cwd: string
    :param process_input: Send to opened process.
    :type process_input: string or bytes
    :param env_variables: Environment variables and their values that
                          will be set for the process.
    :type env_variables: dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes. Defaults to [0]. Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to
                           True, wait a short amount of time before
                           retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int.  (Should be logging.DEBUG or logging.INFO)
    :param log_errors: Should stdout and stderr be logged on error?
                       Possible values are
                       :py:attr:`~.LogErrors.DEFAULT`,
                       :py:attr:`~.LogErrors.FINAL`, or
                       :py:attr:`~.LogErrors.ALL`. Note that the
                       values :py:attr:`~.LogErrors.FINAL` and
                       :py:attr:`~.LogErrors.ALL` are **only**
                       relevant when multiple attempts of command
                       execution are requested using the ``attempts``
                       parameter.
    :type log_errors: :py:class:`~.LogErrors`
    :param binary: On Python 3, return stdout and stderr as bytes if
                   binary is True, as Unicode otherwise.
    :type binary: boolean
    :param on_execute: This function will be called upon process
                       creation with the object as a argument. The
                       Purpose of this is to allow the caller of
                       `processutils.execute` to track process creation
                       asynchronously.
    :type on_execute: function(:class:`subprocess.Popen`)
    :param on_completion: This function will be called upon process
                          completion with the object as a argument. The
                          Purpose of this is to allow the caller of
                          `processutils.execute` to track process
                          completion asynchronously.
    :type on_completion: function(:class:`subprocess.Popen`)
    :param preexec_fn: This function will be called in the child process
                       just before the child is executed. WARNING: On
                       windows, we silently drop this preexec_fn as it
                       is not supported by subprocess.Popen on windows
                       (throws a ValueError)
    :type preexec_fn: function()
    :param prlimit: Set resource limits on the child process. See below
                    for a detailed description.
    :type prlimit: :class:`ProcessLimits`
    :param python_exec: The python executable to use for enforcing
                        prlimits. If this is not set it will default to
                        use sys.executable.
    :type python_exec: string
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on receiving unknown
             arguments
    :raises: :class:`ProcessExecutionError`
    :raises: :class:`OSError`

    The *prlimit* parameter can be used to set resource limits on the
    child process. If this parameter is used, the child process will be
    spawned by a wrapper process which will set limits before spawning
    the command.

    .. versionchanged:: 3.17
       *process_input* can now be either bytes or string on python3.

    .. versionchanged:: 3.4
       Added *prlimit* optional parameter.

    .. versionchanged:: 1.5
       Added *cwd* optional parameter.

    .. versionchanged:: 1.9
       Added *binary* optional parameter. On Python 3, *stdout* and
       *stderr* are now returned as Unicode strings by default, or bytes
       if *binary* is true.

    .. versionchanged:: 2.1
       Added *on_execute* and *on_completion* optional parameters.

    .. versionchanged:: 2.3
       Added *preexec_fn* optional parameter.
    """
    cwd = kwargs.pop('cwd', None)
    process_input = kwargs.pop('process_input', None)
    if process_input is not None:
        # Child stdin is written as bytes.
        process_input = encodeutils.to_utf8(process_input)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    if log_errors is None:
        log_errors = LogErrors.DEFAULT
    binary = kwargs.pop('binary', False)
    on_execute = kwargs.pop('on_execute', None)
    on_completion = kwargs.pop('on_completion', None)
    preexec_fn = kwargs.pop('preexec_fn', None)
    prlimit = kwargs.pop('prlimit', None)
    python_exec = kwargs.pop('python_exec', sys.executable)

    # Normalize check_exit_code to a list of allowed codes; a bool
    # means "check against [0]" or "ignore exit codes entirely".
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    # Everything should have been popped by now.
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if isinstance(log_errors, six.integer_types):
        log_errors = LogErrors(log_errors)
    if not isinstance(log_errors, LogErrors):
        raise InvalidArgumentError(_('Got invalid arg log_errors: %r') %
                                   log_errors)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        if shell:
            # root helper has to be injected into the command string
            cmd = [' '.join((root_helper, cmd[0]))] + list(cmd[1:])
        else:
            # root helper has to be tokenized into argument list
            cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(c) for c in cmd]

    if prlimit:
        if os.name == 'nt':
            LOG.log(loglevel,
                    _('Process resource limits are ignored as '
                      'this feature is not supported on Windows.'))
        else:
            # Wrap the command so the prlimit helper applies limits
            # before exec'ing the real command.
            args = [python_exec, '-m', 'oslo_concurrency.prlimit']
            args.extend(prlimit.prlimit_args())
            args.append('--')
            args.extend(cmd)
            cmd = args

    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    watch = timeutils.StopWatch()
    while attempts > 0:
        attempts -= 1
        watch.restart()

        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                # preexec_fn is unsupported on Windows (ValueError).
                on_preexec_fn = None
                close_fds = False
            else:
                on_preexec_fn = functools.partial(_subprocess_setup,
                                                  preexec_fn)
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=on_preexec_fn,
                                   shell=shell,
                                   cwd=cwd,
                                   env=env_variables)

            if on_execute:
                on_execute(obj)

            try:
                # eventlet.green.subprocess is not really greenthread
                # friendly on Windows. In order to avoid blocking other
                # greenthreads, we have to wrap this call using tpool.
                if eventlet_patched and os.name == 'nt':
                    result = tpool.execute(obj.communicate,
                                           process_input)
                else:
                    result = obj.communicate(process_input)

                obj.stdin.close()  # pylint: disable=E1101
                _returncode = obj.returncode  # pylint: disable=E1101
                LOG.log(loglevel, 'CMD "%s" returned: %s in %0.3fs',
                        sanitized_cmd, _returncode, watch.elapsed())
            finally:
                if on_completion:
                    on_completion(obj)

            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                if six.PY3:
                    stdout = os.fsdecode(stdout)
                    stderr = os.fsdecode(stderr)
                # Mask anything password-like before it reaches logs or
                # exception text.
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            if six.PY3 and not binary and result is not None:
                (stdout, stderr) = result
                # Decode from the locale using using the surrogateescape
                # error handler (decoding cannot fail)
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                return (stdout, stderr)
            else:
                return result
        except (ProcessExecutionError, OSError) as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == LOG_ALL_ERRORS or (
                    log_errors == LOG_FINAL_ERROR and not attempts):
                if isinstance(err, ProcessExecutionError):
                    format = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                    LOG.log(loglevel, format, {"desc": err.description,
                                               "cmd": err.cmd,
                                               "code": err.exit_code,
                                               "stdout": err.stdout,
                                               "stderr": err.stderr})
                else:
                    format = _('Got an OSError\ncommand: %(cmd)r\n'
                               'errno: %(errno)r')
                    LOG.log(loglevel, format, {"cmd": sanitized_cmd,
                                               "errno": err.errno})

            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the
            #               subprocess call clean something up in
            #               between calls, without it two execute calls
            #               in a row hangs the second one
            # NOTE(bnemec): termie's comment above is probably specific
            #               to the eventlet subprocess module, but since
            #               we still have to support that we're leaving
            #               the sleep. It won't hurt anything in the
            #               stdlib case anyway.
            time.sleep(0)
def get_certificate(self):
    """Return the certificate payload as UTF-8 bytes, or None."""
    cert = self._cert_container.certificate
    if not cert:
        return None
    return encodeutils.to_utf8(cert.payload)
def list_allocation_candidates(req):
    """GET a JSON object with a list of allocation requests and a JSON
    object of provider summary objects

    On success return a 200 and an application/json body representing
    a collection of allocation requests and provider summaries
    """
    context = req.environ['placement.context']
    context.can(policies.LIST)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # Use the newest query schema whose microversion the request
    # matches, falling back to the 1.10 schema.
    get_schema = schema.GET_SCHEMA_1_10
    for version, candidate in (((1, 31), schema.GET_SCHEMA_1_31),
                               ((1, 25), schema.GET_SCHEMA_1_25),
                               ((1, 21), schema.GET_SCHEMA_1_21),
                               ((1, 17), schema.GET_SCHEMA_1_17),
                               ((1, 16), schema.GET_SCHEMA_1_16)):
        if want_version.matches(version):
            get_schema = candidate
            break
    util.validate_query_params(req, get_schema)

    requests = lib.RequestGroup.dict_from_request(req)

    limit = req.GET.getall('limit')
    # JSONschema has already confirmed that limit has the form
    # of an integer.
    if limit:
        limit = int(limit[0])

    # Schema ensures we get either "none" or "isolate"
    group_policy = req.GET.getall('group_policy') or None
    if group_policy:
        group_policy = group_policy[0]
    else:
        # group_policy is required if more than one numbered request
        # group was specified.
        numbered = [rg for rg in requests.values() if rg.use_same_provider]
        if len(numbered) > 1:
            raise webob.exc.HTTPBadRequest(
                'The "group_policy" parameter is required when specifying '
                'more than one "resources{N}" parameter.')

    try:
        cands = ac_obj.AllocationCandidates.get_by_requests(
            context, requests, limit=limit, group_policy=group_policy)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            'Invalid resource class in resources parameter: %(error)s' %
            {'error': exc})
    except exception.TraitNotFound as exc:
        raise webob.exc.HTTPBadRequest(six.text_type(exc))

    response = req.response
    trx_cands = _transform_allocation_candidates(cands, requests,
                                                 want_version)
    response.body = encodeutils.to_utf8(jsonutils.dumps(trx_cands))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.cache_control = 'no-cache'
        response.last_modified = timeutils.utcnow(with_timezone=True)
    return response
def get_intermediates(self):
    """Return the intermediate certificate PEMs as a list, or None."""
    inter = self._cert_container.intermediates
    if not inter:
        return None
    pem_bytes = encodeutils.to_utf8(inter.payload)
    return list(cert_parser.get_intermediates_pems(pem_bytes))
def _send_aggregates(response, aggregate_uuids):
    """Fill ``response`` with a JSON view of the aggregate uuids."""
    serialized = _serialize_aggregates(aggregate_uuids)
    response.status = 200
    response.body = encodeutils.to_utf8(jsonutils.dumps(serialized))
    response.content_type = 'application/json'
    return response
def _sign_instance_id(self, instance_id):
    """Return the HMAC-SHA256 hex digest of instance_id keyed by the
    configured metadata proxy shared secret.
    """
    secret = encodeutils.to_utf8(self.conf.metadata_proxy_shared_secret)
    payload = encodeutils.to_utf8(instance_id)
    return hmac.new(secret, payload, hashlib.sha256).hexdigest()
def migrate(self, destination, migrate_uri=None, migrate_disks=None,
            destination_xml=None, flags=0, bandwidth=0):
    """Migrate guest object from its current host to the destination

    :param destination: URI of host destination where guest will be
                        migrate
    :param migrate_uri: URI for invoking the migration
    :param migrate_disks: List of disks to be migrated
    :param destination_xml: The guest XML to be used on the target host
    :param flags: May be one of more of the following:
       VIR_MIGRATE_LIVE Do not pause the VM during migration
       VIR_MIGRATE_PEER2PEER Direct connection between source &
                             destination hosts
       VIR_MIGRATE_TUNNELLED Tunnel migration data over the
                             libvirt RPC channel
       VIR_MIGRATE_PERSIST_DEST If the migration is successful,
                                persist the domain on the
                                destination host.
       VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
                                   undefine the domain on the
                                   source host.
       VIR_MIGRATE_PAUSED Leave the domain suspended on the remote
                          side.
       VIR_MIGRATE_NON_SHARED_DISK Migration with non-shared storage
                                   with full disk copy
       VIR_MIGRATE_NON_SHARED_INC Migration with non-shared storage
                                  with incremental disk copy
       VIR_MIGRATE_CHANGE_PROTECTION Protect against domain
                                     configuration changes during the
                                     migration process (set
                                     automatically when supported).
       VIR_MIGRATE_UNSAFE Force migration even if it is considered
                          unsafe.
       VIR_MIGRATE_OFFLINE Migrate offline
    :param bandwidth: The maximum bandwidth in MiB/s
    """
    # migrateToURI3 extracts these values from the `params` dict
    # rather than taking them as positional arguments.
    params = {'bandwidth': bandwidth}
    if destination_xml:
        params['destination_xml'] = destination_xml
    if migrate_disks:
        params['migrate_disks'] = migrate_disks
    if migrate_uri:
        params['migrate_uri'] = migrate_uri

    # Due to a quirk in the libvirt python bindings,
    # VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
    # interpreted as "block migrate all writable disks" rather than
    # "don't block migrate any disks", which would include attached
    # volumes and could corrupt data on them. Drop the flag when there
    # is nothing to block migrate.
    if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
            not params.get('migrate_disks')):
        flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC

    # The Python2 libvirt bindings reject unicode strings in params.
    if six.PY2:
        params = {key: encodeutils.to_utf8(value)
                  if isinstance(value, six.text_type) else value
                  for key, value in params.items()}

    self._domain.migrateToURI3(destination, params=params, flags=flags)
def default(self, response, result):
    """Serialize ``result`` as JSON into ``response``."""
    response.content_type = 'application/json'
    response.body = encodeutils.to_utf8(self.to_json(result))
def get_private_key(self):
    """Return the private key payload as UTF-8 bytes, or None."""
    pkey = self._cert_container.private_key
    if not pkey:
        return None
    return encodeutils.to_utf8(pkey.payload)
def __call__(self, req):
    """Get the nova micro version number

    * If neither "X-OpenStack-Nova-API-Version" nor
      "OpenStack-API-Version" (specifying "compute") is provided,
      act as if the minimum supported microversion was specified.
    * If both headers are provided, "OpenStack-API-Version" will be
      preferred.
    * If "X-OpenStack-Nova-API-Version" or "OpenStack-API-Version"
      is provided, respond with the API at that microversion.
      If that's outside of the range of microversions supported,
      return 406 Not Acceptable.
    * If "X-OpenStack-Nova-API-Version" or "OpenStack-API-Version"
      has a value of "latest" (special keyword), act as if maximum
      was specified.
    """
    # The two possible request headers; `version_num` is the newer
    # "OpenStack-API-Version" style ("compute X.Y") and takes
    # precedence over the legacy bare "X.Y" header.
    version_num = req.environ.get(
        constants.HTTP_NOVA_API_VERSION_REQUEST_HEADER)
    legacy_version_num = req.environ.get(
        constants.HTTP_LEGACY_NOVA_API_VERSION_REQUEST_HEADER)
    # `message` stays None while no error has been detected; each of
    # the `if message is None` gates below only runs on the
    # still-successful path.
    message = None
    api_version = None
    if version_num is None and legacy_version_num is None:
        # Neither header given: fall back to the minimum version.
        micro_version = constants.NOVA_APIGW_MIN_VERSION
    elif version_num is not None:
        err_msg = ("Invalid format of client version '%s'. "
                   "Expected format 'compute X.Y',"
                   "where X is a major part and Y "
                   "is a minor part of version.") % version_num
        try:
            # Header must look like "compute X.Y": first token is the
            # service name, the remainder is the microversion string.
            nova_version_prefix = version_num.split()[0]
            micro_version = ''.join(version_num.split()[1:])
            if nova_version_prefix != 'compute':
                message = err_msg
        except Exception:
            # e.g. an empty header value makes split()[0] raise.
            message = err_msg
    else:
        # Only the legacy header present; its value is the version.
        micro_version = legacy_version_num
    if message is None:
        try:
            # Returns checked APIVersion object,
            # or raise UnsupportedVersion exceptions.
            api_version = api_versions.get_api_version(micro_version)
        except exceptions.UnsupportedVersion as e:
            message = e.message
    if message is None and api_version is not None:
        # Range check on the minor part only (major is fixed by the
        # supported MIN/MAX constants, which share a major version).
        min_minor = int(constants.NOVA_APIGW_MIN_VERSION.split('.')[1])
        max_minor = int(constants.NOVA_APIGW_MAX_VERSION.split('.')[1])
        if api_version.is_latest():
            # "latest" keyword: pin to the maximum supported version.
            micro_version = constants.NOVA_APIGW_MAX_VERSION
            api_version.ver_minor = max_minor
        if api_version.ver_minor < min_minor or \
                api_version.ver_minor > max_minor:
            message = ("Version %s is not supported by the API. "
                       "Minimum is %s, and maximum is %s" %
                       (micro_version, constants.NOVA_APIGW_MIN_VERSION,
                        constants.NOVA_APIGW_MAX_VERSION))
    if message is None:
        # Success: record the negotiated version for downstream
        # handlers and forward the request down the WSGI pipeline.
        # NOTE(review): when self.app is falsy this returns None —
        # presumably this middleware is always configured with an app;
        # confirm against deployment config.
        req.environ[constants.NOVA_API_VERSION_REQUEST_HEADER] = \
            micro_version
        if self.app:
            return req.get_response(self.app)
    else:
        # Negotiation failed: short-circuit with 406 Not Acceptable
        # carrying a JSON-formatted computeFault body.
        content_type = 'application/json'
        body = jsonutils.dumps(
            self._format_error('406', message, 'computeFault'))
        response = webob.Response()
        response.content_type = content_type
        response.body = encodeutils.to_utf8(body)
        response.status_code = 406
        return response
def get_private_key_passphrase(self): if self._cert_container.private_key_passphrase: return encodeutils.to_utf8( self._cert_container.private_key_passphrase.payload) return None