def test_create_server_detect_from_image(self):
    """If user doesn't pass in diskConfig for server, use image metadata
    to specify AUTO or MANUAL.
    """
    cases = [('a440c04b-79fa-479c-bed1-0b816eaec379', 'MANUAL'),
             ('70a599e0-31e7-49b7-b260-868f441e862b', 'AUTO')]
    for image_ref, expected in cases:
        req = fakes.HTTPRequest.blank('/fake/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = utils.dumps({'server': {
            'name': 'server_test',
            'imageRef': image_ref,
            'flavorRef': '1',
        }})
        res = req.get_response(self.app)
        server_dict = utils.loads(res.body)['server']
        self.assertDiskConfig(server_dict, expected)
def __call__(self, req):
    """Request lazy serialization from the app, then re-serialize the
    response body with whichever serializer the app selected.
    """
    req.environ['nova.lazy_serialize'] = True
    response = req.get_response(self.application)

    # The simple serialization driver, when present, takes precedence.
    simple_serial = req.environ.get('nova.simple_serial')
    if simple_serial is not None:
        response.body = simple_serial.serialize(utils.loads(response.body))
        return response

    serializer = req.environ.get('nova.serializer')
    if serializer is None:
        # No serializer selected; pass the response through untouched.
        return response

    # Build up the arguments for the serialize() method.
    kwargs = dict(action=req.environ['nova.action'])
    if 'nova.template' in req.environ:
        kwargs['template'] = req.environ['nova.template']

    response.body = serializer.serialize(utils.loads(response.body),
                                         **kwargs)
    return response
def test_show_server(self):
    """GET /servers/<id> reports the instance's diskConfig."""
    for uuid, expected in ((MANUAL_INSTANCE_UUID, 'MANUAL'),
                           (AUTO_INSTANCE_UUID, 'AUTO')):
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
        res = req.get_response(self.app)
        self.assertDiskConfig(utils.loads(res.body)['server'], expected)
def test_show_image(self):
    """GET /images/<id> reports the image's diskConfig."""
    cases = [('a440c04b-79fa-479c-bed1-0b816eaec379', 'MANUAL'),
             ('70a599e0-31e7-49b7-b260-868f441e862b', 'AUTO')]
    for image_id, expected in cases:
        req = fakes.HTTPRequest.blank('/fake/images/%s' % image_id)
        res = req.get_response(self.app)
        self.assertDiskConfig(utils.loads(res.body)['image'], expected)
def test_show_server(self):
    """GET /servers/<id> reports the instance's diskConfig."""
    expectations = [(MANUAL_INSTANCE_UUID, 'MANUAL'),
                    (AUTO_INSTANCE_UUID, 'AUTO')]
    for uuid, expected in expectations:
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
        res = req.get_response(self.app)
        server_dict = utils.loads(res.body)['server']
        self.assertDiskConfig(server_dict, expected)
def process(self, req, *args, **kwargs):
    """Run pre-handlers, dispatch the request, then let each extension
    handler post-process the deserialized body of a successful response.
    """
    for pre_handler in self.pre_handlers:
        pre_handler(req)

    res = req.get_response(self.application)

    # Extensions only run against successful (2xx/3xx) responses.
    if not (200 <= res.status_int < 400):
        return res

    # Deserialize the response body, if any.
    body = utils.loads(res.body) if res.body else None

    # currently request handlers are un-ordered
    for handler in self.handlers:
        res = handler(req, res, body)

    # Reserialize the (possibly mutated) body.
    if body is not None:
        res.body = utils.dumps(body)
    return res
def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                        disk_over_commit):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param disk_over_commit: if True, consider real(not virtual)
                             disk size.
    :raises exception.MigrationError: if dest lacks sufficient disk
    """
    # Libvirt supports qcow2 disk format,which is usually compressed
    # on compute nodes.
    # Real disk image (compressed) may enlarged to "virtual disk size",
    # that is specified as the maximum disk size.
    # (See qemu-img -f path-to-disk)
    # Scheduler recognizes destination host still has enough disk space
    # if real disk size < available disk size
    # if disk_over_commit is True,
    # otherwise virtual disk size < available disk size.

    # Getting total available disk of host
    available_gb = self._get_compute_info(context,
                                          dest, 'disk_available_least')
    available = available_gb * (1024 ** 3)

    # Getting necessary disk size
    try:
        topic = db.queue_get_for(context, FLAGS.compute_topic,
                                 instance_ref['host'])
        ret = rpc.call(context, topic,
                       {"method": 'get_instance_disk_info',
                        "args": {'instance_name': instance_ref['name']}})
        disk_infos = utils.loads(ret)
    except rpc_common.RemoteError:
        # BUG FIX: the message below interpolates %(src)s via locals(),
        # but 'src' was never bound, so formatting raised KeyError while
        # handling the original error.  Bind it first.
        src = instance_ref['host']
        LOG.exception(_("host %(dest)s is not compatible with "
                        "original host %(src)s.") % locals())
        raise

    necessary = 0
    if disk_over_commit:
        for info in disk_infos:
            necessary += int(info['disk_size'])
    else:
        for info in disk_infos:
            necessary += int(info['virt_disk_size'])

    # Check that available disk > necessary disk
    if (available - necessary) < 0:
        instance_uuid = instance_ref['uuid']
        reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                   "Lack of disk(host:%(available)s "
                   "<= instance:%(necessary)s)")
        raise exception.MigrationError(reason=reason % locals())
def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                        disk_over_commit):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param disk_over_commit: if True, consider real(not virtual)
                             disk size.
    :raises exception.MigrationError: if dest lacks sufficient disk
    """
    # Libvirt supports qcow2 disk format,which is usually compressed
    # on compute nodes.
    # Real disk image (compressed) may enlarged to "virtual disk size",
    # that is specified as the maximum disk size.
    # (See qemu-img -f path-to-disk)
    # Scheduler recognizes destination host still has enough disk space
    # if real disk size < available disk size
    # if disk_over_commit is True,
    # otherwise virtual disk size < available disk size.

    # Refresh compute_nodes table
    topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
    rpc.call(context, topic, {"method": "update_available_resource"})

    # Getting total available disk of host
    available_gb = self._get_compute_info(context,
                                          dest, 'disk_available_least')
    available = available_gb * (1024 ** 3)

    # Getting necessary disk size
    try:
        topic = db.queue_get_for(context, FLAGS.compute_topic,
                                 instance_ref['host'])
        ret = rpc.call(context, topic,
                       {"method": 'get_instance_disk_info',
                        "args": {'instance_name': instance_ref.name}})
        disk_infos = utils.loads(ret)
    except rpc.RemoteError:
        # BUG FIX: the message below interpolates %(src)s via locals(),
        # but 'src' was never bound, so formatting raised KeyError while
        # handling the original error.  Bind it first.
        src = instance_ref['host']
        LOG.exception(_("host %(dest)s is not compatible with "
                        "original host %(src)s.") % locals())
        raise

    necessary = 0
    if disk_over_commit:
        for info in disk_infos:
            necessary += int(info['disk_size'])
    else:
        for info in disk_infos:
            necessary += int(info['virt_disk_size'])

    # Check that available disk > necessary disk
    if (available - necessary) < 0:
        instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
        reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                   "Lack of disk(host:%(available)s "
                   "<= instance:%(necessary)s)")
        raise exception.MigrationError(reason=reason % locals())
def __call__(self, req): request_id = context.generate_request_id() signature = req.params.get('Signature') if not signature: msg = _("Signature not provided") return ec2_error(req, request_id, "Unauthorized", msg) access = req.params.get('AWSAccessKeyId') if not access: msg = _("Access key not provided") return ec2_error(req, request_id, "Unauthorized", msg) # Make a copy of args for authentication and signature verification. auth_params = dict(req.params) # Not part of authentication args auth_params.pop('Signature') cred_dict = { 'access': access, 'signature': signature, 'host': req.host, 'verb': req.method, 'path': req.path, 'params': auth_params, } if "ec2" in FLAGS.keystone_ec2_url: creds = {'ec2Credentials': cred_dict} else: creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}} creds_json = utils.dumps(creds) headers = {'Content-Type': 'application/json'} o = urlparse.urlparse(FLAGS.keystone_ec2_url) if o.scheme == "http": conn = httplib.HTTPConnection(o.netloc) else: conn = httplib.HTTPSConnection(o.netloc) conn.request('POST', o.path, body=creds_json, headers=headers) response = conn.getresponse() data = response.read() if response.status != 200: if response.status == 401: msg = response.reason else: msg = _("Failure communicating with keystone") return ec2_error(req, request_id, "Unauthorized", msg) result = utils.loads(data) conn.close() try: token_id = result['access']['token']['id'] user_id = result['access']['user']['id'] project_id = result['access']['token']['tenant'] roles = [ role['name'] for role in result['access']['user']['roles'] ] except (AttributeError, KeyError), e: LOG.exception("Keystone failure: %s" % e) msg = _("Failure communicating with keystone") return ec2_error(req, request_id, "Unauthorized", msg)
def test_detail_servers(self):
    """GET /servers/detail includes each instance's diskConfig."""
    req = fakes.HTTPRequest.blank('/fake/servers/detail')
    res = req.get_response(self.app)
    server_dicts = utils.loads(res.body)['servers']
    for server_dict, expected in zip(server_dicts, ['MANUAL', 'AUTO']):
        self.assertDiskConfig(server_dict, expected)
def __call__(self, req): request_id = context.generate_request_id() signature = req.params.get('Signature') if not signature: msg = _("Signature not provided") return ec2_error(req, request_id, "Unauthorized", msg) access = req.params.get('AWSAccessKeyId') if not access: msg = _("Access key not provided") return ec2_error(req, request_id, "Unauthorized", msg) # Make a copy of args for authentication and signature verification. auth_params = dict(req.params) # Not part of authentication args auth_params.pop('Signature') cred_dict = { 'access': access, 'signature': signature, 'host': req.host, 'verb': req.method, 'path': req.path, 'params': auth_params, } if "ec2" in FLAGS.keystone_ec2_url: creds = {'ec2Credentials': cred_dict} else: creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}} creds_json = utils.dumps(creds) headers = {'Content-Type': 'application/json'} o = urlparse.urlparse(FLAGS.keystone_ec2_url) if o.scheme == "http": conn = httplib.HTTPConnection(o.netloc) else: conn = httplib.HTTPSConnection(o.netloc) conn.request('POST', o.path, body=creds_json, headers=headers) response = conn.getresponse() data = response.read() if response.status != 200: if response.status == 401: msg = response.reason else: msg = _("Failure communicating with keystone") return ec2_error(req, request_id, "Unauthorized", msg) result = utils.loads(data) conn.close() try: token_id = result['access']['token']['id'] user_id = result['access']['user']['id'] project_id = result['access']['token']['tenant']['id'] roles = [role['name'] for role in result['access']['user']['roles']] except (AttributeError, KeyError), e: LOG.exception("Keystone failure: %s" % e) msg = _("Failure communicating with keystone") return ec2_error(req, request_id, "Unauthorized", msg)
def read_domains(fname):
    """Load and return the JSON-serialized domain list stored in *fname*.

    :raises exception.NotFound: when the file cannot be opened or read
    """
    try:
        # BUG FIX: use a context manager so the file handle is closed
        # even when read() raises; open/read/close leaked on error.
        with open(fname, 'r') as f:
            return utils.loads(f.read())
    except IOError:
        raise exception.NotFound()
def __do_request(self, path, context, **kwargs):
    """POST *kwargs* (JSON-encoded) to *path* and return the decoded
    response body, falling back to the raw body when it isn't JSON.
    """
    request = wsgi.Request.blank(path)
    request.method = "POST"
    request.environ["openstack.context"] = context
    request.body = urllib.urlencode({"json": utils.dumps(kwargs)})
    response = request.get_response(self.app)
    try:
        return utils.loads(response.body)
    except Exception:
        # Not JSON -- hand back the raw payload.
        return response.body
def __do_request(self, path, context, **kwargs):
    """POST *kwargs* as a JSON-encoded form field to *path*; return the
    parsed JSON response, or the raw body when parsing fails.
    """
    req = wsgi.Request.blank(path)
    req.method = 'POST'
    req.environ['openstack.context'] = context
    payload = {'json': utils.dumps(kwargs)}
    req.body = urllib.urlencode(payload)
    resp = req.get_response(self.app)
    try:
        return utils.loads(resp.body)
    except Exception:
        # Response was not JSON; return it verbatim.
        return resp.body
def test_detail_image(self):
    """GET /images/detail includes diskConfig for the test fixtures."""
    req = fakes.HTTPRequest.blank('/fake/images/detail')
    res = req.get_response(self.app)
    image_dicts = utils.loads(res.body)['images']
    for image_dict, expected in zip(image_dicts, ('MANUAL', 'AUTO')):
        # NOTE(sirp): image fixtures 6 and 7 are setup for
        # auto_disk_config testing
        if image_dict['id'] in (6, 7):
            self.assertDiskConfig(image_dict, expected)
class EC2Token(wsgi.Middleware): """Authenticate an EC2 request with keystone and convert to token.""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): # Read request signature and access id. try: signature = req.params['Signature'] access = req.params['AWSAccessKeyId'] except KeyError, e: LOG.exception(e) raise webob.exc.HTTPBadRequest() # Make a copy of args for authentication and signature verification. auth_params = dict(req.params) # Not part of authentication args auth_params.pop('Signature') # Authenticate the request. creds = { 'ec2Credentials': { 'access': access, 'signature': signature, 'host': req.host, 'verb': req.method, 'path': req.path, 'params': auth_params, } } creds_json = utils.dumps(creds) headers = {'Content-Type': 'application/json'} # Disable "has no x member" pylint error # for httplib and urlparse # pylint: disable-msg=E1101 o = urlparse.urlparse(FLAGS.keystone_ec2_url) if o.scheme == "http": conn = httplib.HTTPConnection(o.netloc) else: conn = httplib.HTTPSConnection(o.netloc) conn.request('POST', o.path, body=creds_json, headers=headers) response = conn.getresponse().read() conn.close() # NOTE(vish): We could save a call to keystone by # having keystone return token, tenant, # user, and roles from this call. result = utils.loads(response) try: token_id = result['access']['token']['id'] except (AttributeError, KeyError), e: LOG.exception(e) raise webob.exc.HTTPBadRequest()
def __call__(self, req):
    """Validate the EC2 signature with keystone, then stamp the request
    with the resulting auth token and continue down the pipeline.
    """
    # Read request signature and access id.
    try:
        signature = req.params['Signature']
        access = req.params['AWSAccessKeyId']
    except KeyError:
        raise webob.exc.HTTPBadRequest()

    # Copy the request args; the signature itself is excluded from the
    # material keystone verifies.
    auth_params = dict(req.params)
    auth_params.pop('Signature')

    # Authenticate the request.
    cred_dict = {
        'access': access,
        'signature': signature,
        'host': req.host,
        'verb': req.method,
        'path': req.path,
        'params': auth_params,
    }
    creds_json = utils.dumps({'ec2Credentials': cred_dict})
    headers = {'Content-Type': 'application/json'}

    # Disable 'has no x member' pylint error
    # for httplib and urlparse
    # pylint: disable-msg=E1101
    o = urlparse.urlparse(FLAGS.keystone_ec2_url)
    if o.scheme == 'http':
        conn = environment.httplib.HTTPConnection(o.netloc)
    else:
        conn = environment.httplib.HTTPSConnection(o.netloc)
    conn.request('POST', o.path, body=creds_json, headers=headers)
    response = conn.getresponse().read()
    conn.close()

    # NOTE(vish): We could save a call to keystone by
    #             having keystone return token, tenant,
    #             user, and roles from this call.
    result = utils.loads(response)
    try:
        token_id = result['access']['token']['id']
    except (AttributeError, KeyError):
        raise webob.exc.HTTPBadRequest()

    # Authenticated!
    req.headers['X-Auth-Token'] = token_id
    return self.application
def __call__(self, req):
    """Authenticate an EC2 signed request via keystone and attach the
    returned token to the request before passing it on.
    """
    # Read request signature and access id.
    try:
        signature = req.params["Signature"]
        access = req.params["AWSAccessKeyId"]
    except KeyError:
        raise webob.exc.HTTPBadRequest()

    # Copy the args for verification; the signature is not part of the
    # signed material.
    auth_params = dict(req.params)
    auth_params.pop("Signature")

    # Authenticate the request.
    creds = {
        "ec2Credentials": {
            "access": access,
            "signature": signature,
            "host": req.host,
            "verb": req.method,
            "path": req.path,
            "params": auth_params,
        }
    }
    creds_json = utils.dumps(creds)
    headers = {"Content-Type": "application/json"}

    # Disable 'has no x member' pylint error
    # for httplib and urlparse
    # pylint: disable-msg=E1101
    o = urlparse.urlparse(FLAGS.keystone_ec2_url)
    conn_class = (httplib.HTTPConnection if o.scheme == "http"
                  else httplib.HTTPSConnection)
    conn = conn_class(o.netloc)
    conn.request("POST", o.path, body=creds_json, headers=headers)
    response = conn.getresponse().read()
    conn.close()

    # NOTE(vish): We could save a call to keystone by
    #             having keystone return token, tenant,
    #             user, and roles from this call.
    result = utils.loads(response)
    try:
        token_id = result["access"]["token"]["id"]
    except (AttributeError, KeyError):
        raise webob.exc.HTTPBadRequest()

    # Authenticated!
    req.headers["X-Auth-Token"] = token_id
    return self.application
def send(self, msg_body):
    """Round-trip *msg_body* over the 'kanyun' channel.

    Returns the reply's 'data' payload (an empty dict when absent);
    raises when the remote reports code 500.
    """
    msg_type = 'kanyun'
    msg_uuid = str(utils.gen_uuid())
    self.handler.send_multipart(
        [msg_type, msg_uuid, utils.dumps(msg_body)])
    r_msg_type, r_msg_uuid, r_msg_body = self.handler.recv_multipart()
    # The reply must echo our message type and uuid.
    assert r_msg_type == msg_type and r_msg_uuid == msg_uuid
    result = utils.loads(r_msg_body)
    if result['code'] == 500:
        raise Exception()
    return result['data'] or dict()
def send(self, msg_body):
    """Round-trip *msg_body* over the 'lb' channel.

    Returns the reply's load_balancer_ids; raises when the remote
    reports code 500.
    """
    msg_type = 'lb'
    msg_uuid = str(utils.gen_uuid())
    self.handler.send_multipart(
        [msg_type, msg_uuid, utils.dumps(msg_body)])
    r_msg_type, r_msg_uuid, r_msg_body = self.handler.recv_multipart()
    # The reply must echo our message type and uuid.
    assert r_msg_type == msg_type and r_msg_uuid == msg_uuid
    result = utils.loads(r_msg_body)['msg']
    if result['code'] == 500:
        raise Exception()
    return result['load_balancer_ids']
def deserialize(self, datastring):
    """
    Deserialize a string to a dictionary.

    The string must be in the format of a supported MIME type
    (JSON, or XML when it starts with '<').  Returns None when the
    payload cannot be parsed.
    """
    datastring = datastring.strip()
    try:
        # startswith() is safe on an empty string, unlike the previous
        # datastring[0] check which raised IndexError on empty input.
        if datastring.startswith('<'):
            return self._from_xml(datastring)
        return utils.loads(datastring)
    except Exception:
        # BUG FIX: the bare 'except:' here also swallowed SystemExit
        # and KeyboardInterrupt; only parse failures should yield None.
        return None
def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                        disk_over_commit):
    """Checks if destination host has enough disk for block migration.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :param disk_over_commit: if True, consider real(not virtual)
                             disk size.
    """
    # Libvirt supports the qcow2 disk format, which is usually
    # compressed on compute nodes; the real (compressed) image may grow
    # up to the "virtual disk size" declared as its maximum
    # (see qemu-img -f path-to-disk).  The destination is judged to
    # have room when:
    #   real disk size < available disk size     (disk_over_commit)
    #   virtual disk size < available disk size  (otherwise)

    # Total disk currently available on the destination host, in bytes.
    available_gb = self._get_compute_info(context, dest,
                                          "disk_available_least")
    available = available_gb * (1024 ** 3)

    # Ask the instance's current host how much disk it needs.
    topic = db.queue_get_for(context, FLAGS.compute_topic,
                             instance_ref["host"])
    ret = rpc.call(context, topic,
                   {"method": "get_instance_disk_info",
                    "args": {"instance_name": instance_ref["name"]}})
    disk_infos = utils.loads(ret)

    if disk_over_commit:
        necessary = sum(int(info["disk_size"]) for info in disk_infos)
    else:
        necessary = sum(int(info["virt_disk_size"])
                        for info in disk_infos)

    # Check that available disk > necessary disk.
    if (available - necessary) < 0:
        instance_uuid = instance_ref["uuid"]
        reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                   "Lack of disk(host:%(available)s "
                   "<= instance:%(necessary)s)")
        raise exception.MigrationError(reason=reason % locals())
def process_request(self, request):
    """Decode the 'json' request parameter into
    request.environ['openstack.params'], dropping reserved
    ('self'/'context') and private ('_'-prefixed) keys.
    """
    if 'json' not in request.params:
        return
    params_parsed = utils.loads(request.params['json'])
    params = dict((k, v) for k, v in params_parsed.iteritems()
                  if k not in ('self', 'context')
                  and not k.startswith('_'))
    request.environ['openstack.params'] = params
def test_create_server_override_manual(self):
    """An explicit RAX-DCF:diskConfig value wins over the image default."""
    server = {
        'name': 'server_test',
        'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
        'flavorRef': '1',
        'RAX-DCF:diskConfig': 'MANUAL',
    }
    req = fakes.HTTPRequest.blank('/fake/servers')
    req.method = 'POST'
    req.content_type = 'application/json'
    req.body = utils.dumps({'server': server})
    res = req.get_response(self.app)
    self.assertDiskConfig(utils.loads(res.body)['server'], 'MANUAL')
def process_request(self, request):
    """Expose the decoded "json" request parameter as
    request.environ["openstack.params"], skipping reserved and
    private keys.
    """
    if "json" not in request.params:
        return
    parsed = utils.loads(request.params["json"])
    params = {}
    for key, value in parsed.iteritems():
        # "self"/"context" are reserved; "_"-prefixed keys are private.
        if key in ("self", "context") or key.startswith("_"):
            continue
        params[key] = value
    request.environ["openstack.params"] = params
def send(self, msg_body):
    """Round-trip *msg_body* over the 'lb' channel and return the
    reply's data payload, or None when the remote reports code 500.
    """
    msg_type = "lb"
    msg_uuid = str(utils.gen_uuid())
    self.handler.send_multipart([msg_type, msg_uuid,
                                 utils.dumps(msg_body)])
    r_msg_type, r_msg_uuid, r_msg_body = self.handler.recv_multipart()
    # The reply must echo our message type and uuid.
    assert r_msg_type == msg_type and r_msg_uuid == msg_uuid

    result = utils.loads(r_msg_body)
    if "msg" in result:
        # FIXME: old version support -- older peers nest the payload
        # under a 'msg' key.
        result = result["msg"]
    if result["code"] == 500:
        return None
    return result["data"]
def action_peek_json(body):
    """Determine action to invoke."""
    try:
        decoded = utils.loads(body)
    except ValueError:
        raise exception.MalformedRequestBody(
            reason=_("cannot understand JSON"))

    # The body must contain exactly one top-level key: the action name.
    if len(decoded) != 1:
        raise exception.MalformedRequestBody(
            reason=_("too many body keys"))

    return decoded.keys()[0]
def process(self, req, *args, **kwargs):
    """Dispatch the request, then let each extension handler
    post-process the deserialized response body.
    """
    res = req.get_response(self.application)

    # Deserialize the response body, if any.
    body = utils.loads(res.body) if res.body else None

    # currently request handlers are un-ordered
    for handler in self.handlers:
        res = handler(req, res, body)

    # Reserialize the (possibly mutated) body.
    if body is not None:
        res.body = utils.dumps(body)
    return res
def __call__(self, req):
    """Authenticate an EC2 signed request with keystone and stamp the
    returned token onto the request.
    """
    # Read request signature and access id.
    try:
        signature = req.params['Signature']
        access = req.params['AWSAccessKeyId']
    except KeyError:
        raise webob.exc.HTTPBadRequest()

    # Copy the args; the signature itself is not part of the signed
    # material.
    auth_params = dict(req.params)
    auth_params.pop('Signature')

    # Authenticate the request.
    creds = {'ec2Credentials': {'access': access,
                                'signature': signature,
                                'host': req.host,
                                'verb': req.method,
                                'path': req.path,
                                'params': auth_params,
                                }}
    creds_json = utils.dumps(creds)
    headers = {'Content-Type': 'application/json'}

    o = urlparse(FLAGS.keystone_ec2_url)
    conn_class = (httplib.HTTPConnection if o.scheme == "http"
                  else httplib.HTTPSConnection)
    conn = conn_class(o.netloc)
    conn.request('POST', o.path, body=creds_json, headers=headers)
    response = conn.getresponse().read()
    conn.close()

    # NOTE(vish): We could save a call to keystone by
    #             having keystone return token, tenant,
    #             user, and roles from this call.
    result = utils.loads(response)
    # TODO(vish): check for errors
    token_id = result['auth']['token']['id']

    # Authenticated!
    req.headers['X-Auth-Token'] = token_id
    return self.application
def send(self, msg_body):
    """Send *msg_body* over the 'lb' channel; return the reply data or
    None when the remote reports code 500.
    """
    msg_type = 'lb'
    msg_uuid = str(utils.gen_uuid())
    frames = [msg_type, msg_uuid, utils.dumps(msg_body)]
    self.handler.send_multipart(frames)

    r_msg_type, r_msg_uuid, r_msg_body = self.handler.recv_multipart()
    # The reply must echo our message type and uuid.
    assert r_msg_type == msg_type and r_msg_uuid == msg_uuid

    result = utils.loads(r_msg_body)
    if "msg" in result:
        # FIXME: old version support -- older peers nest the payload
        # under a 'msg' key.
        result = result["msg"]
    if result['code'] == 500:
        return None
    return result['data']
def deserialize_remote_exception(data):
    """Rebuild the exception serialized in *data* (a JSON failure dict).

    Returns the reconstructed exception instance with the remote
    traceback folded into its string form, or a RemoteError when the
    exception class cannot be safely re-imported or re-instantiated.
    """
    failure = utils.loads(str(data))

    trace = failure.get('tb', [])
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if not module in FLAGS.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)

    try:
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")
        failure = klass(**failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        # Anything that prevents faithful reconstruction degrades to a
        # generic RemoteError carrying the original message and trace.
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type, ),
                       {'__str__': str_override})
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
        # Exceptions and not core python exceptions. This is important because
        # we cannot necessarily change an exception message so we must override
        # the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError as e:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message, ) + failure.args[1:]
    return failure
def deserialize_remote_exception(data):
    """Rebuild the exception serialized in *data* (a JSON failure dict).

    Returns the reconstructed exception instance with the remote
    traceback folded into its string form, or a RemoteError when the
    exception class cannot be safely re-imported or re-instantiated.
    """
    failure = utils.loads(str(data))

    trace = failure.get('tb', [])
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if not module in FLAGS.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)

    try:
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")
        failure = klass(**failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        # Anything that prevents faithful reconstruction degrades to a
        # generic RemoteError carrying the original message and trace.
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
                       {'__str__': str_override})
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
        # Exceptions and not core python exceptions. This is important because
        # we cannot necessarily change an exception message so we must override
        # the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError as e:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure
def _pre_POST_servers(self, req):
    """Translate the public diskConfig attribute on an incoming server
    create request to its internal representation.
    """
    # NOTE(sirp): deserialization currently occurs *after* pre-processing
    # extensions are called.  Until extensions are refactored so that
    # deserialization occurs earlier, we have to perform the
    # deserialization ourselves.
    if 'xml' in req.content_type:
        node = minidom.parseString(req.body)
        server = node.getElementsByTagName('server')[0]
        api_value = server.getAttribute(self.API_DISK_CONFIG)
        if api_value:
            value = disk_config_from_api(api_value)
            server.setAttribute(self.INTERNAL_DISK_CONFIG, str(value))
            req.body = str(node.toxml())
    else:
        body = utils.loads(req.body)
        server = body['server']
        api_value = server.get(self.API_DISK_CONFIG)
        if api_value:
            server[self.INTERNAL_DISK_CONFIG] = disk_config_from_api(
                api_value)
            req.body = utils.dumps(body)
def __call__(self, req):
    """Authenticate an EC2 signed request against keystone (httplib2)
    and attach the resulting token to the request.
    """
    # Read request signature and access id.
    try:
        signature = req.params['Signature']
        access = req.params['AWSAccessKeyId']
    except KeyError:
        raise webob.exc.HTTPBadRequest()

    # Make a copy of args for authentication and signature verification.
    auth_params = dict(req.params)
    # Not part of authentication args
    auth_params.pop('Signature')

    # Authenticate the request.
    client = httplib2.Http()
    creds = {'ec2Credentials': {'access': access,
                                'signature': signature,
                                'host': req.host,
                                'verb': req.method,
                                'path': req.path,
                                'params': auth_params,
                                }}
    # BUG FIX: a trailing comma after this dict literal previously made
    # 'headers' a one-element *tuple*, so httplib2 received a bogus
    # headers argument instead of the Content-Type mapping.
    headers = {'Content-Type': 'application/json'}
    resp, content = client.request(FLAGS.keystone_ec2_url,
                                   'POST',
                                   headers=headers,
                                   body=utils.dumps(creds))
    # NOTE(vish): We could save a call to keystone by
    #             having keystone return token, tenant,
    #             user, and roles from this call.
    result = utils.loads(content)
    # TODO(vish): check for errors
    token_id = result['auth']['token']['id']

    # Authenticated!
    req.headers['X-Auth-Token'] = token_id
    return self.application
def _from_json(self, datastring):
    """Parse a JSON document, raising MalformedRequestBody on failure."""
    try:
        return utils.loads(datastring)
    except ValueError:
        raise exception.MalformedRequestBody(
            reason=_("cannot understand JSON"))
def _from_json(self, datastring):
    """Deserialize *datastring* as JSON.

    NOTE(review): unlike the guarded _from_json variant elsewhere in
    this codebase, parse errors (ValueError) propagate to the caller
    unhandled -- confirm that is intended.
    """
    return utils.loads(datastring)
def assertJSONEquals(self, x, y):
    """Check if two json strings represent the equivalent Python object"""
    # BUG FIX (efficiency/clarity): the original parsed each document
    # twice -- once for the assertion and again for the return value.
    # Parse once and reuse the results.
    x_parsed = utils.loads(x)
    y_parsed = utils.loads(y)
    self.assertEquals(x_parsed, y_parsed)
    return x_parsed == y_parsed