def _update_usage_from_instance(self, context, resources, instance):
    """Update usage for a single instance."""

    uuid = instance['uuid']
    is_new_instance = uuid not in self.tracked_instances
    is_deleted_instance = instance['vm_state'] == vm_states.DELETED

    if is_new_instance:
        self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
        sign = 1

    if is_deleted_instance:
        self.tracked_instances.pop(uuid)
        sign = -1

    self.stats.update_stats_for_instance(instance)

    if self.pci_tracker:
        self.pci_tracker.update_pci_for_instance(context, instance)

    # if it's a new or deleted instance:
    if is_new_instance or is_deleted_instance:
        # new instance, update compute node resource usage:
        self._update_usage(context, resources, instance, sign=sign)

    resources['current_workload'] = self.stats.calculate_workload()
    if self.pci_tracker:
        resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
    else:
        resources['pci_stats'] = jsonutils.dumps([])
def test_snapshot_create_force(self):
    snapshot = {"volume_id": 12,
                "force": True,
                "display_name": "Snapshot Test Name",
                "display_description": "Snapshot Test Desc"}
    body = dict(snapshot=snapshot)
    req = webob.Request.blank('/v2/fake/os-snapshots')
    req.method = 'POST'
    req.body = jsonutils.dumps(body)
    req.headers['content-type'] = 'application/json'

    resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 200)

    resp_dict = jsonutils.loads(resp.body)
    self.assertIn('snapshot', resp_dict)
    self.assertEqual(resp_dict['snapshot']['displayName'],
                     snapshot['display_name'])
    self.assertEqual(resp_dict['snapshot']['displayDescription'],
                     snapshot['display_description'])
    self.assertEqual(resp_dict['snapshot']['volumeId'],
                     snapshot['volume_id'])

    # Test invalid force parameter
    snapshot = {"volume_id": 12,
                "force": '**&&^^%%$$##@@'}
    body = dict(snapshot=snapshot)
    req = webob.Request.blank('/v2/fake/os-snapshots')
    req.method = 'POST'
    req.body = jsonutils.dumps(body)
    req.headers['content-type'] = 'application/json'

    resp = req.get_response(self.app)
    self.assertEqual(resp.status_int, 400)
def test_json_filter_basic_operators(self):
    host = fakes.FakeHostState('host1', 'node1', {})
    # (operator, arguments, expected_result)
    ops_to_test = [
        ['=', [1, 1], True],
        ['=', [1, 2], False],
        ['<', [1, 2], True],
        ['<', [1, 1], False],
        ['<', [2, 1], False],
        ['>', [2, 1], True],
        ['>', [2, 2], False],
        ['>', [2, 3], False],
        ['<=', [1, 2], True],
        ['<=', [1, 1], True],
        ['<=', [2, 1], False],
        ['>=', [2, 1], True],
        ['>=', [2, 2], True],
        ['>=', [2, 3], False],
        ['in', [1, 1], True],
        ['in', [1, 1, 2, 3], True],
        ['in', [4, 1, 2, 3], False],
        ['not', [True], False],
        ['not', [False], True],
        ['or', [True, False], True],
        ['or', [False, False], False],
        ['and', [True, True], True],
        ['and', [False, False], False],
        ['and', [True, False], False],
        # Nested ((True or False) and (2 > 1)) == Passes
        ['and', [['or', True, False], ['>', 2, 1]], True]]

    for (op, args, expected) in ops_to_test:
        raw = [op] + args
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertEqual(expected,
                         self.filt_cls.host_passes(host, filter_properties))

    # This results in [False, True, False, True] and if any are True
    # then it passes...
    raw = ['not', True, False, True, False]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    # This results in [False, False, False] and if any are True
    # then it passes...which this doesn't
    raw = ['not', True, True, True]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_rpc_errors(self):
    api = create_api()
    req = webob.Request.blank('/rpc')
    req.method = 'POST'
    req.content_type = 'application/json'

    # Body is not a list, it should fail
    req.body = jsonutils.dumps({})
    res = req.get_response(api)
    self.assertEqual(res.status_int, 400)

    # cmd is not dict, it should fail.
    req.body = jsonutils.dumps([None])
    res = req.get_response(api)
    self.assertEqual(res.status_int, 400)

    # No command key, it should fail.
    req.body = jsonutils.dumps([{}])
    res = req.get_response(api)
    self.assertEqual(res.status_int, 400)

    # kwargs not dict, it should fail.
    req.body = jsonutils.dumps([{"command": "test", "kwargs": 200}])
    res = req.get_response(api)
    self.assertEqual(res.status_int, 400)

    # Command does not exist, it should fail.
    req.body = jsonutils.dumps([{"command": "test"}])
    res = req.get_response(api)
    self.assertEqual(res.status_int, 404)
def test_invalid_metadata_items_on_update_item(self):
    self.stubs.Set(nova.db, 'instance_metadata_update',
                   return_create_instance_metadata)
    data = {"metadata": {}}
    for num in range(CONF.quota_metadata_items + 1):
        data['metadata']['key%i' % num] = "blah"
    req = self._get_request()
    req.method = 'PUT'
    req.body = jsonutils.dumps(data)
    req.headers["content-type"] = "application/json"

    # test for long key
    data = {"metadata": {"a" * 260: "value1"}}
    req.body = jsonutils.dumps(data)
    self.assertRaises(self.validation_ex_large,
                      self.controller.update_all, req, self.uuid,
                      body=data)

    # test for long value
    data = {"metadata": {"key": "v" * 260}}
    req.body = jsonutils.dumps(data)
    self.assertRaises(self.validation_ex_large,
                      self.controller.update_all, req, self.uuid,
                      body=data)

    # test for empty key.
    data = {"metadata": {"": "value1"}}
    req.body = jsonutils.dumps(data)
    self.assertRaises(self.validation_ex,
                      self.controller.update_all, req, self.uuid,
                      body=data)
def test_create_server_detect_from_image(self):
    """If user doesn't pass in diskConfig for server, use image metadata
    to specify AUTO or MANUAL.
    """
    req = fakes.HTTPRequest.blank('/fake/servers')
    req.method = 'POST'
    req.content_type = 'application/json'
    body = {'server': {
        'name': 'server_test',
        'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
        'flavorRef': '1',
    }}
    req.body = jsonutils.dumps(body)
    res = req.get_response(self.app)
    server_dict = jsonutils.loads(res.body)['server']
    self.assertDiskConfig(server_dict, 'MANUAL')

    req = fakes.HTTPRequest.blank('/fake/servers')
    req.method = 'POST'
    req.content_type = 'application/json'
    body = {'server': {
        'name': 'server_test',
        'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
        'flavorRef': '1',
    }}
    req.body = jsonutils.dumps(body)
    res = req.get_response(self.app)
    server_dict = jsonutils.loads(res.body)['server']
    self.assertDiskConfig(server_dict, 'AUTO')
def authorize_console(self, context, token, console_type, host, port,
                      internal_access_path, instance_uuid):

    token_dict = {'token': token,
                  'instance_uuid': instance_uuid,
                  'console_type': console_type,
                  'host': host,
                  'port': port,
                  'internal_access_path': internal_access_path,
                  'last_activity_at': time.time()}
    data = jsonutils.dumps(token_dict)

    # We need to log the warning message if the token is not cached
    # successfully, because the failure will cause the console for
    # instance to not be usable.
    if not self.mc.set(token.encode('UTF-8'),
                       data, CONF.console_token_ttl):
        LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                    {'token': token})
    tokens = self._get_tokens_for_instance(instance_uuid)

    # Remove the expired tokens from cache. Filter into a new list rather
    # than removing from the list being iterated, which would skip
    # elements.
    tokens = [tok for tok in tokens
              if self.mc.get(tok.encode('UTF-8'))]
    tokens.append(token)
    if not self.mc.set(instance_uuid.encode('UTF-8'),
                       jsonutils.dumps(tokens)):
        LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
                        "into memcached"),
                    {'instance_uuid': instance_uuid})

    LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
              {'token': token, 'token_dict': token_dict})
def test_cluster_node_list_update(self):
    node1 = self.env.create_node(api=False)
    node2 = self.env.create_node(api=False)
    cluster = self.env.create_cluster(api=False)
    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node1.id]}),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 200)

    nodes = self.db.query(Node).filter(Node.cluster == cluster).all()
    self.assertEqual(1, len(nodes))
    self.assertEqual(nodes[0].id, node1.id)

    resp = self.app.put(
        reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
        jsonutils.dumps({'nodes': [node2.id]}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    nodes = self.db.query(Node).filter(Node.cluster == cluster)
    self.assertEqual(1, nodes.count())
def test_node_timestamp_updated_only_by_agent(self):
    node = self.env.create_node(api=False)
    timestamp = node.timestamp
    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([
            {'mac': node.mac, 'status': 'discover',
             'manufacturer': 'old'}
        ]),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    node = self.db.query(Node).get(node.id)
    self.assertEqual(node.timestamp, timestamp)

    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps(
            {'mac': node.mac, 'status': 'discover',
             'manufacturer': 'new'}
        ),
        headers=self.default_headers)
    self.assertEqual(resp.status_code, 200)
    node = self.db.query(Node).get(node.id)
    self.assertNotEqual(node.timestamp, timestamp)
    self.assertEqual('new', node.manufacturer)
def upgrade_releases(connection):
    select = text(
        """SELECT id, attributes_metadata, roles_metadata from releases""")
    update = text(
        """UPDATE releases
        SET attributes_metadata = :attrs,
            roles_metadata = :roles,
            wizard_metadata = :wiz_meta
        WHERE id = :id""")
    r = connection.execute(select)

    # Reading fixture files in a loop is in general a bad idea; as long as
    # wizard_metadata is the same for all existing releases, fetching it
    # can be moved outside of the loop.
    for release in r:
        attrs_meta = upgrade_release_attributes_50_to_51(
            jsonutils.loads(release[1]))
        roles_meta = upgrade_release_roles_50_to_51(
            jsonutils.loads(release[2]))
        connection.execute(
            update,
            id=release[0],
            attrs=jsonutils.dumps(attrs_meta),
            roles=jsonutils.dumps(roles_meta),
            wiz_meta=jsonutils.dumps(_wizard_meta)
        )
def upgrade_ubuntu_cobbler_profile_6_0_to_6_1(connection):
    select_query = text("SELECT id, generated FROM attributes")
    update_query = text(
        "UPDATE attributes SET generated = :generated WHERE id = :attr_id")
    for attr_id, generated in connection.execute(select_query):
        attrs = jsonutils.loads(generated)

        if attrs['cobbler']['profile'] == 'ubuntu_1204_x86_64':
            attrs['cobbler']['profile'] = 'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                generated=jsonutils.dumps(attrs),
                attr_id=attr_id)

    select_query = text("SELECT id, attributes_metadata FROM releases")
    update_query = text(
        "UPDATE releases SET attributes_metadata = :attrs_meta"
        " WHERE id = :release_id")
    for release_id, attributes_metadata in connection.execute(select_query):
        attrs = jsonutils.loads(attributes_metadata)

        if attrs['generated']['cobbler']['profile']['generator_arg'] == \
                'ubuntu_1204_x86_64':
            attrs['generated']['cobbler']['profile']['generator_arg'] = \
                'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                attrs_meta=jsonutils.dumps(attrs),
                release_id=release_id)
def _show(self, resource_type, response_file,
          uuid1, uuid2=None, relations=None):
    target_uuid = uuid2 or uuid1
    if resource_type.endswith('attachment'):
        resource_type = resource_type[:resource_type.index('attachment')]
    with open("%s/%s" % (self.fake_files_path, response_file)) as f:
        response_template = f.read()
        res_dict = getattr(self, '_fake_%s_dict' % resource_type)
        for item in res_dict.itervalues():
            if 'tags' in item:
                item['tags_json'] = jsonutils.dumps(item['tags'])

            # replace sec prof rules with their json dump
            def jsonify_rules(rule_key):
                if rule_key in item:
                    rules_json = jsonutils.dumps(item[rule_key])
                    item['%s_json' % rule_key] = rules_json
            jsonify_rules('logical_port_egress_rules')
            jsonify_rules('logical_port_ingress_rules')

        items = [jsonutils.loads(response_template % res_dict[res_uuid])
                 for res_uuid in res_dict if res_uuid == target_uuid]
        if items:
            return jsonutils.dumps(items[0])
        raise api_exc.ResourceNotFound()
def test_stats_sending_enabled(self):
    self.assertEqual(objects.MasterNodeSettings.must_send_stats(), False)

    resp = self.app.get(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers)
    self.assertEqual(200, resp.status_code)
    data = resp.json_body

    # emulate user confirmed settings in UI
    data["settings"]["statistics"]["user_choice_saved"]["value"] = True
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertTrue(objects.MasterNodeSettings.must_send_stats())

    # emulate user disabled statistics sending
    data["settings"]["statistics"]["send_anonymous_statistic"]["value"] = \
        False
    resp = self.app.put(
        reverse("MasterNodeSettingsHandler"),
        headers=self.default_headers,
        params=jsonutils.dumps(data)
    )
    self.assertEqual(200, resp.status_code)
    self.assertFalse(objects.MasterNodeSettings.must_send_stats())
def post(self, jsonurl, data):
    # convert json data string to dict
    ddict = json.loads(data)

    # check basic parameters
    self.testcase.assertIn('method', ddict)
    meth = ddict['method']
    self.testcase.assertIn(meth, ipamethods)
    self.testcase.assertIn('params', ddict)
    self.testcase.assertIsInstance(ddict['params'], list)
    self.testcase.assertEqual(len(ddict['params']), 2)
    self.testcase.assertIsInstance(ddict['params'][0], list)
    self.testcase.assertIsInstance(ddict['params'][1], dict)
    self.testcase.assertIn('version', ddict['params'][1])

    # check method specific parameters
    if meth.startswith('dnsrecord_'):
        self.testcase.assertEqual(len(ddict['params'][0]), 2)
        # domain params end with a .
        param1 = ddict['params'][0][0]
        self.testcase.assertEqual(param1[-1], ".")
    elif meth.startswith('dnszone_'):
        self.testcase.assertEqual(len(ddict['params'][0]), 1)
        param1 = ddict['params'][0][0]
        self.testcase.assertEqual(param1[-1], ".")

    rc = {}
    if self.needauth:
        self.needauth = False  # reset
        return MockResponse(401, json.dumps(rc))
    if self.error:
        rc['error'] = {'code': self.error}
        self.error = None  # reset
    else:
        rc['error'] = None
    return MockResponse(200, json.dumps(rc))
def _update_usage_from_migration(self, context, instance, image_meta,
                                 resources, migration):
    """Update usage for a single migration.  The record may represent an
    incoming or outbound migration.
    """
    uuid = migration['instance_uuid']
    LOG.audit(_("Updating from migration %s") % uuid)

    incoming = (migration['dest_compute'] == self.host and
                migration['dest_node'] == self.nodename)
    outbound = (migration['source_compute'] == self.host and
                migration['source_node'] == self.nodename)
    same_node = (incoming and outbound)

    record = self.tracked_instances.get(uuid, None)
    itype = None

    if same_node:
        # same node resize. record usage for whichever instance type the
        # instance is *not* in:
        if (instance['instance_type_id'] ==
                migration['old_instance_type_id']):
            itype = self._get_instance_type(
                context, instance, 'new_',
                migration['new_instance_type_id'])
        else:
            # instance record already has new flavor, hold space for a
            # possible revert to the old instance type:
            itype = self._get_instance_type(
                context, instance, 'old_',
                migration['old_instance_type_id'])
    elif incoming and not record:
        # instance has not yet migrated here:
        itype = self._get_instance_type(context, instance, 'new_',
                                        migration['new_instance_type_id'])
    elif outbound and not record:
        # instance migrated, but record usage for a possible revert:
        itype = self._get_instance_type(context, instance, 'old_',
                                        migration['old_instance_type_id'])

    if image_meta is None:
        image_meta = utils.get_image_from_system_metadata(
            instance['system_metadata'])

    if itype:
        numa_topology = (
            hardware.VirtNUMAInstanceTopology.get_constraints(
                itype, image_meta))
        usage = self._get_usage_dict(
            itype, numa_topology=numa_topology)
        if self.pci_tracker:
            self.pci_tracker.update_pci_for_migration(context, instance)
        self._update_usage(context, resources, usage)
        if self.pci_tracker:
            resources['pci_stats'] = jsonutils.dumps(
                self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])
        self.tracked_migrations[uuid] = (migration, itype)
def test_unassignment(self):
    cluster = self.env.create(
        cluster_kwargs={"api": True},
        nodes_kwargs=[{}]
    )
    node = self.env.nodes[0]

    # correct unassignment
    resp = self.app.post(
        reverse(
            'NodeUnassignmentHandler',
            kwargs={'cluster_id': cluster['id']}
        ),
        jsonutils.dumps([{'id': node.id}]),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    self.assertEqual(node.cluster, None)
    self.assertEqual(node.pending_roles, [])

    # Test with invalid node ids
    for node_id in (0, node.id + 50):
        resp = self.app.post(
            reverse(
                'NodeUnassignmentHandler',
                kwargs={'cluster_id': cluster['id']}
            ),
            jsonutils.dumps([{'id': node_id}]),
            headers=self.default_headers,
            expect_errors=True
        )
        self.assertEqual(400, resp.status_code)

    # Test with invalid cluster id
    resp = self.app.post(
        reverse(
            'NodeUnassignmentHandler',
            kwargs={'cluster_id': cluster['id'] + 5}
        ),
        jsonutils.dumps([{'id': node.id}]),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 404)

    # Test with wrong cluster id
    self.env.create(
        cluster_kwargs={"api": True},
        nodes_kwargs=[{}]
    )
    resp = self.app.post(
        reverse(
            'NodeUnassignmentHandler',
            kwargs={'cluster_id': cluster['id']}
        ),
        jsonutils.dumps([{'id': self.env.clusters[1].nodes[0].id}]),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(resp.status_code, 400)
def _is_serializable(value):
    """Returns True if value is serializable."""
    try:
        jsonutils.dumps(value)
    except TypeError:
        LOG.info(_LI("Value with type=%s is not serializable") %
                 type(value))
        return False

    return True
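# Editorial usage sketch for a check like _is_serializable above, written
# against the stdlib json module so it runs standalone (oslo's jsonutils
# wraps the same encoder, though its default= hook may coerce some extra
# types before raising TypeError).
import json

def is_serializable(value):
    try:
        json.dumps(value)
    except TypeError:
        return False
    return True

assert is_serializable({'a': [1, 2, None, u'b']})
assert not is_serializable(object())  # plain objects have no JSON encoding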
def test_validate_fail(self):
    info = dict(INST_INFO_DICT)
    del info['image_source']
    self.node.instance_info = json.dumps(info)
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=True) as task:
        task.node['instance_info'] = json.dumps(info)
        self.assertRaises(exception.MissingParameterValue,
                          task.driver.deploy.validate, task)
def convert_ec2_to_v3_credential(ec2credential):
    blob = {'access': ec2credential.access,
            'secret': ec2credential.secret}
    return {'id': hash_access_key(ec2credential.access),
            'user_id': ec2credential.user_id,
            'project_id': ec2credential.tenant_id,
            'blob': jsonutils.dumps(blob),
            'type': 'ec2',
            'extra': jsonutils.dumps({})}
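# Shape sketch (hypothetical values) for the converter above: blob and
# extra are stored as JSON *strings*, not dicts, so readers must loads()
# them; key order inside blob may vary.
#
#   convert_ec2_to_v3_credential(cred)  # cred.access == 'abc'
#   => {'id': hash_access_key('abc'),
#       'user_id': cred.user_id,
#       'project_id': cred.tenant_id,
#       'blob': '{"access": "abc", "secret": "..."}',
#       'type': 'ec2',
#       'extra': '{}'}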
def default(self, data, result=None):
    def sanitizer(obj):
        if isinstance(obj, datetime.datetime):
            _dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
            return _dtime.isoformat()
        return unicode(obj)

    if result:
        data.body = jsonutils.dumps(result)
    return jsonutils.dumps(data, default=sanitizer)
def __call__(self, target, creds, enforcer):
    """Check http: rules by calling to a remote server.

    This example implementation simply verifies that the response
    is exactly 'True'.
    """
    url = ("http:" + self.match) % target
    data = {"target": jsonutils.dumps(target),
            "credentials": jsonutils.dumps(creds)}
    post_data = urlparse.urlencode(data)
    f = urlrequest.urlopen(url, post_data)
    return f.read() == "True"
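# Editorial sketch of the wire contract used by the http: rule above (the
# payload keys come from the snippet; the sample target and credentials
# are hypothetical). The remote policy server receives form-encoded JSON
# blobs and must answer with the exact body "True" to grant access.
import json
from six.moves.urllib import parse as urlparse

target = {'project_id': 'tenant1'}
creds = {'user_id': 'user1', 'roles': ['member']}
post_data = urlparse.urlencode({'target': json.dumps(target),
                                'credentials': json.dumps(creds)})
# post_data is what urlrequest.urlopen(url, post_data) sends; a reply of
# "true" or "False" would deny access, since the check compares == "True".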
def test_attributes_update_put(self):
    cluster_id = self.env.create_cluster(api=True)['id']
    cluster_db = self.env.clusters[0]
    resp = self.app.get(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)

    resp = self.app.put(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=jsonutils.dumps({
            'editable': {
                "foo": "bar"
            },
        }),
        headers=self.default_headers
    )
    self.assertEqual(200, resp.status_code)
    attrs = objects.Cluster.get_attributes(cluster_db)
    self.assertEqual("bar", attrs.editable["foo"])
    attrs.editable.pop('foo')

    # 400 on generated update
    resp = self.app.put(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=jsonutils.dumps({
            'generated': {
                "foo": "bar"
            },
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)

    # 400 if editable is not dict
    resp = self.app.put(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster_id}),
        params=jsonutils.dumps({
            'editable': ["foo", "bar"],
        }),
        headers=self.default_headers,
        expect_errors=True
    )
    self.assertEqual(400, resp.status_code)
def _update_available_resource(self, context, resources):
    if 'pci_passthrough_devices' in resources:
        if not self.pci_tracker:
            self.pci_tracker = pci_manager.PciDevTracker()

        devs = []
        for dev in jsonutils.loads(resources.pop(
                'pci_passthrough_devices')):
            if dev['dev_type'] == 'type-PF':
                continue

            if self.pci_filter.device_assignable(dev):
                devs.append(dev)

        self.pci_tracker.set_hvdevs(devs)

    # Grab all instances assigned to this node:
    instances = objects.InstanceList.get_by_host_and_node(
        context, self.host, self.nodename,
        expected_attrs=['system_metadata', 'numa_topology'])

    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(context, resources, instances)

    # Grab all in-progress migrations:
    capi = self.conductor_api
    migrations = capi.migration_get_in_progress_by_host_and_node(
        context, self.host, self.nodename)

    self._update_usage_from_migrations(context, resources, migrations)

    # Detect and account for orphaned instances that may exist on the
    # hypervisor, but are not in the DB:
    orphans = self._find_orphaned_instances()
    self._update_usage_from_orphans(context, resources, orphans)

    # NOTE(yjiang5): Because pci device tracker status is not cleared in
    # this periodic task, and also because the resource tracker is not
    # notified when instances are deleted, we need to remove all usages
    # from deleted instances.
    if self.pci_tracker:
        self.pci_tracker.clean_usage(instances, migrations, orphans)
        resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
    else:
        resources['pci_stats'] = jsonutils.dumps([])

    self._report_final_resource_view(resources)

    metrics = self._get_host_metrics(context, self.nodename)
    resources['metrics'] = jsonutils.dumps(metrics)
    self._sync_compute_node(context, resources)
def test_admin_nic_and_ip_assignment(self):
    cluster = self.env.create_cluster(api=True)
    admin_ip = str(IPNetwork(
        self.env.network_manager.get_admin_network_group().cidr)[0])
    mac1, mac2 = (self.env.generate_random_mac(),
                  self.env.generate_random_mac())
    meta = self.env.default_metadata()
    meta['interfaces'] = [{'name': 'eth1', 'mac': mac2, 'ip': admin_ip,
                           'pxe': True},
                          {'name': 'eth0', 'mac': mac1}]
    self.env.create_node(api=True, meta=meta, mac=mac2,
                         cluster_id=cluster['id'])
    node_db = self.env.nodes[0]
    admin_iface = self.env.network_manager.get_admin_interface(node_db)
    self.assertEqual(admin_iface.mac, mac2)
    self.assertEqual(admin_iface.ip_addr, admin_ip)

    meta = deepcopy(node_db.meta)
    for interface in meta['interfaces']:
        if interface['mac'] == mac2:
            # reset admin ip for previous admin interface
            interface['ip'] = None
        elif interface['mac'] == mac1:
            # set new admin interface
            interface['ip'] = admin_ip

    resp = self.app.put(
        reverse('NodeAgentHandler'),
        jsonutils.dumps({'id': node_db.id, 'meta': meta}),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    self.db.refresh(node_db)
    admin_iface = self.env.network_manager.get_admin_interface(node_db)
    self.assertEqual(admin_iface.mac, mac2)
    self.assertEqual(admin_iface.ip_addr, None)

    resp = self.app.put(
        reverse('NodeCollectionHandler'),
        jsonutils.dumps([{'id': node_db.id, 'cluster_id': None}]),
        headers=self.default_headers
    )
    self.assertEqual(resp.status_code, 200)

    self.db.refresh(node_db)
    admin_iface = self.env.network_manager.get_admin_interface(node_db)
    self.assertEqual(admin_iface.mac, mac1)
    self.assertEqual(admin_iface.ip_addr, admin_ip)
def prepare():
    meta = base.reflect_db_metadata()

    # Fill in migration table with data
    db.execute(
        meta.tables[extensions_migration_buffer_table_name].insert(),
        [{'extension_name': 'volume_manager',
          'data': jsonutils.dumps({'node_id': 1,
                                   'volumes': [{'volume': 1}]})},
         {'extension_name': 'volume_manager',
          'data': jsonutils.dumps({'node_id': 2,
                                   'volumes': [{'volume': 2}]})},
         {'extension_name': 'some_different_extension',
          'data': 'some_data'}])
    db.commit()
def format_output_data(self, data):
    # Modify data to make it more readable
    if self.resource in data:
        for k, v in six.iteritems(data[self.resource]):
            if isinstance(v, list):
                value = '\n'.join(jsonutils.dumps(
                    i, indent=self.json_indent) if isinstance(i, dict)
                    else str(i) for i in v)
                data[self.resource][k] = value
            elif isinstance(v, dict):
                value = jsonutils.dumps(v, indent=self.json_indent)
                data[self.resource][k] = value
            elif v is None:
                data[self.resource][k] = ''
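# Illustration with hypothetical data: after format_output_data(data),
# list and dict field values in the resource body become JSON text and
# None becomes ''. For example
#   {'port': {'fixed_ips': [{'ip_address': '10.0.0.3'}], 'name': None}}
# renders fixed_ips as the line '{"ip_address": "10.0.0.3"}' and name as
# the empty string.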
def test_create_new_task(self):
    # 0. POST /tasks
    # Create a new task with valid input and type
    task_data = _new_task_fixture()
    task_owner = 'tenant1'
    body_content = json.dumps(task_data)

    path = "/v2/tasks"
    response, content = self.http.request(
        path, 'POST',
        headers=minimal_task_headers(task_owner),
        body=body_content)
    self.assertEqual(201, response.status)

    data = json.loads(content)
    task_id = data['id']

    self.assertIsNotNone(task_id)
    self.assertEqual(task_owner, data['owner'])
    self.assertEqual(task_data['type'], data['type'])
    self.assertEqual(task_data['input'], data['input'])

    # 1. POST /tasks
    # Create a new task with invalid type
    # Expect BadRequest(400) Error as response
    task_data = _new_task_fixture(type='invalid')
    task_owner = 'tenant1'
    body_content = json.dumps(task_data)

    path = "/v2/tasks"
    response, content = self.http.request(
        path, 'POST',
        headers=minimal_task_headers(task_owner),
        body=body_content)
    self.assertEqual(400, response.status)

    # 2. POST /tasks
    # Create a new task with invalid input for type 'import'
    # Expect BadRequest(400) Error as response
    task_data = _new_task_fixture(task_input='{something: invalid}')
    task_owner = 'tenant1'
    body_content = json.dumps(task_data)

    path = "/v2/tasks"
    response, content = self.http.request(
        path, 'POST',
        headers=minimal_task_headers(task_owner),
        body=body_content)
    self.assertEqual(400, response.status)

    # NOTE(nikhil): wait for all task executions to finish before exiting
    # else there is a risk of running into deadlock
    self._wait_on_task_execution()
def test_get_by_instance_legacy(self):
    fakesysmeta = {
        'pci_requests': jsonutils.dumps([fake_legacy_pci_requests[0]]),
        'new_pci_requests': jsonutils.dumps([fake_legacy_pci_requests[1]]),
    }
    instance = objects.Instance(uuid='fake-uuid',
                                system_metadata=fakesysmeta)
    requests = objects.InstancePCIRequests.get_by_instance(self.context,
                                                           instance)
    self.assertEqual(2, len(requests.requests))
    self.assertEqual('alias_1', requests.requests[0].alias_name)
    self.assertFalse(requests.requests[0].is_new)
    self.assertEqual('alias_2', requests.requests[1].alias_name)
    self.assertTrue(requests.requests[1].is_new)
def _pack_json_msg(self, msg):
    """Qpid cannot serialize dicts containing strings longer than 65535
    characters.  This function dumps the message content to a JSON
    string, which Qpid is able to handle.

    :param msg: May be either a Qpid Message object or a bare dict.
    :returns: A Qpid Message with its content field JSON encoded.
    """
    try:
        msg.content = jsonutils.dumps(msg.content)
    except AttributeError:
        # Need to have a Qpid message so we can set the content_type.
        msg = qpid_messaging.Message(jsonutils.dumps(msg))
    msg.content_type = JSON_CONTENT_TYPE
    return msg
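# Minimal stand-in sketch of the two paths above. FakeMessage and the
# content-type literal are assumptions for a runnable example; the real
# class is qpid.messaging.Message and the constant is JSON_CONTENT_TYPE.
import json

class FakeMessage(object):
    def __init__(self, content=None):
        self.content = content
        self.content_type = None

def pack_json_msg(msg):
    try:
        # An existing message object: encode its content in place.
        msg.content = json.dumps(msg.content)
    except AttributeError:
        # A bare dict has no .content; wrap it in a message first.
        msg = FakeMessage(json.dumps(msg))
    msg.content_type = 'application/json; charset=utf8'
    return msg

packed = pack_json_msg({'payload': 'x' * 70000})  # over the 65535 limit
assert isinstance(packed.content, str) and packed.content_type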
def _get_available_resources(self, host_stats):
    return {"vcpus": host_stats["vcpus"],
            "memory_mb": host_stats["host_memory_total"],
            "local_gb": host_stats["disk_total"],
            "vcpus_used": 0,
            "memory_mb_used": (host_stats["host_memory_total"] -
                               host_stats["host_memory_free"]),
            "local_gb_used": host_stats["disk_used"],
            "hypervisor_type": host_stats["hypervisor_type"],
            "hypervisor_version": host_stats["hypervisor_version"],
            "hypervisor_hostname": host_stats["hypervisor_hostname"],
            "cpu_info": jsonutils.dumps(host_stats["cpu_info"]),
            "supported_instances": jsonutils.dumps(
                host_stats["supported_instances"]),
            "numa_topology": None,
            }
def _do_request(self, method, request_url, data=None, reload_needed=False):
    """Perform REST request and return response."""
    try:
        # Note: the username value was redacted in the source.
        info = "method = " + method + ", " + \
               "request_url = " + request_url + ", " + \
               "\ndata = " + str(data) + ", " + \
               "\nreload_needed = " + str(reload_needed) + ", " + \
               "CPNR_server_ip = " + self.CPNR_server_ip + ", " + \
               "CPNR_server_port = " + \
               str(self.CPNR_server_port) + ", " + \
               "CPNR_server_username = " + "******" + ", " + \
               "timeout = " + str(self.timeout)
        LOG.debug("info = {0}".format(info))
        if method == 'GET' or method == 'DELETE':
            start_time = time.time()
            response = requests.request(method, request_url,
                                        auth=self.auth,
                                        headers=self.headers,
                                        timeout=self.timeout)
            LOG.debug("%(method)s Took %(time).2f seconds to process",
                      {'method': method,
                       'time': time.time() - start_time})
            if response.status_code != requests.codes.ok:
                raise Exception(
                    "Invalid return code {0}".format(response.status_code),
                    "Expected return code "
                    "for {0} is {1}".format(method, requests.codes.ok))
        elif method == 'POST' or method == 'PUT':
            if data is None:
                raise Exception(
                    "data dictionary is empty for {0}".format(method))
            json_dump = jsonutils.dumps(data)
            start_time = time.time()
            response = requests.request(method, request_url,
                                        data=json_dump,
                                        auth=self.auth,
                                        headers=self.headers,
                                        timeout=self.timeout)
            LOG.debug("%(method)s Took %(time).2f seconds to process",
                      {'method': method,
                       'time': time.time() - start_time})
            if method == 'POST':
                if response.status_code != requests.codes.created:
                    raise Exception(
                        "Invalid return code {0}".format(
                            response.status_code),
                        "Expected return code for POST is {0}".format(
                            requests.codes.created))
            else:
                if response.status_code != requests.codes.ok:
                    raise Exception(
                        "Invalid return code {0}".format(
                            response.status_code),
                        "Expected return code for PUT is {0}".format(
                            requests.codes.ok))
        LOG.debug("response.text = {0}".format(response.text))
    except r_exc.Timeout as te:
        LOG.warning(_LW("Request timeout for CPNR,"
                        "%(ex_type)s, %(ex_args)s. %(ex_info)s)"),
                    {'ex_type': str(type(te)),
                     'ex_args': str(te.args),
                     'ex_info': str(info)})
    except r_exc.ConnectionError as ce:
        LOG.exception(_LE("Unable to connect to CPNR,"
                          "%(ex_type)s, %(ex_args)s. %(ex_info)s)"),
                      {'ex_type': str(type(ce)),
                       'ex_args': str(ce.args),
                       'ex_info': str(info)})
    except Exception as e:
        LOG.error(_LE("Unexpected error with CPNR,"
                      "%(ex_type)s, %(ex_args)s. %(ex_info)s)"),
                  {'ex_type': str(type(e)),
                   'ex_args': str(e.args),
                   'ex_info': str(info)})
    else:
        if method == 'GET':
            LOG.debug("response.json() = {0}".format(response.json()))
            return response.json()
        elif method == 'POST' or method == 'PUT' or method == 'DELETE':
            LOG.debug("response.status_code = {0}".format(
                response.status_code))
            if reload_needed is True:
                self._cpnr_reload_needed = True
            elif 'Scope' in request_url:
                if "AX_DHCP_RELOAD_REQUIRED" in response.content:
                    self._cpnr_reload_needed = True
            return response.status_code
    LOG.debug("{0} request completed. Return code = {1}".format(
        method, response.status_code))
def _convert_supported_instances_to_db_format(updates):
    hv_specs = updates.pop('supported_hv_specs', None)
    if hv_specs is not None:
        hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
        updates['supported_instances'] = jsonutils.dumps(hv_specs)
def setUp(self):
    super(Examples, self).setUp()

    # The data for several tests are signed using openssl and are stored in
    # files in the signing subdirectory. In order to keep the values
    # consistent between the tests and the signed documents, we read them
    # in for use in the tests.
    with open(os.path.join(CMSDIR, 'auth_token_scoped.json')) as f:
        self.TOKEN_SCOPED_DATA = cms.cms_to_token(f.read())

    with open(os.path.join(CMSDIR, 'auth_token_scoped.pem')) as f:
        self.SIGNED_TOKEN_SCOPED = cms.cms_to_token(f.read())
    self.SIGNED_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
        self.SIGNED_TOKEN_SCOPED)
    self.SIGNED_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
        self.SIGNED_TOKEN_SCOPED, mode='sha256')
    with open(os.path.join(CMSDIR, 'auth_token_unscoped.pem')) as f:
        self.SIGNED_TOKEN_UNSCOPED = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pem')) as f:
        self.SIGNED_v3_TOKEN_SCOPED = cms.cms_to_token(f.read())
    self.SIGNED_v3_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
        self.SIGNED_v3_TOKEN_SCOPED)
    self.SIGNED_v3_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
        self.SIGNED_v3_TOKEN_SCOPED, mode='sha256')
    with open(os.path.join(CMSDIR, 'auth_token_revoked.pem')) as f:
        self.REVOKED_TOKEN = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_token_scoped_expired.pem')) as f:
        self.SIGNED_TOKEN_SCOPED_EXPIRED = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pem')) as f:
        self.REVOKED_v3_TOKEN = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_token_scoped.pkiz')) as f:
        self.SIGNED_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_token_unscoped.pkiz')) as f:
        self.SIGNED_TOKEN_UNSCOPED_PKIZ = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pkiz')) as f:
        self.SIGNED_v3_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_token_revoked.pkiz')) as f:
        self.REVOKED_TOKEN_PKIZ = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR,
                           'auth_token_scoped_expired.pkiz')) as f:
        self.SIGNED_TOKEN_SCOPED_EXPIRED_PKIZ = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pkiz')) as f:
        self.REVOKED_v3_TOKEN_PKIZ = cms.cms_to_token(f.read())
    with open(os.path.join(CMSDIR, 'revocation_list.json')) as f:
        self.REVOCATION_LIST = jsonutils.loads(f.read())
    with open(os.path.join(CMSDIR, 'revocation_list.pem')) as f:
        self.SIGNED_REVOCATION_LIST = jsonutils.dumps({'signed': f.read()})

    self.SIGNING_CERT_FILE = os.path.join(CERTDIR, 'signing_cert.pem')
    with open(self.SIGNING_CERT_FILE) as f:
        self.SIGNING_CERT = f.read()

    self.KERBEROS_BIND = 'USER@REALM'

    self.SIGNING_KEY_FILE = os.path.join(KEYDIR, 'signing_key.pem')
    with open(self.SIGNING_KEY_FILE) as f:
        self.SIGNING_KEY = f.read()

    self.SIGNING_CA_FILE = os.path.join(CERTDIR, 'cacert.pem')
    with open(self.SIGNING_CA_FILE) as f:
        self.SIGNING_CA = f.read()

    self.UUID_TOKEN_DEFAULT = "ec6c0710ec2f471498484c1b53ab4f9d"
    self.UUID_TOKEN_NO_SERVICE_CATALOG = '8286720fbe4941e69fa8241723bb02df'
    self.UUID_TOKEN_UNSCOPED = '731f903721c14827be7b2dc912af7776'
    self.UUID_TOKEN_BIND = '3fc54048ad64405c98225ce0897af7c5'
    self.UUID_TOKEN_UNKNOWN_BIND = '8885fdf4d42e4fb9879e6379fa1eaf48'
    self.VALID_DIABLO_TOKEN = 'b0cf19b55dbb4f20a6ee18e6c6cf1726'
    self.v3_UUID_TOKEN_DEFAULT = '5603457654b346fdbb93437bfe76f2f1'
    self.v3_UUID_TOKEN_UNSCOPED = 'd34835fdaec447e695a0a024d84f8d79'
    self.v3_UUID_TOKEN_DOMAIN_SCOPED = 'e8a7b63aaa4449f38f0c5c05c3581792'
    self.v3_UUID_TOKEN_BIND = '2f61f73e1c854cbb9534c487f9bd63c2'
    self.v3_UUID_TOKEN_UNKNOWN_BIND = '7ed9781b62cd4880b8d8c6788ab1d1e2'

    self.UUID_SERVICE_TOKEN_DEFAULT = 'fe4c0710ec2f492748596c1b53ab124'
    self.v3_UUID_SERVICE_TOKEN_DEFAULT = 'g431071bbc2f492748596c1b53cb229'

    revoked_token = self.REVOKED_TOKEN
    if isinstance(revoked_token, six.text_type):
        revoked_token = revoked_token.encode('utf-8')
    self.REVOKED_TOKEN_HASH = utils.hash_signed_token(revoked_token)
    self.REVOKED_TOKEN_HASH_SHA256 = utils.hash_signed_token(
        revoked_token, mode='sha256')
    self.REVOKED_TOKEN_LIST = (
        {'revoked': [{'id': self.REVOKED_TOKEN_HASH,
                      'expires': timeutils.utcnow()}]})
    self.REVOKED_TOKEN_LIST_JSON = jsonutils.dumps(self.REVOKED_TOKEN_LIST)

    revoked_v3_token = self.REVOKED_v3_TOKEN
    if isinstance(revoked_v3_token, six.text_type):
        revoked_v3_token = revoked_v3_token.encode('utf-8')
    self.REVOKED_v3_TOKEN_HASH = utils.hash_signed_token(revoked_v3_token)
    hash = utils.hash_signed_token(revoked_v3_token, mode='sha256')
    self.REVOKED_v3_TOKEN_HASH_SHA256 = hash
    self.REVOKED_v3_TOKEN_LIST = (
        {'revoked': [{'id': self.REVOKED_v3_TOKEN_HASH,
                      'expires': timeutils.utcnow()}]})
    self.REVOKED_v3_TOKEN_LIST_JSON = jsonutils.dumps(
        self.REVOKED_v3_TOKEN_LIST)

    revoked_token_pkiz = self.REVOKED_TOKEN_PKIZ
    if isinstance(revoked_token_pkiz, six.text_type):
        revoked_token_pkiz = revoked_token_pkiz.encode('utf-8')
    self.REVOKED_TOKEN_PKIZ_HASH = utils.hash_signed_token(
        revoked_token_pkiz)
    revoked_v3_token_pkiz = self.REVOKED_v3_TOKEN_PKIZ
    if isinstance(revoked_v3_token_pkiz, six.text_type):
        revoked_v3_token_pkiz = revoked_v3_token_pkiz.encode('utf-8')
    self.REVOKED_v3_PKIZ_TOKEN_HASH = utils.hash_signed_token(
        revoked_v3_token_pkiz)

    self.REVOKED_TOKEN_PKIZ_LIST = (
        {'revoked': [{'id': self.REVOKED_TOKEN_PKIZ_HASH,
                      'expires': timeutils.utcnow()},
                     {'id': self.REVOKED_v3_PKIZ_TOKEN_HASH,
                      'expires': timeutils.utcnow()},
                     ]})
    self.REVOKED_TOKEN_PKIZ_LIST_JSON = jsonutils.dumps(
        self.REVOKED_TOKEN_PKIZ_LIST)

    self.SIGNED_TOKEN_SCOPED_KEY = cms.cms_hash_token(
        self.SIGNED_TOKEN_SCOPED)
    self.SIGNED_TOKEN_UNSCOPED_KEY = cms.cms_hash_token(
        self.SIGNED_TOKEN_UNSCOPED)
    self.SIGNED_v3_TOKEN_SCOPED_KEY = cms.cms_hash_token(
        self.SIGNED_v3_TOKEN_SCOPED)

    self.SIGNED_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
        self.SIGNED_TOKEN_SCOPED_PKIZ)
    self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY = cms.cms_hash_token(
        self.SIGNED_TOKEN_UNSCOPED_PKIZ)
    self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
        self.SIGNED_v3_TOKEN_SCOPED_PKIZ)

    self.INVALID_SIGNED_TOKEN = (
        "MIIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
        "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
        "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
        "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
        "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
        "0000000000000000000000000000000000000000000000000000000000000000"
        "1111111111111111111111111111111111111111111111111111111111111111"
        "2222222222222222222222222222222222222222222222222222222222222222"
        "3333333333333333333333333333333333333333333333333333333333333333"
        "4444444444444444444444444444444444444444444444444444444444444444"
        "5555555555555555555555555555555555555555555555555555555555555555"
        "6666666666666666666666666666666666666666666666666666666666666666"
        "7777777777777777777777777777777777777777777777777777777777777777"
        "8888888888888888888888888888888888888888888888888888888888888888"
        "9999999999999999999999999999999999999999999999999999999999999999"
        "0000000000000000000000000000000000000000000000000000000000000000")

    self.INVALID_SIGNED_PKIZ_TOKEN = (
        "PKIZ_AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
        "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
        "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
        "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
        "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
        "0000000000000000000000000000000000000000000000000000000000000000"
        "1111111111111111111111111111111111111111111111111111111111111111"
        "2222222222222222222222222222222222222222222222222222222222222222"
        "3333333333333333333333333333333333333333333333333333333333333333"
        "4444444444444444444444444444444444444444444444444444444444444444"
        "5555555555555555555555555555555555555555555555555555555555555555"
        "6666666666666666666666666666666666666666666666666666666666666666"
        "7777777777777777777777777777777777777777777777777777777777777777"
        "8888888888888888888888888888888888888888888888888888888888888888"
        "9999999999999999999999999999999999999999999999999999999999999999"
        "0000000000000000000000000000000000000000000000000000000000000000")

    # JSON responses keyed by token ID
    self.TOKEN_RESPONSES = {}

    # basic values
    PROJECT_ID = 'tenant_id1'
    PROJECT_NAME = 'tenant_name1'
    USER_ID = 'user_id1'
    USER_NAME = 'user_name1'
    DOMAIN_ID = 'domain_id1'
    DOMAIN_NAME = 'domain_name1'
    ROLE_NAME1 = 'role1'
    ROLE_NAME2 = 'role2'

    SERVICE_PROJECT_ID = 'service_project_id1'
    SERVICE_PROJECT_NAME = 'service_project_name1'
    SERVICE_USER_ID = 'service_user_id1'
    SERVICE_USER_NAME = 'service_user_name1'
    SERVICE_DOMAIN_ID = 'service_domain_id1'
    SERVICE_DOMAIN_NAME = 'service_domain_name1'
    SERVICE_ROLE_NAME1 = 'service_role1'
    SERVICE_ROLE_NAME2 = 'service_role2'

    self.SERVICE_TYPE = 'identity'
    self.UNVERSIONED_SERVICE_URL = 'http://keystone.server:5000/'
    self.SERVICE_URL = self.UNVERSIONED_SERVICE_URL + 'v2.0'

    # Old Tokens

    self.TOKEN_RESPONSES[self.VALID_DIABLO_TOKEN] = {
        'access': {
            'token': {
                'id': self.VALID_DIABLO_TOKEN,
                'expires': '2020-01-01T00:00:10.000123Z',
                'tenantId': PROJECT_ID,
            },
            'user': {
                'id': USER_ID,
                'name': USER_NAME,
                'roles': [
                    {'name': ROLE_NAME1},
                    {'name': ROLE_NAME2},
                ],
            },
        },
    }

    # Generated V2 Tokens

    token = fixture.V2Token(token_id=self.UUID_TOKEN_DEFAULT,
                            tenant_id=PROJECT_ID,
                            tenant_name=PROJECT_NAME,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    token.add_role(name=ROLE_NAME1)
    token.add_role(name=ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint(public=self.SERVICE_URL)
    self.TOKEN_RESPONSES[self.UUID_TOKEN_DEFAULT] = token

    token = fixture.V2Token(token_id=self.UUID_TOKEN_UNSCOPED,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    self.TOKEN_RESPONSES[self.UUID_TOKEN_UNSCOPED] = token

    token = fixture.V2Token(token_id='valid-token',
                            tenant_id=PROJECT_ID,
                            tenant_name=PROJECT_NAME,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    token.add_role(ROLE_NAME1)
    token.add_role(ROLE_NAME2)
    self.TOKEN_RESPONSES[self.UUID_TOKEN_NO_SERVICE_CATALOG] = token

    token = fixture.V2Token(token_id=self.SIGNED_TOKEN_SCOPED_KEY,
                            tenant_id=PROJECT_ID,
                            tenant_name=PROJECT_NAME,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    token.add_role(ROLE_NAME1)
    token.add_role(ROLE_NAME2)
    self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_KEY] = token

    token = fixture.V2Token(token_id=self.SIGNED_TOKEN_UNSCOPED_KEY,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_KEY] = token

    token = fixture.V2Token(token_id=self.UUID_TOKEN_BIND,
                            tenant_id=PROJECT_ID,
                            tenant_name=PROJECT_NAME,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    token.add_role(ROLE_NAME1)
    token.add_role(ROLE_NAME2)
    token['access']['token']['bind'] = {'kerberos': self.KERBEROS_BIND}
    self.TOKEN_RESPONSES[self.UUID_TOKEN_BIND] = token

    token = fixture.V2Token(token_id=self.UUID_TOKEN_UNKNOWN_BIND,
                            tenant_id=PROJECT_ID,
                            tenant_name=PROJECT_NAME,
                            user_id=USER_ID,
                            user_name=USER_NAME)
    token.add_role(ROLE_NAME1)
    token.add_role(ROLE_NAME2)
    token['access']['token']['bind'] = {'FOO': 'BAR'}
    self.TOKEN_RESPONSES[self.UUID_TOKEN_UNKNOWN_BIND] = token

    token = fixture.V2Token(token_id=self.UUID_SERVICE_TOKEN_DEFAULT,
                            tenant_id=SERVICE_PROJECT_ID,
                            tenant_name=SERVICE_PROJECT_NAME,
                            user_id=SERVICE_USER_ID,
                            user_name=SERVICE_USER_NAME)
    token.add_role(name=SERVICE_ROLE_NAME1)
    token.add_role(name=SERVICE_ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint(public=self.SERVICE_URL)
    self.TOKEN_RESPONSES[self.UUID_SERVICE_TOKEN_DEFAULT] = token

    # Generated V3 Tokens

    token = fixture.V3Token(user_id=USER_ID,
                            user_name=USER_NAME,
                            user_domain_id=DOMAIN_ID,
                            user_domain_name=DOMAIN_NAME,
                            project_id=PROJECT_ID,
                            project_name=PROJECT_NAME,
                            project_domain_id=DOMAIN_ID,
                            project_domain_name=DOMAIN_NAME)
    token.add_role(id=ROLE_NAME1, name=ROLE_NAME1)
    token.add_role(id=ROLE_NAME2, name=ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint('public', self.SERVICE_URL)
    self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_DEFAULT] = token

    token = fixture.V3Token(user_id=USER_ID,
                            user_name=USER_NAME,
                            user_domain_id=DOMAIN_ID,
                            user_domain_name=DOMAIN_NAME)
    self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_UNSCOPED] = token

    token = fixture.V3Token(user_id=USER_ID,
                            user_name=USER_NAME,
                            user_domain_id=DOMAIN_ID,
                            user_domain_name=DOMAIN_NAME,
                            domain_id=DOMAIN_ID,
                            domain_name=DOMAIN_NAME)
    token.add_role(id=ROLE_NAME1, name=ROLE_NAME1)
    token.add_role(id=ROLE_NAME2, name=ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint('public', self.SERVICE_URL)
    self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_DOMAIN_SCOPED] = token

    token = fixture.V3Token(user_id=USER_ID,
                            user_name=USER_NAME,
                            user_domain_id=DOMAIN_ID,
                            user_domain_name=DOMAIN_NAME,
                            project_id=PROJECT_ID,
                            project_name=PROJECT_NAME,
                            project_domain_id=DOMAIN_ID,
                            project_domain_name=DOMAIN_NAME)
    token.add_role(name=ROLE_NAME1)
    token.add_role(name=ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint('public', self.SERVICE_URL)
    self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_KEY] = token

    token = fixture.V3Token(user_id=USER_ID,
                            user_name=USER_NAME,
                            user_domain_id=DOMAIN_ID,
                            user_domain_name=DOMAIN_NAME,
                            project_id=PROJECT_ID,
                            project_name=PROJECT_NAME,
                            project_domain_id=DOMAIN_ID,
                            project_domain_name=DOMAIN_NAME)
    token.add_role(name=ROLE_NAME1)
    token.add_role(name=ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint('public', self.SERVICE_URL)
    token['token']['bind'] = {'kerberos': self.KERBEROS_BIND}
    self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_BIND] = token

    token = fixture.V3Token(user_id=USER_ID,
                            user_name=USER_NAME,
                            user_domain_id=DOMAIN_ID,
                            user_domain_name=DOMAIN_NAME,
                            project_id=PROJECT_ID,
                            project_name=PROJECT_NAME,
                            project_domain_id=DOMAIN_ID,
                            project_domain_name=DOMAIN_NAME)
    token.add_role(name=ROLE_NAME1)
    token.add_role(name=ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint('public', self.SERVICE_URL)
    token['token']['bind'] = {'FOO': 'BAR'}
    self.TOKEN_RESPONSES[self.v3_UUID_TOKEN_UNKNOWN_BIND] = token

    token = fixture.V3Token(user_id=SERVICE_USER_ID,
                            user_name=SERVICE_USER_NAME,
                            user_domain_id=SERVICE_DOMAIN_ID,
                            user_domain_name=SERVICE_DOMAIN_NAME,
                            project_id=SERVICE_PROJECT_ID,
                            project_name=SERVICE_PROJECT_NAME,
                            project_domain_id=SERVICE_DOMAIN_ID,
                            project_domain_name=SERVICE_DOMAIN_NAME)
    token.add_role(id=SERVICE_ROLE_NAME1, name=SERVICE_ROLE_NAME1)
    token.add_role(id=SERVICE_ROLE_NAME2, name=SERVICE_ROLE_NAME2)
    svc = token.add_service(self.SERVICE_TYPE)
    svc.add_endpoint('public', self.SERVICE_URL)
    self.TOKEN_RESPONSES[self.v3_UUID_SERVICE_TOKEN_DEFAULT] = token

    # PKIZ tokens generally link to above tokens

    self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_PKIZ_KEY] = (
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_KEY])
    self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY] = (
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_KEY])
    self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY] = (
        self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_KEY])

    self.JSON_TOKEN_RESPONSES = dict([(k, jsonutils.dumps(v)) for k, v in
                                      six.iteritems(self.TOKEN_RESPONSES)])
def test_instance_object_none_info_cache(self):
    inst = fake_instance.fake_instance_obj('fake-context',
                                           expected_attrs=['info_cache'])
    self.assertIsNone(inst.info_cache)
    result = compute_utils.get_nw_info_for_instance(inst)
    self.assertEqual(jsonutils.dumps([]), result.json())
def _set_pci_request(self, claim):
    request = [{'count': 1,
                'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
                }]
    claim.instance.update(
        system_metadata={'new_pci_requests': jsonutils.dumps(request)})
def _format_fixed_ips(port):
    try:
        return '\n'.join([jsonutils.dumps(ip) for ip in port['fixed_ips']])
    except (TypeError, KeyError):
        return ''
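# Example with a hypothetical port dict: one JSON object per line, one
# line per fixed IP (key order within each object may vary).
#
#   port = {'fixed_ips': [{'subnet_id': 'sub-1', 'ip_address': '10.0.0.3'},
#                         {'subnet_id': 'sub-2', 'ip_address': '10.0.1.3'}]}
#   _format_fixed_ips(port)
#   => '{"subnet_id": "sub-1", "ip_address": "10.0.0.3"}\n'
#      '{"subnet_id": "sub-2", "ip_address": "10.0.1.3"}'
#
# A port missing 'fixed_ips' (KeyError) or with fixed_ips=None (TypeError)
# formats as ''.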
def _set_policy(self, new_policy):
    with open(self.tmpfilename, "w") as policyfile:
        policyfile.write(jsonutils.dumps(new_policy))
def _get_create_request_json(self, body_dict):
    req = webob.Request.blank('/v2/fake/os-create-server-ext')
    req.headers['Content-Type'] = 'application/json'
    req.method = 'POST'
    req.body = jsonutils.dumps(body_dict)
    return req
def _make_request(self, url, body):
    req = webob.Request.blank(self.fake_url + url)
    req.method = 'POST'
    req.body = jsonutils.dumps(body)
    req.content_type = 'application/json'
    return req.get_response(self.app)
def _test_dnsmasq_execute(self, extra_expected=None):
    network_ref = {'id': 'fake',
                   'label': 'fake',
                   'gateway': '10.0.0.1',
                   'multi_host': False,
                   'cidr': '10.0.0.0/24',
                   'netmask': '255.255.255.0',
                   'dns1': '8.8.4.4',
                   'dhcp_start': '1.0.0.2',
                   'dhcp_server': '10.0.0.1',
                   'share_address': False}

    def fake_execute(*args, **kwargs):
        executes.append(args)
        return "", ""

    def fake_add_dhcp_mangle_rule(*args, **kwargs):
        executes.append(args)

    self.stubs.Set(linux_net, '_execute', fake_execute)
    self.stubs.Set(linux_net, '_add_dhcp_mangle_rule',
                   fake_add_dhcp_mangle_rule)

    self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
    self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
    self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
    dev = 'br100'

    default_domain = CONF.dhcp_domain
    for domain in ('', default_domain):
        executes = []
        self.flags(dhcp_domain=domain)
        fixedips = self._get_fixedips(network_ref)
        linux_net.restart_dhcp(self.context, dev, network_ref, fixedips)
        expected = ['env',
                    'CONFIG_FILE=%s' % jsonutils.dumps(
                        CONF.dhcpbridge_flagfile),
                    'NETWORK_ID=fake',
                    'dnsmasq',
                    '--strict-order',
                    '--bind-interfaces',
                    '--conf-file=%s' % CONF.dnsmasq_config_file,
                    '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
                    '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'),
                    '--listen-address=%s' % network_ref['dhcp_server'],
                    '--except-interface=lo',
                    "--dhcp-range=set:%s,%s,static,%s,%ss" %
                    (network_ref['label'], network_ref['dhcp_start'],
                     network_ref['netmask'], CONF.dhcp_lease_time),
                    '--dhcp-lease-max=256',
                    '--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'),
                    '--dhcp-script=%s' % CONF.dhcpbridge,
                    '--no-hosts',
                    '--leasefile-ro']

        if CONF.dhcp_domain:
            expected.append('--domain=%s' % CONF.dhcp_domain)

        if extra_expected:
            expected += extra_expected

        self.assertEqual([(dev,), tuple(expected)], executes)
def get_response(self, *args, **kwargs):
    response = mock.Mock()
    response.status_code = requests.codes.ok
    response.content = jsonutils.dumps({'session_uuid': 'new_auth_token'})
    return response
dict(id=1,
     local_gb=10,
     memory_mb=1024,
     vcpus=1,
     vcpus_used=0,
     local_gb_used=0,
     memory_mb_used=0,
     updated_at=None,
     cpu_info='baremetal cpu',
     service=dict(host='host1', disabled=False),
     hypervisor_hostname='node1uuid',
     host_ip='127.0.0.1',
     hypervisor_version=1,
     hypervisor_type='ironic',
     stats=jsonutils.dumps(dict(
         ironic_driver="nova.virt.ironic.driver.IronicDriver",
         cpu_arch='i386')),
     supported_instances='[["i386", "baremetal", "baremetal"]]',
     free_disk_gb=10,
     free_ram_mb=1024),
dict(id=2,
     local_gb=20,
     memory_mb=2048,
     vcpus=1,
     vcpus_used=0,
     local_gb_used=0,
     memory_mb_used=0,
     updated_at=None,
     cpu_info='baremetal cpu',
     service=dict(host='host2', disabled=True),
     hypervisor_hostname='node2uuid',
def _update_usage_from_migration(self, context, instance, image_meta,
                                 resources, migration):
    """Update usage for a single migration.  The record may represent an
    incoming or outbound migration.
    """
    uuid = migration['instance_uuid']
    LOG.audit(_("Updating from migration %s") % uuid)

    incoming = (migration['dest_compute'] == self.host and
                migration['dest_node'] == self.nodename)
    outbound = (migration['source_compute'] == self.host and
                migration['source_node'] == self.nodename)
    same_node = (incoming and outbound)

    record = self.tracked_instances.get(uuid, None)
    itype = None

    if same_node:
        # same node resize. record usage for whichever instance type the
        # instance is *not* in:
        if (instance['instance_type_id'] ==
                migration['old_instance_type_id']):
            itype = self._get_instance_type(
                context, instance, 'new_',
                migration['new_instance_type_id'])
        else:
            # instance record already has new flavor, hold space for a
            # possible revert to the old instance type:
            itype = self._get_instance_type(
                context, instance, 'old_',
                migration['old_instance_type_id'])
    elif incoming and not record:
        # instance has not yet migrated here:
        itype = self._get_instance_type(context, instance, 'new_',
                                        migration['new_instance_type_id'])
    elif outbound and not record:
        # instance migrated, but record usage for a possible revert:
        itype = self._get_instance_type(context, instance, 'old_',
                                        migration['old_instance_type_id'])

    if image_meta is None:
        image_meta = utils.get_image_from_system_metadata(
            instance['system_metadata'])

    if itype:
        host_topology = resources.get('numa_topology')
        if host_topology:
            host_topology = hardware.VirtNUMAHostTopology.from_json(
                host_topology)
        numa_topology = (
            hardware.VirtNUMAInstanceTopology.get_constraints(
                itype, image_meta))
        numa_topology = (
            hardware.VirtNUMAHostTopology.fit_instance_to_host(
                host_topology, numa_topology))
        usage = self._get_usage_dict(itype, numa_topology=numa_topology)
        if self.pci_tracker:
            self.pci_tracker.update_pci_for_migration(context, instance)
        self._update_usage(context, resources, usage)
        if self.pci_tracker:
            resources['pci_stats'] = jsonutils.dumps(
                self.pci_tracker.stats)
        else:
            resources['pci_stats'] = jsonutils.dumps([])
        self.tracked_migrations[uuid] = (migration, itype)
def index(self, response, result):
    property_type_json = tojson(PropertyTypes, result)
    body = json.dumps(property_type_json, ensure_ascii=False)
    response.unicode_body = six.text_type(body)
    response.content_type = 'application/json'
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
    LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj)
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                                 parent_resource_id=router_id)
    return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj),
                             cluster=cluster)
def post_handler(self, handler_name, obj_data, handler_kwargs={}):
    resp = self.app.post(
        reverse(handler_name, kwargs=handler_kwargs),
        jsonutils.dumps(obj_data),
        headers=self.default_headers)
    self.assertIn(resp.status_code, (200, 201))
    return resp
def _format_fixed_ips_csv(port):
    try:
        return jsonutils.dumps(port['fixed_ips'])
    except (TypeError, KeyError):
        return ''
def patch_handler(self, handler_name, request_params, handler_kwargs={}):
    resp = self.app.patch(
        reverse(handler_name, kwargs=handler_kwargs),
        params=jsonutils.dumps(request_params),
        headers=self.default_headers)
    self.assertIn(resp.status_code, (200, 202))
    return resp
def _vendor_data(self, version, path):
    if self._check_os_version(HAVANA, version):
        self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
        return jsonutils.dumps(self.vddriver.get())
    raise KeyError(path)
def do_hash(item):
    return hash(jsonutils.dumps(item))
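# Editorial note: hash(jsonutils.dumps(item)) is only stable for dicts if
# the serialized key order is stable. If a deterministic digest were
# needed, a sketch using sort_keys (a standard json.dumps keyword that
# jsonutils passes through) would look like:
def do_stable_hash(item):
    return hash(jsonutils.dumps(item, sort_keys=True))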
def test_instance_dict_none_info_cache(self):
    inst = fake_instance.fake_db_instance(info_cache=None)
    self.assertIsNone(inst['info_cache'])
    result = compute_utils.get_nw_info_for_instance(inst)
    self.assertEqual(jsonutils.dumps([]), result.json())
def test_pki_encoder(self):
    data = {'field': 'value'}
    json = jsonutils.dumps(data, cls=common_utils.PKIEncoder)
    expected_json = b'{"field":"value"}'
    self.assertEqual(expected_json, json)
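# The compact output asserted above implies an encoder that drops the
# default ", " / ": " separators. An equivalent standalone sketch
# (CompactEncoder is a stand-in name, not keystone's actual PKIEncoder
# implementation):
import json

class CompactEncoder(json.JSONEncoder):
    def __init__(self, **kwargs):
        kwargs['separators'] = (',', ':')
        super(CompactEncoder, self).__init__(**kwargs)

assert json.dumps({'field': 'value'},
                  cls=CompactEncoder) == '{"field":"value"}'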
def serialize(self, entity):
    return jsonutils.dumps(entity)
def test_dumps(self):
    self.assertEqual('{"a": "b"}', jsonutils.dumps({'a': 'b'}))
import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils

from nova import db
from nova import exception
from nova.objects import compute_node
from nova.objects import hv_spec
from nova.objects import service
from nova.tests.objects import test_objects
from nova.virt import hardware

NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
fake_stats_db_format = jsonutils.dumps(fake_stats)
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
fake_numa_topology = hardware.VirtNUMAHostTopology(
    cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 512),
           hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 512)])
fake_numa_topology_db_format = fake_numa_topology.to_json()
fake_hv_spec = hv_spec.HVSpec(arch='foo', hv_type='bar', vm_mode='foobar')
fake_supported_hv_specs = [fake_hv_spec]
# for backward compatibility, each supported instance object
# is stored as a list in the database
fake_supported_hv_specs_db_format = jsonutils.dumps(
    [fake_hv_spec.to_list()])
fake_compute_node = {
    'created_at': NOW,
def _convert_pci_stats_to_db_format(updates):
    pools = updates.pop('pci_device_pools', None)
    if pools:
        updates['pci_stats'] = jsonutils.dumps(pools.obj_to_primitive())
def gen_json_block(cls, data):
    return "\n.. code-block:: javascript\n\n{0}\n\n".format(
        "\n".join([" " + s for s in
                   jsonutils.dumps(data, indent=4).split("\n")]))
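# Usage sketch: rendering a small dict through the helper above produces
# a reST code-block directive whose body is the indented JSON dump, e.g.
# (body indentation abbreviated):
#
#   .. code-block:: javascript
#
#       {
#           "status": "ok"
#       }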
def _encode_body(self, body):
    return jsonutils.dumps(body)
def test_dumps_namedtuple(self):
    n = collections.namedtuple("foo", "bar baz")(1, 2)
    self.assertEqual('[1, 2]', jsonutils.dumps(n))
def _convert_stats_to_db_format(updates):
    stats = updates.pop('stats', None)
    if stats is not None:
        updates['stats'] = jsonutils.dumps(stats)
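# Hedged round-trip sketch for the *_to_db_format helpers above: these
# columns are stored as JSON text, so dumps() on write pairs with
# jsonutils.loads() when the row is read back.
updates = {'stats': {'num_instances': 3}, 'memory_mb': 2048}
_convert_stats_to_db_format(updates)
assert jsonutils.loads(updates['stats']) == {'num_instances': 3}
assert updates['memory_mb'] == 2048  # non-JSON fields are left untouched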