def update_from_compute_node(self, compute):
    """Update information about a host from its compute_node info.

    Skips the update when our cached view (``self.updated``) is already
    newer than the DB record's ``updated_at``.
    """
    if (self.updated and compute['updated_at']
            and self.updated > compute['updated_at']):
        return
    all_ram_mb = compute['memory_mb']

    # Assume virtual size is all consumed by instances if use qcow2 disk.
    free_gb = compute['free_disk_gb']
    least_gb = compute.get('disk_available_least')
    if least_gb is not None:
        if least_gb > free_gb:
            # can occur when an instance in database is not on host
            LOG.warning(_LW("Host %(hostname)s has more disk space than "
                            "database expected "
                            "(%(physical)sgb > %(database)sgb)"),
                        {'physical': least_gb, 'database': free_gb,
                         'hostname': compute['hypervisor_hostname']})
        # Prefer the more conservative of the two disk figures.
        free_gb = min(least_gb, free_gb)
    free_disk_mb = free_gb * 1024

    self.disk_mb_used = compute['local_gb_used'] * 1024

    # NOTE(jogo) free_ram_mb can be negative
    self.free_ram_mb = compute['free_ram_mb']
    self.total_usable_ram_mb = all_ram_mb
    self.total_usable_disk_gb = compute['local_gb']
    self.free_disk_mb = free_disk_mb
    self.vcpus_total = compute['vcpus']
    self.vcpus_used = compute['vcpus_used']
    self.updated = compute['updated_at']
    self.numa_topology = compute['numa_topology']
    # 'pci_stats' may be absent from older compute records.
    if 'pci_stats' in compute:
        self.pci_stats = pci_stats.PciDeviceStats(compute['pci_stats'])
    else:
        self.pci_stats = None

    # All virt drivers report host_ip
    self.host_ip = compute['host_ip']
    self.hypervisor_type = compute.get('hypervisor_type')
    self.hypervisor_version = compute.get('hypervisor_version')
    self.hypervisor_hostname = compute.get('hypervisor_hostname')
    self.cpu_info = compute.get('cpu_info')
    # 'supported_instances' is stored as a JSON blob.
    if compute.get('supported_instances'):
        self.supported_instances = jsonutils.loads(
            compute.get('supported_instances'))

    # Don't store stats directly in host_state to make sure these don't
    # overwrite any values, or get overwritten themselves. Store in self so
    # filters can schedule with them.
    stats = compute.get('stats', None) or '{}'
    self.stats = jsonutils.loads(stats)

    # Track number of instances on host
    self.num_instances = int(self.stats.get('num_instances', 0))

    self.num_io_ops = int(self.stats.get('io_workload', 0))

    # update metrics
    self._update_metrics_from_compute_node(compute)
def test_task_schema_api(self):
    # Exercise both schema endpoints and compare each payload against
    # the minimal schema the tasks module advertises.
    cases = [
        ("/v2/schemas/task", tasks.get_task_schema),
        ("/v2/schemas/tasks", tasks.get_collection_schema),
    ]
    for path, schema_factory in cases:
        response, content = self.http.request(
            path, 'GET', headers=minimal_task_headers())
        self.assertEqual(200, response.status)
        data = json.loads(content)
        self.assertIsNotNone(data)
        self.assertEqual(schema_factory().minimal(), data)

    # NOTE(nikhil): wait for all task executions to finish before exiting
    # else there is a risk of running into deadlock
    self._wait_on_task_execution()
def test_plugin_prefix_with_parent_resource(self):
    # Wire a prefixed extension resource under a parent collection and
    # verify index, member-action and collection-action routing.
    controller = self.DummySvcPlugin()
    parent = dict(member_name="tenant", collection_name="tenants")
    res_ext = extensions.ResourceExtension(
        'tweedles', controller, parent,
        path_prefix="/dummy_svc",
        member_actions={'custom_member_action': "GET"},
        collection_actions={'collection_action': "GET"})
    test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

    index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
    self.assertEqual(200, index_response.status_int)

    response = test_app.get(
        "/dummy_svc/tenants/1/tweedles/1/custom_member_action")
    self.assertEqual(200, response.status_int)
    self.assertEqual(jsonutils.loads(response.body)['member_action'],
                     "value")

    response = test_app.get(
        "/dummy_svc/tenants/2/tweedles/collection_action")
    self.assertEqual(200, response.status_int)
    self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def upgrade_ubuntu_cobbler_profile_6_0_to_6_1(connection):
    # Migrate every cobbler reference from the Ubuntu 12.04 profile to
    # the 14.04 one, both in per-node generated attributes and in the
    # release-level attribute metadata.
    select_query = text("SELECT id, generated FROM attributes")
    update_query = text(
        "UPDATE attributes SET generated = :generated WHERE id = :attr_id")
    for attr_id, generated in connection.execute(select_query):
        attrs = jsonutils.loads(generated)
        if attrs['cobbler']['profile'] == 'ubuntu_1204_x86_64':
            attrs['cobbler']['profile'] = 'ubuntu_1404_x86_64'
            # NOTE(review): collapsed source makes the nesting ambiguous;
            # this assumes the UPDATE only runs for rows that changed —
            # confirm against the original migration.
            connection.execute(
                update_query,
                generated=jsonutils.dumps(attrs),
                attr_id=attr_id)

    select_query = text("SELECT id, attributes_metadata FROM releases")
    update_query = text(
        "UPDATE releases SET attributes_metadata = :attrs_meta"
        " WHERE id = :release_id")
    for release_id, attributes_metadata in connection.execute(select_query):
        attrs = jsonutils.loads(attributes_metadata)
        if attrs['generated']['cobbler']['profile']['generator_arg'] == \
                'ubuntu_1204_x86_64':
            attrs['generated']['cobbler']['profile']['generator_arg'] = \
                'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                attrs_meta=jsonutils.dumps(attrs),
                release_id=release_id)
def test_new_fields_exists_and_empty(self):
    # Each newly added plugins column must exist and hold an empty JSON
    # container of the appropriate kind (dict or list).
    plugins = self.meta.tables['plugins']
    expectations = [
        ('attributes_metadata', {}),
        ('volumes_metadata', {}),
        ('roles_metadata', {}),
        ('deployment_tasks', []),
        ('tasks', []),
    ]
    for column_name, empty_value in expectations:
        result = db.execute(sa.select([plugins.c[column_name]]))
        self.assertEqual(
            jsonutils.loads(result.fetchone()[0]), empty_value)
def _from_db_object(context, compute, db_compute):
    # Fields below need custom deserialization rather than a straight
    # copy from the DB row.
    special_cases = set([
        'stats',
        'supported_hv_specs',
        'host',
        'pci_device_pools',
    ])
    fields = set(compute.fields) - special_cases
    for key in fields:
        compute[key] = db_compute[key]

    # 'stats' is stored in the DB as a JSON blob.
    stats = db_compute['stats']
    if stats:
        compute['stats'] = jsonutils.loads(stats)

    # 'supported_instances' is a JSON list; each entry becomes an HVSpec.
    sup_insts = db_compute.get('supported_instances')
    if sup_insts:
        hv_specs = jsonutils.loads(sup_insts)
        hv_specs = [objects.HVSpec.from_list(hv_spec)
                    for hv_spec in hv_specs]
        compute['supported_hv_specs'] = hv_specs

    pci_stats = db_compute.get('pci_stats')
    compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
    compute._context = context

    # Make sure that we correctly set the host field depending on either
    # host column is present in the table or not
    compute._host_from_db_object(compute, db_compute)

    compute.obj_reset_changes()
    return compute
def test_get(self):
    # Volume won't have anything other than base by default; adding
    # glance then volume metadata should grow the serialized key set.
    base = self.bak_meta_api.TYPE_TAG_VOL_BASE_META
    glance_tag = self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META
    vol_tag = self.bak_meta_api.TYPE_TAG_VOL_META

    def assert_meta_keys(expected):
        serialized = self.bak_meta_api.get(self.volume_id)
        actual = set(jsonutils.loads(serialized).keys())
        self.assertEqual(actual.symmetric_difference(expected), set())

    assert_meta_keys(["version", base])

    self._add_metadata(vol_glance_meta=True)
    assert_meta_keys(["version", base, glance_tag])

    self._add_metadata(vol_meta=True)
    assert_meta_keys(["version", base, glance_tag, vol_tag])
def test_list_extensions_json(self):
    # Listing extensions must contain all expected extensions (extras
    # are tolerated), valid timestamps, and working detail views.
    app = router.APIRouter()

    request = webob.Request.blank("/fake/extensions")
    response = request.get_response(app)
    self.assertEqual(200, response.status_int)

    # Make sure we have all the extensions, extra extensions being OK.
    data = jsonutils.loads(response.body)
    names = [str(x['name']) for x in data['extensions']
             if str(x['name']) in self.ext_list]
    names.sort()
    self.assertEqual(names, self.ext_list)

    # Ensure all the timestamps are valid according to iso8601
    for ext in data['extensions']:
        iso8601.parse_date(ext['updated'])

    # Make sure that at least Fox in Sox is correct.
    (fox_ext, ) = [
        x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
    self.assertEqual(
        fox_ext,
        {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
         'name': 'Fox In Socks',
         'updated': '2011-01-22T13:25:27-06:00',
         'description': 'The Fox In Socks Extension.',
         'alias': 'FOXNSOX',
         'links': []},
    )

    # Each extension's detail view must echo its alias.
    for ext in data['extensions']:
        url = '/fake/extensions/%s' % ext['alias']
        request = webob.Request.blank(url)
        response = request.get_response(app)
        output = jsonutils.loads(response.body)
        self.assertEqual(output['extension']['alias'], ext['alias'])
def test_create_server_detect_from_image(self):
    """If user doesn't pass in diskConfig for server, use image metadata
    to specify AUTO or MANUAL.
    """
    def create_server(image_ref):
        # POST a minimal server body carrying no diskConfig attribute.
        req = fakes.HTTPRequest.blank('/fake/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = jsonutils.dumps({'server': {
            'name': 'server_test',
            'imageRef': image_ref,
            'flavorRef': '1',
        }})
        res = req.get_response(self.app)
        return jsonutils.loads(res.body)['server']

    server_dict = create_server('a440c04b-79fa-479c-bed1-0b816eaec379')
    self.assertDiskConfig(server_dict, 'MANUAL')

    server_dict = create_server('70a599e0-31e7-49b7-b260-868f441e862b')
    self.assertDiskConfig(server_dict, 'AUTO')
def test_v3_disabled(self): client = self.client(self.public_app) # request to /v3 should fail resp = client.get('/v3/') self.assertEqual(resp.status_int, 404) # request to /v2.0 should pass resp = client.get('/v2.0/') self.assertEqual(resp.status_int, 200) data = jsonutils.loads(resp.body) expected = v2_VERSION_RESPONSE self._paste_in_port(expected['version'], 'http://localhost:%s/v2.0/' % CONF.public_port) self.assertEqual(data, expected) # only v2 information should be displayed by requests to / v2_only_response = { "versions": { "values": [ v2_EXPECTED_RESPONSE ] } } self._paste_in_port(v2_only_response['versions']['values'][0], 'http://localhost:%s/v2.0/' % CONF.public_port) resp = client.get('/') self.assertEqual(resp.status_int, 300) data = jsonutils.loads(resp.body) self.assertEqual(data, v2_only_response)
def upgrade_releases(connection):
    # Rewrite every release row's attributes/roles metadata to the 5.1
    # format and attach the (release-independent) wizard metadata.
    select = text(
        """SELECT id, attributes_metadata, roles_metadata from releases""")
    update = text(
        """UPDATE releases SET attributes_metadata = :attrs, """
        """roles_metadata = :roles, wizard_metadata = :wiz_meta """
        """WHERE id = :id""")
    r = connection.execute(select)
    # reading fixture files in loop is in general a bad idea and as long as
    # wizard_metadata is the same for all existing releases getting it can
    # be moved outside of the loop
    for release in r:
        attrs_meta = upgrade_release_attributes_50_to_51(
            jsonutils.loads(release[1]))
        roles_meta = upgrade_release_roles_50_to_51(
            jsonutils.loads(release[2]))
        connection.execute(
            update,
            id=release[0],
            attrs=jsonutils.dumps(attrs_meta),
            roles=jsonutils.dumps(roles_meta),
            wiz_meta=jsonutils.dumps(_wizard_meta)
        )
def test_create_ensure_expires_at_is_not_returned(self):
    # 'expires_at' must never leak into the serialized representation
    # of a freshly created task; check both fixtures.
    for fixture in self.fixtures[:2]:
        response = webob.Response()
        self.serializer.create(response, fixture)
        serialized_task = jsonutils.loads(response.body)

        self.assertEqual(response.status_int, 201)
        self.assertEqual(fixture.task_id, serialized_task['id'])
        self.assertEqual(fixture.task_input, serialized_task['input'])
        self.assertNotIn('expires_at', serialized_task)
        self.assertEqual('application/json', response.content_type)
def test_property_ops_when_quota_violated(self):
    # Image list must be empty to begin with
    image_list = self._get()['images']
    self.assertEqual(0, len(image_list))

    orig_property_quota = 10
    CONF.set_override('image_property_quota', orig_property_quota)

    # Create an image (with deployer-defined properties)
    req_body = {'name': 'testimg',
                'disk_format': 'aki',
                'container_format': 'aki'}
    for i in range(orig_property_quota):
        req_body['k_%d' % i] = 'v_%d' % i
    image = self._create_image(req_body)
    image_id = image['id']
    for i in range(orig_property_quota):
        self.assertEqual('v_%d' % i, image['k_%d' % i])

    # Now reduce property quota. We should be allowed to modify/delete
    # existing properties (even if the result still exceeds property quota)
    # but not add new properties nor replace existing properties with new
    # properties (as long as we're over the quota)
    self.config(image_property_quota=2)

    # Replacing an existing property's value is allowed over-quota.
    patch_body = [{'op': 'replace', 'path': '/k_4', 'value': 'v_4.new'}]
    image = jsonutils.loads(self._patch(image_id, patch_body, 200))
    self.assertEqual('v_4.new', image['k_4'])

    # Removing an existing property is allowed over-quota.
    patch_body = [{'op': 'remove', 'path': '/k_7'}]
    image = jsonutils.loads(self._patch(image_id, patch_body, 200))
    self.assertNotIn('k_7', image)

    # Adding a new property while over-quota must fail with 413.
    patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}]
    self._patch(image_id, patch_body, 413)
    image = self._get(image_id)
    self.assertNotIn('k_100', image)

    # A remove+add pair that still ends over-quota must also fail,
    # leaving the image untouched.
    patch_body = [
        {'op': 'remove', 'path': '/k_5'},
        {'op': 'add', 'path': '/k_100', 'value': 'v_100'},
    ]
    self._patch(image_id, patch_body, 413)
    image = self._get(image_id)
    self.assertNotIn('k_100', image)
    self.assertIn('k_5', image)

    # temporary violations to property quota should be allowed as long as
    # it's within one PATCH request and the end result does not violate
    # quotas.
    patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'},
                  {'op': 'add', 'path': '/k_99', 'value': 'v_99'}]
    to_rm = ['k_%d' % i for i in range(orig_property_quota) if i != 7]
    patch_body.extend([{'op': 'remove', 'path': '/%s' % k}
                       for k in to_rm])
    image = jsonutils.loads(self._patch(image_id, patch_body, 200))
    self.assertEqual('v_99', image['k_99'])
    self.assertEqual('v_100', image['k_100'])
    for k in to_rm:
        self.assertNotIn(k, image)
def test_all_task_api(self):
    # End-to-end exercise of the tasks API: empty list, 404 lookup,
    # create, fetch, then list again and validate the returned fields.

    # 0. GET /tasks
    # Verify no tasks
    path = "/v2/tasks"
    response, content = self.http.request(path, 'GET',
                                          headers=minimal_task_headers())
    content_dict = json.loads(content)
    self.assertEqual(200, response.status)
    self.assertFalse(content_dict['tasks'])

    # 1. GET /tasks/{task_id}
    # Verify non-existent task
    task_id = 'NON_EXISTENT_TASK'
    path = "/v2/tasks/%s" % task_id
    response, content = self.http.request(path, 'GET',
                                          headers=minimal_task_headers())
    self.assertEqual(404, response.status)

    # 2. POST /tasks
    # Create a new task
    task_owner = 'tenant1'
    data, req_input = self._post_new_task(owner=task_owner)

    # 3. GET /tasks/{task_id}
    # Get an existing task
    task_id = data['id']
    path = "/v2/tasks/%s" % task_id
    response, content = self.http.request(path, 'GET',
                                          headers=minimal_task_headers())
    self.assertEqual(200, response.status)

    # 4. GET /tasks
    # Get all tasks (not deleted)
    path = "/v2/tasks"
    response, content = self.http.request(path, 'GET',
                                          headers=minimal_task_headers())
    self.assertEqual(200, response.status)
    self.assertIsNotNone(content)

    data = json.loads(content)
    self.assertIsNotNone(data)
    self.assertEqual(1, len(data['tasks']))
    # NOTE(venkatesh) find a way to get expected_keys from tasks controller
    expected_keys = set(['id', 'expires_at', 'type', 'owner', 'status',
                         'created_at', 'updated_at', 'self', 'schema'])
    task = data['tasks'][0]
    self.assertEqual(expected_keys, set(task.keys()))
    self.assertEqual(req_input['type'], task['type'])
    self.assertEqual(task_owner, task['owner'])
    self.assertEqual('success', task['status'])
    self.assertIsNotNone(task['created_at'])
    self.assertIsNotNone(task['updated_at'])

    # NOTE(nikhil): wait for all task executions to finish before exiting
    # else there is a risk of running into deadlock
    self._wait_on_task_execution()
def test_old_fields_exists(self):
    # All pre-existing plugin columns must retain their values after
    # the migration: scalars compared directly, JSON columns decoded.
    plugins = self.meta.tables['plugins']

    def fetch_first(column):
        return db.execute(sa.select([column])).fetchone()[0]

    scalar_expectations = [
        ('name', 'test_plugin'),
        ('title', 'Test plugin'),
        ('version', '1.0.0'),
        ('description', 'Test plugin for Fuel'),
        ('homepage', 'http://fuel_plugins.test_plugin.com'),
        ('package_version', '3.0.0'),
    ]
    for column_name, expected in scalar_expectations:
        self.assertEqual(fetch_first(plugins.c[column_name]), expected)

    json_expectations = [
        ('groups', ['tgroup']),
        ('authors', ['tauthor']),
        ('licenses', ['tlicense']),
        ('releases', [{'repository_path': 'repositories/ubuntu'}]),
        ('fuel_version', ['6.1', '7.0']),
    ]
    for column_name, expected in json_expectations:
        self.assertEqual(
            jsonutils.loads(fetch_first(plugins.c[column_name])),
            expected)
def test_loads_unicode(self):
    # Both bytes and text JSON input must decode to text strings.
    for payload in (b'"foo"', u'"foo"'):
        self.assertIsInstance(jsonutils.loads(payload), six.text_type)

    # 'test' in Ukrainian
    i18n_str_unicode = u'"\u0442\u0435\u0441\u0442"'
    for payload in (i18n_str_unicode, i18n_str_unicode.encode('utf-8')):
        self.assertIsInstance(jsonutils.loads(payload), six.text_type)
def _post_handler(resource, binary):
    # Workflow action invocations are accepted with a URI payload.
    if re.search(r'/api/workflow/.+/action/.+', resource):
        return 202, '', '', jsonutils.loads('{"uri":"some_uri"}')
    # Service creation is reported created with provisioning links.
    if re.search(r'/api/service\?name=.+', resource):
        return 201, '', '', jsonutils.loads(
            '{"links":{"actions":{"provision":"someuri"}}}')
    # Binary uploads are created; everything else is merely accepted.
    if binary:
        return 201, '', '', ''
    return 202, '', '', ''
def test_all_task_api(self):
    # End-to-end exercise of the tasks API: empty list, 404 lookup,
    # create, fetch, then list again and validate the returned fields.
    # This variant expects a still-'processing' task with no expires_at.

    # 0. GET /tasks
    # Verify no tasks
    path = "/v2/tasks"
    response, content = self.http.request(path, "GET",
                                          headers=minimal_task_headers())
    content_dict = json.loads(content)
    self.assertEqual(200, response.status)
    self.assertFalse(content_dict["tasks"])

    # 1. GET /tasks/{task_id}
    # Verify non-existent task
    task_id = "NON_EXISTENT_TASK"
    path = "/v2/tasks/%s" % task_id
    response, content = self.http.request(path, "GET",
                                          headers=minimal_task_headers())
    self.assertEqual(404, response.status)

    # 2. POST /tasks
    # Create a new task
    task_owner = "tenant1"
    data, req_input = self._post_new_task(owner=task_owner)

    # 3. GET /tasks/{task_id}
    # Get an existing task
    task_id = data["id"]
    path = "/v2/tasks/%s" % task_id
    response, content = self.http.request(path, "GET",
                                          headers=minimal_task_headers())
    self.assertEqual(200, response.status)

    # 4. GET /tasks
    # Get all tasks (not deleted)
    path = "/v2/tasks"
    response, content = self.http.request(path, "GET",
                                          headers=minimal_task_headers())
    self.assertEqual(200, response.status)
    self.assertIsNotNone(content)

    data = json.loads(content)
    self.assertIsNotNone(data)
    self.assertEqual(1, len(data["tasks"]))
    # NOTE(venkatesh) find a way to get expected_keys from tasks controller
    expected_keys = set(["id", "type", "owner", "status",
                         "created_at", "updated_at", "self", "schema"])
    task = data["tasks"][0]
    self.assertEqual(expected_keys, set(task.keys()))
    self.assertEqual(req_input["type"], task["type"])
    self.assertEqual(task_owner, task["owner"])
    self.assertEqual("processing", task["status"])
    self.assertIsNotNone(task["created_at"])
    self.assertIsNotNone(task["updated_at"])

    # NOTE(nikhil): wait for all task executions to finish before exiting
    # else there is a risk of running into deadlock
    self._wait_on_task_execution()
def test_resource_extension_for_get_custom_collection_action(self):
    # A GET collection action on an extension resource must route to
    # the controller and return its canned payload.
    controller = self.ResourceExtensionController()
    res_ext = extensions.ResourceExtension(
        'tweedles', controller,
        collection_actions={'custom_collection_action': "GET"})
    test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

    response = test_app.get("/tweedles/custom_collection_action")
    self.assertEqual(200, response.status_int)
    body = jsonutils.loads(response.body)
    LOG.debug(body)
    self.assertEqual(body['collection'], "value")
def test_vendor_content_type_json(self):
    # The vendor-specific JSON media type must be accepted and echoed
    # back as the response content type.
    ctype = 'application/vnd.openstack.compute+json'
    request = webob.Request.blank('/')
    request.headers['Accept'] = ctype
    response = request.get_response(fakes.wsgi_app())

    self.assertEqual(response.status_int, 200)
    self.assertEqual(response.content_type, ctype)
    # The body must parse as JSON (raises on failure).
    jsonutils.loads(response.body)
def test_show_server(self):
    # Each instance must report the diskConfig recorded for it.
    for instance_uuid, expected in ((MANUAL_INSTANCE_UUID, 'MANUAL'),
                                    (AUTO_INSTANCE_UUID, 'AUTO')):
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % instance_uuid)
        res = req.get_response(self.app)
        server_dict = jsonutils.loads(res.body)['server']
        self.assertDiskConfig(server_dict, expected)
def test_cobbler_profile_updated(self):
    # Node-level generated attributes now point at the 14.04 profile.
    row = db.execute(
        sa.select([self.meta.tables['attributes'].c.generated])).fetchone()
    generated = jsonutils.loads(row[0])
    self.assertEqual(generated['cobbler']['profile'],
                     'ubuntu_1404_x86_64')

    # The release metadata's generator argument was migrated as well.
    row = db.execute(sa.select(
        [self.meta.tables['releases'].c.attributes_metadata])).fetchone()
    attrs_metadata = jsonutils.loads(row[0])
    self.assertEqual(
        attrs_metadata['generated']['cobbler']['profile']['generator_arg'],
        'ubuntu_1404_x86_64')
def test_get_rdp_console_not_ready(self):
    # A console that is not yet ready must surface as HTTP 409.
    self.stubs.Set(compute_api.API, 'get_rdp_console',
                   fake_get_rdp_console_not_ready)

    request = webob.Request.blank(self.url)
    request.method = "POST"
    request.body = jsonutils.dumps(
        {'os-getRDPConsole': {'type': 'rdp-html5'}})
    request.headers["content-type"] = "application/json"

    response = request.get_response(self.app)
    # The error body must still be valid JSON.
    jsonutils.loads(response.body)
    self.assertEqual(response.status_int, 409)
def test_show_image(self):
    # Each image must report the diskConfig stored in its metadata.
    cases = (
        ('/fake/images/a440c04b-79fa-479c-bed1-0b816eaec379', 'MANUAL'),
        ('/fake/images/70a599e0-31e7-49b7-b260-868f441e862b', 'AUTO'),
    )
    for path, expected in cases:
        req = fakes.HTTPRequest.blank(path)
        res = req.get_response(self.app)
        image_dict = jsonutils.loads(res.body)['image']
        self.assertDiskConfig(image_dict, expected)
def test_new_fields_exists_and_empty(self):
    # offloading_modes must exist and default to an empty list on both
    # the NIC and the bond interface tables.
    for table_name in ('node_nic_interfaces', 'node_bond_interfaces'):
        result = db.execute(
            sa.select([self.meta.tables[table_name].c.offloading_modes]))
        self.assertEqual(jsonutils.loads(result.fetchone()[0]), [])
def parseconf(conf):
    """Parse Ganesha config.

    Both native format and JSON are supported.
    """
    try:
        # allow config to be specified in JSON --
        # for sake of people who might feel Ganesha config foreign.
        return jsonutils.loads(conf)
    except ValueError:
        # Not JSON: convert the native format to JSON first.
        return jsonutils.loads(_conf2json(conf))
def test_random_seed(self):
    inst = self.instance.obj_clone()
    mdinst = fake_InstanceMetadata(self.stubs, inst)

    # verify that 2013-04-04 has the 'random' field
    newer = jsonutils.loads(
        mdinst.lookup("/openstack/2013-04-04/meta_data.json"))
    self.assertIn("random_seed", newer)
    self.assertEqual(len(base64.b64decode(newer["random_seed"])), 512)

    # verify that older version do not have it
    older = jsonutils.loads(
        mdinst.lookup("/openstack/2012-08-10/meta_data.json"))
    self.assertNotIn("random_seed", older)
def _get_handler(resource):
    if resource == GET_200[2]:
        # Template listing: empty when templates are flagged missing.
        if rest_call_function_mock.TEMPLATES_MISSING:
            data = jsonutils.loads('[]')
        else:
            data = jsonutils.loads(
                '[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]'
            )
        return 200, '', '', data
    if resource in GET_200:
        return 200, '', '', ''
    # Anything else is an async operation reported as complete.
    data = jsonutils.loads('{"complete":"True", "success": "True"}')
    return 202, '', '', data
def test_sync_multi_chunk(self):
    # The fake NSX API client cannot be used for this test
    ctx = context.get_admin_context()
    # Generate 4 networks, 1 port per network, and 4 routers
    with self._populate_data(ctx, net_size=4, port_size=1, router_size=4):
        fake_lswitches = jsonutils.loads(
            self.fc.handle_get('/ws.v1/lswitch'))['results']
        fake_lrouters = jsonutils.loads(
            self.fc.handle_get('/ws.v1/lrouter'))['results']
        fake_lswitchports = jsonutils.loads(
            self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results']
        # Each tuple is (results, cursor, total size) as returned by
        # _fetch_data; consumed strictly in order by the fake below.
        return_values = [
            # Chunk 0 - lswitches
            (fake_lswitches, None, 4),
            # Chunk 0 - lrouters
            (fake_lrouters[:2], 'xxx', 4),
            # Chunk 0 - lports (size only)
            ([], 'start', 4),
            # Chunk 1 - lrouters (2 more) (lswitches are skipped)
            (fake_lrouters[2:], None, None),
            # Chunk 1 - lports
            (fake_lswitchports, None, 4)]

        def fake_fetch_data(*args, **kwargs):
            return return_values.pop(0)

        # 2 Chunks, with 6 resources each.
        # 1st chunk lswitches and lrouters
        # 2nd chunk lrouters and lports
        # Mock _fetch_data
        with mock.patch.object(
                self._plugin._synchronizer, '_fetch_data',
                side_effect=fake_fetch_data):
            sp = sync.SyncParameters(6)

            def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor):
                # Run one synchronization step and verify the chunk
                # index and per-resource cursors afterwards.
                self._plugin._synchronizer._synchronize_state(sp)
                self.assertEqual(chunk_idx, sp.current_chunk)
                self.assertEqual(ls_cursor, sp.ls_cursor)
                self.assertEqual(lr_cursor, sp.lr_cursor)
                self.assertEqual(lp_cursor, sp.lp_cursor)

            # check 1st chunk
            do_chunk(1, None, 'xxx', 'start')
            # check 2nd chunk
            do_chunk(0, None, None, None)
            # Chunk size should have stayed the same
            self.assertEqual(sp.chunk_size, 6)
def request(self, url, method, **kwargs):
    """Send an HTTP request and return ``(response, decoded body)``.

    A 'body' kwarg is JSON-encoded and sent with a JSON content type.
    Non-JSON response payloads yield ``body=None``.  Status codes >= 400
    are converted into exceptions.
    """
    # Merge per-call headers/options onto copies of the client
    # defaults so the defaults are never mutated.
    headers = copy.deepcopy(self.default_headers)
    headers.update(kwargs.get('headers', {}))
    options = copy.deepcopy(self.request_options)

    if 'body' in kwargs:
        headers['Content-Type'] = 'application/json'
        options['data'] = jsonutils.dumps(kwargs['body'])

    self.log_request(method, url, headers, options.get('data', None))
    resp = requests.request(method, url, headers=headers, **options)
    self.log_response(resp)

    body = None
    if resp.text:
        try:
            body = jsonutils.loads(resp.text)
        except ValueError:
            # Leave body as None for non-JSON payloads.
            pass

    if resp.status_code >= 400:
        raise exceptions.from_response(resp, method, url)

    return resp, body
def obj_from_db_obj(cls, db_obj):
    """Rehydrate an object from its JSON-serialized DB representation."""
    primitive = jsonutils.loads(db_obj)
    return cls.obj_from_primitive(primitive)
def test_release_modes_are_added(self):
    # Every release row must advertise both deployment modes.
    rows = db.execute(sa.select([self.meta.tables['releases'].c.modes]))
    for row in rows:
        modes = jsonutils.loads(row['modes'])
        self.assertItemsEqual(modes, ['ha_compact', 'multinode'])
def _get_pass(self, body):
    # Pull the password field out of a JSON request body (None if absent).
    parsed = jsonutils.loads(body)
    return parsed.get('password')
def test_get_metadata(self):
    # Metadata returned by the driver must be JSON carrying version 1.
    serialized = self.driver.get_metadata(self.volume_id)
    metadata = jsonutils.loads(serialized)
    self.assertEqual(metadata['version'], 1)
def test_mongo_has_primary(self):
    # The mongo role must be flagged as having a primary variant.
    row = db.execute(
        sa.select([self.meta.tables['releases'].c.roles_metadata])
    ).fetchone()
    roles_metadata = jsonutils.loads(row[0])
    self.assertTrue(roles_metadata['mongo']['has_primary'])
def test_download_non_exists_image_raises_http_not_found(self):
    """
    We test the following sequential series of actions:

    0. POST /images with public image named Image1
    and no custom properties
    - Verify 201 returned
    1. HEAD image
    - Verify HTTP headers have correct information we just added
    2. GET image
    - Verify all information on image we just added is correct
    3. DELETE image1
    - Delete the newly added image
    4. GET image
    - Verify that 404 HTTPNotFound exception is raised
    """
    self.cleanup()
    self.start_servers(**self.__dict__.copy())

    # 0. POST /images
    image_data = "*" * FIVE_KB
    headers = minimal_headers('Image1')
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'POST', headers=headers,
                                     body=image_data)
    self.assertEqual(201, response.status)
    data = jsonutils.loads(content)
    image_id = data['image']['id']
    self.assertEqual(
        hashlib.md5(image_data).hexdigest(),
        data['image']['checksum'])
    self.assertEqual(FIVE_KB, data['image']['size'])
    self.assertEqual("Image1", data['image']['name'])
    self.assertTrue(data['image']['is_public'])

    # 1. HEAD image
    # Verify image found now
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'HEAD')
    self.assertEqual(200, response.status)
    self.assertEqual("Image1", response['x-image-meta-name'])

    # 2. GET /images
    # Verify one public image
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)

    expected_result = {
        "images": [{
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "name": "Image1",
            "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
            "size": 5120
        }]
    }
    self.assertEqual(expected_result, jsonutils.loads(content))

    # 3. DELETE image1
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'DELETE')
    self.assertEqual(200, response.status)

    # 4. GET image
    # Verify that 404 HTTPNotFound exception is raised
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(404, response.status)

    self.stop_servers()
def from_json(self, datastring):
    """Deserialize a JSON request body, sanitizing values on the way in."""
    try:
        return jsonutils.loads(datastring, object_hook=self._sanitizer)
    except ValueError:
        # Surface malformed bodies as a 400 client error, not a 500.
        raise webob.exc.HTTPBadRequest(
            explanation=_('Malformed JSON in request body.'))
def put_os_hosts_sample_host(request, context):
    # Echo the request body back, merged over the canned host identifier.
    result = {'host': 'dummy'}
    payload = jsonutils.loads(request.body)
    result.update(payload)
    return result
def post_os_keypairs(request, context):
    # The payload must be exactly {'keypair': {...}} with a name present;
    # respond with the canned keypair fixture.
    payload = jsonutils.loads(request.body)
    assert list(payload) == ['keypair']
    fakes.assert_has_keys(payload['keypair'], required=['name'])
    return {'keypair': keypair}
def put_servers_1234(request, context):
    # Only a 'server' object with optional name/adminPass is accepted;
    # the raw body is echoed back on success.
    payload = jsonutils.loads(request.body)
    assert list(payload) == ['server']
    fakes.assert_has_keys(payload['server'], optional=['name', 'adminPass'])
    return request.body
def _call_agent(session, instance, vm_ref, method, addl_args=None,
                timeout=None, success_codes=None):
    """Abstracts out the interaction with the agent xenapi plugin.

    :param session: XenAPI session used to reach the agent plugin
    :param instance: instance the call is made on behalf of (for logging)
    :param vm_ref: XenAPI VM reference
    :param method: agent plugin method name to invoke
    :param addl_args: extra key/value args merged into the plugin call
    :param timeout: seconds before the agent call is abandoned; defaults
        to CONF.xenserver.agent_timeout
    :param success_codes: agent return codes treated as success
        (defaults to ['0'])
    :returns: the agent's 'message' payload with any escaped CRLF
        sequences stripped
    :raises exception.AgentTimeout: when the plugin reports a timeout
    :raises exception.AgentNotImplemented: when the agent lacks the method
    :raises exception.AgentError: on any other failure
    """
    if addl_args is None:
        addl_args = {}
    if timeout is None:
        timeout = CONF.xenserver.agent_timeout
    if success_codes is None:
        success_codes = ['0']

    # always fetch domid because VM may have rebooted
    dom_id = session.VM.get_domid(vm_ref)

    args = {
        'id': str(uuid.uuid4()),
        'dom_id': str(dom_id),
        'timeout': str(timeout),
    }
    args.update(addl_args)

    try:
        ret = session.call_plugin('agent', method, args)
    except session.XenAPI.Failure as e:
        # The plugin encodes the failure class in the last line of the
        # last detail string.
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
            LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
                        'args=%(args)r'),
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentTimeout(method=method)
        elif 'REBOOT:' in err_msg:
            LOG.debug('REBOOT: The call to %(method)s detected a reboot. '
                      'args=%(args)r',
                      {'method': method, 'args': args}, instance=instance)
            # Wait for the new domain and retry once against it.
            _wait_for_new_dom_id(session, vm_ref, dom_id, method)
            return _call_agent(session, instance, vm_ref, method,
                               addl_args, timeout, success_codes)
        elif 'NOT IMPLEMENTED:' in err_msg:
            LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not '
                        'supported by the agent. args=%(args)r'),
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentNotImplemented(method=method)
        else:
            LOG.error(_('The call to %(method)s returned an error: %(e)s. '
                        'args=%(args)r'),
                      {'method': method, 'args': args, 'e': e},
                      instance=instance)
            raise exception.AgentError(method=method)

    if not isinstance(ret, dict):
        try:
            # NOTE(review): only TypeError is caught here; a malformed JSON
            # string would raise ValueError and propagate — presumably the
            # plugin never returns malformed JSON. TODO confirm.
            ret = jsonutils.loads(ret)
        except TypeError:
            LOG.error(_('The agent call to %(method)s returned an invalid '
                        'response: %(ret)r. args=%(args)r'),
                      {'method': method, 'ret': ret, 'args': args},
                      instance=instance)
            raise exception.AgentError(method=method)

    if ret['returncode'] not in success_codes:
        # Fixed log message: previously read "returned an an error".
        LOG.error(_('The agent call to %(method)s returned an '
                    'error: %(ret)r. args=%(args)r'),
                  {'method': method, 'ret': ret, 'args': args},
                  instance=instance)
        raise exception.AgentError(method=method)

    LOG.debug('The agent call to %(method)s was successful: '
              '%(ret)r. args=%(args)r',
              {'method': method, 'ret': ret, 'args': args},
              instance=instance)

    # Some old versions of the Windows agent have a trailing \\r\\n
    # (ie CRLF escaped) for some reason. Strip that off.
    return ret['message'].replace('\\r\\n', '')
def post_servers_1234_action(self, request, context):
    """Fake POST /servers/1234/action handler.

    Validates that the body contains exactly one action, checks that
    action's payload, and returns the canned response wrapped as
    ``{'server': ...}`` (or a bare payload for the console/confirmResize
    special cases).

    Fix: the original elif chain listed 'lock' and 'unlock' twice; the
    second occurrences were unreachable dead branches and are removed.

    :raises AssertionError: for any unexpected action name or payload.
    """
    _body = ''
    body = jsonutils.loads(request.body)
    context.status_code = 202
    assert len(body.keys()) == 1
    action = list(body)[0]

    # Actions whose payload must be exactly null.
    none_actions = set([
        'confirmResize', 'revertResize', 'migrate', 'os-stop', 'os-start',
        'forceDelete', 'restore', 'pause', 'unpause', 'lock', 'unlock',
        'rescue', 'unrescue', 'resume', 'suspend', 'shelve',
        'shelveOffload', 'unshelve', 'resetNetwork',
    ])
    # Actions whose payload must contain exactly this single key.
    single_key_actions = {
        'reboot': 'type',
        'addFixedIp': 'networkId',
        'removeFixedIp': 'address',
        'removeFloatingIp': 'address',
        'changePassword': 'adminPass',
        'os-getConsoleOutput': 'length',
        'os-getVNCConsole': 'type',
        'os-getSPICEConsole': 'type',
        'os-getRDPConsole': 'type',
        'os-getSerialConsole': 'type',
        'os-resetState': 'state',
        'addSecurityGroup': 'name',
        'removeSecurityGroup': 'name',
    }

    if action in none_actions:
        assert body[action] is None
        if action == 'confirmResize':
            # This one method returns a different response code
            context.status_code = 204
            return None
        if action == 'rescue':
            _body = {'Password': '******'}
    elif action in single_key_actions:
        assert list(body[action]) == [single_key_actions[action]]
        if action == 'reboot':
            assert body[action]['type'] in ['HARD', 'SOFT']
        elif action == 'os-getConsoleOutput':
            context.status_code = 202
            return {'output': 'foo'}
    elif action == 'rebuild':
        payload = body[action]
        adminPass = payload.get('adminPass', 'randompassword')
        assert 'imageRef' in payload
        _body = self.server_1234.copy()
        _body['adminPass'] = adminPass
    elif action == 'resize':
        assert 'flavorRef' in body[action].keys()
    elif action == 'addFloatingIp':
        assert (list(body[action]) == ['address'] or
                sorted(list(body[action])) == ['address', 'fixed_address'])
    elif action == 'createImage':
        assert set(body[action].keys()) == set(['name', 'metadata'])
        context.headers['location'] = "http://blah/images/456"
    elif action == 'os-migrateLive':
        assert set(body[action].keys()) == set(
            ['host', 'block_migration', 'disk_over_commit'])
    elif action == 'createBackup':
        assert set(body[action]) == set(['name', 'backup_type', 'rotation'])
    elif action == 'evacuate':
        keys = list(body[action])
        if 'adminPass' in keys:
            keys.remove('adminPass')
        assert set(keys) == set(['host', 'onSharedStorage'])
    else:
        raise AssertionError("Unexpected server action: %s" % action)
    return {'server': _body}
def post_servers_1234_action(self, request, context):
    """Fake POST /servers/1234/action handler (table-driven variant).

    Validates that the body carries exactly one action, applies the
    per-action payload checks from the tables below, and returns the
    canned response for that action ('' when no canned body exists).

    Fix: 'lock' and 'unlock' were listed twice in ``body_is_none_list``;
    the redundant duplicates are removed (list membership is unaffected).

    :raises AssertionError: for any unexpected action name or payload.
    """
    context.status_code = 202
    # Actions whose payload must be exactly null.
    body_is_none_list = [
        'revert_resize', 'migrate', 'stop', 'start', 'force_delete',
        'restore', 'pause', 'unpause', 'lock', 'unlock', 'unrescue',
        'resume', 'suspend', 'shelve', 'shelve_offload', 'unshelve',
        'reset_network', 'rescue', 'confirm_resize'
    ]
    # Canned response bodies per action; everything else returns ''.
    body_return_map = {
        'rescue': {'admin_password': '******'},
        'get_console_output': {'output': 'foo'},
        'rebuild': {'server': self.server_1234},
    }
    # Actions whose payload must contain at least this key.
    body_param_check_exists = {
        'rebuild': 'image_ref',
        'resize': 'flavor_ref',
    }
    # Actions whose payload keys must match this set exactly.
    body_params_check_exact = {
        'reboot': ['type'],
        'add_fixed_ip': ['network_id'],
        'evacuate': ['host', 'on_shared_storage'],
        'remove_fixed_ip': ['address'],
        'change_password': ['admin_password'],
        'get_console_output': ['length'],
        'get_vnc_console': ['type'],
        'get_spice_console': ['type'],
        'get_serial_console': ['type'],
        'reset_state': ['state'],
        'create_image': ['name', 'metadata'],
        'migrate_live': ['host', 'block_migration', 'disk_over_commit'],
        'create_backup': ['name', 'backup_type', 'rotation'],
        'attach': ['volume_id', 'device'],
        'detach': ['volume_id'],
        'swap_volume_attachment': ['old_volume_id', 'new_volume_id'],
    }

    body = jsonutils.loads(request.body)
    assert len(body.keys()) == 1
    action = list(body)[0]
    _body = body_return_map.get(action, '')

    if action in body_is_none_list:
        assert body[action] is None
    if action in body_param_check_exists:
        assert body_param_check_exists[action] in body[action]
    if action == 'evacuate':
        # admin_password is optional for evacuate; drop it before the
        # exact-keys check below.
        body[action].pop('admin_password', None)
    if action in body_params_check_exact:
        assert set(body[action]) == set(body_params_check_exact[action])

    if action == 'reboot':
        assert body[action]['type'] in ['HARD', 'SOFT']
    elif action == 'confirm_resize':
        # This one method returns a different response code
        context.status_code = 204
    elif action == 'create_image':
        context.headers['location'] = "http://blah/images/456"

    if action not in set.union(set(body_is_none_list),
                               set(body_params_check_exact.keys()),
                               set(body_param_check_exists.keys())):
        raise AssertionError("Unexpected server action: %s" % action)
    return _body
def post_os_networks(request, context):
    """Fake POST /os-networks handler: wrap the decoded body as a network."""
    return {'network': jsonutils.loads(request.body)}
def process_result_value(self, value, dialect):
    """Decode a JSON column value loaded from the database.

    NULL columns pass through as None; anything else is JSON-decoded.
    """
    if value is None:
        return value
    return jsonutils.loads(value)
def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg)
def rest_call(self, action, resource, data='', headers=None,
              timeout=False, reconnect=False, hash_handler=None):
    """Issue one REST call to the backend controller.

    Manages a cached HTTP(S) connection (reconnecting when forced, when
    keep-alive is unsupported, or when the timeout changes), attaches the
    proxy/auth/consistency-hash headers, and retries once on a failed
    cached connection.

    :param action: HTTP verb (e.g. 'GET', 'POST')
    :param resource: path appended to self.base_uri
    :param data: payload, JSON-encoded before sending
    :param headers: optional extra headers (mutated with required ones)
    :param timeout: socket timeout; False means "use self.timeout"
        (None is a valid value meaning no timeout, hence the sentinel)
    :param reconnect: force a fresh connection instead of the cached one
    :param hash_handler: consistency-hash DB handler; when omitted a new
        one is created and no hash header is sent
    :returns: tuple (status, reason, raw_body, decoded_body); status 0
        with None fields signals a connection-level failure
    """
    uri = self.base_uri + resource
    body = jsonutils.dumps(data)
    headers = headers or {}
    headers['Content-type'] = 'application/json'
    headers['Accept'] = 'application/json'
    headers['NeutronProxy-Agent'] = self.name
    headers['Instance-ID'] = self.neutron_id
    headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
    if hash_handler:
        # this will be excluded on calls that don't need hashes
        # (e.g. topology sync, capability checks)
        headers[HASH_MATCH_HEADER] = hash_handler.read_for_update()
    else:
        hash_handler = cdb.HashHandler()
    if 'keep-alive' in self.capabilities:
        headers['Connection'] = 'keep-alive'
    else:
        # server can't hold connections open, so always reconnect
        reconnect = True
    if self.auth:
        headers['Authorization'] = self.auth
    LOG.debug("ServerProxy: server=%(server)s, port=%(port)d, "
              "ssl=%(ssl)r",
              {'server': self.server, 'port': self.port,
               'ssl': self.ssl})
    LOG.debug("ServerProxy: resource=%(resource)s, data=%(data)r, "
              "headers=%(headers)r, action=%(action)s",
              {'resource': resource, 'data': data, 'headers': headers,
               'action': action})
    # unspecified timeout is False because a timeout can be specified as
    # None to indicate no timeout.
    if timeout is False:
        timeout = self.timeout
    if timeout != self.timeout:
        # need a new connection if timeout has changed
        reconnect = True
    if not self.currentconn or reconnect:
        if self.currentconn:
            self.currentconn.close()
        if self.ssl:
            self.currentconn = HTTPSConnectionWithValidation(
                self.server, self.port, timeout=timeout)
            if self.currentconn is None:
                LOG.error(_LE('ServerProxy: Could not establish HTTPS '
                              'connection'))
                return 0, None, None, None
            self.currentconn.combined_cert = self.combined_cert
        else:
            self.currentconn = httplib.HTTPConnection(
                self.server, self.port, timeout=timeout)
            if self.currentconn is None:
                LOG.error(_LE('ServerProxy: Could not establish HTTP '
                              'connection'))
                return 0, None, None, None
    try:
        self.currentconn.request(action, uri, body, headers)
        response = self.currentconn.getresponse()
        respstr = response.read()
        respdata = respstr
        if response.status in self.success_codes:
            hash_value = response.getheader(HASH_MATCH_HEADER)
            # don't clear hash from DB if a hash header wasn't present
            if hash_value is not None:
                hash_handler.put_hash(hash_value)
            else:
                hash_handler.clear_lock()
            try:
                respdata = jsonutils.loads(respstr)
            except ValueError:
                # response was not JSON, ignore the exception
                pass
        else:
            # release lock so others don't have to wait for timeout
            hash_handler.clear_lock()
        ret = (response.status, response.reason, respstr, respdata)
    except httplib.HTTPException:
        # If we were using a cached connection, try again with a new one.
        with excutils.save_and_reraise_exception() as ctxt:
            self.currentconn.close()
            if reconnect:
                # if reconnect is true, this was on a fresh connection so
                # reraise since this server seems to be broken
                ctxt.reraise = True
            else:
                # if reconnect is false, it was a cached connection so
                # try one more time before re-raising
                ctxt.reraise = False
                return self.rest_call(action, resource, data, headers,
                                      timeout=timeout, reconnect=True)
    except (socket.timeout, socket.error) as e:
        self.currentconn.close()
        LOG.error(_LE('ServerProxy: %(action)s failure, %(e)r'),
                  {'action': action, 'e': e})
        ret = 0, None, None, None
    LOG.debug("ServerProxy: status=%(status)d, reason=%(reason)r, "
              "ret=%(ret)s, data=%(data)r",
              {'status': ret[0], 'reason': ret[1], 'ret': ret[2],
               'data': ret[3]})
    return ret
def _get_servers(self, body): return jsonutils.loads(body).get('servers')
def test_get_head_simple_post(self):
    """
    We test the following sequential series of actions:

    0. GET /images
    - Verify no public images
    1. GET /images/detail
    - Verify no public images
    2. POST /images with public image named Image1 and no custom properties
    - Verify 201 returned
    3. HEAD image
    - Verify HTTP headers have correct information we just added
    4. GET image
    - Verify all information on image we just added is correct
    5. GET /images
    - Verify the image we just added is returned
    6. GET /images/detail
    - Verify the image we just added is returned
    7. PUT image with custom properties of "distro" and "arch"
    - Verify 200 returned
    8. PUT image with too many custom properties
    - Verify 413 returned
    9. GET image
    - Verify updated information about image was stored
    10. PUT image
    - Remove a previously existing property.
    11. PUT image
    - Add a previously deleted property.
    12. PUT image/members/member1
    - Add member1 to image
    13. PUT image/members/member2
    - Add member2 to image
    14. GET image/members
    - List image members
    15. DELETE image/members/member1
    - Delete image member1
    16. PUT image/members
    - Attempt to replace members with an overlimit amount
    17. PUT image/members/member11
    - Attempt to add a member while at limit
    18. POST /images with another public image named Image2
    - attribute and three custom properties, "distro", "arch" & "foo"
    - Verify a 200 OK is returned
    19. HEAD image2
    - Verify image2 found now
    20. GET /images
    - Verify 2 public images
    21. GET /images with filter on user-defined property "distro".
    - Verify both images are returned
    22. GET /images with filter on user-defined property 'distro' but
    - with non-existent value. Verify no images are returned
    23. GET /images with filter on non-existent user-defined property
    - "boo". Verify no images are returned
    24. GET /images with filter 'arch=i386'
    - Verify only image2 is returned
    25. GET /images with filter 'arch=x86_64'
    - Verify only image1 is returned
    26. GET /images with filter 'foo=bar'
    - Verify only image2 is returned
    27. DELETE image1
    - Delete image
    28. GET image/members
    - List deleted image members
    29. PUT image/members/member2
    - Update existing member2 of deleted image
    30. PUT image/members/member3
    - Add member3 to deleted image
    31. DELETE image/members/member2
    - Delete member2 from deleted image
    32. DELETE image2
    - Delete image
    33. GET /images
    - Verify no images are listed
    """
    self.cleanup()
    self.start_servers(**self.__dict__.copy())

    # 0. GET /images
    # Verify no public images
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    self.assertEqual('{"images": []}', content)

    # 1. GET /images/detail
    # Verify no public images
    path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    self.assertEqual('{"images": []}', content)

    # 2. POST /images with public image named Image1
    # attribute and no custom properties. Verify a 200 OK is returned
    image_data = "*" * FIVE_KB
    headers = minimal_headers('Image1')
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'POST', headers=headers,
                                     body=image_data)
    self.assertEqual(201, response.status)
    data = jsonutils.loads(content)
    image_id = data['image']['id']
    self.assertEqual(hashlib.md5(image_data).hexdigest(),
                     data['image']['checksum'])
    self.assertEqual(FIVE_KB, data['image']['size'])
    self.assertEqual("Image1", data['image']['name'])
    self.assertTrue(data['image']['is_public'])

    # 3. HEAD image
    # Verify image found now
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'HEAD')
    self.assertEqual(200, response.status)
    self.assertEqual("Image1", response['x-image-meta-name'])

    # 4. GET image
    # Verify all information on image we just added is correct
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    expected_image_headers = {
        'x-image-meta-id': image_id,
        'x-image-meta-name': 'Image1',
        'x-image-meta-is_public': 'True',
        'x-image-meta-status': 'active',
        'x-image-meta-disk_format': 'raw',
        'x-image-meta-container_format': 'ovf',
        'x-image-meta-size': str(FIVE_KB)}
    expected_std_headers = {
        'content-length': str(FIVE_KB),
        'content-type': 'application/octet-stream'}
    for expected_key, expected_value in expected_image_headers.items():
        self.assertEqual(expected_value, response[expected_key],
                         "For key '%s' expected header value '%s'. "
                         "Got '%s'" % (expected_key, expected_value,
                                       response[expected_key]))
    for expected_key, expected_value in expected_std_headers.items():
        self.assertEqual(expected_value, response[expected_key],
                         "For key '%s' expected header value '%s'. "
                         "Got '%s'" % (expected_key, expected_value,
                                       response[expected_key]))
    self.assertEqual("*" * FIVE_KB, content)
    self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
                     hashlib.md5(content).hexdigest())

    # 5. GET /images
    # Verify one public image
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    expected_result = {"images": [
        {"container_format": "ovf",
         "disk_format": "raw",
         "id": image_id,
         "name": "Image1",
         "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
         "size": 5120}]}
    self.assertEqual(expected_result, jsonutils.loads(content))

    # 6. GET /images/detail
    # Verify image and all its metadata
    path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    expected_image = {
        "status": "active",
        "name": "Image1",
        "deleted": False,
        "container_format": "ovf",
        "disk_format": "raw",
        "id": image_id,
        "is_public": True,
        "deleted_at": None,
        "properties": {},
        "size": 5120}
    image = jsonutils.loads(content)
    for expected_key, expected_value in expected_image.items():
        self.assertEqual(expected_value, image['images'][0][expected_key],
                         "For key '%s' expected header value '%s'. "
                         "Got '%s'" % (expected_key, expected_value,
                                       image['images'][0][expected_key]))

    # 7. PUT image with custom properties of "distro" and "arch"
    # Verify 200 returned
    headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
               'X-Image-Meta-Property-Arch': 'x86_64'}
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'PUT', headers=headers)
    self.assertEqual(200, response.status)
    data = jsonutils.loads(content)
    self.assertEqual("x86_64", data['image']['properties']['arch'])
    self.assertEqual("Ubuntu", data['image']['properties']['distro'])

    # 8. PUT image with too many custom properties
    # Verify 413 returned
    headers = {}
    for i in range(11):  # configured limit is 10
        headers['X-Image-Meta-Property-foo%d' % i] = 'bar'
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'PUT', headers=headers)
    self.assertEqual(413, response.status)

    # 9. GET /images/detail
    # Verify image and all its metadata
    path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    expected_image = {
        "status": "active",
        "name": "Image1",
        "deleted": False,
        "container_format": "ovf",
        "disk_format": "raw",
        "id": image_id,
        "is_public": True,
        "deleted_at": None,
        "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
        "size": 5120}
    image = jsonutils.loads(content)
    for expected_key, expected_value in expected_image.items():
        self.assertEqual(expected_value, image['images'][0][expected_key],
                         "For key '%s' expected header value '%s'. "
                         "Got '%s'" % (expected_key, expected_value,
                                       image['images'][0][expected_key]))

    # 10. PUT image and remove a previously existing property.
    headers = {'X-Image-Meta-Property-Arch': 'x86_64'}
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'PUT', headers=headers)
    self.assertEqual(200, response.status)

    path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    data = jsonutils.loads(content)['images'][0]
    self.assertEqual(1, len(data['properties']))
    self.assertEqual("x86_64", data['properties']['arch'])

    # 11. PUT image and add a previously deleted property.
    headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
               'X-Image-Meta-Property-Arch': 'x86_64'}
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'PUT', headers=headers)
    self.assertEqual(200, response.status)
    data = jsonutils.loads(content)

    path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    data = jsonutils.loads(content)['images'][0]
    self.assertEqual(2, len(data['properties']))
    self.assertEqual("x86_64", data['properties']['arch'])
    self.assertEqual("Ubuntu", data['properties']['distro'])
    self.assertNotEqual(data['created_at'], data['updated_at'])

    # 12. Add member to image
    path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'PUT')
    self.assertEqual(204, response.status)

    # 13. Add member to image
    path = ("http://%s:%d/v1/images/%s/members/pattiewhite" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'PUT')
    self.assertEqual(204, response.status)

    # 14. List image members
    path = ("http://%s:%d/v1/images/%s/members" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    data = jsonutils.loads(content)
    self.assertEqual(2, len(data['members']))
    self.assertEqual('pattieblack', data['members'][0]['member_id'])
    self.assertEqual('pattiewhite', data['members'][1]['member_id'])

    # 15. Delete image member
    path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'DELETE')
    self.assertEqual(204, response.status)

    # 16. Attempt to replace members with an overlimit amount
    # Adding 11 image members should fail since configured limit is 10
    path = ("http://%s:%d/v1/images/%s/members" %
            ("127.0.0.1", self.api_port, image_id))
    memberships = []
    for i in range(11):
        member_id = "foo%d" % i
        memberships.append(dict(member_id=member_id))
    http = httplib2.Http()
    body = jsonutils.dumps(dict(memberships=memberships))
    response, content = http.request(path, 'PUT', body=body)
    self.assertEqual(413, response.status)

    # 17. Attempt to add a member while at limit
    # Adding an 11th member should fail since configured limit is 10
    path = ("http://%s:%d/v1/images/%s/members" %
            ("127.0.0.1", self.api_port, image_id))
    memberships = []
    for i in range(10):
        member_id = "foo%d" % i
        memberships.append(dict(member_id=member_id))
    http = httplib2.Http()
    body = jsonutils.dumps(dict(memberships=memberships))
    response, content = http.request(path, 'PUT', body=body)
    self.assertEqual(204, response.status)

    path = ("http://%s:%d/v1/images/%s/members/fail_me" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'PUT')
    self.assertEqual(413, response.status)

    # 18. POST /images with another public image named Image2
    # attribute and three custom properties, "distro", "arch" & "foo".
    # Verify a 200 OK is returned
    image_data = "*" * FIVE_KB
    headers = minimal_headers('Image2')
    headers['X-Image-Meta-Property-Distro'] = 'Ubuntu'
    headers['X-Image-Meta-Property-Arch'] = 'i386'
    headers['X-Image-Meta-Property-foo'] = 'bar'
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'POST', headers=headers,
                                     body=image_data)
    self.assertEqual(201, response.status)
    data = jsonutils.loads(content)
    image2_id = data['image']['id']
    self.assertEqual(hashlib.md5(image_data).hexdigest(),
                     data['image']['checksum'])
    self.assertEqual(FIVE_KB, data['image']['size'])
    self.assertEqual("Image2", data['image']['name'])
    self.assertTrue(data['image']['is_public'])
    self.assertEqual('Ubuntu', data['image']['properties']['distro'])
    self.assertEqual('i386', data['image']['properties']['arch'])
    self.assertEqual('bar', data['image']['properties']['foo'])

    # 19. HEAD image2
    # Verify image2 found now
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image2_id)
    http = httplib2.Http()
    response, content = http.request(path, 'HEAD')
    self.assertEqual(200, response.status)
    self.assertEqual("Image2", response['x-image-meta-name'])

    # 20. GET /images
    # Verify 2 public images
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(2, len(images))
    self.assertEqual(image2_id, images[0]['id'])
    self.assertEqual(image_id, images[1]['id'])

    # 21. GET /images with filter on user-defined property 'distro'.
    # Verify both images are returned
    path = "http://%s:%d/v1/images?property-distro=Ubuntu" % (
        "127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(2, len(images))
    self.assertEqual(image2_id, images[0]['id'])
    self.assertEqual(image_id, images[1]['id'])

    # 22. GET /images with filter on user-defined property 'distro' but
    # with non-existent value. Verify no images are returned
    path = "http://%s:%d/v1/images?property-distro=fedora" % (
        "127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(0, len(images))

    # 23. GET /images with filter on non-existent user-defined property
    # 'boo'. Verify no images are returned
    path = "http://%s:%d/v1/images?property-boo=bar" % ("127.0.0.1",
                                                        self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(0, len(images))

    # 24. GET /images with filter 'arch=i386'
    # Verify only image2 is returned
    path = "http://%s:%d/v1/images?property-arch=i386" % ("127.0.0.1",
                                                          self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(1, len(images))
    self.assertEqual(image2_id, images[0]['id'])

    # 25. GET /images with filter 'arch=x86_64'
    # Verify only image1 is returned
    path = "http://%s:%d/v1/images?property-arch=x86_64" % ("127.0.0.1",
                                                            self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(1, len(images))
    self.assertEqual(image_id, images[0]['id'])

    # 26. GET /images with filter 'foo=bar'
    # Verify only image2 is returned
    path = "http://%s:%d/v1/images?property-foo=bar" % ("127.0.0.1",
                                                        self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(1, len(images))
    self.assertEqual(image2_id, images[0]['id'])

    # 27. DELETE image1
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'DELETE')
    self.assertEqual(200, response.status)

    # 28. Try to list members of deleted image
    path = ("http://%s:%d/v1/images/%s/members" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(404, response.status)

    # 29. Try to update member of deleted image
    path = ("http://%s:%d/v1/images/%s/members" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
    body = jsonutils.dumps(dict(memberships=fixture))
    response, content = http.request(path, 'PUT', body=body)
    self.assertEqual(404, response.status)

    # 30. Try to add member to deleted image
    path = ("http://%s:%d/v1/images/%s/members/chickenpattie" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'PUT')
    self.assertEqual(404, response.status)

    # 31. Try to delete member of deleted image
    path = ("http://%s:%d/v1/images/%s/members/pattieblack" %
            ("127.0.0.1", self.api_port, image_id))
    http = httplib2.Http()
    response, content = http.request(path, 'DELETE')
    self.assertEqual(404, response.status)

    # 32. DELETE image2
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image2_id)
    http = httplib2.Http()
    response, content = http.request(path, 'DELETE')
    self.assertEqual(200, response.status)

    # 33. GET /images
    # Verify no images are listed
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'GET')
    self.assertEqual(200, response.status)
    images = jsonutils.loads(content)['images']
    self.assertEqual(0, len(images))

    # 34. HEAD /images/detail
    path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'HEAD')
    self.assertEqual(405, response.status)
    self.assertEqual('GET', response.get('allow'))

    self.stop_servers()
def _json_loads(properties, attr):
    """Decode properties[attr] in place when it is still a JSON string."""
    raw = properties[attr]
    if isinstance(raw, six.string_types):
        properties[attr] = jsonutils.loads(raw)
def assertEqualImages(self, res, uuids, key='images', unjsonify=True):
    """Assert the response lists exactly the given image ids, in order.

    :param res: response object whose body is JSON (or an already-parsed
        list when unjsonify is False)
    :param uuids: expected image ids, in expected order
    :param key: top-level key holding the image list
    :param unjsonify: whether res.body must be JSON-decoded first
    """
    if unjsonify:
        images = jsonutils.loads(res.body)[key]
    else:
        images = res
    self.assertEqual(len(images), len(uuids))
    for idx, expected_id in enumerate(uuids):
        self.assertEqual(images[idx]['id'], expected_id)
def hydrate(cls, network_info):
    """Build an instance of cls from network info that may be either a
    JSON string or an already-parsed list of VIF dicts."""
    if isinstance(network_info, six.string_types):
        network_info = jsonutils.loads(network_info)
    vifs = [VIF.hydrate(vif) for vif in network_info]
    return cls(vifs)
def test_get_diagnostics(self):
    """Verify the diagnostics endpoint returns the stubbed payload."""
    request = self._get_request()
    response = request.get_response(self.router)
    body = jsonutils.loads(response.body)
    self.assertEqual(body, {'data': 'Some diagnostic info'})
def _json_loads(properties, attr):
    """Decode properties[attr] in place when it is still a JSON string.

    NOTE: uses py2-only ``basestring``; this module presumably targets
    Python 2.
    """
    raw = properties[attr]
    if isinstance(raw, basestring):
        properties[attr] = jsonutils.loads(raw)
# License for the specific language governing permissions and limitations # under the License. import mock from oslo.config import fixture as fixture_config from oslo.serialization import jsonutils from oslotest import mockpatch import requests import six.moves.urllib.parse as urlparse from ceilometer.alarm import service from ceilometer.openstack.common import context from ceilometer.tests import base as tests_base DATA_JSON = jsonutils.loads( '{"current": "ALARM", "alarm_id": "foobar", "alarm_name": "testalarm",' ' "reason": "what ?", "reason_data": {"test": "test"},' ' "previous": "OK"}') NOTIFICATION = dict(alarm_id='foobar', alarm_name='testalarm', condition=dict(threshold=42), reason='what ?', reason_data={'test': 'test'}, previous='OK', current='ALARM') class TestAlarmNotifier(tests_base.BaseTestCase): def setUp(self): super(TestAlarmNotifier, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF)
def _populate_roles(self, token_data, user_id, domain_id, project_id,
                    trust, access_token):
    """Fill token_data['roles'] for the token being issued.

    Resolution order: already-populated roles win; then OAuth access-token
    role filtering; then trust-scoped roles (when trusts are enabled);
    otherwise the user's direct project/domain role assignments.

    :param token_data: token payload dict, mutated in place
    :param user_id: id of the authenticating user
    :param domain_id: domain scope id, if domain-scoped
    :param project_id: project scope id, if project-scoped
    :param trust: trust dict when authenticating via a trust, else None
    :param access_token: OAuth access token dict (carries JSON 'role_ids'),
        else None
    :raises exception.Forbidden: when a trust role is not delegated
    :raises exception.Unauthorized: when no roles apply to the scope
    """
    if 'roles' in token_data:
        # no need to repopulate roles
        return

    if access_token:
        # OAuth path: keep only the roles the access token authorizes.
        filtered_roles = []
        authed_role_ids = jsonutils.loads(access_token['role_ids'])
        all_roles = self.assignment_api.list_roles()
        for role in all_roles:
            for authed_role in authed_role_ids:
                if authed_role == role['id']:
                    filtered_roles.append({'id': role['id'],
                                           'name': role['name']})
        token_data['roles'] = filtered_roles
        return

    if CONF.trust.enabled and trust:
        # Trust-scoped token: roles come from the trustor's assignments.
        token_user_id = trust['trustor_user_id']
        token_project_id = trust['project_id']
        # trusts do not support domains yet
        token_domain_id = None
    else:
        token_user_id = user_id
        token_project_id = project_id
        token_domain_id = domain_id

    if token_domain_id or token_project_id:
        roles = self._get_roles_for_user(token_user_id, token_domain_id,
                                         token_project_id)
        filtered_roles = []
        if CONF.trust.enabled and trust:
            # Only roles explicitly delegated by the trust may appear.
            for trust_role in trust['roles']:
                match_roles = [x for x in roles
                               if x['id'] == trust_role['id']]
                if match_roles:
                    filtered_roles.append(match_roles[0])
                else:
                    raise exception.Forbidden(
                        _('Trustee has no delegated roles.'))
        else:
            for role in roles:
                filtered_roles.append({'id': role['id'],
                                       'name': role['name']})

        # user has no project or domain roles, therefore access denied
        if not filtered_roles:
            if token_project_id:
                msg = _('User %(user_id)s has no access '
                        'to project %(project_id)s') % {
                            'user_id': user_id,
                            'project_id': token_project_id}
            else:
                msg = _('User %(user_id)s has no access '
                        'to domain %(domain_id)s') % {
                            'user_id': user_id,
                            'domain_id': token_domain_id}
            LOG.debug(msg)
            raise exception.Unauthorized(msg)

        token_data['roles'] = filtered_roles
def put_dns_testdomain_entries_testname(request, context):
    """Fake PUT handler for a DNS entry.

    Validates that the submitted dns_entry carries the required keys,
    marks the response as 205 and echoes the raw request body back.
    """
    payload = jsonutils.loads(request.body)
    entry = payload['dns_entry']
    fakes.assert_has_keys(entry, required=['ip', 'dns_type'])
    context.status_code = 205
    return request.body
def test_scrubber_delete_handles_exception(self):
    """
    Test that the scrubber handles the case where an exception occurs
    when _delete() is called. The scrubber should not write out queue
    files in this case.
    """

    # Start servers.
    self.cleanup()
    # daemon=False: the scrubber will be run once as a subprocess below
    # rather than in the background.
    self.start_servers(delayed_delete=True, daemon=False,
                       default_store='file')

    # Check that we are using a file backend.
    self.assertEqual(self.api_server.default_store, 'file')

    # add an image
    headers = {
        'x-image-meta-name': 'test_image',
        'x-image-meta-is_public': 'true',
        'x-image-meta-disk_format': 'raw',
        'x-image-meta-container_format': 'ovf',
        'content-type': 'application/octet-stream',
    }
    path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
    http = httplib2.Http()
    response, content = http.request(path, 'POST', body='XXX',
                                     headers=headers)
    self.assertEqual(response.status, 201)
    image = jsonutils.loads(content)['image']
    self.assertEqual('active', image['status'])
    image_id = image['id']

    # delete the image
    path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                          image_id)
    http = httplib2.Http()
    response, content = http.request(path, 'DELETE')
    self.assertEqual(response.status, 200)

    # ensure the image is marked pending delete
    response, content = http.request(path, 'HEAD')
    self.assertEqual(response.status, 200)
    self.assertEqual('pending_delete', response['x-image-meta-status'])

    # Remove the file from the backend.
    # NOTE(review): presumably this is what makes the store's _delete()
    # raise when the scrubber runs — confirm against the store backend.
    file_path = os.path.join(self.api_server.image_dir, str(image_id))
    os.remove(file_path)

    # Wait for the scrub time on the image to pass
    time.sleep(self.api_server.scrub_time)

    # run the scrubber app, and ensure it doesn't fall over
    exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
    cmd = ("%s --config-file %s" %
           (exe_cmd, self.scrubber_daemon.conf_file_name))
    exitcode, out, err = execute(cmd, raise_error=False)
    self.assertEqual(0, exitcode)
    self.wait_for_scrub(path)

    # Make sure there are no queue files associated with image.
    queue_file_path = os.path.join(self.api_server.scrubber_datadir,
                                   str(image_id))
    self.assertFalse(os.path.exists(queue_file_path))

    self.stop_servers()
def put_os_floating_ips_bulk_delete(request, context):
    """Fake bulk-delete handler: echo the requested ip_range back.

    Returns None for 'floating_ips_bulk_delete' when the request body
    carries no 'ip_range' key, mirroring dict.get semantics.
    """
    payload = jsonutils.loads(request.body)
    return {'floating_ips_bulk_delete': payload.get('ip_range')}
def test_scrubber_with_metadata_enc(self): """ test that files written to scrubber_data_dir use metadata_encryption_key when available to encrypt the location """ # FIXME(flaper87): It looks like an older commit # may have broken this test. The file_queue `add_location` # is not being called. self.skipTest("Test broken. See bug #1366682") self.cleanup() self.start_servers(delayed_delete=True, daemon=True, default_store='file') # add an image headers = { 'x-image-meta-name': 'test_image', 'x-image-meta-is_public': 'true', 'x-image-meta-disk_format': 'raw', 'x-image-meta-container_format': 'ovf', 'content-type': 'application/octet-stream', } path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', body='XXX', headers=headers) self.assertEqual(response.status, 201) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) image_id = image['id'] # delete the image path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(response.status, 200) response, content = http.request(path, 'HEAD') self.assertEqual(response.status, 200) self.assertEqual('pending_delete', response['x-image-meta-status']) # ensure the marker file has encrypted the image location by decrypting # it and checking the image_id is intact file_path = os.path.join(self.api_server.scrubber_datadir, str(image_id)) marker_uri = None with open(file_path, 'r') as f: marker_uri = f.readline().strip() self.assertTrue(marker_uri is not None) decrypted_uri = crypt.urlsafe_decrypt( self.api_server.metadata_encryption_key, marker_uri) loc = glance_store.location.StoreLocation({}) loc.parse_uri(decrypted_uri) self.assertEqual(loc.scheme, "file") self.assertEqual(image['id'], loc.obj) self.wait_for_scrub(path) self.stop_servers()