def create_group(
    self,
    ctxt,
    topic,
    group,
    group_spec=None,
    request_spec_list=None,
    group_filter_properties=None,
    filter_properties_list=None,
):
    """Cast a create_group request to the scheduler.

    Negotiates the RPC version; 2.3-era peers still expect an explicit
    ``topic`` argument in the message.
    """
    version = self._compat_ver("3.0", "2.3")
    cctxt = self.client.prepare(version=version)
    # Request specs may be versioned objects; flatten each for the wire.
    request_spec_p_list = [
        jsonutils.to_primitive(spec) for spec in request_spec_list
    ]
    msg_args = {
        "group": group,
        "group_spec": jsonutils.to_primitive(group_spec),
        "request_spec_list": request_spec_p_list,
        "group_filter_properties": group_filter_properties,
        "filter_properties_list": filter_properties_list,
    }
    if version == "2.3":
        # Older schedulers take the topic in the message body.
        msg_args["topic"] = topic
    return cctxt.cast(ctxt, "create_group", **msg_args)
def create_volume(
    self,
    ctxt,
    topic,
    volume_id,
    snapshot_id=None,
    image_id=None,
    request_spec=None,
    filter_properties=None,
    volume=None,
):
    """Cast a create_volume request to the scheduler service.

    Supports peers down to RPC version 2.0, downgrading the message
    contents as needed for older schedulers.
    """
    msg_args = {
        "snapshot_id": snapshot_id,
        "image_id": image_id,
        "request_spec": jsonutils.to_primitive(request_spec),
        "filter_properties": filter_properties,
        "volume": volume,
    }
    version = self._compat_ver("3.0", "2.2", "2.0")
    if version != "3.0":
        # Pre-3.0 peers expect the id and topic passed explicitly
        # instead of being derived from the volume object.
        msg_args["volume_id"] = volume.id
        msg_args["topic"] = topic
    if version == "2.0":
        # Send request_spec as dict
        msg_args["request_spec"] = jsonutils.to_primitive(request_spec)
        # NOTE(dulek): This is to keep supporting Mitaka's scheduler which
        # expects a dictionary when creating a typeless volume.
        if msg_args["request_spec"].get("volume_type") is None:
            msg_args["request_spec"]["volume_type"] = {}
    cctxt = self.client.prepare(version=version)
    return cctxt.cast(ctxt, "create_volume", **msg_args)
def create_volume(
    self,
    ctxt,
    topic,
    volume_id,
    snapshot_id=None,
    image_id=None,
    request_spec=None,
    filter_properties=None,
    volume=None,
):
    """Cast a create_volume request to the scheduler service.

    Supports peers down to RPC version 2.0, downgrading the message
    contents as needed for older schedulers.
    """
    request_spec_p = jsonutils.to_primitive(request_spec)
    msg_args = {
        "snapshot_id": snapshot_id,
        "image_id": image_id,
        "request_spec": request_spec_p,
        "filter_properties": filter_properties,
        "volume": volume,
    }
    version = self._compat_ver("3.0", "2.2", "2.0")
    if version in ("2.2", "2.0"):
        # Pre-3.0 peers expect the id and topic passed explicitly.
        msg_args["volume_id"] = volume.id
        msg_args["topic"] = topic
    if version == "2.0":
        # Send request_spec as dict
        msg_args["request_spec"] = jsonutils.to_primitive(request_spec)
        # Keep supporting Mitaka's (2.0) scheduler, which expects a
        # dictionary (not None) for the volume type when creating a
        # typeless volume.  Without this guard the old peer breaks.
        if msg_args["request_spec"].get("volume_type") is None:
            msg_args["request_spec"]["volume_type"] = {}
    cctxt = self.client.prepare(version=version)
    return cctxt.cast(ctxt, "create_volume", **msg_args)
def _test_service_get_all(self, fake_filters, **kwargs):
    """Verify host_api.service_get_all proxies to the cells RPC API.

    The expected results are the proxied services with an
    ``availability_zone`` key merged in.
    """
    services = [
        cells_utils.ServiceProxy(
            objects.Service(**dict(test_service.fake_service,
                                   id=svc_id, topic='compute',
                                   host=svc_host)),
            'cell1')
        for svc_id, svc_host in ((1, 'host1'), (2, 'host2'))]
    exp_services = []
    for svc in services:
        expected = copy.copy(svc)
        expected.update({'availability_zone': 'patron'})
        exp_services.append(expected)

    self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
                             'service_get_all')
    self.host_api.cells_rpcapi.service_get_all(
        self.ctxt, filters=fake_filters).AndReturn(services)
    self.mox.ReplayAll()
    result = self.host_api.service_get_all(self.ctxt,
                                           filters=fake_filters,
                                           **kwargs)
    self.mox.VerifyAll()
    self.assertEqual(jsonutils.to_primitive(exp_services),
                     jsonutils.to_primitive(result))
def test_depth(self):
    """to_primitive honours max_depth when flattening nested mappings."""
    class LevelsGenerator(object):
        def __init__(self, levels):
            self._levels = levels

        def iteritems(self):
            if self._levels == 0:
                return iter([])
            return iter([(0, LevelsGenerator(self._levels - 1))])

    l4_obj = LevelsGenerator(4)

    # Truncating at depth N yields an N-level nested dict ending in "?".
    expected = {0: {0: "?"}}
    for depth in (2, 3, 4):
        self.assertEqual(jsonutils.to_primitive(l4_obj, max_depth=depth),
                         expected)
        expected = {0: expected}
def setUp(self):
    """Create the volume/snapshot fixtures shared by the RPC API tests."""
    super(VolumeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    vol = {
        "host": "fake_host",
        "availability_zone": CONF.storage_availability_zone,
        "status": "available",
        "attach_status": "detached",
        "metadata": {"test_key": "test_val"},
    }
    volume = db.volume_create(self.context, vol)
    snpshot = {
        "id": 1,
        "volume_id": "fake_id",
        "status": "creating",
        "progress": "0%",
        "volume_size": 0,
        "display_name": "fake_name",
        "display_description": "fake_description",
    }
    snapshot = db.snapshot_create(self.context, snpshot)
    # Primitive versions mirror what actually travels over RPC.
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_volume_metadata = volume["volume_metadata"]
    self.fake_snapshot = jsonutils.to_primitive(snapshot)
    self.fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                             **snpshot)
    self.fake_reservations = ["RESERVATION"]
def setUp(self):
    """Build flavor/instance/BDM fixtures and stub out cell mappings."""
    super(ComputeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context)
    self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj)
    attrs = {
        'host': 'fake_host',
        'instance_type_id': self.fake_flavor_obj['id'],
        'instance_type': self.fake_flavor_obj,
    }
    self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
                                                             **attrs)
    self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
    self.fake_volume_bdm = objects_block_dev.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume',
             'destination_type': 'volume',
             'instance_uuid': self.fake_instance_obj.uuid,
             'volume_id': 'fake-volume-id'}))
    # FIXME(melwitt): Temporary while things have no mappings
    self.patcher1 = mock.patch('nova.objects.InstanceMapping.'
                               'get_by_instance_uuid')
    self.patcher2 = mock.patch('nova.objects.HostMapping.get_by_host')
    mock_inst_mapping = self.patcher1.start()
    mock_host_mapping = self.patcher2.start()
    # Simulate the "no mapping yet" case for both lookups.
    mock_inst_mapping.side_effect = exception.InstanceMappingNotFound(
        uuid=self.fake_instance_obj.uuid)
    mock_host_mapping.side_effect = exception.HostMappingNotFound(
        name=self.fake_instance_obj.host)
def prep_resize(
    self,
    ctxt,
    image,
    instance,
    instance_type,
    host,
    reservations=None,
    request_spec=None,
    filter_properties=None,
    node=None,
    clean_shutdown=True,
):
    """Cast a prep_resize request to the compute service on *host*."""
    msg_args = {
        "instance": instance,
        "instance_type": jsonutils.to_primitive(instance_type),
        "image": jsonutils.to_primitive(image),
        "reservations": reservations,
        "request_spec": request_spec,
        "filter_properties": filter_properties,
        "node": node,
        "clean_shutdown": clean_shutdown,
    }
    # Negotiate the newest mutually supported version; 3.0-era peers
    # predate the clean_shutdown argument, so drop it for them.
    for candidate in ("4.0", "3.38"):
        if self.client.can_send_version(candidate):
            version = candidate
            break
    else:
        del msg_args["clean_shutdown"]
        version = "3.0"
    cctxt = self.client.prepare(server=host, version=version)
    cctxt.cast(ctxt, "prep_resize", **msg_args)
def build_instances(self, ctxt, **kwargs):
    """Build instances."""
    # Downgrade message contents step by step until the peer's RPC
    # version is satisfied; each branch adjusts kwargs accordingly.
    build_inst_kwargs = kwargs
    instances = build_inst_kwargs['instances']
    build_inst_kwargs['image'] = jsonutils.to_primitive(
        build_inst_kwargs['image'])
    version = '1.34'
    if self.client.can_send_version('1.34'):
        # 1.34+ no longer accepts legacy_bdm; drop it if present.
        build_inst_kwargs.pop('legacy_bdm', None)
    else:
        # Older peers expect the block device mapping as a primitive.
        bdm_p = objects_base.obj_to_primitive(
            build_inst_kwargs['block_device_mapping'])
        build_inst_kwargs['block_device_mapping'] = bdm_p
        version = '1.32'
    if not self.client.can_send_version('1.32'):
        # Pre-1.32 peers expect primitive instances, not objects.
        instances_p = [jsonutils.to_primitive(inst) for inst in instances]
        build_inst_kwargs['instances'] = instances_p
        version = '1.30'
    if not self.client.can_send_version('1.30'):
        # Pre-1.30 peers expect the flavor in filter_properties as a
        # primitive rather than a versioned object.
        if 'filter_properties' in build_inst_kwargs:
            filter_properties = build_inst_kwargs['filter_properties']
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            filter_properties['instance_type'] = flavor_p
        version = '1.8'
    cctxt = self.client.prepare(version=version)
    cctxt.cast(ctxt, 'build_instances',
               build_inst_kwargs=build_inst_kwargs)
def setUp(self):
    """Create volume and snapshot fixtures for the volume RPC API tests."""
    super(VolumeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    vol = {}
    vol['host'] = 'fake_host'
    vol['availability_zone'] = CONF.storage_availability_zone
    vol['status'] = "available"
    vol['attach_status'] = "detached"
    vol['metadata'] = {"test_key": "test_val"}
    volume = db.volume_create(self.context, vol)
    snpshot = {
        'id': 1,
        'volume_id': 'fake_id',
        'status': "creating",
        'progress': '0%',
        'volume_size': 0,
        'display_name': 'fake_name',
        'display_description': 'fake_description'}
    snapshot = db.snapshot_create(self.context, snpshot)
    # Primitive versions mirror what actually travels over RPC.
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_volume_metadata = volume["volume_metadata"]
    self.fake_snapshot = jsonutils.to_primitive(snapshot)
    self.fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                             **snpshot)
    self.fake_reservations = ["RESERVATION"]
def test_fallback_itertools_count(self):
    """An itertools.count is stringified by default; fallback overrides."""
    counter = itertools.count(1)
    ret = jsonutils.to_primitive(counter)
    self.assertEqual(six.text_type(counter), ret)
    ret = jsonutils.to_primitive(counter,
                                 fallback=lambda _: 'itertools_count')
    self.assertEqual('itertools_count', ret)
def _notify(self, priority, ctxt, event_type, payload):
    """Record a notification locally instead of emitting it on the bus."""
    serialized = self._serializer.serialize_entity(ctxt, payload)
    # NOTE(sileht): simulate the kombu serializer
    # this permit to raise an exception if something have not
    # been serialized correctly
    jsonutils.to_primitive(serialized)
    NOTIFICATIONS.append(
        FakeMessage(self.publisher_id, priority, event_type, serialized))
def test_fallback(self):
    """Without a fallback the object passes through; with one it applies."""
    obj = ReprObject()
    self.assertIs(obj, jsonutils.to_primitive(obj))
    self.assertEqual('repr', jsonutils.to_primitive(obj, fallback=repr))
def setUp(self):
    """Create volume, snapshot and consistency-group fixtures.

    Raw DB rows are also converted with jsonutils.to_primitive so tests
    can compare against what actually travels over RPC.
    """
    super(VolumeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    vol = {}
    vol["host"] = "fake_host"
    vol["availability_zone"] = CONF.storage_availability_zone
    vol["status"] = "available"
    vol["attach_status"] = "detached"
    vol["metadata"] = {"test_key": "test_val"}
    volume = db.volume_create(self.context, vol)
    snpshot = {
        "id": 1,
        "volume_id": "fake_id",
        "status": "creating",
        "progress": "0%",
        "volume_size": 0,
        "display_name": "fake_name",
        "display_description": "fake_description",
    }
    snapshot = db.snapshot_create(self.context, snpshot)
    # A source CG, a snapshot of it, and two groups created from the
    # snapshot and from the source CG respectively.
    source_group = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type="type1,type2",
        host="fakehost@fakedrv#fakepool",
    )
    cgsnapshot = tests_utils.create_cgsnapshot(self.context, consistencygroup_id=source_group["id"])
    group = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type="type1,type2",
        host="fakehost@fakedrv#fakepool",
        cgsnapshot_id=cgsnapshot["id"],
    )
    group2 = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type="type1,type2",
        host="fakehost@fakedrv#fakepool",
        source_cgid=source_group["id"],
    )
    # Re-read as versioned objects, which is what the RPC API expects.
    group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
    group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
    self.fake_volume_metadata = volume["volume_metadata"]
    self.fake_snapshot = jsonutils.to_primitive(snapshot)
    self.fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snpshot)
    self.fake_reservations = ["RESERVATION"]
    self.fake_cg = group
    self.fake_cg2 = group2
    self.fake_src_cg = jsonutils.to_primitive(source_group)
    self.fake_cgsnap = jsonutils.to_primitive(cgsnapshot)
def test_fallback_nasty(self):
    """A type object is stringified by default; fallback can format it."""
    obj = int
    self.assertEqual(six.text_type(obj), jsonutils.to_primitive(obj))

    def formatter(typeobj):
        return 'type:%s' % typeobj.__name__

    self.assertEqual("type:int",
                     jsonutils.to_primitive(obj, fallback=formatter))
def _do_test(mock_service_get_all):
    """Drive service_get_all against the mocked cells RPC and compare."""
    mock_service_get_all.return_value = services
    result = self.host_api.service_get_all(
        self.ctxt, filters=fake_filters, **kwargs)
    mock_service_get_all.assert_called_once_with(self.ctxt,
                                                 filters=fake_filters)
    # Compare primitives, since proxies don't compare directly.
    self.assertEqual(jsonutils.to_primitive(exp_services),
                     jsonutils.to_primitive(result))
def setUp(self):
    """Create volume, snapshot and consistency-group fixtures.

    Raw DB rows are also converted with jsonutils.to_primitive so tests
    can compare against what actually travels over RPC.
    """
    super(VolumeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    vol = {}
    vol['host'] = 'fake_host'
    vol['availability_zone'] = CONF.storage_availability_zone
    vol['status'] = "available"
    vol['attach_status'] = "detached"
    vol['metadata'] = {"test_key": "test_val"}
    volume = db.volume_create(self.context, vol)
    snpshot = {
        'id': 1,
        'volume_id': 'fake_id',
        'status': "creating",
        'progress': '0%',
        'volume_size': 0,
        'display_name': 'fake_name',
        'display_description': 'fake_description'}
    snapshot = db.snapshot_create(self.context, snpshot)
    # A source CG, a snapshot of it, and two groups created from the
    # snapshot and from the source CG respectively.
    source_group = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type='type1,type2',
        host='fakehost@fakedrv#fakepool')
    cgsnapshot = tests_utils.create_cgsnapshot(
        self.context,
        consistencygroup_id=source_group['id'])
    group = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type='type1,type2',
        host='fakehost@fakedrv#fakepool',
        cgsnapshot_id=cgsnapshot['id'])
    group2 = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type='type1,type2',
        host='fakehost@fakedrv#fakepool',
        source_cgid=source_group['id'])
    # Re-read as versioned objects, which is what the RPC API expects.
    group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
    group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_volume_metadata = volume["volume_metadata"]
    self.fake_snapshot = jsonutils.to_primitive(snapshot)
    self.fake_snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
                                                             **snpshot)
    self.fake_reservations = ["RESERVATION"]
    self.fake_cg = group
    self.fake_cg2 = group2
    self.fake_src_cg = jsonutils.to_primitive(source_group)
    self.fake_cgsnap = jsonutils.to_primitive(cgsnapshot)
def test_fallback_list(self):
    """The fallback applies to elements inside containers too."""
    obj = ReprObject()
    obj_list = [obj]
    self.assertEqual([obj], jsonutils.to_primitive(obj_list))
    self.assertEqual(['repr'],
                     jsonutils.to_primitive(obj_list, fallback=repr))
def setUp(self):
    """Build share/snapshot/replica/CG fixtures for the share RPC tests."""
    super(ShareRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    share = db_utils.create_share(
        availability_zone=CONF.storage_availability_zone,
        status=constants.STATUS_AVAILABLE
    )
    access = db_utils.create_access(share_id=share['id'])
    snapshot = db_utils.create_snapshot(share_id=share['id'])
    share_replica = db_utils.create_share_replica(
        id='fake_replica',
        share_id='fake_share_id',
        host='fake_host',
    )
    share_server = db_utils.create_share_server()
    # Plain dicts stand in for consistency group / host rows.
    cg = {'id': 'fake_cg_id', 'host': 'fake_host'}
    cgsnapshot = {'id': 'fake_cg_id'}
    host = {'host': 'fake_host', 'capabilities': 1}
    self.fake_share = jsonutils.to_primitive(share)
    # mock out the getattr on the share db model object since jsonutils
    # doesn't know about those extra attributes to pull in
    self.fake_share['instance'] = jsonutils.to_primitive(share.instance)
    self.fake_share_replica = jsonutils.to_primitive(share_replica)
    self.fake_access = jsonutils.to_primitive(access)
    self.fake_snapshot = jsonutils.to_primitive(snapshot)
    self.fake_share_server = jsonutils.to_primitive(share_server)
    self.fake_cg = jsonutils.to_primitive(cg)
    self.fake_cgsnapshot = jsonutils.to_primitive(cgsnapshot)
    self.fake_host = jsonutils.to_primitive(host)
    self.ctxt = context.RequestContext('fake_user', 'fake_project')
    self.rpcapi = share_rpcapi.ShareAPI()
def test_instance(self):
    """convert_instances=True flattens instance attrs; default passes it."""
    class MysteryClass(object):
        a = 10

        def __init__(self):
            self.b = 1

    x = MysteryClass()
    # Only per-instance attributes are captured, not class attributes.
    self.assertEqual(dict(b=1),
                     jsonutils.to_primitive(x, convert_instances=True))
    self.assertEqual(x, jsonutils.to_primitive(x))
def test_fallback_typeerror(self):
    """Objects whose iteration raises TypeError are stringified or fall back."""
    class NotIterable(object):
        # __iter__ is not callable, cause a TypeError in to_primitive()
        __iter__ = None

    obj = NotIterable()
    self.assertEqual(six.text_type(obj), jsonutils.to_primitive(obj))
    self.assertEqual(
        'fallback',
        jsonutils.to_primitive(obj, fallback=lambda _: 'fallback'))
def setUp(self):
    """Create instance and block-device-mapping fixtures."""
    super(ComputeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    attrs = {'host': 'fake_host', 'instance_type_id': 1}
    self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
                                                             **attrs)
    self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
    bdm_dict = fake_block_device.FakeDbBlockDeviceDict(
        {'source_type': 'volume',
         'destination_type': 'volume',
         'instance_uuid': self.fake_instance['uuid'],
         'volume_id': 'fake-volume-id'})
    self.fake_volume_bdm = jsonutils.to_primitive(bdm_dict)
def _notify(self, priority, ctxt, event_type, payload):
    """Capture a notification locally, sanity-checking serializability."""
    payload = self._serializer.serialize_entity(ctxt, payload)
    # NOTE(Dinesh_Bhor): simulate the kombu serializer
    # this permit to raise an exception if something have not
    # been serialized correctly
    jsonutils.to_primitive(payload)
    # NOTE(Dinesh_Bhor): Try to serialize the context, as the rpc would.
    # An exception will be raised if something is wrong
    # with the context.
    self._serializer.serialize_context(ctxt)
    NOTIFICATIONS.append(
        FakeMessage(self.publisher_id, priority, event_type, payload,
                    ctxt))
def test_instance(self, warn_mock):
    """convert_instances=True flattens instance attrs and warns once."""
    class MysteryClass(object):
        a = 10

        def __init__(self):
            self.b = 1

    x = MysteryClass()
    # Only per-instance attributes are captured, not class attributes.
    self.assertEqual(jsonutils.to_primitive(x, convert_instances=True),
                     dict(b=1))
    self.assertEqual(jsonutils.to_primitive(x), x)
    warn_mock.assert_called_once()
def create_group(self, ctxt, group, group_spec=None, request_spec_list=None,
                 group_filter_properties=None, filter_properties_list=None):
    """Cast a create_group request to the scheduler."""
    cctxt = self._get_cctxt()
    # Flatten the specs to primitives for the wire.
    specs = []
    for spec in request_spec_list:
        specs.append(jsonutils.to_primitive(spec))
    return cctxt.cast(ctxt, 'create_group',
                      group=group,
                      group_spec=jsonutils.to_primitive(group_spec),
                      request_spec_list=specs,
                      group_filter_properties=group_filter_properties,
                      filter_properties_list=filter_properties_list)
def create_volume(self, ctxt, volume, host, request_spec,
                  filter_properties, allow_reschedule=True):
    """Cast a create_volume request to the volume manager on *host*.

    Peers at RPC version 1.32+ receive the full volume object; older
    peers (1.24) only get the volume id.
    """
    request_spec_p = jsonutils.to_primitive(request_spec)
    msg_args = {'volume_id': volume.id,
                'request_spec': request_spec_p,
                'filter_properties': filter_properties,
                'allow_reschedule': allow_reschedule}
    if self.client.can_send_version('1.32'):
        version = '1.32'
        msg_args['volume'] = volume
    else:
        version = '1.24'

    cctxt = self._get_cctxt(host, version)
    # NOTE: the original recomputed request_spec_p here a second time;
    # that dead assignment has been removed.
    cctxt.cast(ctxt, 'create_volume', **msg_args)
def get(self):
    """Get cell(s) for the project.

    Get cell details if for a particular region.  Requires ``region``;
    with ``name`` or ``id`` a single cell (with its variables attached
    as ``data``) is returned, otherwise all cells in the region.
    """
    region = g.args["region"]
    cell_name = g.args["name"]
    cell_id = g.args["id"]
    context = request.environ.get('context')

    if not region:
        msg = "`region` is required to get cells"
        return self.error_response(400, msg)

    def _get_single_cell(getter, *args):
        # Shared fetch/serialize path for by-name and by-id lookups.
        try:
            cell_obj = getter(context, *args)
        except exceptions.NotFound:
            return self.error_response(404, 'Not Found')
        except Exception as err:
            LOG.error("Error during cells get: %s" % err)
            return self.error_response(500, 'Unknown Error')
        cell_obj.data = cell_obj.variables
        cell = jsonutils.to_primitive(cell_obj)
        return [cell], 200, None

    if region and cell_name:
        # Get this particular cell along with its data
        return _get_single_cell(dbapi.cells_get_by_name, region, cell_name)

    if region and cell_id:
        return _get_single_cell(dbapi.cells_get_by_id, cell_id)

    # No cell id or name so get all cells for this region only
    try:
        cells_obj = dbapi.cells_get_all(context, region)
        cells = jsonutils.to_primitive(cells_obj)
        return cells, 200, None
    except exceptions.NotFound:
        return self.error_response(404, 'Not Found')
def get(self, id=None):
    """Get region(s) for the project.

    Get region details if for a particular region.
    """
    _id = g.args["id"]
    _name = g.args["name"]
    region_id = id or _id
    region_name = _name
    context = request.environ.get('context')

    if not region_id and not region_name:
        # Get all regions for this tenant
        try:
            regions_obj = dbapi.regions_get_all(context)
        except exceptions.NotFound:
            return self.error_response(404, 'Not Found')
        if not regions_obj:
            return None, 404, None
        return jsonutils.to_primitive(regions_obj), 200, None

    if region_name:
        try:
            region_obj = dbapi.regions_get_by_name(context, region_name)
        except exceptions.NotFound:
            return self.error_response(404, 'Not Found')
        if not region_obj:
            return None, 404, None
        region_obj.data = region_obj.variables
        return [jsonutils.to_primitive(region_obj)], 200, None

    # region_id is set if we reach here (the all-regions branch above
    # handled the neither-set case).
    try:
        region_obj = dbapi.regions_get_by_id(context, region_id)
    except exceptions.NotFound:
        return self.error_response(404, 'Not Found')
    if not region_obj:
        return None, 404, None
    region_obj.data = region_obj.variables
    return [jsonutils.to_primitive(region_obj)], 200, None
def _sanitizer(self, obj):
    """Sanitizer method that will be passed to jsonutils.dumps.

    Converts otherwise unserializable objects: anything exposing
    ``to_dict`` is converted with it, MultiDicts are flattened via
    ``mixed()``, and everything else falls through to
    jsonutils.to_primitive.
    """
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    if isinstance(obj, multidict.MultiDict):
        return obj.mixed()
    return jsonutils.to_primitive(obj)
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping,
                    legacy_bdm=True):
    """Cast a build_instances request, downgrading the message contents
    step by step to the newest RPC version the peer accepts.
    """
    image_p = jsonutils.to_primitive(image)
    version = '1.10'
    if not self.client.can_send_version(version):
        # 1.9 expects the flavor in filter_properties as a primitive.
        version = '1.9'
        if 'instance_type' in filter_properties:
            flavor = filter_properties['instance_type']
            flavor_p = objects_base.obj_to_primitive(flavor)
            filter_properties = dict(filter_properties,
                                     instance_type=flavor_p)
    kw = {'instances': instances, 'image': image_p,
          'filter_properties': filter_properties,
          'admin_password': admin_password,
          'injected_files': injected_files,
          'requested_networks': requested_networks,
          'security_groups': security_groups}
    if not self.client.can_send_version(version):
        # 1.8 predates the NetworkRequestList object.
        version = '1.8'
        kw['requested_networks'] = kw['requested_networks'].as_tuples()
    if not self.client.can_send_version('1.7'):
        # 1.5 still receives the block device mapping explicitly.
        version = '1.5'
        bdm_p = objects_base.obj_to_primitive(block_device_mapping)
        kw.update({'block_device_mapping': bdm_p,
                   'legacy_bdm': legacy_bdm})
    cctxt = self.client.prepare(version=version)
    cctxt.cast(context, 'build_instances', **kw)
def test_list(self):
    """A plain list passes through unchanged."""
    self.assertEqual([1, 2, 3], jsonutils.to_primitive([1, 2, 3]))
def test_tuple(self):
    """A tuple is converted to a list."""
    self.assertEqual([1, 2, 3], jsonutils.to_primitive((1, 2, 3)))
def test_empty_dict(self):
    """An empty dict passes through unchanged."""
    self.assertEqual({}, jsonutils.to_primitive({}))
def test_datetime_preserve(self):
    """convert_datetime=False leaves datetime objects untouched."""
    stamp = datetime.datetime(1920, 2, 3, 4, 5, 6, 7)
    self.assertEqual(stamp,
                     jsonutils.to_primitive(stamp, convert_datetime=False))
def test_typeerror(self):
    """A class object (not an instance) is converted to its text repr."""
    x = bytearray  # Class, not instance
    expected = u"<class 'bytearray'>" if six.PY3 else u"<type 'bytearray'>"
    self.assertEqual(expected, jsonutils.to_primitive(x))
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
    """Look up an agent build row and return it in primitive form."""
    return jsonutils.to_primitive(
        self.db.agent_build_get_by_triple(context, hypervisor, os,
                                          architecture))
def instance_get_active_by_window_joined(self, context, begin, end,
                                         project_id, host):
    """Return instances active in the window, in primitive form."""
    return jsonutils.to_primitive(
        self.db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host))
def instance_fault_create(self, context, values):
    """Create an instance fault row and return it in primitive form."""
    return jsonutils.to_primitive(
        self.db.instance_fault_create(context, values))
def action_event_finish(self, context, values):
    """Finish an action event and return the row in primitive form."""
    return jsonutils.to_primitive(
        self.db.action_event_finish(context, values))
def service_create(self, context, values):
    """Create a service row and return it in primitive form."""
    return jsonutils.to_primitive(self.db.service_create(context, values))
def compute_node_create(self, context, values):
    """Create a compute node row and return it in primitive form."""
    return jsonutils.to_primitive(
        self.db.compute_node_create(context, values))
def test_message_with_param(self):
    """A translated message interpolated with %s survives to_primitive."""
    msg = self.trans_fixture.lazy('A message with param: %s') % 'test_domain'
    self.assertEqual(msg, jsonutils.to_primitive(msg))
def test_ipaddr(self):
    """netaddr.IPAddress values become their dotted-quad strings."""
    converted = jsonutils.to_primitive(
        {'ip_addr': netaddr.IPAddress('1.2.3.4')})
    self.assertEqual({'ip_addr': '1.2.3.4'}, converted)
def provider_fw_rule_get_all(self, context):
    """Return all provider firewall rules in primitive form."""
    return jsonutils.to_primitive(self.db.provider_fw_rule_get_all(context))
def test_DateTime(self):
    """xmlrpclib DateTime values serialize to ISO-8601 with microseconds."""
    stamp = xmlrpclib.DateTime()
    stamp.decode("19710203T04:05:06")
    self.assertEqual('1971-02-03T04:05:06.000000',
                     jsonutils.to_primitive(stamp))
def aggregate_metadata_get_by_host(self, context, host,
                                   key='availability_zone'):
    """Return aggregate metadata for *host* in primitive form."""
    return jsonutils.to_primitive(
        self.db.aggregate_metadata_get_by_host(context, host, key))
def test_datetime(self):
    """datetime values serialize to ISO-8601 text with microseconds."""
    stamp = datetime.datetime(1920, 2, 3, 4, 5, 6, 7)
    self.assertEqual('1920-02-03T04:05:06.000007',
                     jsonutils.to_primitive(stamp))
def aggregate_host_add(self, context, aggregate, host):
    """Add *host* to an aggregate and return the new row as a primitive."""
    # Host-aggregate changes require an elevated (admin) context.
    return jsonutils.to_primitive(
        self.db.aggregate_host_add(context.elevated(), aggregate['id'],
                                   host))
def test_dict(self):
    """A simple dict passes through unchanged."""
    self.assertEqual(dict(a=1, b=2, c=3),
                     jsonutils.to_primitive(dict(a=1, b=2, c=3)))
def migration_get_in_progress_by_host_and_node(self, context, host, node):
    """Return in-progress migrations for host/node in primitive form."""
    return jsonutils.to_primitive(
        self.db.migration_get_in_progress_by_host_and_node(context, host,
                                                           node))
def test_empty_list(self):
    """An empty list passes through unchanged."""
    self.assertEqual([], jsonutils.to_primitive([]))
def instance_get_by_uuid(self, context, instance_uuid, columns_to_join):
    """Fetch an instance by uuid and return it in primitive form."""
    instance = self.db.instance_get_by_uuid(context, instance_uuid,
                                            columns_to_join)
    return jsonutils.to_primitive(instance)
def _get_weighted_candidates(self, context, request_spec,
                             filter_properties=None):
    """Return a list of backends that meet required specs.

    Returned list is ordered by their fitness.
    """
    elevated = context.elevated()

    # Since Cinder is using mixed filters from Oslo and it's own, which
    # takes 'resource_XX' and 'volume_XX' as input respectively, copying
    # 'volume_XX' to 'resource_XX' will make both filters happy.
    volume_type = request_spec.get("volume_type")
    resource_type = volume_type if volume_type is not None else {}

    config_options = self._get_configuration_options()

    if filter_properties is None:
        filter_properties = {}
    self._populate_retry(filter_properties,
                         request_spec['volume_properties'])
    request_spec_dict = jsonutils.to_primitive(request_spec)
    filter_properties.update({'context': context,
                              'request_spec': request_spec_dict,
                              'config_options': config_options,
                              'volume_type': volume_type,
                              'resource_type': resource_type})

    self.populate_filter_properties(request_spec, filter_properties)

    # If multiattach is enabled on a volume, we need to add
    # multiattach to extra specs, so that the capability
    # filtering is enabled.
    multiattach = request_spec['volume_properties'].get('multiattach',
                                                        False)
    if multiattach and 'multiattach' not in resource_type.get(
            'extra_specs', {}):
        if 'extra_specs' not in resource_type:
            resource_type['extra_specs'] = {}
        resource_type['extra_specs'].update(multiattach='<is> True')

    # Find our local list of acceptable backends by filtering and
    # weighing our options. we virtually consume resources on
    # it so subsequent selections can adjust accordingly.

    # Note: remember, we are using an iterator here. So only
    # traverse this list once.
    backends = self.host_manager.get_all_backend_states(elevated)

    # Filter local hosts based on requirements ...
    backends = self.host_manager.get_filtered_backends(backends,
                                                       filter_properties)
    if not backends:
        return []

    LOG.debug("Filtered %s", backends)
    # weighted_backends = WeightedHost() ... the best backend for the job.
    weighed_backends = self.host_manager.get_weighed_backends(
        backends, filter_properties)
    return weighed_backends
def test_message_with_named_param(self):
    """A translated message with a named %(param)s survives to_primitive."""
    msg = self.trans_fixture.lazy('A message with params: %(param)s')
    msg = msg % {'param': 'hello'}
    self.assertEqual(msg, jsonutils.to_primitive(msg))
def compute_node_delete(self, context, node):
    """Delete a compute node row and return the result as a primitive."""
    return jsonutils.to_primitive(
        self.db.compute_node_delete(context, node['id']))
def compute_node_update(self, context, node, values):
    """Update a compute node row and return the result as a primitive."""
    return jsonutils.to_primitive(
        self.db.compute_node_update(context, node['id'], values))