def test_many(self):
    """Ancestry lookup must cope with an absurdly deep CRUSH tree."""
    osd_id = 0
    # This is unrealistic: no CRUSH tree should ever be this deep
    depth = -50000
    # Chain every bucket to the one above it: x -> x + 1
    fake_parent_map = {node: [{'id': node + 1}] for node in range(depth, -1)}
    # The OSD hangs off the bottom of the chain
    fake_parent_map[osd_id] = [{'id': depth}]
    expected = [range(depth, 0)]
    self.assertEqual(expected, lookup_ancestry(osd_id, fake_parent_map))
def retrieve(self, request, fsid, osd_id):
    """Return serialized detail for a single OSD, including its reweight,
    host, pools, valid commands and CRUSH ancestry."""
    osd_key = int(osd_id)
    osd = self.client.get_sync_object(
        fsid, 'osd_map', ['osds_by_id', osd_key])
    crush_node = self.client.get_sync_object(
        fsid, 'osd_map', ['osd_tree_node_by_id', osd_key])
    osd['reweight'] = float(crush_node['reweight'])
    osd['server'] = self.client.server_by_service(
        [ServiceId(fsid, OSD, osd_id)])[0][1]
    osd['pools'] = self.client.get_sync_object(
        fsid, 'osd_map', ['osd_pools', osd_key])

    osd_metadata = self.client.get_sync_object(
        fsid, 'osd_map', ['metadata_by_id', osd_key])
    # These metadata keys may be absent; default to None in that case.
    osd['backend_device_node'] = osd_metadata.get(
        'backend_filestore_dev_node')
    osd['backend_partition_path'] = osd_metadata.get(
        'backend_filestore_partition_path')

    osd_commands = self.client.get_valid_commands(fsid, OSD, [osd_key])
    osd.update(osd_commands[osd_key])

    parent_map = self.client.get_sync_object(
        fsid, 'osd_map', ['parent_bucket_by_node_id'])
    osd['crush_node_ancestry'] = lookup_ancestry(osd['osd'], parent_map)

    return Response(self.serializer_class(DataObject(osd)).data)
def retrieve(self, request, fsid, osd_id):
    """Serialize one OSD: map entry plus reweight, server, pools,
    backend device info, valid commands and CRUSH ancestry."""
    node_id = int(osd_id)

    def fetch(attribute_path):
        # All the per-OSD reads go through the same sync object.
        return self.client.get_sync_object(fsid, 'osd_map', attribute_path)

    osd = fetch(['osds_by_id', node_id])
    crush_node = fetch(['osd_tree_node_by_id', node_id])
    osd['reweight'] = float(crush_node['reweight'])
    osd['server'] = self.client.server_by_service(
        [ServiceId(fsid, OSD, osd_id)])[0][1]
    osd['pools'] = fetch(['osd_pools', node_id])

    osd_metadata = fetch(['metadata_by_id', node_id])
    # Copy the backend device keys across, tolerating their absence.
    for dest, src in (
            ('backend_device_node', 'backend_filestore_dev_node'),
            ('backend_partition_path', 'backend_filestore_partition_path')):
        try:
            osd[dest] = osd_metadata[src]
        except KeyError:
            osd[dest] = None

    osd_commands = self.client.get_valid_commands(fsid, OSD, [node_id])
    osd.update(osd_commands[node_id])

    parent_map = fetch(['parent_bucket_by_node_id'])
    osd['crush_node_ancestry'] = lookup_ancestry(osd['osd'], parent_map)

    return Response(self.serializer_class(DataObject(osd)).data)
def list(self, request, fsid): # Get data needed for filtering list_filter = {} if 'pool' in request.GET: try: pool_id = int(request.GET['pool']) except ValueError: return Response("Pool ID must be an integer", status=status.HTTP_400_BAD_REQUEST) list_filter['pool'] = pool_id if 'id__in[]' in request.GET: try: ids = request.GET.getlist("id__in[]") list_filter['id__in'] = [int(i) for i in ids] except ValueError: return Response("Invalid OSD ID in list", status=status.HTTP_400_BAD_REQUEST) # Get data osds = self.client.list(fsid, OSD, list_filter, async=True) parent_map = self.client.get_sync_object(fsid, 'osd_map', ['parent_bucket_by_node_id'], async=True) osd_to_pools = self.client.get_sync_object(fsid, 'osd_map', ['osd_pools'], async=True) crush_nodes = self.client.get_sync_object(fsid, 'osd_map', ['osd_tree_node_by_id'], async=True) osds = osds.get() # Get data depending on OSD list server_info = self.client.server_by_service([ServiceId(fsid, OSD, str(osd['osd'])) for osd in osds], async=True) osd_commands = self.client.get_valid_commands(fsid, OSD, [x['osd'] for x in osds], async=True) # Preparation complete, await all data to serialize result parent_map = parent_map.get() osd_to_pools = osd_to_pools.get() crush_nodes = crush_nodes.get() server_info = server_info.get() osd_commands = osd_commands.get() # Build OSD data objects for o in osds: # An OSD being in the OSD map does not guarantee its presence in the CRUSH # map, as "osd crush rm" and "osd rm" are separate operations. 
try: o.update({'reweight': float(crush_nodes[o['osd']]['reweight'])}) except KeyError: log.warning("No CRUSH data available for OSD {0}".format(o['osd'])) o.update({'reweight': 0.0}) for o, (service_id, fqdn) in zip(osds, server_info): o['server'] = fqdn for o in osds: o['pools'] = osd_to_pools[o['osd']] o.update(osd_commands[o['osd']]) o.update({'crush_node_ancestry': lookup_ancestry(o['osd'], parent_map)}) return Response(self.serializer_class([DataObject(o) for o in osds], many=True).data)
def test_strange_map(self):
    """A node with multiple parents yields one ancestry list per path."""
    osd_id = 1
    fake_parent_map = {
        -5: [{'id': -10}],
        -4: [{'id': -5}, {'id': -1}],
        -3: [{'id': -5}, {'id': -1}],
        -2: [{'id': -5}, {'id': -1}],
        1: [{'id': -20}, {'id': -2}],
    }
    expected = [[-20], [-2, -5, -10]]
    self.assertEqual(expected, lookup_ancestry(osd_id, fake_parent_map))
def retrieve(self, request, fsid, osd_id):
    """Return serialized detail for one OSD: map entry plus reweight,
    server, pools, valid commands and CRUSH ancestry."""
    osd_key = int(osd_id)
    osd = self.client.get_sync_object(
        fsid, 'osd_map', ['osds_by_id', osd_key])

    crush_node = self.client.get_sync_object(
        fsid, 'osd_map', ['osd_tree_node_by_id', osd_key])
    osd['reweight'] = float(crush_node['reweight'])

    server_hits = self.client.server_by_service(
        [ServiceId(fsid, OSD, osd_id)])
    osd['server'] = server_hits[0][1]

    osd['pools'] = self.client.get_sync_object(
        fsid, 'osd_map', ['osd_pools', osd_key])

    osd_commands = self.client.get_valid_commands(fsid, OSD, [osd_key])
    osd.update(osd_commands[osd_key])

    parent_map = self.client.get_sync_object(
        fsid, 'osd_map', ['parent_bucket_by_node_id'])
    osd['crush_node_ancestry'] = lookup_ancestry(osd['osd'], parent_map)

    return Response(self.serializer_class(DataObject(osd)).data)
def retrieve(self, request, fsid, osd_id):
    """Serialize one OSD, enriching the raw map entry with reweight,
    hosting server, pool membership, valid commands and CRUSH ancestry."""
    node_id = int(osd_id)

    def fetch(attribute_path):
        # All per-OSD reads come from the same 'osd_map' sync object.
        return self.client.get_sync_object(fsid, 'osd_map', attribute_path)

    osd = fetch(['osds_by_id', node_id])
    osd['reweight'] = float(
        fetch(['osd_tree_node_by_id', node_id])['reweight'])
    osd['server'] = self.client.server_by_service(
        [ServiceId(fsid, OSD, osd_id)])[0][1]
    osd['pools'] = fetch(['osd_pools', node_id])

    osd.update(
        self.client.get_valid_commands(fsid, OSD, [node_id])[node_id])

    osd['crush_node_ancestry'] = lookup_ancestry(
        osd['osd'], fetch(['parent_bucket_by_node_id']))

    return Response(self.serializer_class(DataObject(osd)).data)
def list(self, request, fsid): # Get data needed for filtering list_filter = {} if 'pool' in request.GET: try: pool_id = int(request.GET['pool']) except ValueError: return Response("Pool ID must be an integer", status=status.HTTP_400_BAD_REQUEST) list_filter['pool'] = pool_id if 'id__in[]' in request.GET: try: ids = request.GET.getlist("id__in[]") list_filter['id__in'] = [int(i) for i in ids] except ValueError: return Response("Invalid OSD ID in list", status=status.HTTP_400_BAD_REQUEST) # Get data osds = self.client.list(fsid, OSD, list_filter, async=True) parent_map = self.client.get_sync_object(fsid, 'osd_map', ['parent_bucket_by_node_id'], async=True) osd_to_pools = self.client.get_sync_object(fsid, 'osd_map', ['osd_pools'], async=True) crush_nodes = self.client.get_sync_object(fsid, 'osd_map', ['osd_tree_node_by_id'], async=True) osds = osds.get() # Get data depending on OSD list server_info = self.client.server_by_service( [ServiceId(fsid, OSD, str(osd['osd'])) for osd in osds], async=True) osd_commands = self.client.get_valid_commands(fsid, OSD, [x['osd'] for x in osds], async=True) # Preparation complete, await all data to serialize result parent_map = parent_map.get() osd_to_pools = osd_to_pools.get() crush_nodes = crush_nodes.get() server_info = server_info.get() osd_commands = osd_commands.get() # Build OSD data objects for o in osds: # An OSD being in the OSD map does not guarantee its presence in the CRUSH # map, as "osd crush rm" and "osd rm" are separate operations. 
try: o.update( {'reweight': float(crush_nodes[o['osd']]['reweight'])}) except KeyError: log.warning("No CRUSH data available for OSD {0}".format( o['osd'])) o.update({'reweight': 0.0}) for o, (service_id, fqdn) in zip(osds, server_info): o['server'] = fqdn for o in osds: o['pools'] = osd_to_pools[o['osd']] o.update(osd_commands[o['osd']]) o.update( {'crush_node_ancestry': lookup_ancestry(o['osd'], parent_map)}) return Response( self.serializer_class([DataObject(o) for o in osds], many=True).data)
def test_some_multiple_osd_mapping(self):
    """An OSD with two parents that share a grandparent gives two paths."""
    osd_id = 0
    fake_parent_map = {
        0: [{'id': -1}, {'id': -2}],
        -1: [{'id': -3}],
        -2: [{'id': -3}],
    }
    expected = [[-1, -3], [-2, -3]]
    self.assertEqual(expected, lookup_ancestry(osd_id, fake_parent_map))
def testSome(self):
    """A short linear chain of buckets is walked to the root in order."""
    osd_id = 0
    # Each bucket x points at its parent x + 1, for x in -5..-2
    fake_parent_map = {bucket: [{'id': bucket + 1}]
                       for bucket in range(-5, -1)}
    fake_parent_map[osd_id] = [{'id': -5}]
    expected = [[-5, -4, -3, -2, -1]]
    self.assertEqual(expected, lookup_ancestry(osd_id, fake_parent_map))
def testOne(self):
    """A single parent produces a single one-element ancestry."""
    osd_id = 0
    fake_parent_map = {osd_id: [{'id': -1}]}
    expected = [[-1]]
    self.assertEqual(expected, lookup_ancestry(osd_id, fake_parent_map))
def testNone(self):
    """An empty parent map yields no ancestry at all."""
    osd_id = 0
    self.assertEqual([], lookup_ancestry(osd_id, {}))