def list(self, request, fsid):
    """List CRUSH rulesets for the cluster, with rules grouped by ruleset id.

    Each rule is annotated with 'osd_count', the number of OSDs mapped by
    that rule according to the cluster's osd_map sync object.
    """
    rules = self.client.list(fsid, CRUSH_RULE, {})
    osds_by_rule_id = self.client.get_sync_object(fsid, 'osd_map', ['osds_by_rule_id'])

    grouped = defaultdict(list)
    for rule in rules:
        # Annotate with how many OSDs this rule maps to.
        rule['osd_count'] = len(osds_by_rule_id[rule['rule_id']])
        grouped[rule['ruleset']].append(rule)

    rulesets = []
    for ruleset_id, ruleset_rules in grouped.items():
        rulesets.append(DataObject({
            'id': ruleset_id,
            'rules': [DataObject(r) for r in ruleset_rules]
        }))

    return Response(CrushRuleSetSerializer(rulesets, many=True).data)
def retrieve(self, request, pk):
    """Fetch a single cluster by primary key; 404 when it does not exist."""
    cluster_data = self.client.get_cluster(pk)
    if cluster_data:
        return Response(ClusterSerializer(DataObject(cluster_data)).data)
    return Response(status=status.HTTP_404_NOT_FOUND)
def get(self, request):
    """Report server identity (from salt grains) and bootstrap commands.

    Resolves the host's own address via its FQDN, falling back to the
    first address of the first non-loopback interface when resolution
    fails.
    """
    grains = get_local_grains()
    try:
        ipaddr = socket.gethostbyname(grains['fqdn'])
    except socket.gaierror:
        # It is annoying, but not rare, to have a host that cannot
        # resolve its own name.  From a dict of interface name to list
        # of addresses, pick the first address from the first interface
        # which has some addresses and isn't a loopback.
        candidates = [
            addrs for name, addrs in grains['ip_interfaces'].items()
            if name not in ['lo', 'lo0'] and addrs
        ]
        ipaddr = candidates[0][0]

    proto = "https" if request.is_secure() else "http"
    bootstrap_url = "{0}://{1}{2}".format(proto, request.META['HTTP_HOST'], reverse('bootstrap'))

    BOOTSTRAP_UBUNTU = "wget -O - {url} | sudo python"
    BOOTSTRAP_RHEL = "curl {url} | sudo python"

    return Response(self.serializer_class(DataObject({
        "version": str(VERSION),
        "license": "N/A",
        "registered": "N/A",
        "hostname": grains['host'],
        "fqdn": grains['fqdn'],
        "ipaddr": ipaddr,
        "bootstrap_url": bootstrap_url,
        "bootstrap_ubuntu": BOOTSTRAP_UBUNTU.format(url=bootstrap_url),
        "bootstrap_rhel": BOOTSTRAP_RHEL.format(url=bootstrap_url),
    })).data)
def get(self, request):
    """Report server identity and bootstrap commands.

    Bug fix: the original assigned ``socket.getfqdn()`` to ``hostname``
    and ``socket.gethostname()`` to ``fqdn`` — the two values were
    swapped.  The resolution target is unchanged (we still resolve the
    FQDN, as the original resolved its ``getfqdn()`` result).
    """
    try:
        hostname = socket.gethostname()
        fqdn = socket.getfqdn()
        ipaddr = socket.gethostbyname(fqdn)
    except socket.gaierror:
        # It is annoying, but not rare, to have a host
        # that cannot resolve its own name.  Report all three fields
        # as null rather than failing the request.
        ipaddr = None
        hostname = None
        fqdn = None

    proto = "https" if request.is_secure() else "http"
    bootstrap_url = "{0}://{1}{2}".format(proto, request.META['HTTP_HOST'], reverse('bootstrap'))

    BOOTSTRAP_UBUNTU = "wget -O - {url} | sudo python"
    BOOTSTRAP_RHEL = "curl {url} | sudo python"

    return Response(self.serializer_class(DataObject({
        "version": str(VERSION),
        "license": "N/A",
        "registered": "N/A",
        "hostname": hostname,
        "fqdn": fqdn,
        "ipaddr": ipaddr,
        "bootstrap_url": bootstrap_url,
        "bootstrap_ubuntu": BOOTSTRAP_UBUNTU.format(url=bootstrap_url),
        "bootstrap_rhel": BOOTSTRAP_RHEL.format(url=bootstrap_url),
    })).data)
def retrieve(self, request, fsid, osd_id):
    """Return full detail for a single OSD.

    Combines several osd_map sync-object lookups: the OSD record itself,
    its CRUSH reweight, hosting server, pools, backend device metadata,
    valid commands, and its CRUSH ancestry.
    """
    osd = self.client.get_sync_object(
        fsid, 'osd_map', ['osds_by_id', int(osd_id)])
    crush_node = self.client.get_sync_object(
        fsid, 'osd_map', ['osd_tree_node_by_id', int(osd_id)])
    osd['reweight'] = float(crush_node['reweight'])
    # server_by_service returns (service_id, fqdn) pairs; take the fqdn
    # of the first (only) match.
    osd['server'] = self.client.server_by_service(
        [ServiceId(fsid, OSD, osd_id)])[0][1]
    pools = self.client.get_sync_object(
        fsid, 'osd_map', ['osd_pools', int(osd_id)])
    osd['pools'] = pools
    osd_metadata = self.client.get_sync_object(
        fsid, 'osd_map', ['metadata_by_id', int(osd_id)])
    # Backend device info is optional in the metadata; default to None
    # rather than failing the whole request.
    try:
        osd['backend_device_node'] = osd_metadata[
            'backend_filestore_dev_node']
    except KeyError:
        osd['backend_device_node'] = None
    try:
        osd['backend_partition_path'] = osd_metadata[
            'backend_filestore_partition_path']
    except KeyError:
        osd['backend_partition_path'] = None
    osd_commands = self.client.get_valid_commands(fsid, OSD, [int(osd_id)])
    osd.update(osd_commands[int(osd_id)])
    parent_map = self.client.get_sync_object(fsid, 'osd_map',
                                             ['parent_bucket_by_node_id'])
    osd.update(
        {'crush_node_ancestry': lookup_ancestry(osd['osd'], parent_map)})
    return Response(self.serializer_class(DataObject(osd)).data)
def get(self, request, fsid):
    """Return the OSD list plus a by-PG-state index for the cluster.

    Issues the four independent fetches concurrently (async=True returns
    handles awaited with .get()), then a second server lookup that
    depends on the OSD list.  Responds 202 while data is not yet
    available; supports filtering via the 'pg_states' query parameter.
    """
    # Fan out the four independent remote fetches before awaiting any.
    servers = self.client.server_list_cluster(fsid, async=True)
    osd_data = self.client.get_sync_object(fsid, OsdMap.str, async=True)
    osds = self.client.list(fsid, OSD, {}, async=True)
    pg_summary = self.client.get_sync_object(fsid, PgSummary.str, async=True)
    osds = osds.get()
    servers = servers.get()
    osd_data = osd_data.get()
    pg_summary = pg_summary.get()
    osd_map = OsdMap(None, osd_data)
    # This lookup needs the resolved OSD list, so it starts after the join.
    server_info = self.client.server_by_service(
        [ServiceId(fsid, OSD, str(osd['osd'])) for osd in osds], async=True)
    server_info = server_info.get()
    osds, osds_by_pg_state = self.generate(pg_summary, osd_map, server_info, servers)
    if not osds or not osds_by_pg_state:
        # Data not yet assembled — tell the client to retry.
        return Response([], status.HTTP_202_ACCEPTED)
    pg_states = request.QUERY_PARAMS.get('pg_states', None)
    if pg_states:
        osds = self._filter_by_pg_state(osds, pg_states, osds_by_pg_state)
    osd_list = DataObject({
        # 'osds': [DataObject({'osd': o}) for o in osds],
        'osds': osds,
        'osds_by_pg_state': osds_by_pg_state
    })
    return Response(OSDListSerializer(osd_list).data)
def get(self, request, fsid):
    """Return the cluster health report plus the cluster's update time.

    Fix: the original issued two identical ``get_cluster(fsid)`` calls
    to read the same 'update_time' value; fetch it once and reuse it.
    """
    health = self.client.get_sync_object(fsid, 'health')
    update_time = self.client.get_cluster(fsid)['update_time']
    return Response(ClusterHealthSerializer(DataObject({
        'report': health,
        'cluster_update_time': update_time,
        'cluster_update_time_unix': update_time,
    })).data)
def get(self, request, fsid):
    """Return cluster capacity/usage statistics sourced from graphite.

    Fix: the original queried graphite twice for 'total_used_bytes'
    (once for the presence check, once for the value); fetch it once.
    """
    def to_bytes(kb):
        # Old-style stats are in kB; None means "no data" and is preserved.
        if kb is not None:
            return kb * 1024
        else:
            return None

    def df_path(stat_name):
        return "ceph.cluster.{0}.df.{1}".format(fsid, stat_name)

    # Check for old vs. new stats (changed in Ceph Firefly)
    # see Ceph commit ee2dbdb0f5e54fe6f9c5999c032063b084424c4c
    #   Old:         New:
    #   total_used   total_used_bytes
    #   total_space  total_bytes
    #   total_avail  total_avail_bytes
    #
    # the old stats are in terms of kB (must multiply to get bytes);
    # the new ones are already in bytes.  Check new versions first
    # so that old relics in the database after upgrade stop being used.
    used_bytes = get_latest_graphite(df_path('total_used_bytes'))
    if used_bytes is not None:
        space = {
            'used_bytes': used_bytes,
            'capacity_bytes': get_latest_graphite(df_path('total_bytes')),
            'free_bytes': get_latest_graphite(df_path('total_avail_bytes'))
        }
    else:
        space = {
            'used_bytes': to_bytes(get_latest_graphite(df_path('total_used'))),
            'capacity_bytes': to_bytes(get_latest_graphite(df_path('total_space'))),
            'free_bytes': to_bytes(get_latest_graphite(df_path('total_avail')))
        }

    return Response(ClusterSpaceSerializer(DataObject({'space': space})).data)
def retrieve(self, request, fsid, mon_id):
    """Fetch a single monitor by name; raise Http404 when absent."""
    mon = next((m for m in self._get_mons(fsid) if m['name'] == mon_id), None)
    if mon is None:
        raise Http404("Mon '%s' not found" % mon_id)
    return Response(self.serializer_class(DataObject(mon)).data)
def retrieve(self, request, fsid, key):
    """Fetch a single ceph config setting by key; Http404 when unknown."""
    ceph_config = self._get_config(fsid)
    if key not in ceph_config:
        raise Http404("Key '%s' not found" % key)
    setting = DataObject({'key': key, 'value': ceph_config[key]})
    return Response(self.serializer_class(setting).data)
def list(self, request, fsid):
    """List every ceph config setting as key/value objects."""
    settings = []
    for key, value in self._get_config(fsid).items():
        settings.append(DataObject({'key': key, 'value': value}))
    return Response(self.serializer_class(settings, many=True).data)
def list(self, request, fsid):
    """List CRUSH rules, each annotated with its mapped OSD count."""
    rules = self.client.list(fsid, CRUSH_RULE, {})
    osds_by_rule_id = self.client.get_sync_object(fsid, 'osd_map', ['osds_by_rule_id'])

    annotated = []
    for rule in rules:
        rule['osd_count'] = len(osds_by_rule_id[rule['rule_id']])
        annotated.append(DataObject(rule))

    return Response(CrushRuleSerializer(annotated, many=True).data)
def list(self, request, fsid):
    """List OSDs, optionally filtered by pool id or an explicit id list.

    Query parameters:
      pool      -- integer pool id; 400 if not an integer.
      id__in[]  -- repeated OSD ids; 400 if any is not an integer.

    Remote fetches are issued concurrently (async=True handles awaited
    with .get()); the server/command lookups depend on the OSD list and
    are started after it resolves.
    """
    # Get data needed for filtering
    list_filter = {}

    if 'pool' in request.GET:
        try:
            pool_id = int(request.GET['pool'])
        except ValueError:
            return Response("Pool ID must be an integer",
                            status=status.HTTP_400_BAD_REQUEST)
        list_filter['pool'] = pool_id

    if 'id__in[]' in request.GET:
        try:
            ids = request.GET.getlist("id__in[]")
            list_filter['id__in'] = [int(i) for i in ids]
        except ValueError:
            return Response("Invalid OSD ID in list",
                            status=status.HTTP_400_BAD_REQUEST)

    # Get data
    osds = self.client.list(fsid, OSD, list_filter, async=True)
    osd_to_pools = self.client.get_sync_object(fsid, 'osd_map', ['osd_pools'], async=True)
    crush_nodes = self.client.get_sync_object(fsid, 'osd_map', ['osd_tree_node_by_id'], async=True)
    osds = osds.get()

    # Get data depending on OSD list
    server_info = self.client.server_by_service(
        [ServiceId(fsid, OSD, str(osd['osd'])) for osd in osds], async=True)
    osd_commands = self.client.get_valid_commands(fsid, OSD, [x['osd'] for x in osds], async=True)

    # Preparation complete, await all data to serialize result
    osd_to_pools = osd_to_pools.get()
    crush_nodes = crush_nodes.get()
    server_info = server_info.get()
    osd_commands = osd_commands.get()

    # Build OSD data objects
    for o in osds:
        # An OSD being in the OSD map does not guarantee its presence in the CRUSH
        # map, as "osd crush rm" and "osd rm" are separate operations.
        try:
            o.update({'reweight': float(crush_nodes[o['osd']]['reweight'])})
        except KeyError:
            log.warning("No CRUSH data available for OSD {0}".format(o['osd']))
            o.update({'reweight': 0.0})

    # server_info is ordered to match osds, so zip pairs each OSD with
    # its (service_id, fqdn) record.
    for o, (service_id, fqdn) in zip(osds, server_info):
        o['server'] = fqdn

    for o in osds:
        o['pools'] = osd_to_pools[o['osd']]

    for o in osds:
        o.update(osd_commands[o['osd']])

    return Response(self.serializer_class([DataObject(o) for o in osds], many=True).data)
def pool_object(self, pool_data, cluster):
    """Build a DataObject describing one pool, including its graphite usage stats."""
    cluster_id = cluster['id']
    pool_id = pool_data['pool']
    objects_path = "ceph.cluster.%s.pool.%s.num_objects" % (cluster_id, pool_id)
    bytes_path = "ceph.cluster.%s.pool.%s.num_bytes" % (cluster_id, pool_id)
    return DataObject({
        'id': pool_id,
        'cluster': cluster_id,
        'pool_id': pool_id,
        'name': pool_data['pool_name'],
        'quota_max_objects': pool_data['quota_max_objects'],
        'quota_max_bytes': pool_data['quota_max_bytes'],
        'used_objects': get_latest_graphite(objects_path),
        'used_bytes': get_latest_graphite(bytes_path)
    })
def get(self, request, fsid):
    """Return derived health counters; 202 while they are not yet computed."""
    counters = self.client.get_derived_object(fsid, 'counters')
    if not counters:
        return Response({}, status.HTTP_202_ACCEPTED)

    payload = DataObject({
        'counters': counters,
        'cluster_update_time': self.client.get_cluster(fsid)['update_time']
    })
    return Response(ClusterHealthCountersSerializer(payload).data)
def retrieve(self, request, fsid, osd_id):
    """Return one OSD with its reweight, server, pools and valid commands."""
    osd_key = int(osd_id)
    osd = self.client.get_sync_object(fsid, 'osd_map', ['osds_by_id', osd_key])
    crush_node = self.client.get_sync_object(fsid, 'osd_map', ['osd_tree_node_by_id', osd_key])
    osd['reweight'] = float(crush_node['reweight'])
    # (service_id, fqdn) pairs come back from server_by_service; keep the fqdn.
    osd['server'] = self.client.server_by_service([ServiceId(fsid, OSD, osd_id)])[0][1]
    osd['pools'] = self.client.get_sync_object(fsid, 'osd_map', ['osd_pools', osd_key])
    commands = self.client.get_valid_commands(fsid, OSD, [osd_key])
    osd.update(commands[osd_key])
    return Response(self.serializer_class(DataObject(osd)).data)
def get(self, request, fsid):
    """Return health counters generated from the OSD/MDS/mon/PG sync objects.

    The four fetches are issued concurrently (async=True handles awaited
    with .get()) before being folded together by self.generate().
    """
    # Fan out all four fetches before awaiting any of them.
    osd_data = self.client.get_sync_object(fsid, OsdMap.str, async=True)
    mds_data = self.client.get_sync_object(fsid, MdsMap.str, async=True)
    pg_summary = self.client.get_sync_object(fsid, PgSummary.str, async=True)
    mon_status = self.client.get_sync_object(fsid, MonStatus.str, async=True)
    mds_data = mds_data.get()
    osd_data = osd_data.get()
    pg_summary = pg_summary.get()
    mon_status = mon_status.get()
    counters = self.generate(osd_data, mds_data, mon_status, pg_summary)
    return Response(ClusterHealthCountersSerializer(DataObject({
        'counters': counters,
        'cluster_update_time': self.client.get_cluster(fsid)['update_time']
    })).data)
def get(self, request, fsid):
    """Return the derived OSD list and by-PG-state index; 202 until both exist.

    Supports filtering via the 'pg_states' query parameter.
    """
    osds = self.client.get_derived_object(fsid, 'osds')
    osds_by_pg_state = self.client.get_derived_object(fsid, 'osds_by_pg_state')

    if not osds or not osds_by_pg_state:
        # Derived data not computed yet — ask the client to retry.
        return Response([], status.HTTP_202_ACCEPTED)

    pg_states = request.QUERY_PARAMS.get('pg_states', None)
    if pg_states:
        osds = self._filter_by_pg_state(osds, pg_states, osds_by_pg_state)

    osd_list = DataObject({
        # 'osds': [DataObject({'osd': o}) for o in osds],
        'osds': osds,
        'osds_by_pg_state': osds_by_pg_state
    })
    return Response(OSDListSerializer(osd_list).data)
def get(self, request, fsid):
    """Return cluster space statistics (kB values from graphite, reported in bytes)."""
    def to_bytes(kb):
        # Graphite stats are in kB; None means "no data" and is passed through.
        return kb * 1024 if kb is not None else None

    def df_path(stat_name):
        return "ceph.cluster.{0}.df.{1}".format(fsid, stat_name)

    space = {
        'used_bytes': to_bytes(get_latest_graphite(df_path('total_used'))),
        'capacity_bytes': to_bytes(get_latest_graphite(df_path('total_space'))),
        'free_bytes': to_bytes(get_latest_graphite(df_path('total_avail')))
    }

    return Response(ClusterSpaceSerializer(DataObject({'space': space})).data)
def create(self, request, fsid):
    """Run a CLI command (ceph / rbd) against the cluster via a mon job.

    Bug fix: the 'radosgw-admin' branch interpolated ``result`` into its
    error message before ``result`` was ever assigned, so it raised
    NameError — which the blanket ``except Exception`` then reported as
    a generic "Error in cli command" instead of the intended message.
    The message no longer references the undefined name, and
    APIException is re-raised unwrapped so it reaches the client intact.
    """
    # Validate
    try:
        command = request.DATA['command']
    except KeyError:
        raise ParseError("'command' field is required")
    else:
        if not (isinstance(command, basestring) or isinstance(command, list)):
            raise ParseError("'command' must be a string or list")

    # Parse string commands to list
    if isinstance(command, basestring):
        command = shlex.split(command)

    name = self.client.get_cluster(fsid)['name']
    principle = command[0]
    try:
        if principle == 'ceph':
            command.pop(0)
            result = self.run_mon_job(fsid, "ceph.ceph_command", [name, command])
        elif principle == 'rbd':
            command.pop(0)
            result = self.run_mon_job(fsid, "ceph.rbd_command", [command])
        elif principle == 'radosgw-admin':
            raise APIException("radosgw-admin calls are not yet supported")
        else:
            # Try the default 'ceph' target to maintain backwards compatibility
            result = self.run_mon_job(fsid, "ceph.ceph_command", [name, command])
    except APIException:
        # Deliberate API errors (e.g. unsupported command) pass through.
        raise
    except Exception as ex:
        raise APIException("Error in cli command: %s" % ex)

    log.debug("CliViewSet: result = '%s'" % result)
    if not isinstance(result, dict):
        # Errors from salt like "module not available" come back as strings
        raise APIException("Remote error: %s" % str(result))

    return Response(self.serializer_class(DataObject(result)).data)
def create(self, request, fsid):
    """Run a ceph CLI command against the cluster via a mon job."""
    # Validate the request payload.
    try:
        command = request.DATA['command']
    except KeyError:
        raise ParseError("'command' field is required")

    if not isinstance(command, (basestring, list)):
        raise ParseError("'command' must be a string or list")

    # Normalize string commands into argv-style lists.
    if isinstance(command, basestring):
        command = shlex.split(command)

    name = self.client.get_cluster(fsid)['name']
    result = self.run_mon_job(fsid, "ceph.ceph_command", [name, command])
    log.debug("CliViewSet: result = '%s'" % result)

    if not isinstance(result, dict):
        # Errors from salt like "module not available" come back as strings
        raise APIException("Remote error: %s" % str(result))

    return Response(self.serializer_class(DataObject(result)).data)
def list(self, request):
    """List every known server."""
    servers = [DataObject(s) for s in self.client.server_list()]
    return Response(self.serializer_class(servers, many=True).data)
def retrieve(self, request, fqdn):
    """Fetch a single server by its FQDN."""
    server = self.client.server_get(fqdn)
    return Response(self.serializer_class(DataObject(server)).data)
def retrieve(self, request, fsid, fqdn):
    """Fetch one server within a cluster, with its interfaces resolved."""
    server = self.client.server_get_cluster(fqdn, fsid)
    # Populate network interface details in place before serializing.
    self._lookup_ifaces([server])
    return Response(self.serializer_class(DataObject(server)).data)
def list(self, request, fsid):
    """List the servers in a cluster, with their interfaces resolved."""
    servers = self.client.server_list_cluster(fsid)
    # Populate network interface details in place before serializing.
    self._lookup_ifaces(servers)
    wrapped = [DataObject(s) for s in servers]
    return Response(self.serializer_class(wrapped, many=True).data)
def retrieve(self, request, **kwargs):
    """Fetch a single user request by its id (from the URL kwargs)."""
    user_request = DataObject(self.client.get_request(kwargs['request_id']))
    return Response(self.serializer_class(user_request).data)
def cancel(self, request, request_id):
    """Cancel a user request and return its updated state."""
    cancelled = self.client.cancel_request(request_id)
    return Response(self.serializer_class(DataObject(cancelled)).data)
def retrieve(self, request, fsid, node_id):
    """Fetch a CRUSH node by id; 404 when it does not exist."""
    crush_node = self.client.get(fsid, CRUSH_NODE, int(node_id))
    if not crush_node:
        return Response(status=status.HTTP_404_NOT_FOUND)
    return Response(self.serializer_class(DataObject(crush_node)).data)
def list(self, request):
    """List every known cluster."""
    cluster_objects = []
    for cluster_data in self.client.list_clusters():
        cluster_objects.append(DataObject(cluster_data))
    return Response(ClusterSerializer(cluster_objects, many=True).data)
def retrieve(self, request, fsid, type_id):
    """Fetch a CRUSH type by id."""
    crush_type = self.client.get(fsid, CRUSH_TYPE, int(type_id))
    return Response(self.serializer_class(DataObject(crush_type)).data)