def storages(self) -> None:
    """Saves all storages such as vsnaps."""
    table_name = 'storages'

    # verbose output is handled manually below, so suppress it here
    # to avoid printing the result twice
    result = MethodUtils.query_something(
        name=table_name,
        source_func=self.__api_queries.get_storages,
        deactivate_verbose=True
    )

    # enrich each storage row with its site name and usage figures
    for storage_row in result:
        storage_row['siteName'] = self.__system_methods.site_name_by_id(storage_row['site'])

        has_capacity_info = (
            'free' in storage_row and 'total' in storage_row
            and storage_row['free'] > 0 and storage_row['total'] > 0)
        if has_capacity_info:
            total = storage_row['total']
            free = storage_row['free']
            used = total - free
            storage_row['used'] = used
            storage_row['pct_free'] = free / total * 100
            storage_row['pct_used'] = used / total * 100

    if self.__verbose:
        MethodUtils.my_print(data=result)
    LOGGER.info(">> inserting storage info into database")
    self.__influx_client.insert_dicts_to_buffer(table_name=table_name, list_with_dicts=result)
def sla_dumps(self) -> None:
    """Captures and saves SLA subpolicys."""
    # fetch the SLA dumps, renaming API fields to influx column names
    sla_dump_list = MethodUtils.query_something(
        name="slaDumps",
        source_func=self.__api_queries.get_sla_dump,
        rename_tuples=[
            ("id", "slaId"),
            ("subpolicy", "slaDump"),
            ("name", "slaName")
        ]
    )

    LOGGER.info(">> updating slaStat table with dump of SLA subpolicy")
    table_name = "slaStats"
    # each dump updates the existing slaStats row matched by time + slaId
    for dump_row in sla_dump_list:
        time_stamp = dump_row[SppUtils.capture_time_key]
        sla_id = dump_row['slaId']
        self.__influx_client.update_row(
            table_name=table_name,
            tag_dic={},
            field_dic={'slaDump': dump_row['slaDump']},
            where_str="time = {}ms AND slaId = \'{}\'".format(
                time_stamp, sla_id)
        )
def get_all_jobs(self) -> None:
    """incrementally saves all stored jobsessions, even before first execution of sppmon"""
    job_list = MethodUtils.query_something(
        name="job list",
        source_func=self.__api_queries.get_job_list)

    for job in job_list:
        job_id = job.get("id", None)
        job_name = job.get("name", None)

        # truthiness guard: empty strings count as missing as well
        if not (job_id and job_name):
            ExceptionUtils.error_message(
                f"skipping, missing name or id for job {job}")
            continue

        LOGGER.info(
            ">> capturing Job information for Job \"{}\"".format(job_name))
        try:
            self.__job_by_id(job_id=job_id)
        except ValueError as error:
            # one broken job must not abort the whole capture run
            ExceptionUtils.exception_info(
                error=error,
                extra_message=f"error when getting jobs for {job_name}, skipping it")
            continue
def vms_per_sla(self) -> None:
    """Calculates the number of VM's per SLA. Hypervisors not supported yet."""
    LOGGER.info("> calculating number of VMs per SLA")
    sla_counts = MethodUtils.query_something(
        name="VMs per SLA",
        source_func=self.__api_queries.get_vms_per_sla)

    LOGGER.info(">> inserting number of VMs per SLA into DB")
    self.__influx_client.insert_dicts_to_buffer(
        table_name="slaStats",
        list_with_dicts=sla_counts)
def vadps(self) -> None:
    """Requests and stores all VAPD proxys from the SPP-server."""
    table_name = 'vadps'
    # verbose printing happens manually after the site names are added
    vadp_list = MethodUtils.query_something(
        name=table_name,
        source_func=self.__api_queries.get_vadps,
        rename_tuples=[
            ('id', 'vadpId'),
            ('displayName', 'vadpName')
        ],
        deactivate_verbose=True)

    # resolve each numeric site id into its readable name
    for vadp_row in vadp_list:
        vadp_row['siteName'] = self.__system_methods.site_name_by_id(vadp_row['siteId'])

    if self.__verbose:
        MethodUtils.my_print(vadp_list)
    self.__influx_client.insert_dicts_to_buffer(table_name=table_name, list_with_dicts=vadp_list)
def sppcatalog(self) -> None:
    """Saves the spp filesystem catalog information.

    Renames the server's internal catalog names into their user-facing
    equivalents before inserting into the `sppcatalog` table.
    """
    result = MethodUtils.query_something(
        name="sppcatalog stats",
        source_func=self.__api_queries.get_file_system,
        deactivate_verbose=True)

    # mapping of internal catalog names -> displayed names
    value_renames = {
        'Configuration': "Configuration",
        'Search': "File",
        'System': "System",
        'Catalog': "Recovery"
    }
    for row in result:
        # fall back to the original name instead of crashing with a
        # KeyError if the server reports an unknown catalog type
        row['name'] = value_renames.get(row['name'], row['name'])

    if self.__verbose:
        MethodUtils.my_print(result)
    self.__influx_client.insert_dicts_to_buffer("sppcatalog", result)
def cpuram(self) -> None:
    """Saves the cpu and ram usage of the spp system."""
    table_name = 'cpuram'
    # flatten the nested metric keys into influx-friendly column names
    rename_pairs = [
        ('data.size', 'dataSize'),
        ('data.util', 'dataUtil'),
        ('data2.size', 'data2Size'),
        ('data2.util', 'data2Util'),
        ('data3.size', 'data3Size'),
        ('data3.util', 'data3Util'),
        ('memory.size', 'memorySize'),
        ('memory.util', 'memoryUtil'),
    ]
    metrics = MethodUtils.query_something(
        name=table_name,
        rename_tuples=rename_pairs,
        source_func=self.__api_queries.get_server_metrics)
    self.__influx_client.insert_dicts_to_buffer(table_name=table_name, list_with_dicts=metrics)
def sites(self) -> None:
    """Collects all site informations including throttle rate.

    This information does not contain much statistic information.
    It should only be called if new sites were added or changed.
    """
    table_name = 'sites'
    result = MethodUtils.query_something(
        name=table_name,
        source_func=self.__api_queries.get_sites,
        rename_tuples=[('id', 'siteId'),
                       ('name', 'siteName'),
                       ('throttles', 'throttleRates')])

    # save results for renames later
    for row in result:
        self.__site_name_dict[row['siteId']] = row['siteName']
        # explicit None check: [] is falsy but must still be converted
        # into a str before insertion (matches the other sites() variant
        # in this file, which documents this conversion as required)
        if row['throttleRates'] is not None:
            row['throttleRates'] = str(row['throttleRates'])

    self.__influx_client.insert_dicts_to_buffer(table_name=table_name,
                                                list_with_dicts=result)
def store_vms(self) -> None:
    """Stores all vms stats individually

    Those are reused later to compute vm_stats
    """
    # verbose output is reduced to a count, so suppress the full dump
    all_vms_list = MethodUtils.query_something(
        name="all VMs",
        source_func=self.__api_queries.get_all_vms,
        rename_tuples=[
            ("properties.datacenter.name", "datacenterName")
        ],
        deactivate_verbose=True)

    if self.__verbose:
        LOGGER.info(f"found {len(all_vms_list)} vm's.")

    self.__influx_client.insert_dicts_to_buffer(
        table_name="vms",
        list_with_dicts=all_vms_list)
def sites(self) -> None:
    """Collects all site informations including throttle rate.

    This information does not contain much statistic information.
    It should only be called if new sites were added or changed.
    """
    # NOTE(review): a second `sites` definition with the same name exists
    # earlier in this file; in a single class this one shadows it — confirm
    # the earlier copy can be removed.
    table_name = 'sites'
    result = MethodUtils.query_something(
        name=table_name,
        source_func=self.__api_queries.get_sites,
        rename_tuples=[('id', 'siteId'),
                       ('name', 'siteName'),
                       ('throttles', 'throttleRates')])
    LOGGER.debug(f"sites: {result}")

    # save results into internal storage to avoid additional request for ID's
    # used instead of `site_name_by_id`
    for row in result:
        self.__site_name_dict[row['siteId']] = row['siteName']
        # explicit none check since [] also needs to be converted into str
        # (PEP 8: comparisons to None use `is not`, not `!=`)
        if row['throttleRates'] is not None:
            row['throttleRates'] = str(row['throttleRates'])

    self.__influx_client.insert_dicts_to_buffer(table_name=table_name,
                                                list_with_dicts=result)