def get_LicenseExport(self):
    """Fetch hourly license consumption entries from the cluster API.

    Queries the last fully-completed hour (now aligned down to the hour
    boundary, then stepped back two hours) and returns the
    'environmentBillingEntries' list from the response.

    Raises:
        AuthException: the cluster returned 401 (bad/expired token or
            missing ServiceProviderAPI role).
        ConfigException: connection problems or a non-JSON response.
    """
    licenseExpURL = f"{self.url}/api/cluster/v2/license/consumption/hour"
    # Align "now" to an hour boundary, then step back two hours so the
    # queried hour is guaranteed to be complete on the server side.
    endTs = int(round(time.time() * 1000))
    endTs = int(endTs // 3600000 * 3600000)
    endTs = endTs - 2 * 3600000
    startTs = endTs - 3600000
    params = {'startTs': startTs, 'endTs': endTs}
    logger.info('The params are "%s"' % params)
    try:
        getlicenseExp = requests.get(licenseExpURL,
                                     headers=self.auth_header,
                                     params=params,
                                     verify=False)
        logger.info('get_LicenseExport response code was "%s"' %
                    getlicenseExp.status_code)
        if getlicenseExp.status_code == 401:
            error = getlicenseExp.json()['error']['message']
            raise AuthException(
                'Get License Export Error. Ensure your Cluster Token is correct, active and has the role ServiceProviderAPI. The message was - %s'
                % error)
        # Parse inside the try so a malformed body is reported as a
        # ConfigException; the original parsed after the try, so the
        # JSONDecodeError handler below could never fire for it.
        payload = getlicenseExp.json()
    except requests.exceptions.ConnectTimeout as ex:
        raise ConfigException('Timeout on connecting with "%s"' %
                              licenseExpURL) from ex
    except requests.exceptions.RequestException as ex:
        raise ConfigException('Unable to connect to "%s"' %
                              licenseExpURL) from ex
    except json.JSONDecodeError as ex:
        raise ConfigException('Server response from %s is not json' %
                              licenseExpURL) from ex
    return payload['environmentBillingEntries']
def _validate_device(config): hostname = config.get('hostname') group = config.get('group') device_type = config.get('device_type') # Check inputs are valid... if not hostname: raise ConfigException('Hostname must not be empty') if not group: raise ConfigException('Group must not be empty') if not device_type: raise ConfigException('Device Type must not be empty') # Default SNMP port port = 161 host = hostname # If entered as 127.0.0.1:1234, extract the ip and the port split_host = hostname.split(':') if len(split_host) > 1: host = split_host[0] port = split_host[1] try: port = int(port) except ValueError: raise ConfigException( 'Invalid port \'{}\' in hostname input: {}'.format(port, hostname)) device = {'host': host, 'port': port, 'type': device_type, 'group': group} return device
def _validate_authentication(config): snmp_version = config.get('snmp_version') snmp_user = config.get('snmp_user') auth_protocol = config.get('auth_protocol') auth_key = config.get('auth_key') priv_protocol = config.get('priv_protocol') priv_key = config.get('priv_key') # Check inputs are valid... if not snmp_version: raise ConfigException('SNMP Version must not be empty') if not snmp_user: raise ConfigException( 'SNMP User (v3) or Community String (v2) must not be empty') # Other values can be None... # V2 # Expected and ignored # V3 # Match SNMP security level # No Auth or Priv = noAuthNoPriv # Auth no Priv = authNoPriv # Auth + Priv = authPriv try: snmp_version = int(snmp_version) except ValueError: raise ConfigException( 'Expected a number for SNMP Version, received \'{}\''.format( snmp_version)) if snmp_version == 1: raise ConfigException('SNMP Version 1 not supported') elif not (snmp_version == 2 or snmp_version == 3): raise ConfigException( 'SNMP Version expected to be 2 or 3, received \'{}\''.format( snmp_version)) # TODO If auth or priv protocols don't match expected inputs... if auth_protocol: auth_protocol = auth_protocol.lower() if priv_protocol: priv_protocol = priv_protocol.lower() authentication = { 'version': snmp_version, 'user': snmp_user, 'auth': { 'protocol': auth_protocol, 'key': auth_key }, 'priv': { 'protocol': priv_protocol, 'key': priv_key } } return authentication
def initializeSockets(self, kwargs):
    """Discover HAProxy stats sockets from the running configuration.

    Determines the config file path from the monitored process' '-f'
    command line argument (falling back to /etc/haproxy/haproxy.cfg),
    collects every 'stats socket' address into self.url, verifies each
    socket is connectable, and cross-checks the socket count against the
    configured process count (nbproc).

    Raises:
        ConfigException: unreadable config file, unreachable socket,
            invalid nbproc value, socket/process count mismatch, or a
            socket not bound to a specific process while multiple
            processes are configured.
    """
    self.socketGain = True
    entity = kwargs["associated_entity"]
    entityCount = 1
    confDir = '/etc/haproxy/haproxy.cfg'
    for procInfo in entity.processes:
        if 'CmdLine' in procInfo.properties:
            snapCmdLine = procInfo.properties['CmdLine'].split(' ')
            if '-f' in snapCmdLine:
                confDir = snapCmdLine[snapCmdLine.index("-f") + 1]
                break
    self.url = []
    try:
        confFile = open(confDir, "r")
    except IOError:
        raise ConfigException(
            'Cannot open HAProxy configuration file. Please check file permissions. '
            + self._HELP_MORE_INFO)
    else:
        bindErr = False
        with confFile:
            for line in confFile:
                # Strip trailing comments and surrounding whitespace.
                line = line.partition('#')[0]
                line = line.strip()
                if "stats socket" in line:
                    if "process" not in line:
                        # Socket not pinned to a process; only an error
                        # when more than one process is configured.
                        bindErr = True
                    tmp = line.split(' ')
                    sockAddr = tmp[tmp.index("socket") + 1]
                    self.url.append(sockAddr)
                    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    try:
                        sock.connect(sockAddr)
                    except OSError:
                        raise ConfigException(
                            'Cannot connect to the stats socket. Please check file permissions. '
                            + self._HELP_MORE_INFO)
                    finally:
                        # Close even when connect() fails; the original
                        # leaked the descriptor on the error path.
                        sock.close()
                elif "nbproc" in line:
                    tmp = line.split(' ')
                    try:
                        entityCount = int(tmp[tmp.index("nbproc") + 1])
                    except (ValueError, IndexError):
                        # Was a bare except: — keep the catch specific so
                        # unrelated errors are not masked.
                        raise ConfigException(
                            'Cannot define number of processes - HAProxy configuration error. Please check HAProxy configuration file. '
                            + self._HELP_MORE_INFO)
    if entityCount != len(self.url):
        raise ConfigException(
            'Number of stats sockets and processes differ. There should be one socket for each HAProxy process. '
            + self._HELP_MORE_INFO)
    if bindErr and entityCount > 1:
        raise ConfigException(
            'Socket is not bound to any process. Please check HAProxy configuration file. '
            + self._HELP_MORE_INFO)
def query(self, **kwargs):
    """Read DirectAccess statistics from a JSON export and report them.

    Loads D:\\Software\\Scripts\\DirectAccessStats.JSON (stripping any
    UTF-8 BOM character from the text) and publishes each counter as an
    absolute metric against the 'DirectAccess' process group instance.

    Raises:
        ConfigException: the file cannot be opened or is not valid JSON.
    """
    pgi = self.find_single_process_group(pgi_name('DirectAccess'))
    pgi_id = pgi.group_instance_id
    json_file_path = "D:\\Software\\Scripts\\DirectAccessStats.JSON"
    stats = {}
    try:
        with open(json_file_path, encoding='utf-8') as json_file:
            # Remove any BOM characters before parsing.
            clean_file = json_file.read().replace('\ufeff', '')
            try:
                stats = json.loads(clean_file)
            except ValueError as ex:
                raise ConfigException('Unable to parse "%s" as JSON' %
                                      json_file_path) from ex
    except IOError as ex:
        raise ConfigException('Could not open file "%s"' %
                              json_file_path) from ex

    # Metric key -> field name in the JSON document, reported in order.
    metric_map = (
        ('total_connections', 'TotalConnections'),
        ('total_DA_connections', 'TotalDAConnections'),
        ('total_vpn_connections', 'TotalVpnConnections'),
        ('total_unique_users', 'TotalUniqueUsers'),
        ('max_concurrent_connections', 'MaxConcurrentConnections'),
        ('total_cumulative_connections', 'TotalCumulativeConnections'),
        ('total_bytes_in', 'TotalBytesIn'),
        ('total_bytes_out', 'TotalBytesOut'),
        ('total_bytes_in_out', 'TotalBytesInOut'),
    )
    for key, field in metric_map:
        self.results_builder.absolute(key=key,
                                      value=stats[field],
                                      entity_id=pgi_id)
def query(self, **kwargs):
    """Demo plugin query: build one group/device and report sample metrics.

    Validates the configured device name and group, creates the topology,
    then sends one plain absolute metric, one dimensioned absolute metric,
    one relative metric and two properties.

    Raises:
        ConfigException: no device name configured.
        AuthException: no device group configured.
    """
    config = kwargs['config']
    group_name = config['group']
    device_name = config['name']
    custom_property = config['custom_property']

    # Config validation: both fields are mandatory.
    if not device_name:
        raise ConfigException('Need a device name')
    if not group_name:
        # e.g. test device connection
        raise AuthException('Need a device group')

    # Topology: one group containing one device.
    group = self.topology_builder.create_group(group_name, group_name)
    device = group.create_device(device_name, device_name)

    # Collect (randomised demo) metric values.
    metric1 = random.randint(0, 101)
    metric2 = [
        {'dimension': {'Dimension': 'dimension{}'.format(i)},
         'value': random.randint(0, 101)}
        for i in range(5)
    ]
    metric3 = random.randint(0, 1001)

    # Send metrics to the server.
    device.absolute(key='metric1', value=metric1)
    for entry in metric2:
        device.absolute(key='metric2',
                        value=entry['value'],
                        dimensions=entry['dimension'])
    device.relative(key='metric3', value=metric3)

    # Send properties to the server.
    device.report_property('extension', 'random')
    if custom_property:
        device.report_property('custom', custom_property)
def readSocket(self, url):
    """Fetch HAProxy statistics over a stats socket.

    Sends 'show stat' to the socket at *url* and parses the CSV reply.

    Returns:
        list of dicts, one per CSV data row.

    Raises:
        ConfigException: the reply contained no CSV rows.
    """
    raw = self.getResponseFromSocket(url, 'show stat\n')
    rows = list(csv.DictReader(raw.splitlines()))
    if not rows:
        raise ConfigException(
            'Content from "%s" does not appear to be in haproxy stats format'
            % url)
    return rows
def getResponseFromHttp(self, url):
    """GET *url* using the configured auth/verify/timeout settings.

    Translates the usual requests failure modes into ConfigException and
    a 401 status into AuthException; any other response is returned
    as-is.
    """
    bad_url_errors = (requests.exceptions.MissingSchema,
                      requests.exceptions.InvalidSchema,
                      requests.exceptions.InvalidURL)
    try:
        response = requests.get(url,
                                auth=self.auth,
                                verify=self.verify,
                                timeout=self.timeout)
    except bad_url_errors as ex:
        raise ConfigException('URL: "%s" does not appear to be valid' %
                              url) from ex
    except requests.exceptions.ConnectTimeout as ex:
        raise ConfigException('Timeout on connecting with "%s"' % url) from ex
    except requests.exceptions.RequestException as ex:
        raise ConfigException('Unable to connect to "%s"' % url) from ex

    if response.status_code == requests.codes.UNAUTHORIZED:
        raise AuthException(response)
    return response
def getResponseFromSocket(self, url, command):
    """Send *command* to the unix stats socket at *url* and return the reply.

    Returns:
        The full textual response read from the socket.

    Raises:
        ConfigException: the socket cannot be reached or read.
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(url)
        sock.send(str.encode(command))
        file_handle = sock.makefile()
        try:
            response = file_handle.read()
        finally:
            file_handle.close()
    except OSError as ex:
        raise ConfigException(
            'Cannot connect to the stats socket. Please check file permissions. '
            + self._HELP_MORE_INFO) from ex
    finally:
        # Always release the descriptor; the original only closed it on
        # the success path, leaking it whenever connect()/send() failed.
        sock.close()
    return response
def query(self, **kwargs):
    """Entry point: parse instance filters and trigger metric collection.

    Only runs on Windows hosts; raises ConfigException elsewhere. The
    'monitored' and 'ignored' config values are comma-separated instance
    lists; an empty or absent value means no filter.
    """
    if os.name != "nt":
        raise ConfigException("The plugin can only run on Windows hosts")
    config = kwargs["config"]

    monitored = config.get("monitored", None)
    ignored = config.get("ignored", None)
    self.monitored_instances = monitored.split(",") if monitored else []
    self.ignored_instances = ignored.split(",") if ignored else []

    self.snapshot_entries = kwargs["process_snapshot"]
    self.collect_metrics()
def readHttp(self, url):
    """Fetch HAProxy statistics over HTTP.

    Appends the ';csv' suffix when missing, fetches the page via
    getResponseFromHttp and parses the CSV body.

    Returns:
        list of dicts, one per CSV data row.

    Raises:
        ConfigException: the body contained no CSV rows (plus whatever
            getResponseFromHttp raises for connection/auth problems).
    """
    # HAProxy serves the machine-readable stats at '<stats-url>;csv'.
    if not url.endswith(';csv'):
        url += ";csv"
    response = self.getResponseFromHttp(url)
    # Debug print() calls removed; they wrote to agent stdout on every poll.
    stats_csv_rows = [
        row for row in csv.DictReader(response.content.decode().splitlines())
    ]
    if len(stats_csv_rows) == 0:
        raise ConfigException(
            'Content from "%s" does not appear to be in haproxy stats format'
            % url)
    return stats_csv_rows
def query(self, **kwargs):
    """Scan a log file for configured metric patterns and report values.

    Metrics whose regex has no capture group default to 0 when they never
    match; metrics that do match report the captured value per line.

    Raises:
        ConfigException: the file cannot be read or processing fails.
    """
    config = kwargs["config"]
    filename = config["filename"]
    pginame = config["pginame"]

    # Only report a value of 0 for metrics with no capture group.
    unaccessed = [m.name for m in metrics if m.pattern.groups == 0]

    pgi = self.find_single_process_group(pgi_name(pginame))
    pgi_id = pgi.group_instance_id

    try:
        # 'with' guarantees the handle is closed; the original used a
        # locals() check in a finally block to the same effect.
        with open(filename, 'r') as inf:
            for line in inf:
                for metric in metrics:
                    matched, value = metric.match(line)
                    if not matched:
                        continue
                    self.results_builder.absolute(key=metric.name,
                                                  value=value,
                                                  entity_id=pgi_id)
                    if metric.name in unaccessed:
                        unaccessed.remove(metric.name)
    except Exception as ex:
        raise ConfigException(
            'Caught exception while trying to open file "%s": %s' %
            (filename, ex)) from ex

    # Report a value of 0 for those metrics that were not found.
    for n in unaccessed:
        self.results_builder.absolute(key=n, value=0, entity_id=pgi_id)
def get_environments(self):
    """Return all ENABLED environments from the cluster, keyed by id.

    Follows nextPageKey pagination when more than one page (pageSize
    1000) is returned.

    Returns:
        dict mapping environment id -> environment record.

    Raises:
        AuthException: the cluster returned 401.
        ConfigException: connection problems or a non-JSON response.
    """
    environmentsURL = f"{self.url}/api/cluster/v2/environments"
    params = {'filter': 'state(ENABLED)', 'pageSize': 1000}
    try:
        getEnvironments = requests.get(environmentsURL,
                                       headers=self.auth_header,
                                       params=params,
                                       verify=False)
        if getEnvironments.status_code == 401:
            error = getEnvironments.json()['error']['message']
            raise AuthException(
                'Get Environments Error. Ensure your Cluster Token is correct, active and has the role ServiceProviderAPI. The message was - %s'
                % error)
        # Decode once; the original re-parsed the body on every access.
        page = getEnvironments.json()
    except requests.exceptions.ConnectTimeout as ex:
        raise ConfigException('Timeout on connecting with "%s"' %
                              environmentsURL) from ex
    except requests.exceptions.RequestException as ex:
        raise ConfigException('Unable to connect to "%s"' %
                              environmentsURL) from ex
    except json.JSONDecodeError as ex:
        raise ConfigException('Server response from %s is not json' %
                              environmentsURL) from ex

    environments = page['environments']
    # .get() avoids the KeyError the original raised when a page carried
    # no nextPageKey.
    nextPageKey = page.get('nextPageKey')
    while nextPageKey:
        params = {'nextPageKey': nextPageKey}
        try:
            # verify=False kept consistent with the first request; the
            # original omitted it here, which broke pagination against
            # clusters using self-signed certificates.
            getEnvironments = requests.get(environmentsURL,
                                           headers=self.auth_header,
                                           params=params,
                                           verify=False)
            if getEnvironments.status_code == 401:
                error = getEnvironments.json()['error']['message']
                raise AuthException(
                    'Get Environments Error. Ensure your Cluster Token is correct, active and has the role ServiceProviderAPI. The message was - %s'
                    % error)
            page = getEnvironments.json()
        except requests.exceptions.ConnectTimeout as ex:
            raise ConfigException('Timeout on connecting with "%s"' %
                                  environmentsURL) from ex
        except requests.exceptions.RequestException as ex:
            raise ConfigException('Unable to connect to "%s"' %
                                  environmentsURL) from ex
        except json.JSONDecodeError as ex:
            raise ConfigException('Server response from %s is not json' %
                                  environmentsURL) from ex
        environments.extend(page['environments'])
        nextPageKey = page.get('nextPageKey')

    # Convert to dict keyed by environment id.
    envDict = {}
    for e in environments:
        envDict[e['id']] = e
    return envDict
def query(self, **kwargs) -> None:
    """Run a configured Oracle query and publish one metric per name.

    Reads connection settings and a '|'-separated list of metric names
    plus either Conditions or Pointers from the plugin config, executes
    the query through Consulta_Oracle, and reports each extracted value
    as an absolute 'Metric.Tx' datapoint with the metric name as a
    dimension.

    Raises:
        ConfigException: a mandatory setting is missing, or both/neither
            of Condition and Pointer are supplied.
    """
    config = kwargs.get("config")
    logger.info('Plugin Started')

    QueryName = config["QueryName"]
    server = config["server"]
    User = config["User"]
    Password = config["Password"]
    sql = config["Query"]
    Metric_Names = config["Metric_Names"]
    Condition = config["Condition"]
    Pointer = config["Pointer"]
    Port = config["Port"]
    SID = config["SID"]

    ########################################################################
    # Validate mandatory settings.
    if server == '':
        raise ConfigException(
            'Hostname or IP cannot be empty. You must provide this to connect to an ODBC'
        )
    if User == '' or Password == '':
        raise ConfigException(
            'You must provide a username and password to connect to this iSeries host'
        )
    if sql == '':
        raise ConfigException('You must provide a query')
    if Metric_Names == '':
        raise ConfigException('You must provide a list of metric names')
    # Exactly one of Condition / Pointer must be supplied.
    if Condition == '' and Pointer == '':
        raise ConfigException(
            'You must provide a condition or a pointer to extract metrics from Dataframe'
        )
    if Condition != '' and Pointer != '':
        raise ConfigException('Please provide just one: Condition or Pointer')

    Metric_Names = Metric_Names.split('|')
    Condition_array = Condition.split('|')
    Pointer_array = Pointer.split('|')

    group = self.topology_builder.create_group('Bizagi', 'Bizagi')
    df = Consulta_Oracle(sql, server, User, Password, Port, SID)
    device = group.create_device(f'Bizagi - {QueryName}',
                                 f'Bizagi - {QueryName}')

    use_condition = Pointer == ''
    for i, metric_name in enumerate(Metric_Names):
        # A parenthesised metric name is itself a pointer expression:
        # resolve it against the dataframe before reporting.
        if "(" in metric_name:
            metric_name = self.pointer(
                metric_name.replace('(', '').replace(')', ''), df)
            Metric_Names[i] = metric_name
        if use_condition:
            value = self.Condition_validation(Condition_array[i], df)
        else:
            value = self.pointer(Pointer_array[i], df)
        device.absolute("Metric.Tx", value, {"Metrics": metric_name})
def raise_nvml_error(self, error: NVMLError) -> None:
    """Log an NVML error at warning level, then surface it to the agent.

    The original NVML error is chained as the cause of the raised
    ConfigException.
    """
    message = nvml_error_to_string(error)
    self.logger.warning(message)
    raise ConfigException(f"unexpected NVML error: {str(error)}") from error
def query(self, **kwargs):
    """Collect HAProxy statistics from every configured endpoint.

    Tries to gather as much data as possible without raising errors - as
    long as a csv is received with enough information to extract proxy
    and server names, missing individual metrics are tolerated. This
    prevents unnecessary problems when different HAProxy versions provide
    different sets of metrics.

    Raises:
        ruxit.api.exceptions.AuthException: HAProxy responded with 401
        ruxit.api.exceptions.ConfigException: connection and data parsing
            errors
    """
    # measurements maps dimension name -> {metric json name: [values]};
    # the special 'no_dim' entry holds dimensionless metrics (idle).
    measurements = {'no_dim': {}}
    for url in self.url:
        if self.socketGain:
            stats_csv_rows = self.readSocket(url)
            measurements['no_dim'].setdefault('idle', []).append(
                self.getIdleFromSocket(url))
        else:
            stats_csv_rows = self.readHttp(url)
            measurements['no_dim'].setdefault('idle', []).append(
                self.getIdleFromHttp(url))

        for row in stats_csv_rows:
            try:
                pxname = row['# pxname']
                svname = row['svname']
                dimension_name = pxname
                if svname == 'FRONTEND':
                    metric_prefix = "fe_"
                    prefix_metrics = {
                        'ereq', 'scur', 'susage', 'req_rate', 'bin', 'bout'
                    }
                elif svname == 'BACKEND':
                    # Aggregated backend rows are skipped; the individual
                    # server rows are reported instead.
                    continue
                else:
                    metric_prefix = 'be_'
                    prefix_metrics = {
                        'econ', 'eresp', 'qcur', 'scur', 'susage', 'bin',
                        'bout', 'rtime'
                    }
            except KeyError as ex:
                raise ConfigException(
                    'Content from "%s" does not appear to be in haproxy stats format'
                    % url) from ex

            all_metrics = prefix_metrics.union(self._NO_PREFIX_METRICS)
            bucket = measurements.setdefault(dimension_name, {})
            for metric in all_metrics:
                if metric in row and metric in prefix_metrics:
                    metric_value = row[metric]
                    if metric_value == '' or metric_value is None:
                        continue
                    bucket.setdefault(metric_prefix + metric,
                                      []).append(metric_value)
                if metric in row and metric in self._NO_PREFIX_METRICS:
                    metric_value = row[metric]
                    if metric_value == '' or metric_value is None:
                        continue
                    bucket.setdefault(metric, []).append(metric_value)
                elif metric == 'susage':
                    # Session usage is derived: current sessions over the
                    # configured limit, as a percentage.
                    try:
                        scur = row['scur']
                        slim = row['slim']
                        if scur == '' or slim == '' or scur is None or slim is None:
                            continue
                        bucket.setdefault(metric_prefix + metric, []).append(
                            (float(scur) / float(slim)) * 100)
                    except KeyError as ex:
                        raise ConfigException(
                            'Content from "%s" does not appear to be in haproxy stats format'
                            % url) from ex

    # Aggregate and publish: averages for susage/idle, sums for the rest;
    # summed counters go out as relative results unless listed absolute.
    for dimension, metrics_for_dim in measurements.items():
        dimensions = {'service': dimension}
        for metric_key, metric_values in metrics_for_dim.items():
            total = 0
            for value in metric_values:
                total += float(value)
            if "susage" in metric_key:
                self.results_builder.add_absolute_result(
                    PluginMeasurement(dimensions=dimensions,
                                      key=metric_key,
                                      value=total / len(metric_values)))
            elif "idle" in metric_key:
                self.results_builder.add_absolute_result(
                    PluginMeasurement(key=metric_key,
                                      value=total / len(metric_values)))
            elif metric_key in self._ABSOLUTE_METRICS:
                self.results_builder.add_absolute_result(
                    PluginMeasurement(dimensions=dimensions,
                                      key=metric_key,
                                      value=int(total)))
            else:
                self.results_builder.add_relative_result(
                    PluginMeasurement(dimensions=dimensions,
                                      key=metric_key,
                                      value=int(total)))