def main():
  """Entry point for the Grok custom-metric forwarder.

  Parses command-line options, opens one socket connection to the Grok
  server, and forwards a sample from every Avogadro agent over it.
  """
  parser = OptionParser()
  parser.add_option("--server", dest="server", help="Grok server")
  AvogadroAgent.addParserOptions(parser)
  (options, _args) = parser.parse_args()

  grok = GrokSession(server=options.server)

  # Every agent goes through the same fetch-and-forward path, so iterate
  # over the collection instead of repeating the call per agent.
  agents = (AvogadroCPUTimesAgent,
            AvogadroMemoryAgent,
            AvogadroDiskReadBytesAgent,
            AvogadroDiskWriteBytesAgent,
            AvogadroDiskReadTimeAgent,
            AvogadroDiskWriteTimeAgent,
            AvogadroNetworkBytesSentAgent,
            AvogadroNetworkBytesReceivedAgent,
            AvogadroKeyCountAgent,
            AvogadroKeyDownDownAgent,
            AvogadroKeyUpDownAgent,
            AvogadroKeyHoldAgent)

  with grok.connect() as sock:
    for agent in agents:
      _fetchAndForward(sock, agent, options)
def run(server, apiKey, metricName, resource, numRecords): grok = GrokSession(server=server, apikey=apiKey) inc = 300 currentTimestamp = int(time.time()) - (numRecords * inc) with grok.connect() as sock: for i in xrange(numRecords): value = random.random() sock.sendall("%s %f %d\n" % (metricName, value, currentTimestamp)) currentTimestamp += inc if i % 100 == 99: print ".", sys.stdout.flush() if i == RECORDS_BEFORE_MONITOR: print print "Creating model...", sys.stdout.flush() # Monitor the metric modelSpec = {"metric": metricName, "datasource": "custom"} if resource is not None: modelSpec["resource"] = resource model = grok.createModel(modelSpec) print "done"
def setupHTMITAWSCredentials(publicDnsName, config):
  """ Using the HTM-IT CLI, connect to HTM-IT to obtain the API Key for the
  instance.

  :param publicDnsName: A reachable DNS entry for the HTM-IT server that
    needs to be configured

  :param config: A dict containing values for `AWS_ACCESS_KEY_ID` and
    `AWS_SECRET_ACCESS_KEY`

  :raises infrastructure.utilities.exceptions.HTMITConfigError: if
    it is unable to obtain the API Key

  :returns: The API Key of the HTM-IT server
  """
  awsCredentials = {
    "aws_access_key_id": config["AWS_ACCESS_KEY_ID"],
    "aws_secret_access_key": config["AWS_SECRET_ACCESS_KEY"],
  }

  htmIt = GrokSession(server="https://%s" % publicDnsName)
  htmIt.apikey = htmIt.verifyCredentials(**awsCredentials)

  # Guard clause: no key back from verifyCredentials means setup failed.
  if not htmIt.apikey:
    raise HTMITConfigError("Unable to obtain HTM-IT API Key")

  htmIt.updateSettings(settings=awsCredentials, section="aws")
  return htmIt.apikey
def handle(options, args):
  """ `grok export` handler.

  Exports models from the Grok server, serialized as pretty-printed JSON
  (default) or YAML (--yaml), to --output or stdout.
  """
  (server, apikey) = grokcli.getCommonArgs(parser, args)

  # Serializer choice: YAML when explicitly requested, JSON otherwise.
  if getattr(options, "useYaml", False):
    serialize = partial(yaml.safe_dump, default_flow_style=False)
  else:
    serialize = partial(json.dumps, indent=2)

  grok = GrokSession(server=server, apikey=apikey)

  destination = sys.stdout if options.output is None \
      else open(options.output, "w")

  exported = grok.exportModels()
  if exported:
    try:
      print >> destination, serialize(exported)
    finally:
      # Flush always; close only if we opened a real file above.
      destination.flush()
      if destination != sys.stdout:
        destination.close()
def sendVMDataToGrok(grokServer, grokApiKey, configFilePath, host, user, password, vmListFile): grok = GrokSession(server=grokServer, apikey=grokApiKey) metric = getVMdata(configFilePath, host, user, password, vmListFile) if metric is not None: try: response = grok.post(grok.server + "/_vmware/", json=metric, auth=grok.auth) print response.text sleep(10) if response.status_code == 200: print "Data sent to Grok." sys.exit(1) else: print "Error sending data to Grok." sys.exit(1) except requests.exceptions.ConnectionError as e: print e sys.exit(1) except Exception as e: print e sys.exit(1) else: print "Error connecting to ESX server." sys.exit(1)
def handle(options, args): """ `grok POST` handler. """ (endpoint, apikey) = grokcli.getCommonArgs(parser, args) if options.data: data = options.data else: # Pop data source off args try: data = args.pop(0) except IndexError: data = "" server = "%(scheme)s://%(netloc)s" % urlparse(endpoint)._asdict() grok = GrokSession(server=server, apikey=apikey) post = partial(grok.post, endpoint) response = None if data.strip() == "-" or not data: if select.select([ sys.stdin, ], [], [], 0.0)[0]: response = post(data=sys.stdin) else: response = post() elif data: with open(data, "r") as fp: response = post(data=fp) if isinstance(response, Response): print response.text sys.exit(not int(bool(response)))
def handle(options, args):
  """ `grok custom` handler.

  Dispatches `grok custom metrics {list,monitor,unmonitor}` subcommands.
  """
  try:
    resource = args.pop(0)
    action = args.pop(0)
  except IndexError:
    printHelpAndExit()

  (server, apikey) = grokcli.getCommonArgs(parser, args)
  grok = GrokSession(server=server, apikey=apikey)

  # Only the "metrics" resource exists for custom metrics.
  if resource != "metrics":
    printHelpAndExit()

  if action == "list":
    handleListRequest(grok, options.format)
  elif action == "monitor":
    if not options.id:
      printHelpAndExit()
    handleMonitorRequest(grok, options.id)
  elif action == "unmonitor":
    if not options.name:
      printHelpAndExit()
    handleUnmonitorRequest(grok, options.name)
  else:
    printHelpAndExit()
def handle(options, args):
  """ `grok import` handler.

  Imports metric definitions from --data, a positional file argument, or
  stdin ("-", the default).
  """
  (server, apikey) = grokcli.getCommonArgs(parser, args)

  # Data source precedence: --data flag, then next positional arg, then
  # stdin ("-").
  source = options.data
  if not source:
    source = args.pop(0) if args else "-"

  grok = GrokSession(server=server, apikey=apikey)

  if source.strip() == "-":
    # Import from stdin only if input is actually waiting there.
    if select.select([sys.stdin], [], [], 0.0)[0]:
      importMetricsFromFile(grok, sys.stdin, **vars(options))
    else:
      parser.print_help()
      sys.exit(1)
  elif source:
    with open(source, "r") as metricsFile:
      importMetricsFromFile(grok, metricsFile, **vars(options))
def sendDataToDatadog(datadogApiKey, grokServer, grokApiKey, numRecords, metricId): """Get data from Grok and send to Datadog. This gets metric data for the metric matching metricId and converts it into two datasets in the Datadog format: one for the values and one for the anomaly scores. """ # Configure the Datadog library dog_http_api.api_key = datadogApiKey grok = GrokSession(server=grokServer, apikey=grokApiKey) server, metricName = _getMetricServerAndName(grok, metricId) valuesData, anomaliesData = _getMetricData(grok, metricId, numRecords) # Hack to limit number of records for Grok instances prior to version 1.3 # that don't respect the limit parameter when getting metric data. valuesData = valuesData[-numRecords:] anomaliesData = anomaliesData[-numRecords:] print "Sending %i records for metric %s on server %s" % ( len(valuesData), metricName, server) response = dog_http_api.metric(metricName + ".value", valuesData, host=server) if response["status"] != "ok": print "Datadog upload failed with response:\n\n%r" % response response = dog_http_api.metric(metricName + ".anomalyScore", anomaliesData, host=server) if response["status"] != "ok": print "Datadog upload failed with response:\n\n%r" % response
def handle(options, args): """ `grok GET` handler. """ (endpoint, apikey) = grokcli.getCommonArgs(parser, args) server = "%(scheme)s://%(netloc)s" % urlparse(endpoint)._asdict() grok = GrokSession(server=server, apikey=apikey) response = grok.get(endpoint) if isinstance(response, Response): if hasattr(options, "useYaml"): if options.useYaml: print yaml.safe_dump(yaml.load(response.text), default_flow_style=False) else: print response.text sys.exit(not int(bool(response)))
def setupGrokAWSCredentials(publicDnsName, config):
  """ Using the Grok CLI, connect to Grok to obtain the API Key for the
  instance.

  :param publicDnsName: A reachable DNS entry for the Grok server that needs
    to be configured

  :param config: A dict containing values for `AWS_ACCESS_KEY_ID` and
    `AWS_SECRET_ACCESS_KEY`

  :raises: infrastructure.utilities.exceptions.GrokConfigError if it is
    unable to obtain the API Key

  :returns: The API Key of the Grok server
  """
  awsCredentials = {
    "aws_access_key_id": config["AWS_ACCESS_KEY_ID"],
    "aws_secret_access_key": config["AWS_SECRET_ACCESS_KEY"],
  }

  grok = GrokSession(server="https://%s" % publicDnsName)
  grok.apikey = grok.verifyCredentials(**awsCredentials)

  # Guard clause: no key back from verifyCredentials means setup failed.
  if not grok.apikey:
    raise GrokConfigError("Unable to obtain Grok API Key")

  grok.updateSettings(settings=awsCredentials, section="aws")
  return grok.apikey
def handle(options, args): """ `grok credentials` handler. Extracts credentials from command-line interface, updates Grok server using web API. """ try: server = args.pop(0) except IndexError: parser.print_help(sys.stderr) sys.exit(1) if not options.acceptEULA: print >> sys.stderr, ( "Please read and accept the product End User License Agreement " "(EULA) before proceeding.\n" "The EULA can be found here: " "https://aws.amazon.com/marketplace/agreement?asin=B00I18SNQ6\n\n" "To accept the EULA, re-run this command with the " "--accept-eula option.") sys.exit(1) credentials = { "aws_access_key_id": options.AWS_ACCESS_KEY_ID, "aws_secret_access_key": options.AWS_SECRET_ACCESS_KEY } if options.data: if options.data.strip() == "-": updateCredentialsFromFile(sys.stdin, credentials) elif options.data: with open(options.data, "r") as fp: updateCredentialsFromFile(fp, credentials) elif options.use_boto: updateCredentialsFromBoto(credentials) if not (credentials["aws_access_key_id"] and credentials["aws_secret_access_key"]): parser.print_help(sys.stderr) sys.exit(1) usertrack = { "optin": "false" if options.optOutOfDataCollection else "true" } grok = GrokSession(server=server) grok.apikey = grok.verifyCredentials(**credentials) grok.updateSettings(settings=credentials, section="aws") grok.updateSettings(settings=usertrack, section="usertrack") print grok.apikey
def handle(options, args):
  """ `grok metrics` handler.

  Dispatches `grok metrics {list,unmonitor}` subcommands.
  """
  if not args:
    printHelpAndExit()
  action = args.pop(0)

  (server, apikey) = grokcli.getCommonArgs(parser, args)
  grok = GrokSession(server=server, apikey=apikey)

  if action == "list":
    handleListRequest(grok, options.format,
                      region=options.region,
                      namespace=options.namespace,
                      instance=options.instance)
  elif action == "unmonitor":
    # An explicit metric id is required to unmonitor.
    if not options.id:
      printHelpAndExit()
    handleUnmonitorRequest(grok, options.id)
  else:
    printHelpAndExit()
of files open by all processes). """
# NOTE(review): the line above is the tail of a module docstring whose
# opening lines fall outside this chunk.

import datetime
import subprocess
import time

from grokcli.api import GrokSession

# Credentials live in sample_credentials.py; fail fast with a helpful
# message if that file is missing or still contains placeholder content.
try:
  from sample_credentials import (GROK_API_KEY,
                                  GROK_SERVER,
                                  METRIC_NAME)
except (SyntaxError, ImportError):
  print(
    "\nERROR: You must update Grok credentials in sample_credentials.py "
    "before you can continue.\n")
  import sys
  sys.exit(1)



if __name__ == "__main__":
  # Grok client
  grok = GrokSession(server=GROK_SERVER, apikey=GROK_API_KEY)

  # Add custom metric data: sample the system-wide open-file count via
  # `lsof | wc -l` and forward one sample over the custom-metric socket.
  with grok.connect() as sock:
    print 'Collecting "Open file descriptors" sample...',
    count = subprocess.check_output("/usr/sbin/lsof | /usr/bin/wc -l",
                                    shell=True).strip()
    print count
    print 'Sending sample to Grok Metric named "%s"' % METRIC_NAME
    # NOTE(review): utcnow() + time.mktime() mixes UTC wall time with the
    # local timezone's epoch conversion — verify if timestamps look shifted.
    ts = time.mktime(datetime.datetime.utcnow().timetuple())
    sock.sendall("%s %s %d\n" % (METRIC_NAME, count, ts))
    print "Done!"
from grokcli.api import GrokSession

# Credentials live in sample_credentials.py; bail out with guidance when it
# is absent or still contains placeholder (syntactically bad) values.
try:
  from sample_credentials import (GROK_API_KEY,
                                  GROK_SERVER,
                                  METRIC_NAME)
except (SyntaxError, ImportError):
  print ("\nERROR: You must update Grok credentials in sample_credentials.py "
         "before you can continue.\n")
  import sys
  sys.exit(1)



if __name__ == "__main__":
  # Grok client
  grok = GrokSession(server=GROK_SERVER, apikey=GROK_API_KEY)

  # Check metric created: scan the custom metrics list for METRIC_NAME.
  for metric in grok.listMetrics("custom"):
    if metric["name"] == METRIC_NAME:
      uid = metric["uid"]
      print 'Metric "%s" has uid: %s' % (METRIC_NAME, uid)
      break
  else:
    # for/else: runs only when the loop found no matching metric.
    print ('"%s" metric does not exist (yet). You can create the metric by'
           ' sending data to Grok. See "sample_collect_data.py" for a'
           " simple script that you can use to periodically sample open"
           " file descriptors, and report the results to the Grok Custom"
           " Metrics endpoint" % METRIC_NAME)

  # Send model creation request to create a model connected to the metric
# NOTE(review): this chunk begins mid-call — the parser.add_option(...)
# invocation for the scale option starts outside this view.
dest="scale", default=1)
opt, arg = parser.parse_args(sys.argv[1:])

path = os.path.dirname(os.path.abspath(__file__))

# A server and API key are required unless --noserver was given.
if ((opt.server == "" or opt.key == "") and not opt.noserver):
  print("burnrate_collect_data.py -s <server> -k <key>")
  sys.exit(2)

if opt.inputfile != "" and not opt.noserver:
  # Replay an existing CSV of (metricName, value, timestamp) rows to Grok.
  if opt.verbose:
    print "Sending existing data to grok..."
  with open(opt.inputfile, "rb") as inputFile:
    grok = GrokSession(server=opt.server, apikey=opt.key)
    with grok.connect() as sock:
      csvreader = csv.reader(inputFile)
      for row in csvreader:
        metricName = row[0]
        data = float(row[1])
        ts = row[2]
        # Values are scaled on the way out; timestamps coerced to ints.
        sock.sendall("%s %s %s\n" % (metricName,
                                     (data * int(opt.scale)),
                                     int(float(ts))))
else:
  # Fresh collection run: ensure the output CSV exists, then collect and
  # (unless --noserver) forward the current burn-rate metrics.
  if not os.path.isfile(opt.outputfile):
    open(opt.outputfile, "w").close()
  if not opt.noserver:
    sendMetricsToGrok(opt)
# NOTE(review): this chunk begins mid-call — the parser.add_option(...)
# invocation for the scale option starts outside this view.
dest="scale", default=1)
opt, arg = parser.parse_args(sys.argv[1:])

path = os.path.dirname(os.path.abspath(__file__))

# A server and API key are required unless --noserver was given.
if ((opt.server=="" or opt.key=="") and not opt.noserver):
  print ("burnrate_collect_data.py -s <server> -k <key>")
  sys.exit(2)

if opt.inputfile != "" and not opt.noserver:
  # Replay an existing CSV of (metricName, value, timestamp) rows to HTM-IT.
  if opt.verbose:
    print "Sending existing data to HTM-IT..."
  with open(opt.inputfile, "rb") as inputFile:
    session = GrokSession(server=opt.server, apikey=opt.key)
    with session.connect() as sock:
      csvreader = csv.reader(inputFile)
      for row in csvreader:
        metricName = row[0]
        data = float(row[1])
        ts = row[2]
        # Values are scaled on the way out; timestamps coerced to ints.
        sock.sendall("%s %s %s\n" % (metricName,
                                     (data*int(opt.scale)),
                                     int(float(ts))))
else:
  # Fresh collection run: ensure the output CSV exists, then collect and
  # (unless --noserver) forward the current burn-rate metrics.
  if not os.path.isfile(opt.outputfile):
    open(opt.outputfile, "w").close()
  if not opt.noserver:
    sendMetricsToHTMIT(opt)
  # NOTE(review): the body of the following else continues beyond this chunk.
  else:
import sys
import time

from grokcli.api import GrokSession

# Credentials live in sample_credentials.py; fail fast with guidance if the
# file is missing or still contains placeholder (syntactically bad) content.
try:
  from sample_credentials import (GROK_API_KEY,
                                  GROK_SERVER,
                                  METRIC_NAME)
except (SyntaxError, ImportError):
  print(
    "\nERROR: You must update Grok credentials in sample_credentials.py "
    "before you can continue.\n")
  import sys
  sys.exit(1)



if __name__ == "__main__":
  # Grok client
  grok = GrokSession(server=GROK_SERVER, apikey=GROK_API_KEY)

  # Check metric created: look METRIC_NAME up among the custom metrics.
  for metric in grok.listMetrics("custom"):
    if metric["name"] == METRIC_NAME:
      uid = metric["uid"]
      print 'Metric "%s" has uid: %s' % (METRIC_NAME, uid)
      break
  else:
    # for/else: runs only when the loop found no matching metric.
    print(
      '"%s" metric does not exist (yet). You can create the metric by'
      ' sending data to Grok. See "sample_collect_data.py" for a'
      " simple script that you can use to periodically sample open"
      " file descriptors, and report the results to the Grok Custom"
      " Metrics endpoint" % METRIC_NAME)
# NOTE(review): this chunk begins mid-call — the parser.add_option(...)
# invocation for the scale option starts outside this view.
dest="scale", default=1)
opt, arg = parser.parse_args(sys.argv[1:])

path = os.path.dirname(os.path.abspath(__file__))

# A server and API key are required unless --noserver was given.
if ((opt.server=="" or opt.key=="") and not opt.noserver):
  print ("burnrate_collect_data.py -s <server> -k <key>")
  sys.exit(2)

if opt.inputfile != "" and not opt.noserver:
  # Replay an existing CSV of (metricName, value, timestamp) rows to Grok.
  if opt.verbose:
    print "Sending existing data to grok..."
  with open(opt.inputfile, "rb") as inputFile:
    grok = GrokSession(server=opt.server, apikey=opt.key)
    with grok.connect() as sock:
      csvreader = csv.reader(inputFile)
      for row in csvreader:
        metricName = row[0]
        data = float(row[1])
        ts = row[2]
        # Values are scaled on the way out; timestamps coerced to ints.
        sock.sendall("%s %s %s\n" % (metricName,
                                     (data*int(opt.scale)),
                                     int(float(ts))))
else:
  # Fresh collection run: ensure the output CSV exists, then collect and
  # (unless --noserver) forward the current burn-rate metrics.
  if not os.path.isfile(opt.outputfile):
    open(opt.outputfile, "w").close()
  if not opt.noserver:
    sendMetricsToGrok(opt)
  # NOTE(review): the body of the following else continues beyond this chunk.
  else:
def sendMetricsToGrok(opt): """Collects data for burnrate metrics, writes it to a csv file and sends it to Grok. Collects the following metrics (toggled with CL flags): - Total hourly burnrate - Regional hourly burnrate (use -b) - Total number running instances - Regional number running instances (use -r) - Total number stopped instances - Regional number stopped instances (use -p) - Total number all instances - Regional number all instances (use -t) """ grok = GrokSession(server=opt.server, apikey=opt.key) regionalData = getDataByRegions() ts = time.mktime(datetime.datetime.utcnow().timetuple()) with open(opt.outputfile, "ab") as csvfile: csvwriter = csv.writer(csvfile) with grok.connect() as sock: # Regional burn rate calculation/send if opt.regionalBurnrates: if opt.verbose: print "Calculating / sending regional hourly burn rates" for region in regionalData.items(): sock.sendall("%s %s %d\n" % (opt.prefix + "." + region[0] + ".burnrate", (region[1]["burnrate"]*int(opt.scale)), ts)) csvwriter.writerow([(opt.prefix + "." + region[0] + ".burnrate"), str(region[1]["burnrate"]), str(ts)]) # Total burn rate calculation/send if opt.verbose: print "Calculating / sending total hourly burn rate" burnrate = sum(region[1]["burnrate"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.burnrate", (burnrate*int(opt.scale)), ts)) csvwriter.writerow([opt.prefix + ".total.burnrate", str(burnrate), str(ts)]) # Regional running instances calculate/send if opt.regionalRunning: if opt.verbose: print "Calculating / sending regional running instances" for region in regionalData.items(): sock.sendall("%s %s %d\n" % (opt.prefix + "." + region[0] + ".runningInstances", (region[1]["numberRunningInstances"] *int(opt.scale)), ts)) csvwriter.writerow([(opt.prefix + "." 
+ region[0] + ".runningInstances"), str(region[1]["numberRunningInstances"]), str(ts)]) # Total running instances calculate/send if opt.verbose: print "Calculating / sending total running instances" numRunning = sum(region[1]["numberRunningInstances"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.runningInstances", (numRunning*int(opt.scale)), ts)) csvwriter.writerow([opt.prefix + ".total.runningInstances", str(numRunning), str(ts)]) # Regional stopped instances calculate/send if opt.regionalStopped: if opt.verbose: print "Calculating / sending regional stopped instances" for region in regionalData.items(): sock.sendall("%s %s %d\n" % (opt.prefix + "." + region[0] + ".stoppedInstances", (region[1]["numberStoppedInstances"] *int(opt.scale)), ts)) csvwriter.writerow([(opt.prefix + "." + region[0] + ".stoppedInstances"), str(region[1]["numberStoppedInstances"]), str(ts)]) # Total stopped instances calculate/send if opt.verbose: print "Calculating / sending total stopped instances" numStopped = sum(region[1]["numberStoppedInstances"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.stoppedInstances", (numStopped*int(opt.scale)), ts)) csvwriter.writerow([opt.prefix + ".total.stoppedInstances", str(numStopped), str(ts)]) # Regional all instances calculate/send if opt.regionalAll: if opt.verbose: print "Calculating / sending regional all instances" for region in regionalData.items(): sock.sendall("%s %s %d\n" % (opt.prefix + "." + region[0] + ".allInstances", (region[1]["numberAllInstances"] *int(opt.scale)), ts)) csvwriter.writerow([(opt.prefix + "." 
+ region[0] + ".AllInstances"), str(region[1]["numberAllInstances"]), str(ts)]) # Total all instances calculate/send if opt.verbose: print "Calculating / sending total all instances" numAll = sum(region[1]["numberAllInstances"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.allInstances", (numAll*int(opt.scale)), ts)) csvwriter.writerow([opt.prefix + ".total.allInstances", str(numAll), str(ts)]) if opt.verbose: print "Done!"
def handle(options, args): """ `grok cloudwatch` handler. """ try: resource = args.pop(0) action = args.pop(0) except IndexError: printHelpAndExit() (server, apikey) = grokcli.getCommonArgs(parser, args) grok = GrokSession(server=server, apikey=apikey) if resource == "metrics": if action == "monitor": nativeMetric = { "datasource": "cloudwatch", "metric": options.metric, "namespace": options.namespace, "region": options.region } if hasattr(dimensions_callback, "dimensions"): nativeMetric["dimensions"] = dimensions_callback.dimensions else: printHelpAndExit() handleMetricsMonitorRequest(grok, nativeMetric) elif action == "unmonitor": if not (options.region and options.namespace and options.instance and options.metric): printHelpAndExit() handleMetricsUnmonitorRequest(grok, options.region, options.namespace, options.instance, options.metric) elif action == "list": handleMetricsListRequest(grok, options.format, region=options.region, namespace=options.namespace, metricName=options.metric, instance=options.instance) else: printHelpAndExit() elif resource == "instances": if action == "monitor": if not (options.region and options.namespace and options.instance): printHelpAndExit() handleInstanceMonitorRequest(grok, options.region, options.namespace, options.instance) elif action == "unmonitor": if not (options.region and options.namespace and options.instance): printHelpAndExit() handleInstanceUnmonitorRequest(grok, options.region, options.namespace, options.instance) elif action == "list": print "Not yet implemented" else: printHelpAndExit() else: printHelpAndExit()
# NOTE(review): this chunk begins mid-call — the parser.add_option(...)
# invocation for the scale option starts outside this view.
dest="scale", default=1)
opt, arg = parser.parse_args(sys.argv[1:])

path = os.path.dirname(os.path.abspath(__file__))

# A server and API key are required unless --noserver was given.
if ((opt.server == "" or opt.key == "") and not opt.noserver):
  print("burnrate_collect_data.py -s <server> -k <key>")
  sys.exit(2)

if opt.inputfile != "" and not opt.noserver:
  # Replay an existing CSV of (metricName, value, timestamp) rows to HTM-IT.
  if opt.verbose:
    print "Sending existing data to HTM-IT..."
  with open(opt.inputfile, "rb") as inputFile:
    session = GrokSession(server=opt.server, apikey=opt.key)
    with session.connect() as sock:
      csvreader = csv.reader(inputFile)
      for row in csvreader:
        metricName = row[0]
        data = float(row[1])
        ts = row[2]
        # Values are scaled on the way out; timestamps coerced to ints.
        sock.sendall("%s %s %s\n" % (metricName,
                                     (data * int(opt.scale)),
                                     int(float(ts))))
else:
  # Fresh collection run: ensure the output CSV exists, then collect and
  # (unless --noserver) forward the current burn-rate metrics.
  if not os.path.isfile(opt.outputfile):
    open(opt.outputfile, "w").close()
  if not opt.noserver:
    sendMetricsToHTMIT(opt)
import time

from grokcli.api import GrokSession

# Credentials live in sample_credentials.py; bail out with guidance when it
# is absent or still contains placeholder (syntactically bad) values.
try:
  from sample_credentials import GROK_API_KEY, GROK_SERVER, METRIC_NAME
except (SyntaxError, ImportError):
  print ("\nERROR: You must update Grok credentials in sample_credentials.py "
         "before you can continue.\n")
  import sys
  sys.exit(1)



if __name__ == "__main__":
  # Grok client
  grok = GrokSession(server=GROK_SERVER, apikey=GROK_API_KEY)

  # Check metric created: scan the custom metrics list for METRIC_NAME.
  for metric in grok.listMetrics("custom"):
    if metric["name"] == METRIC_NAME:
      uid = metric["uid"]
      print 'Metric "%s" has uid: %s' % (METRIC_NAME, uid)
      break
  else:
    # for/else: runs only when the loop found no matching metric.
    print (
      '"%s" metric does not exist (yet). You can create the metric by'
      ' sending data to Grok. See "sample-collect-data.py" for a'
      " simple script that you can use to periodically sample open"
      " file descriptors, and report the results to the Grok Custom"
      " Metrics endpoint" % METRIC_NAME
    )
def handle(options, args):
  """ `grok autostacks` handler.

  Dispatches `grok autostacks {stacks,metrics,instances} <action>`
  subcommands.  Stacks are addressed either by --id or by (--name,
  --region).
  """
  try:
    resource = args.pop(0)
    action = args.pop(0)
  except IndexError:
    printHelpAndExit()

  (server, apikey) = grokcli.getCommonArgs(parser, args)
  grok = GrokSession(server=server, apikey=apikey)

  if resource == "stacks":
    if action == "list":
      handleListRequest(grok, options.format)
    elif action == "create":
      # Creating (or previewing) a stack requires a region and a JSON
      # filter specification.
      if not (options.region and options.filters):
        printHelpAndExit()
      filters = json.loads(options.filters)
      if options.preview:
        handlePreviewRequest(grok, options.format,
                             options.region, filters)
      else:
        # A name is only required when actually creating (not previewing).
        if not options.name:
          printHelpAndExit()
        handleCreateRequest(grok, options.name,
                            options.region, filters)
    elif action == "delete":
      if not (options.id or (options.name and options.region)):
        printHelpAndExit()
      handleDeleteRequest(grok, options.id,
                          options.name, options.region)
    else:
      printHelpAndExit()
  elif resource == "metrics":
    if not (options.id or (options.name and options.region)):
      printHelpAndExit()
    if action == "list":
      handleMetricsListRequest(grok, options.id,
                               options.name, options.region,
                               options.format)
    elif action == "add":
      if not (options.metricNamespace and options.metricName):
        printHelpAndExit()
      handleMetricsAddRequest(grok, options.id,
                              options.name, options.region,
                              options.metricNamespace,
                              options.metricName)
    elif action == "remove":
      if not options.metricID:
        printHelpAndExit()
      handleMetricsRemoveRequest(grok, options.id,
                                 options.name, options.region,
                                 options.metricID)
    # NOTE(review): unlike the other resources, an unrecognized metrics
    # action falls through silently here — confirm whether a trailing
    # printHelpAndExit() was intended.
  elif resource == "instances":
    if not (options.id or (options.name and options.region)):
      printHelpAndExit()
    if action == "list":
      handleInstancesListRequest(grok, options.id,
                                 options.name, options.region,
                                 options.format)
    else:
      # NOTE(review): source indentation was ambiguous; this else is read
      # as pairing with `if action == "list"` (nearest construct) — an
      # unknown instances action shows help, an unknown resource is a
      # silent no-op.  Verify against the original file.
      printHelpAndExit()
def sendMetricsToHTMIT(opt): """Collects data for burnrate metrics, writes it to a csv file and sends it to HTMIT Collects the following metrics (toggled with CL flags): - Total hourly burnrate - Regional hourly burnrate (use -b) - Total number running instances - Regional number running instances (use -r) - Total number stopped instances - Regional number stopped instances (use -p) - Total number all instances - Regional number all instances (use -t) """ session = GrokSession(server=opt.server, apikey=opt.key) regionalData = getDataByRegions() ts = time.mktime(datetime.datetime.utcnow().timetuple()) with open(opt.outputfile, "ab") as csvfile: csvwriter = csv.writer(csvfile) with session.connect() as sock: # Regional burn rate calculation/send if opt.regionalBurnrates: if opt.verbose: print "Calculating / sending regional hourly burn rates" for region in regionalData.items(): sock.sendall( "%s %s %d\n" % (opt.prefix + "." + region[0] + ".burnrate", (region[1]["burnrate"] * int(opt.scale)), ts)) csvwriter.writerow([ (opt.prefix + "." + region[0] + ".burnrate"), str(region[1]["burnrate"]), str(ts) ]) # Total burn rate calculation/send if opt.verbose: print "Calculating / sending total hourly burn rate" burnrate = sum(region[1]["burnrate"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.burnrate", (burnrate * int(opt.scale)), ts)) csvwriter.writerow( [opt.prefix + ".total.burnrate", str(burnrate), str(ts)]) # Regional running instances calculate/send if opt.regionalRunning: if opt.verbose: print "Calculating / sending regional running instances" for region in regionalData.items(): sock.sendall( "%s %s %d\n" % (opt.prefix + "." + region[0] + ".runningInstances", (region[1]["numberRunningInstances"] * int(opt.scale)), ts)) csvwriter.writerow([ (opt.prefix + "." 
+ region[0] + ".runningInstances"), str(region[1]["numberRunningInstances"]), str(ts) ]) # Total running instances calculate/send if opt.verbose: print "Calculating / sending total running instances" numRunning = sum(region[1]["numberRunningInstances"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.runningInstances", (numRunning * int(opt.scale)), ts)) csvwriter.writerow([ opt.prefix + ".total.runningInstances", str(numRunning), str(ts) ]) # Regional stopped instances calculate/send if opt.regionalStopped: if opt.verbose: print "Calculating / sending regional stopped instances" for region in regionalData.items(): sock.sendall( "%s %s %d\n" % (opt.prefix + "." + region[0] + ".stoppedInstances", (region[1]["numberStoppedInstances"] * int(opt.scale)), ts)) csvwriter.writerow([ (opt.prefix + "." + region[0] + ".stoppedInstances"), str(region[1]["numberStoppedInstances"]), str(ts) ]) # Total stopped instances calculate/send if opt.verbose: print "Calculating / sending total stopped instances" numStopped = sum(region[1]["numberStoppedInstances"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.stoppedInstances", (numStopped * int(opt.scale)), ts)) csvwriter.writerow([ opt.prefix + ".total.stoppedInstances", str(numStopped), str(ts) ]) # Regional all instances calculate/send if opt.regionalAll: if opt.verbose: print "Calculating / sending regional all instances" for region in regionalData.items(): sock.sendall( "%s %s %d\n" % (opt.prefix + "." + region[0] + ".allInstances", (region[1]["numberAllInstances"] * int(opt.scale)), ts)) csvwriter.writerow([ (opt.prefix + "." 
+ region[0] + ".AllInstances"), str(region[1]["numberAllInstances"]), str(ts) ]) # Total all instances calculate/send if opt.verbose: print "Calculating / sending total all instances" numAll = sum(region[1]["numberAllInstances"] for region in regionalData.items()) sock.sendall("%s %s %d\n" % (opt.prefix + ".total.allInstances", (numAll * int(opt.scale)), ts)) csvwriter.writerow( [opt.prefix + ".total.allInstances", str(numAll), str(ts)]) if opt.verbose: print "Done!"