def _validateAutostackResults(self, metricID, instanceIDs, metricName):
  """Validate that AutoStack data is correct based on the metrics data.

  This method ensures that there are at least twelve records and validates
  that the most recent twelve are computed correctly, requiring that at least
  five of them aggregate data from multiple instances. By skipping the oldest
  records, we ensure that the data is still available in CloudWatch. The
  number five is arbitrary; we just need some records to validate.

  @param metricID the ID of the AutoStack metric to check
  @param instanceIDs a sequence of the IDs for the metrics that make up the
      AutoStack
  @param metricName the metric name to validate
  """
  # Get the AutoStack data
  response = requests.get("https://localhost/_models/%s/data" % metricID,
                          auth=(self.apiKey, ""), verify=False)
  self.assertSetEqual(set(response.json().keys()), set(["names", "data"]))
  names = response.json()["names"]
  self.assertSetEqual(set(["timestamp", "value", "anomaly_score", "rowid"]),
                      set(names))
  data = response.json()["data"]

  # Make sure that we have enough data to validate
  self.assertGreaterEqual(len(data), 12)
  recordsToValidate = dict((r[0], r[1]) for r in data[:12])

  # Get the start and end dates to pull CloudWatch data for
  start = datetime.datetime.strptime(min(recordsToValidate.keys()),
                                     "%Y-%m-%d %H:%M:%S")
  end = datetime.datetime.strptime(max(recordsToValidate.keys()),
                                   "%Y-%m-%d %H:%M:%S")

  # Collect the CloudWatch data for the timestamp range for all instances
  dataByTimestamp = collections.defaultdict(list)
  conn = boto.ec2.cloudwatch.connect_to_region(
      _REGION,
      aws_access_key_id=config.get("aws", "aws_access_key_id"),
      aws_secret_access_key=config.get("aws", "aws_secret_access_key"))
  for instanceID in instanceIDs:
    data = conn.get_metric_statistics(
        period=_PERIOD, start_time=start, end_time=end,
        metric_name=metricName, namespace="AWS/EC2",
        statistics=("Average",), dimensions={"InstanceId": instanceID})
    for record in data:
      dataByTimestamp[record["Timestamp"]].append(record["Average"])

  # Check that the manually averaged values match the AutoStack value
  numRecordsValidated = 0
  for timestamp, records in dataByTimestamp.iteritems():
    expectedAverage = sum(records) / len(records)
    actualAverage = recordsToValidate[timestamp.strftime("%Y-%m-%d %H:%M:%S")]
    self.assertAlmostEqual(
        expectedAverage, actualAverage, 4,
        "AutoStack value of %f differs from average from CloudWatch of %f "
        "at time %s" % (actualAverage, expectedAverage, timestamp))
    if len(records) >= 2:
      numRecordsValidated += 1

  # Make sure we checked enough records that had multiple instances
  self.assertGreaterEqual(numRecordsValidated, 5)
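# For reference, a minimal sketch of the cross-instance averaging this test
# re-derives from CloudWatch; the instance IDs and sample values below are
# hypothetical.
samples = {"i-aaaa1111": 40.0, "i-bbbb2222": 60.0}
# The AutoStack value at a timestamp should equal the plain mean of the
# per-instance averages, which is what the assertion above checks to 4 places.
expectedAverage = sum(samples.values()) / len(samples)
assert expectedAverage == 50.0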
def getBaseConnectionArgsDict():
  """Return a dictionary of common database connection arguments."""
  return {
    "host": config.get("repository", "host"),
    "port": config.getint("repository", "port"),
    "user": config.get("repository", "user"),
    "passwd": config.get("repository", "passwd"),
    "charset": "utf8",
    "use_unicode": True,
  }
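# A usage sketch, assuming the MySQLdb driver these arguments are shaped for;
# the database name "htm_it" is a placeholder, not something defined here.
import MySQLdb

conn = MySQLdb.connect(db="htm_it", **getBaseConnectionArgsDict())
try:
  cursor = conn.cursor()
  cursor.execute("SELECT 1")  # cheap liveness check
finally:
  conn.close()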
def getRDSInstances(region):
  """Simple generator for getting RDS instances.

  :param region: the region to get instances for
  :returns: a generator of :class:`boto.rds.dbinstance.DBInstance` instances
  """
  awsAccessKeyId = config.get("aws", "aws_access_key_id")
  awsSecretAccessKey = config.get("aws", "aws_secret_access_key")
  conn = boto.rds.connect_to_region(region_name=region,
                                    aws_access_key_id=awsAccessKeyId,
                                    aws_secret_access_key=awsSecretAccessKey)
  for instance in conn.get_all_dbinstances():
    yield instance
def getELBInstances(region):
  """Simple generator for getting ELB instances.

  :param region: the region to get instances for
  :returns: a generator of :class:`boto.ec2.elb.load_balancer.LoadBalancer`
      instances
  """
  awsAccessKeyId = config.get("aws", "aws_access_key_id")
  awsSecretAccessKey = config.get("aws", "aws_secret_access_key")
  conn = boto.ec2.elb.connect_to_region(
      region_name=region,
      aws_access_key_id=awsAccessKeyId,
      aws_secret_access_key=awsSecretAccessKey)
  for loadBalancer in conn.get_all_load_balancers():
    yield loadBalancer
def reset(offline=False):
  """Reset the htm-it database; upon successful completion, the necessary
  schema are created, but the tables are not populated.

  Note that the DROP/CREATE DATABASE statements always execute; the offline
  flag only affects the migration step.

  :param offline: False to execute the migration SQL commands; True to have
      the migration dump its SQL commands to stdout for offline mode or
      debugging
  """
  # Make sure we have the latest version of configuration
  config.loadConfig()
  dbName = config.get("repository", "db")

  resetDatabaseSQL = ("DROP DATABASE IF EXISTS %(database)s; "
                      "CREATE DATABASE %(database)s;" % {"database": dbName})
  statements = resetDatabaseSQL.split(";")

  engine = getUnaffiliatedEngine()
  with engine.connect() as connection:
    for s in statements:
      if s.strip():
        connection.execute(s)

  migrate(offline=offline)
def getEC2Instances(region, filters=None):
  """Simple generator for getting EC2 instances.

  :param region: the region to get instances for
  :param filters: Dictionary of filters
      See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
  :returns: a generator of :class:`boto.ec2.instance.Instance` instances
  """
  awsAccessKeyId = config.get("aws", "aws_access_key_id")
  awsSecretAccessKey = config.get("aws", "aws_secret_access_key")
  conn = boto.ec2.connect_to_region(region_name=region,
                                    aws_access_key_id=awsAccessKeyId,
                                    aws_secret_access_key=awsSecretAccessKey)
  for reservation in conn.get_all_reservations(filters=filters):
    for instance in reservation.instances:
      yield instance
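# A sketch of filtered iteration; the region and filter values are examples.
# Filter names follow the EC2 API, e.g. "instance-state-name".
for instance in getEC2Instances("us-west-2",
                                filters={"instance-state-name": ["running"]}):
  print instance.id, instance.state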
def sendWelcomeEmail(toAddress):
  subject = config.get("registration", "subject")
  bodyPath = resource_filename(
      htm.it.__name__,
      os.path.join("../conf", config.get("registration", "body")))
  with open(bodyPath) as bodyFile:  # Close the template file promptly
    body = bodyFile.read()
  body = body.replace("\n", "\r\n")  # Ensure windows newlines
  serverUrl = web.ctx.env['HTTP_HOST']
  templated = dict(apiKey=config.get("security", "apikey"),
                   serverUrl=serverUrl)

  try:
    ses_utils.sendEmail(subject=subject.format(**templated),
                        body=body.format(**templated),
                        toAddresses=[toAddress])
  except BotoServerError:
    raise web.badrequest("Invalid email address.")
def POST(self):
  url = config.get("usertrack", "wufoo_url")
  user = config.get("usertrack", "wufoo_user")

  fields = {
    "name": "Field6",
    "company": "Field3",
    "email": "Field4",
    "edition": "Field8",
    "version": "Field10",
    "build": "Field12",
    "accountId": "Field14",
    "uniqueServerId": "Field16",
    "instanceId": "Field18",
    "region": "Field19",
    "instanceType": "Field21",
  }

  payload = {}

  instanceData = instance_utils.getInstanceData() or {}
  for datum in instanceData:
    if datum in fields:
      payload[fields[datum]] = instanceData[datum]

  data = json.loads(web.data())

  if "email" in data and len(data["email"]):
    sendWelcomeEmail(data["email"])

  for datum in data:
    if datum in fields:  # Ignore unexpected keys rather than raising KeyError
      payload[fields[datum]] = data[datum]

  payload[fields["uniqueServerId"]] = config.get("usertrack", "htm_it_id")

  if config.getboolean("usertrack", "send_to_wufoo"):
    requests.post(url=url, data=payload, auth=(user, ""))

  for (fieldName, field) in fields.iteritems():
    log.info("{TAG:WUFOO.CUST.REG} %s=%s" % (fieldName, payload.get(field)))

  return web.HTTPError(status="204", data="No Content")
def setUpClass(cls):
  cls.autostacksApp = TestApp(autostacks_api.app.wsgifunc())
  cls.cloudwatchApp = TestApp(cloudwatch_api.app.wsgifunc())
  cls.customApp = TestApp(custom_api.app.wsgifunc())
  cls.instancesApp = TestApp(instances_api.app.wsgifunc())
  cls.modelApp = TestApp(models_api.app.wsgifunc())
  cls.headers = getDefaultHTTPHeaders(htm.it.app.config)
  cls.plaintextPort = config.getint("metric_listener", "plaintext_port")
  cls.apiKey = config.get("security", "apikey")
def getAutoScalingGroups(region, filters=None):
  """Simple generator for getting AutoScaling groups.

  :param region: the region to get groups for
  :param filters: Dictionary of filters; only the "tag:Name" key is honored
      See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
  :returns: a generator of :class:`boto.ec2.autoscale.group.AutoScalingGroup`
      instances
  """
  awsAccessKeyId = config.get("aws", "aws_access_key_id")
  awsSecretAccessKey = config.get("aws", "aws_secret_access_key")
  conn = boto.ec2.autoscale.connect_to_region(
      region_name=region,
      aws_access_key_id=awsAccessKeyId,
      aws_secret_access_key=awsSecretAccessKey)

  names = None
  if filters:
    names = filters.get("tag:Name", None)

  for group in conn.get_all_groups(names=names):
    yield group
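# A sketch; the region and group name are examples. Unlike the EC2 generator
# above, only the "tag:Name" filter is applied here.
for group in getAutoScalingGroups("us-west-2",
                                  filters={"tag:Name": ["web-asg"]}):
  print group.name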
def sendEmail(subject, body, toAddresses, region=None, sender=None):
  """Send an email with AWS SES.

  :param subject: Email subject header
  :param body: Email body
  :param toAddresses: Email recipient(s)
  :param region: AWS Region
  :param sender: Email sender
  :returns: SES Message ID or None
  """
  if region is None:
    region = config.get("aws", "default_region")

  if sender is None:
    sender = config.get("notifications", "sender")

  if region not in _SES_ENDPOINTS:
    raise ValueError("Region '%s' provided does not exist in known SES region "
                     "endpoints set." % region)

  regionInfo = RegionInfo(None, region, _SES_ENDPOINTS[region])

  awsAccessKeyId = config.get("notifications", "aws_access_key_id")
  awsSecretAccessKey = config.get("notifications", "aws_secret_access_key")
  if awsAccessKeyId == "" or awsSecretAccessKey == "":
    awsAccessKeyId = config.get("aws", "aws_access_key_id")
    awsSecretAccessKey = config.get("aws", "aws_secret_access_key")

  conn = boto.ses.SESConnection(region=regionInfo,
                                aws_access_key_id=awsAccessKeyId,
                                aws_secret_access_key=awsSecretAccessKey)

  # Send the email
  result = conn.send_email(source=sender,
                           subject=subject,
                           body=body,
                           to_addresses=toAddresses)

  # Return the SES message ID, or None if there is no message ID in the
  # response
  if "SendEmailResponse" in result:
    if "SendEmailResult" in result["SendEmailResponse"]:
      if "MessageId" in result["SendEmailResponse"]["SendEmailResult"]:
        return result["SendEmailResponse"]["SendEmailResult"]["MessageId"]
  return None
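# A usage sketch; the addresses are placeholders, and region/sender fall back
# to configured defaults when omitted.
messageId = sendEmail(subject="HTM-IT notification",
                      body="Test message body",
                      toAddresses=["ops@example.com"])
if messageId is None:
  print "SES response did not include a message ID"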
def getDatabaseNameFromConfig(cls):
  return config.get(cls.REPO_SECTION_NAME, cls.REPO_DATABASE_ATTR_NAME)
import os
import uuid

import htm.it
import htm.it.app
from htm.it.app import config, repository
from htmengine import utils
from htm.it.app.repository import schema
from htm.it.app.aws import s3_utils
from htm.it import htm_it_logging
from nta.utils.file_lock import ExclusiveFileLock

# Path format for writing Android logs.
_LOG_FORMAT_ANDROID = os.path.join(os.path.dirname(htm.it.__file__), "..",
                                   "logs", "android.log")

_LOGGER = htm_it_logging.getExtendedLogger(__name__)

_AWS_ACCESS_KEY = config.get("aws", "aws_access_key_id")
_AWS_SECRET_KEY = config.get("aws", "aws_secret_access_key")
_BUCKET = "htm.it.logs"
_MACHINE_ID = uuid.getnode()
_KEY_PREFIX = "metric_dumps/%s-" % _MACHINE_ID
_UPLOAD_ATTEMPTS = 3

urls = (
  # /_logging/android
  "/android", "AndroidHandler",
  # /_logging/feedback
  "/feedback", "FeedbackHandler",
)
def GET(self, region=None):  # pylint: disable=R0201
  """Get quick selection instance suggestions to monitor

  ::

      GET /_instances/suggestions

  Sample Output:

  ::

      {
        "suggested": [
          {"region": "us-west-2", "namespace": "AWS/EC2", "id": "i-12345678"},
          ... (up to 8 total suggested) ...
        ],
        "alternates": [
          {"region": "us-west-2", "namespace": "AWS/ELB",
           "id": "grok-docs-elb"},
          ... (up to 22 total alternatives) ...
        ]
      }
  """
  if region is None:
    region = config.get("aws", "default_region")

  # Fetch suggestions from each AWS service concurrently
  ec2Queue = Queue.Queue()
  ec2Thread = threading.Thread(
      target=ec2_utils.getSuggestedInstances,
      args=(region, ec2Queue, _AWS_INSTANCE_FETCHING_TIME_LIMIT))
  ec2Thread.start()

  rdsQueue = Queue.Queue()
  rdsThread = threading.Thread(
      target=rds_utils.getSuggestedInstances,
      args=(region, rdsQueue, _AWS_INSTANCE_FETCHING_TIME_LIMIT))
  rdsThread.start()

  elbQueue = Queue.Queue()
  elbThread = threading.Thread(
      target=elb_utils.getSuggestedInstances,
      args=(region, elbQueue, _AWS_INSTANCE_FETCHING_TIME_LIMIT))
  elbThread.start()

  asgQueue = Queue.Queue()
  asgThread = threading.Thread(
      target=asg_utils.getSuggestedInstances,
      args=(region, asgQueue, _AWS_INSTANCE_FETCHING_TIME_LIMIT))
  asgThread.start()

  response = {
    "suggested": [],
    "alternates": [],
  }

  # Wait for the threads to finish
  ec2Thread.join()
  rdsThread.join()
  elbThread.join()
  asgThread.join()

  # Round-robin across EC2 instances, RDS instances, load balancers, and
  # AutoScaling groups until we hit the cap or every queue is drained
  queues = (ec2Queue, rdsQueue, elbQueue, asgQueue)
  n = 0
  done = False
  while n < _MAX_SUGGESTED_INSTANCES_TOTAL and not done:
    done = True
    for queue in queues:
      try:
        instance = queue.get(block=False)
      except Queue.Empty:
        continue
      if n < _NUM_SUGGESTED_INSTANCES:
        response["suggested"].append(instance)
      else:
        response["alternates"].append(instance)
      done = False
      n += 1
      if n >= _MAX_SUGGESTED_INSTANCES_TOTAL:
        break

  return encodeJson(response)
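# A client-side sketch of calling this endpoint, mirroring how the tests talk
# to the local server (self-signed certificate, API key as basic-auth user).
import requests

response = requests.get("https://localhost/_instances/suggestions",
                        auth=(config.get("security", "apikey"), ""),
                        verify=False)
suggestions = response.json()
print len(suggestions["suggested"]), len(suggestions["alternates"])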
  print
  print "Creating model...",
  sys.stdout.flush()

  # Monitor the metric
  modelSpec = {"metric": metricName, "datasource": "custom"}
  if resource is not None:
    modelSpec["resource"] = resource
  model = htmIt.createModel(modelSpec)
  print "done"


if __name__ == "__main__":
  parser = optparse.OptionParser()
  parser.add_option("--server", default="https://localhost",
                    help="Server address of HTM-IT instance.")
  parser.add_option("--apiKey", default=None,
                    help="API key for the HTM-IT instance.")
  parser.add_option("--metricName", default=None,
                    help="Name to give the new metric.")
  parser.add_option("--resource", default=None,
                    help="Resource to associate the new metric with.")
  parser.add_option("-n", "--numRecords", default=DEFAULT_RECORDS, type="int",
                    help="The number of records to send.")
  options, _ = parser.parse_args()
  apiKey = options.apiKey or config.get("security", "apikey")
  metricName = options.metricName or "test.%f" % time.time()
  run(options.server, apiKey, metricName, options.resource,
      options.numRecords)
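# Programmatic equivalent of the CLI entry point above; the metric name is an
# example, and passing None for resource skips the modelSpec "resource" field.
run("https://localhost", config.get("security", "apikey"),
    "test.cpu.example", None, 100)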
import json
import logging
import optparse
import os
import re
import sys
import time

import boto

from htm.it.app import config

_LOGGER = logging.getLogger(__name__)

_USAGE = "Usage: upload_logs.py [--force] path/to/log/dir"

_AWS_ACCESS_KEY = config.get("aws", "aws_access_key_id")
_AWS_SECRET_KEY = config.get("aws", "aws_secret_access_key")
_BUCKET = "htm.it.logs"
_KEY_PREFIX = "upload/%s-" % config.get("usertrack", "htm_it_id")
_UPLOADED_DIR = "uploaded"
_UPLOAD_ATTEMPTS = 3
_ROTATION_FORMAT = r".*-\d{8}-\d{6}"


def run(logDir, force=False):
  """Upload logs from the specified directory if the customer is opted in."""
  # Ensure that logs are uploaded only for customers opted in.
  optedIn = config.get("usertrack", "optin") == "true"
  if not force and not optedIn:
    _LOGGER.info("Customer is not opted into log uploading, exiting.")
    sys.exit(0)

  conn = boto.connect_s3(_AWS_ACCESS_KEY, _AWS_SECRET_KEY)
  bucket = conn.get_bucket(_BUCKET, validate=False)
  _uploadLogs(bucket, logDir, _ROTATION_FORMAT)
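# A sketch of invoking the uploader directly; the log directory path is a
# placeholder, and force=True bypasses the opt-in check.
run("/path/to/log/dir", force=True)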
def setUp(self):
  self.apiKey = config.get("security", "apikey")