def test2(self):
    """BFS: unreachable target gives (None, None); reachable targets give (node, distance)."""
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    nodes = [0, 1]
    edges = [(0, 1), (1, 0)]
    # Node 5 does not exist in the graph at all.
    self.assertEqual(bfs(create_graph(nodes, edges), 0, 5), (None, None))
    self.assertEqual(bfs(create_graph(nodes, edges), 0, 1), (1, 1))
    nodes = [0, 1, 2, 3]
    edges = [(0, 1), (1, 0), (1, 3), (3, 2)]
    self.assertEqual(bfs(create_graph(nodes, edges), 0, 3), (3, 2))
def test2(self):
    """BFS: unreachable target gives (None, None); reachable targets give (node, distance)."""
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    nodes = [0, 1]
    edges = [(0, 1), (1, 0)]
    # Node 5 does not exist in the graph at all.
    self.assertEqual(bfs(create_graph(nodes, edges), 0, 5), (None, None))
    self.assertEqual(bfs(create_graph(nodes, edges), 0, 1), (1, 1))
    nodes = [0, 1, 2, 3]
    edges = [(0, 1), (1, 0), (1, 3), (3, 2)]
    self.assertEqual(bfs(create_graph(nodes, edges), 0, 3), (3, 2))
def test1(self):
    """BFS over a 7-node digraph: far target, self-target, adjacent target, missing node."""
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    nodes = [0, 1, 2, 3, 4, 5, 6]
    edges = [(0, 1), (1, 2), (0, 2), (3, 4), (3, 2), (2, 3), (2, 1), (4, 6), (6, 5)]
    graph = create_graph(nodes, edges)
    self.assertEqual(bfs(graph, 0, 5), (5, 6))
    self.assertEqual(bfs(graph, 0, 0), (0, 0))
    self.assertEqual(bfs(graph, 0, 1), (1, 1))
    # Node 7 is not in the graph.
    self.assertEqual(bfs(graph, 0, 7), (None, None))
def test1(self):
    """DFS: reachable targets return the node itself; an unknown node returns None."""
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    nodes = [0, 1, 2, 3, 4, 5, 6]
    edges = [(0, 1), (1, 2), (0, 2), (3, 4), (3, 2), (2, 3), (2, 1), (4, 6), (6, 5)]
    graph = create_graph(nodes, edges)
    self.assertEqual(dfs(graph, 0, 5), 5)
    self.assertEqual(dfs(graph, 0, 0), 0)
    self.assertEqual(dfs(graph, 0, 1), 1)
    # Node 7 is not in the graph.
    self.assertEqual(dfs(graph, 0, 7), None)
def test_sort7(self):
    """Two mutually-linked nodes: either ordering is accepted.

    NOTE(review): the input 0<->1 is cyclic, so the expectation pins this
    implementation's cycle behavior rather than a true topological order.
    """
    g = create_graph([0, 1], [(0, 1), (1, 0)])
    self.assertIn(topological_sort(g), [[1, 0], [0, 1]])
def test_sort6(self):
    """A small DAG (0->1, 0->2, 2->3) admits exactly two valid orderings."""
    g = create_graph([0, 1, 2, 3], [(0, 1), (0, 2), (2, 3)])
    self.assertIn(topological_sort(g), [[0, 1, 2, 3], [0, 2, 3, 1]])
def test_sort5(self):
    """A five-node chain-like DAG; the sort must be one of two orderings.

    The edge (1, 0) appears twice in the input, matching the original fixture.
    """
    g = create_graph([0, 1, 2, 3, 4], [(1, 0), (4, 3), (3, 2), (3, 1), (2, 1), (1, 0)])
    self.assertIn(topological_sort(g), [[4, 3, 2, 1, 0], [4, 3, 1, 0, 2]])
def test_sort4(self):
    """The empty graph sorts to the empty ordering."""
    g = create_graph([], [])
    self.assertEqual(topological_sort(g), [])
def test_sort2(self):
    """Three nodes with a 0<->2 cycle plus 2->1.

    NOTE(review): the input is cyclic, so the exact expected list pins this
    implementation's behavior on cyclic graphs, not a general contract.
    """
    g = create_graph([0, 1, 2], [(0, 2), (2, 0), (2, 1)])
    self.assertEqual(topological_sort(g), [0, 2, 1])
def test_sort1(self):
    """Seven-node graph pinned to one exact ordering.

    NOTE(review): edges include 1<->2 and 2<->3 cycles, so the expected list
    pins this implementation's cycle handling and tie-breaking.
    """
    g = create_graph(
        [0, 1, 2, 3, 4, 5, 6],
        [(0, 1), (1, 2), (0, 2), (3, 4), (3, 2), (2, 3), (2, 1), (4, 6), (6, 5)],
    )
    self.assertEqual(topological_sort(g), [0, 1, 2, 3, 4, 6, 5])
def main(text):
    """Slack-style 'jarvis ecs' command handler (Python 2).

    `text` is the tokenized command; the leading command name is discarded.
    Returns either a plain string reply or a list of Slack attachment dicts.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    indentation below is inferred from the control flow — verify against
    the original file.
    """
    regionList = ['us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1', 'ap-southeast-2']
    region = regionList[0]  # default region unless overridden by 'in <region>'
    cluster = ""
    ret = ""
    text.pop(0) # remove command name
    if len(text) == 0:
        return "You did not supply a query to run"
    if text[0] == 'help':
        return information()
    # Credentials stay None (boto3 default chain) unless a role is assumed below.
    awsKeyId = None
    awsSecretKey = None
    awsSessionToken = None
    tokens = []
    if 'in' in text:
        # Everything after the trailing 'in' names a region and/or an account.
        while text[-1] != 'in':
            tokens.append(text.pop())
        extractedRegion = re.search(r'[a-z]{2}-[a-z]+-[1-9]{1}', " ".join(tokens))
        if extractedRegion:
            region = extractedRegion.group()
            tokens.remove(region)
        text.remove('in')
        if len(tokens) > 0 and os.path.isfile("./aws.config"):
            with open("aws.config") as f:
                config = json.load(f)
                for account in config['Accounts']:
                    if account['AccountName'] in tokens:
                        tokens.remove(account['AccountName'])
                        # Assume the configured role to get temporary credentials.
                        sts_client = boto3.client('sts')
                        assumedRole = sts_client.assume_role(RoleArn=account['RoleArn'], RoleSessionName="AssumedRole")
                        awsKeyId = assumedRole['Credentials']['AccessKeyId']
                        awsSecretKey = assumedRole['Credentials']['SecretAccessKey']
                        awsSessionToken = assumedRole['Credentials']['SessionToken']
                if len(tokens) > 0:
                    # Leftover tokens matched neither a region nor an account name.
                    return "Could not resolve " + " ".join(tokens)
        elif len(tokens) > 0:
            return "Could not locate aws.config file"
    session = boto3.session.Session(aws_access_key_id=awsKeyId, aws_secret_access_key=awsSecretKey, aws_session_token=awsSessionToken)
    if 'regions' in text:
        if 'clusters' in text:
            # Report only the regions that contain at least one ECS cluster.
            # Iterate a copy because regions are removed during the loop.
            for region in regionList[:]:
                ecs = session.client("ecs", region_name=region)
                ret = ecs.list_clusters()
                if len(ecs.list_clusters()['clusterArns']) == 0:
                    regionList.remove(region)
            return " ".join(regionList)
    ecs = session.client("ecs", region_name=region)
    if 'list' in text:
        text.remove("list")
        ret = ""
        if 'clusters' in text:
            clusters = ecs.list_clusters()['clusterArns']
            if len(clusters) == 0:
                return "There are no clusters in this region: " + region
            for cluster in clusters:
                # The ARN tail after the last '/' is the cluster name.
                ret = ret + cluster.split('/')[-1] + '\n'
            return ret
        elif 'services' in text:
            text.remove("services")
            if len(text) == 0:
                return "I need a cluster name to complete the requested operation. To view the cluster names, use 'jarvis ecs list clusters <region>'"
            attachments = []
            services = []
            try:
                for service in ecs.list_services(cluster=text[0])['serviceArns']:
                    services.append(service)
            except Exception as e:
                print e
                return "Cluster " + text[0] + " was not found in region " + region
            if len(services) == 0:
                return "There doesn't seem to be any services in the cluster " + text[0]
            services_desc = ecs.describe_services(cluster=text[0],services=services)
            fields = []
            for service in services_desc['services']:
                # Image tag (after the ':') is reported as the deployed version.
                image = ecs.describe_task_definition(taskDefinition=service['taskDefinition'])
                imagename = image['taskDefinition']['containerDefinitions'][0]['image'].split(':')[-1]
                servicename = service['serviceName'].split('/')[-1]
                ret = ret + servicename + "\t\t" + imagename + "\n"
                fields.append({ 'title': servicename, 'value': 'Version: ' + imagename, 'short': True })
            attachments.append({ 'fallback': 'Service List', 'title': 'List of Services', 'fields': fields, 'color': 'good' })
            return attachments
    elif 'describe' in text or 'desc' in text:
        cw = session.client('cloudwatch', region_name=region)
        text.pop(0)  # drop 'describe'/'desc'
        createGraph = False
        if "graph" in text:
            text.remove("graph")
            createGraph = True
        if len(text) == 1:
            # One argument: describe a cluster.
            clustername = text[0]
            clusters = ecs.describe_clusters(clusters=[clustername])
            if clusters['failures']:
                return "I could not find the cluster specified: " + clustername
            attachments = []
            # NOTE(review): CPU uses datetime.today() while memory uses
            # datetime.utcnow() — inconsistent time bases; confirm intent.
            clustercpu = cw.get_metric_statistics(
                Namespace="AWS/ECS",
                MetricName="CPUUtilization",
                Dimensions=[{'Name': 'ClusterName', 'Value': clustername}],
                StartTime=datetime.today() - timedelta(days=1),
                EndTime=datetime.today(),
                Period=1800,
                Statistics=['Average'],
                Unit='Percent')
            clustermem = cw.get_metric_statistics(
                Namespace="AWS/ECS",
                MetricName="MemoryUtilization",
                Dimensions=[{'Name': 'ClusterName', 'Value': clustername}],
                StartTime=datetime.utcnow() - timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1800,
                Statistics=['Average'],
                Unit='Percent')
            cpudata = []
            memdata = []
            for datapoint in clustercpu['Datapoints']:
                cpudata.append([datapoint['Timestamp'], datapoint['Average']])
            for datapoint in clustermem['Datapoints']:
                memdata.append([datapoint['Timestamp'], datapoint['Average']])
            cpudata = sorted(cpudata, key=lambda x: x[0])
            memdata = sorted(memdata, key=lambda x: x[0])
            # NOTE(review): index [0] is the OLDEST datapoint after sorting —
            # looks like it should be [-1] for the most recent; confirm intent.
            clustercpu = math.ceil(cpudata[0][1])
            clustercpu = int(clustercpu)
            clustermem = math.ceil(memdata[0][1])
            clustermem = int(clustermem)
            clusters = clusters['clusters'][0]
            fields = [{
                'title': 'Registered Instances',
                'value': clusters['registeredContainerInstancesCount'],
                'short': True
            }, {
                'title': 'Active Services',
                'value': clusters['activeServicesCount'],
                'short': True
            }, {
                'title': 'Running Tasks',
                'value': clusters['runningTasksCount'],
                'short': True
            }, {
                'title': 'Pending Tasks',
                'value': clusters['pendingTasksCount'],
                'short': True
            }]
            if not createGraph:
                # Numeric usage fields are shown only when not graphing.
                fields.append({ 'title': 'Memory Usage', 'value': str(clustermem) + "%", 'short': True })
                fields.append({ 'title': 'CPU Usage', 'value': str(clustercpu) + "%", 'short': True })
            attachments.append({ 'fallback': 'Cluster: ' + clusters['clusterName'], 'title': 'Cluster ' + clusters['clusterName'], 'fields': fields, 'color': 'good' })
            if createGraph:
                attachments.append(common.create_graph('Graphing Cluster CPU and Memory Usage over 1 day', 'Cluster CPU', [i[1] for i in cpudata], 'Cluster Memory', [i[1] for i in memdata], [i[0].strftime("%I%M") for i in cpudata]))
            return attachments
        elif len(text) == 2:
            # Two arguments: describe a service (text[0]) within a cluster (text[1]).
            attachments = []
            # NOTE(review): unreachable under the `elif len(text) == 2` guard above.
            if len(text) < 2:
                return """I need a cluster name and a service name to complete the requested operation. 
To view the cluster names, use 'jarvis ecs list clusters <region>' To view the services, use 'jarvis ecs list services <cluster> <region>'"""
            matched = False
            matchedCount = 0
            servicename = text[0]
            clustername = text[1]
            try:
                services = ecs.list_services(cluster=text[1])['serviceArns']
            except Exception as e:
                print e
                return "Cluster " + text[1] + " was not found in region " + region
            for service in services:
                if text[0] in service and not matched:
                    # First substring match is described; later matches only bump the count.
                    matched = True
                    matchedCount+=1
                    try:
                        services_desc = ecs.describe_services(cluster=text[1],services=[service])
                    except Exception as e:
                        print e
                        return "Cluster " + text[0] + " was not found in region " + region
                    for service in services_desc['services']:
                        image = ecs.describe_task_definition(taskDefinition=service['taskDefinition'])
                        imagename = image['taskDefinition']['containerDefinitions'][0]['image'].split(':')[-1]
                        servicename = service['serviceName'].split('/')[-1]
                        attachments.append(
                            {
                                'fallback': 'Service ' + servicename,
                                'title': servicename,
                                'fields': [{
                                    'title': 'Deployment',
                                    'value': imagename,
                                    'short': True
                                }, {
                                    'title': 'Updated At',
                                    'value': service['deployments'][0]['updatedAt'].strftime("%Y-%m-%d %H:%M %z") ,
                                    'short': True
                                }, {
                                    'title': 'CPU Reservation',
                                    'value': str(image['taskDefinition']['containerDefinitions'][0]['cpu']) + " Units",
                                    'short': True
                                }, {
                                    'title': 'Memory Reservation',
                                    'value': str(image['taskDefinition']['containerDefinitions'][0]['memory']) + " Megabytes",
                                    'short': True
                                }, {
                                    'title': 'Running Tasks',
                                    'value': service['runningCount'],
                                    'short': True
                                }],
                                'color': 'good'
                            }
                        )
                elif text[0] in service and matched:
                    matchedCount+=1
            if matchedCount > 1:
                # Warn that the substring matched more than one service.
                attachments.append({ 'fallback': 'Service ' + servicename, 'title': str(matchedCount) + ' Services Matched', 'text': 'If this is not the service you asked for, you can list the services using jarvis ecs list services', 'color': 'warning' })
            if matched:
                if createGraph:
                    servicecpu = cw.get_metric_statistics(
                        Namespace="AWS/ECS",
                        MetricName="CPUUtilization",
                        Dimensions=[{'Name': 'ClusterName', 'Value': clustername}, {'Name': 'ServiceName', 'Value': servicename}],
                        StartTime=datetime.today() - timedelta(days=1),
                        EndTime=datetime.today(),
                        Period=1800,
                        Statistics=['Average'],
                        Unit='Percent')
                    cpudata = []
                    for datapoint in servicecpu['Datapoints']:
                        cpudata.append([datapoint['Timestamp'], datapoint['Average']])
                    servicemem = cw.get_metric_statistics(
                        Namespace="AWS/ECS",
                        MetricName="MemoryUtilization",
                        Dimensions=[{'Name': 'ClusterName', 'Value': clustername}, {'Name': 'ServiceName', 'Value': servicename}],
                        StartTime=datetime.today() - timedelta(days=1),
                        EndTime=datetime.today(),
                        Period=1800,
                        Statistics=['Average'],
                        Unit='Percent')
                    memdata = []
                    for datapoint in servicemem['Datapoints']:
                        memdata.append([datapoint['Timestamp'], datapoint['Average']])
                    memdata = sorted(memdata, key=lambda x: x[0])
                    cpudata = sorted(cpudata, key=lambda x: x[0])
                    attachments.append(common.create_graph("Graphing Service CPU and Memory Usage over 1 day", 'Service CPU', [i[1] for i in cpudata], 'Service Memory', [i[1] for i in memdata], [i[0].strftime("%I%M") for i in cpudata]))
                return attachments
            else:
                return "Could not find any services that include " + text[0]
    else:
        return "I did not understand the query. Please try again."
# NOTE(review): this `print(msg)` is the tail of a definition cut off above this
# chunk (presumably an error-reporting helper) — verify against the full file.
print(msg)

if __name__ == "__main__":
    # Create the workspace.
    print(f'creating workspace "{WORKSPACE}"...')
    result = create_workspace(WORKSPACE)
    response = result.json()
    if response["errors"]:
        print("errors encountered:\n", *response["errors"], sep="\n")
        sys.exit(1)
    else:
        print("created succesfully")
    # Upload the data tables.
    for table in ["members", "clubs", "membership"]:
        print(f"uploading {table}...")
        result = upload_csv(f"data/{table}.csv", WORKSPACE, table)
        if result.status_code != 200:
            # Any non-200 upload aborts the whole run.
            error_message(result.json())
            sys.exit(1)
    print("tables uploaded successfully")
    # Create the graph.
    print("creating graph...")
    graph = create_graph(WORKSPACE, "boston", ["members", "clubs"], "membership")
    pprint(graph.json())
def main(text):
    """Slack-style 'jarvis eb' Elastic Beanstalk command handler (Python 2).

    `text` is the tokenized command; the leading command name is discarded.
    Returns either a plain string reply or a list of Slack attachment dicts.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    indentation below is inferred from the control flow — verify against
    the original file.
    """
    regionList = ['us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1', 'ap-southeast-2']
    region = regionList[0]  # default region unless overridden by 'in <region>'
    text.pop(0) # remove command name
    if len(text) == 0:
        return "You did not supply a query to run"
    if text[0] == 'help':
        return information()
    # Credentials stay None (boto3 default chain) unless a role is assumed below.
    awsKeyId = None
    awsSecretKey = None
    awsSessionToken = None
    loadedApplications = None
    tokens = []
    config = None
    if os.path.isfile("./aws.config"):
        with open("aws.config") as f:
            config = json.load(f)
            if config.get('Applications'):
                loadedApplications = config['Applications']
    if 'in' in text:
        # Everything after the trailing 'in' names a region and/or an account.
        while text[-1] != 'in':
            tokens.append(text.pop())
        extractedRegion = re.search(r'[a-z]{2}-[a-z]+-[1-9]{1}', " ".join(tokens))
        if extractedRegion:
            region = extractedRegion.group()
            tokens.remove(region)
        text.remove('in')
        if len(tokens) > 0 and config != None:
            for account in config['Accounts']:
                if account['AccountName'] in tokens:
                    tokens.remove(account['AccountName'])
                    sts_client = boto3.client('sts')
                    assumedRole = sts_client.assume_role(RoleArn=account['RoleArn'], RoleSessionName="AssumedRole")
                    awsKeyId = assumedRole['Credentials']['AccessKeyId']
                    awsSecretKey = assumedRole['Credentials']['SecretAccessKey']
                    awsSessionToken = assumedRole['Credentials']['SessionToken']
                    # Load application settings for this account
                    if account.get('Applications'):
                        loadedApplications = account['Applications']
            if len(tokens) > 0:
                # Leftover tokens matched neither a region nor an account name.
                return "Could not resolve " + " ".join(tokens)
        elif len(tokens) > 0:
            return "Could not locate aws.config file"
    session = boto3.session.Session(aws_access_key_id=awsKeyId, aws_secret_access_key=awsSecretKey, aws_session_token=awsSessionToken)
    eb = session.client("elasticbeanstalk", region_name=region)
    if 'list' in text:
        text.remove("list")
        ret = ""
        if 'applications' in text or 'apps' in text:
            try:
                applications = eb.describe_applications()['Applications']
            except Exception as e:
                print e
                return "Could not describe applications in " + region
            if len(applications) == 0:
                return "There are no beanstalk applications in this region: " + region
            for app in applications:
                ret = ret + app['ApplicationName'] + "\n"
            return ret
        elif 'environments' in text or 'envs' in text:
            text.pop(0)  # drop 'environments'/'envs'
            application = None
            if len(text) > 0:
                application = " ".join(text)
            attachments = []
            environments = []
            try:
                if application == None:
                    # No application given: list every environment in the region.
                    for env in eb.describe_environments()['Environments']:
                        environments.append(env)
                else:
                    for env in eb.describe_environments(ApplicationName=application)['Environments']:
                        environments.append(env)
            except Exception as e:
                print e
                return "Application " + application + " was not found in region " + region
            if len(environments) == 0:
                return "There doesn't seem to be any environments in the application " + application
            fields = []
            activeLoadBalancer = None
            if application != None and loadedApplications != None:
                # Resolve which environment is 'live' by looking up the
                # application's Route53 alias record (optionally via an
                # assumed cross-account role).  Best-effort: errors ignored.
                for app in loadedApplications[region]:
                    if app['ApplicationName'].lower() == application.lower():
                        try:
                            if app.get('Account'):
                                for account in config['Accounts']:
                                    print "Looping"
                                    if account['AccountName'] == app['Account']:
                                        sts_client = boto3.client('sts')
                                        assumedRole = sts_client.assume_role(RoleArn=account['RoleArn'], RoleSessionName="AssumedRole")
                                        awsKeyId = assumedRole['Credentials']['AccessKeyId']
                                        awsSecretKey = assumedRole['Credentials']['SecretAccessKey']
                                        awsSessionToken = assumedRole['Credentials']['SessionToken']
                                        session_temp = boto3.session.Session(aws_access_key_id=awsKeyId, aws_secret_access_key=awsSecretKey, aws_session_token=awsSessionToken)
                                        r = session_temp.client('route53', region_name=region)
                            else:
                                r = session.client('route53', region_name=region)
                            records = r.list_resource_record_sets(HostedZoneId=app['HostedZoneId'], StartRecordName=app['DNSRecord'], StartRecordType='A')
                            activeLoadBalancer = records['ResourceRecordSets'][0]['AliasTarget']['DNSName']
                        except:
                            # NOTE(review): bare except silently swallows all errors here.
                            pass
            for env in environments:
                live = ""
                if activeLoadBalancer != None :
                    if env['EndpointURL'].lower() in activeLoadBalancer.lower():
                        live = ":live-environment:"
                # Map Beanstalk health/status to a Slack emoji marker.
                status = ":healthy-environment:"
                health = env['Health']
                if health == 'Yellow':
                    status = ":unstable-environment:"
                elif health == "Red":
                    status = ":failing-environment:"
                else:
                    if env['Status'] == "Launching":
                        status = ":rocket:"
                    elif env['Status'] == "Updating":
                        status = ":updating-environment:"
                    elif env['Status'] == "Terminating":
                        status = ":warning:"
                    elif env['Status'] == "Terminated":
                        status = ":x:"
                fields.append({ 'title': status + " " + env['EnvironmentName'] + " " + live, 'value': 'Version: ' + env['VersionLabel'], 'short': True })
            attachments.append({ 'fallback': 'Environment List', 'title': 'List of Environments', 'fields': fields, 'color': 'good' })
            return attachments
    elif 'describe' in text or 'desc' in text:
        text.pop(0)  # drop 'describe'/'desc'
        attachments = []
        if 'application' in text or 'app' in text:
            text.pop(0)  # drop 'application'/'app'
            application = " ".join(text)
            environments = []
            try:
                environments = eb.describe_environments(ApplicationName=application)['Environments']
            except Exception as e:
                print e
                return "Could not describe "+ " ".join(text) + " in " + region
            if len(environments) == 0:
                return "There are no beanstalk environments in this application: " + " ".join(text)
            fields = []
            activeLoadBalancer = None
            if application != None and loadedApplications != None:
                # Same Route53 'live environment' lookup as in the list branch.
                for app in loadedApplications[region]:
                    if app['ApplicationName'].lower() == application.lower():
                        try:
                            if app.get('Account'):
                                for account in config['Accounts']:
                                    print "Looping"
                                    if account['AccountName'] == app['Account']:
                                        sts_client = boto3.client('sts')
                                        assumedRole = sts_client.assume_role(RoleArn=account['RoleArn'], RoleSessionName="AssumedRole")
                                        awsKeyId = assumedRole['Credentials']['AccessKeyId']
                                        awsSecretKey = assumedRole['Credentials']['SecretAccessKey']
                                        awsSessionToken = assumedRole['Credentials']['SessionToken']
                                        session_temp = boto3.session.Session(aws_access_key_id=awsKeyId, aws_secret_access_key=awsSecretKey, aws_session_token=awsSessionToken)
                                        r = session_temp.client('route53', region_name=region)
                            else:
                                r = session.client('route53', region_name=region)
                            records = r.list_resource_record_sets(HostedZoneId=app['HostedZoneId'], StartRecordName=app['DNSRecord'], StartRecordType='A')
                            activeLoadBalancer = records['ResourceRecordSets'][0]['AliasTarget']['DNSName']
                        except:
                            pass
            for env in environments:
                live = ""
                if activeLoadBalancer != None :
                    if env['EndpointURL'].lower() in activeLoadBalancer.lower():
                        live = ":live-environment:"
                status = ":healthy-environment:"
                health = env['Health']
                if health == 'Yellow':
                    status = ":unstable-environment:"
                elif health == "Red":
                    status = ":failing-environment:"
                else:
                    if env['Status'] == "Launching":
                        status = ":rocket:"
                    elif env['Status'] == "Updating":
                        status = ":updating-environment:"
                    elif env['Status'] == "Terminating":
                        status = ":warning:"
                    elif env['Status'] == "Terminated":
                        status = ":x:"
                fields.append({ 'title': status + " " + env['EnvironmentName'] + " " + live, 'value': 'Version: ' + env['VersionLabel'], 'short': True })
            attachments.append({ 'fallback': 'Environment List', 'title': 'List of Environments', 'fields': fields, 'color': 'good' })
            return attachments
        elif 'environment' in text or 'env' in text:
            text.pop(0)  # drop 'environment'/'env'
            environment = text.pop(0)
            graph = False
            graphType = None
            if 'graph' in text:
                graph = True
                print len(text)
                print text.index('graph')
                # Optional token after 'graph' selects 'requests' or 'latency'.
                if len(text) > text.index('graph') + 1:
                    graphType = text[text.index('graph') + 1]
            attachments = []
            environments = []
            try:
                description = eb.describe_environments(EnvironmentNames=[environment])['Environments'][0]
            except Exception as e:
                print e
                return "Environment " + environment + " was not found in region " + region
            events = eb.describe_events(EnvironmentName=environment, MaxRecords=5, Severity="WARN", StartTime=datetime.today() - timedelta(days=1))['Events']
            resources = eb.describe_environment_resources(EnvironmentName=environment)['EnvironmentResources']
            instances = resources['Instances']
            loadBalancerName = None
            if len(resources['LoadBalancers']) > 0:
                loadBalancerName = resources['LoadBalancers'][0]['Name']
            fields = []
            version = description['VersionLabel']
            runningInstances = len(instances)
            fields.append({ 'title': 'Current Deployment', 'value': 'Version: ' + version, 'short': True })
            fields.append({ 'title': 'Running Instances', 'value': str(runningInstances) + ' Instances', 'short': True })
            fields.append({ 'title': 'Container Version', 'value': description['SolutionStackName'], 'short': True })
            fields.append({ 'title': 'Last Updated', 'value': description['DateUpdated'].strftime("%d/%m at %H:%M"), 'short': True })
            for event in events:
                fields.append({ 'title': event['Severity'] + " at " + event['EventDate'].strftime("%d/%m at %H:%M"), 'value': event['Message'], 'short': True })
            status = ":healthy-environment:"
            health = description['Health']
            if health == 'Yellow':
                status = ":unstable-environment:"
            elif health == "Red":
                status = ":failing-environment:"
            else:
                if description['Status'] == "Launching":
                    status = ":rocket:"
                elif description['Status'] == "Updating":
                    status = ":arrows_counterclockwise:"
                elif description['Status'] == "Terminating":
                    status = ":warning:"
                elif description['Status'] == "Terminated":
                    status = ":x:"
            attachments.append({ 'fallback': 'Environment List', 'title': status + " " + environment, 'fields': fields, 'color': 'good' })
            if graph != False and loadBalancerName != None:
                # Graph ELB request count and/or latency over the last day.
                cw = session.client('cloudwatch', region_name=region)
                reqdata = []
                latdata = []
                timedata = None
                if graphType == None or graphType == 'requests':
                    envrequests = cw.get_metric_statistics(
                        Namespace="AWS/ELB",
                        MetricName="RequestCount",
                        Dimensions=[{'Name': 'LoadBalancerName', 'Value': loadBalancerName}],
                        StartTime=datetime.today() - timedelta(days=1),
                        EndTime=datetime.today(),
                        Period=1800,
                        Statistics=['Sum'],
                        Unit='Count')
                    for datapoint in envrequests['Datapoints']:
                        reqdata.append([datapoint['Timestamp'], datapoint['Sum']])
                    reqdata = sorted(reqdata, key=lambda x: x[0])
                    timedata = [i[0].strftime("%I%M") for i in reqdata]
                if graphType == None or graphType == 'latency':
                    envlatency = cw.get_metric_statistics(
                        Namespace="AWS/ELB",
                        MetricName="Latency",
                        Dimensions=[{'Name': 'LoadBalancerName', 'Value': loadBalancerName}],
                        StartTime=datetime.utcnow() - timedelta(days=1),
                        EndTime=datetime.utcnow(),
                        Period=1800,
                        Statistics=['Average'],
                        Unit='Seconds')
                    for datapoint in envlatency['Datapoints']:
                        latdata.append([datapoint['Timestamp'], datapoint['Average']])
                    latdata = sorted(latdata, key=lambda x: x[0])
                    if timedata == None:
                        timedata = [i[0].strftime("%I%M") for i in latdata]
                attachments.append(common.create_graph('Graphing Environment Requests and Latency over 1 day', 'Requests (Count)', [i[1] for i in reqdata], 'Latency (Seconds)', [i[1] for i in latdata], timedata))
            return attachments
    elif 'unpause' in text or 'unp' in text:
        text.pop(0)  # drop 'unpause'/'unp'
        environment = " ".join(text)
        message = "Environment " + environment + " has been unpaused"
        try:
            resources = eb.describe_environment_resources(EnvironmentName=environment)['EnvironmentResources']
        except Exception as e:
            print e
            return "Environment " + environment + " was not found in region " + region
        autoscalerName = resources['AutoScalingGroups'][0]['Name']
        asClient = session.client('autoscaling', region_name=region)
        autoscaler = asClient.describe_auto_scaling_groups(AutoScalingGroupNames=[autoscalerName])['AutoScalingGroups'][0]
        # 'Paused' means the ASG was scaled to min=max=0.
        if autoscaler['MaxSize'] != 0 or autoscaler['MinSize'] != 0:
            return "Environment " + environment + " is not currently paused"
        autoscalerTags= autoscaler['Tags']
        try:
            # NOTE(review): min reads the 'pause:max-instances' tag and max reads
            # 'pause:min-instances' — the keys look swapped; confirm intent.
            minInstances = int(next((tag['Value'] for tag in autoscalerTags if tag['Key'] == 'pause:max-instances')))
            maxInstances = int(next((tag['Value'] for tag in autoscalerTags if tag['Key'] == 'pause:min-instances')))
        except Exception as e:
            minInstances = 1
            maxInstances = 1
            message += "\nTags were missing for instance size on the autoscaling group, max and min instances set to a default of 1"
        try:
            asClient.update_auto_scaling_group(
                AutoScalingGroupName=autoscalerName,
                MinSize=minInstances,
                MaxSize=maxInstances
            )
        except Exception as e:
            print e
            return "Unable to unpause environment " + environment
        return message
    else:
        return "I did not understand the query. Please try again."