def aws_test(request):
    aws = AWS.objects.get(user=request.user)
    AWS_ACCESS_KEY = aws.AWS_ACCESS_KEY
    AWS_ACCESS_SECRET = aws.AWS_SECRET_KEY

    aws_conn = boto.ec2.connect_to_region("us-west-2",
        aws_access_key_id=AWS_ACCESS_KEY,
        aws_secret_access_key=AWS_ACCESS_SECRET)
    reservations = aws_conn.get_all_reservations()

    cloudwatch = boto.connect_cloudwatch()
    metrics = cloudwatch.list_metrics()
    print 'AWS IMs metrics', metrics

    user = request.user
    user.last_login = datetime.datetime.now()
    user.save()

    for reservation in reservations:
        instances = reservation.instances
        for instance in instances:
            print 'id', instance.id
            print 'attributes', instance.__dict__

    return HttpResponse(True)
def aws_test(request):
    aws = AWS.objects.get(user=request.user)
    AWS_ACCESS_KEY = aws.AWS_ACCESS_KEY
    AWS_ACCESS_SECRET = aws.AWS_SECRET_KEY

    aws_conn = boto.ec2.connect_to_region("us-west-2",
        aws_access_key_id=AWS_ACCESS_KEY,
        aws_secret_access_key=AWS_ACCESS_SECRET)
    reservations = aws_conn.get_all_reservations()

    cloudwatch = boto.connect_cloudwatch()
    metrics = cloudwatch.list_metrics()
    print '-' * 100
    print 'metrics', metrics

    for reservation in reservations:
        instances = reservation.instances
        for instance in instances:
            print '-' * 100
            print 'id', instance.id
            #print 'attributes', instance.__dict__

    return HttpResponse(True)
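# Both aws_test() variants above read credentials from a per-user `AWS`
# Django model that is not shown in this file. A minimal sketch of such a
# model; the field names are only inferred from the attribute access above,
# so treat this as a hypothetical reconstruction, not the original:
from django.contrib.auth.models import User
from django.db import models

class AWS(models.Model):
    user = models.OneToOneField(User)
    AWS_ACCESS_KEY = models.CharField(max_length=128)
    AWS_SECRET_KEY = models.CharField(max_length=128)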
def task_cpu(value):
    '''get workers cpu utilization'''
    instances = []
    ins = ec_conn.get_all_instances(filters={"tag:task": value})
    for i in ins:
        for j in i.instances:
            l = cloudwatch.list_metrics(dimensions={'InstanceId': j.id},
                                        metric_name='CPUUtilization')
            k = l[0]
            cpu = k.query(datetime.datetime.now() - datetime.timedelta(hours=0, minutes=5),
                          datetime.datetime.now(),
                          'Maximum', 'Percent')
            print cpu
    return instances
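# task_cpu() relies on module-level `ec_conn` and `cloudwatch` connections
# that are not shown. A plausible setup, mirroring the connection calls used
# elsewhere in this file; the region and credential variable names here are
# assumptions:
import boto.ec2
import boto.ec2.cloudwatch

ec_conn = boto.ec2.connect_to_region('us-west-2',
    aws_access_key_id=AWS_ACCESS_KEY,
    aws_secret_access_key=AWS_SECRET_KEY)
cloudwatch = boto.ec2.cloudwatch.connect_to_region('us-west-2',
    aws_access_key_id=AWS_ACCESS_KEY,
    aws_secret_access_key=AWS_SECRET_KEY)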
def ajax_aws_graphs(request, instance_id, graph_type="all"):
    print '-- ajax_aws_graphs', request.user

    user = request.user
    profile = userprofile.objects.get(user=request.user)

    vms_cache = Cache.objects.get(user=user)
    vm_cache = vms_cache.vms_response
    vm_cache = base64.b64decode(vm_cache)

    try:
        vm_cache = pickle.loads(vm_cache)[instance_id]
    except:
        return HttpResponse("XXX " + instance_id)

    if vm_cache['user_id'] != request.user.id:
        return HttpResponse("access denied")

    aws_access_key = profile.aws_access_key
    aws_secret_key = profile.aws_secret_key
    aws_ec2_verified = profile.aws_ec2_verified

    ec2_region = vm_cache['instance']['region']['name']

    ec2conn = boto.ec2.connect_to_region(ec2_region,
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key)
    cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key)

    reservations = ec2conn.get_all_instances(instance_ids=[instance_id, ])
    instance = reservations[0].instances[0]

    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(days=10)

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance_id},
                                     metric_name="CPUUtilization")[0]
    cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent', period=3600)

    return HttpResponse("data " + instance_id + "=" + str(instance) + " ** " + graph_type.upper())
def create_alarm(instance_id):
    global alarmed
    if not alarmed:
        sns = boto.sns.connect_to_region('eu-west-1')
        sns.create_topic('DavidK_Network_Problem')  # Create a topic
        # Amazon Resource Name, uniquely identifies AWS resources
        arn = 'arn:aws:sns:eu-west-1:808146113457:DavidK_Network_Problem'
        sns.subscribe(arn, "email", "*****@*****.**")  # subscribe my email to the topic

        cloudwatch = boto.ec2.cloudwatch.connect_to_region('eu-west-1')
        # create a list holding the metric that the alarm will be based on
        metrics = cloudwatch.list_metrics(dimensions={'InstanceId': instance_id},
                                          metric_name='NetworkIn')
        # call to create the autoscaling group and to get the policy arn for the alarm
        as_policy_arn = create_auto_scaling(instance_id)
        # create the alarm
        alarm = metrics[0].create_alarm(name='Network_Usage_Alarm', comparison='>=',
                                        threshold=500000, period=60, evaluation_periods=1,
                                        statistic='Average', alarm_actions=[arn, as_policy_arn])
        if alarm:
            print '\n----------'
            print 'Alarm set'
            print '----------\n'
            alarmed = True
        else:
            print '\nAlarm has not been set\n'
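# The same alarm can also be created without first listing metrics, by
# building a MetricAlarm and pushing it with put_metric_alarm(). A sketch
# under the same region and threshold assumptions as create_alarm() above;
# this is an alternative, not the author's original code:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm

def create_network_alarm(instance_id, actions):
    cloudwatch = boto.ec2.cloudwatch.connect_to_region('eu-west-1')
    alarm = MetricAlarm(
        name='Network_Usage_Alarm', namespace='AWS/EC2',
        metric='NetworkIn', statistic='Average', comparison='>=',
        threshold=500000, period=60, evaluation_periods=1,
        dimensions={'InstanceId': instance_id},
        alarm_actions=actions)  # e.g. [sns_topic_arn, scaling_policy_arn]
    return cloudwatch.put_metric_alarm(alarm)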
def aws_vm_view(request, vm_name):
    print '-- aws_vm_view'
    print request.user

    user = request.user
    profile = userprofile.objects.get(user=request.user)
    user.last_login = datetime.datetime.now()
    user.save()

    aws_access_key = profile.aws_access_key
    aws_secret_key = profile.aws_secret_key

    ip = request.META['REMOTE_ADDR']
    _log_user_activity(profile, "click", "/aws/" + vm_name, "aws_vm_view", ip=ip)

    vms_cache = Cache.objects.get(user=user)
    vm_cache = vms_cache.vms_response
    vm_cache = base64.b64decode(vm_cache)

    try:
        vm_cache = pickle.loads(vm_cache)[vm_name]
    except:
        return HttpResponse("XXX " + vm_name)

    ec2_region = vm_cache['instance']['region']['name']

    if vm_cache['user_id'] != request.user.id:
        return HttpResponse("access denied")

    if vms_cache.vms_console_output_cache:
        console_output = vms_cache.vms_console_output_cache
    else:
        aws_access_key = profile.aws_access_key
        aws_secret_key = profile.aws_secret_key
        aws_ec2_verified = profile.aws_ec2_verified

        ec2conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
        reservations = ec2conn.get_all_instances(instance_ids=[vm_name, ])
        instance = reservations[0].instances[0]

        console_output = instance.get_console_output()
        console_output = console_output.output
        if not console_output:
            console_output = ""

        vms_cache.vms_console_output_cache = console_output
        vms_cache.save()

    end = datetime.datetime.now()
    start = end - datetime.timedelta(minutes=60)

    ec2conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
    cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': vm_cache['id']}, metric_name="NetworkIn")[0]
    networkin_datapoints = metric.query(start, end, 'Average', '')

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': vm_cache['id']}, metric_name="NetworkOut")[0]
    networkout_datapoints = metric.query(start, end, 'Average', '')

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': vm_cache['id']}, metric_name="DiskReadOps")[0]
    disk_readops_datapoints = metric.query(start, end, 'Average', '')

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': vm_cache['id']}, metric_name="DiskWriteOps")[0]
    disk_writeops_datapoints = metric.query(start, end, 'Average', '')

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': vm_cache['id']}, metric_name="DiskReadBytes")[0]
    disk_readbytes_datapoints = metric.query(start, end, 'Average', '')

    metric = cloudwatch.list_metrics(dimensions={'InstanceId': vm_cache['id']}, metric_name="DiskWriteBytes")[0]
    disk_writebytes_datapoints = metric.query(start, end, 'Average', '')

    networkin_datapoints = json.dumps(networkin_datapoints, default=date_handler)
    networkout_datapoints = json.dumps(networkout_datapoints, default=date_handler)
    disk_readops_datapoints = json.dumps(disk_readops_datapoints, default=date_handler)
    disk_writeops_datapoints = json.dumps(disk_writeops_datapoints, default=date_handler)
    disk_readbytes_datapoints = json.dumps(disk_readbytes_datapoints, default=date_handler)
    disk_writebytes_datapoints = json.dumps(disk_writebytes_datapoints, default=date_handler)

    return render_to_response('aws_vm.html', {
        'vm_name': vm_name,
        'vm_cache': vm_cache,
        'console_output': console_output,
        'networkin_datapoints': networkin_datapoints,
        'networkout_datapoints': networkout_datapoints,
        'disk_readops_datapoints': disk_readops_datapoints,
        'disk_writeops_datapoints': disk_writeops_datapoints,
        'disk_readbytes_datapoints': disk_readbytes_datapoints,
        'disk_writebytes_datapoints': disk_writebytes_datapoints,
    }, context_instance=RequestContext(request))
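# aws_vm_view() serializes the CloudWatch datapoints with
# json.dumps(..., default=date_handler). The datapoints contain
# datetime.datetime timestamps, which json cannot encode natively, so the
# handler converts them to ISO 8601 strings. The original helper is not
# shown in this file; a minimal version that would make those calls work:
def date_handler(obj):
    # called by json.dumps for any object it can't serialize itself
    return obj.isoformat() if hasattr(obj, 'isoformat') else obj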
def ajax_vms_refresh(request):
    user = request.user
    profile = userprofile.objects.get(user=request.user)

    print 'Refreshing', user, 'VMs cache..'

    aws_access_key = profile.aws_access_key
    aws_secret_key = profile.aws_secret_key
    aws_ec2_verified = profile.aws_ec2_verified

    virtual_machines = {}
    servers = mongo.servers.find({'secret': profile.secret, }).sort('_id', -1)

    vms_cache = Cache.objects.get_or_create(user=user)
    vms_cache = vms_cache[0]
    vms_cache.is_updating = True
    vms_cache.save()

    if servers.count():
        print 'servers count', servers.count()

    for server in servers:
        instance_metrics = {}
        instance_metrics['id'] = server['uuid']
        instance_metrics['user_id'] = request.user.id
        instance_metrics['provider'] = 'agent'
        instance_metrics['instance'] = {}
        instance_metrics['instance']['user_id'] = request.user.id
        instance_metrics['instance']['state'] = {}
        instance_metrics['instance']['tags'] = {}

        try:
            instance_metrics["instance"]['tags']['Name'] = server['name']
            #instance_metrics["instance"]['tags']['Name'] = ''.join(x for x in unicodedata.normalize('NFKD', server['hostname']) if x in string.ascii_letters).lower()
        except:
            instance_metrics["instance"]['tags']['Name'] = server['hostname'].replace('.', '-').lower()

        if 'tags' in server:
            instance_metrics["instance"]['tags']['tags'] = server['tags']

        uuid = server['uuid']

        if (datetime.datetime.now() - server['last_seen']).total_seconds() > 20:
            instance_metrics['instance']['state']['state'] = "Stopped"
            if (datetime.datetime.now() - server['last_seen']).total_seconds() > 1800:
                instance_metrics['instance']['state']['state'] = "Offline"
        else:
            instance_metrics['instance']['state']['state'] = "Running"

        cpu_usage_ = ""
        params = {'start': '2m-ago', 'm': 'sum:' + uuid.replace(':', '-') + '.sys.cpu'}
        tsdb = requests.get('http://' + settings.TSDB_HOST + ':' + str(settings.TSDB_PORT) + '/api/query', params=params)
        tsdb_response = json.loads(tsdb.text)

        try:
            tsdb_response = tsdb_response[0]['dps']
        except:
            tsdb_response = []

        c = 0
        for i in tsdb_response:
            cpu_usage_ += str(round(tsdb_response[i], 2))
            cpu_usage_ += ","
            if c == 60:
                break
            c += 1

        cpu_usage = cpu_usage_[:-1]

        cpu_usage_reversed = ""
        cpu_usage_array_reversed = []
        for i in cpu_usage.split(','):
            cpu_usage_array_reversed.insert(0, i)
        for i in cpu_usage_array_reversed:
            cpu_usage_reversed += str(i) + ","
        cpu_usage_reversed = cpu_usage_reversed[:-1]

        instance_metrics['cpu_utilization_datapoints'] = cpu_usage_reversed
        virtual_machines[server['uuid'].replace(':', '-')] = instance_metrics

    #print 'virtual_machines', virtual_machines

    if aws_ec2_verified:
        aws_regions = profile.aws_enabled_regions.split(',')
        print 'AWS regions', aws_regions

        for ec2_region in aws_regions:
            if ec2_region:
                ec2conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
                cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)

                try:
                    reservations = ec2conn.get_all_instances()
                except:
                    vms_cache.is_updating = False
                    vms_cache.vms_response = ""
                    vms_cache.save()
                    print vms_cache.is_updating
                    print vms_cache.vms_response
                    #return HttpResponse("access denied")
                    continue  # skip this region; otherwise `reservations` would be undefined below

                instances = [i for r in reservations for i in r.instances]

                for instance in instances:
                    if not instance:
                        continue

                    instance_metrics = {}
                    instance_metrics['instance'] = {}

                    print '** instance', instance.id, instance.private_ip_address

                    volumes = []
                    for volume in ec2conn.get_all_volumes(filters={'attachment.instance-id': instance.id}):
                        volumes.append([volume.id, volume.iops, volume.size, ])

                    groups = []
                    for group in instance.__dict__['groups']:
                        groups.append([group.id, group.name, ])

                    instance_metrics['id'] = instance.id
                    instance_metrics['user_id'] = request.user.id
                    instance_metrics['provider'] = "aws-ec2"
                    instance_metrics['instance']['placement'] = instance.placement
                    instance_metrics['instance']['user_id'] = request.user.id
                    instance_metrics['instance']['groups'] = groups
                    instance_metrics['instance']['block_device_mapping'] = volumes
                    instance_metrics['instance']['architecture'] = instance.architecture
                    instance_metrics['instance']['client_token'] = instance.client_token
                    instance_metrics['instance']['dns_name'] = instance.dns_name
                    instance_metrics['instance']['private_ip_address'] = instance.private_ip_address
                    instance_metrics['instance']['hypervisor'] = instance.hypervisor
                    instance_metrics['instance']['id'] = instance.id
                    instance_metrics['instance']['image_id'] = instance.image_id
                    instance_metrics['instance']['instance_type'] = instance.instance_type
                    instance_metrics['instance']['ip_address'] = instance.ip_address
                    instance_metrics['instance']['key_name'] = instance.key_name
                    instance_metrics['instance']['launch_time'] = instance.launch_time
                    instance_metrics['instance']['monitored'] = instance.monitored
                    instance_metrics['instance']['persistent'] = instance.persistent
                    instance_metrics['instance']['ramdisk'] = instance.ramdisk
                    instance_metrics['instance']['root_device_name'] = instance.root_device_name
                    instance_metrics['instance']['root_device_type'] = instance.root_device_type
                    instance_metrics['instance']['tags'] = instance.tags
                    instance_metrics['instance']['virtualization_type'] = instance.virtualization_type
                    instance_metrics['instance']['vpc_id'] = instance.vpc_id
                    instance_metrics['instance']['region'] = {"endpoint": instance.region.endpoint, "name": instance.region.name, }
                    instance_metrics['instance']['state'] = {"state": instance.state, "code": instance.state_code, "state_reason": instance.state_reason, }

                    virtual_machines[instance.id] = instance_metrics

                    print 'Updating', request.user, 'cache..'
                    print instance.platform, instance.product_codes

                    try:
                        ec2conn.monitor_instance(str(instance.id))
                    except:
                        print instance.id, 'instance not in a monitorable state!!'.upper()
                        #pprint(instance_metrics)
                        continue

                    # Here is where you define start - end for the Logs...............
                    end = datetime.datetime.now()
                    start = end - datetime.timedelta(minutes=60)

                    # This is how you list all possible values on the response....
                    # print ec2conn.list_metrics()

                    try:
                        metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="CPUUtilization")[0]
                    except:
                        continue

                    cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent')
                    instance_metrics['cpu_utilization_datapoints'] = json.dumps(cpu_utilization_datapoints, default=date_handler)
                    virtual_machines[instance.id] = instance_metrics

    vms_cache.vms_response = base64.b64encode(pickle.dumps(virtual_machines, pickle.HIGHEST_PROTOCOL))
    vms_cache.last_seen = datetime.datetime.now()
    vms_cache.is_updating = False
    vms_cache.save()

    print 'VMs cache was successfully updated.'
    return HttpResponse("ALLDONE")
def application(environ, start_response):
    """Hand-rolled WSGI application so I can stream output.

    ...by returning a generator that yields the response body lines.
    """
    request = webob.Request(environ)
    headers = [('Content-Type', 'text/html')]

    # validate request
    if request.method not in ('GET', 'POST'):
        return webob.exc.HTTPMethodNotAllowed()(environ, start_response)

    url = request.params.get('url')
    if not url:
        return webob.exc.HTTPBadRequest('Missing required parameter: url')(
            environ, start_response)

    parsed = urlparse.urlparse(url)
    if parsed.netloc in DOMAIN_BLACKLIST:
        return webob.exc.HTTPBadRequest(
            'Sorry, this content is not currently supported due to copyright.')(
            environ, start_response)

    # check that our CPU credit balance isn't too low
    try:
        cloudwatch = boto.ec2.cloudwatch.connect_to_region(
            'us-west-2', aws_access_key_id=AWS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_KEY)
        for metric in cloudwatch.list_metrics(metric_name='CPUCreditBalance'):
            if metric.name == 'CPUCreditBalance':
                stats = metric.query(
                    datetime.datetime.now() - datetime.timedelta(minutes=10),
                    datetime.datetime.now(), ['Average'])
                if stats:
                    credit = stats[-1].get('Average')
                    if credit and credit <= 30:
                        msg = "Sorry, we're too busy right now. Please try again later!"
                        exc = webob.exc.HTTPServiceUnavailable(msg)
                        exc.html_template_obj = Template(HTML_HEADER + msg + HTML_FOOTER)
                        return exc(environ, start_response)
    except:
        logging.exception("Couldn't fetch CPU credit balance from CloudWatch!")

    write_fn = start_response('200 OK', headers)

    def write(line):
        write_fn(line.encode('utf-8'))

    def run():
        """Generator that does all the work and yields the response body lines.

        TODO: figure out how to catch and log stack traces when this function
        raises an exception. Currently the log only gets the exception message.
        Wrapping the call at the bottom in try/except doesn't work since it's
        a generator. :/
        """
        yield HTML_HEADER
        yield ('<div id="progress">\nFetching %s ...<br />' % url).encode('utf-8')

        # function to print out status while downloading
        def download_progress_hook(progress):
            status = progress.get('status')
            if status == 'finished':
                msg = '<br />Extracting audio (this can take a while)...\n'
            elif status == 'error':
                # we always get an 'error' progress when the video finishes
                # downloading. not sure why. ignore it.
                return
            elif status == 'downloading':
                p = lambda field: progress.get(field) or ''
                try:
                    percent = float(p('_percent_str').strip('%') or '0')
                except ValueError:
                    percent = 0
                msg = ('<span><progress max="100" value="%s"></progress><br /> '
                       '%s of %s at %s in %s...</span>\n' % (
                           percent, p('_downloaded_bytes_str'),
                           p('_total_bytes_str') or p('_total_bytes_estimate_str'),
                           p('_speed_str'), p('_eta_str')))
            else:
                msg = status + '<br />\n'
            write(msg)

        # fetch video info (resolves URL) to see if we've already downloaded it
        options = {
            'outtmpl': u'/tmp/%(webpage_url)s',
            'restrictfilenames': True,  # don't allow & or spaces in file names
            'updatetime': False,  # don't set output file mtime to video mtime
            'logger': logging,
            'logtostderr': True,
            'format': 'bestaudio/best',
            'noplaylist': True,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
            'progress_hooks': [download_progress_hook],
        }
        ydl = youtube_dl.YoutubeDL(options)
        with handle_errors(write):
            info = ydl.extract_info(url, download=False)

        # prepare_filename() returns the video filename, not the postprocessed
        # one, so change the extension manually. the resulting filename will
        # look like:
        #   '/tmp/https_-_www.youtube.com_watchv=6dyWlM4ej3Q.mp3'
        #
        # ext4 max filename length is 255 bytes, and huffduffer also silently
        # truncates URLs to 255 chars total, so truncate before that if necessary.
        filename_prefix = ydl.prepare_filename(info)[:245 - len(S3_BASE)]
        options['outtmpl'] = filename_prefix.replace('%', '%%') + '.%(ext)s'
        filename = filename_prefix + '.mp3'

        s3 = boto.connect_s3(aws_access_key_id=AWS_KEY_ID,
                             aws_secret_access_key=AWS_SECRET_KEY)
        bucket = s3.get_bucket(S3_BUCKET)
        # strip the filename's path, scheme, and leading www., mobile, or m.
        # the resulting S3 key will look like 'youtube.com_watchv=6dyWlM4ej3Q.mp3'
        s3_key = re.sub('^https?_-_((www|m|mobile|player).)?', '',
                        os.path.basename(filename))
        key = bucket.get_key(s3_key, validate=False)

        if key.exists():
            yield 'Already downloaded! <br />\n'
        else:
            # download video and extract mp3
            yield 'Downloading...<br />\n'
            with handle_errors(write):
                youtube_dl.YoutubeDL(options).download([url])

            # upload to S3
            # http://docs.pythonboto.org/en/latest/s3_tut.html
            yield 'Uploading to S3...<br />\n'

            def upload_callback(sent, total):
                write('<span><progress max="100" value="%s"></progress><br /> '
                      '%.2fMB of %.2fMB</span>\n' % (
                          (sent * 100 / total), float(sent) / 1000000,
                          float(total) / 1000000))

            key.set_contents_from_filename(filename, cb=upload_callback)
            key.make_public()
            os.remove(filename)

        # get metadata, specifically last_modified
        key = bucket.get_key(s3_key)

        # generate description
        description = info.get('description') or ''
        footer = """\
Original video: %s
Downloaded by http://huffduff-video.snarfed.org/ on %s
Available for 30 days after download""" % (url, key.last_modified)
        # last_modified format is RFC 7231, e.g. 'Fri, 22 Jul 2016 07:11:46 GMT'

        if description:
            footer = """

===

""" + footer
        max_len = 1500 - len(footer)
        if len(description) > max_len:
            description = description[:max_len] + '...'
        description += footer

        # open 'Huffduff it' page
        yield """\n<br />Opening Huffduffer dialog...
<script type="text/javascript">
window.location = "https://huffduffer.com/add?popup=true&%s";
</script>
""" % urllib.urlencode([(k, v.encode('utf-8')) for k, v in (
            ('bookmark[url]', (S3_BASE + s3_key)),
            ('bookmark[title]', info.get('title') or ''),
            ('bookmark[description]', description),
            ('bookmark[tags]', ','.join(info.get('categories') or [])),
        )])

        yield HTML_FOOTER

        # alternative:
        # http://themindfulbit.com/blog/optimizing-your-podcast-site-for-huffduffer

    return run()
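# For local testing, the streaming WSGI app above can be served with the
# standard library's reference server. A sketch only; a real deployment
# would sit behind a production WSGI server rather than wsgiref:
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('', 8080, application)
    print 'Serving on port 8080...'
    httpd.serve_forever()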
def home(request):
    if not request.user.is_authenticated():
        print '-- web:'
        print 'anonymous'
        return render_to_response('web.html', locals(), context_instance=RequestContext(request))

    print '-' * 100
    print '-- dashboard:'
    print request.user

    user = request.user
    profile = userprofile.objects.get(user=request.user)
    secret = profile.secret
    aws_access_key = profile.aws_access_key
    aws_secret_key = profile.aws_secret_key
    aws_ec2_verified = profile.aws_ec2_verified

    aws_virtual_machines = {}

    if aws_ec2_verified:
        aws_regions = profile.aws_enabled_regions.split(',')
        print 'AWS regions', aws_regions

        for ec2_region in aws_regions:
            if ec2_region:
                ec2conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
                cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)

                reservations = ec2conn.get_all_instances()
                instances = [i for r in reservations for i in r.instances]

                for instance in instances:
                    instance_metrics = {}
                    instance_metrics['instance'] = instance.__dict__
                    #pprint(instance.__dict__)

                    print '** name', instance.id
                    print '** monitoring', instance.monitoring_state

                    if instance.monitoring_state == "disabled":
                        try:
                            ec2conn.monitor_instance(str(instance.id))
                        except:
                            print instance.id, 'instance not in a monitorable state!'.upper()
                            print instance.id, 'state:', instance.state
                            print instance.id, 'reason:', instance.state_reason['message']
                            continue

                    end = datetime.datetime.utcnow()
                    start = end - datetime.timedelta(hours=1)

                    # statistics: ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']
                    # units: ['Seconds', 'Percent', 'Bytes', 'Bits', 'Count', 'Bytes/Second', 'Bits/Second', 'Count/Second']

                    # CPUUtilization
                    metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="CPUUtilization")[0]
                    cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent')
                    instance_metrics['cpu_utilization_datapoints'] = cpu_utilization_datapoints

                    # DiskReadOps
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="DiskReadOps")[0]
                    #disk_readops_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['disk_readops_datapoints'] = disk_readops_datapoints

                    # DiskWriteOps
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="DiskWriteOps")[0]
                    #disk_writeops_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['disk_writeops_datapoints'] = disk_writeops_datapoints

                    # DiskReadBytes
                    metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="DiskReadBytes")[0]
                    disk_readbytes_datapoints = metric.query(start, end, 'Average', '')
                    instance_metrics['disk_readbytes_datapoints'] = disk_readbytes_datapoints

                    # DiskWriteBytes
                    metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="DiskWriteBytes")[0]
                    disk_writebytes_datapoints = metric.query(start, end, 'Average', '')
                    instance_metrics['disk_writebytes_datapoints'] = disk_writebytes_datapoints

                    # NetworkIn
                    metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="NetworkIn")[0]
                    networkin_datapoints = metric.query(start, end, 'Average', '')
                    instance_metrics['networkin_datapoints'] = networkin_datapoints

                    # NetworkOut
                    metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="NetworkOut")[0]
                    networkout_datapoints = metric.query(start, end, 'Average', '')
                    instance_metrics['networkout_datapoints'] = networkout_datapoints

                    aws_virtual_machines[instance.id] = instance_metrics

    private_virtual_machines = {}

    return render_to_response('dashboard.html', {
        'private_virtual_machines': private_virtual_machines,
        'aws_virtual_machines': aws_virtual_machines,
    }, context_instance=RequestContext(request))
def ajax_vms_refresh(request):
    user = request.user
    profile = userprofile.objects.get(user=request.user)

    print 'Refreshing', user, 'VMs cache..'

    aws_access_key = profile.aws_access_key
    aws_secret_key = profile.aws_secret_key
    aws_ec2_verified = profile.aws_ec2_verified

    virtual_machines = {}
    servers = mongo.servers.find({'secret': profile.secret, }).sort('_id', -1)

    vms_cache = Cache.objects.get_or_create(user=user)
    vms_cache = vms_cache[0]
    vms_cache.is_updating = True
    vms_cache.save()

    if servers.count():
        print 'servers count', servers.count()

    for server in servers:
        instance_metrics = {}
        instance_metrics['id'] = server['uuid']
        instance_metrics['user_id'] = request.user.id
        instance_metrics['provider'] = 'agent'
        instance_metrics['instance'] = {}
        instance_metrics['instance']['user_id'] = request.user.id
        instance_metrics['instance']['state'] = {}
        instance_metrics['instance']['tags'] = {}
        #instance_metrics["instance"]['tags']['Name'] = ''.join(x for x in unicodedata.normalize('NFKD', server['hostname']) if x in string.ascii_letters).lower()
        instance_metrics["instance"]['tags']['Name'] = server['hostname'].replace('.', '-').lower()

        uuid = server['uuid']

        cpu_usage = mongo.cpu_usage.find({'uuid': uuid, }).sort('_id', -1).limit(60)
        #loadavg = mongo.loadavg.find({'uuid':uuid,}).sort('_id',-1).limit(60)
        #mem_usage = mongo.memory_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
        #disks_usage = mongo.disks_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
        #activity = mongo.activity.find({'uuid':uuid,}).sort('_id',-1).limit(5)

        if (datetime.datetime.utcnow() - server['last_seen']).total_seconds() > 20:
            instance_metrics['instance']['state']['state'] = "Stopped"
            if (datetime.datetime.utcnow() - server['last_seen']).total_seconds() > 600:
                cpu_usage = []
        else:
            instance_metrics['instance']['state']['state'] = "Running"

        print '** SERVER ', server['uuid'], 'last seen', (datetime.datetime.utcnow() - server['last_seen']).total_seconds(), 'seconds ago..'

        cpu_usage_ = ""
        for usage in cpu_usage:
            cpu_usage_ += str(usage['cpu_usage']['cpu_used'])
            cpu_usage_ += ","
        cpu_usage = cpu_usage_[:-1]

        cpu_usage_reversed = ""
        cpu_usage_array_reversed = []
        for i in cpu_usage.split(','):
            cpu_usage_array_reversed.insert(0, i)
        for i in cpu_usage_array_reversed:
            cpu_usage_reversed += str(i) + ","
        cpu_usage_reversed = cpu_usage_reversed[:-1]

        instance_metrics['cpu_utilization_datapoints'] = cpu_usage_reversed
        virtual_machines[server['uuid'].replace(':', '-')] = instance_metrics

    #print 'virtual_machines', virtual_machines

    if aws_ec2_verified:
        aws_regions = profile.aws_enabled_regions.split(',')
        print 'AWS regions', aws_regions

        for ec2_region in aws_regions:
            if ec2_region:
                ec2conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
                cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)

                try:
                    reservations = ec2conn.get_all_instances()
                except:
                    vms_cache.is_updating = False
                    vms_cache.vms_response = ""
                    vms_cache.save()
                    print vms_cache.is_updating
                    print vms_cache.vms_response
                    return HttpResponse("access denied")

                instances = [i for r in reservations for i in r.instances]

                for instance in instances:
                    if not instance:
                        continue

                    instance_metrics = {}
                    instance_metrics['instance'] = {}

                    print '** instance', instance.id, instance.private_ip_address

                    volumes = []
                    for volume in ec2conn.get_all_volumes(filters={'attachment.instance-id': instance.id}):
                        volumes.append([volume.id, volume.iops, volume.size, ])

                    groups = []
                    for group in instance.__dict__['groups']:
                        groups.append([group.id, group.name, ])

                    instance_metrics['id'] = instance.id
                    instance_metrics['user_id'] = request.user.id
                    instance_metrics['provider'] = "aws-ec2"
                    instance_metrics['instance']['placement'] = instance.placement
                    instance_metrics['instance']['user_id'] = request.user.id
                    instance_metrics['instance']['groups'] = groups
                    instance_metrics['instance']['block_device_mapping'] = volumes
                    instance_metrics['instance']['architecture'] = instance.architecture
                    instance_metrics['instance']['client_token'] = instance.client_token
                    instance_metrics['instance']['dns_name'] = instance.dns_name
                    instance_metrics['instance']['private_ip_address'] = instance.private_ip_address
                    instance_metrics['instance']['hypervisor'] = instance.hypervisor
                    instance_metrics['instance']['id'] = instance.id
                    instance_metrics['instance']['image_id'] = instance.image_id
                    instance_metrics['instance']['instance_type'] = instance.instance_type
                    instance_metrics['instance']['ip_address'] = instance.ip_address
                    instance_metrics['instance']['key_name'] = instance.key_name
                    instance_metrics['instance']['launch_time'] = instance.launch_time
                    instance_metrics['instance']['monitored'] = instance.monitored
                    instance_metrics['instance']['persistent'] = instance.persistent
                    instance_metrics['instance']['ramdisk'] = instance.ramdisk
                    instance_metrics['instance']['root_device_name'] = instance.root_device_name
                    instance_metrics['instance']['root_device_type'] = instance.root_device_type
                    instance_metrics['instance']['tags'] = instance.tags
                    instance_metrics['instance']['virtualization_type'] = instance.virtualization_type
                    instance_metrics['instance']['vpc_id'] = instance.vpc_id
                    instance_metrics['instance']['region'] = {"endpoint": instance.region.endpoint, "name": instance.region.name, }
                    instance_metrics['instance']['state'] = {"state": instance.state, "code": instance.state_code, "state_reason": instance.state_reason, }

                    virtual_machines[instance.id] = instance_metrics

                    print 'Updating', request.user, 'cache..'
                    print instance.platform, instance.product_codes

                    try:
                        ec2conn.monitor_instance(str(instance.id))
                    except:
                        print instance.id, 'instance not in a monitorable state!!'.upper()
                        #pprint(instance_metrics)
                        continue

                    # Here is where you define start - end for the Logs...............
                    end = datetime.datetime.utcnow()
                    start = end - datetime.timedelta(minutes=60)

                    # This is how you list all possible values on the response....
                    # print ec2conn.list_metrics()

                    # CPUUtilization
                    try:
                        metric = cloudwatch.list_metrics(dimensions={'InstanceId': instance.id}, metric_name="CPUUtilization")[0]
                    except:
                        continue
                    cpu_utilization_datapoints = metric.query(start, end, 'Average', 'Percent')
                    instance_metrics['cpu_utilization_datapoints'] = json.dumps(cpu_utilization_datapoints, default=date_handler)

                    # DiskReadOps
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="DiskReadOps")[0]
                    #disk_readops_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['disk_readops_datapoints'] = json.dumps(disk_readops_datapoints,default=date_handler)

                    # DiskWriteOps
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="DiskWriteOps")[0]
                    #disk_writeops_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['disk_writeops_datapoints'] = json.dumps(disk_writeops_datapoints,default=date_handler)

                    # DiskReadBytes
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="DiskReadBytes")[0]
                    #disk_readbytes_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['disk_readbytes_datapoints'] = json.dumps(disk_readbytes_datapoints,default=date_handler)

                    # DiskWriteBytes
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="DiskWriteBytes")[0]
                    #disk_writebytes_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['disk_writebytes_datapoints'] = json.dumps(disk_writebytes_datapoints,default=date_handler)

                    # NetworkIn
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="NetworkIn")[0]
                    #networkin_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['networkin_datapoints'] = json.dumps(networkin_datapoints,default=date_handler)

                    # NetworkOut
                    #metric = cloudwatch.list_metrics(dimensions={'InstanceId':instance.id}, metric_name="NetworkOut")[0]
                    #networkout_datapoints = metric.query(start, end, 'Average', '')
                    #instance_metrics['networkout_datapoints'] = json.dumps(networkout_datapoints,default=date_handler)

                    virtual_machines[instance.id] = instance_metrics

    vms_cache.vms_response = base64.b64encode(pickle.dumps(virtual_machines, pickle.HIGHEST_PROTOCOL))
    vms_cache.last_seen = timezone.now()
    vms_cache.is_updating = False
    vms_cache.save()

    print 'VMs cache was successfully updated.'
    return HttpResponse("ALLDONE")
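# Read side of the cache that ajax_vms_refresh() writes: vms_response holds a
# base64-encoded pickle of the virtual_machines dict, so consumers decode it
# the same way aws_vm_view() above does:
virtual_machines = pickle.loads(base64.b64decode(vms_cache.vms_response))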