def get_host_availability_metrics(self, timestamp_begin, timestamp_end):
    """Compute availability metrics for every known host over a time window.

    Parameters:
        timestamp_begin: window start, '%Y-%m-%dT%H:%M:%S' string.
        timestamp_end: window end, same format.

    Returns:
        list of per-host metric result objects produced by
        _get_availability_metrics_per_host().

    Fixes vs. original: removed the unused `hour` local and the leftover
    debug print, iterate hosts directly instead of range(len(...)), parse
    the loop-invariant window bounds once, and compute total_period in a
    single place.
    """
    host_handler = HostDataHandler()
    # Window bounds are loop-invariant; parse them once up front.
    window_begin = datetime.datetime.strptime(timestamp_begin, '%Y-%m-%dT%H:%M:%S')
    time_end = datetime.datetime.strptime(timestamp_end, '%Y-%m-%dT%H:%M:%S')
    results = []
    for host_obj in host_handler.get_hosts():
        host_ip = host_obj.get_ip()
        # Most recent failure recorded for this host (None when there is none).
        last_failure_timestamp = host_handler.get_last_failure(timestamp_begin, host_ip)
        # Default to the full requested window...
        time_begin = window_begin
        timestamp_begin_host = timestamp_begin
        # ...but when a failure is known, measure from that failure instead.
        if last_failure_timestamp is not None:
            time_begin = datetime.datetime.strptime(last_failure_timestamp, '%Y-%m-%dT%H:%M:%S')
            timestamp_begin_host = last_failure_timestamp
        total_period = (time_end - time_begin).total_seconds()
        host_data = host_handler.get_host_status_db(host_ip, timestamp_begin_host, timestamp_end)
        results.append(self._get_availability_metrics_per_host(
            host_ip, host_data, time_begin, total_period))
    return results
def store_host_data(hosts, interval=1, percpu=False):
    """Poll each host's metrics forever and persist them to the database.

    Parameters:
        hosts: iterable of host addresses to poll.
        interval: polling period in minutes. Default 1 reproduces the
            original hard-coded 60-second sleep; the original accepted
            this parameter but never used it.
        percpu: accepted for interface compatibility; not used here.
            TODO(review): wire through to get_host_metric or drop.

    Never returns; intended to run as a background worker.
    """
    db = HostDataHandler()
    sleep_seconds = 60 * interval  # invariant; default matches the old sleep(60)
    while True:
        for host in hosts:
            data = get_host_metric(host)
            # Unresolvable hosts are skipped, not fatal.
            if data == 'Unknown host':
                continue
            db.save_data_db(data["cpu"], data["memory"], data["disk"], host)
        time.sleep(sleep_seconds)
def __init__(self):
    # Service API clients used by this handler's query methods.
    # NOTE(review): constructors are presumably side-effectful (service
    # connections) -- construction order preserved deliberately.
    self.__ceilometer = CeilometerClient()
    self.__keystone = KeystoneClient()
    self.__nova = NovaClient()
    # Local persistence handlers for host metrics and benchmark results.
    self.__hosts_db = HostDataHandler()
    self.__benchmark_db = BenchmarkDataHandler()
class DataHandler: def __init__(self): self.__ceilometer = CeilometerClient() self.__keystone = KeystoneClient() self.__nova = NovaClient() self.__hosts_db = HostDataHandler() self.__benchmark_db = BenchmarkDataHandler() def projects(self): return json.dumps(self.__keystone.projects) def sugestion(self): project_list = [ a['name'] for a in json.loads(self.projects()) ] host_vm_info = self.__nova.vm_info(project_list) desligar = {} migracoes = {} copia_hosts = host_vm_info[:] for e in host_vm_info: dic_aux = e.copy() chave = e.keys()[0] if( len( dic_aux[chave]['vms'].keys() ) > 0 ): vms_aux = dic_aux[chave]['vms'].copy() copia_hosts.remove(e) migra = False migracoes[chave] = {} for i in vms_aux: for j in copia_hosts: migra = False if( (j[j.keys()[0]]['Livre'][0] >= vms_aux[i][0]) and (j[j.keys()[0]]['Livre'][1] >= vms_aux[i][1]) and (j[j.keys()[0]]['Livre'][2] >= vms_aux[i][2])): valores = [ j[j.keys()[0]]['Livre'][0] - vms_aux[i][0], j[j.keys()[0]]['Livre'][1] - vms_aux[i][1], j[j.keys()[0]]['Livre'][2] - vms_aux[i][2] ] j[j.keys()[0]]['Livre'] = valores migracoes[chave][ e[chave]['nomes'].get(i) ] = j.keys()[0] migra = True break else: continue if migra == False: migracoes[chave][ e[chave]['nomes'].get(i) ] = None desligar[chave] = False if not chave in desligar: desligar[chave] = True else: copia_hosts.remove(e) desligar[chave] = True continue saida = {} saida['Hosts']= desligar saida['Migracoes'] = migracoes return json.dumps(saida) def cpu_util_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None): return json.dumps(self.__ceilometer.get_cpu_util(timestamp_begin, timestamp_end, resource_id)) def cpu_util_flavors(self, timestamp_begin=None, timestamp_end=None): data = self.__ceilometer.get_cpu_util_flavors(timestamp_begin, timestamp_end) ret = analytics.recommendations.recomenda_flavor(data) return json.dumps(ret) def projects_with_instances_and_cpu_util(self): projects = self.__keystone.tenants ret = { 'name' : 'cloud', 'children' : [] } for p 
in projects: proj = { 'name' : p.name, 'children' : [] } instances = self.__nova.instances(p.name) for i in instances: proj['children'].append({ 'resource_id' : i.id, 'instance_name' : i.name }) ret['children'].append(proj) return json.dumps(ret) def alarms_history(self, timestamp_begin=None, timestamp_end=None): return json.dumps(self.__ceilometer.get_alarms_history(timestamp_begin, timestamp_end)) def add_alarm(self, name, resource, threshold, operator, period, ev_period): return self.__ceilometer.set_alarm(name, resource, threshold, operator, period, ev_period) def alarm_email(self, data_requested): alarm_id = ast.literal_eval(data_requested)['alarm_id'] userId = self.__ceilometer.get_alarm_userid(alarm_id) projectId = self.__ceilometer.get_alarm_projectid(alarm_id) userEmail = self.__keystone.get_user_email(userId, projectId) send_email('*****@*****.**', [userEmail], [], 'Alert Telemetry Cloud', 'Email disparado pelo alarme!!!', '*****@*****.**', '4n4lyt1cs') def alarm_description(self): return json.dumps(self.__ceilometer.get_alarm_parameters()) def delete_alarm(self, alarm_id): return json.dumps(self.__ceilometer.delete_alarms(alarm_id)) def hosts_cpu(self, timestamp_begin, timestamp_end): return self.__hosts_db.get_data_db('Cpu_Util', timestamp_begin, timestamp_end) def hosts_memory(self, timestamp_begin, timestamp_end): return self.__hosts_db.get_data_db('Memory', timestamp_begin, timestamp_end) def hosts_disk(self, timestamp_begin, timestamp_end): return self.__hosts_db.get_data_db('Disk', timestamp_begin, timestamp_end) def host_metrics(self, project): return self.__nova.metrics(project) def hosts_recommendation(self, r_cpu, r_memory , r_disk): resource = [] ret = {} r_cpu = json.loads(r_cpu) r_memory = json.loads(r_memory) r_disk = json.loads(r_disk) for host in r_cpu: host_http = host["host_address"] if host["data"] is None: continue for data in host["data"]: resource.append(data["data"]) resource = sorted(resource) if(len(resource)%2 == 0): index = 
len(resource)/2 mediana = (resource[index-1] + resource[index+1])/2 else: mediana = resource[int(math.ceil(len(resource)/2))] if mediana >= 95: ret[host_http] ="sobrecarregado" else: resource = [] for host_mem in r_memory: if(host["host_address"] == host_mem["host_address"]): for data in host_mem["data"]: for value in json.loads(data["data"]): resource.append(value["percent"]) resource = sorted(resource) if(len(resource)%2 == 0): index = len(resource)/2 mediana = (resource[index-1] + resource[index+1])/2 else: mediana = resource[int(math.ceil(len(resource)/2))] if mediana >= 95: ret[host_http] ="sobrecarregado" else: ret[host_http] ="normal" return json.dumps(ret) #return json.dumps(cpu) def instances_from_host(self, host_name): ret = [] projects = self.__keystone.projects for project in projects: instances = self.__nova.instances(project['name']) for instance in instances: print instance._info if instance._info['os-extended-server-attributes:host'] == host_name: ret.append({'instance_name' : instance.name, 'instance_id' : instance.id}) return ret def migrate_to_host(self, project_name, host_name, instance_id): host_vm = self.__nova.vm_hostname(project_name,instance_id) if host_vm._info['os-extended-server-attributes:host'] == host_name: raise MigrateException(400,"Migracao para o mesmo destino") elif host_vm._info['os-extended-server-attributes:host'] == 'truta' and host_name != 'truta': raise MigrateException(500,"Migracao de host para compute node") else: self.__nova.vm_migration(project_name,host_name,instance_id) return True def get_benchmark_bd(self): ret = self.__benchmark_db.get_data_db() return ret def start_instance_bench(self, project): return self.__nova.start_instance_bench(project) def get_benchmark(self, project): benchmark_ip = self.__nova.get_benchmark_ip(project) data = requests.get('http://'+benchmark_ip+':5151/get_benchmarking') return data.json() def get_benchmark_status(self, project): benchmark_ip = self.__nova.get_benchmark_ip(project) data 
= requests.get('http://'+benchmark_ip+':5151/get_status') return data.text def repeat_benchmark(self, project): benchmark_ip = self.__nova.get_benchmark_ip(project) data = requests.get('http://'+benchmark_ip+':5151/start_benchmarking') return data.text def remove_benchmark_instance(self): id = self.__nova.benchmark_id() if id == None: return "sem instancia benchmark" else: remove = self.__nova.remove_instance(id) return remove
class DataHandler: def __init__(self): self.__ceilometer = CeilometerClient() self.__keystone = KeystoneClient() self.__nova = NovaClient() self.__hosts_db = HostDataHandler() def projects(self): return json.dumps(self.__keystone.projects) def cpu_util_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None): return json.dumps(self.__ceilometer.get_cpu_util(timestamp_begin, timestamp_end, resource_id)) def cpu_util_flavors(self, timestamp_begin=None, timestamp_end=None): data = self.__ceilometer.get_cpu_util_flavors(timestamp_begin, timestamp_end) ret = analytics.recommendations.recomenda_flavor(data) return json.dumps(ret) def projects_with_instances_and_cpu_util(self): projects = self.__keystone.tenants ret = { 'name' : 'cloud', 'children' : [] } for p in projects: proj = { 'name' : p.name, 'children' : [] } instances = self.__nova.instances(p.name) cpu_data = get_latest_cpu_util_from_database(project_id=p.id, limit=len(instances)) for sample in cpu_data: proj['children'].append({ 'resource_id' : sample[4], 'cpu_util_percent' : sample[6] }) ret['children'].append(proj) return json.dumps(ret) def alarms_history(self, timestamp_begin=None, timestamp_end=None): return json.dumps(self.__ceilometer.get_alarms_history(timestamp_begin, timestamp_end)) def add_alarm(self, name, resource, threshold, operator, period, ev_period): return self.__ceilometer.set_alarm(name, resource, threshold, operator, period, ev_period) def alarm_email(self, data_requested): alarm_id = ast.literal_eval(data_requested)['alarm_id'] userId = self.__ceilometer.get_alarm_userid(alarm_id) projectId = self.__ceilometer.get_alarm_projectid(alarm_id) userEmail = self.__keystone.get_user_email(userId, projectId) send_email('*****@*****.**', [userEmail], [], 'Alert Telemetry Cloud', 'Email disparado pelo alarme!!!', '*****@*****.**', '4n4lyt1cs') def alarm_description(self): return json.dumps(self.__ceilometer.get_alarm_parameters()) def delete_alarm(self, alarm_id): return 
json.dumps(self.__ceilometer.delete_alarms(alarm_id)) def hosts_cpu(self, timestamp_begin, timestamp_end): return self.__hosts_db.get_data_db('Cpu_Util', timestamp_begin, timestamp_end) def hosts_memory(self, timestamp_begin, timestamp_end): return self.__hosts_db.get_data_db('Memory', timestamp_begin, timestamp_end) def hosts_disk(self, timestamp_begin, timestamp_end): return self.__hosts_db.get_data_db('Disk', timestamp_begin, timestamp_end) def host_metrics(self, project): return self.__nova.metrics(project) def hosts_recommendation(self, r_cpu, r_memory , r_disk): resource = [] ret = {} r_cpu = json.loads(r_cpu) r_memory = json.loads(r_memory) r_disk = json.loads(r_disk) for host in r_cpu: host_http = host["host_address"] if host["data"] is None: continue for data in host["data"]: resource.append(data["data"]) resource = sorted(resource) if(len(resource)%2 == 0): index = len(resource)/2 mediana = (resource[index-1] + resource[index+1])/2 else: mediana = resource[int(math.ceil(len(resource)/2))] if mediana >= 95: ret[host_http] ="sobrecarregado" else: resource = [] for host_mem in r_memory: if(host["host_address"] == host_mem["host_address"]): for data in host_mem["data"]: for value in json.loads(data["data"]): resource.append(value["percent"]) resource = sorted(resource) if(len(resource)%2 == 0): index = len(resource)/2 mediana = (resource[index-1] + resource[index+1])/2 else: mediana = resource[int(math.ceil(len(resource)/2))] if mediana >= 95: ret[host_http] ="sobrecarregado" else: ret[host_http] ="normal" return json.dumps(ret) #return json.dumps(cpu) def instances_from_host(self, host_name): ret = [] projects = self.__keystone.projects for project in projects: instances = self.__nova.instances(project['name']) for instance in instances: print instance._info if instance._info['os-extended-server-attributes:host'] == host_name: ret.append({'instance_name' : instance.name, 'instance_id' : instance.id}) return ret