# NOTE(review): this module-level __init__ is dead code - an orphaned copy of
# DataHandler.__init__ defined below.  Outside a class body the self.__name
# attributes are NOT name-mangled, so this function could not even be attached
# to the class and behave like the method it duplicates.  Consider deleting it.
def __init__(self, config_path='openstack_dashboard/api/telemetry_api/environment.conf'):
        self.__config = ConfigParser.ConfigParser()
        self.__config.read(config_path)
        #self.__ceilometer = CeilometerClient(self.__config)
        self.__ceilometer = CeilometerClient()
        self.__keystone = KeystoneClient(self.__config)


        #self.__nova = NovaClient(self.__config)
        self.__nova = NovaClient()

        # [Misc] DB settings; `database` and `table` are read but never used.
        server = self.__config.get('Misc', 'dbserver')
        user = self.__config.get('Misc', 'dbuser')
        passwd = self.__config.get('Misc', 'dbpass')
        database = self.__config.get('Misc', 'hostsdbname')
        table = self.__config.get('Misc', 'hostsdbtable')
        self.__hosts = ast.literal_eval(self.get_config().get('Openstack', 'Hosts'))
        self.__hosts_db = HostDataHandler()
        self.__benchmark_db = BenchmarkDataHandler(server, user, passwd)
        self.__reduction = Reduction()
class DataHandler:

    def __init__(self, config_path='openstack_dashboard/api/telemetry_api/environment.conf'):
        """Wire up the API clients and data-access helpers.

        :param config_path: INI file holding the [Misc] benchmark-DB
            credentials and the [Openstack] host list.
        """
        self.__config = ConfigParser.ConfigParser()
        self.__config.read(config_path)
        # The config-driven client constructors were retired (kept commented
        # out in the original); the clients now read their own settings.
        self.__ceilometer = CeilometerClient()
        self.__keystone = KeystoneClient(self.__config)
        self.__nova = NovaClient()

        # Benchmark-database credentials from the [Misc] section.  The
        # original also read 'hostsdbname'/'hostsdbtable' into locals that
        # were never used; those dead reads are dropped here.
        server = self.__config.get('Misc', 'dbserver')
        user = self.__config.get('Misc', 'dbuser')
        passwd = self.__config.get('Misc', 'dbpass')
        # [Openstack] Hosts is stored as a Python-literal list; literal_eval
        # parses it without executing arbitrary code.
        self.__hosts = ast.literal_eval(self.__config.get('Openstack', 'Hosts'))
        self.__hosts_db = HostDataHandler()
        self.__benchmark_db = BenchmarkDataHandler(server, user, passwd)
        self.__reduction = Reduction()

    def get_config(self):
        """Return the parsed ConfigParser instance backing this handler."""
        return self.__config

    def projects(self):
        """Return the Keystone project list serialized as a JSON string."""
        return json.dumps(self.__keystone.projects)

    def get_critical_hosts(self, instances_critical, information):
        """Return the compute nodes hosting at least one critical instance.

        :param instances_critical: collection of instance ids that must not
            be migrated.
        :param information: list of one-key dicts, one per compute node,
            shaped {node_name: {'vms': {instance_id: ...}, ...}}.
        :return: list of node names, input order preserved.  Callers only
            test membership, so duplicates (which the original appended once
            per critical VM) are dropped.
        """
        critical_hosts = []
        for cpn in information:
            # Each entry wraps a single compute node under its name key.
            # next(iter(...)) works on both Python 2 and 3, unlike keys()[0].
            cpn_name = next(iter(cpn))
            vms = cpn[cpn_name]['vms']
            if any(server in instances_critical for server in vms):
                if cpn_name not in critical_hosts:
                    critical_hosts.append(cpn_name)
        return critical_hosts

    def suggestion(self, list_not_ignore=[]):
        """Suggest VM migrations that would allow compute nodes to shut down.

        Greedy first-fit packing: for every non-critical compute node, try to
        rehome each of its non-critical VMs onto another node with enough
        free [cpu, ram, disk]; a node whose VMs can all be rehomed is marked
        shutdown-able.

        :param list_not_ignore: when non-empty, only these nodes are
            considered as migration sources.
        :return: {'Hosts': {node: bool}, 'Migracoes': {node: {instance_id:
            [target_node, instance_name, project]}}}, or an error dict.

        NOTE(review): the mutable default argument is only read here, never
        mutated; dict.keys()[0] / has_key / exc.message are Python 2 only.
        """
        project_list = [ project['name'] for project in json.loads(self.projects())] #list of existing project names
        compute_nodes_info_list = self.__nova.vm_info(project_list) #list of dicts - one per compute node
        instances_id_project = {} #dict instance_id : project_name
        for compute_node_aux in compute_nodes_info_list:
            try:
                compute_name = compute_node_aux.keys()[0] #name of the compute node (single-key dict)
                for key in compute_node_aux[compute_name]['Info_project'].keys(): #key is a project name
                    #id is the id of an instance belonging to project `key` on compute_name
                    for id in compute_node_aux[compute_name]['Info_project'][key]:
                        instances_id_project[id] = key
            except Exception as excp:
                return {"error": excp.message}
        #instances that cannot be migrated
        critical_instances = self.__nova.critical_instances(project_list)
        #nodes hosting at least one critical instance
        critical_cpn = self.get_critical_hosts(critical_instances,compute_nodes_info_list)
        shutdown = {} #dict compute_node : True/False for shutdown
        migrations = {} #dict with all migrations
        compute_nodes_copy = compute_nodes_info_list[:] #working copy of candidate targets
        owner_cpn_instance = {} #instance_id -> {source_node: target_node} of a planned move
        #for cp in compute_nodes_info_list:
        #    owner_cpn = cp.keys()[0]
        #    vms_cp = cp[owner_cpn]['vms'].keys()
        #    for v in vms_cp:
        #        owner_cpn_instance[v] = owner_cpn
        #begin of algorithm
        #cpn_data - compute node data
        try:
            for cpn_data in compute_nodes_info_list:
                data  = cpn_data.copy() #all information about the current compute node
                actual_cpn = cpn_data.keys()[0] #current source node (try to migrate all its instances)
                #skip source nodes that host critical instances
                if actual_cpn not in critical_cpn:
                    if( len( data[actual_cpn]['vms'].keys()) > 0 ):
                        instances_data = data[actual_cpn]['vms'].copy() #copy of the node's VM map
                        compute_nodes_copy.remove(cpn_data) #a node is never its own migration target
                        migration_flag = False #whether the current instance found a target
                        migrations[actual_cpn] = {} #migrations planned for this node
                        for instance_id in instances_data:
                            for other_cpn in compute_nodes_copy:
                                #only non-critical targets and non-critical instances
                                if (other_cpn not in critical_cpn and instance_id not in critical_instances):
                                    migration_flag = False
                                    other_cpn_name = other_cpn.keys()[0]
                                    if list_not_ignore == [] or actual_cpn in list_not_ignore:
                                        #first fit: target needs >= free cpu/ram/disk ('Livre' = free)
                                        if(other_cpn[other_cpn_name]['Livre'][0] >= instances_data[instance_id][0] and
                                           other_cpn[other_cpn_name]['Livre'][1] >= instances_data[instance_id][1] and
                                           other_cpn[other_cpn_name]['Livre'][2] >= instances_data[instance_id][2]):
                                           from_cpn = None
                                           migrate_to_cpn = None
                                           if instance_id in owner_cpn_instance:
                                               from_cpn = owner_cpn_instance[instance_id].keys()[0]
                                               migrate_to_cpn = owner_cpn_instance[instance_id][from_cpn]
                                           else:
                                               from_cpn = actual_cpn
                                               migrate_to_cpn = other_cpn_name
                                           if actual_cpn == from_cpn or actual_cpn == migrate_to_cpn:
                                               #update the target's free resources
                                               new_values = [other_cpn[other_cpn_name]['Livre'][0] - instances_data[instance_id][0],
                                                         other_cpn[other_cpn_name]['Livre'][1] - instances_data[instance_id][1],
                                                         other_cpn[other_cpn_name]['Livre'][2] - instances_data[instance_id][2]]
                                               other_cpn[other_cpn_name]['Livre'] = new_values
                                               #move the instance (with all its data) onto the target
                                               instances_other_cpn = other_cpn[other_cpn_name]['vms']
                                               instances_other_cpn[instance_id] = instances_data[instance_id]
                                               other_cpn[other_cpn_name]['vms'] = instances_other_cpn
                                               other_cpn[other_cpn_name]['nomes'][instance_id] = data[actual_cpn]['nomes'][instance_id]
                                               migration_flag = True
                                               migrations[actual_cpn][instance_id] = [ other_cpn_name , cpn_data[actual_cpn]['nomes'].get(instance_id) ,instances_id_project[instance_id]]
                                               #remember the planned new owner of the instance
                                               owner_cpn_instance[instance_id] = {actual_cpn : other_cpn_name}
                                           else:
                                               continue
                                        else:
                                            break
                                    else:
                                        migration_flag = False
                                else:
                                    continue
                            #no target found for this instance: node cannot be emptied
                            if migration_flag == False:
                                if instance_id in owner_cpn_instance:
                                    if( owner_cpn_instance[instance_id].has_key(actual_cpn) or actual_cpn in owner_cpn_instance[instance_id].values()):
                                        continue
                                else:
                                    migrations[actual_cpn][instance_id] = None
                                    shutdown[actual_cpn] = False
                        if not actual_cpn in shutdown:
                            shutdown[actual_cpn] = True
                    else:
                         compute_nodes_copy.remove(cpn_data)
                         shutdown[actual_cpn] = True
                         continue
                else:
                    compute_nodes_copy.remove(cpn_data)
                    shutdown[actual_cpn] = False
        except Exception as excp2:
                return {"error in algorithm suggestion":excp2.message}

        #a None entry means some VM on that node could not be placed: discard
        #the partial migration plan for it
        for host_key in migrations.keys():
            if None in migrations[host_key].values():
                migrations[host_key] = {}
        output = {} #json output with all data
        output['Hosts'] = shutdown
        output['Migracoes'] = migrations
        #recomendation = self.remove_duplicated_migrations(output)
        return output

    #Not using anymore
    #def remove_duplicated_migrations(self, output):
    #    result = output
    #    for compute_node in result['Migracoes'].keys():
    #        for server  in result['Migracoes'][compute_node].keys():
    #            if not self.__nova.verify_host_has_server(compute_node,server):
    #                result['Migracoes'][compute_node].pop(server)
    #    return result

    def cpu_util_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        """JSON-encoded Ceilometer cpu_util samples for the window/resource."""
        return json.dumps(self.__ceilometer.get_cpu_util(timestamp_begin, timestamp_end, resource_id))

    def cpu_util_flavors(self, timestamp_begin=None, timestamp_end=None):
        """JSON-encoded flavor recommendations derived from per-flavor cpu_util."""
        data = self.__ceilometer.get_cpu_util_flavors(timestamp_begin, timestamp_end)
        ret = analytics.recommendations.recomenda_flavor(data)
        return json.dumps(ret)

    def network_incoming_bytes_rate_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        """JSON-encoded incoming-bytes-rate samples from Ceilometer."""
        return json.dumps(self.__ceilometer.get_network_incoming_bytes_rate(timestamp_begin, timestamp_end, resource_id))

    def network_outgoing_bytes_rate_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        """JSON-encoded outgoing-bytes-rate samples from Ceilometer."""
        return json.dumps(self.__ceilometer.get_network_outgoing_bytes_rate(timestamp_begin, timestamp_end, resource_id))

    def projects_with_instances_and_cpu_util(self):
        """Build a d3-style tree: cloud -> projects -> instances.

        Each leaf carries the instance's Nova id (as 'resource_id') and its
        display name.  Projects with no instances get an empty children list.
        """
        tree = {'name': 'cloud', 'children': []}

        for tenant in self.__keystone.tenants:
            leaves = [
                {'resource_id': vm.id, 'instance_name': vm.name}
                for vm in self.__nova.instances(tenant.name)
            ]
            tree['children'].append({'name': tenant.name, 'children': leaves})

        return tree

    def alarms_history(self, timestamp_begin=None, timestamp_end=None):
        """Alarm change history from Ceilometer for the given window."""
        return self.__ceilometer.get_alarms_history(timestamp_begin, timestamp_end)

    def add_alarm(self, name, resource, threshold, operator, period, ev_period, send_mail, email_admin, instance=""):
        """Create a Ceilometer threshold alarm; arguments are passed through unchanged."""
        return self.__ceilometer.set_alarm(name, resource, threshold, operator, period, ev_period, send_mail, email_admin, instance)

    def alarm_email(self, data_requested):
        """Send notification e-mail(s) for a fired alarm.

        `data_requested` is a Python-literal string containing at least an
        'alarm_id' key.  Depending on the alarm's two e-mail flags the mail
        goes to the admin, the alarm owner, or both; when both flags read
        'False' nothing is sent.

        NOTE(review): SMTP credentials are hard-coded below - they should be
        moved to configuration.  The flags are compared with substring tests
        ('True' in ...), presumably because they arrive as strings; confirm
        their exact format against get_alarm_email_status().
        """
        alarm_id = ast.literal_eval(data_requested)['alarm_id']
        userId = self.__ceilometer.get_alarm_userid(alarm_id)
        projectId = self.__ceilometer.get_alarm_projectid(alarm_id)
        userEmail = self.__keystone.get_user_email(userId, projectId)
        # copy_admin: pair of string flags [notify_owner?, notify_admin?] -
        # TODO confirm ordering.
        copy_admin = self.__ceilometer.get_alarm_email_status(alarm_id)
        adminEmail = self.__keystone.get_user_email(self.__keystone.get_user(projectId,'admin'),projectId)

        if 'True' in copy_admin[0] and 'True' in copy_admin[1] :
            # Both flags set: mail the admin and the owner separately.
            send_email('*****@*****.**',
                   [adminEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

            send_email('*****@*****.**',
                   [userEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

        elif 'True' in copy_admin[0] and 'False' in copy_admin[1]:
            # Owner only.
            send_email('*****@*****.**',
                   [userEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

        elif 'False' in copy_admin[0] and 'True' in copy_admin[1]:
            # Admin only.
            send_email('*****@*****.**',
                   [adminEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

    def alarm_description(self):
        """Return the alarm parameter metadata from Ceilometer."""
        return self.__ceilometer.get_alarm_parameters()

    def delete_alarm(self, alarm_id):
        """Delete the given alarm and return Ceilometer's reply as JSON."""
        return json.dumps(self.__ceilometer.delete_alarms(alarm_id))

    def hosts_cpu(self, timestamp_begin, timestamp_end):
        """Raw per-host CPU-utilisation samples from the hosts DB."""
        return self.__hosts_db.get_data_db('Cpu_Util', timestamp_begin, timestamp_end)

    def hosts_memory(self, timestamp_begin, timestamp_end):
        """Raw per-host memory samples from the hosts DB."""
        return self.__hosts_db.get_data_db('Memory', timestamp_begin, timestamp_end)

    def hosts_disk(self, timestamp_begin, timestamp_end):
        """Raw per-host disk samples from the hosts DB."""
        return self.__hosts_db.get_data_db('Disk', timestamp_begin, timestamp_end)

    def hosts_network(self, timestamp_begin, timestamp_end):
        """Raw per-host network counter samples from the hosts DB."""
        return self.__hosts_db.get_data_db('Network', timestamp_begin, timestamp_end)

    def host_metrics(self, project):
        """Nova metrics for the given project."""
        return self.__nova.metrics(project)

    def host_aggregates(self, project):
        """Nova host aggregates visible to the given project."""
        return self.__nova.host_aggregates(project)

    def resource_host(self, host):
        """Resource totals (cpu / memory / disk) for a single host."""
        return self.__nova.resource_host(host)


    def hosts_recommendation(self, r_cpu, r_memory, r_disk):
        """Classify each host as 'sobrecarregado' (overloaded) or 'normal'.

        A host is overloaded when the median of its CPU samples - or, failing
        that, of its memory-percent samples - is >= 95.

        Fixes over the original: the sample list is reset for every host (it
        used to accumulate the previous host's memory samples into the next
        host's CPU samples); the even-length median now averages the two
        middle elements (it used to skip one with index+1); division is
        forced to float for Python 2.

        :param r_cpu: JSON string - list of {host_address, data: [{data: pct}]}.
        :param r_memory: JSON string - same shape, but each inner 'data' is a
            JSON-encoded list of {'percent': pct} samples.
        :param r_disk: JSON string; parsed for validation but currently unused.
        :return: JSON object mapping host_address -> classification.
        """
        def median(samples):
            # Median of a non-empty sample list.
            ordered = sorted(samples)
            mid = len(ordered) // 2
            if len(ordered) % 2 == 0:
                return (ordered[mid - 1] + ordered[mid]) / 2.0
            return ordered[mid]

        ret = {}
        r_cpu = json.loads(r_cpu)
        r_memory = json.loads(r_memory)
        json.loads(r_disk)  # validated only; disk is not used in the verdict yet

        for host in r_cpu:
            host_http = host["host_address"]
            if host["data"] is None:
                continue
            cpu_samples = [sample["data"] for sample in host["data"]]
            if median(cpu_samples) >= 95:
                ret[host_http] = "sobrecarregado"
                continue
            # CPU looks fine - fall back to this host's memory-percent samples.
            memory_samples = []
            for host_mem in r_memory:
                if host_http == host_mem["host_address"]:
                    for data in host_mem["data"]:
                        for value in json.loads(data["data"]):
                            memory_samples.append(value["percent"])
            if memory_samples and median(memory_samples) >= 95:
                ret[host_http] = "sobrecarregado"
            else:
                ret[host_http] = "normal"
        return json.dumps(ret)

    def instances_from_host(self, host_name):
        """List {'instance_name', 'instance_id'} for every VM on `host_name`.

        Scans all Keystone projects because Nova instance listings are
        per-project; placement is read from the OS-EXT-SRV-ATTR:host
        extension attribute.
        """
        attr_host = 'OS-EXT-SRV-ATTR:host'
        ret = []
        for project in self.__keystone.projects:
            for instance in self.__nova.instances(project['name']):
                # Debug trace kept from the original, but parenthesized so it
                # is valid on both Python 2 and 3.
                print(instance._info)
                if instance._info[attr_host] == host_name:
                    ret.append({'instance_name': instance.name, 'instance_id': instance.id})
        return ret

    def migrate_to_host(self, project_name, host_name, instance_id):
        """Ask Nova to migrate `instance_id` (in `project_name`) to `host_name`.

        :return: {"status": "success"} on success, or {"erro": <message>} on
            failure (key kept in Portuguese for API compatibility).
        """
        try:
            # Return value of vm_migration was never used by the original.
            self.__nova.vm_migration(project_name, host_name, instance_id)
        except Exception as exc:
            # str(exc) replaces the deprecated (Python 2 only) exc.message.
            return {"erro": str(exc)}
        return {"status": "success"}


    def get_benchmark_bd(self):
        """All benchmark results stored in the benchmark DB."""
        ret = self.__benchmark_db.get_data_db()
        return ret


    def start_instance_bench(self, project, host):
        """Boot a benchmark instance on `host` via Nova."""
        return self.__nova.start_instance_bench(project, host)


    def get_benchmark(self, project, host):
        """Fetch results from the benchmark VM's HTTP agent (port 5151)."""
        benchmark_ip = self.__nova.get_benchmark_ip(project, host)
        data = requests.get('http://'+benchmark_ip+':5151/get_benchmarking')
        return data.json()
 
    def get_benchmark_status(self, project, host):
        """Poll the benchmark VM on `host` and return its status text."""
        benchmark_ip = self.__nova.get_benchmark_ip(project, host)
        # Debug trace kept, parenthesized so it is valid on Python 2 and 3.
        print(benchmark_ip)
        data = requests.get('http://'+benchmark_ip+':5151/get_status')
        return data.text

    def repeat_benchmark(self, project):
        """Ask the project's benchmark VM to re-run its benchmark suite."""
        benchmark_ip = self.__nova.get_benchmark_ip(project)
        data = requests.get('http://'+benchmark_ip+':5151/start_benchmarking')
        return data.text

    def remove_benchmark_instance(self, host):
        """Delete the benchmark instance on `host`, if one exists.

        :return: Nova's removal result, or the sentinel string
            "sem instancia benchmark" when no benchmark VM is present.
        """
        # Renamed from `id`, which shadows the builtin; `is None` instead of
        # the equality test.
        instance_id = self.__nova.benchmark_id(host)
        if instance_id is None:
            return "sem instancia benchmark"
        return self.__nova.remove_instance(instance_id)

    def hosts_aggregation_cpu(self, timestamp_begin=None, timestamp_end=None):
        """Aggregate per-host CPU headroom into one series per Nova aggregate.

        Each host's utilisation-percent samples are converted to free-CPU
        units ((1 - pct/100) * host cpu count) and the per-host series of an
        aggregate are summed pointwise, truncated to the shortest series.

        :return: JSON list of {"Aggregate": name, "data": [series]}.
        """
        ret = []

        cpu_data = self.hosts_cpu(timestamp_begin, timestamp_end)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result = []
            host_address = aggregate["host_address"]
            for host in host_address:
                host_name = self.__nova.server_name_by_ip(host)
                host_cpu = self.__nova.resource_host(host_name)["cpu"]

                for data in cpu_data:
                    if(data["host_address"]==host):
                        convert = []

                        # Utilisation percent -> free CPUs for this host.
                        for cpu_percent in data["data"]:
                            cpu_percent["data"] = (1 - cpu_percent["data"]/100.0)* host_cpu
                            convert.append(cpu_percent)

                        if(len(result)==0):
                            result = convert
                        else:
                            # Pointwise sum, truncated to the shorter series.
                            if(len(result) > len(convert)):
                                result = result[0:len(convert)]
                            for i in range(len(result)):
                                value = result[i]
                                value["data"] = value["data"] + (convert[i])["data"]
                                result[i] = value

                        break
            ret.append({"Aggregate":aggregate["name"], "data":result})
        return json.dumps(ret)

    def hosts_aggregation_memory(self, timestamp_begin=None, timestamp_end=None):
        """Aggregate per-host memory usage into per-aggregate fractions.

        Each host's memory-percent samples (JSON-encoded [{'percent': ...}])
        are converted to that host's absolute MB share of the aggregate's
        total memory; per-host series are summed pointwise, truncated to the
        shortest series.

        :return: JSON list of {"Aggregate": name, "data": [series]}.
        """
        ret = []

        memory_data = self.hosts_memory(timestamp_begin, timestamp_end)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result = []
            host_address = aggregate["host_address"]
            aggregate_memory = self.__nova.resource_aggregates(aggregate['name'])['memory_mb']
            for host in host_address:
                host_name = self.__nova.server_name_by_ip(host)
                host_memory = self.__nova.resource_host(host_name)["memory_mb"]

                for data in memory_data:
                    if(data["host_address"]==host):
                        convert = []

                        # percent of host -> fraction of the whole aggregate.
                        for memory_percent in data["data"]:
                            memory_percent['data'] = ((json.loads(memory_percent['data'])[0]['percent']/100.0 )*host_memory)/aggregate_memory
                            convert.append(memory_percent)

                        if(len(result)==0):
                            result = convert
                        else:
                            # Pointwise sum, truncated to the shorter series.
                            if(len(result) > len(convert)):
                                result = result[0:len(convert)]
                            for i in range(len(result)):
                                value = result[i]
                                value["data"] = (value["data"] + (convert[i])["data"])
                                result[i] = value

                        break
            ret.append({"Aggregate":aggregate["name"], "data":result})
        return json.dumps(ret)


    def hosts_aggregation_disk(self, timestamp_begin=None, timestamp_end=None):
        """Aggregate per-host disk usage into per-aggregate fractions.

        Each host's disk-percent samples (JSON-encoded [{'percent': ...}])
        are converted to that host's absolute GB share of the aggregate's
        total disk; per-host series are summed pointwise, truncated to the
        shortest series.

        :return: JSON list of {"Aggregate": name, "data": [series]}.
        """
        ret = []

        disk_data = self.hosts_disk(timestamp_begin, timestamp_end)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result = []
            host_address = aggregate["host_address"]
            aggregate_disk = self.__nova.resource_aggregates(aggregate['name'])['disk']
            for host in host_address:
                host_name = self.__nova.server_name_by_ip(host)
                host_disk = self.__nova.resource_host(host_name)["disk_gb"]

                for data in disk_data:
                    if(data["host_address"]==host):
                        convert = []

                        for disk_percent in data["data"]:
                            # /100.0 forces float division: the original's
                            # /100 truncated integer percents to 0 on
                            # Python 2, unlike the memory aggregation which
                            # already used /100.0.
                            disk_percent['data'] = ((json.loads(disk_percent['data'])[0]['percent']/100.0)*host_disk)/aggregate_disk
                            convert.append(disk_percent)

                        if(len(result)==0):
                            result = convert
                        else:
                            # Pointwise sum, truncated to the shorter series.
                            if(len(result) > len(convert)):
                                result = result[0:len(convert)]
                            for i in range(len(result)):
                                value = result[i]
                                value["data"] = value["data"] + (convert[i])["data"]
                                result[i] = value

                        break
            ret.append({"Aggregate":aggregate["name"], "data":result})

        return json.dumps(ret)

    def hosts_aggregation_network(self, timestamp_begin=None, timestamp_end=None):
        """Sum per-host incoming/outgoing network rates per Nova aggregate.

        Rates come from points_reduction_by_server_network(); within each
        aggregate the per-host series are summed pointwise, truncated to the
        shortest series.

        :return: JSON list of {"Aggregate": name, 'incoming_rate': [...],
            'outgoing_rate': [...]}.
        """
        ret = []
        hosts = self.get_compute_nodes_ips()
        network_data = self.points_reduction_by_server_network(timestamp_begin, timestamp_end, hosts)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result_incoming = []
            result_outgoing = []
            host_address = aggregate["host_address"]
            for host in host_address:
                for data in network_data:
                    if(data["host_address"]==host):
                        convert_incoming = []

                        for network_incoming_rate in data["incoming_rate"]:
                            convert_incoming.append(network_incoming_rate)

                        if(len(result_incoming)==0):
                            result_incoming = convert_incoming
                        else:
                            # Pointwise sum, truncated to the shorter series.
                            if(len(result_incoming) > len(convert_incoming)):
                                result_incoming = result_incoming[0:len(convert_incoming)]
                            for i in range(len(result_incoming)):
                                value = result_incoming[i]
                                value["net_bytes_recv"] = value["net_bytes_recv"] + (convert_incoming[i])["net_bytes_recv"]
                                result_incoming[i] = value

                        convert_outgoing = []

                        for network_outgoing_rate in data["outgoing_rate"]:
                            convert_outgoing.append(network_outgoing_rate)

                        if(len(result_outgoing)==0):
                            result_outgoing = convert_outgoing
                        else:
                            # Pointwise sum, truncated to the shorter series.
                            if(len(result_outgoing) > len(convert_outgoing)):
                                result_outgoing = result_outgoing[0:len(convert_outgoing)]
                            for i in range(len(result_outgoing)):
                                value = result_outgoing[i]
                                value["net_bytes_sent"] = value["net_bytes_sent"] + (convert_outgoing[i])["net_bytes_sent"]
                                result_outgoing[i] = value

                        break

            ret.append({"Aggregate":aggregate["name"], 'incoming_rate': result_incoming, 'outgoing_rate': result_outgoing})

        return json.dumps(ret)

    def points_reduction_by_server_cpu(self, timestamp_begin, timestamp_end, hosts):
        """Downsample each host's CPU series via the Reduction helper.

        Series with three or fewer points (or no data at all) are returned
        unchanged.

        :param hosts: host addresses, assumed to be in the same order as the
            rows returned by hosts_cpu() - TODO confirm that ordering
            contract.
        """
        old_data = self.hosts_cpu(timestamp_begin, timestamp_end)
        key = 'data'
        if len(old_data) == 0 or len(old_data[0][key]) <= 3:
            return old_data
        data = []
        for index in range(len(hosts)):
            data.append({
                "host_address": hosts[index],
                'data': self.__reduction.points_reduction(old_data[index]['data'], key),
            })
        # Returned after the loop so an empty `hosts` list yields [] instead
        # of the original's UnboundLocalError on `result`.
        return data
    
    def points_reduction_by_server_memory(self, timestamp_begin, timestamp_end, hosts):
        """Downsample each host's memory-percent series via Reduction.

        Series with three or fewer points (or no data at all) are returned
        unchanged.

        :param hosts: host addresses, assumed to be in the same order as the
            rows returned by hosts_memory() - TODO confirm.
        """
        old_data = self.hosts_memory(timestamp_begin, timestamp_end)
        key = 'data'
        if len(old_data) == 0 or len(old_data[0][key]) <= 3:
            return old_data
        data = []
        for index in range(len(hosts)):
            data.append({
                "host_address": hosts[index],
                'data': self.__reduction.points_reduction_for_percent(old_data[index]['data'], key),
            })
        # Returned after the loop so an empty `hosts` list yields [] instead
        # of the original's UnboundLocalError on `result`.
        return data

    def points_reduction_by_server_disk(self, timestamp_begin, timestamp_end, hosts):
        """Downsample each host's disk series via Reduction.

        Series with three or fewer points (or no data at all) are returned
        unchanged.

        :param hosts: host addresses, assumed to be in the same order as the
            rows returned by hosts_disk() - TODO confirm.
        """
        old_data = self.hosts_disk(timestamp_begin, timestamp_end)
        if len(old_data) == 0 or len(old_data[0]['data']) <= 3:
            return old_data
        data = []
        for index in range(len(hosts)):
            data.append({
                "host_address": hosts[index],
                'data': self.__reduction.points_reduction_disk(old_data[index]['data']),
            })
        # Returned after the loop so an empty `hosts` list yields [] instead
        # of the original's UnboundLocalError on `result`.
        return data

    def points_reduction_by_server_network(self, timestamp_begin, timestamp_end, hosts):
        """Convert raw per-host byte counters into downsampled byte rates.

        For each host, consecutive counter samples are differenced and
        divided by their time delta to obtain bytes/second; negative deltas
        (counter reset/rollover) are skipped.  The resulting series are then
        downsampled via the Reduction helper and a trailing NaN point, if
        any, is dropped.

        NOTE(review): the `hosts` parameter is unused - the host set comes
        entirely from hosts_network(); confirm whether callers expect it to
        filter.
        """
        data = []
        old_data = self.hosts_network(timestamp_begin, timestamp_end)

        for host in old_data:
            single_host_data = {'host_address': host['host_address'], 'incoming_rate': [], 'outgoing_rate': []}

            # A rate needs at least two counter samples.
            if len(host['data']) > 1:
                sample = host['data'][0]
                for sample_index in range(len(host['data']) - 1):
                    sample_index += 1

                    network_data = json.loads(sample['data'])[0]
                    before_timestamp = datetime.strptime(sample['timestamp'], '%Y-%m-%dT%H:%M:%S')
                    before_net_bytes_recv = network_data['net_bytes_recv']
                    before_net_bytes_sent = network_data['net_bytes_sent']

                    next_sample = host['data'][sample_index]
                    network_data = json.loads(next_sample['data'])[0]
                    after_timestamp = datetime.strptime(next_sample['timestamp'], '%Y-%m-%dT%H:%M:%S')
                    after_net_bytes_recv = network_data['net_bytes_recv']
                    after_net_bytes_sent = network_data['net_bytes_sent']

                    timestamp_delta = (after_timestamp - before_timestamp).total_seconds()
                    net_bytes_recv_delta = after_net_bytes_recv - before_net_bytes_recv
                    net_bytes_sent_delta = after_net_bytes_sent - before_net_bytes_sent

                    # Negative delta: the counter reset - skip this pair and
                    # keep `sample` as the comparison base.
                    if (net_bytes_recv_delta < 0) or (net_bytes_sent_delta < 0):
                        continue
                    else:
                        sample = next_sample

                    net_bytes_recv_delta = net_bytes_recv_delta / timestamp_delta
                    net_bytes_sent_delta = net_bytes_sent_delta / timestamp_delta

                    single_host_data['incoming_rate'].append({'timestamp': sample['timestamp'], 'net_bytes_recv': net_bytes_recv_delta})
                    single_host_data['outgoing_rate'].append({'timestamp': sample['timestamp'], 'net_bytes_sent': net_bytes_sent_delta})

                single_host_data['incoming_rate'] = self.__reduction.points_reduction(single_host_data['incoming_rate'], 'net_bytes_recv')
                if math.isnan(single_host_data['incoming_rate'][-1]['net_bytes_recv']):
                    single_host_data['incoming_rate'].pop()

                single_host_data['outgoing_rate'] = self.__reduction.points_reduction(single_host_data['outgoing_rate'], 'net_bytes_sent')
                if math.isnan(single_host_data['outgoing_rate'][-1]['net_bytes_sent']):
                    single_host_data['outgoing_rate'].pop()

            data.append(single_host_data)

        return data

    def points_reduction_vm(self, timestamp_begin,timestamp_end,resource_id):
        """Downsampled cpu_util series for one VM."""
        old_data = json.loads(self.cpu_util_from(timestamp_begin,timestamp_end,resource_id))
        key2 = "cpu_util_percent"
        data = self.__reduction.points_reduction(old_data,key2)
        return data

    def points_reduction_vm_network_incoming(self, timestamp_begin,timestamp_end,resource_id):
        """Downsampled incoming-bytes-rate series for one VM."""
        old_data = json.loads(self.network_incoming_bytes_rate_from(timestamp_begin,timestamp_end,resource_id))
        key2 = "network_incoming_bytes_rate"
        data  = self.__reduction.points_reduction(old_data,key2)
        return data

    def points_reduction_vm_network_outgoing(self, timestamp_begin,timestamp_end,resource_id):
        """Downsampled outgoing-bytes-rate series for one VM."""
        old_data = json.loads(self.network_outgoing_bytes_rate_from(timestamp_begin,timestamp_end,resource_id))
        key2 = "network_outgoing_bytes_rate"
        data  =  self.__reduction.points_reduction(old_data,key2)
        return data

    def vm_info(self):
        """Return one {project_name: {instance_id: instance_name}} per project.

        NOTE(review): the id->name map is built from every compute node and
        is identical for every project; the original recomputed it once per
        project inside the loop.  This version hoists the loop-invariant Nova
        call and flattening out - same output, one Nova call instead of N.
        """
        project = [entry["name"] for entry in json.loads(self.projects())]

        # Flatten the per-node 'nomes' (id -> name) maps once.
        informations = self.__nova.vm_info(project)
        vms_data = {}
        for node in informations:
            for node_name in node.keys():
                vms_data.update(node[node_name]['nomes'])

        # dict(vms_data) keeps each entry an independent dict, matching the
        # original, which rebuilt the map per project.
        return [{project_name: dict(vms_data)} for project_name in project]

    def instances_by_project(self):
        """Group instance names by project: {project: {instance_id: name}}."""
        project_names = [entry["name"] for entry in json.loads(self.projects())]

        nodes = self.__nova.vm_info(project_names)

        grouped = {}
        for node in nodes:
            for node_name in node.keys():
                per_project = node[node_name]['Info_project']
                id_to_name = node[node_name]['nomes']
                for proj in per_project.keys():
                    # setdefault collapses the original's identical if/else
                    # arms into one assignment path.
                    bucket = grouped.setdefault(proj, {})
                    for instance_id in per_project[proj]:
                        bucket[instance_id] = id_to_name[instance_id]

        return grouped
 
    def vcpus_for_aggregate(self, project):
        """JSON-encoded vCPU totals for the project's aggregate."""
        return json.dumps(self.__nova.vcpus_for_aggregate(project))

    def create_snapshot(self, instance_id):
        """Trigger a Nova snapshot of the instance (no return value)."""
        self.__nova.create_snapshot(instance_id)

    def suspend_instance(self, instance_id):
        """Suspend the instance via Nova (no return value)."""
        self.__nova.suspend_instance(instance_id)

    def get_computenodes_names(self):
        """Names of all known compute nodes."""
        return self.__nova.list_compute_nodes()

    def get_compute_nodes_ips(self):
        """IP addresses of every configured host of type 'compute_node'."""
        ips = []
        all_hosts = self.get_hosts('compute_node')
        for host in all_hosts:
            ips.append(host.get_ip())

        return ips

    def get_host_availability_metrics(self, timestamp_begin, timestamp_end):
        """Availability metrics for all hosts over the given window."""
        calculator = HostMetricsCalculator()

        return calculator.get_host_availability_metrics(timestamp_begin, timestamp_end)

    def get_services_status(self, host, timestamp_begin, timestamp_end):
        """Per-service status history for `host` from the hosts DB."""
        return self.__hosts_db.get_service_status_db(host, timestamp_begin, timestamp_end)

    def points_reduction_services_status(self, host, timestamp_begin, timestamp_end):
        """Step-wise downsampled per-service status series for `host`."""
        data = self.get_services_status(host, timestamp_begin, timestamp_end)
        output = {}
        for key in data.keys():
            output[key] = self.__reduction.points_reduction_for_step(data[key], 'status')

        return output

    def get_host_status(self, host, timestamp_begin, timestamp_end):
        """Up/down status history for `host` from the hosts DB."""
        return self.__hosts_db.get_host_status_db(host, timestamp_begin, timestamp_end)

    def points_reduction_host_status(self, host, timestamp_begin, timestamp_end):
        """Step-wise downsampled host up/down status series."""
        data = self.get_host_status(host, timestamp_begin, timestamp_end)

        return {'data': self.__reduction.points_reduction_for_step(data['data'], 'status')}

    def get_hosts(self, type=None):
        """Hosts from the configured host list, optionally filtered by type."""
        # NOTE(review): `type` shadows the builtin; kept for interface
        # compatibility.
        return hosts_from_dict_list(self.__hosts, type)
class DataHandler:
    """Facade over the OpenStack telemetry stack.

    Wraps the Ceilometer, Keystone and Nova clients plus the local
    hosts/benchmark databases and exposes dashboard-friendly (mostly
    JSON-encoded) accessors.
    """

    def __init__(self):
        self.__ceilometer = CeilometerClient()
        self.__keystone = KeystoneClient()
        self.__nova = NovaClient()
        self.__hosts_db = HostDataHandler()
        self.__benchmark_db = BenchmarkDataHandler()

    @staticmethod
    def _mediana(valores):
        """Median of an already-sorted numeric list; 0 for an empty list.

        FIX: the previous inline code averaged resource[index-1] and
        resource[index+1] for even lengths -- the wrong pair, and an
        IndexError for two-element lists.  It also used '/' on the index,
        which is a float under Python 3.
        """
        n = len(valores)
        if n == 0:
            return 0
        if n % 2 == 0:
            return (valores[n // 2 - 1] + valores[n // 2]) / 2.0
        return valores[n // 2]

    def projects(self):
        """JSON-encoded list of Keystone projects."""
        return json.dumps(self.__keystone.projects)

    def sugestion(self):
        """First-fit packing suggestion: which hosts can be powered off
        and where each of their VMs should migrate.

        Returns JSON: {'Hosts': {host: can_power_off},
        'Migracoes': {host: {vm_name: destination_or_None}}}.
        """
        project_list = [p['name'] for p in json.loads(self.projects())]
        host_vm_info = self.__nova.vm_info(project_list)
        desligar = {}   # host -> True when every VM found a destination
        migracoes = {}  # host -> {vm name: destination host (or None)}
        copia_hosts = host_vm_info[:]
        for e in host_vm_info:
            dic_aux = e.copy()
            # Each entry is a single-key dict keyed by the host name.
            # list(d)[0] replaces the Python-2-only d.keys()[0].
            chave = list(e)[0]
            if len(dic_aux[chave]['vms']) > 0:
                vms_aux = dic_aux[chave]['vms'].copy()
                copia_hosts.remove(e)
                migra = False
                migracoes[chave] = {}
                for i in vms_aux:
                    for j in copia_hosts:
                        migra = False
                        destino = list(j)[0]
                        livre = j[destino]['Livre']
                        # First candidate with enough free (cpu, mem, disk).
                        if (livre[0] >= vms_aux[i][0]
                                and livre[1] >= vms_aux[i][1]
                                and livre[2] >= vms_aux[i][2]):
                            j[destino]['Livre'] = [
                                livre[0] - vms_aux[i][0],
                                livre[1] - vms_aux[i][1],
                                livre[2] - vms_aux[i][2],
                            ]
                            migracoes[chave][e[chave]['nomes'].get(i)] = destino
                            migra = True
                            break
                    if not migra:
                        migracoes[chave][e[chave]['nomes'].get(i)] = None
                        desligar[chave] = False
                if chave not in desligar:
                    desligar[chave] = True
            else:
                # Host already empty: it can simply be powered off.
                copia_hosts.remove(e)
                desligar[chave] = True
        return json.dumps({'Hosts': desligar, 'Migracoes': migracoes})

    def cpu_util_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        """JSON cpu_util samples, optionally filtered by window/resource."""
        return json.dumps(self.__ceilometer.get_cpu_util(
            timestamp_begin, timestamp_end, resource_id))

    def cpu_util_flavors(self, timestamp_begin=None, timestamp_end=None):
        """JSON flavor recommendations derived from cpu_util history."""
        data = self.__ceilometer.get_cpu_util_flavors(timestamp_begin, timestamp_end)
        return json.dumps(analytics.recommendations.recomenda_flavor(data))

    def projects_with_instances_and_cpu_util(self):
        """JSON hierarchy {cloud -> project -> instance} for visualization."""
        ret = {'name': 'cloud', 'children': []}
        for p in self.__keystone.tenants:
            proj = {'name': p.name, 'children': []}
            for i in self.__nova.instances(p.name):
                proj['children'].append(
                    {'resource_id': i.id, 'instance_name': i.name})
            ret['children'].append(proj)
        return json.dumps(ret)

    def alarms_history(self, timestamp_begin=None, timestamp_end=None):
        """JSON alarm-history records for the given time window."""
        return json.dumps(self.__ceilometer.get_alarms_history(
            timestamp_begin, timestamp_end))

    def add_alarm(self, name, resource, threshold, operator, period, ev_period):
        """Create a Ceilometer threshold alarm; returns the client result."""
        return self.__ceilometer.set_alarm(
            name, resource, threshold, operator, period, ev_period)

    def alarm_email(self, data_requested):
        """E-mail the owner of the alarm referenced in *data_requested*
        (a literal dict string holding 'alarm_id')."""
        alarm_id = ast.literal_eval(data_requested)['alarm_id']
        userId = self.__ceilometer.get_alarm_userid(alarm_id)
        projectId = self.__ceilometer.get_alarm_projectid(alarm_id)
        userEmail = self.__keystone.get_user_email(userId, projectId)

        # NOTE(review): sender/credentials are hard-coded; move to config.
        send_email('*****@*****.**',
                   [userEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

    def alarm_description(self):
        """JSON description of the configurable alarm parameters."""
        return json.dumps(self.__ceilometer.get_alarm_parameters())

    def delete_alarm(self, alarm_id):
        """Delete alarm *alarm_id*; returns the JSON-encoded client result."""
        return json.dumps(self.__ceilometer.delete_alarms(alarm_id))

    def hosts_cpu(self, timestamp_begin, timestamp_end):
        """CPU-utilization series for all hosts from the hosts DB."""
        return self.__hosts_db.get_data_db('Cpu_Util', timestamp_begin, timestamp_end)

    def hosts_memory(self, timestamp_begin, timestamp_end):
        """Memory-usage series for all hosts from the hosts DB."""
        return self.__hosts_db.get_data_db('Memory', timestamp_begin, timestamp_end)

    def hosts_disk(self, timestamp_begin, timestamp_end):
        """Disk-usage series for all hosts from the hosts DB."""
        return self.__hosts_db.get_data_db('Disk', timestamp_begin, timestamp_end)

    def host_metrics(self, project):
        """Raw Nova metrics for *project*."""
        return self.__nova.metrics(project)

    def hosts_recommendation(self, r_cpu, r_memory, r_disk):
        """Classify each host as 'sobrecarregado' (median >= 95) or 'normal'.

        CPU medians are checked first; only hosts under the CPU threshold
        fall through to the memory check.  *r_disk* is parsed but unused
        (kept for interface compatibility).  Hosts with no CPU data are
        skipped.  NOTE(review): as in the original, `resource` is only
        reset on the non-overloaded path, so CPU samples accumulate
        across overloaded hosts -- preserved, but worth confirming.
        """
        resource = []
        ret = {}
        r_cpu = json.loads(r_cpu)
        r_memory = json.loads(r_memory)
        r_disk = json.loads(r_disk)
        for host in r_cpu:
            host_http = host["host_address"]
            if host["data"] is None:
                continue
            for data in host["data"]:
                resource.append(data["data"])
            resource = sorted(resource)
            mediana = self._mediana(resource)  # FIX: correct median (see helper)
            if mediana >= 95:
                ret[host_http] = "sobrecarregado"
            else:
                resource = []
                for host_mem in r_memory:
                    if host["host_address"] == host_mem["host_address"]:
                        for data in host_mem["data"]:
                            for value in json.loads(data["data"]):
                                resource.append(value["percent"])
                        resource = sorted(resource)
                mediana = self._mediana(resource)
                if mediana >= 95:
                    ret[host_http] = "sobrecarregado"
                else:
                    ret[host_http] = "normal"
        return json.dumps(ret)

    def instances_from_host(self, host_name):
        """List {'instance_name', 'instance_id'} dicts for every instance
        scheduled on *host_name*, scanning all projects."""
        ret = []
        projects = self.__keystone.projects
        for project in projects:
            instances = self.__nova.instances(project['name'])
            for instance in instances:
                print(instance._info)  # FIX: print() call (was a Py2 statement)
                if instance._info['os-extended-server-attributes:host'] == host_name:
                    ret.append({'instance_name': instance.name,
                                'instance_id': instance.id})
        return ret

    def migrate_to_host(self, project_name, host_name, instance_id):
        """Live-migrate *instance_id* to *host_name*; returns True.

        Raises MigrateException(400) when the destination equals the
        current host and MigrateException(500) when moving off 'truta'
        to a compute node.
        """
        host_vm = self.__nova.vm_hostname(project_name, instance_id)
        atual = host_vm._info['os-extended-server-attributes:host']
        if atual == host_name:
            raise MigrateException(400, "Migracao para o mesmo destino")
        # FIX: this branch was indented with a tab (TabError under Python 3).
        elif atual == 'truta' and host_name != 'truta':
            raise MigrateException(500, "Migracao de host para compute node")
        else:
            self.__nova.vm_migration(project_name, host_name, instance_id)
            return True

    def get_benchmark_bd(self):
        """All stored benchmark results from the benchmark database."""
        return self.__benchmark_db.get_data_db()

    def start_instance_bench(self, project):
        """Boot the benchmark instance in *project*."""
        return self.__nova.start_instance_bench(project)

    def get_benchmark(self, project):
        """Fetch finished benchmark results (JSON) from the benchmark VM."""
        benchmark_ip = self.__nova.get_benchmark_ip(project)
        data = requests.get('http://'+benchmark_ip+':5151/get_benchmarking')
        return data.json()

    def get_benchmark_status(self, project):
        """Current status string reported by the benchmark VM."""
        benchmark_ip = self.__nova.get_benchmark_ip(project)
        data = requests.get('http://'+benchmark_ip+':5151/get_status')
        return data.text

    def repeat_benchmark(self, project):
        """Ask the benchmark VM to run the benchmark again."""
        benchmark_ip = self.__nova.get_benchmark_ip(project)
        data = requests.get('http://'+benchmark_ip+':5151/start_benchmarking')
        return data.text

    def remove_benchmark_instance(self):
        """Delete the benchmark instance if one exists, else a notice string."""
        instance_id = self.__nova.benchmark_id()
        if instance_id is None:
            return "sem instancia benchmark"
        return self.__nova.remove_instance(instance_id)
 # NOTE(review): stray duplicate of DataHandler.__init__ left at a
 # one-space indent outside any class -- syntactically broken leftover,
 # presumably from a bad merge; candidate for deletion after confirming
 # nothing references it.
 def __init__(self):
     self.__ceilometer = CeilometerClient()
     self.__keystone = KeystoneClient()
     self.__nova = NovaClient()
     self.__hosts_db = HostDataHandler()
     self.__benchmark_db = BenchmarkDataHandler()
class DataHandler:
    """Facade over the Ceilometer, Keystone and Nova clients plus the
    local hosts database, exposing dashboard-friendly JSON accessors."""

    def __init__(self):
        self.__ceilometer = CeilometerClient()
        self.__keystone = KeystoneClient()
        self.__nova = NovaClient()
        self.__hosts_db = HostDataHandler()

    @staticmethod
    def _mediana(valores):
        """Median of an already-sorted numeric list; 0 for an empty list.

        FIX: the previous inline code averaged resource[index-1] and
        resource[index+1] for even lengths -- the wrong pair, and an
        IndexError for two-element lists.  It also used '/' on the index,
        which is a float under Python 3.
        """
        n = len(valores)
        if n == 0:
            return 0
        if n % 2 == 0:
            return (valores[n // 2 - 1] + valores[n // 2]) / 2.0
        return valores[n // 2]

    def projects(self):
        """JSON-encoded list of Keystone projects."""
        return json.dumps(self.__keystone.projects)

    def cpu_util_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        """JSON cpu_util samples, optionally filtered by window/resource."""
        return json.dumps(self.__ceilometer.get_cpu_util(
            timestamp_begin, timestamp_end, resource_id))

    def cpu_util_flavors(self, timestamp_begin=None, timestamp_end=None):
        """JSON flavor recommendations derived from cpu_util history."""
        data = self.__ceilometer.get_cpu_util_flavors(timestamp_begin, timestamp_end)
        return json.dumps(analytics.recommendations.recomenda_flavor(data))

    def projects_with_instances_and_cpu_util(self):
        """JSON hierarchy {cloud -> project -> latest cpu_util samples}."""
        ret = {'name': 'cloud', 'children': []}
        for p in self.__keystone.tenants:
            proj = {'name': p.name, 'children': []}
            instances = self.__nova.instances(p.name)
            cpu_data = get_latest_cpu_util_from_database(
                project_id=p.id, limit=len(instances))
            for sample in cpu_data:
                # sample[4]/sample[6] look like DB row columns (resource id,
                # cpu_util value) -- confirm against the query's SELECT order.
                proj['children'].append(
                    {'resource_id': sample[4], 'cpu_util_percent': sample[6]})
            ret['children'].append(proj)
        return json.dumps(ret)

    def alarms_history(self, timestamp_begin=None, timestamp_end=None):
        """JSON alarm-history records for the given time window."""
        return json.dumps(self.__ceilometer.get_alarms_history(
            timestamp_begin, timestamp_end))

    def add_alarm(self, name, resource, threshold, operator, period, ev_period):
        """Create a Ceilometer threshold alarm; returns the client result."""
        return self.__ceilometer.set_alarm(
            name, resource, threshold, operator, period, ev_period)

    def alarm_email(self, data_requested):
        """E-mail the owner of the alarm referenced in *data_requested*
        (a literal dict string holding 'alarm_id')."""
        alarm_id = ast.literal_eval(data_requested)['alarm_id']
        userId = self.__ceilometer.get_alarm_userid(alarm_id)
        projectId = self.__ceilometer.get_alarm_projectid(alarm_id)
        userEmail = self.__keystone.get_user_email(userId, projectId)

        # NOTE(review): sender/credentials are hard-coded; move to config.
        send_email('*****@*****.**',
                   [userEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

    def alarm_description(self):
        """JSON description of the configurable alarm parameters."""
        return json.dumps(self.__ceilometer.get_alarm_parameters())

    def delete_alarm(self, alarm_id):
        """Delete alarm *alarm_id*; returns the JSON-encoded client result."""
        return json.dumps(self.__ceilometer.delete_alarms(alarm_id))

    def hosts_cpu(self, timestamp_begin, timestamp_end):
        """CPU-utilization series for all hosts from the hosts DB."""
        return self.__hosts_db.get_data_db('Cpu_Util', timestamp_begin, timestamp_end)

    def hosts_memory(self, timestamp_begin, timestamp_end):
        """Memory-usage series for all hosts from the hosts DB."""
        return self.__hosts_db.get_data_db('Memory', timestamp_begin, timestamp_end)

    def hosts_disk(self, timestamp_begin, timestamp_end):
        """Disk-usage series for all hosts from the hosts DB."""
        return self.__hosts_db.get_data_db('Disk', timestamp_begin, timestamp_end)

    def host_metrics(self, project):
        """Raw Nova metrics for *project*."""
        return self.__nova.metrics(project)

    def hosts_recommendation(self, r_cpu, r_memory, r_disk):
        """Classify each host as 'sobrecarregado' (median >= 95) or 'normal'.

        CPU medians are checked first; only hosts under the CPU threshold
        fall through to the memory check.  *r_disk* is parsed but unused
        (kept for interface compatibility).  Hosts with no CPU data are
        skipped.  NOTE(review): as in the original, `resource` is only
        reset on the non-overloaded path, so CPU samples accumulate
        across overloaded hosts -- preserved, but worth confirming.
        """
        resource = []
        ret = {}
        r_cpu = json.loads(r_cpu)
        r_memory = json.loads(r_memory)
        r_disk = json.loads(r_disk)
        for host in r_cpu:
            host_http = host["host_address"]
            if host["data"] is None:
                continue
            for data in host["data"]:
                resource.append(data["data"])
            resource = sorted(resource)
            mediana = self._mediana(resource)  # FIX: correct median (see helper)
            if mediana >= 95:
                ret[host_http] = "sobrecarregado"
            else:
                resource = []
                for host_mem in r_memory:
                    if host["host_address"] == host_mem["host_address"]:
                        for data in host_mem["data"]:
                            for value in json.loads(data["data"]):
                                resource.append(value["percent"])
                        resource = sorted(resource)
                mediana = self._mediana(resource)
                if mediana >= 95:
                    ret[host_http] = "sobrecarregado"
                else:
                    ret[host_http] = "normal"
        return json.dumps(ret)

    def instances_from_host(self, host_name):
        """List {'instance_name', 'instance_id'} dicts for every instance
        scheduled on *host_name*, scanning all projects."""
        ret = []
        projects = self.__keystone.projects
        for project in projects:
            instances = self.__nova.instances(project['name'])
            for instance in instances:
                print(instance._info)  # FIX: print() call (was a Py2 statement)
                if instance._info['os-extended-server-attributes:host'] == host_name:
                    ret.append({'instance_name': instance.name,
                                'instance_id': instance.id})
        return ret