Example #1
 def fit(self,
         stable_antecedents: List[str],
         flexible_antecedents: List[str],
         consequent: str,
         conf: float,
         supp: float,
         desired_classes: List[str] = None,
         desired_changes: List[list] = None,
         is_nan: bool = False,
         is_reduction: bool = True,
         min_stable_antecedents: int = 1,
         min_flexible_antecedents: int = 1):
     """
     Get action rules.
     Define antecedent and consequent.
     - stable_antecedents - List of column names.
     - flexible_antecedents - List of column names.
     - consequent - Name of column.
     Confidence and support.
     - conf - value in % for confidence of classification rules.
     - supp - value in % for support of classification rules.
     Desired classes or desired changes must be entered.
     - desired_classes - List of decision states. For example ["1"]. DEFAULT: None
     - desired_changes - List of desired changes. For example [["0", "1"]]. DEFAULT: None
     Should nan values be used.
     - is_nan - True means nan values are used, False means nan values are not used. DEFAULT: FALSE
     Should the reduction table be used.
     - is_reduction - is reduction table used DEFAULT: TRUE
     Minimal number of stable and flexible couples
     - min_stable_antecedents - min. stable couples. DEFAULT: 1
     - min_flexible_antecedents - min. flexible couples. DEFAULT: 1
     """
     if bool(desired_classes) != bool(desired_changes):
         desired_state = DesiredState(desired_classes=desired_classes,
                                      desired_changes=desired_changes)
     else:
         raise ValueError(
             "Enter either desired classes or desired changes (exactly one of them)")
     antecedents = stable_antecedents + flexible_antecedents
     self.decisions.prepare_data_fim(antecedents, consequent)
     self.decisions.fit_fim_apriori(conf=conf, support=supp)
     self.decisions.generate_decision_table()
     stable = self.decisions.decision_table[stable_antecedents]
     flex = self.decisions.decision_table[flexible_antecedents]
     target = self.decisions.decision_table[[consequent]]
     supp = self.decisions.support
     conf = self.decisions.confidence
     reduced_tables = Reduction(stable, flex, target, desired_state, supp,
                                conf, is_nan)
     if is_reduction:
         reduced_tables.reduce()
     action_rules = ActionRules(
         reduced_tables.stable_tables, reduced_tables.flexible_tables,
         reduced_tables.decision_tables, desired_state, reduced_tables.supp,
         reduced_tables.conf, is_nan, min_stable_antecedents,
         min_flexible_antecedents)
     action_rules.fit()
     self.action_rules = action_rules.action_rules
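A minimal usage sketch for the fit() call above; the wrapper class name (ActionRulesDiscovery), its load_pandas() step, and the column names are assumptions for illustration, not part of the example:

# Hypothetical usage; the class name, the loader call, and the column
# names are assumptions. Only the fit() signature comes from above.
miner = ActionRulesDiscovery()                # assumed class that defines fit()
miner.load_pandas(df)                         # assumed loader; df is a pandas DataFrame
miner.fit(stable_antecedents=["age"],
          flexible_antecedents=["treatment"],
          consequent="outcome",
          conf=60,                            # min. confidence in %
          supp=5,                             # min. support in %
          desired_changes=[["0", "1"]])       # pass exactly one of desired_classes/desired_changes
print(miner.action_rules)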
Example #2
import getopt
import sys

# Polynomial and Reduction are provided by the surrounding project.


def main(argv):
    inputfile = ''
    outputfile = ''
    debug = False
    try:
        opts, args = getopt.getopt(argv, "hi:o:d", ["ifile=", "ofile=", "debug"])
    except getopt.GetoptError:
        print('single.py -i <inputfile> -o <outputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('single.py -i <inputfile> -o <outputfile> -d for debug')
            sys.exit()
        elif opt in ("-d", "--debug"):
            debug = True
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg

    try:
        fi = open(inputfile, "r")
        fl = open(outputfile, "a")
    except IOError:
        print('single.py -i <inputfile> -o <outputfile>')
        sys.exit(2)

    # Parse one Polynomial per input line.
    pols = []
    for line in fi:
        try:
            pols.append(Polynomial(line))
        except Exception:
            print(line)
            sys.exit(2)
    fi.close()

    print(len(pols))
    # Reduce every non-constant polynomial and append the result to the output file.
    for pol in pols:
        if len(pol.coefs()) > 1:
            red = Reduction(debug)
            count = red.reduction(pol.coefs())
            result = str(pol.coefs()) + ":" + str(count)
            print(result)
            fl.write(result + "\n")
    fl.close()
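For completeness, the conventional entry point for a getopt-style script like this one; the original module's entry point is not shown, so this is a sketch:

# Standard entry point (assumed; not part of the original snippet).
if __name__ == '__main__':
    main(sys.argv[1:])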
Example #3
    def SNF_reduction(self):
        # Recompute the Smith normal form until every differential matrix
        # equals its own diagonal embedded in a zero matrix.
        while True:
            self._computedSNF = False
            self.computeSNF()
            all_diag = True
            for p in self.dimensions:
                k = min(self.D[p].shape)
                T = np.zeros_like(self.D[p])
                T[:k, :k] = np.diag(np.diag(self.D[p]))
                if not np.all(T == self.D[p]):
                    all_diag = False
                    break
            if all_diag:
                break

        f_A = ChainMap(self, matrices={q: self.A[q].T for q in self.dimensions})
        f_G = ChainMap(self, matrices={q: self.G[q].T for q in self.dimensions})
        f_D = ChainMap(self, matrices={q: self.D[q].T for q in self.dimensions}, degree=self._degree)

        M = ChainComplex(cell_class=self.cell_class)
        M._degree = +1
        for q in self.dimensions:
            M[q] = self[q]
        M.d = f_D

        return Reduction(
            src=self, dst=M,
            projection=f_G,
            inclusion=f_A,
            integral=ChainMap(self, degree=-self._degree)
        )
Example #4
 def run(self):
     print("Starting thread: " + str(self.threadID) + '\n')
     for i in self.polynomials:
         reduc = Reduction()
         # Serialize console output so messages from threads don't interleave.
         self.lockscreen.acquire()
         print('Thread: ' + str(self.threadID) + ' Doing: ' + str(i.coefs()))
         self.lockscreen.release()
         count = reduc.reduction(i.coefs())
         # Serialize appends to the shared output file.
         self.locker.acquire()
         r = str(i.coefs()) + ":" + str(count)
         f = open(self.save, "a")
         f.write(r + '\n')
         f.close()
         self.locker.release()
         del reduc
Example #6
    def reduction(self):
        cplx = self.decomposition()

        d = self.src.d
        pi_t, pi_s, pi_c, iota_t, iota_s, iota_c = self._projections_inclusions(cplx)
        d_33 = pi_c * d * iota_c
        d_31 = pi_c * d * iota_t
        d_21 = pi_s * d * iota_t

        # d_21 is assumed to be invertible over the integers (determinant ±1),
        # so its floating-point inverse can be cast back to an integer matrix.
        d_21_inv = ChainMap(d_21.dst, d_21.src, degree=+1)
        for p in self.dimensions:
            d_21_inv[p] = np.linalg.inv(d_21[p + 1]).astype(np.int32)
        d_23 = pi_s * d * iota_c

        cplx['c'].d = d_33 - d_31 * d_21_inv * d_23

        f = pi_c - d_31 * d_21_inv * pi_s
        g = iota_c - iota_t * d_21_inv * d_23

        h = iota_t * d_21_inv * pi_s

        return Reduction(self.src, cplx['c'], f, g, h)
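A note on the formulas above: splitting the complex into target, source, and core parts (the t, s, c projections and inclusions), this is the standard reduction by Gaussian elimination, valid when the block $d_{21}$ is invertible:

    d' = d_{33} - d_{31} d_{21}^{-1} d_{23},    f = \pi_c - d_{31} d_{21}^{-1} \pi_s,
    g  = \iota_c - \iota_t d_{21}^{-1} d_{23},  h = \iota_t d_{21}^{-1} \pi_s,

where f, g, and h are the projection, inclusion, and homotopy packaged in the returned Reduction.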
Example #7
 def __reduction(self):
     reductor = Reduction(self.__population, self.population_size,
                          self.x_min, self.x_max)
     reductor.perform()
Example #8
		print "I am here"
		# threads = []
		# i = 0
		# j = 2
		# for temp in range(0, len(pols)/2):
		# 	if (j > len(pols)):
		# 		j = len(pols)
		# 		thread = ThreadCount(temp,lockScreen, lock, pols[i:j], save)
		# 		i = j+1
		# 		j += 2
		# 		threads.append(thread)
		# for thread in threads:
		# 	thread.start()
			# for current in threads:
		# 	current.join()
		for i in pols:
			print "doing pol: " + str(i.coefs())
			reduc = Reduction()
			count = reduc.reduction(i.coefs())
			r = str(i.coefs()) + ":" + str(count)
			f_save.write(r + '\n')
			del reduc








class DataHandler:

    def __init__(self, config_path='openstack_dashboard/api/telemetry_api/environment.conf'):
        self.__config = ConfigParser.ConfigParser()
        self.__config.read(config_path)
        #self.__ceilometer = CeilometerClient(self.__config)
        self.__ceilometer = CeilometerClient()
        self.__keystone = KeystoneClient(self.__config)


        #self.__nova = NovaClient(self.__config)
        self.__nova = NovaClient()

        server = self.__config.get('Misc', 'dbserver')
        user = self.__config.get('Misc', 'dbuser')
        passwd = self.__config.get('Misc', 'dbpass')
        database = self.__config.get('Misc', 'hostsdbname')
        table = self.__config.get('Misc', 'hostsdbtable')
        self.__hosts = ast.literal_eval(self.get_config().get('Openstack', 'Hosts'))
        self.__hosts_db = HostDataHandler()
        self.__benchmark_db = BenchmarkDataHandler(server, user, passwd)
        self.__reduction = Reduction()

    def get_config(self):
        return self.__config

    def projects(self):
        return json.dumps(self.__keystone.projects)

    def get_critical_hosts(self,instances_critical, information):
        critical_hosts = []
        for cpn in information:
            cpn_name = cpn.keys()[0]
            cpn_server_list = cpn[cpn_name]['vms'].keys()
            for server in cpn_server_list:
                if server in instances_critical:
                    critical_hosts.append(cpn_name)
        return critical_hosts

    def suggestion(self, list_not_ignore=[]):
        project_list = [ project['name'] for project in json.loads(self.projects())] #returns the list of existing projects
        compute_nodes_info_list = self.__nova.vm_info(project_list) #list of jsons - json correspond to a compute node information
        instances_id_project = {} #dict instances_id : project_name
        for compute_node_aux in compute_nodes_info_list:
            try:
                compute_name = compute_node_aux.keys()[0] #name of the compute node, used to access its information
                for key in compute_node_aux[compute_name]['Info_project'].keys(): #each key is a project
                    #each id identifies an instance that belongs to project `key` and runs on compute_name
                    for id in compute_node_aux[compute_name]['Info_project'][key]:
                        instances_id_project[id] = key
            except Exception as excp:
                return {"error": excp.message}
        #get the list of critical instances (can't migrate them)
        critical_instances = self.__nova.critical_instances(project_list)
        #list of critical hosts
        critical_cpn = self.get_critical_hosts(critical_instances,compute_nodes_info_list)
        shutdown = {} #dict compute_node : True/False for shutdown
        migrations = {} #dict with all migrations
        compute_nodes_copy = compute_nodes_info_list[:] #auxiliary copy used by the algorithm
        owner_cpn_instance = {}
        #for cp in compute_nodes_info_list:
        #    owner_cpn = cp.keys()[0]
        #    vms_cp = cp[owner_cpn]['vms'].keys()
        #    for v in vms_cp:
        #        owner_cpn_instance[v] = owner_cpn
        #beginning of the algorithm
        #cpn_data - data for one compute node
        try:
            for cpn_data in compute_nodes_info_list:
                data = cpn_data.copy() #all the information about one compute node
                actual_cpn = cpn_data.keys()[0] #current compute node (try to migrate all of its instances)
                #verify whether the compute node has critical instances
                if actual_cpn not in critical_cpn:
                    if len(data[actual_cpn]['vms']) > 0:
                        instances_data = data[actual_cpn]['vms'].copy() #copy of the dict of vms
                        compute_nodes_copy.remove(cpn_data) #remove this compute node's data from the copy
                        migration_flag = False #flag: can the instance migrate to another compute node
                        migrations[actual_cpn] = {} #dict with all migrations for this compute node
                        for instance_id in instances_data:
                            for other_cpn in compute_nodes_copy:
                                #verification - instance is not critical
                                if (other_cpn not in critical_cpn and instance_id not in critical_instances):
                                    migration_flag = False
                                    other_cpn_name = other_cpn.keys()[0]
                                    if list_not_ignore == [] or actual_cpn in list_not_ignore:
                                        if(other_cpn[other_cpn_name]['Livre'][0] >= instances_data[instance_id][0] and
                                           other_cpn[other_cpn_name]['Livre'][1] >= instances_data[instance_id][1] and
                                           other_cpn[other_cpn_name]['Livre'][2] >= instances_data[instance_id][2]):
                                           from_cpn = None
                                           migrate_to_cpn = None
                                           if instance_id in owner_cpn_instance:
                                               from_cpn = owner_cpn_instance[instance_id].keys()[0]
                                               migrate_to_cpn = owner_cpn_instance[instance_id][from_cpn]
                                           else:
                                               from_cpn = actual_cpn
                                               migrate_to_cpn = other_cpn_name
                                           if actual_cpn == from_cpn or actual_cpn == migrate_to_cpn:
                                               #update values of free resources
                                               new_values = [other_cpn[other_cpn_name]['Livre'][0] - instances_data[instance_id][0],
                                                         other_cpn[other_cpn_name]['Livre'][1] - instances_data[instance_id][1],
                                                         other_cpn[other_cpn_name]['Livre'][2] - instances_data[instance_id][2]]
                                               other_cpn[other_cpn_name]['Livre'] = new_values
                                               #sending instance to the other host with all information
                                               instances_other_cpn = other_cpn[other_cpn_name]['vms']
                                               instances_other_cpn[instance_id] = instances_data[instance_id]
                                               other_cpn[other_cpn_name]['vms'] = instances_other_cpn
                                               other_cpn[other_cpn_name]['nomes'][instance_id] = data[actual_cpn]['nomes'][instance_id]
                                               migration_flag = True
                                               migrations[actual_cpn][instance_id] = [ other_cpn_name , cpn_data[actual_cpn]['nomes'].get(instance_id) ,instances_id_project[instance_id]]
                                               #update future owner of instance
                                               owner_cpn_instance[instance_id] = {actual_cpn : other_cpn_name}
                                           else:
                                               continue
                                        else:
                                            break 
                                    else:
                                        migration_flag = False
                                else:
                                    continue
                            #update owner when no migration happened
                            if not migration_flag:
                                if instance_id in owner_cpn_instance:
                                    if (actual_cpn in owner_cpn_instance[instance_id] or
                                            actual_cpn in owner_cpn_instance[instance_id].values()):
                                        continue
                                else:
                                    migrations[actual_cpn][instance_id] = None
                                    shutdown[actual_cpn] = False
                        if actual_cpn not in shutdown:
                            shutdown[actual_cpn] = True
                    else:
                        compute_nodes_copy.remove(cpn_data)
                        shutdown[actual_cpn] = True
                        continue
                else:
                    compute_nodes_copy.remove(cpn_data)
                    shutdown[actual_cpn] = False
        except Exception as excp2:
            return {"error in algorithm suggestion": excp2.message}

        for host_key in migrations.keys():
            if None in migrations[host_key].values():
                migrations[host_key] = {}
        output = {} #json output with all data
        output['Hosts'] = shutdown
        output['Migracoes'] = migrations
        #recomendation = self.remove_duplicated_migrations(output)
        return output

    #No longer used
    #def remove_duplicated_migrations(self, output):
    #    result = output
    #    for compute_node in result['Migracoes'].keys():
    #        for server  in result['Migracoes'][compute_node].keys():
    #            if not self.__nova.verify_host_has_server(compute_node,server):
    #                result['Migracoes'][compute_node].pop(server)
    #    return result

    def cpu_util_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        return json.dumps(self.__ceilometer.get_cpu_util(timestamp_begin, timestamp_end, resource_id))

    def cpu_util_flavors(self, timestamp_begin=None, timestamp_end=None):
        data = self.__ceilometer.get_cpu_util_flavors(timestamp_begin, timestamp_end)
        ret = analytics.recommendations.recomenda_flavor(data)
        return json.dumps(ret)

    def network_incoming_bytes_rate_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        return json.dumps(self.__ceilometer.get_network_incoming_bytes_rate(timestamp_begin, timestamp_end, resource_id))

    def network_outgoing_bytes_rate_from(self, timestamp_begin=None, timestamp_end=None, resource_id=None):
        return json.dumps(self.__ceilometer.get_network_outgoing_bytes_rate(timestamp_begin, timestamp_end, resource_id))

    def projects_with_instances_and_cpu_util(self):
        projects = self.__keystone.tenants

        ret = { 'name' : 'cloud', 'children' : [] }

        for p in projects:
            proj = { 'name' : p.name, 'children' : [] }

            instances = self.__nova.instances(p.name)

            for i in instances:                 
                proj['children'].append({ 'resource_id' : i.id, 'instance_name' : i.name })

            ret['children'].append(proj)

        return ret

    def alarms_history(self, timestamp_begin=None, timestamp_end=None):
        return self.__ceilometer.get_alarms_history(timestamp_begin, timestamp_end)

    def add_alarm(self, name, resource, threshold, operator, period, ev_period, send_mail, email_admin, instance=""):
        return self.__ceilometer.set_alarm(name, resource, threshold, operator, period, ev_period, send_mail, email_admin, instance)

    def alarm_email(self, data_requested):
        alarm_id = ast.literal_eval(data_requested)['alarm_id']
        userId = self.__ceilometer.get_alarm_userid(alarm_id)
        projectId = self.__ceilometer.get_alarm_projectid(alarm_id)
        userEmail = self.__keystone.get_user_email(userId, projectId)
        copy_admin = self.__ceilometer.get_alarm_email_status(alarm_id)
        adminEmail = self.__keystone.get_user_email(self.__keystone.get_user(projectId,'admin'),projectId)



        if 'True' in copy_admin[0] and 'True' in copy_admin[1]:
            send_email('*****@*****.**',
                   [adminEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

            send_email('*****@*****.**',
                   [userEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

        elif 'True' in copy_admin[0] and 'False' in copy_admin[1]:
            send_email('*****@*****.**',
                   [userEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

        elif 'False' in copy_admin[0] and 'True' in copy_admin[1]:
            send_email('*****@*****.**',
                   [adminEmail],
                   [],
                   'Alert Telemetry Cloud',
                   'Email disparado pelo alarme!!!',
                   '*****@*****.**',
                   '4n4lyt1cs')

    def alarm_description(self):
        return self.__ceilometer.get_alarm_parameters()
    
    def delete_alarm(self, alarm_id):
        return json.dumps(self.__ceilometer.delete_alarms(alarm_id))

    def hosts_cpu(self, timestamp_begin, timestamp_end):
        return self.__hosts_db.get_data_db('Cpu_Util', timestamp_begin, timestamp_end)

    def hosts_memory(self, timestamp_begin, timestamp_end):
        return self.__hosts_db.get_data_db('Memory', timestamp_begin, timestamp_end)

    def hosts_disk(self, timestamp_begin, timestamp_end):
        return self.__hosts_db.get_data_db('Disk', timestamp_begin, timestamp_end)

    def hosts_network(self, timestamp_begin, timestamp_end):
        return self.__hosts_db.get_data_db('Network', timestamp_begin, timestamp_end)

    def host_metrics(self, project):
        return self.__nova.metrics(project)

    def host_aggregates(self, project):
        return self.__nova.host_aggregates(project)

    def resource_host(self, host):
        return self.__nova.resource_host(host)


    def hosts_recommendation(self, r_cpu, r_memory , r_disk):
        resource = []
        ret = {}
        r_cpu = json.loads(r_cpu)
        r_memory = json.loads(r_memory)
        r_disk = json.loads(r_disk)
        for host in r_cpu:
            resource = []
            host_http = host["host_address"]
            if host["data"] is None:
                continue
            for data in host["data"]:
                resource.append(data["data"])
            resource = sorted(resource)
            # Median of the sorted CPU samples.
            if len(resource) % 2 == 0:
                index = len(resource) / 2
                mediana = (resource[index - 1] + resource[index]) / 2
            else:
                mediana = resource[len(resource) / 2]

            if mediana >= 95:
                ret[host_http] = "sobrecarregado"
            else:
                resource = []
                for host_mem in r_memory:
                    if(host["host_address"]  == host_mem["host_address"]):
                        for data in host_mem["data"]:
                            for value in json.loads(data["data"]):
                                resource.append(value["percent"])
                        resource = sorted(resource)

                # Median of the sorted memory samples.
                if len(resource) % 2 == 0:
                    index = len(resource) / 2
                    mediana = (resource[index - 1] + resource[index]) / 2
                else:
                    mediana = resource[len(resource) / 2]

                if mediana >= 95:
                    ret[host_http] = "sobrecarregado"
                else:
                    ret[host_http] = "normal"
        return json.dumps(ret)

    def instances_from_host(self, host_name):
        attr_host = 'OS-EXT-SRV-ATTR:host'
        ret = []
        projects = self.__keystone.projects
        for project in projects:
            instances = self.__nova.instances(project['name'])
            for instance in instances:
                print(instance._info)
                if instance._info[attr_host] == host_name:
                    ret.append({'instance_name' : instance.name, 'instance_id' : instance.id})
        return ret   

    def migrate_to_host(self, project_name, host_name, instance_id):
        #host_vm = self.__nova.vm_hostname(project_name, instance_id)
        #attr_host = 'OS-EXT-SRV-ATTR:host'
        #if host_vm._info[attr_host] == host_name:
        #    raise MigrateException(400, "Migracao para o mesmo destino")
        #elif host_vm._info[attr_host] == 'truta' and host_name != 'truta':
        #    raise MigrateException(500, "Migracao de host para compute node")
        #else:
        try:
            retorno = self.__nova.vm_migration(project_name,host_name,instance_id)
        except Exception as a:
            return {"erro":a.message}
        return {"status":"success"}


    def get_benchmark_bd(self):
        ret = self.__benchmark_db.get_data_db()
        return ret


    def start_instance_bench(self, project, host):
        return self.__nova.start_instance_bench(project, host)


    def get_benchmark(self, project, host):
        benchmark_ip = self.__nova.get_benchmark_ip(project, host)
        data = requests.get('http://'+benchmark_ip+':5151/get_benchmarking')
        return data.json()
 
    def get_benchmark_status(self, project, host):
        benchmark_ip = self.__nova.get_benchmark_ip(project, host)
        print(benchmark_ip)
        data = requests.get('http://'+benchmark_ip+':5151/get_status')
        return data.text

    def repeat_benchmark(self, project):
        benchmark_ip = self.__nova.get_benchmark_ip(project)
        data = requests.get('http://'+benchmark_ip+':5151/start_benchmarking')
        return data.text

    def remove_benchmark_instance(self, host):
        id = self.__nova.benchmark_id(host)
        if id is None:
            return "sem instancia benchmark"
        else:
            return self.__nova.remove_instance(id)

    def hosts_aggregation_cpu(self, timestamp_begin=None, timestamp_end=None):
        ret = []

        cpu_data = self.hosts_cpu(timestamp_begin, timestamp_end)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result = []
            host_address = aggregate["host_address"]
            for host in host_address:
                host_name = self.__nova.server_name_by_ip(host)
                host_cpu = self.__nova.resource_host(host_name)["cpu"]
                
                for data in cpu_data:
                    if(data["host_address"]==host):
                        convert = []

                        for cpu_percent in data["data"]:
                            cpu_percent["data"] = (1 - cpu_percent["data"]/100.0)* host_cpu
                            convert.append(cpu_percent)
 
                        if(len(result)==0):
                            result = convert
                        else:
                            if(len(result) > len(convert)):
                                result = result[0:len(convert)]
                            for i in range(len(result)):
                                value = result[i]
                                value["data"] = value["data"] + (convert[i])["data"]
                                result[i] = value
                                
                        break
            ret.append({"Aggregate":aggregate["name"], "data":result})
        return json.dumps(ret)

    def hosts_aggregation_memory(self, timestamp_begin=None, timestamp_end=None):
        ret = []

        memory_data = self.hosts_memory(timestamp_begin, timestamp_end)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result = []
            host_address = aggregate["host_address"]
            aggregate_memory = self.__nova.resource_aggregates(aggregate['name'])['memory_mb']
            for host in host_address:	
                host_name = self.__nova.server_name_by_ip(host)
                host_memory = self.__nova.resource_host(host_name)["memory_mb"]

                for data in memory_data:
                    if(data["host_address"]==host):
                        convert = []

                        for memory_percent in data["data"]:
                            memory_percent['data'] = ((json.loads(memory_percent['data'])[0]['percent']/100.0 )*host_memory)/aggregate_memory
                            convert.append(memory_percent)

                        if(len(result)==0):
                            result = convert
                        else:
                            if(len(result) > len(convert)):
                                result = result[0:len(convert)]
                            for i in range(len(result)):
                                value = result[i]
                                value["data"] = (value["data"] + (convert[i])["data"])
                                result[i] = value

                        break
            ret.append({"Aggregate":aggregate["name"], "data":result})
        return json.dumps(ret)


    def hosts_aggregation_disk(self, timestamp_begin=None, timestamp_end=None):
        ret = []

        disk_data = self.hosts_disk(timestamp_begin, timestamp_end)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result = []
            host_address = aggregate["host_address"]
            aggregate_disk = self.__nova.resource_aggregates(aggregate['name'])['disk']
            for host in host_address:
                host_name = self.__nova.server_name_by_ip(host)
                host_disk = self.__nova.resource_host(host_name)["disk_gb"]

                for data in disk_data:
                    if(data["host_address"]==host):
                        convert = []

                        for disk_percent in data["data"]:
                            disk_percent['data'] = ((json.loads(disk_percent['data'])[0]['percent']/100.0)*host_disk)/aggregate_disk
                            convert.append(disk_percent)

                        if(len(result)==0):
                            result = convert
                        else:
                            if(len(result) > len(convert)):
                                result = result[0:len(convert)]
                            for i in range(len(result)):
                                value = result[i]
                                value["data"] = value["data"] + (convert[i])["data"]
                                result[i] = value

                        break
            ret.append({"Aggregate":aggregate["name"], "data":result})
        
        return json.dumps(ret)

    def hosts_aggregation_network(self, timestamp_begin=None, timestamp_end=None):
        ret = []
        hosts = self.get_compute_nodes_ips()
        network_data = self.points_reduction_by_server_network(timestamp_begin, timestamp_end, hosts)
        aggregates = self.__nova.host_aggregates('admin')

        for aggregate in aggregates:
            result_incoming = []
            result_outgoing = []
            host_address = aggregate["host_address"]
            for host in host_address:
                for data in network_data:
                    if(data["host_address"]==host):
                        convert_incoming = []

                        for network_incoming_rate in data["incoming_rate"]:
                            convert_incoming.append(network_incoming_rate)

                        if(len(result_incoming)==0):
                            result_incoming = convert_incoming
                        else:
                            if(len(result_incoming) > len(convert_incoming)):
                                result_incoming = result_incoming[0:len(convert_incoming)]
                            for i in range(len(result_incoming)):
                                value = result_incoming[i]
                                value["net_bytes_recv"] = value["net_bytes_recv"] + (convert_incoming[i])["net_bytes_recv"]
                                result_incoming[i] = value

                        convert_outgoing = []

                        for network_outgoing_rate in data["outgoing_rate"]:
                            convert_outgoing.append(network_outgoing_rate)

                        if(len(result_outgoing)==0):
                            result_outgoing = convert_outgoing
                        else:
                            if(len(result_outgoing) > len(convert_outgoing)):
                                result_outgoing = result_outgoing[0:len(convert_outgoing)]
                            for i in range(len(result_outgoing)):
                                value = result_outgoing[i]
                                value["net_bytes_sent"] = value["net_bytes_sent"] + (convert_outgoing[i])["net_bytes_sent"]
                                result_outgoing[i] = value

                        break

            ret.append({"Aggregate":aggregate["name"], 'incoming_rate': result_incoming, 'outgoing_rate': result_outgoing})

        return json.dumps(ret)

    def points_reduction_by_server_cpu(self, timestamp_begin, timestamp_end, hosts):
        data = []
        old_data = self.hosts_cpu(timestamp_begin, timestamp_end)
        key = 'data'
        if len(old_data) == 0 or len(old_data[0][key]) <= 3:
            result = old_data
        else:
            for host in range(len(hosts)):
                dict_host = {}
                dict_host["host_address"] = hosts[host]
                dict_host['data'] = self.__reduction.points_reduction(old_data[host]['data'],key)
                data.append(dict_host)
                result = data
        return result
    
    def points_reduction_by_server_memory(self, timestamp_begin, timestamp_end, hosts):
        data = []
        old_data = self.hosts_memory(timestamp_begin, timestamp_end)
        key = 'data'
        if len(old_data) == 0 or len(old_data[0][key]) <= 3:
            result = old_data
        else:
            for host in range(len(hosts)):
                dict_host = {}
                dict_host["host_address"] = hosts[host]
                dict_host['data'] = self.__reduction.points_reduction_for_percent(old_data[host]['data'],key)
                data.append(dict_host)
                result = data
        return result

    def points_reduction_by_server_disk(self, timestamp_begin, timestamp_end, hosts):
        data = []
        old_data = self.hosts_disk(timestamp_begin, timestamp_end)
        if len(old_data) == 0 or len(old_data[0]['data']) <= 3:
            result = old_data
        else:
            for host in range(len(hosts)):
                dict_host = {}
                dict_host["host_address"] = hosts[host]
                dict_host['data'] = self.__reduction.points_reduction_disk(old_data[host]['data'])
                data.append(dict_host)
                result = data

        return result

    def points_reduction_by_server_network(self, timestamp_begin, timestamp_end, hosts):
        data = []
        old_data = self.hosts_network(timestamp_begin, timestamp_end)

        for host in old_data:
            single_host_data = {'host_address': host['host_address'], 'incoming_rate': [], 'outgoing_rate': []}

            if len(host['data']) > 1:
                sample = host['data'][0]
                for sample_index in range(1, len(host['data'])):

                    network_data = json.loads(sample['data'])[0]
                    before_timestamp = datetime.strptime(sample['timestamp'], '%Y-%m-%dT%H:%M:%S')
                    before_net_bytes_recv = network_data['net_bytes_recv']
                    before_net_bytes_sent = network_data['net_bytes_sent']

                    next_sample = host['data'][sample_index]
                    network_data = json.loads(next_sample['data'])[0]
                    after_timestamp = datetime.strptime(next_sample['timestamp'], '%Y-%m-%dT%H:%M:%S')
                    after_net_bytes_recv = network_data['net_bytes_recv']
                    after_net_bytes_sent = network_data['net_bytes_sent']

                    timestamp_delta = (after_timestamp - before_timestamp).total_seconds()
                    net_bytes_recv_delta = after_net_bytes_recv - before_net_bytes_recv
                    net_bytes_sent_delta = after_net_bytes_sent - before_net_bytes_sent

                    if (net_bytes_recv_delta < 0) or (net_bytes_sent_delta < 0):
                        continue
                    else:
                        sample = next_sample

                    net_bytes_recv_delta = net_bytes_recv_delta / timestamp_delta
                    net_bytes_sent_delta = net_bytes_sent_delta / timestamp_delta

                    single_host_data['incoming_rate'].append({'timestamp': sample['timestamp'], 'net_bytes_recv': net_bytes_recv_delta})
                    single_host_data['outgoing_rate'].append({'timestamp': sample['timestamp'], 'net_bytes_sent': net_bytes_sent_delta})

                single_host_data['incoming_rate'] = self.__reduction.points_reduction(single_host_data['incoming_rate'], 'net_bytes_recv')
                if math.isnan(single_host_data['incoming_rate'][-1]['net_bytes_recv']):
                    single_host_data['incoming_rate'].pop()

                single_host_data['outgoing_rate'] = self.__reduction.points_reduction(single_host_data['outgoing_rate'], 'net_bytes_sent')
                if math.isnan(single_host_data['outgoing_rate'][-1]['net_bytes_sent']):
                    single_host_data['outgoing_rate'].pop()

            data.append(single_host_data)

        return data

    def points_reduction_vm(self, timestamp_begin,timestamp_end,resource_id):
        old_data = json.loads(self.cpu_util_from(timestamp_begin,timestamp_end,resource_id))
        key2 = "cpu_util_percent"
        data = self.__reduction.points_reduction(old_data,key2)
        return data

    def points_reduction_vm_network_incoming(self, timestamp_begin,timestamp_end,resource_id):
        old_data = json.loads(self.network_incoming_bytes_rate_from(timestamp_begin,timestamp_end,resource_id))
        key2 = "network_incoming_bytes_rate"
        data  = self.__reduction.points_reduction(old_data,key2)
        return data

    def points_reduction_vm_network_outgoing(self, timestamp_begin,timestamp_end,resource_id):
        old_data = json.loads(self.network_outgoing_bytes_rate_from(timestamp_begin,timestamp_end,resource_id))
        key2 = "network_outgoing_bytes_rate"
        data  =  self.__reduction.points_reduction(old_data,key2)
        return data

    def vm_info(self):
        ret = []
        project = []

        for project_data in json.loads(self.projects()):
            project.append(project_data["name"])

        # vm_info(project) does not depend on the loop variable below,
        # so fetch the information once instead of once per project.
        informations = self.__nova.vm_info(project)
        vms_data = {}
        for node in informations:
            for node_name in node.keys():
                vms_name = (node[node_name])['nomes']
                for key in vms_name.keys():
                    vms_data[key] = vms_name[key]

        for project_name in project:
            ret.append({project_name: vms_data})

        return ret

    def instances_by_project(self):
        project = []

        for project_data in json.loads(self.projects()):
            project.append(project_data["name"])

        informations =  self.__nova.vm_info(project)
       
        vms_data = {}

        for node in informations:
            for node_name in node.keys():
                project_name = (node[node_name])['Info_project']
                vms_name = (node[node_name])['nomes']
                for key in project_name.keys():
                    for instance_id in project_name[key]:
                        if key not in vms_data:
                            vms_data[key] = {}
                        vms_data[key][instance_id] = vms_name[instance_id]

        return vms_data
 
    def vcpus_for_aggregate(self, project):
        return json.dumps(self.__nova.vcpus_for_aggregate(project))

    def create_snapshot(self, instance_id):
        self.__nova.create_snapshot(instance_id)

    def suspend_instance(self, instance_id):
        self.__nova.suspend_instance(instance_id)

    def get_computenodes_names(self):
        return self.__nova.list_compute_nodes()

    def get_compute_nodes_ips(self):
        ips = []
        all_hosts = self.get_hosts('compute_node')
        for host in all_hosts:
            ips.append(host.get_ip())

        return ips

    def get_host_availability_metrics(self, timestamp_begin, timestamp_end):
        calculator = HostMetricsCalculator()

        return calculator.get_host_availability_metrics(timestamp_begin, timestamp_end)

    def get_services_status(self, host, timestamp_begin, timestamp_end):
        return self.__hosts_db.get_service_status_db(host, timestamp_begin, timestamp_end)

    def points_reduction_services_status(self, host, timestamp_begin, timestamp_end):
        data = self.get_services_status(host, timestamp_begin, timestamp_end)
        output = {}
        for key in data.keys():
            output[key] = self.__reduction.points_reduction_for_step(data[key], 'status')

        return output

    def get_host_status(self, host, timestamp_begin, timestamp_end):
        return self.__hosts_db.get_host_status_db(host, timestamp_begin, timestamp_end)

    def points_reduction_host_status(self, host, timestamp_begin, timestamp_end):
        data = self.get_host_status(host, timestamp_begin, timestamp_end)

        return {'data': self.__reduction.points_reduction_for_step(data['data'], 'status')}

    def get_hosts(self, type=None):
        return hosts_from_dict_list(self.__hosts, type)
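A minimal usage sketch for the DataHandler class above, assuming a valid environment.conf and reachable OpenStack services; only methods shown in the example are called:

# Sketch only: requires the project's config file and live OpenStack services.
handler = DataHandler()
plan = handler.suggestion()   # {'Hosts': {...}, 'Migracoes': {...}}
print(plan['Hosts'])          # compute node -> True if it can be shut down
print(handler.projects())     # JSON-encoded list of Keystone projects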
Example #10
    def am_model(self, do_SNF=True, verbose=False):
        st = time()
        pt = time()
        reduction = self.reduction()
        if verbose:
            print('Vector field reduction calculated in {:.3f}s'.format(time() - pt))

        pt = time()
        V = create_vector_field(reduction.dst)
        if verbose:
            print('Reduced vector field built in {:.3f}s'.format(time() - pt))

        while V != 0:
            pt = time()
            reduction = V.reduction() * reduction
            if verbose:
                print('Vector field reduction calculated in {:.3f}s'.format(time() - pt))
            pt = time()
            V = create_vector_field(reduction.dst)
            if verbose:
                print('Reduced vector field built in {:.3f}s'.format(time() - pt))

        if do_SNF:
            # Compute SNF if necessary
            if reduction.dst.d != 0:
                pt = time()
                snf_reduction = reduction.dst.SNF_reduction()
                if verbose:
                    print('SNF reduction calculated in {:.3f}s'.format(time() - pt))
                pt = time()
                reduction = snf_reduction * reduction
                if verbose:
                    print('Final reduction built in {:.3f}s'.format(time() - pt))

        # Make all differentials have positive coefficient
        pt = time()
        f_sign = ChainMap(reduction.dst)
        h_sign = ChainMap(reduction.dst, degree=1)
        co_d = reduction.dst.d.T
        for sigma in reduction.dst:
            c = co_d(sigma)
            if c:
                tau = next(iter(c))
                if c[tau] < 0:
                    f_sign.set_image(sigma, -sigma)
                else:
                    f_sign.set_image(sigma, sigma)
            else:
                f_sign.set_image(sigma, sigma)

        new_dst = ChainComplex(cell_class=reduction.dst.cell_class)
        for p in reduction.dst.dimensions:
            new_dst[p] = reduction.dst[p]
        new_d = ChainMap(new_dst, degree=-1)
        for p, M in reduction.dst.d.matrices.items():
            new_d[p] = np.abs(M)
        new_dst.d = new_d
        reduction = Reduction(reduction.dst, new_dst, f_sign, f_sign, h_sign) * reduction
        if verbose:
            print('Signs adjusted in {:.3f}s'.format(time() - pt))
            print('Full time {:.3f}s'.format(time() - st))
        return reduction
Example #11

def getSummary():  # actually calculate the summary
	opLabel.pack()
	opEntry.pack()
	opEntry.config(state='normal', bg="white", fg="black")
	opEntry.delete(1.0, END)
	opEntry.insert(INSERT, "Generating Summary...")

	global endTime1,endTime2	
	
	
	startTime = time.time()
	text = ipEntry.get("1.0", END)
	
	ps = PorterStemmer()
	red_ra = redRatio.get() / 100.00

	if var1.get():
		# Stem the input before summarizing; replace the text with its
		# stemmed form rather than appending the stems to the original.
		words = text.split()
		text = " ".join(ps.stem(w) for w in words)
	
	if var6.get() == 1:
		r = Reduction()  # object of the Reduction class
		reduced_text = r.reduce(text, red_ra)
		op = ' '.join(reduced_text)
		global opTA
		opTA = op
	elif var6.get() == 2:
		m = modelVC.summary()
		op = m.summarize(text, red_ra)
		global opPA
		opPA = op
	else:
		op = text  # no model selected: fall back to the raw input


	if var2.get() == 1:
		op = shortForm.shortF(op)

	opEntry.delete(1.0,END)
	opEntry.insert(INSERT, op)
	
	if var6.get() == 1:
		endTime1 = time.time() - startTime
		print("Time taken: %f secs" % endTime1)
		if var4.get():
			tkMessageBox.showinfo("Statistics", "Time taken to complete: %f secs" % endTime1)

	if var6.get() == 2:
		endTime2 = time.time() - startTime
		print("Time taken: %f secs" % endTime2)
		if var4.get():
			tkMessageBox.showinfo("Statistics", "Time taken to complete: %f secs" % endTime2)

	if var3.get() == 1:
		file_f = open("log.txt", "a")
		file_f.write("\nInput length: %d" % len(text))
		file_f.write("\tSummary length: %d" % len(op))
		file_f.write('\nStemming: %d' % var1.get())
		file_f.write('\tShort Forms: %d' % var2.get())
		file_f.write('\tReduction Ratio: %d percent' % redRatio.get())
		file_f.write('\tModel: %d ' % var6.get())
		if var6.get() == 1:
			file_f.write("\nTime taken: %f secs\n" % endTime1)
		if var6.get() == 2:
			file_f.write("\nTime taken: %f secs\n" % endTime2)

		file_f.close()