示例#1
0
def main(argv):
    """Process and graph OSS data for all periods, highlighting outlier periods.

    Expects two command-line arguments after the program name:
    argv[1] -- file of comma-separated start/end times for all periods
    argv[2] -- file of comma-separated start/end times for the outlier
               periods; its name prefix (before the first '_') names the
               dataset used when printing/graphing.
    """
    utilities.setupcsv()
    if len(argv) != 3:
        print("Expecting filename with all times and filename with outlier times to process to be provided as command line arguments.")
        print("Not provided so exiting")
        return
    timesfile = argv[1]
    outlierfile = argv[2]
    filesystem = 2  # NOTE(review): hard-coded filesystem id -- confirm meaning in utilities
    dataset = outlierfile.split("_")[0]
    alltimes = utilities.readfile(timesfile, ',', False)
    outliertimes = utilities.readfile(outlierfile, ',', False)
    # Start and end times of the periods in question
    alltimes = utilities.converttexttotime(alltimes)
    outliertimes = utilities.converttexttotime(outliertimes)
    # Start and end times, plus 10 minutes before and 10 minutes after (in that order)
    augmentedalltimes = utilities.addbeforeandaftertimes(alltimes)
    # Unixtime version of the augmented times to enable database querying
    unixalltimes = utilities.getunixtimes(augmentedalltimes)
    sourcedata = utilities.opendatabase()
    try:
        ossdata = utilities.getossdata(sourcedata, filesystem, unixalltimes,
                                       augmentedalltimes)
        aggregatedalldata = utilities.processossdata(ossdata)
        utilities.printprocessedossdata(aggregatedalldata, outliertimes, True, dataset)
        utilities.graphprocessedossdata(aggregatedalldata, outliertimes, True, dataset)
    finally:
        # Release the database handle even if processing/graphing raises.
        utilities.closedatabase(sourcedata)
    def get_program_name_from_pid(self, pid):
        """Return the command name for *pid* read from /proc, or 'None' when pid is falsy."""
        if not pid:
            return 'None'

        # /proc/<pid>/comm holds the executable name on its first line.
        proc_comm_path = "/proc/" + str(pid) + "/comm"
        lines = utilities.readfile(proc_comm_path)
        return lines[0].strip()
示例#3
0
    def nwUtilization_info(self):
        """Collect per-interface transmitted byte counters and link speeds.

        Parses self.dev_file (expected /proc/net/dev layout: two header
        lines, then one line per interface). Scanning stops at the
        loopback ("lo:") entry.

        Returns:
            (interface_transmitted, interface_speed): dicts keyed by
            interface name. Transmitted value is column 9 of the stats
            line as a string; speed is the digits parsed from
            `ethtool <iface> | grep Speed` output (a string), or the
            int 0 when ethtool produced no output.
        """
        interface_content = utilities.readfile(self.dev_file)
        interface_transmitted = {}
        interface_speed = {}

        # First two lines of the dev file are headers, so start at index 2.
        for i in range(2, len(interface_content)):
            line_data = interface_content[i].split()
            if line_data[0] == "lo:":
                # Loopback marks the end of the interfaces of interest.
                break
            interface = line_data[0].split(":")[0]
            interface_transmitted[interface] = line_data[9]
            cmd = "ethtool " + interface + " | grep Speed"
            output = os.popen(cmd).read()
            if not output:
                speed = 0
            else:
                speed = str(output).split(":")[1]
                # Raw string fixes the invalid escape sequence '\d'
                # (a SyntaxWarning in modern Python).
                speed = re.findall(r'\d+', speed)
                speed = speed[0]

            interface_speed[interface] = speed

        return interface_transmitted, interface_speed
示例#4
0
def main(argv):
    """Graph OSS data for the periods listed in a times file.

    argv[1] -- file of comma-separated start/end times.
    argv[2] -- 0 for OSS usage profile graphs, 1 for the OSS count bar graph.
    """
    utilities.setupcsv()
    if len(argv) != 3:
        print(
            "Expecting filename with the times to process and 0 or 1 (0 for OSS usage profile graphs and 1 for OSS count bar graph) to be provided as command line arguments."
        )
        print("Not provided so exiting")
        return
    filename, choice = argv[1], int(argv[2])
    filesystem = 2
    texttimes = utilities.readfile(filename, ',', False)
    # Start and end times of the periods in question.
    times = utilities.converttexttotime(texttimes)
    # Same periods padded by 10 minutes on each side (in that order).
    augmentedtimes = utilities.addbeforeandaftertimes(times)
    # Unix-time copies of the padded periods to enable database querying.
    unixtimes = utilities.getunixtimes(augmentedtimes)
    sourcedata = utilities.opendatabase()
    ossdata = utilities.getossdata(sourcedata, filesystem, unixtimes,
                                   augmentedtimes)
    if choice == 0:
        utilities.graphossdata(ossdata)
    elif choice == 1:
        utilities.graphossdataossuse(ossdata)
    else:
        print("Wrong choice")
    utilities.closedatabase(sourcedata)
示例#5
0
def main(argv):
    """Print processed application data for each times file on the command line.

    argv[1:] -- one or more files of comma-separated start/end times.
    """
    utilities.setupcsv()
    # Accept one or more filenames: the loop below handles several, and the
    # message says "filename(s)" -- matches the sibling drivers using `< 2`.
    if len(argv) < 2:
        print(
            "Expecting filename(s) with the times to process to be provided as command line arguments."
        )
        print("Not provided so exiting")
        return
    # Open the database once. The original reopened it for every file but
    # closed only the final handle, leaking the earlier connections.
    sourcedata = utilities.opendatabase()
    try:
        for filename in argv[1:]:
            texttimes = utilities.readfile(filename, ',', False)
            # Start and end times of the periods in question
            times = utilities.converttexttotime(texttimes)
            # Start and end times, plus 10 minutes before and 10 minutes after (in that order)
            augmentedtimes = utilities.addbeforeandaftertimes(times)
            # Unixtime version of the augmented times to enable database querying
            unixtimes = utilities.getunixtimes(augmentedtimes)
            appdata = utilities.getappdata(sourcedata, unixtimes, augmentedtimes)
            processedappdata = utilities.processappdata(appdata)
            for app in processedappdata:
                # Python 3 print call (original used Python 2 `print app`,
                # a SyntaxError under Python 3).
                print(app)
            # utilities.graphappdata(ossdata)
    finally:
        utilities.closedatabase(sourcedata)
    def get_username_from_uid(self, uid):
        """Look up the login name for *uid* in the passwd file; '******' if not found."""
        username = "******"
        target = str(uid)
        for entry in utilities.readfile(ETC_PASSWD_FILE):
            fields = entry.strip().split(":")
            # passwd format puts the numeric uid in the third field.
            if fields[2] == target:
                username = fields[0]
        return username
示例#7
0
 def getfile(self):
     """Prompt for a dataset file, load it into the UI, run the solver once, and display the results."""
     # Ask the user to pick a dataset file via the Tk file dialog.
     filename = askopenfilename()
     filename_w_ext = os.path.basename(filename)
     data = ut.readfile(filename)
     self.add_data(data)
     # Run a single solver iteration on the chosen dataset.
     myMain = m.Main(dataset=filename, num_iterations=1)
     # NOTE(review): v/w/s/mw look like value, weight, solution, max weight
     # -- confirm against Main.Run's return contract.
     v, w, s, mw, myKnapSack = myMain.Run()
     self.add_bottom(filename_w_ext, mw, w, v)
     self.add_solution(s, myKnapSack.getValues(), myKnapSack.getWeights())
 def get_all_disk_names(self, path):
     """Return device names from *path* that look like hd*/sd* disks or partitions.

     *path* is read with utilities.readfile; the device name is taken from
     the third whitespace-separated field of each line (diskstats layout).
     """
     disk_content = utilities.readfile(path)
     disk_name_list = []
     # Raw strings fix the invalid escape '\d' (SyntaxWarning in modern
     # Python); compile once instead of re-parsing the pattern per line.
     disk_re = re.compile(r'(^[hs]d[a-z]+)')
     part_re = re.compile(r'(^[hs]d[a-z]+)([\d]+)')
     for line in disk_content:
         fields = line.split()
         name = fields[2]
         if part_re.findall(name) or disk_re.findall(name):
             disk_name_list.append(name)
     return disk_name_list
 def get_disk_stats(self, path, disk_name_list):
     """Gather read/write counters for devices from *path* matching *disk_name_list*.

     A line is kept when its device name occurs as a substring of any
     entry in disk_name_list. Returns a dict keyed by (device, metric).
     """
     stats = {}
     for entry in utilities.readfile(path):
         cols = entry.split()
         device = cols[2]
         if any(device in candidate for candidate in disk_name_list):
             stats[(device, 'diskReads')] = cols[3]
             stats[(device, 'diskWrites')] = cols[7]
             stats[(device, 'BlockReads')] = cols[5]
             stats[(device, 'BlockWrites')] = cols[9]
     return stats
示例#10
0
 def get_mem_info(self):
     """Return an OrderedDict holding the MemTotal and MemFree rows of the meminfo file."""
     values = OrderedDict()
     for line in utilities.readfile(self.MEM_INFO_FILE):
         fields = line.split()
         # Keep only rows whose first field starts with MemTotal or MemFree;
         # the value stored is the second field of that row.
         if re.findall('^MemTotal', fields[0]) or re.findall('^MemFree', fields[0]):
             values[fields[0]] = fields[1]
     return values
示例#11
0
def main(argv):
    """Print and graph application data for the app ids listed in a file.

    argv[1] -- file of comma-separated application ids.
    argv[2] -- optional file of comma-separated outlier ids; when present,
               outliers are highlighted in the printout and graph.
    """
    utilities.setupcsv()
    if len(argv) != 2 and len(argv) != 3:
        print("Expecting filename(s) with the times to process (and possibly an outliers file) to be provided as command line arguments.")
        print("Not provided so exiting")
        return
    filename = argv[1]
    addoutliers = False
    if len(argv) == 3:
        outliersfile = argv[2]
        addoutliers = True
    appids = utilities.readfile(filename, ',', False)
    outliers = []
    if addoutliers:
        outliers = utilities.readfile(outliersfile, ',', False)
    sourcedata = utilities.opendatabase()
    try:
        appdata = utilities.getappdatafromappids(sourcedata, appids)
        processedappdata = utilities.processappdatafromappids(appdata)
        utilities.printappdata(processedappdata, outliers, addoutliers)
        utilities.graphappdata(processedappdata, rotatexlabels=True,
                               reducexlabels=True, graphoutliers=addoutliers,
                               outliers=outliers)
    finally:
        # Release the database handle even if printing/graphing raises.
        utilities.closedatabase(sourcedata)
    def readContent(self, file_name):
        """Parse a proc-net connection table into a list of connection tuples.

        Args:
            file_name: path to the table; when it equals TCP_FILE the
                protocol label is 'TCP', otherwise 'UDP'.

        Returns:
            list of 8-tuples: (local_host, local_port, remote_host,
            remote_port, user_name, pid, program_name, protocol),
            all elements as strings.
        """
        tcp_content = utilities.readfile(file_name)
        # skip the header line
        tcp_content.pop(0)

        connections = []

        for line in tcp_content:
            content = line.strip().split()

            # Local IP address and port number
            local_ip_port = content[1]
            local_dec_ip, local_dec_port = self.hex_to_dec_ip(local_ip_port)
            #local_hostname = convert_dec_ip_to_hostname(local_dec_ip)

            # Remote IP address and port number
            remote_ip_port = content[2]
            remote_dec_ip, remote_dec_port = self.hex_to_dec_ip(remote_ip_port)
            # remote_hostname = convert_dec_ip_to_hostname(remote_dec_ip)

            # Username of program
            uid = content[7]
            user_name = self.get_username_from_uid(uid)

            # Process ID
            inode = content[9]
            pid = self.get_pid_from_inode(inode)

            program_name = self.get_program_name_from_pid(pid)

            # Protocol label depends only on which file we were handed.
            if file_name == TCP_FILE:
                protocol = 'TCP'
            else:
                protocol = 'UDP'

            # Resolve both endpoints to hostnames for display.
            l_hstnm = self.convert_dec_ip_to_hostname(local_dec_ip)
            r_hstnm = self.convert_dec_ip_to_hostname(remote_dec_ip)

            connection_tuple = (str(l_hstnm), str(local_dec_port),
                                str(r_hstnm), str(remote_dec_port), user_name,
                                str(pid), program_name, protocol)

            connections.append(connection_tuple)

        return connections
示例#13
0
    def get_cpu_info(self):
        """Collect cpu*, intr and ctxt rows from the stat file into an OrderedDict.

        Side effect: self.cpu_count is incremented once per 'cpu*' row seen.
        For cpu rows the value is [fields[1], fields[3], fields[4]]
        (presumably user/system/idle ticks -- confirm against the file
        format); for intr/ctxt rows it is the single count in fields[1].
        """
        values = OrderedDict()
        for line in utilities.readfile(self.STAT_FILE):
            fields = line.split()
            label = fields[0]
            if re.findall('^cpu', label):
                self.cpu_count += 1
                values[label] = [fields[1], fields[3], fields[4]]
            if re.findall('^intr', label):
                values[label] = fields[1]
            if re.findall('^ctxt', label):
                values[label] = fields[1]

        return values
示例#14
0
    def snmp_info(self):
        """Parse the SNMP proc file into {protocol: {counter: value}} for Ip/Tcp/Udp.

        The file alternates a header row and a value row per protocol,
        each prefixed by the protocol name and ':'. A toggle tracks
        whether the next matching row carries headers or values.
        """
        snmp_values = {}
        field_headers = []

        snmp_content = utilities.readfile(self.SNMP_FILE)
        expecting_values = 0  # 0 -> next matching row is headers, 1 -> values
        for line in snmp_content:
            field_type, field_values = line.split(":", 1)
            field_values = field_values.split()
            wanted = field_type in ('Ip', 'Tcp', 'Udp')
            if wanted and expecting_values == 0:
                field_headers = field_values
                expecting_values = 1
            elif wanted and expecting_values == 1:
                snmp_values[field_type] = dict(zip(field_headers, field_values))
                expecting_values = 0

        return snmp_values
示例#15
0
def main(argv):
    """Process and graph OSS data for each times file given on the command line.

    Each filename may encode a process count as '<name>_<count>.<ext>';
    when the name has no '_' part the count defaults to 0.
    """
    utilities.setupcsv()
    if len(argv) < 2:
        print(
            "Expecting filename(s) with the times to process to be provided as command line arguments."
        )
        print("Not provided so exiting")
        return
    filesystem = 2
    # Open the database once for all files. The original reopened it on
    # every loop iteration but closed only the final handle, leaking the
    # earlier connections.
    sourcedata = utilities.opendatabase()
    try:
        for filename in argv[1:]:
            parts = filename.split("_")
            if len(parts) == 2:
                # e.g. 'runs_8.csv' -> process count 8
                processcount = int(parts[1].split(".")[0])
            else:
                processcount = 0
            texttimes = utilities.readfile(filename, ',', False)
            # Start and end times of the periods in question
            times = utilities.converttexttotime(texttimes)
            # Start and end times, plus 10 minutes before and 10 minutes after (in that order)
            augmentedtimes = utilities.addbeforeandaftertimes(times)
            # Unixtime version of the augmented times to enable database querying
            unixtimes = utilities.getunixtimes(augmentedtimes)
            ossdata = utilities.getossdata(sourcedata, filesystem, unixtimes,
                                           augmentedtimes)
            aggregateddata = utilities.processossdata(ossdata)
            utilities.printprocessedossdata(aggregateddata, None, False,
                                            processcount)
            utilities.graphprocessedossdata(aggregateddata, None, False,
                                            processcount)
        # utilities.deleterecords(sourcedata,4)
        # utilities.deleterecords(sourcedata,1)
    finally:
        utilities.closedatabase(sourcedata)
示例#16
0
    def __init__(self,
                 dataset="./TestDataset/f1_l-d_kp_10_269",
                 num_iterations=1,
                 algorithm=0,
                 population_size=100,
                 Pcross=0.8,
                 Pmutate=0.2,
                 k=20,
                 GA_iterations=100,
                 SA_iterations=2000,
                 MaxTemp=200,
                 TempChange=0.80,
                 HybridGAIterations=150,
                 HybridSAIterations=150):
        """Load a knapsack dataset and configure the chosen solver.

        Args:
            dataset: path to a test file; the first row holds item count
                and maximum weight, remaining rows hold (value, weight).
            num_iterations: number of solver runs requested by the caller.
            algorithm: 1 -> genetic algorithm, 2 -> simulated annealing,
                anything else -> the hybrid genetic-annealing solver.
            population_size: GA population size (bumped to even if odd).
            Pcross, Pmutate: GA crossover / mutation probabilities.
            k: selection parameter passed to the GA -- presumably
                tournament size; confirm in Algorithms.
            GA_iterations, SA_iterations: iteration budgets for GA / SA.
            MaxTemp, TempChange: SA starting temperature and cooling factor.
            HybridGAIterations, HybridSAIterations: hybrid solver budgets.
        """
        self.dataset = dataset
        self.num_iterations = num_iterations

        self.filename_w_ext = os.path.basename(dataset)
        #try:
        self.optimal_value = ut.getOptimalValue(self.filename_w_ext)
        #except:
        #self.optimal_value = 0

        #get file into array
        #self.arr = ut.readfile("./TestDataset/"+self.dataset)
        self.arr = ut.readfile(self.dataset)

        self.n_items = self.arr[0][0]  #first line first element
        self.max_weight = self.arr[0][1]  #first line second element

        self.population_size = population_size
        # Force an even population -- presumably because the GA pairs
        # individuals for crossover; confirm in Algorithms.GeneticAlgorithm.
        if self.population_size % 2 != 0:
            self.population_size += 1
        self.Pcross = Pcross
        self.Pmutate = Pmutate
        self.k = k
        self.GA_iterations = GA_iterations

        self.SA_iterations = SA_iterations
        self.MaxTemp = MaxTemp
        self.TempChange = TempChange

        self.HybridGAIterations = HybridGAIterations
        self.HybridSAIterations = HybridSAIterations

        #split into weights and values
        self.weights = []
        self.values = []
        for pair in self.arr[1:]:
            self.values.append(pair[0])
            self.weights.append(pair[1])

        #Initialize problem
        self.myKnapSack = KS.Knapsack(weights=self.weights,
                                      values=self.values,
                                      maxWeight=self.max_weight)

        #Initialize an algorithm
        self.algorithm = None
        if algorithm == 1:
            self.algorithm = Algorithms.GeneticAlgorithm(
                pop_size=self.population_size,
                KnapsackObj=self.myKnapSack,
                pcross=self.Pcross,
                pmutate=self.Pmutate,
                MaxIterations=self.GA_iterations,
                k=self.k)
        elif algorithm == 2:
            self.algorithm = Algorithms.SimulatedAnnealing(
                max_iterations=self.SA_iterations,
                temp_max=self.MaxTemp,
                temp_change=self.TempChange,
                KnapsackObj=self.myKnapSack)
        else:
            self.algorithm = Algorithms.GeneticAnnealing(
                population_size=self.population_size,
                problem_size=self.n_items,
                pcross=self.Pcross,
                pmutate=self.Pmutate,
                temp_max=self.MaxTemp,
                temp_change=self.TempChange,
                GA_iterations=self.HybridGAIterations,
                SA_iterations=self.HybridSAIterations,
                k=self.k,
                KnapsackObj=self.myKnapSack)
示例#17
0
    char_vocab)
print('Read ', characterVocabularySize, ' characters')

# Load the NER tag list: forward map, reverse map, and the tag count.
tagVocabulary, tagReverseVocabulary, numberOfTags = vb.readTagList(
    tag_dict_file)
print('Read ', numberOfTags, ' NER tags')
#One hot encoded vectors
tagVocabularyVector = ut.one_hot_encoder(tagVocabulary)

# Word embeddings: dictionaries in both directions, the embedding matrix,
# and its dimension, loaded from the word2vec file.
sourceDictionary, reverseSourceDictionary, sourceDictionarySize, embeddings, embeddingDimension = vb.loadEmbeddings(
    word2vec_file)
print("Read words with word embedding dimension ", embeddingDimension,
      " and number of entries ", sourceDictionarySize)
#print(sourceDictionary)

# Each data split yields its sentences plus the longest sentence length seen.
train_sentences, train_max_seq_length = ut.readfile(train_file, tagVocabulary)
print("Training data contains ", len(train_sentences),
      " Lines " + " maximum number of words in a sentence is ",
      train_max_seq_length)

tune_sentences, tune_max_seq_length = ut.readfile(tune_file, tagVocabulary)
print("Development data contains ", len(tune_sentences), " Lines ",
      " maximum number of words in a sentence is ", tune_max_seq_length)

test_sentences, test_max_seq_length = ut.readfile(test_file, tagVocabulary)
print("Test data contains ", len(test_sentences), " Lines ",
      " maximum number of words in a sentence is ", test_max_seq_length)


def test_output():
    """No-op placeholder for the test entry point."""
    return None