Ejemplo n.º 1
0
    def profiler(self, cache_class, **kargs):
        """Construct a profiler for the given cache class.

        Keyword args:
            size: cache size to profile; defaults to self.cache_size.
            data: path to a trace file (used together with dataType).
            dataType: one of 'plain', 'csv', 'vscsi'; selects the reader
                built over `data`. When data/dataType are absent, the
                instance's existing self.reader is used.

        Returns:
            The constructed profiler, or None for cache classes that are
            not supported yet (only "lru" is handled so far).

        Raises:
            ValueError: if an unrecognized dataType is supplied.
            AssertionError: if no usable size or reader is available.
        """
        size = kargs.get('size', self.cache_size)

        if 'data' in kargs and 'dataType' in kargs:
            data_type = kargs['dataType']
            if data_type == 'plain':
                reader = plainCacheReader(kargs['data'])
            elif data_type == 'csv':
                reader = csvCacheReader(kargs['data'])
            elif data_type == 'vscsi':
                reader = vscsiCacheReader(kargs['data'])
            else:
                # Previously an unknown dataType fell through all three ifs
                # and caused an UnboundLocalError on `reader` below; fail
                # explicitly instead.
                raise ValueError("unsupported dataType: {}".format(data_type))
        else:
            reader = self.reader

        assert size != -1, "you didn't provide size for cache"
        assert reader, "you didn't provide data file or data type"

        profiler = None

        if cache_class.lower() == "lru":
            profiler = pardaProfiler(LRU, size, reader)
        else:
            # TODO: parameters for other cache classes can be passed in kargs
            pass

        return profiler
Ejemplo n.º 2
0
 def prepare_file(self):
     """Dump the current reader's trace to 'temp.dat', one element per
     line, then replace self.reader with a plain-text reader over it.
     """
     print("changing file format")
     with open('temp.dat', 'w') as ofile:
         element = self.reader.read_one_element()
         # read_one_element() signals end-of-trace with None; compare with
         # `is not None` rather than `!= None` (PEP 8)
         while element is not None:
             ofile.write(str(element) + '\n')
             element = self.reader.read_one_element()
     self.reader = plainCacheReader('temp.dat')
Ejemplo n.º 3
0
 def prepare_file(self):
     """Convert the underlying trace into plain-text format.

     Writes every element of self.reader to 'temp.dat' (one per line)
     and swaps self.reader for a plainCacheReader over that file.
     """
     print("changing file format")
     with open('temp.dat', 'w') as ofile:
         item = self.reader.read_one_element()
         while item is not None:  # `is not None`, not `!= None` (PEP 8)
             ofile.write(str(item) + '\n')
             item = self.reader.read_one_element()
     self.reader = plainCacheReader('temp.dat')
Ejemplo n.º 4
0
        count = 0
        for i in self.c_long_array:
            if i > -1 and i < cache_size:
                count += 1
        print(count)




        # def __del__(self):
        #     if os.path.exists('temp.dat'):
        #         os.remove('temp.dat')


if __name__ == "__main__":
    import os
    import shutil

    # Profile an LRU cache of 30000 entries over the parda trace.
    p = pardaProfiler(LRU, 30000, plainCacheReader("../data/parda.trace"))

    # Stage each mining trace as mining.dat and run the test routine on it.
    for trace_name in os.listdir('../data/mining/'):
        shutil.copy('../data/mining/' + trace_name, '../data/mining/mining.dat')
        print(trace_name)
        p._test()
Ejemplo n.º 5
0
from mimircache.cache.LRU import LRU
from mimircache.cacheReader.plainReader import plainCacheReader
from mimircache.cacheReader.csvReader import csvCacheReader
from mimircache.cacheReader.vscsiReader import vscsiCacheReader
from mimircache.profiler.basicLRUProfiler import basicLRUProfiler
from mimircache.profiler.pardaProfiler import pardaProfiler
from mimircache.profiler.pardaProfiler import parda_mode

# first step: construct a reader for reading any kind of trace

# the most basic reader: each line of the trace file is one label/tag
reader1 = plainCacheReader("../data/parda.trace")

# csv reader: picks one column of a csv file as the label/tag
reader2 = csvCacheReader("../data/trace_CloudPhysics_txt", column=4)

# binary reader for the cloudphysics vscsi trace format
reader3 = vscsiCacheReader("../data/trace_CloudPhysics_bin")

# a reader is also a generator; typical usage patterns:
# read a single trace element at a time:
reader1.read_one_element()
# or iterate over all elements:
# for element in reader1:
#     pass
# reset() rewinds the reader to the beginning after partial reads
reader1.reset()

# second step: construct a profiler for analysis

# basic mattson profiler (toooooo slow)
Ejemplo n.º 6
0
        self.reader.reset()
        for i in self.reader:
            self.addOneTraceElement(i)
        # p.printMRC()
        self.outputHRC()
        self.plotHRC()



if __name__ == "__main__":
    import time
    import cProfile

    # wall-clock start; NOTE(review): t1 is never read in the visible lines
    t1 = time.time()
    # r = plainCacheReader('../../data/test')
    r = plainCacheReader('../../data/parda.trace')

    # alternative profiler configurations kept for reference:
    # p = generalProfiler(LRU, 6000, 20, r, 48)
    # p = generalProfiler(ARC, (10, 0.5), 10, r, 1)
    p = generalProfiler(LFU_LRU, 800, 80, r, 4)
    # p = generalProfiler(ARC, 5, 5, r, 1)


    import pstats, io

    # cProfile is enabled here; NOTE(review): pr is never disabled or
    # reported within the visible lines — presumably handled further down
    pr = cProfile.Profile()
    pr.enable()


    p.run()
Ejemplo n.º 7
0
        print(count)
        self.parda_seq.get_reuse_distance(c_file_name, c_line_num,
                                          self.c_cache_size, self.c_long_array)
        count = 0
        for i in self.c_long_array:
            if i > -1 and i < cache_size:
                count += 1
        print(count)

        # def __del__(self):
        #     if os.path.exists('temp.dat'):
        #         os.remove('temp.dat')


if __name__ == "__main__":
    # profile an LRU cache of 30000 entries over the parda trace
    p = pardaProfiler(LRU, 30000, plainCacheReader("../data/parda.trace"))
    # alternative readers / runs kept for reference:
    # p = parda(LRU, 30000, csvCacheReader("../data/trace_CloudPhysics_txt", 4))
    # p = parda(LRU, 30000, vscsiReader("../data/cloudPhysics/w02_vscsi1.vscsitrace"))
    # p = parda(LRU, 3000000, basicCacheReader("temp.dat"))
    # p.run(parda_mode.seq, threads=4)
    # p.get_reuse_distance()
    import os
    import shutil

    # stage each mining trace as mining.dat and run the test routine on it
    for f in os.listdir('../data/mining/'):
        shutil.copy('../data/mining/' + f, '../data/mining/mining.dat')
        print(f)
        p._test()
    # p.run_with_specified_lines(10000, 20000)
    # p.plotHRC(autosize=True, autosize_threshhold=0.00001)
Ejemplo n.º 8
0
from mimircache.cache.LRU import LRU
from mimircache.cacheReader.plainReader import plainCacheReader
from mimircache.cacheReader.csvReader import csvCacheReader
from mimircache.cacheReader.vscsiReader import vscsiCacheReader
from mimircache.profiler.basicLRUProfiler import basicLRUProfiler
from mimircache.profiler.pardaProfiler import pardaProfiler
from mimircache.profiler.pardaProfiler import parda_mode

# first step: construct a reader for reading any kind of trace

# the most basic reader: each line of the trace file is one label/tag
reader1 = plainCacheReader("../data/parda.trace")

# csv reader: picks one column of a csv file as the label/tag
reader2 = csvCacheReader("../data/trace_CloudPhysics_txt", column=4)

# binary reader for the cloudphysics vscsi trace format
reader3 = vscsiCacheReader("../data/trace_CloudPhysics_bin")

# a reader is also a generator; typical usage patterns:
# read a single trace element at a time:
reader1.read_one_element()
# or iterate over all elements:
# for element in reader1:
#     pass
# reset() rewinds the reader to the beginning after partial reads
reader1.reset()

# second step: construct a profiler for analysis

# basic mattson profiler (toooooo slow)
Ejemplo n.º 9
0
 def open(self, file_path):
     """Attach a plain-text cache reader over *file_path* to this object.

     NOTE(review): the existence check below is commented out, so a
     missing file surfaces as an error inside plainCacheReader instead.
     """
     # assert os.path.exists(file_path), "data file does not exist"
     self.reader = plainCacheReader(file_path)