Code Example #1
File: perf_tests.py  Project: RRZE-HPC/pycachesim
    def time_load1000000(self):
        l3 = Cache(4096, 1024, 8, "LRU")
        l2 = Cache(4096, 8, 8, "LRU", parent=l3)
        l1 = Cache(512, 8, 8, "LRU", parent=l2)
        mh = CacheSimulator(l1)

        with Timer() as t:
            mh.load(0, 1000000)
        return t.interval
Code Example #2
File: perf_tests.py  Project: mindis/pycachesim
    def time_load1000000(self):
        l3 = Cache(4096, 1024, 8, "LRU")
        l2 = Cache(4096, 8, 8, "LRU", parent=l3)
        l1 = Cache(512, 8, 8, "LRU", parent=l2)
        mh = CacheSimulator(l1)

        with Timer() as t:
            mh.load(0, 1000000)
        return t.interval
Code Example #3
File: perf_tests.py  Project: RRZE-HPC/pycachesim
    def time_load100000_tiny_collisions(self):
        l3 = Cache(4, 8, 8, "LRU")
        l2 = Cache(4, 4, 8, "LRU", parent=l3)
        l1 = Cache(2, 4, 8, "LRU", parent=l2)
        mh = CacheSimulator(l1)
        mh.load(0, 100000)

        with Timer() as t:
            mh.load(0, 100000)
        return t.interval
Code Example #4
File: test.py  Project: chubbymaggie/pycachesim
    def test_fill_nocl(self):
        l3 = Cache(4, 8, 1, "LRU")
        l2 = Cache(4, 4, 1, "LRU", parent=l3)
        l1 = Cache(2, 4, 1, "LRU", parent=l2)
        mem = MainMemory(l3)
        mh = CacheSimulator(l1, mem)

        mh.load(0, 32)
        mh.load(16, 48)

        self.assertEqual(l1.cached, set(range(40, 48)))
        self.assertEqual(l2.cached, set(range(32, 48)))
        self.assertEqual(l3.cached, set(range(16, 48)))
Code Example #5
File: test.py  Project: chubbymaggie/pycachesim
    def test_fill(self):
        l3 = Cache(4, 8, 8, "LRU")
        l2 = Cache(4, 4, 8, "LRU", parent=l3)
        l1 = Cache(2, 4, 8, "LRU", parent=l2)
        mem = MainMemory(l3)
        mh = CacheSimulator(l1, mem)

        mh.load(0, 512)
        mh.load(448, 576)

        self.assertEqual(l1.cached, set(range(512, 576)))
        self.assertEqual(l2.cached, set(range(448, 576)))
        self.assertEqual(l3.cached, set(range(320, 576)))
Code Example #6
File: test.py  Project: RRZE-HPC/pycachesim
    def test_fill(self):
        mem = MainMemory()
        l3 = Cache("L3", 4, 8, 8, "LRU")
        mem.load_to(l3)
        mem.store_from(l3)
        l2 = Cache("L2", 4, 4, 8, "LRU", store_to=l3, load_from=l3)
        l1 = Cache("L1", 2, 4, 8, "LRU", store_to=l2, load_from=l2)
        mh = CacheSimulator(l1, mem)

        mh.load(range(0, 512))
        mh.load(range(448, 576))

        self.assertEqual(l1.cached, set(range(512, 576)))
        self.assertEqual(l2.cached, set(range(448, 576)))
        self.assertEqual(l3.cached, set(range(320, 576)))
Code Example #7
File: test.py  Project: tejashah94/pycachesim
    def _build_Skylake_caches(self):
        cacheline_size = 64

        mem = MainMemory(name="MEM")
        l3 = Cache(name="L3",
                   # 20x1.375MB = 27.5MB; 11-ways with an unknown hash function, so we use
                   # 16-ways and choose the number of sets accordingly
                   sets=28160, ways=16, cl_size=cacheline_size,
                   replacement_policy="LRU",
                   write_back=True, write_allocate=False,  # victim caches don't need write-allocate
                   store_to=None, load_from=None, victims_to=None,
                   swap_on_load=False)  # This is a victim cache, so exclusiveness is implicit
        mem.store_from(l3)
        l2 = Cache(name="L2",
                   sets=1024, ways=16, cl_size=cacheline_size,  # 1MB
                   replacement_policy="LRU",
                   write_back=True, write_allocate=True,
                   store_to=l3, load_from=None, victims_to=l3,
                   swap_on_load=False)  # L2-L1 is inclusive
        mem.load_to(l2)
        l1 = Cache(name="L1",
                   sets=64, ways=8, cl_size=cacheline_size,  # 32kB
                   replacement_policy="LRU",
                   write_back=False, write_allocate=False,
                   store_to=l2, load_from=l2, victims_to=None,
                   swap_on_load=False)  # inclusive/exclusive does not matter in first-level
        cs = CacheSimulator(first_level=l1,
                            main_memory=mem)

        return cs, l1, l2, l3, mem, cacheline_size
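
The builder above only wires up the hierarchy and returns the simulator together with each level. Below is a hypothetical driver for it (the test method name and access pattern are invented, not part of the original test file), assuming the documented pycachesim CacheSimulator calls load, store, force_write_back and print_stats. Because main memory loads directly into L2 (mem.load_to(l2)), a cold load fills L2 and L1, while the victim L3 only receives lines evicted from L2.

    def test_skylake_victim_path(self):
        # Hypothetical usage sketch; would live in the same unittest.TestCase.
        cs, l1, l2, l3, mem, cl_size = self._build_Skylake_caches()

        cs.load(0, length=cl_size)   # cold miss: line is filled into L2 and L1 (L3 is a victim cache)
        cs.load(0, length=cl_size)   # warm access: served from L1
        cs.store(0, length=cl_size)  # write-through L1 (write_back=False) forwards the store to L2

        cs.force_write_back()        # flush dirty lines towards main memory
        cs.print_stats()             # per-level load/store/hit/miss/evict counters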
Code Example #8
File: test.py  Project: tejashah94/pycachesim
    def _build_Bulldozer_caches(self):
        cacheline_size = 64

        mem = MainMemory(name="MEM")
        l3 = Cache(name="L3",
                   sets=2048, ways=64, cl_size=cacheline_size,  # 4MB
                   replacement_policy="LRU",
                   write_back=True, write_allocate=False,  # victim caches don't need write-allocate
                   store_to=None, load_from=None, victims_to=None,
                   swap_on_load=False)  # This is a victim cache, so exclusiveness should be obvious
        mem.store_from(l3)
        l2 = Cache(name="L2",
                   sets=2048, ways=16, cl_size=cacheline_size,  # 2048kB 
                   replacement_policy="LRU",
                   write_back=True, write_allocate=True,
                   store_to=l3, load_from=None, victims_to=l3,
                   swap_on_load=False)  # L2-L1 is inclusive (unlike with AMD Istanbul)
        mem.load_to(l2)
        wcc = Cache(name="WCC",
                    sets=1, ways=64, cl_size=cacheline_size,  # 4KB
                    replacement_policy="LRU",
                    write_combining=True, subblock_size=1,
                    write_back=True, write_allocate=False,  # this policy only makes sense with a WCC
                    store_to=l2, load_from=None, victims_to=None,
                    swap_on_load=False)
        l1 = Cache(name="L1",
                   sets=64, ways=4, cl_size=cacheline_size,  # 16kB
                   replacement_policy="LRU",
                   write_back=False, write_allocate=False,
                   store_to=wcc, load_from=l2, victims_to=None,
                   swap_on_load=False)  # inclusive/exclusive does not matter in first-level
        cs = CacheSimulator(first_level=l1,
                            main_memory=mem)

        return cs, l1, wcc, l2, l3, mem, cacheline_size
Code Example #9
File: test.py  Project: RRZE-HPC/pycachesim
    def test_from_dict(self):
        cs, caches, mem = CacheSimulator.from_dict({
            'L1': {
                'sets': 64, 'ways': 8, 'cl_size': 64,
                'replacement_policy': 'LRU',
                'write_allocate': True, 'write_back': True,
                'load_from': 'L2', 'store_to': 'L2'},
            'L2': {
                'sets': 512, 'ways': 8, 'cl_size': 64,
                'replacement_policy': 'LRU',
                'write_allocate': True, 'write_back': True,
                'load_from': 'L3', 'store_to': 'L3'},
            'L3': {
                'sets': 20480, 'ways': 16, 'cl_size': 64,
                'replacement_policy': 'LRU',
                'write_allocate': True, 'write_back': True}
        })

        self.assertEqual(cs.first_level.name, 'L1')

        caches = {c.name: c for c in cs.levels(with_mem=False)}

        self.assertEqual(sorted(['L1', 'L2', 'L3']), sorted(caches.keys()))
        self.assertEqual(mem.last_level_load.name, 'L3')
        self.assertEqual(mem.last_level_store.name, 'L3')
        self.assertEqual(cs.first_level.backend.store_to, caches['L2'].backend)
        self.assertEqual(cs.first_level.backend.load_from, caches['L2'].backend)
Code Example #10
File: test.py  Project: tejashah94/pycachesim
    def test_from_dict_victims(self):
        cs, caches, mem = CacheSimulator.from_dict({
            'L1': {
                'sets': 64, 'ways': 8, 'cl_size': 64,
                'replacement_policy': 'LRU',
                'write_allocate': True, 'write_back': True,
                'load_from': 'L2', 'store_to': 'L2'},
            'L2': {
                'sets': 512, 'ways': 16, 'cl_size': 64,
                'replacement_policy': 'LRU',
                'write_allocate': True, 'write_back': True,
                'store_to': 'L3', 'victims_to': 'L3'},
            'L3': {
                'sets': 20480, 'ways': 16, 'cl_size': 64,
                'replacement_policy': 'LRU',
                'write_allocate': True, 'write_back': True}
        })

        self.assertEqual(cs.first_level.name, 'L1')

        caches = {c.name: c for c in cs.levels(with_mem=False)}

        self.assertEqual(sorted(['L1', 'L2', 'L3']), sorted(caches.keys()))
        self.assertEqual(mem.last_level_load.name, 'L2')
        self.assertEqual(mem.last_level_store.name, 'L3')
        self.assertEqual(cs.first_level.backend.store_to, caches['L2'].backend)
        self.assertEqual(cs.first_level.backend.load_from, caches['L2'].backend)
        self.assertEqual(caches['L2'].backend.victims_to, mem.last_level_store.backend)
        self.assertEqual(caches['L2'].backend.load_from, None)
Code Example #11
def create_cache(l1_ways, l1_block_size, l1_size, l2_ways, l2_block_size,
                 l2_size):

    if l1_ways == 0:
        l1_sets = 1
        l1_ways = l1_size // l1_block_size
    else:
        l1_sets = l1_size // (l1_block_size * l1_ways)

    if l2_ways == 0:
        l2_sets = 1
        l2_ways = l2_size // l2_block_size
    else:
        l2_sets = l2_size // (l2_block_size * l2_ways)

    l2 = Cache("L2", l2_sets, l2_ways, l2_block_size, replacement_policy="LRU")
    l1 = Cache("L1",
               l1_sets,
               l1_ways,
               l1_block_size,
               replacement_policy="LRU",
               store_to=l2,
               load_from=l2)
    mem = MainMemory()
    mem.load_to(l2)
    mem.store_from(l2)
    return CacheSimulator(l1, mem)
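
A short worked example of the set-count arithmetic in create_cache above (the concrete sizes are illustrative, not from the original source): a 32 KiB, 8-way L1 and a 256 KiB, 8-way L2, both with 64-byte blocks, yield 32768 // (64 * 8) = 64 and 262144 // (64 * 8) = 512 sets. The load/print_stats calls assume the standard pycachesim CacheSimulator API.

cs = create_cache(l1_ways=8, l1_block_size=64, l1_size=32 * 1024,
                  l2_ways=8, l2_block_size=64, l2_size=256 * 1024)
# Inside create_cache: l1_sets = 32768 // (64 * 8) = 64, l2_sets = 262144 // (64 * 8) = 512.
# Passing l1_ways=0 would instead build a fully associative L1 with a single set.
cs.load(0, length=64)  # touch one full cacheline
cs.print_stats()       # per-level hit/miss statistics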
Code Example #12
File: test.py  Project: mindis/pycachesim
    def _get_SandyEP_caches(self):
        # Cache hierarchy as found in a Sandy Bridge EP:
        cacheline_size = 64
        mem = MainMemory()
        l3 = Cache("L3",
                   20480,
                   16,
                   cacheline_size,
                   "LRU",
                   write_back=True,
                   write_allocate=True)  # 20MB 16-ways
        mem.load_to(l3)
        mem.store_from(l3)
        l2 = Cache("L2",
                   512,
                   8,
                   cacheline_size,
                   "LRU",
                   write_back=True,
                   write_allocate=True,
                   store_to=l3,
                   load_from=l3)  # 256kB 8-ways
        l1 = Cache("L1",
                   64,
                   8,
                   cacheline_size,
                   "LRU",
                   write_back=True,
                   write_allocate=True,
                   store_to=l2,
                   load_from=l2)  # 32kB 8-ways
        mh = CacheSimulator(l1, mem)
        return mh, l1, l2, l3, mem, cacheline_size
Code Example #13
def make_cache() -> CacheSimulator:
    """Returns a fresh cache of standard size."""
    mem = MainMemory()
    l3 = Cache("L3", 20480, 16, 64, "LRU")  # 20MB: 20480 sets, 16-ways with
    # cacheline size of 64 bytes
    mem.load_to(l3)
    mem.store_from(l3)
    l2 = Cache("L2", 512, 8, 64, "LRU", store_to=l3, load_from=l3)  # 256KB
    l1 = Cache("L1", 64, 8, 64, "LRU", store_to=l2, load_from=l2)  # 32KB
    cs = CacheSimulator(l1, mem)
    return cs
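
A minimal usage sketch for make_cache() above (the access pattern is illustrative, not from the original source), using the documented pycachesim CacheSimulator calls:

cs = make_cache()
cs.load(2342)            # load one byte; cold miss in L1, L2 and L3
cs.store(512, length=8)  # store 8 bytes to addresses 512-519 (write-allocate loads the line first)
cs.load(512, length=8)   # now served from L1
cs.force_write_back()    # write dirty cachelines back towards main memory
cs.print_stats()         # prints per-level statistics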
Code Example #14
File: perf_tests.py  Project: mindis/pycachesim
    def time_load100000_tiny_collisions(self):
        l3 = Cache(4, 8, 8, "LRU")
        l2 = Cache(4, 4, 8, "LRU", parent=l3)
        l1 = Cache(2, 4, 8, "LRU", parent=l2)
        mh = CacheSimulator(l1)
        mh.load(0, 100000)

        with Timer() as t:
            mh.load(0, 100000)
        return t.interval
Code Example #15
def create_cache(f):
    """
    :param f: command line args
    :return:
    """
    mem = MainMemory()
    # cache type, sets, associativity, block size, eviction pattern: LRU, MRU, RR and FIFO
    l3 = Cache(
        "L3", f.c3, f.a3, f.b3,
        "LRU")  # 20MB: 20480 sets, 16-ways with cacheline size of 64 bytes
    mem.load_to(l3)
    mem.store_from(l3)
    l2 = Cache("L2", f.c2, f.a2, f.b2, "LRU", store_to=l3,
               load_from=l3)  # 256KB
    l1 = Cache("L1", f.c1, f.a1, f.b1, "LRU", store_to=l2,
               load_from=l2)  # 32KB
    return CacheSimulator(l1, mem)
Code Example #16
File: test.py  Project: tejashah94/pycachesim
    def test_fill(self):
        mem = MainMemory()
        l3 = Cache("L3", 4, 8, 8, "LRU")
        mem.load_to(l3)
        mem.store_from(l3)
        l2 = Cache("L2", 4, 4, 8, "LRU", store_to=l3, load_from=l3)
        l1 = Cache("L1", 2, 4, 8, "LRU", store_to=l2, load_from=l2)
        mh = CacheSimulator(l1, mem)

        mh.load(range(0, 512))
        mh.load(range(448, 576))

        self.assertEqual(l1.cached, set(range(512, 576)))
        self.assertEqual(l2.cached, set(range(448, 576)))
        self.assertEqual(l3.cached, set(range(320, 576)))
Code Example #17
File: nddiCacheSim.py  Project: dave-estes-UNC/nddi
print "Frame Volume Dimensions: ", fvDimensions
print "Coefficient Matrix Size: ", ivSize, "x", len(fvDimensions)
print "Frame Volume Stride Order: ", fvStrideOrder
print "Bytes per Pixel: ", bpp
print "Bytes per Input Vector value: ", bpiv
print "Bytes per Coefficient: ", bpc
print "Bytes per Scaler: ", bps

mem = MainMemory()
l3 = Cache("L3", 20480, 16, 64,
           "LRU")  # 20MB: 20480 sets, 16-ways with cacheline size of 64 bytes
mem.load_to(l3)
mem.store_from(l3)
l2 = Cache("L2", 512, 8, 64, "LRU", store_to=l3, load_from=l3)  # 256KB
l1 = Cache("L1", 64, 8, 64, "LRU", store_to=l2, load_from=l2)  # 32KB
cs = CacheSimulator(l1, mem)


def fvAccessRow(tuple, length, access):
    strideMultiplier = bpp
    mem = 0
    bytes = length * bpp
    for strideOrder in fvStrideOrder:
        mem += tuple[strideOrder] * strideMultiplier
        strideMultiplier *= fvDimensions[strideOrder]
    if args.verbose:
        print "Addr: ", mem, " Byte Count: ", bytes
    if access == "READ_ACCESS":
        cs.load(mem, bytes)
    elif access == "WRITE_ACCESS":
        cs.store(mem, bytes)
Code Example #18
File: examples.py  Project: tejashah94/pycachesim
l2 = Cache(name="L2",
           sets=512, ways=8, cl_size=cacheline_size,
           replacement_policy="LRU",
           write_back=True, write_allocate=True,
           store_to=l3, load_from=l3, victims_to=None,
           swap_on_load=False)

l1 = Cache(name="L1",
           sets=64, ways=8, cl_size=cacheline_size,
           replacement_policy="LRU",
           write_back=True, write_allocate=True,
           store_to=l2, load_from=l2, victims_to=None,
           swap_on_load=False)  # inclusive/exclusive does not matter in first-level

cs = CacheSimulator(first_level=l1, main_memory=mem)

cs.load(23)
cv = CacheVisualizer(cs, [10, 16])
cv.dump_state()

# =============================
# AMD Bulldozer Exclusive Cache
# =============================
cacheline_size = 64

mem = MainMemory()
l3 = Cache(name="L3",
           sets=2048, ways=64, cl_size=cacheline_size,  # 4MB
           replacement_policy="LRU",
           write_back=True, write_allocate=True,
Code Example #19
    l2 = Cache("L2", 512, 8, 64, "LRU", store_to=l3, load_from=l3)  # 256KB
    l1 = Cache("L1", 64, 8, 64, "LRU", store_to=l2, load_from=l2)  # 32KB
elif model == 2:
    l3 = Cache("L3", 12288, 16, 64, "LRU")
    mem.load_to(l3)
    mem.store_from(l3)
    l2 = Cache("L2", 256, 4, 64, "LRU", store_to=l3, load_from=l3)
    l1 = Cache("L1", 32, 8, 64, "LRU", store_to=l2, load_from=l2)
elif model == 3:
    l3 = Cache("L3", 20, 4, 64, "LRU")
    mem.load_to(l3)
    mem.store_from(l3)
    l2 = Cache("L2", 2, 1, 64, "LRU", store_to=l3, load_from=l3)
    l1 = Cache("L1", 1, 1, 64, "LRU", store_to=l2, load_from=l2)

cs = CacheSimulator(l1, mem)

infile = open("cachesim_output.out", "rb")

while True:
    chunk = infile.read(9)
    if len(chunk) < 9:
        break

    t = chunk[0]
    a = int.from_bytes(chunk[1:8], byteorder='little', signed=False)

    if t == 0:
        if verbose:
            print('load  ' + str(a))
        cs.load(a, length=8)