def test_invalid_data_loops_type(self):
    ''' Invalid data_loops type. '''
    # Keyword arguments common to both invalid constructions.
    kwargs = dict(loopcnt=(3, 8, 4),
                  usize_gbuf=(20, 30, 9),
                  usize_regf=(3, 3, 1),
                  unit_access=((19, 29, 9), (18, 28, 8),
                               (35, 45, 15), (1, 1, 2)),
                  unit_ops=7, unit_time=7)
    # A list of DataDimLoops is rejected.
    with self.assertRaisesRegexp(TypeError,
                                 'NestedLoopDesc: .*data_loops.*'):
        _ = NestedLoopDesc(data_loops=[DataDimLoops(le.IFM, le.OFM),
                                       DataDimLoops(le.IFM, le.BAT),
                                       DataDimLoops(le.OFM, le.BAT)],
                           **kwargs)
    # Plain tuples of LoopEnum values are not DataDimLoops instances.
    with self.assertRaisesRegexp(TypeError,
                                 'NestedLoopDesc: .*data_loops.*'):
        _ = NestedLoopDesc(data_loops=((le.IFM, le.OFM),
                                       (le.IFM, le.BAT),
                                       (le.OFM, le.BAT)),
                           **kwargs)
def test_invalid_args(self):
    ''' Invalid arguments. '''
    # A value outside the LoopEnum range is rejected, whether it is the
    # only argument or mixed with valid ones.
    for args in ((le.NUM + 1,), (le.IFM, le.NUM)):
        with self.assertRaisesRegexp(ValueError,
                                     'DataDimLoops: .*LoopEnum.*'):
            _ = DataDimLoops(*args)
def test_total_access_of_at_sum(self):
    ''' Get total_access_at_of sum over all data categories. '''
    nld = NestedLoopDesc(loopcnt=(3, 8, 4),
                         usize_gbuf=(20, 30, 9),
                         usize_regf=(3, 3, 1),
                         unit_access=((19, 29, 9), (18, 28, 8),
                                      (35, 45, 15), (1, 1, 2)),
                         data_loops=(DataDimLoops(le.IFM, le.OFM),
                                     DataDimLoops(le.IFM, le.BAT),
                                     DataDimLoops(le.OFM, le.BAT)),
                         unit_ops=7, unit_time=7)
    # Per-hierarchy totals: each data category's unit access scaled by
    # the loop counts its data dimensions map onto.
    expected = {me.DRAM: 19 * 3 * 8 + 29 * 3 * 4 + 9 * 8 * 4,
                me.GBUF: 18 * 3 * 8 + 28 * 3 * 4 + 8 * 8 * 4,
                me.ITCN: 35 * 3 * 8 + 45 * 3 * 4 + 15 * 8 * 4,
                me.REGF: 1 * 3 * 8 + 1 * 3 * 4 + 2 * 8 * 4}
    for mhe in (me.DRAM, me.GBUF, me.ITCN, me.REGF):
        self.assertEqual(nld.total_access_at_of(mhe), expected[mhe])
def test_data_loops(self):
    ''' Get data_loops. '''
    dls = InputLayer.data_loops()
    # The instance accessor agrees with the class-level one.
    ilayer = InputLayer(3, 227)
    self.assertTupleEqual(ilayer.data_loops(), dls)
    # Input layers touch neither filters nor ifmaps; only ofmaps map to
    # loop dimensions.
    for dce, ddls in ((de.FIL, DataDimLoops()),
                      (de.IFM, DataDimLoops()),
                      (de.OFM, DataDimLoops(le.OFM, le.BAT))):
        self.assertEqual(dls[dce], ddls)
def test_drop(self):
    ''' drop. '''
    lst = [str(lpe) for lpe in range(le.NUM)]
    for loops in self._gen_loop_combs():
        dropped = DataDimLoops(*loops).drop(lst)
        # drop removes exactly the entries at the used loop indices.
        self.assertEqual(len(dropped), le.NUM - len(loops))
def test_take(self):
    ''' take. '''
    lst = [str(lpe) for lpe in range(le.NUM)]
    for loops in self._gen_loop_combs():
        taken = DataDimLoops(*loops).take(lst)
        # take keeps exactly the entries at the used loop indices,
        # preserving order.
        self.assertEqual(len(taken), len(loops))
        self.assertListEqual(taken, [str(lpe) for lpe in loops])
def test_data_loops_all_lpe(self):
    ''' data_loops in constructor have all LoopEnum. '''
    # Give the IFM data category every loop dimension.
    data_loops = [None] * de.NUM
    for dce, ddls in ((de.FIL, DataDimLoops(le.IFM, le.OFM)),
                      (de.IFM, DataDimLoops(le.IFM, le.OFM, le.BAT)),
                      (de.OFM, DataDimLoops(le.OFM, le.BAT))):
        data_loops[dce] = ddls
    bufshr = BufShrScheme(self.nr1, self.ps1, data_loops)
    # With all loops assigned, IFM has no sharing dimension and its
    # neighbor distances are infinite.
    self.assertTupleEqual(bufshr.dim(de.IFM), (1, 1))
    self.assertTrue(all(math.isinf(d) for d in bufshr.nbr_dists[de.IFM]))
def test_data_loops(self):
    ''' Get data_loops. '''
    dls = ConvLayer.data_loops()
    # Each data category maps to the two loop dimensions indexing it.
    for dce, ddls in ((de.FIL, DataDimLoops(le.IFM, le.OFM)),
                      (de.IFM, DataDimLoops(le.IFM, le.BAT)),
                      (de.OFM, DataDimLoops(le.OFM, le.BAT))):
        self.assertEqual(dls[dce], ddls)
    # FC layers and conv/fc instances all report the same mapping.
    clayer = ConvLayer(3, 64, [28, 14], 3, strd=2)
    flayer = FCLayer(2048, 4096, sfil=2)
    for other in (FCLayer.data_loops(), clayer.data_loops(),
                  flayer.data_loops()):
        self.assertTupleEqual(other, dls)
def test_take_and_drop(self):
    ''' take and drop. '''
    lst = [str(lpe) for lpe in range(le.NUM)]
    for loops in self._gen_loop_combs():
        ddls = DataDimLoops(*loops)
        taken = ddls.take(lst)
        dropped = ddls.drop(lst)
        # take and drop partition the list: sizes add up, the two halves
        # are disjoint, and together they cover everything.
        self.assertEqual(len(taken) + len(dropped), le.NUM)
        self.assertTrue(set(taken).isdisjoint(set(dropped)))
        self.assertSetEqual(set(taken) | set(dropped), set(lst))
def test_data_loops(self):
    ''' Get data_loops. '''
    dls = LocalRegionLayer.data_loops()
    # Local-region layers have no filters; IFM and OFM share loops.
    for dce, ddls in ((de.FIL, DataDimLoops()),
                      (de.IFM, DataDimLoops(le.OFM, le.BAT)),
                      (de.OFM, DataDimLoops(le.OFM, le.BAT))):
        self.assertEqual(dls[dce], ddls)
    # Pooling layers and instances report the same mapping.
    llayer = LocalRegionLayer(64, 28, 2, 1)
    player = PoolingLayer(64, 28, 2)
    for other in (PoolingLayer.data_loops(), llayer.data_loops(),
                  player.data_loops()):
        self.assertTupleEqual(other, dls)
def test_total_ops(self):
    ''' Get total_ops. '''
    nld = NestedLoopDesc(loopcnt=(3, 8, 4),
                         usize_gbuf=(20, 30, 9),
                         usize_regf=(3, 3, 1),
                         unit_access=((19, 29, 9), (18, 28, 8),
                                      (35, 45, 15), (1, 1, 2)),
                         data_loops=(DataDimLoops(le.IFM, le.OFM),
                                     DataDimLoops(le.IFM, le.BAT),
                                     DataDimLoops(le.OFM, le.BAT)),
                         unit_ops=7, unit_time=7)
    # Total ops = unit ops times the product of all loop counts.
    self.assertEqual(nld.total_ops(), 7 * 3 * 8 * 4)
def test_data_cnt(self):
    ''' Get data_cnt. '''
    lcnt = [3, 5, 7]
    for loops in self._gen_loop_combs():
        ddls = DataDimLoops(*loops)
        # data_cnt is the product of counts over the used loops only.
        expected = 1
        for lpe in ddls.loops():
            expected *= lcnt[lpe]
        self.assertEqual(ddls.data_cnt(lcnt), expected)
def test_invalid_loopcnt_len(self):
    ''' Invalid loopcnt len. '''
    # loopcnt must have one entry per LoopEnum; two is too few.
    with self.assertRaisesRegexp(ValueError, 'NestedLoopDesc: .*loopcnt.*'):
        _ = NestedLoopDesc(loopcnt=(3, 8),
                           usize_gbuf=(20, 30, 9),
                           usize_regf=(3, 3, 1),
                           unit_access=((19, 29, 9), (18, 28, 8),
                                        (35, 45, 15), (1, 1, 2)),
                           data_loops=(DataDimLoops(le.IFM, le.OFM),
                                       DataDimLoops(le.IFM, le.BAT),
                                       DataDimLoops(le.OFM, le.BAT)),
                           unit_ops=7,
                           unit_time=7)
def test_data_loops(self):
    ''' data_loops in constructor. '''
    # Make IFM use the same loop dimensions as OFM so both data
    # categories should produce identical sharing schemes.
    data_loops = [None] * de.NUM
    data_loops[de.FIL] = DataDimLoops(le.IFM, le.OFM)
    data_loops[de.IFM] = DataDimLoops(le.OFM, le.BAT)
    data_loops[de.OFM] = DataDimLoops(le.OFM, le.BAT)
    for nr, ps in zip([self.nr1, self.nr2, self.nr3],
                      [self.ps1, self.ps2, self.ps3]):
        bufshr = BufShrScheme(nr, ps, data_loops)
        self.assertTupleEqual(bufshr.dim(de.IFM), bufshr.dim(de.OFM))
        self.assertTupleEqual(bufshr.nbr_dists[de.IFM],
                              bufshr.nbr_dists[de.OFM])
def test_usize_regf_of(self):
    ''' Accessor usize_regf. '''
    nld = NestedLoopDesc(loopcnt=(3, 8, 4),
                         usize_gbuf=(20, 30, 9),
                         usize_regf=(3, 3, 1),
                         unit_access=((19, 29, 9), (18, 28, 8),
                                      (35, 45, 15), (1, 1, 2)),
                         data_loops=(DataDimLoops(le.IFM, le.OFM),
                                     DataDimLoops(le.IFM, le.BAT),
                                     DataDimLoops(le.OFM, le.BAT)),
                         unit_ops=7, unit_time=7)
    # Per-data-category REGF unit sizes come straight from usize_regf.
    for dce, size, msg in ((de.FIL, 3, 'usize_regf: FIL'),
                           (de.IFM, 3, 'usize_regf: IFM'),
                           (de.OFM, 1, 'usize_regf: OFM')):
        self.assertEqual(nld.usize_regf_of(dce), size, msg)
def test_default_data_loops(self):
    ''' Default data_loops in constructor. '''
    # Explicitly pass what the default data_loops should be, and check
    # the resulting scheme matches the one built with the default.
    data_loops = [None] * de.NUM
    data_loops[de.FIL] = DataDimLoops(le.IFM, le.OFM)
    data_loops[de.IFM] = DataDimLoops(le.IFM, le.BAT)
    data_loops[de.OFM] = DataDimLoops(le.OFM, le.BAT)
    cases = zip([self.bufshr1, self.bufshr2, self.bufshr3],
                [self.nr1, self.nr2, self.nr3],
                [self.ps1, self.ps2, self.ps3])
    for bufshr, nr, ps in cases:
        explicit = BufShrScheme(nr, ps, data_loops)
        for dce in range(de.NUM):
            self.assertTupleEqual(bufshr.dim(dce), explicit.dim(dce))
            self.assertTupleEqual(bufshr.nbr_dists[dce],
                                  explicit.nbr_dists[dce])
def test_unit_access_at_of(self):
    ''' Accessor unit_access. '''
    nld = NestedLoopDesc(loopcnt=(3, 8, 4),
                         usize_gbuf=(20, 30, 9),
                         usize_regf=(3, 3, 1),
                         unit_access=((19, 29, 9), (18, 28, 8),
                                      (35, 45, 15), (1, 1, 2)),
                         data_loops=(DataDimLoops(le.IFM, le.OFM),
                                     DataDimLoops(le.IFM, le.BAT),
                                     DataDimLoops(le.OFM, le.BAT)),
                         unit_ops=7, unit_time=7)
    # Without a data category, the per-category entries are summed.
    self.assertEqual(nld.unit_access_at_of(me.DRAM), 19 + 29 + 9,
                     'unit_access: DRAM')
    self.assertEqual(nld.unit_access_at_of(me.ITCN), 35 + 45 + 15,
                     'unit_access: ITCN')
    # With a data category, a single entry is returned.
    self.assertEqual(nld.unit_access_at_of(me.GBUF, de.OFM), 8,
                     'unit_access: GBUF, OFM')
    self.assertEqual(nld.unit_access_at_of(me.REGF, de.FIL), 1,
                     'unit_access: REGF, FIL')
def test_valid_repeated_args(self):
    ''' Valid repeated arguments. '''
    # Duplicated loop values collapse to a unique set.
    self.assertTupleEqual(
        DataDimLoops(le.IFM, le.OFM, le.IFM, le.IFM).loops(),
        (le.IFM, le.OFM))
    self.assertTupleEqual(DataDimLoops(*([le.BAT] * 10)).loops(),
                          (le.BAT, ))
def test_valid_args(self):
    ''' Valid arguments. '''
    nld = NestedLoopDesc(loopcnt=(3, 8, 4),
                         usize_gbuf=(20, 30, 9),
                         usize_regf=(3, 3, 1),
                         unit_access=((19, 29, 9), (18, 28, 8),
                                      (35, 45, 15), (1, 1, 2)),
                         data_loops=(DataDimLoops(le.IFM, le.OFM),
                                     DataDimLoops(le.IFM, le.BAT),
                                     DataDimLoops(le.OFM, le.BAT)),
                         unit_ops=7, unit_time=7)
    # Every constructor argument is stored unmodified.
    for attr, val in (('loopcnt', (3, 8, 4)),
                      ('usize_gbuf', (20, 30, 9)),
                      ('usize_regf', (3, 3, 1)),
                      ('unit_access', ((19, 29, 9), (18, 28, 8),
                                       (35, 45, 15), (1, 1, 2))),
                      ('unit_ops', 7),
                      ('unit_time', 7)):
        self.assertEqual(getattr(nld, attr), val, attr)
    for dce, name, ddls in ((de.FIL, 'FIL', DataDimLoops(le.IFM, le.OFM)),
                            (de.IFM, 'IFM', DataDimLoops(le.IFM, le.BAT)),
                            (de.OFM, 'OFM', DataDimLoops(le.OFM, le.BAT))):
        self.assertEqual(nld.data_loops[dce], ddls, 'data_loops: ' + name)
def test_valid_args(self):
    ''' Valid arguments. '''
    # Loops are stored deduplicated and in canonical LoopEnum order,
    # regardless of the argument order.
    self.assertTupleEqual(DataDimLoops(le.IFM, le.OFM).loops(),
                          (le.IFM, le.OFM))
    self.assertTupleEqual(DataDimLoops(le.BAT, le.IFM, le.OFM).loops(),
                          (le.IFM, le.OFM, le.BAT))
def setUp(self):
    ''' Set up the layer, resource, nested-loop-description, option, and
    cost fixtures shared by the tests in this case. '''
    # Workload.
    self.layer = {}
    self.layer['BASE'] = ConvLayer(12, 10, 28, 3)
    # 20x20 filter -- presumably exercises large-filter mapping; confirm.
    self.layer['LGFIL'] = ConvLayer(2, 4, 28, 20)
    self.layer['POOL'] = PoolingLayer(32, 28, 2)
    self.batch_size = 4
    # Resource.
    self.resource = {}
    dim_array = PhyDim2(16, 16)
    # Single-node processing region and a single data region at origin.
    proc_region = NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 1),
                             type=NodeRegion.PROC)
    data_regions = (NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 1),
                               type=NodeRegion.DATA), )
    # Typical resource.
    self.resource['BASE'] = Resource(proc_region=proc_region,
                                     data_regions=data_regions,
                                     dim_array=dim_array,
                                     size_gbuf=65536, size_regf=64)
    # Larger resource with sufficient capacity, to make all schemes valid.
    self.resource['LG'] = Resource(proc_region=proc_region,
                                   data_regions=data_regions,
                                   dim_array=dim_array,
                                   size_gbuf=1024**3, size_regf=1024**3)
    # Small resource.
    self.resource['SM'] = Resource(proc_region=proc_region,
                                   data_regions=data_regions,
                                   dim_array=dim_array,
                                   size_gbuf=4096, size_regf=16)
    # Nested loop description after mapping.
    # gen_nested_loop_desc() is a generator; next() takes its first scheme.
    self.nld = {}
    self.nld['BASE'] = next(
        MapStrategyEyeriss(self.layer['BASE'], self.batch_size,
                           dim_array).gen_nested_loop_desc())
    self.nld['LGFIL'] = next(
        MapStrategyEyeriss(self.layer['LGFIL'], self.batch_size,
                           dim_array).gen_nested_loop_desc())
    self.nld['POOL'] = next(
        MapStrategyEyeriss(self.layer['POOL'], self.batch_size,
                           dim_array).gen_nested_loop_desc())
    # Fake nested loop, with zero filter size.
    self.nld['ZERO_FIL'] = NestedLoopDesc(
        loopcnt=(12, 10, 4),
        usize_gbuf=(0, 1000, 800),
        usize_regf=(0, 3, 1),
        unit_access=((0, 1000, 800), (0, 1000, 800), (3, 9, 7),
                     (1, 1, 1)),
        data_loops=(DataDimLoops(le.IFM, le.OFM),
                    DataDimLoops(le.IFM, le.BAT),
                    DataDimLoops(le.OFM, le.BAT)),
        unit_ops=1, unit_time=1)
    # Fake nested loop, with zero ifmap size.
    self.nld['ZERO_IFM'] = NestedLoopDesc(
        loopcnt=(12, 10, 4),
        usize_gbuf=(9, 0, 800),
        usize_regf=(3, 0, 1),
        unit_access=((9, 0, 800), (9, 0, 800), (3, 9, 7),
                     (1, 1, 1)),
        data_loops=(DataDimLoops(le.IFM, le.OFM),
                    DataDimLoops(le.IFM, le.BAT),
                    DataDimLoops(le.OFM, le.BAT)),
        unit_ops=1, unit_time=1)
    # Options.
    self.options = {}
    # Basic.
    self.options['BASE'] = Option(ntops=2**30)
    # Multiprocessing.
    self.options['MP'] = Option(ntops=2**30, nprocesses=8)
    # Limited top schemes.
    self.options['NTOPS'] = Option(ntops=10)
    # Bypass.
    self.options['BYP'] = Option(sw_gbuf_bypass=(True, ) * 3, ntops=2**30)
    # Bypass solver.
    self.options['BYPSOL'] = Option(sw_gbuf_bypass=(True, ) * 3,
                                    sw_solve_loopblocking=True,
                                    ntops=2**30)
    # Cost.
    self.cost = Cost(mac_op=1, mem_hier=(200, 6, 2, 1), noc_hop=50,
                     unit_static=50)
    # Partition occupation.
    self.part_occ = 0.91
def test_loops(self):
    ''' Get loops. '''
    # loops() returns exactly the combination used at construction.
    for loops in self._gen_loop_combs():
        self.assertTupleEqual(DataDimLoops(*loops).loops(), loops)
def test_nested_loop_desc_sanity(self):
    ''' Generated nested loop description sanity check. '''
    batch_size = 4
    for layer in self.convlayers.values() + self.fclayers.values() \
            + self.lrlayers.values() + self.fake_layers.values():
        ms = MapStrategyEyeriss(layer, batch_size, self.dim_array)
        for nld in ms.gen_nested_loop_desc():
            # Replication reduces numbers of IFM/OFM.
            self.assertGreaterEqual(layer.nifm, nld.loopcnt[le.IFM])
            self.assertGreaterEqual(layer.nofm, nld.loopcnt[le.OFM])
            # Folding increases batch size.
            self.assertEqual(nld.loopcnt[le.BAT] % batch_size, 0)
            # Total and unit ops.
            self.assertAlmostEqual(nld.total_ops(),
                                   layer.total_ops(batch_size))
            self.assertAlmostEqual(nld.unit_ops * util.prod(nld.loopcnt),
                                   layer.total_ops(batch_size))
            # Unit time and unit ops.
            # The difference is due to the loop occupation, which is not
            # counted in utilization.
            self.assertGreaterEqual(
                nld.unit_time * ms.utilization() * self.dim_array.size(),
                nld.unit_ops)
            # Total access at DRAM.
            self.assertAlmostEqual(
                nld.total_access_at_of(me.DRAM, de.FIL),
                layer.total_filter_size()
                if isinstance(layer, ConvLayer) else 0)
            # IFM may have refetch due to folding.
            self.assertGreaterEqual(
                nld.total_access_at_of(me.DRAM, de.IFM) + 1e-7,
                layer.total_ifmap_size(batch_size))
            self.assertAlmostEqual(nld.total_access_at_of(me.DRAM, de.OFM),
                                   layer.total_ofmap_size(batch_size))
            # Unit access to REGF.
            self.assertAlmostEqual(
                nld.unit_access[me.REGF][de.FIL] * util.prod(nld.loopcnt),
                layer.total_ops(batch_size)
                if isinstance(layer, ConvLayer) else 0)
            self.assertAlmostEqual(
                nld.unit_access[me.REGF][de.IFM] * util.prod(nld.loopcnt),
                layer.total_ops(batch_size))
            self.assertAlmostEqual(
                nld.unit_access[me.REGF][de.OFM] * util.prod(nld.loopcnt),
                layer.total_ops(batch_size))
            # Unit GBUF size and unit access to DRAM.
            self.assertTrue(
                all(us >= ua
                    for us, ua in zip(nld.usize_gbuf,
                                      nld.unit_access[me.DRAM])))
            # Unit REGF size.
            if isinstance(layer, ConvLayer):
                # See JSSC'17, IV. A. Dimensions Beyond 2-D in PE Array. 1).
                self.assertEqual(nld.usize_regf[de.FIL], layer.wfil)
                self.assertEqual(nld.usize_regf[de.IFM], layer.wfil)
                self.assertEqual(nld.usize_regf[de.OFM], 1)
            # Data dimension loops.
            if isinstance(layer, ConvLayer):
                self.assertEqual(nld.data_loops[de.FIL],
                                 DataDimLoops(le.IFM, le.OFM))
                self.assertEqual(nld.data_loops[de.IFM],
                                 DataDimLoops(le.IFM, le.BAT))
                self.assertEqual(nld.data_loops[de.OFM],
                                 DataDimLoops(le.OFM, le.BAT))
            else:
                # BUG FIX: this branch was `elif isinstance(layer,
                # ConvLayer)`, identical to the `if` above, so its
                # assertions were unreachable dead code. Non-conv
                # (local-region/pooling) layers use no filters, and their
                # IFM/OFM share the same loop dimensions.
                self.assertEqual(nld.data_loops[de.FIL], DataDimLoops())
                self.assertEqual(nld.data_loops[de.IFM],
                                 DataDimLoops(le.OFM, le.BAT))
                self.assertEqual(nld.data_loops[de.OFM],
                                 DataDimLoops(le.OFM, le.BAT))
def setUp(self):
    ''' Set up the layer, resource, nested-loop-description, partition,
    option, constraint, and cost fixtures shared by this test case. '''
    # Workload.
    self.layer = {}
    self.layer['BASE'] = ConvLayer(12, 10, 28, 3)
    # 20x20 filter -- presumably exercises large-filter mapping; confirm.
    self.layer['LGFIL'] = ConvLayer(2, 4, 28, 20)
    self.layer['POOL'] = PoolingLayer(32, 28, 2)
    # Larger layer used with the multi-node parallel resource below.
    self.layer['PAR'] = ConvLayer(24, 36, 56, 3)
    self.batch_size = 4
    # Resource.
    self.resource = {}
    dim_array = PhyDim2(16, 16)
    # Single-node processing region and a single DRAM data region.
    proc_region = NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 1),
                             type=NodeRegion.PROC)
    data_region = NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(1, 1),
                             type=NodeRegion.DRAM)
    # Typical resource.
    self.resource['BASE'] = Resource(proc_region=proc_region,
                                     dram_region=data_region,
                                     src_data_region=data_region,
                                     dst_data_region=data_region,
                                     dim_array=dim_array,
                                     size_gbuf=65536, size_regf=64,
                                     array_bus_width=float('inf'),
                                     dram_bandwidth=float('inf'),
                                     no_time_mux=False)
    # Larger resource with sufficient capacity, to make all schemes valid.
    self.resource['LG'] = Resource(proc_region=proc_region,
                                   dram_region=data_region,
                                   src_data_region=data_region,
                                   dst_data_region=data_region,
                                   dim_array=dim_array,
                                   size_gbuf=1024**3, size_regf=1024**3,
                                   array_bus_width=float('inf'),
                                   dram_bandwidth=float('inf'),
                                   no_time_mux=False)
    # Small resource.
    self.resource['SM'] = Resource(proc_region=proc_region,
                                   dram_region=data_region,
                                   src_data_region=data_region,
                                   dst_data_region=data_region,
                                   dim_array=dim_array,
                                   size_gbuf=4096, size_regf=16,
                                   array_bus_width=float('inf'),
                                   dram_bandwidth=float('inf'),
                                   no_time_mux=False)
    # Multi-node parallel resource.
    self.resource['PAR'] = Resource(proc_region=NodeRegion(
        origin=PhyDim2(0, 0), dim=PhyDim2(4, 2), type=NodeRegion.PROC),
                                    dram_region=data_region,
                                    src_data_region=data_region,
                                    dst_data_region=data_region,
                                    dim_array=dim_array,
                                    size_gbuf=25000, size_regf=64,
                                    array_bus_width=float('inf'),
                                    dram_bandwidth=float('inf'),
                                    no_time_mux=False)
    # Resource with no data regions.
    # The src/dst data regions below are PROC regions, i.e. the data
    # lives on compute nodes rather than in dedicated DRAM regions.
    proc_data_region = NodeRegion(origin=PhyDim2(1, 1), dim=PhyDim2(1, 1),
                                  type=NodeRegion.PROC)
    self.resource['SRCNOTDATA'] = Resource(
        proc_region=proc_region,
        dram_region=data_region,
        src_data_region=proc_data_region,
        dst_data_region=data_region,
        dim_array=dim_array, size_gbuf=1024**3, size_regf=1024**3,
        array_bus_width=float('inf'), dram_bandwidth=float('inf'),
        no_time_mux=False)
    self.resource['DSTNOTDATA'] = Resource(
        proc_region=proc_region,
        dram_region=data_region,
        src_data_region=data_region,
        dst_data_region=proc_data_region,
        dim_array=dim_array, size_gbuf=1024**3, size_regf=1024**3,
        array_bus_width=float('inf'), dram_bandwidth=float('inf'),
        no_time_mux=False)
    self.resource['DATALOCAL'] = Resource(proc_region=proc_region,
                                          dram_region=data_region,
                                          src_data_region=proc_region,
                                          dst_data_region=proc_region,
                                          dim_array=dim_array,
                                          size_gbuf=1024**3,
                                          size_regf=1024**3,
                                          array_bus_width=float('inf'),
                                          dram_bandwidth=float('inf'),
                                          no_time_mux=False)
    # Filter pinning.
    self.resource['FILPIN'] = Resource(proc_region=proc_region,
                                       dram_region=data_region,
                                       src_data_region=data_region,
                                       dst_data_region=data_region,
                                       dim_array=dim_array,
                                       size_gbuf=1024**3,
                                       size_regf=1024**3,
                                       array_bus_width=float('inf'),
                                       dram_bandwidth=float('inf'),
                                       no_time_mux=True)
    # Nested loop description after mapping.
    # gen_nested_loop_desc() is a generator; next() takes its first scheme.
    self.nld = {}
    self.nld['BASE'] = next(
        MapStrategyEyeriss(self.layer['BASE'], self.batch_size, 1,
                           dim_array).gen_nested_loop_desc())
    self.nld['LGFIL'] = next(
        MapStrategyEyeriss(self.layer['LGFIL'], self.batch_size, 1,
                           dim_array).gen_nested_loop_desc())
    self.nld['POOL'] = next(
        MapStrategyEyeriss(self.layer['POOL'], self.batch_size, 1,
                           dim_array).gen_nested_loop_desc())
    # Fake nested loop, with zero filter size.
    self.nld['ZERO_FIL'] = NestedLoopDesc(
        loopcnt=(12, 10, 4),
        usize_gbuf=(0, 1000, 800),
        usize_regf=(0, 3, 1),
        unit_access=((0, 1000, 800), (0, 1000, 800), (3, 9, 7),
                     (1, 1, 1)),
        data_loops=(DataDimLoops(le.IFM, le.OFM),
                    DataDimLoops(le.IFM, le.BAT),
                    DataDimLoops(le.OFM, le.BAT)),
        unit_ops=1, unit_time=1)
    # Fake nested loop, with zero ifmap size.
    self.nld['ZERO_IFM'] = NestedLoopDesc(
        loopcnt=(12, 10, 4),
        usize_gbuf=(9, 0, 800),
        usize_regf=(3, 0, 1),
        unit_access=((9, 0, 800), (9, 0, 800), (3, 9, 7),
                     (1, 1, 1)),
        data_loops=(DataDimLoops(le.IFM, le.OFM),
                    DataDimLoops(le.IFM, le.BAT),
                    DataDimLoops(le.OFM, le.BAT)),
        unit_ops=1, unit_time=1)
    # Fake partition scheme.
    self.part = PartitionScheme(range(pe.NUM), ((1, 1), ) * pe.NUM)
    # Fake buffer sharing scheme.
    self.bufshr = BufShrScheme(proc_region, self.part)
    # Options.
    self.options = {}
    # Basic.
    self.options['BASE'] = Option(ntops=2**30)
    # Multiprocessing.
    self.options['MP'] = Option(ntops=2**30, nprocesses=8)
    # Limited top schemes.
    self.options['NTOPS'] = Option(ntops=10)
    # Bypass.
    self.options['BYP'] = Option(sw_gbuf_bypass=(True, ) * 3, ntops=2**30)
    # Bypass solver.
    self.options['BYPSOL'] = Option(sw_gbuf_bypass=(True, ) * 3,
                                    sw_solve_loopblocking=True,
                                    ntops=2**30)
    # Access forwarding.
    self.options['ACCFWD'] = Option(hw_access_forwarding=True, ntops=2**30)
    # Buffer sharing.
    self.options['BUFSHR'] = Option(hw_gbuf_sharing=True, ntops=2**30)
    # Buffer sharing with bypassing.
    self.options['BUFSHR-BYP'] = Option(sw_gbuf_bypass=(True, ) * 3,
                                        hw_gbuf_sharing=True,
                                        ntops=2**30)
    # Constraint.
    self.none_cstr = SchedulingConstraint()
    self.cstr = SchedulingConstraint(topifm=1, topbat=1)
    # Cost.
    self.cost = Cost(mac_op=1, mem_hier=(200, 6, 2, 1), noc_hop=50,
                     idl_unit=50)
def test_repr(self):
    ''' __repr__. '''
    # pylint: disable=eval-used
    # repr must round-trip through eval back to an equal object.
    for loops in self._gen_loop_combs():
        original = DataDimLoops(*loops)
        clone = eval(repr(original))
        self.assertEqual(clone, original)