class TestMLP_network():
    ''' Schedule an MLP network on an Eyeriss-like configuration. '''

    def __init__(self, mlp_network):
        self.net = mlp_network  # e.g. MLP_network(18, 32, 64, 32, 2)
        self.map_strategy = MapStrategyEyeriss
        self.resource = Resource(
            proc_region=NodeRegion(origin=PhyDim2(0, 0),
                                   dim=PhyDim2(1, 1),
                                   type=NodeRegion.PROC),
            data_regions=(NodeRegion(origin=PhyDim2(0, 0),
                                     dim=PhyDim2(1, 1),
                                     type=NodeRegion.DATA), ),
            dim_array=PhyDim2(16, 16),
            size_gbuf=128 * 1024 // 2,  # 128 kB, in 16-bit words
            size_regf=512 // 2,  # 512 B, in 16-bit words
        )

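        # mem_hier is the per-access cost at each memory level, ordered as
        # (DRAM, GBUF, ITCN, REGF); the same ordering is spelled out with the
        # me.* enum indices in do_scheduling() below.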
        self.cost = Cost(mac_op=1,
                         mem_hier=(200, 6, 2, 1),
                         noc_hop=0,
                         unit_static=0)

        self.options = Option()

    def test_eyeriss_isca16(self):
        network = self.net
        batch_size = 16
        nnd = NNDataflow(network, batch_size, self.resource, self.cost,
                         self.map_strategy)

        tops, cache_stats = nnd.schedule_search(self.options)

        if not tops:
            sys.stderr.write("No valid dataflow found!")
            return None
        dfsch = tops[0]

        ## Write results.

        res_map = OrderedDict()

        res_map['net'] = "MLP_L"
        res_map['batch'] = batch_size
        res_map['resource'] = self.resource._asdict()
        res_map['cost'] = self.cost._asdict()
        res_map['options'] = self.options._asdict()

        res_map['cache_stats'] = cache_stats

        stats = stats_dict(dfsch, self.cost)
        for key, val in stats.items():
            res_map[key] = val

        return res_map
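
# A minimal sketch of driving the harness above; it is not a unittest.TestCase,
# so it has to be invoked directly. MLP_network is the hypothetical constructor
# hinted at in __init__, not something defined in this snippet.
if __name__ == '__main__':
    harness = TestMLP_network(MLP_network(18, 32, 64, 32, 2))
    results = harness.test_eyeriss_isca16()
    for key, val in (results or {}).items():
        print(key, val)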
Example 2
def do_scheduling(args):
    '''
    Get optimal scheduling for given problem. Return a result schedule.
    '''

    ## Network.

    network = import_network(args.net)
    batch_size = args.batch

    ## Resource.

    dim_nodes = PhyDim2(*args.nodes)
    dim_array = PhyDim2(*args.array)

    # Sizes of gbuf and regf are in words.
    word = (args.word + 7) // 8
    size_gbuf = args.gbuf // word
    size_regf = args.regf // word

    array_bus_width = args.bus_width // args.word
    if not array_bus_width:
        array_bus_width = float('inf')
    dram_bandwidth = args.dram_bw / word

    proc_region = NodeRegion(dim=dim_nodes,
                             origin=PhyDim2(0, 0),
                             type=NodeRegion.PROC)

    if args.mem_type == '2D':
        # Memory nodes are on two sides.
        data_region = NodeRegion(dim=PhyDim2(2, 2),
                                 origin=PhyDim2(0, 0),
                                 dist=dim_nodes - PhyDim2(1, 1),
                                 type=NodeRegion.DRAM)
        assert data_region.rel2abs(PhyDim2(1, 1)) + PhyDim2(1, 1) \
                == proc_region.dim
    elif args.mem_type == '3D':
        # Memory nodes are on the top.
        data_region = NodeRegion(dim=dim_nodes,
                                 origin=PhyDim2(0, 0),
                                 type=NodeRegion.DRAM)

    resource = Resource(proc_region=proc_region,
                        dram_region=data_region,
                        src_data_region=data_region,
                        dst_data_region=data_region,
                        dim_array=dim_array,
                        size_gbuf=size_gbuf,
                        size_regf=size_regf,
                        array_bus_width=array_bus_width,
                        dram_bandwidth=dram_bandwidth,
                        no_time_mux=False)

    ## Cost.

    hier_cost = [0] * me.NUM
    hier_cost[me.DRAM] = args.hier_cost[0]
    hier_cost[me.GBUF] = args.hier_cost[1]
    hier_cost[me.ITCN] = args.hier_cost[2]
    hier_cost[me.REGF] = args.hier_cost[3]
    cost = Cost(mac_op=args.op_cost,
                mem_hier=tuple(hier_cost),
                noc_hop=args.hop_cost,
                idl_unit=args.unit_idle_cost)

    ## Options.

    bypass = [True] * de.NUM
    bypass[de.IFM] = 'i' not in args.disable_bypass
    bypass[de.OFM] = 'o' not in args.disable_bypass
    bypass[de.FIL] = 'f' not in args.disable_bypass
    options = Option(
        sw_gbuf_bypass=tuple(bypass),
        sw_solve_loopblocking=args.solve_loopblocking,
        hw_access_forwarding=args.enable_access_forwarding,
        hw_gbuf_sharing=args.enable_gbuf_sharing,
        hw_gbuf_save_writeback=args.enable_save_writeback,
        partition_hybrid=args.hybrid_partition,
        partition_batch=args.batch_partition,
        partition_ifmaps=args.ifmaps_partition,
        partition_interlayer=args.interlayer_partition,
        layer_pipeline_time_ovhd=args.layer_pipeline_time_overhead,
        layer_pipeline_max_degree=args.layer_pipeline_max_degree,
        layer_pipeline_opt=not args.disable_interlayer_opt,
        opt_goal=args.goal.lower(),
        ntops=args.top,
        nprocesses=args.processes,
        verbose=args.verbose)

    ## Search schedules.

    nnd = NNDataflow(network, batch_size, resource, cost, MapStrategyEyeriss)
    tbeg = time.time()
    tops, cache_stats = nnd.schedule_search(options)
    tend = time.time()
    telapsed = tend - tbeg

    if not tops:
        sys.stderr.write('No valid dataflow found.\n')
        return None

    top = tops[0]

    ## Write results.

    res_map = OrderedDict()

    res_map['version'] = get_version(with_local=True)

    res_map['net'] = args.net
    res_map['batch'] = args.batch

    res_map['resource'] = resource._asdict()
    res_map['cost'] = cost._asdict()
    res_map['options'] = options._asdict()

    res_map['cache_stats'] = cache_stats
    res_map['elapsed'] = telapsed

    stats = stats_dict(top, cost)
    for key, val in stats.items():
        res_map[key] = val

    return res_map
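
# A minimal, illustrative way to call do_scheduling() without the original
# argparse front end: build a namespace that carries exactly the fields the
# function reads above. Every value here is an assumption for illustration,
# not a default of the real nn_dataflow tool.
import argparse

example_args = argparse.Namespace(
    net='alex_net', batch=64,
    nodes=(4, 4), array=(16, 16), word=16,
    gbuf=128 * 1024, regf=512,
    bus_width=0,              # 0 is treated as unlimited bus width above
    dram_bw=float('inf'),
    mem_type='3D',
    hier_cost=(200, 6, 2, 1), op_cost=1, hop_cost=0, unit_idle_cost=0,
    disable_bypass='',
    solve_loopblocking=False,
    enable_access_forwarding=False, enable_gbuf_sharing=False,
    enable_save_writeback=False,
    hybrid_partition=False, batch_partition=False, ifmaps_partition=False,
    interlayer_partition=False,
    layer_pipeline_time_overhead=float('inf'),
    layer_pipeline_max_degree=float('inf'),
    disable_interlayer_opt=False,
    goal='E', top=1, processes=1, verbose=False)

# results = do_scheduling(example_args)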
Example 3
class TestScheduling(unittest.TestCase):
    ''' Tests for Scheduling module. '''
    def setUp(self):

        self.layers = {}
        self.layers['BASE'] = ConvLayer(8, 16, 28, 3)
        self.layers['POOL'] = PoolingLayer(16, 28, 2)
        self.layers['LR'] = LocalRegionLayer(16, 28, nreg=3, sreg=1)

        self.batch_size = 4

        self.cost = Cost(mac_op=1,
                         mem_hier=(200, 6, 2, 1),
                         noc_hop=50,
                         idl_unit=50)

        self.none_cstr = SchedulingConstraint()
        self.cstr = SchedulingConstraint(topofm=1, topbat=self.batch_size)

        self.resource = Resource(
            proc_region=NodeRegion(origin=PhyDim2(0, 0),
                                   dim=PhyDim2(4, 4),
                                   type=NodeRegion.PROC),
            dram_region=NodeRegion(origin=PhyDim2(0, 0),
                                   dim=PhyDim2(4, 1),
                                   type=NodeRegion.DRAM),
            src_data_region=NodeRegion(origin=PhyDim2(0, 0),
                                       dim=PhyDim2(4, 1),
                                       type=NodeRegion.DRAM),
            dst_data_region=NodeRegion(origin=PhyDim2(0, 0),
                                       dim=PhyDim2(4, 1),
                                       type=NodeRegion.DRAM),
            dim_array=PhyDim2(16, 16),
            size_gbuf=65536,
            size_regf=64,
            array_bus_width=float('inf'),
            dram_bandwidth=float('inf'),
            no_time_mux=False)

        self.options = Option(partition_hybrid=True,
                              partition_batch=True,
                              partition_ifmaps=True,
                              ntops=10)

        self.ifmap_layouts = {}
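        # The per-type (h, w) partition factors below multiply to 4x4
        # (1*2*1*2 = 4 and 2*1*2*1 = 4); each layer's ifmap layout then
        # projects this scheme onto the 4x1 source data region.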
        part = PartitionScheme(order=(pe.INPP, pe.BATP, pe.OUTP, pe.OFMP),
                               pdims=((1, 2), (2, 1), (1, 2), (2, 1)))
        for wlkey in self.layers:
            input_layer = self.layers[wlkey].input_layer()
            self.ifmap_layouts[wlkey] = DataLayout(
                frngs=(FmapRange((0, 0, 0, 0),
                                 FmapPosition(b=self.batch_size,
                                              n=input_layer.nofm,
                                              h=input_layer.hofm,
                                              w=input_layer.wofm)), ),
                regions=(self.resource.src_data_region, ),
                parts=(part.projection(self.resource.src_data_region,
                                       appl2frng=True), ))

        self.sched_seq = (2, 0, 1)

    def test_valid_args(self):
        ''' Valid arguments for constructor. '''
        schd = Scheduling(self.layers['BASE'], self.batch_size, self.cost,
                          MapStrategyEyeriss)

        self.assertEqual(schd.layer, self.layers['BASE'])
        self.assertEqual(schd.batch_size, self.batch_size)
        self.assertEqual(schd.cost, self.cost)
        self.assertEqual(schd.map_strategy_class, MapStrategyEyeriss)

    def test_invalid_layer(self):
        ''' Invalid layer argument. '''
        with self.assertRaisesRegexp(TypeError, 'Scheduling: .*layer.*'):
            _ = Scheduling((64, 128, 28, 3), self.batch_size, self.cost,
                           MapStrategyEyeriss)

    def test_invalid_cost(self):
        ''' Invalid cost argument. '''
        with self.assertRaisesRegexp(TypeError, 'Scheduling: .*cost.*'):
            _ = Scheduling(self.layers['BASE'], self.batch_size,
                           tuple(self.cost), MapStrategyEyeriss)

    def test_invalid_map_strategy(self):
        ''' Invalid map_strategy_class argument. '''
        class _DummyClass(object):  # pylint: disable=too-few-public-methods
            pass

        with self.assertRaisesRegexp(TypeError,
                                     'Scheduling: .*map_strategy_class.*'):
            _ = Scheduling(self.layers['BASE'], self.batch_size, self.cost,
                           _DummyClass)

    def test_schedule_search(self):
        ''' Schedule search. '''
        for wlkey in self.layers:
            layer = self.layers[wlkey]
            ifmap_layout = self.ifmap_layouts[wlkey]

            schd = Scheduling(layer, self.batch_size, self.cost,
                              MapStrategyEyeriss)

            condition = SchedulingCondition(resource=self.resource,
                                            constraint=self.cstr,
                                            ifmap_layout=ifmap_layout,
                                            sched_seq=self.sched_seq)

            res = schd.schedule_search(condition, self.options)

            # Top N.
            self.assertLessEqual(len(res), self.options.ntops)
            self.assertTrue(all(isinstance(r, SchedulingResult) for r in res))
            for idx in range(len(res) - 1):
                self.assertLessEqual(res[idx].total_cost,
                                     res[idx + 1].total_cost)

            # Combination of loop blocking and partitioning.
            for r in res:
                self.assertAlmostEqual(
                    r.total_cost,
                    r.scheme['cost_op'] + r.scheme['cost_access'] +
                    r.scheme['cost_noc'] + r.scheme['cost_static'])
                self.assertEqual(r.total_ops, layer.total_ops(self.batch_size))
                self.assertSequenceEqual(r.scheme['total_nhops'], [
                    nh * f for nh, f in zip(r.scheme['unit_nhops'],
                                            r.scheme['fetch'][0])
                ])
                self.assertEqual(r.num_nodes,
                                 self.resource.proc_region.dim.size())

            # Constraint.
            for r in res:
                self.assertEqual(r.scheme['to'][0], 1)

            # Ofmap layout.
            for r in res:
                self.assertEqual(r.ofmap_layout.complete_fmap_range().size(),
                                 layer.total_ofmap_size(self.batch_size))

            # Sequence number.
            for r in res:
                self.assertTupleEqual(r.sched_seq, condition.sched_seq)

    def test_schedule_search_ilayout(self):
        ''' Invalid ifmap_layout. '''
        layer = self.layers['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        # Shift ifmap out of memory region.
        condition = SchedulingCondition(
            resource=self.resource,
            constraint=self.none_cstr,
            ifmap_layout=self.ifmap_layouts['BASE']._replace(regions=tuple(
                r._replace(origin=PhyDim2(-10, -10))
                for r in self.ifmap_layouts['BASE'].regions)),
            sched_seq=self.sched_seq)

        with self.assertRaisesRegexp(ValueError, 'Scheduling: .*ifmap.*'):
            _ = schd.schedule_search(condition, self.options)

        # Not match layer.
        condition = SchedulingCondition(
            resource=self.resource,
            constraint=self.none_cstr,
            ifmap_layout=self.ifmap_layouts['POOL'],
            sched_seq=self.sched_seq)

        with self.assertRaisesRegexp(ValueError, 'Scheduling: .*ifmap.*'):
            _ = schd.schedule_search(condition, self.options)

    def test_schedule_search_nolbs(self):
        ''' Schedule search with no lbs. '''
        layer = self.layers['BASE']
        ifmap_layout = self.ifmap_layouts['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        condition = SchedulingCondition(
            resource=self.resource._replace(size_regf=0),
            constraint=self.none_cstr,
            ifmap_layout=ifmap_layout,
            sched_seq=self.sched_seq)

        res = schd.schedule_search(condition, self.options)

        self.assertFalse(res)

    def test_pernode_sched_cache(self):
        ''' Per-node scheduling cache. '''
        # pylint: disable=no-member
        Scheduling.schedule_search_per_node.cache_clear()

        layer = self.layers['BASE']
        ifmap_layout = self.ifmap_layouts['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        self.assertEqual(schd.schedule_search_per_node.cache_info().currsize,
                         0)
        self.assertTupleEqual(schd.cache_stats(), (0, 0))

        condition = SchedulingCondition(resource=self.resource,
                                        constraint=self.cstr,
                                        ifmap_layout=ifmap_layout,
                                        sched_seq=self.sched_seq)

        Scheduling.schedule_search.cache_clear()
        _ = schd.schedule_search(condition, self.options)

        h, m = schd.cache_stats()
        self.assertEqual(schd.schedule_search_per_node.cache_info().currsize,
                         m)
        self.assertEqual(h, 0)
        n = m

        Scheduling.schedule_search.cache_clear()
        _ = schd.schedule_search(condition, self.options)

        self.assertEqual(schd.schedule_search_per_node.cache_info().currsize,
                         n)
        self.assertTupleEqual(schd.cache_stats(), (n, n))

    def test_pernode_sched_cache_key(self):
        ''' Per-node scheduling cache key must be hash-able. '''
        # pylint: disable=no-member
        Scheduling.schedule_search.cache_clear()
        Scheduling.schedule_search_per_node.cache_clear()

        layer = self.layers['BASE']
        ifmap_layout = self.ifmap_layouts['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        condition = SchedulingCondition(resource=self.resource,
                                        constraint=self.cstr,
                                        ifmap_layout=ifmap_layout,
                                        sched_seq=self.sched_seq)

        _ = schd.schedule_search(condition, self.options)

        h, m = schd.cache_stats()
        self.assertEqual(h, 0)

        # Make another instance.
        rsrc = Resource(**self.resource._asdict())
        cstr = self.cstr
        opts = Option(**self.options._asdict())
        self.assertNotEqual(id(rsrc), id(self.resource))
        self.assertNotEqual(id(opts), id(self.options))

        part = PartitionScheme(order=(pe.BATP, pe.INPP, pe.OUTP, pe.OFMP),
                               pdims=((2, 4), (2, 1), (1, 1), (1, 1)))

        _ = schd.schedule_search_per_node(part, rsrc, cstr, opts)

        h2, m2 = schd.cache_stats()
        self.assertEqual(h2, h + 1)
        self.assertEqual(m2, m)
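
# The TestCase above is collected by the standard unittest runner; a minimal
# entry point, assuming the class sits in its own module:
if __name__ == '__main__':
    unittest.main()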
Example 4
class TestScheduling(unittest.TestCase):
    ''' Tests for Scheduling module. '''

    def setUp(self):

        self.layers = {}
        self.layers['BASE'] = ConvLayer(8, 16, 28, 3)
        self.layers['POOL'] = PoolingLayer(16, 28, 2)
        self.layers['LR'] = LocalRegionLayer(16, 28, nreg=3, sreg=1)

        self.batch_size = 4

        self.cost = Cost(mac_op=1, mem_hier=(200, 6, 2, 1),
                         noc_hop=50, unit_static=50)

        self.resource = Resource(
            proc_region=NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(4, 4),
                                   type=NodeRegion.PROC),
            data_regions=(NodeRegion(origin=PhyDim2(0, 0), dim=PhyDim2(4, 1),
                                     type=NodeRegion.DATA),),
            dim_array=PhyDim2(16, 16), size_gbuf=65536, size_regf=64)

        self.options = Option(partition_hybrid=True, partition_batch=True,
                              partition_ifmaps=True, ntops=10)

        self.ifmap_layouts = {}
        part = PartitionScheme(order=(pe.INPP, pe.BATP, pe.OUTP, pe.OFMP),
                               pdims=((1, 2), (2, 1), (1, 2), (2, 1)))
        for wlkey in self.layers:
            self.ifmap_layouts[wlkey] = partition.get_ofmap_layout(
                self.layers[wlkey].input_layer(), self.batch_size, part,
                self.resource.src_data_region())

    def test_valid_args(self):
        ''' Valid arguments for constructor. '''
        schd = Scheduling(self.layers['BASE'], self.batch_size, self.cost,
                          MapStrategyEyeriss)

        self.assertEqual(schd.layer, self.layers['BASE'])
        self.assertEqual(schd.batch_size, self.batch_size)
        self.assertEqual(schd.cost, self.cost)
        self.assertEqual(schd.map_strategy_class, MapStrategyEyeriss)

    def test_invalid_layer(self):
        ''' Invalid layer argument. '''
        with self.assertRaisesRegexp(TypeError, 'Scheduling: .*layer.*'):
            _ = Scheduling((64, 128, 28, 3), self.batch_size, self.cost,
                           MapStrategyEyeriss)

    def test_invalid_cost(self):
        ''' Invalid cost argument. '''
        with self.assertRaisesRegexp(TypeError, 'Scheduling: .*cost.*'):
            _ = Scheduling(self.layers['BASE'], self.batch_size,
                           tuple(self.cost), MapStrategyEyeriss)

    def test_invalid_map_strategy(self):
        ''' Invalid map_strategy_class argument. '''
        class _DummyClass(object):  # pylint: disable=too-few-public-methods
            pass

        with self.assertRaisesRegexp(TypeError,
                                     'Scheduling: .*map_strategy_class.*'):
            _ = Scheduling(self.layers['BASE'], self.batch_size, self.cost,
                           _DummyClass)

    def test_schedule_search(self):
        ''' Schedule search. '''
        for wlkey in self.layers:
            layer = self.layers[wlkey]
            ifmap_layout = self.ifmap_layouts[wlkey]

            schd = Scheduling(layer, self.batch_size, self.cost,
                              MapStrategyEyeriss)

            condition = SchedulingCondition(resource=self.resource,
                                            ifmap_layout=ifmap_layout)

            res = schd.schedule_search(condition, self.options)

            # Top N.
            self.assertLessEqual(len(res), self.options.ntops)
            self.assertTrue(all(isinstance(r, SchedulingResult) for r in res))
            for idx in range(len(res) - 1):
                self.assertLessEqual(res[idx].total_cost,
                                     res[idx + 1].total_cost)

            # Combination of loop blocking and partitioning.
            for r in res:
                self.assertEqual(r.total_cost,
                                 r.dict_loop['cost'] + r.dict_part['cost'])
                self.assertEqual(r.dict_loop['ops'],
                                 layer.total_ops(self.batch_size))
                self.assertSequenceEqual(r.dict_part['total_nhops'],
                                         [nh * f for nh, f
                                          in zip(r.dict_part['unit_nhops'],
                                                 r.dict_loop['fetch'][0])])
                self.assertEqual(r.dict_part['num_nodes'],
                                 self.resource.proc_region.dim.size())

            # Ofmap layout.
            for r in res:
                self.assertEqual(r.ofmap_layout.frmap.complete_fmap_range()
                                 .size(),
                                 layer.total_ofmap_size(self.batch_size))

    def test_schedule_search_ilayout(self):
        ''' Invalid ifmap_layout. '''
        layer = self.layers['BASE']
        ifmap_layout = self.ifmap_layouts['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        # Shift ifmap out of memory region.
        condition = SchedulingCondition(
            resource=self.resource,
            ifmap_layout=ifmap_layout.view(PhyDim2(1, 1)))

        with self.assertRaisesRegexp(ValueError, 'Scheduling: .*ifmap.*'):
            _ = schd.schedule_search(condition, self.options)

        # Not match layer.
        condition = SchedulingCondition(
            resource=self.resource,
            ifmap_layout=self.ifmap_layouts['POOL'])

        with self.assertRaisesRegexp(ValueError, 'Scheduling: .*ifmap.*'):
            _ = schd.schedule_search(condition, self.options)

    def test_pernode_sched_cache(self):
        ''' Per-node scheduling cache. '''
        layer = self.layers['BASE']
        ifmap_layout = self.ifmap_layouts['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        self.assertEqual(len(schd.pernode_sched_cache), 0)
        self.assertTupleEqual(schd.cache_stats(), (0, 0))

        condition = SchedulingCondition(resource=self.resource,
                                        ifmap_layout=ifmap_layout)

        _ = schd.schedule_search(condition, self.options)

        h, m = schd.cache_stats()
        self.assertEqual(len(schd.pernode_sched_cache), m)
        self.assertEqual(h, 0)
        n = m

        _ = schd.schedule_search(condition, self.options)

        self.assertEqual(len(schd.pernode_sched_cache), n)
        self.assertTupleEqual(schd.cache_stats(), (n, n))

    def test_pernode_sched_cache_key(self):
        ''' Per-node scheduling cache key must be hash-able. '''
        layer = self.layers['BASE']
        ifmap_layout = self.ifmap_layouts['BASE']

        schd = Scheduling(layer, self.batch_size, self.cost,
                          MapStrategyEyeriss)

        condition = SchedulingCondition(resource=self.resource,
                                        ifmap_layout=ifmap_layout)

        _ = schd.schedule_search(condition, self.options)

        h, m = schd.cache_stats()
        self.assertEqual(h, 0)

        # Make another instance.
        rsrc = Resource(**self.resource._asdict())
        opts = Option(**self.options._asdict())
        self.assertNotEqual(id(rsrc), id(self.resource))
        self.assertNotEqual(id(opts), id(self.options))

        part = PartitionScheme(order=(pe.BATP, pe.INPP, pe.OUTP, pe.OFMP),
                               pdims=((2, 2), (2, 2), (1, 1), (1, 1)))

        _ = schd.schedule_search_per_node(part, rsrc, opts)

        h2, m2 = schd.cache_stats()
        self.assertEqual(h2, h + 1)
        self.assertEqual(m2, m)
Example 5
def do_scheduling(args):
    '''
    Get optimal scheduling for given problem. Return a result schedule.
    '''

    ## Network.

    network = import_network(args.net)
    batch_size = args.batch

    ## Resource.

    dim_nodes = PhyDim2(*args.nodes)
    dim_array = PhyDim2(*args.array)

    # Sizes of gbuf and regf are in words.
    word = (args.word + 7) // 8
    size_gbuf = args.gbuf // word
    size_regf = args.regf // word

    proc_region = NodeRegion(dim=dim_nodes,
                             origin=PhyDim2(0, 0),
                             type=NodeRegion.PROC)

    if args.mem_type == '2D':
        # Memory nodes are on two sides.
        data_regions = (NodeRegion(dim=PhyDim2(h=dim_nodes.h, w=1),
                                   origin=PhyDim2(h=0, w=0),
                                   type=NodeRegion.DATA),
                        NodeRegion(dim=PhyDim2(h=dim_nodes.h, w=1),
                                   origin=PhyDim2(h=0, w=dim_nodes.w - 1),
                                   type=NodeRegion.DATA))
    elif args.mem_type == '3D':
        # All nodes have memory.
        data_regions = (NodeRegion(dim=dim_nodes,
                                   origin=PhyDim2(0, 0),
                                   type=NodeRegion.DATA), )

    resource = Resource(proc_region=proc_region,
                        data_regions=data_regions,
                        dim_array=dim_array,
                        size_gbuf=size_gbuf,
                        size_regf=size_regf)

    ## Cost.

    hier_cost = [0] * me.NUM
    hier_cost[me.DRAM] = args.hier_cost[0]
    hier_cost[me.GBUF] = args.hier_cost[1]
    hier_cost[me.ITCN] = args.hier_cost[2]
    hier_cost[me.REGF] = args.hier_cost[3]
    cost = Cost(mac_op=args.op_cost,
                mem_hier=tuple(hier_cost),
                noc_hop=args.hop_cost,
                unit_static=args.unit_static_cost)

    ## Options.

    bypass = [True] * de.NUM
    bypass[de.IFM] = 'i' not in args.disable_bypass
    bypass[de.OFM] = 'o' not in args.disable_bypass
    bypass[de.FIL] = 'f' not in args.disable_bypass
    options = Option(sw_gbuf_bypass=tuple(bypass),
                     sw_solve_loopblocking=args.solve_loopblocking,
                     partition_hybrid=args.hybrid_partition,
                     partition_batch=args.batch_partition,
                     partition_ifmaps=args.ifmaps_partition,
                     ntops=args.top,
                     nprocesses=args.processes,
                     verbose=args.verbose)

    ## Search schedules.

    nnd = NNDataflow(network, batch_size, resource, cost, MapStrategyEyeriss)
    tops, cache_stats = nnd.schedule_search(options)

    if not tops:
        sys.stderr.write('No valid dataflow found.\n')
        return None

    top = tops[0]

    ## Write results.

    res_map = OrderedDict()

    res_map['version'] = get_version(with_local=True)

    res_map['net'] = args.net
    res_map['batch'] = args.batch

    res_map['resource'] = resource._asdict()
    res_map['cost'] = cost._asdict()
    res_map['options'] = options._asdict()

    res_map['cache_stats'] = cache_stats

    stats = stats_dict(top, cost)
    for key, val in stats.items():
        res_map[key] = val

    return res_map
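
# A minimal caller sketch: serialize whatever do_scheduling() returns. The
# json and sys imports are assumed, and `args` would come from the tool's
# argument parser (not shown in this snippet).
def dump_results(args):
    res = do_scheduling(args)
    if res is None:
        sys.exit(1)
    json.dump(res, sys.stdout, indent=2, default=str)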