def test_xgraph_factory(self):
    """Build an XGraph from raw XLayers and run a TestPass that rewrites conv1."""
    xlayers = [
        XLayer(name='in1', type=['Input'], bottoms=[], tops=['conv1'],
               targets=[]),
        XLayer(name='in2', type=['Input'], bottoms=[], tops=['add1'],
               targets=[]),
        XLayer(name='conv1', type=['Convolution'], bottoms=['in1'],
               tops=['add1'],
               data=ConvData(
                   weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
                   biases=np.array([0., 1.], dtype=np.float32)),
               targets=[]),
        XLayer(name='add1', type=['Eltwise'], bottoms=['conv1', 'in2'],
               tops=[], targets=[]),
    ]
    xgraph = TestXGraphBasePass.xgraph_factory.build_from_xlayer(xlayers)

    test_pass = TestPass()
    new_xgraph = test_pass.execute(xgraph)

    # Pass keeps the graph size but replaces the Convolution with a Pooling op
    assert len(new_xgraph) == 4
    assert new_xgraph.get('conv1').type[0] == 'Pooling'
def test_xgraph_add_remove(self):
    """Verify layer/input/output bookkeeping as layers are added and removed."""
    xgraph = XGraph()

    xgraph.add(XLayer(name='in1', type=['Input'], bottoms=[], tops=[],
                      targets=[]))
    # A lone input is simultaneously an input and an output of the graph
    assert len(xgraph) == 1
    assert len(xgraph.get_layer_names()) == 1
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 1

    X_conv = XLayer(
        name='conv1', type=['Convolution'], bottoms=['in1'], tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)),
        targets=[])
    xgraph.add(X_conv)
    # conv1 becomes the single output; in1 stays the single input
    assert len(xgraph) == 2
    assert len(xgraph.get_layer_names()) == 2
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 1

    xgraph.remove(X_conv.name)
    # Removal restores the original single-layer state
    assert len(xgraph) == 1
    assert len(xgraph.get_layer_names()) == 1
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 1
def test_simple(self):
    """NCHW graph is wrapped with Transpose layers by the NHWC layout pass."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=[], layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
    ]
    xgraph = TestLayoutTransformationPass.xgraph_factory\
        .build_from_xlayer(net)

    layout_transform_pass = XGraphLayoutTransformationPass('NHWC')
    new_xgraph = layout_transform_pass.execute(xgraph)

    xlayers = new_xgraph.get_layers()
    # Input -> Transpose (to NHWC) -> Convolution -> Transpose (back to NCHW)
    assert len(new_xgraph) == 4
    assert xlayers[0].type[0] == 'Input'
    assert xlayers[1].type[0] == 'Transpose'
    assert xlayers[2].type[0] == 'Convolution'
    assert xlayers[3].type[0] == 'Transpose'

    assert xlayers[0].bottoms == []
    assert xlayers[0].tops == ['conv1_bottom_NCHW>NHWC']
    assert xlayers[0].shapes == [1, 1, 4, 4]

    assert xlayers[1].bottoms == ['in1']
    assert xlayers[1].tops == ['conv1']
    assert xlayers[1].shapes == [1, 4, 4, 1]

    assert xlayers[2].bottoms == ['conv1_bottom_NCHW>NHWC']
    assert xlayers[2].tops == ['conv1_top_NHWC>NCHW']
    assert xlayers[2].shapes == [1, 3, 3, 2]

    assert xlayers[3].bottoms == ['conv1']
    assert xlayers[3].tops == []
    assert xlayers[3].shapes == [1, 2, 3, 3]

    # NCHW -> NHWC
    assert xlayers[1].attrs['axes'] == [0, 2, 3, 1]
    # NHWC -> NCHW
    assert xlayers[3].attrs['axes'] == [0, 3, 1, 2]
    assert xlayers[2].attrs['data_layout'] == 'NHWC'
def test_two_partitions_through_interruption(self):
    """Partitioning around an unsupported layer inside a residual branch.

    Here the BatchNorm layer is not supported by the 'test' target, which
    interrupts the partition between conv1/pool1 and the rest of the graph.
    """
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1', 'bn1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv1'], targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 4, 3, 3],
               sizes=[36], bottoms=['conv1'], tops=['concat1'],
               layer=['pool1'], targets=[]),
        XLayer(name='bn1', type=['BatchNorm'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['conv1'], tops=['concat1'],
               data=BatchData(np.array([1, 1]), np.array([0, 0]),
                              np.array([1, 1]), np.array([0, 0])),
               layer=['bn1'], targets=[]),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 6, 3, 3],
               sizes=[54], bottoms=['pool1', 'bn1'], tops=['conv2'],
               layer=['concat1'], targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 10, 2, 2],
               sizes=[40], bottoms=['concat1'], tops=[],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv2'], targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 6
    assert p_xgraph.get_subgraph_names() == ['xp0']

    p_xlayers = p_xgraph.get_layers()
    assert p_xlayers[0].type[0] in ['Input']
    assert p_xlayers[1].type[0] in ['Convolution']
    assert p_xlayers[2].type[0] in ['Pooling']
    assert p_xlayers[3].type[0] in ['BatchNorm']
    assert p_xlayers[4].type[0] in ['Concat']
    assert p_xlayers[5].type[0] in ['Convolution']

    # Only conv1 and pool1 land on the accelerated target
    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'test'
    assert p_xlayers[2].target == 'test'
    assert p_xlayers[3].target == 'cpu'
    assert p_xlayers[4].target == 'cpu'
    assert p_xlayers[5].target == 'cpu'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph == 'xp0'
    assert p_xlayers[2].subgraph == 'xp0'
    assert p_xlayers[3].subgraph is None
    assert p_xlayers[4].subgraph is None
    assert p_xlayers[5].subgraph is None

    assert p_xlayers[3].name == 'bn1'
    assert p_xlayers[3].bottoms == ['conv1']
    assert p_xlayers[3].tops == ['concat1']

    assert p_xlayers[4].name == 'concat1'
    assert p_xlayers[4].bottoms == ['pool1', 'bn1']
    assert p_xlayers[4].tops == ['conv2']

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp0 = subgraphs[0]
    assert xp0.name == 'xp0'
    xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp0.subgraph_data)

    assert xp0.bottoms == ['in1']
    assert xp0.tops == ['bn1', 'concat1']
    assert xp0.shapes == [[1, 2, 3, 3], [1, 4, 3, 3]]
    assert xp0.sizes == [18, 36]
    assert xp0.attrs['target'] == 'test'
    assert xp0.attrs['__bottom_tensors'] == {'xinput0': ['in1']}
    assert xp0.attrs['orig_bottom_tensors'] == {'xinput0': ['in1']}
    assert xp0.attrs['__top_tensors'] == \
        {'conv1': ['bn1'], 'pool1': ['concat1']}
    assert xp0.attrs['orig_top_tensors'] == \
        {'conv1': ['bn1'], 'pool1': ['concat1']}

    assert len(xp0_xgraph) == 3
    xp0_layers = xp0_xgraph.get_layers()
    assert [X.name for X in xp0_xgraph.get_input_layers()] == ['xinput0']
    # TODO: XGraph only recognizes output layers when they have no top
    # layers
    assert [X.name for X in xp0_xgraph.get_output_layers()] == ['pool1']

    assert xp0_layers[0].type[0] == 'Input'
    assert xp0_layers[0].layer[0] == 'conv1'
    assert xp0_layers[1].type[0] == 'Convolution'
    assert xp0_layers[2].type[0] == 'Pooling'

    assert xp0_layers[0].bottoms == []
    assert xp0_layers[0].tops == ['conv1']
    assert xp0_layers[1].bottoms == ['xinput0']
    assert xp0_layers[1].tops == ['pool1']
    assert xp0_layers[2].bottoms == ['conv1']
    assert xp0_layers[2].tops == []
def test_small(self):
    """Build a small partitioned graph for the dpuv2-zcu104 target."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 2, 2, 2], sizes=[8],
               bottoms=[], tops=['dense1'], layer=['in2'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'], layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                   'kernel_size': [3, 3],
                   'strides': [1, 1],
                   'dilation': [1, 1],
                   'groups': 1,
                   'channels': [2, 2]
               },
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['dense1'],
               layer=['pool1'],
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                   'kernel_size': [3, 3],
                   'strides': [1, 1],
               },
               targets=[]),
        XLayer(name='dense1', type=['Dense'], shapes=[1, 20], sizes=[20],
               bottoms=['pool1', 'in2'], tops=[],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['dense1'], targets=[]),
    ]
    xgraph = TestDPUContrib.xgraph_factory.build_from_xlayer(net)
    p_xgraph = partition(xgraph, ['dpuv2-zcu104'])
    dpu_xgraph = TestDPUContrib.target_registry\
        .get_target_build_func('dpuv2-zcu104')(p_xgraph)

    assert len(dpu_xgraph) == 6
    layers = dpu_xgraph.get_layers()

    assert layers[0].type[0] == 'Input'

    assert layers[1].type[0] == 'Transpose'
    assert layers[1].bottoms == ['in1']
    assert layers[1].tops == ['xp0']

    assert layers[2].type[0] == 'DPU'
    assert layers[2].bottoms == ['conv1_bottom_NCHW>NHWC']
    assert layers[2].tops == ['pool1']
    assert layers[2].shapes == [[1, 2, 2, 2]]
    assert layers[2].attrs['target'] == 'dpuv2-zcu104'
    assert layers[2].attrs['input_names'] == ['xinput0']
    assert layers[2].attrs['output_names'] == ['pool1']
    assert layers[2].attrs['input_layers']['xinput0'] == ['conv1']
    assert layers[2].attrs['output_layers']['pool1'] == ['pool1']
    assert layers[2].attrs['__top_tensors'] ==\
        {'pool1': ['pool1_top_NHWC>NCHW']}
    assert layers[2].attrs['orig_top_tensors'] ==\
        {'pool1': ['dense1']}
    assert layers[2].attrs['__bottom_tensors'] ==\
        {'xinput0': ['conv1_bottom_NCHW>NHWC']}
    assert layers[2].attrs['orig_bottom_tensors'] ==\
        {'xinput0': ['in1']}

    # Merged TupleGetItem and Transpose layer
    assert layers[3].type[0] == 'TupleGetItem'
    assert layers[3].name == 'pool1'
    assert layers[3].shapes == [1, 2, 2, 2]
    assert layers[3].bottoms == ['xp0']
    assert layers[3].tops == ['dense1']
    assert layers[3].attrs['transpose'] is True

    assert layers[4].type[0] == 'Input'
    assert layers[4].name == 'in2'
    assert layers[4].tops == ['dense1']

    assert layers[5].type[0] == 'Dense'
    assert layers[5].name == 'dense1'
    assert layers[5].shapes == [1, 20]
    assert layers[5].bottoms == ['pool1', 'in2']
    assert layers[5].tops == []
def test_small(self):
    """Build a small partitioned graph for the dpuv2-zcu104 DPUCZDX8G target."""
    net = [
        XLayer(
            name="in1",
            type=["Input"],
            shapes=[1, 1, 4, 4],
            sizes=[16],
            bottoms=[],
            tops=["conv1"],
            layer=["in1"],
            targets=[],
        ),
        XLayer(
            name="in2",
            type=["Input"],
            shapes=[1, 2, 2, 2],
            sizes=[8],
            bottoms=[],
            tops=["dense1"],
            layer=["in2"],
            targets=[],
        ),
        XLayer(
            name="conv1",
            type=["Convolution"],
            shapes=[1, 2, 3, 3],
            sizes=[18],
            bottoms=["in1"],
            tops=["pool1"],
            layer=["conv1"],
            data=ConvData(np.array([1, 1]), np.array([0, 0])),
            attrs={
                "data_layout": "NCHW",
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                "kernel_size": [3, 3],
                "strides": [1, 1],
                "dilation": [1, 1],
                "groups": 1,
                "channels": [2, 2],
            },
            targets=[],
        ),
        XLayer(
            name="pool1",
            type=["Pooling"],
            shapes=[1, 2, 2, 2],
            sizes=[8],
            bottoms=["conv1"],
            tops=["dense1"],
            layer=["pool1"],
            attrs={
                "data_layout": "NCHW",
                "padding": [[0, 0], [0, 0], [1, 1], [1, 1]],
                "kernel_size": [3, 3],
                "strides": [1, 1],
            },
            targets=[],
        ),
        XLayer(
            name="dense1",
            type=["Dense"],
            shapes=[1, 20],
            sizes=[20],
            bottoms=["pool1", "in2"],
            tops=[],
            data=ConvData(np.array([1, 1]), np.array([0, 0])),
            layer=["dense1"],
            targets=[],
        ),
    ]
    xgraph = TestDPUCZDX8G.xgraph_factory.build_from_xlayer(net)
    p_xgraph = partition(xgraph, ["dpuv2-zcu104"])
    dpu_xgraph = TestDPUCZDX8G.target_registry.get_target_build_func(
        "dpuv2-zcu104")(p_xgraph)

    assert len(dpu_xgraph) == 6
    layers = dpu_xgraph.get_layers()

    assert layers[0].type[0] == "Input"

    assert layers[1].type[0] == "Transpose"
    assert layers[1].bottoms == ["in1"]
    assert layers[1].tops == ["xp0"]

    assert layers[2].type[0] == "DPU"
    assert layers[2].bottoms == ["conv1_bottom_NCHW-NHWC"]
    assert layers[2].tops == ["pool1"]
    assert layers[2].shapes == [[1, 2, 2, 2]]
    assert layers[2].attrs["target"] == "dpuv2-zcu104"
    assert layers[2].attrs["input_names"] == ["xinput0"]
    assert layers[2].attrs["output_names"] == ["pool1"]
    assert layers[2].attrs["input_layers"]["xinput0"] == ["conv1"]
    assert layers[2].attrs["output_layers"]["pool1"] == ["pool1"]
    assert layers[2].attrs["__top_tensors"] == {
        "pool1": ["pool1_top_NHWC-NCHW"]
    }
    assert layers[2].attrs["orig_top_tensors"] == {"pool1": ["dense1"]}
    assert layers[2].attrs["__bottom_tensors"] == {
        "xinput0": ["conv1_bottom_NCHW-NHWC"]
    }
    assert layers[2].attrs["orig_bottom_tensors"] == {"xinput0": ["in1"]}

    # Merged TupleGetItem and Transpose layer
    assert layers[3].type[0] == "TupleGetItem"
    assert layers[3].name == "pool1"
    assert layers[3].shapes == [1, 2, 2, 2]
    assert layers[3].bottoms == ["xp0"]
    assert layers[3].tops == ["dense1"]
    assert layers[3].attrs["transpose"] is True

    assert layers[4].type[0] == "Input"
    assert layers[4].name == "in2"
    assert layers[4].tops == ["dense1"]

    assert layers[5].type[0] == "Dense"
    assert layers[5].name == "dense1"
    assert layers[5].shapes == [1, 20]
    assert layers[5].bottoms == ["pool1", "in2"]
    assert layers[5].tops == []
def test_visualize(self):
    """Build a small diamond-shaped graph and render it to a PNG file."""
    xgraph = XGraph()
    xgraph.add(XLayer(
        name='in1',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='in2',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='conv1',
        type=['Convolution'],
        bottoms=['in1'],
        tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='add1',
        type=['Eltwise'],
        bottoms=['conv1', 'in2'],
        tops=[],
        targets=[]
    ))
    # insert() splices conv2 between in2 and add1
    xgraph.insert(XLayer(
        name='conv2',
        type=['Convolution'],
        bottoms=['in2'],
        tops=['add1'],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='conv3',
        type=['Convolution'],
        bottoms=['add1'],
        tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='pool1',
        type=['Pooling'],
        bottoms=['add1'],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='add2',
        type=['Eltwise'],
        bottoms=['conv3', 'pool1'],
        tops=[],
        targets=[]
    ))
    assert len(xgraph) == 8
    assert xgraph.get_layer_names() == \
        ['in1', 'conv1', 'in2', 'conv2', 'add1', 'conv3', 'pool1', 'add2']

    out_file = os.path.join(FILE_DIR, 'viz.png')
    xgraph.visualize(out_file)
    # Clean up the rendered image so repeated runs start fresh
    os.remove(out_file)
def test_xgraph_device_tagging(self):
    """Check target annotations survive partitioning and layer removal."""
    xgraph = XGraph()
    xgraph.add(XLayer(
        name='in1',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='in2',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='conv1',
        type=['Convolution'],
        bottoms=['in1'],
        tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='add1',
        type=['Eltwise'],
        bottoms=['conv1', 'in2'],
        tops=[],
        targets=[]
    ))
    xgraph.insert(XLayer(
        name='conv2',
        type=['Convolution'],
        bottoms=['in2'],
        tops=['add1'],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='pool1',
        type=['Pooling'],
        bottoms=['add1'],
        tops=[],
        targets=[]
    ))
    xgraph = partition(xgraph, ['cpu'])

    assert len(xgraph) == 6
    xlayers = xgraph.get_layers()
    assert xgraph.get_layer_names() == \
        ['in1', 'conv1', 'in2', 'conv2', 'add1', 'pool1']
    # Conv/Pool ops are additionally supported by the 'test' target
    assert set(xlayers[0].targets) == set(['cpu', 'qsim'])
    assert set(xlayers[1].targets) == set(['cpu', 'qsim', 'test'])
    assert set(xlayers[2].targets) == set(['cpu', 'qsim'])
    assert set(xlayers[3].targets) == set(['cpu', 'qsim', 'test'])
    assert set(xlayers[4].targets) == set(['cpu', 'qsim'])
    assert set(xlayers[5].targets) == set(['cpu', 'qsim', 'test'])

    xgraph.remove('conv1')
    assert len(xgraph) == 5
    xlayers = xgraph.get_layers()
    assert xgraph.get_layer_names() == \
        ['in1', 'in2', 'conv2', 'add1', 'pool1']
    # add1 is rewired directly onto in1 after conv1 is removed
    assert xlayers[3].type[0] == 'Eltwise'
    assert xlayers[3].bottoms == ['in1', 'conv2']
    assert set(xlayers[0].targets) == set(['cpu', 'qsim'])
    assert set(xlayers[1].targets) == set(['cpu', 'qsim'])
    assert set(xlayers[2].targets) == set(['cpu', 'qsim', 'test'])
    assert set(xlayers[3].targets) == set(['cpu', 'qsim'])
    assert set(xlayers[4].targets) == set(['cpu', 'qsim', 'test'])
def test_conv(self):
    """Run a single convolution through the quantization-simulation pass."""
    W = np.reshape(
        np.array([[[1, 2], [3, 0]], [[1, 1], [0, 1]]], dtype=np.float32),
        (2, 1, 2, 2))
    B = np.array([0., 0.], dtype=np.float32)

    net = [
        XLayer(
            name='in',
            type=['Input'],
            shapes=[1, 1, 4, 4],
            sizes=[16],
            bottoms=[],
            tops=['conv2d0'],
            layer=['in'],
            targets=[]
        ),
        XLayer(
            name='conv2d0',
            type=['Convolution'],
            shapes=[1, 2, 3, 3],
            sizes=[18],
            bottoms=['in'],
            tops=[],
            layer=['conv2d0'],
            data=ConvData(W, B),
            attrs={
                'data_layout': 'NCHW',
                'kernel_layout': 'OIHW',
                'kernel_size': [2, 2],
                'shape': [1, 2, 3, 3],
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [1, 1],
                'dilation': [1, 1],
                'groups': 1
            },
            targets=[]
        )
    ]
    xgraph = TestQuantSimPass.xgraph_factory.build_from_xlayer(
        net, name='test1'
    )

    quant_sim_pass = XGraphQuantSimPass(
        fdir=FILE_PATH,
        name=xgraph.get_name() + '_qsim'
    )
    qsim_xgraph = quant_sim_pass.execute(xgraph=xgraph,
                                         subgraphs_only=False)

    exec_graph = TestQuantSimPass.xf_exec_graph_factory.build_runtime(
        qsim_xgraph
    )

    inpts = {
        'in': np.reshape(
            np.array([
                [10, 10, 0, 40],
                [50, 10, 0, 80],
                [30, 50, 10, 0],
                [10, 90, 30, 40]],
                dtype=np.float32
            ),
            (1, 1, 4, 4))
    }
    res = exec_graph.run(inpts)
    outpt = res[0]
    # for idx, layer, inpts, outpt, _ in exec_graph.run_stepwise(inpts):
    #     print(layer.name, outpt)

    # Quantization-simulated result: values are scaled by the simulated
    # quantization step, hence the non-integer expectations
    expected_outpt = np.array([[[
        [182.28346, 36.45669, 80.20472],
        [160.40944, 160.40944, 189.5748],
        [160.40944, 342.6929, 102.078735]],
        [[29.165354, 7.2913384, 123.95275],
         [109.37008, 21.874016, 80.20472],
         [167.70079, 87.49606, 51.039368]]]],
        dtype=np.float32)

    np.testing.assert_array_almost_equal(outpt, expected_outpt, decimal=4)
def test_multi_top_tensors(self):
    """Partition with multiple (transposed) top tensors leaving the subgraph."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['t1', 't2'],
               layer=['pool1'], targets=[]),
        XLayer(name='t1', type=['Transpose'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['pool1'], tops=['s1'], layer=['t1'],
               internal=1, targets=[], attrs={'axes': [0, 2, 3, 1]}),
        XLayer(name='t2', type=['Transpose'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['pool1'], tops=['s2', 's3'],
               layer=['t2'], internal=1, targets=[],
               attrs={'axes': [0, 2, 3, 1]}),
        XLayer(name='s1', type=['Sqrt'], shapes=[1, 2, 2, 2], sizes=[8],
               bottoms=['t1'], tops=[], layer=['s1'], internal=0,
               targets=[]),
        XLayer(name='s2', type=['Sqrt'], shapes=[1, 2, 2, 2], sizes=[8],
               bottoms=['t2'], tops=[], layer=['s2'], internal=0,
               targets=[]),
        XLayer(name='s3', type=['Sqrt'], shapes=[1, 2, 2, 2], sizes=[8],
               bottoms=['t2'], tops=[], layer=['s3'], internal=0,
               targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 8
    assert p_xgraph.get_subgraph_names() == ['xp0']

    p_xlayers = p_xgraph.get_layers()
    assert p_xlayers[0].type[0] in ['Input']
    assert p_xlayers[1].type[0] in ['Convolution']
    assert p_xlayers[2].type[0] in ['Pooling']
    assert p_xlayers[3].type[0] in ['Transpose']
    assert p_xlayers[4].type[0] in ['Sqrt']
    assert p_xlayers[5].type[0] in ['Transpose']
    assert p_xlayers[6].type[0] in ['Sqrt']
    assert p_xlayers[7].type[0] in ['Sqrt']

    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'test'
    assert p_xlayers[2].target == 'test'
    assert p_xlayers[3].target == 'cpu'
    assert p_xlayers[4].target == 'cpu'
    assert p_xlayers[5].target == 'cpu'
    assert p_xlayers[6].target == 'cpu'
    assert p_xlayers[7].target == 'cpu'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph == 'xp0'
    assert p_xlayers[2].subgraph == 'xp0'
    assert p_xlayers[3].subgraph is None
    assert p_xlayers[4].subgraph is None
    assert p_xlayers[5].subgraph is None
    assert p_xlayers[6].subgraph is None
    assert p_xlayers[7].subgraph is None

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp0 = subgraphs[0]
    assert xp0.name == 'xp0'
    xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp0.subgraph_data)

    assert xp0.bottoms == ['in1']
    assert xp0.tops == ['t1', 't2']
    assert xp0.shapes == [[1, 2, 2, 2], [1, 2, 2, 2]]
    assert xp0.sizes == [8, 8]
    assert len(xp0_xgraph) == 3

    __bottom_tensors = xp0.attrs['__bottom_tensors']
    orig_bottom_tensors = xp0.attrs['orig_bottom_tensors']

    assert len(__bottom_tensors) == 1
    assert 'xinput0' in __bottom_tensors
    assert __bottom_tensors['xinput0'] == ['in1']

    assert len(orig_bottom_tensors) == 1
    assert 'xinput0' in orig_bottom_tensors
    assert orig_bottom_tensors['xinput0'] == ['in1']

    __top_tensors = xp0.attrs['__top_tensors']
    orig_top_tensors = xp0.attrs['orig_top_tensors']

    # The internal Transpose layers are the immediate top tensors; the
    # original (external) top tensors are the Sqrt layers behind them
    assert len(__top_tensors) == 1
    assert 'pool1' in __top_tensors
    assert __top_tensors['pool1'] == ['t1', 't2']

    assert len(orig_top_tensors) == 1
    assert 'pool1' in orig_top_tensors
    assert orig_top_tensors['pool1'] == ['s1', 's2', 's3']
def test_inception_like_block(self):
    """Partition an inception-style block with two Concat layers."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['concat1'], layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['concat1'], layer=['in2'], targets=[]),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 2, 4, 4],
               sizes=[32], bottoms=['in1', 'in2'],
               tops=['conv1', 'conv2'], layer=['concat1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 4, 3, 3],
               sizes=[], bottoms=['concat1'], tops=['pool1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv1'], targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 4, 2, 2],
               sizes=[], bottoms=['conv1'], tops=['concat2'],
               layer=['pool1'], targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 4, 2, 2],
               sizes=[], bottoms=['concat1'], tops=['concat2'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv2'], targets=[]),
        XLayer(name='concat2', type=['Concat'], shapes=[1, 8, 2, 2],
               sizes=[32], bottoms=['pool1', 'conv2'], tops=['dense1'],
               layer=['concat2'], targets=[]),
        XLayer(name='dense1', type=['Dense'], shapes=[1, 20], sizes=[20],
               bottoms=['concat2'], tops=[], layer=['dense1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 8

    p_xlayers = p_xgraph.get_layers()
    # Everything between the inputs and the final Dense runs on 'test'
    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'cpu'
    assert p_xlayers[2].target == 'test'
    assert p_xlayers[3].target == 'test'
    assert p_xlayers[4].target == 'test'
    assert p_xlayers[5].target == 'test'
    assert p_xlayers[6].target == 'test'
    assert p_xlayers[7].target == 'cpu'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph is None
    assert p_xlayers[2].subgraph == 'xp0'
    assert p_xlayers[3].subgraph == 'xp0'
    assert p_xlayers[4].subgraph == 'xp0'
    assert p_xlayers[5].subgraph == 'xp0'
    assert p_xlayers[6].subgraph == 'xp0'
    assert p_xlayers[7].subgraph is None

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp0 = subgraphs[0]
    assert xp0.name == 'xp0'
    xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp0.subgraph_data)

    assert xp0.bottoms == ['in1', 'in2']
    assert xp0.tops == ['dense1']
    assert xp0.shapes == [[1, 8, 2, 2]]
    assert xp0.sizes == [32]
    assert len(xp0_xgraph) == 7

    xp0_layers = xp0_xgraph.get_layers()
    assert xp0_layers[0].type[0] == 'Input'
    assert xp0_layers[0].layer[0] == 'concat1'
    assert xp0_layers[1].type[0] == 'Input'
    assert xp0_layers[1].layer[0] == 'concat1'
    assert xp0_layers[2].type[0] == 'Concat'
    assert xp0_layers[3].type[0] == 'Convolution'
    assert xp0_layers[4].type[0] == 'Pooling'
    assert xp0_layers[5].type[0] == 'Convolution'
    assert xp0_layers[6].type[0] == 'Concat'

    assert xp0_layers[0].bottoms == []
    assert xp0_layers[0].tops == ['concat1']
    assert xp0_layers[1].bottoms == []
    assert xp0_layers[1].tops == ['concat1']
    assert xp0_layers[2].bottoms == ['xinput0', 'xinput1']
    assert xp0_layers[2].tops == ['conv1', 'conv2']
    assert xp0_layers[3].bottoms == ['concat1']
    assert xp0_layers[3].tops == ['pool1']
    assert xp0_layers[4].bottoms == ['conv1']
    assert xp0_layers[4].tops == ['concat2']
    assert xp0_layers[5].bottoms == ['concat1']
    assert xp0_layers[5].tops == ['concat2']
    assert xp0_layers[6].bottoms == ['pool1', 'conv2']
    assert xp0_layers[6].tops == []
def test_basic(self):
    """Basic partitioning: conv1 + pool1 form one 'test' subgraph."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 2, 2, 2], sizes=[8],
               bottoms=[], tops=['add1'], layer=['in2'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv1'], targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['add1'],
               layer=['pool1'], targets=[]),
        XLayer(name='add1', type=['Eltwise'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['pool1', 'in2'], tops=[],
               layer=['add1'], targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 5
    assert p_xgraph.get_subgraph_names() == ['xp0']

    p_xlayers = p_xgraph.get_layers()
    assert p_xlayers[0].type[0] in ['Input']
    assert p_xlayers[1].type[0] in ['Convolution']
    assert p_xlayers[2].type[0] in ['Pooling']
    assert p_xlayers[3].type[0] in ['Input']
    assert p_xlayers[4].type[0] in ['Eltwise']

    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'test'
    assert p_xlayers[2].target == 'test'
    assert p_xlayers[3].target == 'cpu'
    assert p_xlayers[4].target == 'cpu'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph == 'xp0'
    assert p_xlayers[2].subgraph == 'xp0'
    assert p_xlayers[3].subgraph is None
    assert p_xlayers[4].subgraph is None

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp0 = subgraphs[0]
    assert xp0.name == 'xp0'
    xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp0.subgraph_data)

    assert xp0.bottoms == ['in1']
    assert xp0.tops == ['add1']
    assert xp0.shapes == [[1, 2, 2, 2]]
    assert xp0.sizes == [8]
    assert len(xp0_xgraph) == 3

    xp0_layers = xp0_xgraph.get_layers()
    assert xp0_layers[0].type[0] == 'Input'
    assert xp0_layers[0].layer[0] == 'conv1'
    assert xp0_layers[1].type[0] == 'Convolution'
    assert xp0_layers[2].type[0] == 'Pooling'

    assert xp0_layers[0].bottoms == []
    assert xp0_layers[0].tops == ['conv1']
    assert xp0_layers[1].bottoms == ['xinput0']
    assert xp0_layers[1].tops == ['pool1']
    assert xp0_layers[2].bottoms == ['conv1']
    assert xp0_layers[2].tops == []
def test_two_partition_inputs(self):
    """One subgraph (xp2) fed by two separate graph inputs."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv2'], layer=['in2'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv1'], targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['concat1'],
               layer=['pool1'], targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['in2'], tops=['concat1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv2'], targets=[]),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 4, 2, 2],
               sizes=[16], bottoms=['pool1', 'conv2'], tops=['dense1'],
               layer=['concat1'], targets=[]),
        XLayer(name='dense1', type=['Dense'], shapes=[1, 20], sizes=[],
               bottoms=['concat1'], tops=[], layer=['dense1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 7
    assert p_xgraph.get_subgraph_names() == ['xp2']

    p_xlayers = p_xgraph.get_layers()
    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'test'
    assert p_xlayers[2].target == 'test'
    assert p_xlayers[3].target == 'cpu'
    assert p_xlayers[4].target == 'test'
    assert p_xlayers[5].target == 'test'
    assert p_xlayers[6].target == 'cpu'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph == 'xp2'
    assert p_xlayers[2].subgraph == 'xp2'
    assert p_xlayers[3].subgraph is None
    assert p_xlayers[4].subgraph == 'xp2'
    assert p_xlayers[5].subgraph == 'xp2'
    assert p_xlayers[6].subgraph is None

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp2 = subgraphs[0]
    assert xp2.name == 'xp2'
    xp2_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp2.subgraph_data)

    assert xp2.bottoms == ['in1', 'in2']
    assert xp2.tops == ['dense1']
    assert xp2.shapes == [[1, 4, 2, 2]]
    assert xp2.sizes == [16]
    assert len(xp2_xgraph) == 6

    xp2_layers = xp2_xgraph.get_layers()
    assert xp2_layers[0].type[0] == 'Input'
    assert xp2_layers[0].layer[0] == 'conv1'
    assert xp2_layers[1].type[0] == 'Convolution'
    assert xp2_layers[2].type[0] == 'Pooling'
    assert xp2_layers[3].type[0] == 'Input'
    assert xp2_layers[3].layer[0] == 'conv2'
    assert xp2_layers[4].type[0] == 'Convolution'
    assert xp2_layers[5].type[0] == 'Concat'

    assert xp2_layers[0].bottoms == []
    assert xp2_layers[0].tops == ['conv1']
    assert xp2_layers[1].bottoms == ['xinput0']
    assert xp2_layers[1].tops == ['pool1']
    assert xp2_layers[2].bottoms == ['conv1']
    assert xp2_layers[2].tops == ['concat1']
    assert xp2_layers[3].bottoms == []
    assert xp2_layers[3].tops == ['conv2']
    assert xp2_layers[4].bottoms == ['xinput1']
    assert xp2_layers[4].tops == ['concat1']
    assert xp2_layers[5].bottoms == ['pool1', 'conv2']
    assert xp2_layers[5].tops == []
def test_multiple_partitions_largest_last(self):
    """When a Transpose splits the graph, only the largest partition survives."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4], sizes=[16],
               bottoms=[], tops=['conv1'], layer=['in1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['t1'], layer=['conv1'],
               data=ConvData(weights=np.array([1, 1], dtype=np.float32),
                             biases=np.array([0, 0], dtype=np.float32)),
               targets=[]),
        XLayer(name='t1', type=['Transpose'], shapes=[1, 3, 3, 2],
               sizes=[18], bottoms=['conv1'], tops=['conv2'],
               layer=['t1'], targets=[], attrs={'axes': [0, 2, 3, 1]}),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 3, 3, 2],
               sizes=[18], bottoms=['t1'], tops=['pool1'],
               layer=['conv2'],
               data=ConvData(weights=np.array([1, 1], dtype=np.float32),
                             biases=np.array([0, 0], dtype=np.float32)),
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv2'], tops=[], layer=['pool1'],
               targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 5
    # ! Only xp1 because only one subgraph can exist for now (largest)
    assert set(p_xgraph.get_subgraph_names()) == set(['xp1'])

    p_xlayers = p_xgraph.get_layers()
    assert p_xlayers[0].type[0] in ['Input']
    assert p_xlayers[1].type[0] in ['Convolution']
    assert p_xlayers[2].type[0] in ['Transpose']
    assert p_xlayers[3].type[0] in ['Convolution']
    assert p_xlayers[4].type[0] in ['Pooling']

    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'cpu'
    assert p_xlayers[2].target == 'cpu'
    assert p_xlayers[3].target == 'test'
    assert p_xlayers[4].target == 'test'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph is None
    assert p_xlayers[2].subgraph is None
    assert p_xlayers[3].subgraph == 'xp1'
    assert p_xlayers[4].subgraph == 'xp1'

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp1 = subgraphs[0]
    assert xp1.name == 'xp1'
    xp1_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp1.subgraph_data)

    assert xp1.bottoms == ['t1']
    assert xp1.tops == []
    assert xp1.shapes == [[1, 2, 2, 2]]
    assert xp1.sizes == [8]
    assert len(xp1_xgraph) == 3

    xp1_layers = xp1_xgraph.get_layers()
    assert xp1_layers[0].type[0] == 'Input'
    assert xp1_layers[0].layer[0] == 'conv2'
    assert xp1_layers[1].type[0] == 'Convolution'
    assert xp1_layers[2].type[0] == 'Pooling'

    assert xp1_layers[0].bottoms == []
    assert xp1_layers[0].tops == ['conv2']
    assert xp1_layers[1].bottoms == ['xinput0']
    assert xp1_layers[1].tops == ['pool1']
    assert xp1_layers[2].bottoms == ['conv2']
    assert xp1_layers[2].tops == []
def test_multiple_partitions(self):
    """Graph with two supportable regions; partitioner keeps only the largest
    subgraph (conv1 + pool1 -> xp0) and leaves the rest on cpu."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['conv1'],
               layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=[], tops=['add1'],
               layer=['in2'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['conv1'], targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['add1'],
               layer=['pool1'], targets=[]),
        XLayer(name='add1', type=['Eltwise'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['pool1', 'in2'], tops=[],
               layer=['add1'], targets=[]),
        XLayer(name='bn1', type=['BatchNorm'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['add1'], tops=['pool2'],
               data=BatchData(np.array([1, 1]), np.array([0, 0]),
                              np.array([1, 1]), np.array([0, 0])),
               layer=['bn1'], targets=[]),
        XLayer(name='pool2', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['bn1'], tops=[],
               layer=['pool2'], targets=[]),
    ]
    xgraph = TestXGraphPartitioner.xgraph_factory.build_from_xlayer(net)
    TargetRegistry().annotate_ops(xgraph)
    p_xgraph = TestXGraphPartitioner.xgraph_partitioner.partition(
        xgraph, ['test'])

    assert len(p_xgraph.get_layer_names()) == 7
    # ! Only xp0 because only one subgraph can exist for now (largest)
    assert set(p_xgraph.get_subgraph_names()) == set(['xp0'])

    p_xlayers = p_xgraph.get_layers()
    assert p_xlayers[0].type[0] in ['Input']
    assert p_xlayers[1].type[0] in ['Convolution']
    assert p_xlayers[2].type[0] in ['Pooling']
    assert p_xlayers[3].type[0] in ['Input']
    assert p_xlayers[4].type[0] in ['Eltwise']
    assert p_xlayers[5].type[0] in ['BatchNorm']
    assert p_xlayers[6].type[0] in ['Pooling']

    assert p_xlayers[0].target == 'cpu'
    assert p_xlayers[1].target == 'test'
    assert p_xlayers[2].target == 'test'
    assert p_xlayers[3].target == 'cpu'
    assert p_xlayers[4].target == 'cpu'
    assert p_xlayers[5].target == 'cpu'
    # ! CPU because only one subgraph can exist for now (largest)
    assert p_xlayers[6].target == 'cpu'

    assert p_xlayers[0].subgraph is None
    assert p_xlayers[1].subgraph == 'xp0'
    assert p_xlayers[2].subgraph == 'xp0'
    assert p_xlayers[3].subgraph is None
    assert p_xlayers[4].subgraph is None
    assert p_xlayers[5].subgraph is None
    assert p_xlayers[6].subgraph is None

    subgraphs = TestXGraphPartitioner.xgraph_partitioner.get_subgraphs(
        p_xgraph)
    assert len(subgraphs) == 1
    xp0 = subgraphs[0]
    assert xp0.name == 'xp0'
    xp0_xgraph = TestXGraphPartitioner.xgraph_factory\
        .build_from_xlayer(xp0.subgraph_data)

    assert xp0.bottoms == ['in1']
    assert xp0.tops == ['add1']
    assert xp0.shapes == [[1, 2, 2, 2]]
    assert xp0.sizes == [8]
    assert len(xp0_xgraph) == 3

    xp0_layers = xp0_xgraph.get_layers()
    assert xp0_layers[0].type[0] == 'Input'
    assert xp0_layers[0].layer[0] == 'conv1'
    assert xp0_layers[1].type[0] == 'Convolution'
    assert xp0_layers[2].type[0] == 'Pooling'

    assert xp0_layers[0].bottoms == []
    assert xp0_layers[0].tops == ['conv1']
    assert xp0_layers[1].bottoms == ['xinput0']
    assert xp0_layers[1].tops == ['pool1']
    assert xp0_layers[2].bottoms == ['conv1']
    assert xp0_layers[2].tops == []
def test_xgraph_factory(self):
    """Build a diamond-shaped graph (two inputs, fan-out after add1, merge
    at add2) and verify topology bookkeeping of the resulting XGraph."""
    xlayers = [
        XLayer(name='in1', type=['Input'], bottoms=[],
               tops=['conv1'], targets=[]),
        XLayer(name='in2', type=['Input'], bottoms=[],
               tops=['add1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], bottoms=['in1'],
               tops=['add1'],
               data=ConvData(
                   weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
                   biases=np.array([0., 1.], dtype=np.float32)),
               targets=[]),
        XLayer(name='add1', type=['Eltwise'], bottoms=['conv1', 'in2'],
               tops=['conv2', 'pool1'], targets=[]),
        XLayer(name='conv2', type=['Convolution'], bottoms=['add1'],
               tops=['add2'],
               data=ConvData(
                   weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
                   biases=np.array([0., 1.], dtype=np.float32)),
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], bottoms=['add1'],
               tops=['add2'], targets=[]),
        XLayer(name='add2', type=['Eltwise'], bottoms=['conv2', 'pool1'],
               tops=[], targets=[]),
    ]
    xgraph = TestXGraphFactory.xgraph_factory.build_from_xlayer(xlayers)

    # GENERAL
    assert len(xgraph) == 7
    assert len(xgraph.get_layer_names()) == 7
    assert xgraph.get_layer_names() == \
        ['in1', 'conv1', 'in2', 'add1', 'conv2', 'pool1', 'add2']
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 2

    # DEVICES
    xlayers = xgraph.get_layers()
    # assert set(xlayers[0].targets) == set(['cpu'])
    # assert set(xlayers[1].targets) == set(['cpu', 'test'])
    # assert set(xlayers[2].targets) == set(['cpu'])
    # assert set(xlayers[3].targets) == set(['cpu'])
    # assert set(xlayers[4].targets) == set(['cpu', 'test'])
    # assert set(xlayers[5].targets) == set(['cpu', 'test'])
    # assert set(xlayers[6].targets) == set(['cpu'])

    # Bottoms / tops
    assert xgraph.get_top_layers('in1')[0].name == 'conv1'
    assert len(xgraph.get_bottom_layers('in1')) == 0

    assert xgraph.get_top_layers('in2')[0].name == 'add1'
    assert len(xgraph.get_bottom_layers('in2')) == 0

    assert len(xgraph.get_bottom_layers('conv1')) == 1
    assert len(xgraph.get_top_layers('conv1')) == 1
    assert xgraph.get_top_layers('conv1')[0].name == 'add1'
    assert xgraph.get_bottom_layers('conv1')[0].name == 'in1'

    assert len(xgraph.get_bottom_layers('add1')) == 2
    assert len(xgraph.get_top_layers('add1')) == 2
    assert xgraph.get_bottom_layers('add1')[0].name == 'conv1'
    assert xgraph.get_top_layers('add1')[0].name == 'conv2'
def test_target(self):
    """Layout transformation restricted to target='test': only layers that
    belong to that target (conv1, conv2) are wrapped in transposes; the
    concat is adjusted to operate in NHWC (axis 1 -> 3)."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['conv1'],
               layer=['in1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[], target='test'),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['concat1'],
               layer=['pool1'],
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 4, 4, 1],
               sizes=[16], bottoms=[], tops=['in2_transpose'],
               layer=['in2'], targets=[]),
        XLayer(name='in2_transpose', type=['Transpose'],
               shapes=[1, 1, 4, 4], sizes=[16], bottoms=['in2'],
               tops=['conv2'], layer=['in2_transpose'],
               attrs={'axes': [0, 3, 1, 2]}, targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['in2_transpose'], tops=['concat1'],
               layer=['conv2'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[], target='test'),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 4, 2, 2],
               sizes=[16], bottoms=['pool1', 'conv2'],
               tops=['concat1_transpose'], layer=['concat1'],
               attrs={'axis': 1}, targets=[]),
        XLayer(name='concat1_transpose', type=['Transpose'],
               shapes=[1, 2, 2, 4], sizes=[16], bottoms=['concat1'],
               tops=['dense1'], layer=['concat1_transpose'],
               attrs={'axes': [0, 2, 3, 1]}, targets=[]),
        XLayer(name='dense1', type=['Dense'], shapes=[1, 20],
               sizes=[], bottoms=['concat1_transpose'], tops=[],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['dense1'], targets=[]),
    ]
    xgraph = TestLayoutTransformationPass.xgraph_factory\
        .build_from_xlayer(net)
    layout_transform_pass = XGraphLayoutTransformationPass('NHWC',
                                                           target='test')
    new_xgraph = layout_transform_pass.execute(xgraph)

    xlayers = new_xgraph.get_layers()
    assert len(new_xgraph) == 10

    assert xlayers[0].type[0] == 'Input'
    assert xlayers[0].name == 'in1'
    assert xlayers[0].bottoms == []
    assert xlayers[0].tops == ['conv1_bottom_NCHW>NHWC']
    assert xlayers[0].shapes == [1, 1, 4, 4]

    assert xlayers[1].type[0] == 'Transpose'
    assert xlayers[1].name == 'conv1_bottom_NCHW>NHWC'
    assert xlayers[1].bottoms == ['in1']
    assert xlayers[1].tops == ['conv1']
    assert xlayers[1].shapes == [1, 4, 4, 1]
    assert xlayers[1].attrs['axes'] == [0, 2, 3, 1]

    assert xlayers[2].type[0] == 'Convolution'
    assert xlayers[2].name == 'conv1'
    assert xlayers[2].bottoms == ['conv1_bottom_NCHW>NHWC']
    assert xlayers[2].tops == ['conv1_top_NHWC>NCHW']
    assert xlayers[2].shapes == [1, 3, 3, 2]
    assert xlayers[2].attrs['data_layout'] == 'NHWC'
    assert xlayers[2].attrs['padding'] == [[0, 0], [1, 1], [1, 1], [0, 0]]

    assert xlayers[3].type[0] == 'Transpose'
    assert xlayers[3].name == 'conv1_top_NHWC>NCHW'
    assert xlayers[3].bottoms == ['conv1']
    assert xlayers[3].tops == ['pool1']
    assert xlayers[3].shapes == [1, 2, 3, 3]
    # NOTE: the pass stores these axes as a tuple, not a list
    assert xlayers[3].attrs['axes'] == (0, 3, 1, 2)

    assert xlayers[4].type[0] == 'Pooling'
    assert xlayers[4].name == 'pool1'
    assert xlayers[4].bottoms == ['conv1_top_NHWC>NCHW']
    assert xlayers[4].tops == ['0_split_concat1_transpose']
    assert xlayers[4].shapes == [1, 2, 2, 2]
    assert xlayers[4].attrs['data_layout'] == 'NCHW'
    assert xlayers[4].attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]]

    assert xlayers[5].type[0] == 'Transpose'
    assert xlayers[5].name == '0_split_concat1_transpose'
    assert xlayers[5].bottoms == ['pool1']
    assert xlayers[5].tops == ['concat1']
    assert xlayers[5].shapes == [1, 2, 2, 2]
    assert xlayers[5].attrs['axes'] == [0, 2, 3, 1]

    assert xlayers[6].type[0] == 'Input'
    assert xlayers[6].name == 'in2'
    assert xlayers[6].bottoms == []
    assert xlayers[6].tops == ['conv2']
    assert xlayers[6].shapes == [1, 4, 4, 1]

    assert xlayers[7].type[0] == 'Convolution'
    assert xlayers[7].name == 'conv2'
    assert xlayers[7].bottoms == ['in2']
    assert xlayers[7].tops == ['concat1']
    assert xlayers[7].shapes == [1, 2, 2, 2]
    assert xlayers[7].attrs['data_layout'] == 'NHWC'

    assert xlayers[8].type[0] == 'Concat'
    assert xlayers[8].name == 'concat1'
    assert xlayers[8].bottoms == ['0_split_concat1_transpose', 'conv2']
    assert xlayers[8].tops == ['dense1']
    assert xlayers[8].shapes == [1, 2, 2, 4]
    assert xlayers[8].attrs['axis'] == 3

    assert xlayers[9].type[0] == 'Dense'
    assert xlayers[9].name == 'dense1'
    assert xlayers[9].bottoms == ['concat1']
    assert xlayers[9].tops == []
    assert xlayers[9].shapes == [1, 20]
def test_two_partitions_interrupt(self):
    """Subgraph build where a supported region (conv1 + pool1) is interrupted
    by an unsupported BatchNorm: the merged subgraph layer must expose both
    conv1 and pool1 as tuple outputs."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['conv1'],
               layer=['in1'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1', 'bn1'],
               layer=['conv1'], targets=[],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               }),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 4, 3, 3],
               sizes=[36], bottoms=['conv1'], tops=['concat1'],
               layer=['pool1'], targets=[],
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               }),
        XLayer(name='bn1', type=['BatchNorm'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['conv1'], tops=['concat1'],
               layer=['bn1'],
               data=BatchData(np.array([1, 1]), np.array([0, 0]),
                              np.array([1, 1]), np.array([0, 0])),
               targets=[]),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 6, 3, 3],
               sizes=[54], bottoms=['pool1', 'bn1'], tops=['conv2'],
               layer=['concat1'], targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 10, 2, 2],
               sizes=[40], bottoms=['concat1'], tops=[],
               layer=['conv2'], targets=[],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               }),
    ]
    xgraph = TestSubgraphBuildFunc.xgraph_factory\
        .build_from_xlayer(net)
    p_xgraph = partition(xgraph, ['test_simple'])
    dpu_xgraph = TestSubgraphBuildFunc.target_registry\
        .get_target_build_func('test_simple')(p_xgraph)

    layers = dpu_xgraph.get_layers()
    assert len(dpu_xgraph) == 7

    assert layers[0].type[0] == 'Input'
    assert layers[0].bottoms == []
    assert layers[0].tops == ['xp0']

    assert layers[1].type[0] == 'TEST_SIMPLE'
    assert layers[1].shapes == [[1, 2, 3, 3], [1, 4, 3, 3]]
    assert layers[1].bottoms == ['in1']
    assert layers[1].tops == ['conv1', 'pool1']
    assert layers[1].attrs['input_names'] == ['xinput0']
    assert set(layers[1].attrs['output_names']) == set(['pool1', 'conv1'])
    assert layers[1].attrs['target'] == 'test_simple'
    assert layers[1].attrs['__bottom_tensors'] == {'xinput0': ['in1']}
    assert layers[1].attrs['orig_bottom_tensors'] == {'xinput0': ['in1']}
    assert layers[1].attrs['__top_tensors'] == \
        {'conv1': ['bn1'], 'pool1': ['concat1']}
    assert layers[1].attrs['orig_top_tensors'] == \
        {'conv1': ['bn1'], 'pool1': ['concat1']}

    assert layers[2].type[0] == 'TupleGetItem'
    assert layers[2].name == 'pool1'
    assert layers[2].bottoms == ['xp0']
    assert layers[2].shapes == [1, 4, 3, 3]
    assert layers[2].tops == ['concat1']
    assert layers[2].attrs['index'] == 1

    assert layers[3].type[0] == 'TupleGetItem'
    assert layers[3].name == 'conv1'
    assert layers[3].bottoms == ['xp0']
    assert layers[3].shapes == [1, 2, 3, 3]
    assert layers[3].tops == ['bn1']
    assert layers[3].attrs['index'] == 0

    assert layers[4].type[0] == 'BatchNorm'
    assert layers[4].name == 'bn1'
    assert layers[4].bottoms == ['conv1']
    assert layers[4].shapes == [1, 2, 3, 3]
    assert layers[4].tops == ['concat1']

    assert layers[5].type[0] == 'Concat'
    assert layers[5].name == 'concat1'
    assert layers[5].bottoms == ['pool1', 'bn1']
    assert layers[5].shapes == [1, 6, 3, 3]
    assert layers[5].tops == ['conv2']

    assert layers[6].type[0] == 'Convolution'
    assert layers[6].name == 'conv2'
    assert layers[6].bottoms == ['concat1']
    assert layers[6].shapes == [1, 10, 2, 2]
    assert layers[6].tops == []
def test_conv_maxpool_subgraph(self):
    """End-to-end quantization-simulation run over a conv + maxpool graph:
    partition for 'npu_test', apply the quant-sim pass on subgraphs only,
    execute the runtime graph and compare against precomputed outputs."""
    W = np.reshape(
        np.array([[[1, 2], [3, 0]], [[1, 1], [0, 1]]], dtype=np.float32),
        (2, 1, 2, 2))
    B = np.array([0., 0.], dtype=np.float32)

    net = [
        XLayer(
            name='in',
            type=['Input'],
            shapes=[1, 1, 4, 4],
            sizes=[16],
            bottoms=[],
            tops=['conv2d0'],
            layer=['in'],
            targets=[]
        ),
        XLayer(
            name='conv2d0',
            type=['Convolution'],
            shapes=[1, 2, 3, 3],
            sizes=[18],
            bottoms=['in'],
            tops=[],
            layer=['conv2d0'],
            data=ConvData(W, B),
            attrs={
                'data_layout': 'NCHW',
                'kernel_layout': 'OIHW',
                'shape': [1, 2, 3, 3],
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [1, 1],
                'dilation': [1, 1],
                'groups': 1
            },
            targets=[]
        ),
        XLayer(
            name='max_pool2d0',
            type=['Pooling'],
            shapes=[1, 2, 2, 2],
            sizes=[8],
            bottoms=['conv2d0'],
            tops=[],
            layer=['max_pool2d0'],
            attrs={
                'kernel_size': [2, 2],
                'insize': [3, 3],
                'outsize': [2, 2],
                'data_layout': 'NCHW',
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [1, 1],
                'pool_type': 'Max'
            },
            targets=[]
        )
    ]
    xgraph = TestQuantSimPass.xgraph_factory.build_from_xlayer(
        net, name='testtest'
    )
    p_xgraph = partition(xgraph, ['npu_test'])

    assert p_xgraph.get_layers()[0].target == 'cpu'
    assert p_xgraph.get_layers()[1].target == 'npu_test'
    assert p_xgraph.get_layers()[2].target == 'cpu'

    assert p_xgraph.get_layers()[0].subgraph is None
    assert p_xgraph.get_layers()[1].subgraph == 'xp0'
    assert p_xgraph.get_layers()[2].subgraph is None

    quant_sim_pass = XGraphQuantSimPass(
        fdir=FILE_PATH,
        name=xgraph.get_name() + '_qsim'
    )
    qsim_xgraph = quant_sim_pass.execute(xgraph=p_xgraph,
                                         subgraphs_only=True)

    exec_graph = TestQuantSimPass.xf_exec_graph_factory.build_runtime(
        qsim_xgraph
    )
    inpts = {
        'in': np.reshape(
            np.array([
                [10, 10, 0, 40],
                [50, 10, 0, 80],
                [30, 50, 10, 0],
                [10, 90, 30, 40]],
                dtype=np.float32
            ),
            (1, 1, 4, 4))
    }
    res = exec_graph.run(inpts)
    outpt = res[0]

    expected_outpt = np.array([[
        [[182.28346, 189.5748],
         [342.6929, 342.6929]],
        [[109.37008, 123.95275],
         [167.70079, 87.49606]]]],
        dtype=np.float32)

    np.testing.assert_array_almost_equal(outpt, expected_outpt, decimal=4)
def test_basic_diff_layout(self):
    """Subgraph build func with a layout mismatch: the 'test' target works in
    NHWC, so transposes are inserted around the xp0 subgraph and the
    TupleGetItem output carries the NHWC->NCHW transpose."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['conv1'],
               layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=[], tops=['add1'],
               layer=['in2'], targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['add1'],
               layer=['pool1'],
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='add1', type=['Eltwise'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['pool1', 'in2'], tops=[],
               layer=['add1'], targets=[]),
    ]
    xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
    p_xgraph = partition(xgraph, ['test'])
    dpu_xgraph = TestSubgraphBuildFunc.target_registry\
        .get_target_build_func('test')(p_xgraph)

    layers = dpu_xgraph.get_layers()
    assert len(dpu_xgraph) == 6

    assert layers[0].type[0] == 'Input'
    assert layers[0].name == 'in1'
    assert layers[0].bottoms == []
    assert layers[0].tops == ['conv1_bottom_NCHW-NHWC']

    assert layers[1].type[0] == 'Transpose'
    assert layers[1].name == 'conv1_bottom_NCHW-NHWC'
    assert layers[1].bottoms == ['in1']
    assert layers[1].tops == ['xp0']

    assert layers[2].type[0] == 'TEST'
    assert layers[2].bottoms == ['conv1_bottom_NCHW-NHWC']
    assert layers[2].tops == ['pool1']
    assert layers[2].attrs['target'] == 'test'
    assert layers[2].attrs['input_names'] == ['xinput0']
    assert layers[2].attrs['output_names'] == ['pool1']
    assert layers[2].attrs['input_layers']['xinput0'] == ['conv1']
    assert layers[2].attrs['output_layers']['pool1'] == ['pool1']
    assert layers[2].attrs['__bottom_tensors'] == {
        'xinput0': ['conv1_bottom_NCHW-NHWC']
    }
    assert layers[2].attrs['orig_bottom_tensors'] == {'xinput0': ['in1']}
    assert layers[2].attrs['__top_tensors'] == {
        'pool1': ['pool1_top_NHWC-NCHW']
    }
    assert layers[2].attrs['orig_top_tensors'] == {'pool1': ['add1']}

    assert layers[3].type[0] == 'TupleGetItem'
    assert layers[3].bottoms == ['xp0']
    assert layers[3].tops == ['add1']
    assert layers[3].attrs['transpose'] is True
    assert layers[3].attrs['axes'] == [0, 3, 1, 2]

    # assert(layers[4].type[0] == 'Transpose')
    # assert(layers[4].name == 'pool1_top_NHWC-NCHW')
    # assert(layers[4].bottoms == ['pool1'])
    # assert(layers[4].tops == ['add1'])
    # assert layers[4].attrs['axes'] == [0, 3, 1, 2]

    assert layers[4].type[0] == 'Input'
    assert layers[4].name == 'in2'
    assert layers[4].bottoms == []
    assert layers[4].tops == ['add1']

    assert layers[5].type[0] == 'Eltwise'
    assert layers[5].name == 'add1'
    assert layers[5].bottoms == ['pool1', 'in2']
    assert layers[5].tops == []
def test_xgraph_insert(self):
    """XGraph.insert must splice a layer between existing bottom/top layers
    and rewire the neighbours' tops/bottoms accordingly."""
    xgraph = XGraph()
    xgraph.add(XLayer(
        name='in1',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))

    assert len(xgraph) == 1
    assert len(xgraph.get_layer_names()) == 1
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 1

    X_conv = XLayer(
        name='conv1',
        type=['Convolution'],
        bottoms=['in1'],
        tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    )
    xgraph.add(X_conv)

    assert len(xgraph) == 2
    assert len(xgraph.get_layer_names()) == 2
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 1

    # Splice a pooling layer between in1 and conv1
    X_pool = XLayer(
        name='pool1',
        type=['Pooling'],
        bottoms=['in1'],
        tops=['conv1'],
        targets=[]
    )
    xgraph.insert(X_pool)

    assert len(xgraph) == 3
    assert len(xgraph.get_layer_names()) == 3
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 1

    xlayers = xgraph.get_layers()
    assert xlayers[0].name == 'in1'
    assert xlayers[0].bottoms == []
    assert xlayers[0].tops == ['pool1']
    assert xlayers[1].name == 'pool1'
    assert xlayers[1].bottoms == ['in1']
    assert xlayers[1].tops == ['conv1']
    assert xlayers[2].name == 'conv1'
    assert xlayers[2].bottoms == ['pool1']
    assert xlayers[2].tops == []

    X_in2 = XLayer(
        name='in2',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    )
    xgraph.add(X_in2)

    X_add = XLayer(
        name='add1',
        type=['Eltwise'],
        bottoms=['conv1', 'in2'],
        tops=[],
        targets=[]
    )
    xgraph.add(X_add)

    # Splice a second convolution between in2 and add1
    X_conv2 = XLayer(
        name='conv2',
        type=['Convolution'],
        bottoms=['in2'],
        tops=['add1'],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    )
    xgraph.insert(X_conv2)

    assert len(xgraph) == 6
    assert len(xgraph.get_layer_names()) == 6
    assert len(xgraph.get_output_names()) == 1
    assert len(xgraph.get_input_names()) == 2

    xlayers = xgraph.get_layers()
    assert xlayers[0].name == 'in1'
    assert xlayers[0].bottoms == []
    assert xlayers[0].tops == ['pool1']
    assert xlayers[1].name == 'pool1'
    assert xlayers[1].bottoms == ['in1']
    assert xlayers[1].tops == ['conv1']
    assert xlayers[2].name == 'conv1'
    assert xlayers[2].bottoms == ['pool1']
    assert xlayers[2].tops == ['add1']
    assert xlayers[3].name == 'in2'
    assert xlayers[3].bottoms == []
    assert xlayers[3].tops == ['conv2']
    assert xlayers[4].name == 'conv2'
    assert xlayers[4].bottoms == ['in2']
    assert xlayers[4].tops == ['add1']
    assert xlayers[5].name == 'add1'
    assert xlayers[5].bottoms == ['conv1', 'conv2']
    assert xlayers[5].tops == []
def test_two_partition_diff_layout(self):
    """Subgraph build with two branches feeding one partition (xp2): one
    branch needs a layout transpose, the other input is already NHWC and is
    consumed directly by the subgraph."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['conv1'],
               layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 4, 4, 1],
               sizes=[16], bottoms=[], tops=['in2_transpose'],
               layer=['in2'], targets=[]),
        XLayer(name='in2_transpose', type=['Transpose'],
               shapes=[1, 1, 4, 4], sizes=[16], bottoms=['in2'],
               tops=['conv2'], layer=['in2'],
               attrs={'axes': [0, 3, 1, 2]}, targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 2, 3, 3],
               sizes=[18], bottoms=['in1'], tops=['pool1'],
               layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['conv1'], tops=['concat1'],
               layer=['pool1'],
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 2, 2, 2],
               sizes=[8], bottoms=['in2_transpose'], tops=['concat1'],
               layer=['conv2'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 4, 2, 2],
               sizes=[16], bottoms=['pool1', 'conv2'],
               tops=['concat1_transpose'], layer=['concat1'],
               attrs={'axis': 1}, targets=[]),
        XLayer(name='concat1_transpose', type=['Transpose'],
               shapes=[1, 2, 2, 4], sizes=[16], bottoms=['concat1'],
               tops=['dense1'], layer=['concat1'],
               attrs={'axes': [0, 2, 3, 1]}, targets=[]),
        XLayer(name='dense1', type=['Dense'], shapes=[1, 20],
               sizes=[], bottoms=['concat1_transpose'], tops=[],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               layer=['dense1'], targets=[]),
    ]
    xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
    p_xgraph = partition(xgraph, ['test'])
    p_xlayers = p_xgraph.get_layers()

    dpu_xgraph = TestSubgraphBuildFunc.target_registry\
        .get_target_build_func('test')(p_xgraph)
    layers = dpu_xgraph.get_layers()

    assert len(dpu_xgraph) == 6

    assert layers[0].type[0] == 'Input'
    assert layers[0].name == 'in1'
    assert layers[0].shapes == [1, 1, 4, 4]
    assert layers[0].bottoms == []
    assert layers[0].tops == ['conv1_bottom_NCHW-NHWC']
    assert layers[0].target == 'cpu'
    assert layers[0].subgraph is None

    assert layers[1].type[0] == 'Transpose'
    assert layers[1].name == 'conv1_bottom_NCHW-NHWC'
    assert layers[1].shapes == [1, 4, 4, 1]
    assert layers[1].bottoms == ['in1']
    assert layers[1].tops == ['xp2']
    assert layers[1].target == 'cpu'
    assert layers[1].subgraph is None

    assert layers[2].type[0] == 'Input'
    assert layers[2].name == 'in2'
    assert layers[2].shapes == [1, 4, 4, 1]
    assert layers[2].bottoms == []
    assert layers[2].tops == ['xp2']
    assert layers[2].target == 'cpu'
    assert layers[2].subgraph is None

    assert layers[3].type[0] == 'TEST'
    assert layers[3].name == 'xp2'
    assert layers[3].shapes == [[1, 2, 2, 4]]
    assert layers[3].bottoms == ['conv1_bottom_NCHW-NHWC', 'in2']
    assert layers[3].tops == ['concat1']
    assert layers[3].target == 'cpu'
    assert layers[3].subgraph is None
    assert layers[3].attrs['target'] == 'test'
    assert layers[3].attrs['input_names'] == ['xinput0', 'xinput1']
    assert layers[3].attrs['output_names'] == ['concat1']
    assert layers[3].attrs['input_layers']['xinput0'] == ['conv1']
    assert layers[3].attrs['input_layers']['xinput1'] == ['conv2']
    assert layers[3].attrs['output_layers']['concat1'] == ['concat1']
    assert layers[3].attrs['__bottom_tensors'] == {
        'xinput0': ['conv1_bottom_NCHW-NHWC'],
        'xinput1': ['in2']
    }
    assert layers[3].attrs['orig_bottom_tensors'] == {
        'xinput0': ['in1'],
        'xinput1': ['in2']
    }
    assert layers[3].attrs['__top_tensors'] == {'concat1': ['dense1']}
    assert layers[3].attrs['orig_top_tensors'] == {'concat1': ['dense1']}

    assert layers[4].type[0] == 'TupleGetItem'
    assert layers[4].name == 'concat1'
    assert layers[4].shapes == [1, 2, 2, 4]
    assert layers[4].bottoms == ['xp2']
    assert layers[4].tops == ['dense1']
    assert layers[4].target == 'cpu'
    assert layers[4].subgraph is None

    assert layers[5].type[0] == 'Dense'
    assert layers[5].name == 'dense1'
    assert layers[5].shapes == [1, 20]
    assert layers[5].bottoms == ['concat1']
    assert layers[5].tops == []
    assert layers[5].target == 'cpu'
    assert layers[5].subgraph is None
def test_copy(self):
    """XGraph.copy must deep-copy layers and parameter data: mutating the
    original's weights must not leak into the copy (and vice versa)."""
    xgraph = XGraph()
    xgraph.add(XLayer(
        name='in1',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='in2',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))
    xgraph.add(XLayer(
        name='conv1',
        type=['Convolution'],
        bottoms=['in1'],
        tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='add1',
        type=['Eltwise'],
        bottoms=['conv1', 'in2'],
        tops=[],
        targets=[]
    ))
    xgraph.insert(XLayer(
        name='conv2',
        type=['Convolution'],
        bottoms=['in2'],
        tops=['add1'],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    ))
    xgraph.add(XLayer(
        name='pool1',
        type=['Pooling'],
        bottoms=['add1'],
        tops=[],
        targets=[]
    ))

    assert len(xgraph) == 6
    assert xgraph.get_layer_names() == \
        ['in1', 'conv1', 'in2', 'conv2', 'add1', 'pool1']

    xg_copy = xgraph.copy()
    assert len(xg_copy) == 6
    assert xg_copy.get_layer_names() == \
        ['in1', 'conv1', 'in2', 'conv2', 'add1', 'pool1']

    xgc_layers = xg_copy.get_layers()
    # Layer views of the copy write through to the copy itself
    assert xgc_layers[1].type == ['Convolution']
    assert xg_copy.get('conv1').type == ['Convolution']
    xgc_layers[1].type = ['Convolution2']
    assert xg_copy.get('conv1').type == ['Convolution2']
    xgc_layers[1].type = ['Convolution']
    assert xgc_layers[1].type == ['Convolution']
    assert xg_copy.get('conv1').type == ['Convolution']

    np.testing.assert_array_equal(
        xgc_layers[1].data.weights,
        np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
    )
    np.testing.assert_array_equal(
        xgc_layers[1].data.biases,
        np.array([0., 1.], dtype=np.float32)
    )

    # Mutate the original's conv1 weights; the copy must be unaffected
    xgraph.get('conv1').data = ConvData(
        weights=xgc_layers[1].data.weights * 2,
        biases=xgc_layers[1].data.biases
    )
    np.testing.assert_array_equal(
        xgraph.get('conv1').data.weights,
        np.array([[[[2, 4], [6, 8]]]], dtype=np.float32)
    )
    np.testing.assert_array_equal(
        xgc_layers[1].data.weights,
        np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
    )
    np.testing.assert_array_equal(
        xgc_layers[1].data.biases,
        np.array([0., 1.], dtype=np.float32)
    )
def test_inception_like_block(self):
    """Inception-style block: two inputs concatenated, fanned out to two
    branches, merged again. The build func must create split transposes for
    both inputs feeding the xp0 subgraph."""
    net = [
        XLayer(name='in1', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['concat1'],
               layer=['in1'], targets=[]),
        XLayer(name='in2', type=['Input'], shapes=[1, 1, 4, 4],
               sizes=[16], bottoms=[], tops=['concat1'],
               layer=['in2'], targets=[]),
        XLayer(name='concat1', type=['Concat'], shapes=[1, 2, 4, 4],
               sizes=[32], bottoms=['in1', 'in2'],
               tops=['conv1', 'conv2'], layer=['concat1'],
               attrs={'axis': 1}, targets=[]),
        XLayer(name='conv1', type=['Convolution'], shapes=[1, 4, 3, 3],
               sizes=[], bottoms=['concat1'], tops=['pool1'],
               layer=['conv1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='pool1', type=['Pooling'], shapes=[1, 4, 2, 2],
               sizes=[], bottoms=['conv1'], tops=['concat2'],
               layer=['pool1'],
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='conv2', type=['Convolution'], shapes=[1, 4, 2, 2],
               sizes=[], bottoms=['concat1'], tops=['concat2'],
               layer=['conv2'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               attrs={
                   'data_layout': 'NCHW',
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]]
               },
               targets=[]),
        XLayer(name='concat2', type=['Concat'], shapes=[1, 8, 2, 2],
               sizes=[32], bottoms=['pool1', 'conv2'], tops=['dense1'],
               layer=['concat2'], attrs={'axis': 1}, targets=[]),
        XLayer(name='dense1', type=['Dense'], shapes=[1, 20],
               sizes=[20], bottoms=['concat2'], tops=[],
               layer=['dense1'],
               data=ConvData(np.array([1, 1]), np.array([0, 0])),
               targets=[]),
    ]
    xgraph = TestSubgraphBuildFunc.xgraph_factory.build_from_xlayer(net)
    p_xgraph = partition(xgraph, ['test'])
    dpu_xgraph = TestSubgraphBuildFunc.target_registry\
        .get_target_build_func('test')(p_xgraph)

    layers = dpu_xgraph.get_layers()
    assert len(dpu_xgraph) == 7

    assert layers[0].type[0] == 'Input'
    assert layers[0].name == 'in1'
    assert layers[0].shapes == [1, 1, 4, 4]
    assert layers[0].bottoms == []
    assert layers[0].tops ==\
        ['0_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC']
    assert layers[0].target == 'cpu'
    assert layers[0].subgraph is None

    assert layers[1].type[0] == 'Transpose'
    assert layers[1].name ==\
        '0_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC'
    assert layers[1].shapes == [1, 4, 4, 1]
    assert layers[1].bottoms == ['in1']
    assert layers[1].tops == ['xp0']
    assert layers[1].target == 'cpu'
    assert layers[1].subgraph is None

    assert layers[2].type[0] == 'Input'
    assert layers[2].name == 'in2'
    assert layers[2].shapes == [1, 1, 4, 4]
    assert layers[2].bottoms == []
    assert layers[2].tops ==\
        ['1_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC']
    assert layers[2].target == 'cpu'
    assert layers[2].subgraph is None

    assert layers[3].type[0] == 'Transpose'
    assert layers[3].name ==\
        '1_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC'
    assert layers[3].shapes == [1, 4, 4, 1]
    assert layers[3].bottoms == ['in2']
    assert layers[3].tops == ['xp0']
    assert layers[3].target == 'cpu'
    assert layers[3].subgraph is None

    assert layers[4].type[0] == 'TEST'
    assert layers[4].name == 'xp0'
    assert layers[4].shapes == [[1, 2, 2, 8]]
    assert layers[4].bottoms ==\
        ['0_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC',
         '1_split_conv1_bottom_NCHW-NHWC_conv2_bottom_NCHW-NHWC']
    assert layers[4].tops == ['concat2']
    assert layers[4].target == 'cpu'
    assert layers[4].subgraph is None
    assert layers[4].tops == ['concat2']
    assert layers[4].attrs['target'] == 'test'
    assert layers[4].attrs['input_names'] == ['xinput0', 'xinput1']
    assert layers[4].attrs['output_names'] == ['concat2']
    assert layers[4].attrs['input_layers']['xinput0'] == ['concat1']
    assert layers[4].attrs['input_layers']['xinput1'] == ['concat1']
    assert layers[4].attrs['output_layers']['concat2'] == ['concat2']
    assert layers[4].attrs['__bottom_tensors'] == {
        'xinput0': ['0_split_conv1_bottom_NCHW-NHWC_conv2_bottom'
                    '_NCHW-NHWC'],
        'xinput1': ['1_split_conv1_bottom_NCHW-NHWC_conv2_bottom'
                    '_NCHW-NHWC']
    }
    assert layers[4].attrs['orig_bottom_tensors'] == {
        'xinput0': ['in1'],
        'xinput1': ['in2']
    }
    assert layers[4].attrs['__top_tensors'] ==\
        {'concat2': ['merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW']}
    assert layers[4].attrs['orig_top_tensors'] ==\
        {'concat2': ['dense1']}

    assert layers[5].type[0] == 'TupleGetItem'
    assert layers[5].name == 'concat2'
    assert layers[5].shapes == [1, 8, 2, 2]
    assert layers[5].bottoms == ['xp0']
    assert layers[5].tops == ['dense1']

    # assert layers[6].type[0] == 'Transpose'
    # assert layers[6].name ==\
    #     'merge_pool1_top_NHWC-NCHW_conv2_top_NHWC-NCHW'
    # assert layers[6].shapes == [1, 8, 2, 2]
    # assert layers[6].bottoms == ['concat2']
    # assert layers[6].tops == ['dense1']

    assert layers[6].type[0] == 'Dense'
    assert layers[6].name == 'dense1'
    assert layers[6].shapes == [1, 20]
    assert layers[6].bottoms == ['concat2']
    assert layers[6].tops == []
def test_xgraph_add_get(self):
    """Adding layers to an XGraph keeps names, connectivity and layer data
    consistent on retrieval; removing a layer restores the previous state."""
    xg = XGraph()
    xg.add(XLayer(
        name='in1',
        type=['Input'],
        bottoms=[],
        tops=[],
        targets=[]
    ))

    # A lone input layer is simultaneously graph input and graph output
    assert len(xg) == 1
    assert len(xg.get_layer_names()) == 1
    assert len(xg.get_output_names()) == 1
    assert len(xg.get_input_names()) == 1
    in_layer = xg.get('in1')
    assert isinstance(in_layer, XLayer)
    assert in_layer.bottoms == []
    assert in_layer.tops == []

    conv_layer = XLayer(
        name='conv1',
        type=['Convolution'],
        bottoms=['in1'],
        tops=[],
        data=ConvData(
            weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            biases=np.array([0., 1.], dtype=np.float32)
        ),
        targets=[]
    )
    xg.add(conv_layer)

    # The convolution becomes the sole output; 'in1' stays the sole input
    # and its tops are wired up to the new layer
    assert len(xg) == 2
    assert xg.get_layer_names() == ['in1', 'conv1']
    assert xg.get_output_names() == ['conv1']
    assert xg.get_input_names() == ['in1']
    assert xg.get('in1').tops == ['conv1']

    retrieved = xg.get('conv1')
    assert isinstance(retrieved, XLayer)
    assert retrieved.bottoms == ['in1']
    assert retrieved.tops == []
    assert retrieved.type == ['Convolution']
    np.testing.assert_array_equal(
        retrieved.data.weights,
        np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
    )
    np.testing.assert_array_equal(
        retrieved.data.biases,
        np.array([0., 1.], dtype=np.float32)
    )

    # Reassigning layer data through the graph must be visible on re-get
    xg.get('conv1').data = ConvData(
        weights=xg.get('conv1').data.weights * 2,
        biases=xg.get('conv1').data.biases
    )
    np.testing.assert_array_equal(
        xg.get('conv1').data.weights,
        np.array([[[[2, 4], [6, 8]]]], dtype=np.float32)
    )

    # Removal brings the graph back to the single-input state
    xg.remove(conv_layer.name)
    assert len(xg) == 1
    assert 'in1' in xg
    assert len(xg.get_layer_names()) == 1
    assert len(xg.get_output_names()) == 1
    assert len(xg.get_input_names()) == 1
def test_inception_block(self):
    """End-to-end MSE threshold quantization of a small inception-style
    block (scale -> two parallel convolutions -> concat).

    Verifies which layers are registered for quantization, that the
    computed thresholds lie in plausible ranges and propagate between
    connected layers, and that the serialized JSON file matches the
    in-memory quantization parameters.
    """
    W1 = np.reshape(
        np.array([[[1, 0, 1], [1, 0, 1], [1, 0, 1]]], dtype=np.float32),
        (1, 1, 3, 3))
    B1 = np.array([0.], dtype=np.float32)
    W2 = np.reshape(
        np.array([[[1, 1, 0], [1, 1, 0], [1, 1, 0]]], dtype=np.float32),
        (1, 1, 3, 3))
    B2 = np.array([0.], dtype=np.float32)
    gamma = np.array([2.])
    beta = np.array([1.])
    net = [
        XLayer(name='in1',
               type=['Input'],
               shapes=[-1, 1, 4, 4],
               sizes=[16],
               bottoms=[],
               tops=['scale1'],
               layer=['in1'],
               targets=[]),
        XLayer(name='scale1',
               type=['Scale'],
               shapes=[-1, 1, 4, 4],
               sizes=[16],
               bottoms=['in1'],
               tops=['conv1', 'conv2'],
               layer=['scale1'],
               targets=[],
               data=ScaleData(gamma, beta),
               attrs={'axis': 1}),
        XLayer(name='conv1',
               type=['Convolution'],
               shapes=[-1, 1, 4, 4],
               sizes=[16],
               bottoms=['scale1'],
               tops=['concat1'],
               layer=['conv1'],
               data=ConvData(W1, B1),
               attrs={
                   'data_layout': 'NCHW',
                   'kernel_layout': 'OIHW',
                   'shape': [1, 1, 4, 4],
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                   'strides': [1, 1],
                   'dilation': [1, 1],
                   'groups': 1
               },
               targets=[]),
        XLayer(name='conv2',
               type=['Convolution'],
               shapes=[-1, 1, 4, 4],
               sizes=[16],
               bottoms=['scale1'],
               tops=['concat1'],
               layer=['conv2'],
               data=ConvData(W2, B2),
               attrs={
                   'data_layout': 'NCHW',
                   'kernel_layout': 'OIHW',
                   'shape': [1, 1, 4, 4],
                   'padding': [[0, 0], [0, 0], [1, 1], [1, 1]],
                   'strides': [1, 1],
                   'dilation': [1, 1],
                   'groups': 1
               },
               targets=[]),
        XLayer(name='concat1',
               type=['Concat'],
               shapes=[-1, 2, 4, 4],
               sizes=[16],
               bottoms=['conv1', 'conv2'],
               tops=[],
               layer=['concat1'],
               targets=[],
               attrs={'axis': 1})
    ]
    xgraph = TestMSEQuantizer.xgraph_factory.build_from_xlayer(net)

    def inputs_func(iteration):
        # Parameter renamed from `iter` to avoid shadowing the builtin.
        # Returns the same calibration batch regardless of iteration.
        inputs = np.reshape(
            np.array([[1, 3, -1, -11],
                      [3, 1, -1, 0],
                      [1, 4, -3, -3],
                      [1, 1, -1, -1]], dtype=np.float32),
            (1, 1, 4, 4))
        return {'in1': inputs}

    quantizer = XGraphMSEThresholdQuantizer(xgraph, inputs_func,
                                            work_dir=FILE_PATH)
    q_xgraph = quantizer.quantize(subgraphs_only=False)

    assert 'xgraph' in quantizer._quant_layers
    assert len(quantizer._quant_layers['xgraph']) == 4
    # assert(('scale1', 'Scale', None) in
    #        quantizer._quant_layers['xgraph'])
    assert ('conv1', 'Convolution', None) in\
        quantizer._quant_layers['xgraph']
    assert ('conv2', 'Convolution', None) in\
        quantizer._quant_layers['xgraph']
    assert ('concat1', 'Concat', None) in\
        quantizer._quant_layers['xgraph']

    # Threshold ranges: inputs are bounded by 11 in magnitude, the scale
    # layer multiplies by gamma=2, so outputs are bounded by 22
    assert quantizer._quant_param.th_layer_in['scale1'][0] <= 11.
    assert quantizer._quant_param.th_layer_in['scale1'][0] >= 0.
    assert quantizer._quant_param.th_layer_out['scale1'][0] <= 22.
    assert quantizer._quant_param.th_layer_out['scale1'][0] >= 0.

    # Input thresholds of both convolutions are taken over from the
    # producing scale layer. Compare scalars on both sides (was a mixed
    # scalar-vs-array comparison for conv2).
    assert quantizer._quant_param.th_layer_in['conv1'][0] ==\
        quantizer._quant_param.th_layer_out['scale1'][0]
    np.testing.assert_array_equal(
        quantizer._quant_param.th_params['conv1'], np.array([1.]))
    # NOTE: Conv2d does not take over threshold from subsequent concat
    # layer because it's expected that a scaling layer will be inserted
    assert quantizer._quant_param.th_layer_out['conv1'][0] <= 22.
    assert quantizer._quant_param.th_layer_in['conv2'][0] ==\
        quantizer._quant_param.th_layer_out['scale1'][0]
    np.testing.assert_array_equal(
        quantizer._quant_param.th_params['conv2'], np.array([1.]))
    assert quantizer._quant_param.th_layer_out['conv2'][0] <= 33.

    # Index [0] explicitly: math.isclose expects floats and implicit
    # array-to-float conversion is deprecated in recent numpy
    assert math.isclose(quantizer._quant_param.th_layer_in['concat1'][0],
                        quantizer._quant_param.th_layer_out['conv2'][0],
                        rel_tol=1e-4)
    assert quantizer._quant_param.th_layer_out['concat1'] ==\
        quantizer._quant_param.th_layer_in['concat1']

    # The serialized quantization file must mirror the in-memory params
    quant_file = os.path.join(FILE_PATH, 'xgraph_quant.json')
    with open(quant_file) as f:
        qp_d = json.load(f)
    network = qp_d['network']
    assert network[0]['name'] == 'scale1'
    assert network[0]['th_layer_in'] ==\
        quantizer._quant_param.th_layer_in['scale1'][0]
    assert network[0]['th_layer_out'] ==\
        quantizer._quant_param.th_layer_out['scale1'][0]
    assert network[1]['name'] == 'conv1'
    assert network[1]['th_layer_in'] ==\
        quantizer._quant_param.th_layer_in['conv1'][0]
    # NOTE: adjustment for different scaling of concat input layers ! conv2
    assert network[1]['th_layer_out'] ==\
        quantizer._quant_param.th_layer_out['conv2'][0]
    assert network[1]['th_params'] == [1.0]
    assert network[2]['name'] == 'conv2'
    assert network[2]['th_layer_in'] ==\
        quantizer._quant_param.th_layer_in['conv2'][0]
    assert network[2]['th_layer_out'] ==\
        quantizer._quant_param.th_layer_out['conv2'][0]
    assert network[2]['th_params'] == [1.0]
    assert network[3]['name'] == 'concat1'
    assert network[3]['th_layer_in'] ==\
        quantizer._quant_param.th_layer_in['concat1'][0]
    assert network[3]['th_layer_out'] ==\
        quantizer._quant_param.th_layer_out['concat1'][0]
    os.remove(quant_file)
def test_xgraph_serialization_basic(self):
    """Serialize an XGraph to a string and read it back; the round trip
    must preserve layer count, layer types and convolution weights."""
    xlayers = [
        XLayer(name='in1',
               type=['Input'],
               shapes=TensorShape([1, 1, 4, 4]),
               bottoms=[],
               tops=['add1'],
               targets=[]),
        XLayer(name='in2',
               type=['Input'],
               shapes=TensorShape([1, 2, 3, 3]),
               bottoms=[],
               tops=['add1'],
               targets=[]),
        XLayer(name='conv1',
               type=['Convolution'],
               shapes=TensorShape([1, 2, 3, 3]),
               bottoms=['in1'],
               tops=['bias_add1'],
               data=ConvData(
                   weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
                   biases=np.array([0., 1.], dtype=np.float32)),
               targets=[]),
        XLayer(name='bias_add1',
               type=['BiasAdd'],
               shapes=TensorShape([1, 2, 3, 3]),
               bottoms=['conv1'],
               tops=['bn1'],
               data=[np.array([0., -1.], dtype=np.float32)],
               targets=[]),
        XLayer(name='bn1',
               type=['BatchNorm'],
               shapes=TensorShape([1, 2, 3, 3]),
               bottoms=['bias_add1'],
               tops=['scale1'],
               data=BatchData(
                   mu=np.array([.5, 2.], dtype=np.float32),
                   sigma_square=np.array([1., 1.], dtype=np.float32),
                   gamma=np.array([.5, 2.], dtype=np.float32),
                   beta=np.array([0., -1.], dtype=np.float32)),
               targets=[]),
        XLayer(name='scale1',
               type=['Scale'],
               shapes=TensorShape([1, 2, 3, 3]),
               bottoms=['bn1'],
               tops=['add1'],
               data=ScaleData(np.array([.5, 2.], dtype=np.float32),
                              np.array([0., -1.], dtype=np.float32)),
               targets=[]),
        XLayer(name='add1',
               type=['Eltwise'],
               shapes=TensorShape([1, 2, 3, 3]),
               bottoms=['scale1', 'in2'],
               tops=[],
               targets=[])
    ]
    xgraph = TestIOAPIs.xgraph_factory.build_from_xlayer(xlayers)

    # Round trip: graph -> string -> graph
    serialized = api.get_xgraph_str(xgraph)
    restored = api.read_xgraph_str(serialized)
    restored_layers = restored.get_layers()

    assert len(restored_layers) == 7
    assert restored_layers[0].type[0] == 'Input'
    assert restored_layers[1].type[0] == 'Convolution'
    np.testing.assert_array_equal(
        restored_layers[1].data[0],
        np.array([[[[1, 2], [3, 4]]]], dtype=np.float32))
def test_simple(self):
    """MSE threshold quantization of a minimal Conv -> MaxPool network.

    Checks the registered quantization layers, per-channel weight
    thresholds, the propagation of output thresholds through the pooling
    layer, and the saved DPU-v1 JSON file contents.
    """
    W = np.reshape(
        np.array([[[1, 1], [0, 1]], [[3, 4], [-1, 0]]], dtype=np.float32),
        (2, 1, 2, 2))
    B = np.array([1., -1.], dtype=np.float32)
    net = [
        XLayer(name='in1',
               type=['Input'],
               shapes=[-1, 1, 4, 4],
               sizes=[16],
               bottoms=[],
               tops=['conv1'],
               layer=['in1'],
               targets=[]),
        XLayer(name='conv1',
               type=['Convolution'],
               shapes=[-1, 2, 3, 3],
               sizes=[18],
               bottoms=['in1'],
               tops=['pool1'],
               layer=['conv2d0'],
               data=ConvData(W, B),
               attrs={
                   'data_layout': 'NCHW',
                   'kernel_layout': 'OIHW',
                   'shape': [1, 2, 3, 3],
                   'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                   'strides': [1, 1],
                   'dilation': [1, 1],
                   'groups': 1
               },
               targets=[]),
        XLayer(
            name='pool1',
            type=['Pooling'],
            shapes=[1, 2, 2, 2],
            sizes=[8],
            bottoms=['conv1'],
            tops=[],
            layer=['pool1'],
            targets=[],
            attrs={
                'kernel_size': [2, 2],
                'insize': [3, 3],  # HW
                'outsize': [2, 2],
                'data_layout': 'NCHW',
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [1, 1],
                'pool_type': 'Max'
            })
    ]
    xgraph = TestMSEQuantizer.xgraph_factory.build_from_xlayer(net)

    def inputs_func(iteration):
        # Parameter renamed from `iter` to avoid shadowing the builtin.
        # All-ones calibration input, independent of iteration.
        inputs = np.reshape(
            np.array(
                [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
                dtype=np.float32),
            (1, 1, 4, 4))
        return {'in1': inputs}

    quantizer = XGraphMSEThresholdQuantizer(xgraph, inputs_func,
                                            work_dir=FILE_PATH)
    quantizer._quantize(xgraph, subgraphs_only=False)

    assert 'xgraph' in quantizer._quant_layers
    assert len(quantizer._quant_layers['xgraph']) == 2
    assert ('conv1', 'Convolution', None) in\
        quantizer._quant_layers['xgraph']
    # assert quantizer._quant_param.th_layer_out['in1'] == [1.]
    assert quantizer._quant_param.th_layer_in['conv1'] == [1.]
    # Per-output-channel weight thresholds: max |W| per channel is 1 and 4
    np.testing.assert_array_equal(
        quantizer._quant_param.th_params['conv1'], np.array([1., 4.]))
    assert quantizer._quant_param.th_layer_out['conv1'][0] <= 5.
    assert quantizer._quant_param.th_layer_out['conv1'][0] >= 0.
    # Max pooling keeps the threshold range of its input
    assert quantizer._quant_param.th_layer_in['pool1'][0] <= 5.
    assert quantizer._quant_param.th_layer_in['pool1'][0] >= 0.
    assert quantizer._quant_param.th_layer_out['pool1'][0] <= 5.
    assert quantizer._quant_param.th_layer_out['pool1'][0] >= 0.

    # # Test json saving
    quant_file = os.path.join(FILE_PATH, 'quant1.json')
    quantizer._quant_param.save_to_dpu_v1_json(
        quantizer._quant_layers['xgraph'], quant_file)
    with open(quant_file) as f:
        qp_d = json.load(f)
    network = qp_d['network']
    assert len(network) == 2
    assert network[0]['name'] == 'conv1'
    assert network[0]['th_layer_in'] == 1.0
    assert network[0]['th_layer_out'] == \
        quantizer._quant_param.th_layer_out['conv1'][0]
    assert network[0]['th_params'] ==\
        list(quantizer._quant_param.th_params['conv1'])
    assert network[1]['name'] == 'pool1'
    assert network[1]['th_layer_in'] == \
        quantizer._quant_param.th_layer_out['conv1'][0]
    assert network[1]['th_layer_out'] == \
        quantizer._quant_param.th_layer_out['conv1'][0]
    os.remove(quant_file)
def test_xlayer_data(self):
    """XLayer.data behaves as a plain list for untyped layers and is
    wrapped into ConvData / ScaleData / BatchData for typed layers."""
    # Untyped layer: data is a mutable list of arrays
    xl = XLayer(data=[np.array([1, 2, 3])])
    assert isinstance(xl.data, list)
    assert len(xl.data) == 1
    np.testing.assert_array_equal(xl.data[0], np.array([1, 2, 3]))

    # In-place mutation of an element must be reflected on re-access
    xl.data[0] *= 2
    np.testing.assert_array_equal(xl.data[0], np.array([2, 4, 6]))

    # Whole-list reassignment, single and multiple elements
    xl.data = [np.array([3., 5., 7.], dtype=np.float32)]
    np.testing.assert_array_equal(
        xl.data[0], np.array([3., 5., 7.], dtype=np.float32))
    xl.data = [np.array([2., 4., 6.], dtype=np.float32),
               np.array([0, 0], dtype=np.float32)]
    np.testing.assert_array_equal(
        xl.data[0], np.array([2., 4., 6.], dtype=np.float32))
    np.testing.assert_array_equal(
        xl.data[1], np.array([0., 0.], dtype=np.float32))

    # Convolution layer: a two-element data list is exposed as ConvData
    reference = ConvData(
        weights=np.ones((4, 2, 3, 3), dtype=np.float32),
        biases=np.array([3, 3], dtype=np.float16)
    )
    xl2 = XLayer(
        type=['Convolution'],
        data=[np.ones((4, 2, 3, 3), dtype=np.float32) * 2.,
              np.array([3., 3.], dtype=np.float16)]
    )
    assert isinstance(xl2.data, ConvData)
    np.testing.assert_array_equal(xl2.data.weights, reference.weights * 2)
    np.testing.assert_array_equal(xl2.data.biases, reference.biases)

    # Reassigning ConvData replaces weights while biases survive a copy
    xl2.data = ConvData(
        weights=np.ones((4, 2, 3, 3), dtype=np.float32) * 3,
        biases=np.copy(xl2.data.biases)
    )
    np.testing.assert_array_equal(
        xl2.data.weights,
        np.ones((4, 2, 3, 3), dtype=np.float32) * 3
    )
    np.testing.assert_array_equal(
        xl2.data.biases, np.array([3, 3], dtype=np.float16))

    # Scale layer: data is exposed as ScaleData
    xl2.type[0] = 'Scale'
    xl2.data = ScaleData(
        gamma=np.array([1, 2], dtype=np.float32),
        beta=np.array([3, 3], dtype=np.float32)
    )
    assert xl2.type == ['Scale']
    assert isinstance(xl2.data, ScaleData)
    np.testing.assert_array_equal(
        xl2.data.gamma, np.array([1, 2], dtype=np.float32))
    np.testing.assert_array_equal(
        xl2.data.beta, np.array([3, 3], dtype=np.float32))

    # BatchNorm layer: data is exposed as BatchData
    xl2.type[0] = 'BatchNorm'
    xl2.data = BatchData(
        mu=np.array([1, 0.5], dtype=np.float32),
        sigma_square=np.array([1, 2], dtype=np.float32),
        gamma=np.array([1, 2], dtype=np.float32),
        beta=np.array([3, 3], dtype=np.float32)
    )
    assert xl2.type == ['BatchNorm']
    assert isinstance(xl2.data, BatchData)
    np.testing.assert_array_equal(
        xl2.data.mu, np.array([1, 0.5], dtype=np.float32))
    np.testing.assert_array_equal(
        xl2.data.sigma_square, np.array([1, 2], dtype=np.float32))
    np.testing.assert_array_equal(
        xl2.data.gamma, np.array([1, 2], dtype=np.float32))
    np.testing.assert_array_equal(
        xl2.data.beta, np.array([3, 3], dtype=np.float32))
def test_one_subgraph(self):
    """Quantization of a partitioned graph with one subgraph ('xp0').

    Builds Input -> Conv -> MaxPool -> AvgPool, partitions for the
    'test' target and checks that the subgraph quantizer receives the
    expected intermediate input tensor for the subgraph.
    """
    W = np.reshape(
        np.array([[[1, 1], [0, 1]], [[3, 4], [-1, 0]]], dtype=np.float32),
        (2, 1, 2, 2))
    B = np.array([1., -1.], dtype=np.float32)
    net = [
        XLayer(name='in1',
               type=['Input'],
               shapes=[-1, 1, 4, 4],
               sizes=[16],
               bottoms=[],
               tops=['conv1'],
               layer=['in1'],
               targets=[]),
        XLayer(name='conv1',
               type=['Convolution'],
               shapes=[-1, 2, 3, 3],
               sizes=[18],
               bottoms=['in1'],
               tops=['pool1'],
               layer=['conv2d0'],
               data=ConvData(W, B),
               attrs={
                   'data_layout': 'NCHW',
                   'kernel_layout': 'OIHW',
                   'shape': [1, 2, 3, 3],
                   'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                   'strides': [1, 1],
                   'dilation': [1, 1],
                   'groups': 1
               },
               targets=[]),
        XLayer(
            name='pool1',
            type=['Pooling'],
            shapes=[1, 2, 2, 2],
            sizes=[8],
            bottoms=['conv1'],
            tops=['pool2'],
            layer=['pool1'],
            targets=[],
            attrs={
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [1, 1],
                'kernel_size': [2, 2],
                'insize': [3, 3],  # HW
                'outsize': [2, 2],
                'data_layout': 'NCHW',
                'pool_type': 'Max'
            }),
        XLayer(
            name='pool2',
            type=['Pooling'],
            shapes=[1, 2, 1, 1],
            sizes=[2],
            bottoms=['pool1'],
            tops=[],
            layer=['pool2'],
            targets=[],
            attrs={
                'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
                'strides': [1, 1],
                'kernel_size': [2, 2],
                'insize': [2, 2],  # HW
                'outsize': [1, 1],
                'data_layout': 'NCHW',
                'pool_type': 'Avg'
            })
    ]
    xgraph = TestBaseSubgraphQuantizer.xgraph_factory\
        .build_from_xlayer(net)
    p_xgraph = partition(xgraph, ['test'])

    # Partitioning keeps all four layers and creates exactly one subgraph
    assert len(p_xgraph.get_layer_names()) == 4
    assert p_xgraph.get_subgraph_names() == ['xp0']

    p_xlayers = p_xgraph.get_layers()
    # Equality instead of single-element list membership
    assert p_xlayers[0].type[0] == 'Input'
    assert p_xlayers[1].type[0] == 'Convolution'
    assert p_xlayers[2].type[0] == 'Pooling'
    assert p_xlayers[3].type[0] == 'Pooling'

    inputs = np.reshape(
        np.array([[1, 1, 1, 1],
                  [1, 1, 1, 1],
                  [1, 1, 1, 1],
                  [1, 1, 1, 1]],
                 dtype=np.float32),
        (1, 1, 4, 4))

    def inputs_func(iteration):
        # Parameter renamed from `iter` to avoid shadowing the builtin
        return {'in1': inputs}

    quantizer = BaseSubgraphQuantizerTest(xgraph=p_xgraph,
                                          inputs_func=inputs_func)
    quantizer.quantize()

    # The subgraph input equals the convolution output on an all-ones
    # image: channel 0 sums to 3 (+ bias 1) = 4, channel 1 to 6 (- 1) = 5
    assert 'xp0' in quantizer.test_inputs
    assert 'xinput0' in quantizer.test_inputs['xp0']
    expected = np.reshape(
        np.array([[[4, 4, 4], [4, 4, 4], [4, 4, 4]],
                  [[5, 5, 5], [5, 5, 5], [5, 5, 5]]]),
        (1, 2, 3, 3))
    np.testing.assert_array_equal(quantizer.test_inputs['xp0']['xinput0'],
                                  expected)