def test(backend):
    """Exercise Gluon SymbolBlock execution and partitioning for `backend`.

    Relies on module-level symbols ``a``, ``b``, ``sym`` and ``sym2``
    (defined elsewhere in the original script -- TODO confirm).
    """
    divider = '-------------------------------'
    args = {'a': mx.nd.ones((3, 2)), 'b': mx.nd.ones((3, 2))}

    ###############################################
    # Test with subgraph not consuming params
    ###############################################
    # Baseline: regular (un-partitioned) Gluon execution.
    print(divider)
    print('Testing regular Gluon execution')
    block = nn.SymbolBlock(sym, [a, b])
    block.initialize()
    result = block(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)))
    print(result)

    # Partition eagerly via optimize_for with concrete inputs, then export.
    print(divider)
    print(f'Testing {backend} Gluon Hybridize partitioning with shapes/types without inference')
    block2 = nn.SymbolBlock(sym, [a, b])
    block2.initialize()
    block2.optimize_for(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)), backend=backend)
    block2.export('partitioned')

    # Re-partition with a pass that adds an extra input to the subgraph op.
    print(divider)
    print(f'Testing {backend} Gluon Hybridize partitioning with extra input')
    block2.optimize_for(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)), backend="addInputPass")
    result3 = block2(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)))
    print(result3)

    ###############################################
    # Test with subgraph directly consuming params
    ###############################################
    args = {'a': mx.nd.ones((3, 2))}
    # Baseline again, this time on the param-consuming symbol sym2.
    print(divider)
    print('Testing regular MXNet execution')
    block3 = nn.SymbolBlock(sym2, [a])
    block3.initialize()
    result5 = block3(mx.nd.ones((3, 2)))
    print(result5)

    # optimize_for with shapes/types inferred from the concrete input.
    print(divider)
    print(f'Testing {backend} Gluon optimize_for partitioning with shapes/types')
    block4 = nn.SymbolBlock(sym2, [a])
    block4.initialize()
    block4.optimize_for(mx.nd.ones((3, 2)), backend=backend)
    result8 = block4(mx.nd.ones((3, 2)))
    print(result8)
def test_model(pass_name):
    """Drive the custom graph pass `pass_name` through Symbol and Gluon APIs.

    Relies on module-level symbols ``sym``, ``a`` and ``b`` (defined
    elsewhere in the original script -- TODO confirm).
    """
    divider = '-------------------------------'
    args = {'a': mx.nd.ones((3, 2)), 'b': mx.nd.ones((3, 2))}

    # Baseline: regular symbol execution without the pass.
    print(divider)
    print('Testing regular MXNet execution')
    executor = sym.bind(ctx=mx.cpu(), args=args)
    baseline = executor.forward()
    print(baseline)

    # Symbol.optimize_for while propagating shapes/types via args/aux.
    print(divider)
    print(f'Testing pass "{pass_name}" with shapes/types')
    aux = {}
    optimized = sym.optimize_for(pass_name, args, aux)
    print(optimized.tojson())
    executor2 = optimized.bind(ctx=mx.cpu(), args=args)
    result2 = executor2.forward()
    print(result2)

    # Same pass without shape/type propagation, forwarding a custom option.
    print(divider)
    print(f'Testing pass "{pass_name}" without shapes/types')
    optimized_untyped = sym.optimize_for(pass_name, myOpt='yello')
    executor3 = optimized_untyped.bind(ctx=mx.cpu(), args=args)
    result3 = executor3.forward()
    print(result3)

    # Gluon hybridize() path: the pass is applied on the first forward call.
    print(divider)
    print(f'Testing pass "{pass_name}" Gluon Hybridize with shapes/types')
    block = nn.SymbolBlock(sym, [a, b])
    block.initialize()
    block.hybridize(backend=pass_name)
    result4 = block(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)))
    print(result4)

    # Gluon optimize_for() path: the pass is applied eagerly, then exported.
    print(divider)
    print(f'Testing pass "{pass_name}" Gluon Hybridize with shapes/types without inference')
    block2 = nn.SymbolBlock(sym, [a, b])
    block2.initialize()
    block2.optimize_for(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)), backend=pass_name)
    block2.export('modified')
# Example #3 (score: 0)
def check_subgraph_exe9(sym, subgraph_backend, op_names):
    """Partition a graph via hybridize() and compare against the original.

    Runs an inference before hybridizing with `subgraph_backend`, so concrete
    shapes/types are available to the partitioner.

    Parameters
    ----------
    sym : tuple
        (symbol, input-name list, input-shape list) -- indexed as sym[0],
        sym[1], sym[2] below.
    subgraph_backend : str
        Name of the registered subgraph property to partition with.
    op_names : list of str
        Operator names the subgraph property is restricted to.
    """
    # create Gluon block for given symbol
    inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
    sym_block = nn.SymbolBlock(sym[0], inputs)
    sym_block.initialize(ctx=mx.current_context())
    # Random inputs matching the declared shapes; reused for both runs.
    x = [
        mx.nd.random.uniform(shape=s, ctx=mx.current_context()) for s in sym[2]
    ]
    # hybridize and export to get baseline
    sym_block.hybridize()
    outputs1 = sym_block(*x)
    sym_block.export('check_subgraph_exe9')

    # load model and partition
    sym_block = nn.SymbolBlock.imports('check_subgraph_exe9-symbol.json',
                                       sym[1],
                                       'check_subgraph_exe9-0000.params',
                                       ctx=mx.current_context())
    # Restrict the backend to `op_names` via the C API; must be set before
    # hybridize() and removed afterwards so later tests are unaffected.
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    sym_block.hybridize(backend=subgraph_backend)
    outputs2 = sym_block(*x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        # Element-wise absolute difference must sum to (approximately) zero.
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
# Example #4 (score: 0)
def test_model(pass_name):
    """Run the custom pass `pass_name` via Gluon: baseline forward, then
    optimize_for + export.

    Relies on module-level symbols ``sym``, ``a`` and ``b`` (defined
    elsewhere in the original script -- TODO confirm).
    """
    divider = '-------------------------------'
    args = {'a': mx.nd.ones((3, 2)), 'b': mx.nd.ones((3, 2))}

    # Baseline: un-partitioned Gluon execution.
    print(divider)
    print('Testing regular MXNet execution')
    block = nn.SymbolBlock(sym, [a, b])
    block.initialize()
    result = block(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)))
    print(result)

    # Apply the pass eagerly with concrete inputs, then export the result.
    print(divider)
    print('Testing pass "%s" Gluon Hybridize with shapes/types without inference' % pass_name)
    block2 = nn.SymbolBlock(sym, [a, b])
    block2.initialize()
    block2.optimize_for(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)), backend=pass_name)
    block2.export('modified')
# Example #5 (score: 0)
def test_yxnet_mnist():
    """Build the MNIST graph, quantize its parameters, and dump it for NNVM.

    NOTE(review): depends on hard-coded local paths under /home/serving and
    /tmp, plus project helpers (make_mnist_graph, load_parameters, spass,
    sutils, utils) not visible here -- only runnable in its original
    environment.
    """
    mnist_sym = make_mnist_graph()

    # Quantization spec: single NCHW input quantized to 8-bit precision.
    inputs_ext = {
        'data': {
            'shape': (1, 1, 28, 28),
            'precision': 8,
        }
    }
    in_shape = (1, 1, 28, 28)
    # Infer shapes for all arguments/aux states and collect them by name.
    arg_shapes, _, aux_shapes = mnist_sym.infer_shape(data=in_shape)
    args, auxs = mnist_sym.list_arguments(), mnist_sym.list_auxiliary_states()
    infer_shapes = {args[i]: arg_shapes[i] for i in range(len(args))}
    infer_shapes.update({auxs[i]: aux_shapes[i] for i in range(len(auxs))})

    # Load pretrained parameters from the content-addressed warehouse path.
    root = "/home/serving/warehouse"
    _, bd = load_parameters(
        mnist_sym, infer_shapes,
        root + "/ca3d0286d5758697cdef653c1375960a868ac08a/data/params")
    mnist_sym, bd = spass.mx_set_precs(mnist_sym, bd, inputs_ext)

    # Dump the quantized symbol and parameters for inspection.
    dump_sym, dump_par = '/tmp/mnist_yxnet.symbol', '/tmp/mnist_yxnet.params'
    with open(dump_sym, 'w') as fout:
        fout.write(mnist_sym.tojson())
    nd.save(dump_par, bd)

    inputs = [mx.sym.var('data')]
    data = np.load(root + '/ba9fedfc87ccb6064fcd437fd2287f5edef1bd84/data')
    data = nd.array([data.astype(np.int8)])

    # NOTE(review): the `if False:` branch (Gluon forward) is disabled, and the
    # else branch calls exit(), so the final print(res...) below is never
    # reached -- looks like leftover debugging scaffolding.
    if False:
        graph = nn.SymbolBlock(mnist_sym, inputs)
        utils.load_parameters(graph, bd)
        res = graph.forward(data).astype('int32')
    else:
        prefix = "/tmp/yxnet/mnist"
        dump_sym, dump_params = prefix + ".json", prefix + ".params"
        print(sutils.sym_collect_attr(mnist_sym))
        spass.mxnet_to_nnvm(mnist_sym, bd, {'data': {
            'shape': (1, 1, 28, 28)
        }}, dump_sym, dump_params)
        exit()
    print(res.asnumpy().flatten()[:100])
# Example #6 (score: 0)
def test_subgraph():
    """End-to-end test of an external subgraph property (libsubgraph_lib).

    Builds log(exp(a + b)), partitions it with the library's "myProp"
    backend through both the Symbol and Gluon APIs, and checks each
    partitioned execution against the plain MXNet baseline.

    Raises
    ------
    MXNetError
        If the compiled subgraph library cannot be located for this platform.
    """
    # possible places to find library file
    if (os.name=='posix'):
        lib = 'libsubgraph_lib.so'
        if os.path.exists(lib):
            # plain make build, when run in the CI
            fname = lib
        elif os.path.exists(os.path.join(base_path, 'build/'+lib)):
            # plain cmake build when run in the CI
            fname = os.path.join(base_path, 'build/'+lib)
        else:
            raise MXNetError("library %s not found " % lib)
    elif (os.name=='nt'):
        lib = 'libsubgraph_lib.dll'
        if os.path.exists('windows_package\\lib\\'+lib):
            # plain make build, when run in the CI
            fname = 'windows_package\\lib\\'+lib
        else:
            # plain cmake build when run in the CI
            raise MXNetError("library %s not found " % lib)
    else:
        # BUGFIX: previously fell through with `fname` unbound on any other
        # OS, raising a confusing NameError below instead of a clear error.
        raise MXNetError("subgraph library lookup not supported on OS '%s'" % os.name)

    fname = os.path.abspath(fname)
    mx.library.load(fname)

    # test simple graph with add, exp and log operators, library supports exp/log
    a = mx.sym.var('a')
    b = mx.sym.var('b')
    c = a + b
    d = mx.sym.exp(c)
    sym = mx.sym.log(d)

    args = {'a':mx.nd.ones((3,2),ctx=mx.cpu()), 'b':mx.nd.ones((3,2),ctx=mx.cpu())}

    # baseline - regular execution in MXNet
    exe = sym._bind(ctx=mx.cpu(), args=args)
    out = exe.forward()

    # without propagating shapes/types, passing a custom option to subgraph prop "myOpt"
    # should not create subgraph since subgraph prop requires type info
    mysym1 = sym.optimize_for("myProp", myOpt='yello')
    exe1 = mysym1._bind(ctx=mx.cpu(), args=args)
    out1 = exe1.forward()
    # check that result matches one executed by MXNet
    assert_almost_equal(out[0].asnumpy(), out1[0].asnumpy(), rtol=1e-3, atol=1e-3)

    # with propagating shapes/types, rejecting subgraph
    # this tests creating the subgraph and having the subgraph prop reject it
    mysym2 = sym.optimize_for("myProp", args, reject=True)
    exe2 = mysym2._bind(ctx=mx.cpu(), args=args)
    out2 = exe2.forward()
    # check that result matches one executed by MXNet
    assert_almost_equal(out[0].asnumpy(), out2[0].asnumpy(), rtol=1e-3, atol=1e-3)

    # with propagating shapes/types
    mysym3 = sym.optimize_for("myProp",args)
    exe3 = mysym3._bind(ctx=mx.cpu(), args=args)
    out3 = exe3.forward()
    # check that result matches one executed by MXNet
    assert_almost_equal(out[0].asnumpy(), out3[0].asnumpy(), rtol=1e-3, atol=1e-3)

    # Gluon Hybridize partitioning with shapes/types
    sym_block = nn.SymbolBlock(sym, [a,b])
    sym_block.initialize()
    sym_block.optimize_for(mx.nd.ones((3,2)),mx.nd.ones((3,2)),backend='myProp')
    out4 = sym_block(mx.nd.ones((3,2)),mx.nd.ones((3,2)))
    # check that result matches one executed by MXNet
    assert_almost_equal(out[0].asnumpy(), out4[0].asnumpy(), rtol=1e-3, atol=1e-3)

    # Gluon Hybridize partitioning with sym.var (symbolic shape hints, no data)
    sym_block2 = nn.SymbolBlock(sym, [a,b])
    sym_block2.initialize()
    a_var = mx.sym.var('a',shape=(3,2))
    b_var = mx.sym.var('b',shape=(3,2))
    sym_block2.optimize_for(a_var, b_var, backend='myProp')

    # Gluon Hybridize partitioning with shapes/types, then export/import round-trip
    sym_block3 = nn.SymbolBlock(sym, [a,b])
    sym_block3.initialize()
    a_data = mx.nd.ones((3,2))
    b_data = mx.nd.ones((3,2))
    sym_block3.optimize_for(a_data, b_data, backend='myProp')
    sym_filename, params_filename = sym_block3.export('optimized')
    assert sym_filename == 'optimized-symbol.json'
    # The graph has no parameters, so export writes no params file.
    assert params_filename is None
    sym_block4 = nn.SymbolBlock.imports(sym_filename, ['a','b'], params_filename)

    out5 = sym_block4(a_data, b_data)
    # check that result matches one executed by MXNet
    assert_almost_equal(out[0].asnumpy(), out5[0].asnumpy(), rtol=1e-3, atol=1e-3)
# Example #7 (score: 0)
# NOTE(review): this top-level fragment relies on module-level `sym`, `a`
# and `b` defined earlier in the original script -- TODO confirm.

# Partition without shape/type propagation, forwarding a custom option
# ("myOpt") to the "myProp" subgraph property.
print('-------------------------------')
print('Testing partitioning without shapes/types')
mysym3 = sym.optimize_for("myProp", myOpt='yello')
exe3 = mysym3.bind(ctx=mx.cpu(),
                   args={
                       'a': mx.nd.ones((3, 2)),
                       'b': mx.nd.ones((3, 2))
                   })
out3 = exe3.forward()
print(out3)

# Gluon Hybridize partitioning with shapes/types
# (the backend pass runs lazily on the first forward call).
print('-------------------------------')
print('Testing Gluon Hybridize partitioning with shapes/types')
inputs = [a, b]
sym_block = nn.SymbolBlock(sym, inputs)
sym_block.initialize()
sym_block.hybridize(backend='myProp')
out4 = sym_block(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)))
print(out4)

# Gluon Hybridize partitioning with shapes/types without inference
# (optimize_for applies the backend eagerly using the concrete inputs).
print('-------------------------------')
print(
    'Testing Gluon Hybridize partitioning with shapes/types without inference')
inputs = [a, b]
sym_block2 = nn.SymbolBlock(sym, inputs)
sym_block2.initialize()
sym_block2.optimize_for(mx.nd.ones((3, 2)),
                        mx.nd.ones((3, 2)),
                        backend='myProp')
# Example #8 (score: 0)
def get_net(_ctx, json, params):
    """Load a serialized symbol and its weights, returning a hybridized net.

    `json` is the path to the saved symbol file and `params` the path to the
    saved parameters; only the 'fc_pyr_fwd_output' head of the loaded graph
    is kept. Parameters are loaded onto `_ctx`.
    """
    data_var = mx.sym.var('data', dtype='float32')
    head = mx.sym.load(json)['fc_pyr_fwd_output']
    net = gnn.SymbolBlock(head, data_var)
    net.load_parameters(params, ctx=_ctx)
    net.hybridize()
    return net
# Example #9 (score: 0)
def test(backend):
    """Exercise Symbol and Gluon partitioning paths for `backend`.

    Covers optimize_for with and without shape/type propagation, subgraph
    rejection, and the Gluon hybridize()/optimize_for() entry points, for
    both a plain graph (sym) and one whose subgraph consumes parameters
    directly (sym2).

    NOTE(review): relies on module-level ``a``, ``b``, ``sym`` and ``sym2``
    defined elsewhere in the original script -- TODO confirm.
    """
    ###############################################
    # Test with subgraph not consuming params
    ###############################################
    #execute in MXNet (baseline, no partitioning)
    print('-------------------------------')
    print('Testing regular MXNet execution')
    exe = sym.bind(ctx=mx.cpu(),
                   args={
                       'a': mx.nd.ones((3, 2)),
                       'b': mx.nd.ones((3, 2))
                   })
    out = exe.forward()
    print(out)

    # with propagating shapes/types (arg_array supplies them positionally)
    print('-------------------------------')
    print('Testing %s partitioning with shapes/types' % backend)
    arg_array = [
        mx.nd.ones((3, 2), dtype='float32'),
        mx.nd.ones((3, 2), dtype='float32')
    ]
    mysym2 = sym.optimize_for(backend, arg_array)
    print(mysym2.tojson())
    exe2 = mysym2.bind(ctx=mx.cpu(),
                       args={
                           'a': mx.nd.ones((3, 2)),
                           'b': mx.nd.ones((3, 2))
                       })
    out2 = exe2.forward()
    print(out2)

    # with propagating shapes/types, rejecting subgraph
    # (reject=True is forwarded to the subgraph property)
    print('-------------------------------')
    print('Testing %s partitioning with shapes/types - rejecting subgraph' %
          backend)
    arg_array = [
        mx.nd.ones((3, 2), dtype='float32'),
        mx.nd.ones((3, 2), dtype='float32')
    ]
    mysym2 = sym.optimize_for(backend, arg_array, reject=True)
    exe2 = mysym2.bind(ctx=mx.cpu(),
                       args={
                           'a': mx.nd.ones((3, 2)),
                           'b': mx.nd.ones((3, 2))
                       })
    out2 = exe2.forward()
    print(out2)

    # without propagating shapes/types (custom option forwarded instead)
    print('-------------------------------')
    print('Testing %s partitioning without shapes/types' % backend)
    mysym3 = sym.optimize_for(backend, myOpt='yello')
    exe3 = mysym3.bind(ctx=mx.cpu(),
                       args={
                           'a': mx.nd.ones((3, 2)),
                           'b': mx.nd.ones((3, 2))
                       })
    out3 = exe3.forward()
    print(out3)

    # Gluon Hybridize partitioning with shapes/types
    # (backend pass runs lazily on the first forward call)
    print('-------------------------------')
    print('Testing %s Gluon Hybridize partitioning with shapes/types' %
          backend)
    inputs = [a, b]
    sym_block = nn.SymbolBlock(sym, inputs)
    sym_block.initialize()
    sym_block.hybridize(backend=backend)
    out4 = sym_block(mx.nd.ones((3, 2)), mx.nd.ones((3, 2)))
    print(out4)

    # Gluon Hybridize partitioning with shapes/types without inference
    # (optimize_for applies the backend eagerly, then exports the result)
    print('-------------------------------')
    print(
        'Testing %s Gluon Hybridize partitioning with shapes/types without inference'
        % backend)
    inputs = [a, b]
    sym_block2 = nn.SymbolBlock(sym, inputs)
    sym_block2.initialize()
    sym_block2.optimize_for(mx.nd.ones((3, 2)),
                            mx.nd.ones((3, 2)),
                            backend=backend)
    sym_block2.export('partitioned')

    ###############################################
    # Test with subgraph directly consuming params
    ###############################################
    #execute in MXNet (baseline for sym2)
    print('-------------------------------')
    print('Testing regular MXNet execution')
    exe5 = sym2.bind(ctx=mx.cpu(), args={'a': mx.nd.ones((3, 2))})
    out5 = exe5.forward()
    print(out5)

    # with propagating shapes/types
    # (reqArgs=True is forwarded to the subgraph property)
    print('-------------------------------')
    print('Testing %s partitioning with shapes/types' % backend)
    arg_array = [mx.nd.ones((3, 2), dtype='float32')]
    mysym6 = sym2.optimize_for(backend, arg_array, reqArgs=True)
    print(mysym6.tojson())
    exe6 = mysym6.bind(ctx=mx.cpu(), args={'a': mx.nd.ones((3, 2))})
    out6 = exe6.forward()
    print(out6)

    # without propagating shapes/types
    print('-------------------------------')
    print('Testing %s partitioning without shapes/types' % backend)
    mysym7 = sym2.optimize_for(backend, reqArgs=True)
    exe7 = mysym7.bind(ctx=mx.cpu(), args={'a': mx.nd.ones((3, 2))})
    out7 = exe7.forward()
    print(out7)

    # Gluon Hybridize partitioning with shapes/types
    print('-------------------------------')
    print('Testing %s Gluon Hybridize partitioning with shapes/types' %
          backend)
    inputs = [a]
    sym2_block = nn.SymbolBlock(sym2, inputs)
    sym2_block.initialize()
    sym2_block.hybridize(backend=backend)
    out8 = sym2_block(mx.nd.ones((3, 2)))
    print(out8)