Example #1
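These snippets come from MXNet's subgraph-backend tests and omit the module-level imports they rely on. A minimal preamble, assuming the stock MXNet test-suite layout (the examples mix the 2.x numpy-style API with the older 1.x NDArray API, so both numpy aliases are included), might look like:

import os

import numpy as np   # the 1.x-style examples below refer to numpy as np
import numpy as onp  # the 2.x-style examples below refer to numpy as onp

import mxnet as mx
from mxnet import gluon, nd, npx
from mxnet.gluon import nn
from mxnet.base import _LIB, check_call, c_str, c_str_array, mx_uint, mx_real_t
from mxnet.test_utils import assert_almost_equal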
def test_subgraph_backend_gluon_ext1(tmpdir):
    def get_net():
        net = nn.HybridSequential()  # stack layers in a HybridSequential so the net can be hybridized
        net.add(nn.Dense(256, activation='relu'),
                nn.Dense(128, activation='relu'), nn.Dense(2))
        return net

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = get_net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir),
                              'test_subgraph_backend_gluon_ext1.params')
    net.save_parameters(param_path)

    # after partitioning
    net = get_net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(
            mx.np.abs(outputs1[i] - outputs2[i]).sum().asnumpy(),
            onp.zeros(shape=(1, )))
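The MXSetSubgraphPropertyOpNamesV2 / MXRemoveSubgraphPropertyOpNamesV2 pair recurs in every example below. A small context manager makes the pairing exception-safe; this is a sketch built only from the calls shown here (subgraph_op_names is not part of MXNet):

from contextlib import contextmanager

@contextmanager
def subgraph_op_names(subgraph_backend, op_names):
    # Restrict the backend's subgraph property to the given op names...
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    try:
        yield
    finally:
        # ...and always lift the restriction, even if the body raises.
        check_call(
            _LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))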
Example #2
def test_subgraph_exe7(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning without infer shapes/types before,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym  # unpack the (symbol, input_names, input_shapes) fixture tuple
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe1 = sym._bind(ctx=mx.current_context(),
                     args=arg_array,
                     aux_states=aux_array,
                     grad_req='null')
    exe1.forward()

    # partition before bind
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._bind(ctx=mx.current_context(),
                          args=arg_array,
                          aux_states=aux_array,
                          grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
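The test_subgraph_exe* examples take sym as a parametrized fixture and unpack it as a 3-tuple; check_subgraph_exe9 below indexes the same tuple as sym[0] (the symbol), sym[1] (input names) and sym[2] (input shapes). A hypothetical fixture matching that convention:

import pytest

@pytest.fixture
def sym():
    # (symbol, input_names, input_shapes); the fixed var shape lets
    # sym.infer_shape() succeed without arguments, as test_subgraph_exe7 needs.
    data = mx.sym.var('data', shape=(4, 8))
    out = mx.sym.FullyConnected(data, num_hidden=16, name='fc')
    return out, ['data'], [(4, 8)]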
Example #3
def test_subgraph_exe6(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning with shapes/types, then _simple_bind
    and compare results of the partitioned sym and the original sym."""
    # _simple_bind
    sym, _, _ = sym
    exe1 = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    set_random_inputs(exe1, input_names)
    exe1.forward()

    # infer shapes/types before partitioning (via exe1's arg/aux arrays), then _simple_bind
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict, exe1.aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    copy_inputs_between_executors(exe1, exe2, input_names)
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
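set_random_inputs and copy_inputs_between_executors are helpers from the same test file that aren't shown here. Plausible sketches (hypothetical implementations, using only the executor's public arg_dict):

def set_random_inputs(exe, input_names):
    # Fill the executor's input arrays in place with random data.
    for name in input_names:
        exe.arg_dict[name][:] = mx.nd.random.uniform(
            shape=exe.arg_dict[name].shape)

def copy_inputs_between_executors(exe_src, exe_dst, input_names):
    # Copy inputs so both executors run on identical data.
    for name in input_names:
        exe_dst.arg_dict[name][:] = exe_src.arg_dict[name]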
Example #4
def check_subgraph_exe9(sym, subgraph_backend, op_names):
    """Call hybridize() to partition the graph, and then compare results of the partitioned
    sym and the original sym. Here do an inference before hybridizing with the subgraph_backend
    which means we'll pass shapes/types"""
    # create Gluon block for given symbol
    inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
    sym_block = nn.SymbolBlock(sym[0], inputs)
    sym_block.initialize(ctx=mx.current_context())
    x = [
        mx.nd.random.uniform(shape=s, ctx=mx.current_context()) for s in sym[2]
    ]
    # hybridize and export to get baseline
    sym_block.hybridize()
    outputs1 = sym_block(*x)
    sym_block.export('check_subgraph_exe9')

    # load model and partition
    sym_block = nn.SymbolBlock.imports('check_subgraph_exe9-symbol.json',
                                       sym[1],
                                       'check_subgraph_exe9-0000.params',
                                       ctx=mx.current_context())
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    sym_block.hybridize(backend=subgraph_backend)
    outputs2 = sym_block(*x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
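A sketch of driving check_subgraph_exe9 under the same hypothetical (symbol, input_names, input_shapes) convention as above; note the function exports check_subgraph_exe9-symbol.json and check_subgraph_exe9-0000.params into the working directory:

data = mx.sym.var('data')
fc = mx.sym.FullyConnected(data, num_hidden=16, name='fc')
check_subgraph_exe9((fc, ['data'], [(4, 8)]), 'default', ['FullyConnected'])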
Example #5
def test_subgraph_exe8(sym, subgraph_backend, op_names):
    """Call optimize_for to infer shapes, types and dtypes followed by graph partitioning,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    arg_dict = {name: mx.nd.random.uniform(shape=shape)
                for name, shape in zip(arg_names, arg_shapes)}
    aux_dict = {name: mx.nd.random.uniform(shape=shape)
                for name, shape in zip(aux_names, aux_shapes)}
    exe1 = sym.bind(ctx=mx.current_context(),
                    args=arg_dict,
                    aux_states=aux_dict,
                    grad_req='null')
    exe1.forward()

    # infer shapes/types before partitioning (via arg/aux dicts), then bind
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym.bind(ctx=mx.current_context(),
                         args=arg_dict,
                         aux_states=aux_dict,
                         grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
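The set/remove calls around optimize_for follow the same pattern as everywhere else; with the subgraph_op_names helper sketched under Example #1, the partition step collapses to:

with subgraph_op_names(subgraph_backend, op_names):
    part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict)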
Example #6
def test_subgraph_backend_gluon_ext2(tmpdir):
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            self.fc1 = nn.Dense(256)
            self.fc2 = nn.Dense(128)
            self.fc3 = nn.Dense(2)

        def forward(self, x):
            x = npx.relu(self.fc1(x))
            x = npx.relu(self.fc2(x))
            return self.fc3(x)

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = Net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir),
                              'test_subgraph_backend_gluon_ext2.params')
    net.save_parameters(param_path)

    # after partitioning
    net = Net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(
            mx.np.abs(outputs1[i] - outputs2[i]).sum().asnumpy(),
            onp.zeros(shape=(1, )))
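In this 2.x Gluon flow, optimize_for hybridizes the block with the chosen backend and runs it on the sample input, so the partitioned graph can then be exported like any hybridized block. A sketch ('partitioned_ext2' is an illustrative prefix, not from the original test):

net.optimize_for(x, backend='default')
net.export('partitioned_ext2')  # writes partitioned_ext2-symbol.json / -0000.params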
Example #7
def test_subgraph_backend_gluon_ext2():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.fc1 = nn.Dense(256)
                self.fc2 = nn.Dense(128)
                self.fc3 = nn.Dense(2)

        def hybrid_forward(self, F, x):
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            return self.fc3(x)

    # regular inference
    x = nd.random.normal(shape=(1, 512), ctx=mx.current_context())
    net = Net()
    net.collect_params().initialize(ctx=mx.current_context())
    outputs1 = net(x)
    net.save_parameters('test_subgraph_backend_gluon_ext2.params')

    # after partitioning
    net = Net()
    net.load_parameters('test_subgraph_backend_gluon_ext2.params',
                        ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.hybridize(backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
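Unlike Example #6, this 1.x variant saves its parameters to the current working directory rather than a pytest tmpdir; a harness would typically remove the file afterwards (a sketch):

if os.path.exists('test_subgraph_backend_gluon_ext2.params'):
    os.remove('test_subgraph_backend_gluon_ext2.params')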