Example #1
def test_subgraph_exe2(sym, subgraph_backend, op_names):
    def get_executor(sym,
                     subgraph_backend=None,
                     op_names=None,
                     original_exec=None):
        exe = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
        input_names = sym.list_inputs()
        for name in input_names:
            if name in exe.arg_dict:
                exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
                    if original_exec is None else original_exec.arg_dict[name]
            else:
                assert name in exe.aux_dict
                exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
                    if original_exec is None else original_exec.aux_dict[name]
        exe.forward()
        return exe

    sym, _, _ = sym
    original_exec = get_executor(sym)
    check_call(
        _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                          mx_uint(len(op_names)),
                                          c_str_array(op_names)))
    partitioned_exec = get_executor(sym, subgraph_backend, op_names,
                                    original_exec)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
    outputs1 = original_exec.outputs
    outputs2 = partitioned_exec.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Example #2
def check_subgraph_exe9(sym, subgraph_backend, op_names):
    """Call hybridize() to partition the graph, and then compare results of the partitioned
    sym and the original sym. Here do an inference before hybridizing with the subgraph_backend
    which means we'll pass shapes/types"""
    # create Gluon block for given symbol
    inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
    sym_block = nn.SymbolBlock(sym[0], inputs)
    sym_block.initialize(ctx=mx.current_context())
    x = [
        mx.nd.random.uniform(shape=s, ctx=mx.current_context()) for s in sym[2]
    ]
    # hybridize and export to get baseline
    sym_block.hybridize()
    outputs1 = sym_block(*x)
    sym_block.export('check_subgraph_exe9')

    # load model and partition
    sym_block = nn.SymbolBlock.imports('check_subgraph_exe9-symbol.json',
                                       sym[1],
                                       'check_subgraph_exe9-0000.params',
                                       ctx=mx.current_context())
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    sym_block.hybridize(backend=subgraph_backend)
    outputs2 = sym_block(*x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
Example #3
    def _check_subgraph_exe3(sym, subgraph_backend, op_names):
        """Use the partitioned sym to bind an executor and compare the outputs
        with those of the original executor"""
        out = SymbolHandle()
        check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
                                                  c_str_array(op_names), ctypes.byref(out)))

        partitioned_sym = Symbol(out)
        input_names = sym.list_inputs()
        arg_names = sym.list_arguments()
        aux_names = sym.list_auxiliary_states()
        assert partitioned_sym.list_inputs() == input_names
        assert partitioned_sym.list_arguments() == arg_names
        assert partitioned_sym.list_auxiliary_states() == aux_names
        arg_shapes, _, aux_shapes = sym.infer_shape()
        arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
        aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
        exe = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
        partitioned_exe = partitioned_sym.bind(ctx=mx.current_context(), args=arg_array,
                                               aux_states=aux_array, grad_req='null')
        exe.forward()
        partitioned_exe.forward()
        assert len(exe.outputs) == len(partitioned_exe.outputs)
        for i in range(len(exe.outputs)):
            assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
                                np.zeros(shape=(1,)))
Example #4
    def _check_subgraph_exe1(sym, subgraph_backend, op_names):
        """Use the partitioned sym to simple_bind an executor and compare the outputs
        with those of the original executor"""
        out = SymbolHandle()
        check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
                                                  c_str_array(op_names), ctypes.byref(out)))

        partitioned_sym = Symbol(out)
        assert partitioned_sym.list_inputs() == sym.list_inputs()
        assert partitioned_sym.list_arguments() == sym.list_arguments()
        assert partitioned_sym.list_auxiliary_states() == sym.list_auxiliary_states()
        exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        partitioned_exe = partitioned_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        input_names = sym.list_inputs()
        for name in input_names:
            if name in exe.arg_dict:
                exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)
                partitioned_exe.arg_dict[name][:] = exe.arg_dict[name]
            else:
                assert name in exe.aux_dict
                exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)
                partitioned_exe.aux_dict[name][:] = exe.aux_dict[name]
        exe.forward()
        partitioned_exe.forward()
        assert len(exe.outputs) == len(partitioned_exe.outputs)
        for i in range(len(exe.outputs)):
            assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
                                np.zeros(shape=(1,)))
Example #5
def test_subgraph_exe8(sym, subgraph_backend, op_names):
    """Call optimize_for to infer shapes, types and dtypes followed by graph partitioning,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    arg_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(arg_names,arg_shapes)}
    aux_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(aux_names,aux_shapes)}
    exe1 = sym.bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
    exe1.forward()

    # infer shapes/types before partitioning, then bind
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
Example #6
 def get_executor(sym,
                  subgraph_backend=None,
                  op_names=None,
                  original_exec=None):
     if subgraph_backend is not None:
         os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
         check_call(
             _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                               mx_uint(len(op_names)),
                                               c_str_array(op_names)))
     exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
     input_names = sym.list_inputs()
     for name in input_names:
         if name in exe.arg_dict:
             exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
                 if original_exec is None else original_exec.arg_dict[name]
         else:
             assert name in exe.aux_dict
             exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
                 if original_exec is None else original_exec.aux_dict[name]
     exe.forward()
     if subgraph_backend is not None:
         check_call(
             _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
         del os.environ['MXNET_SUBGRAPH_BACKEND']
     return exe
Example #7
 def get_executor(sym,
                  subgraph_backend=None,
                  op_names=None,
                  original_exec=None):
     if subgraph_backend is not None:
         os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
         check_call(
             _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                               mx_uint(len(op_names)),
                                               c_str_array(op_names)))
     arg_shapes, _, aux_shapes = sym.infer_shape()
     if subgraph_backend is None:
         arg_array = [
             mx.nd.random.uniform(shape=shape) for shape in arg_shapes
         ]
         aux_array = [
             mx.nd.random.uniform(shape=shape) for shape in aux_shapes
         ]
     else:
         arg_array = None
         aux_array = None
     exe = sym.bind(ctx=mx.current_context(),
                    args=arg_array if subgraph_backend is None else
                    original_exec.arg_arrays,
                    aux_states=aux_array if subgraph_backend is None else
                    original_exec.aux_arrays,
                    grad_req='null')
     exe.forward()
     if subgraph_backend is not None:
         check_call(
             _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
         del os.environ['MXNET_SUBGRAPH_BACKEND']
     return exe
Example #8
def test_subgraph_backend_gluon_ext1(tmpdir):
    def get_net():
        net = nn.HybridSequential()  # Here we use the class HybridSequential.
        net.add(nn.Dense(256, activation='relu'),
                nn.Dense(128, activation='relu'), nn.Dense(2))
        return net

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = get_net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir),
                              'test_subgraph_backend_gluon_ext1.params')
    net.save_parameters(param_path)

    # after partitioning
    net = get_net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(
            mx.np.abs((outputs1[i] - outputs2[i])).sum().asnumpy(),
            onp.zeros(shape=(1, )))
Example #9
def test_subgraph_exe7(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning without infer shapes/types before,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe1 = sym._bind(ctx=mx.current_context(),
                     args=arg_array,
                     aux_states=aux_array,
                     grad_req='null')
    exe1.forward()

    # partition before bind
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._bind(ctx=mx.current_context(),
                          args=arg_array,
                          aux_states=aux_array,
                          grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Example #10
def test_subgraph_exe6(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning with shapes/types, then _simple_bind
    and compare results of the partitioned sym and the original sym."""
    # _simple_bind
    sym, _, _ = sym
    exe1 = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    set_random_inputs(exe1, input_names)
    exe1.forward()

    # infer shapes/types before partitioning, then _simple_bind
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict, exe1.aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    copy_inputs_between_executors(exe1, exe2, input_names)
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Example #11
def test_subgraph_exe4(sym, subgraph_backend, op_names):
    """Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in bind
    and compare results of the partitioned sym and the original sym."""
    def get_executor(sym,
                     subgraph_backend=None,
                     op_names=None,
                     original_exec=None):
        arg_shapes, _, aux_shapes = sym.infer_shape()
        if subgraph_backend is None:
            arg_array = [
                mx.nd.random.uniform(shape=shape) for shape in arg_shapes
            ]
            aux_array = [
                mx.nd.random.uniform(shape=shape) for shape in aux_shapes
            ]
        else:
            arg_array = None
            aux_array = None
        exe = sym._bind(ctx=mx.current_context(),
                        args=arg_array if subgraph_backend is None else
                        original_exec.arg_arrays,
                        aux_states=aux_array if subgraph_backend is None else
                        original_exec.aux_arrays,
                        grad_req='null')
        exe.forward()
        return exe

    sym, _, _ = sym
    original_exec = get_executor(sym)
    with environment('MXNET_SUBGRAPH_BACKEND', subgraph_backend):
        check_call(
            _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                              mx_uint(len(op_names)),
                                              c_str_array(op_names)))
        partitioned_exec = get_executor(sym, subgraph_backend, op_names,
                                        original_exec)
        check_call(
            _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
    outputs1 = original_exec.outputs
    outputs2 = partitioned_exec.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Example #12
def _quantize_symbol(sym,
                     excluded_symbols=None,
                     offline_params=None,
                     quantized_dtype='int8'):
    """Given a symbol object representing a neural network of data type FP32,
    quantize it into an INT8 network.

    Parameters
    ----------
    sym : Symbol
        FP32 neural network symbol.
    excluded_symbols : list of str
        Names of the symbols that users want to exclude from being quantized.
    offline_params : list of str
        Names of the parameters that users want to quantize offline. It's always recommended to
        quantize parameters offline so that quantizing parameters during inference can be
        avoided.
    quantized_dtype : str
        The quantized destination type for input data.
    """
    num_excluded_symbols = 0
    if excluded_symbols is not None:
        assert isinstance(excluded_symbols, list)
        num_excluded_symbols = len(excluded_symbols)
    else:
        excluded_symbols = []

    num_offline = 0
    offline = []
    if offline_params is not None:
        num_offline = len(offline_params)
        for k in offline_params:
            offline.append(c_str(k))

    out = SymbolHandle()
    check_call(
        _LIB.MXQuantizeSymbol(sym.handle, ctypes.byref(out),
                              mx_uint(num_excluded_symbols),
                              c_str_array(excluded_symbols),
                              mx_uint(num_offline),
                              c_array(ctypes.c_char_p, offline),
                              c_str(quantized_dtype), ctypes.c_bool(True)))
    return Symbol(out)
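
A minimal usage sketch for the wrapper above (not from the original source): the toy symbol and the excluded/offline names below are assumptions chosen only for illustration.

# Hypothetical example: quantize a small FP32 graph, keep the softmax output
# in FP32, and quantize the fully-connected parameters offline.
# The symbol and all names here are illustrative assumptions.
data = mx.sym.Variable('data')
fc = mx.sym.FullyConnected(data=data, num_hidden=64, name='fc')
net = mx.sym.SoftmaxOutput(data=fc, name='softmax')
qsym = _quantize_symbol(net,
                        excluded_symbols=['softmax'],
                        offline_params=['fc_weight', 'fc_bias'],
                        quantized_dtype='int8')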
Example #13
def calibrate_quantized_sym(qsym, th_dict):
    """Attach the layer-output thresholds in th_dict to the quantized symbol as the
    params of its requantize operators."""
    if th_dict is None or len(th_dict) == 0:
        return qsym
    num_layer_outputs = len(th_dict)
    layer_output_names = []
    min_vals = []
    max_vals = []
    for k, v in th_dict.items():
        layer_output_names.append(k)
        min_vals.append(v[0])
        max_vals.append(v[1])

    calibrated_sym = SymbolHandle()
    check_call(
        _LIB.MXSetCalibTableToQuantizedSymbol(
            qsym.handle, mx_uint(num_layer_outputs),
            c_str_array(layer_output_names), c_array(ctypes.c_float, min_vals),
            c_array(ctypes.c_float, max_vals), ctypes.byref(calibrated_sym)))
    return Symbol(calibrated_sym)
Example #14
 def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
     if subgraph_backend is not None:
         os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
         check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)),
                                                      c_str_array(op_names)))
     exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
     input_names = sym.list_inputs()
     for name in input_names:
         if name in exe.arg_dict:
             exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
                 if original_exec is None else original_exec.arg_dict[name]
         else:
             assert name in exe.aux_dict
             exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
                 if original_exec is None else original_exec.aux_dict[name]
     exe.forward()
     if subgraph_backend is not None:
         check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
         del os.environ['MXNET_SUBGRAPH_BACKEND']
     return exe
Example #15
def test_subgraph_backend_gluon_ext2(tmpdir):
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            self.fc1 = nn.Dense(256)
            self.fc2 = nn.Dense(128)
            self.fc3 = nn.Dense(2)

        def forward(self, x):
            x = npx.relu(self.fc1(x))
            x = npx.relu(self.fc2(x))
            return self.fc3(x)

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = Net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir),
                              'test_subgraph_backend_gluon_ext2.params')
    net.save_parameters(param_path)

    # after partitioning
    net = Net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(
            mx.np.abs(outputs1[i] - outputs2[i]).sum().asnumpy(),
            onp.zeros(shape=(1, )))
Example #16
 def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
     if subgraph_backend is not None:
         os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
         check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)),
                                                      c_str_array(op_names)))
     arg_shapes, _, aux_shapes = sym.infer_shape()
     if subgraph_backend is None:
         arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
         aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
     else:
         arg_array = None
         aux_array = None
     exe = sym.bind(ctx=mx.current_context(),
                    args=arg_array if subgraph_backend is None else original_exec.arg_arrays,
                    aux_states=aux_array if subgraph_backend is None else original_exec.aux_arrays,
                    grad_req='null')
     exe.forward()
     if subgraph_backend is not None:
         check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
         del os.environ['MXNET_SUBGRAPH_BACKEND']
     return exe
Example #17
def test_subgraph_backend_gluon_ext2():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.fc1 = nn.Dense(256)
                self.fc2 = nn.Dense(128)
                self.fc3 = nn.Dense(2)

        def hybrid_forward(self, F, x):
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            return self.fc3(x)

    # regular inference
    x = nd.random.normal(shape=(1, 512), ctx=mx.current_context())
    net = Net()
    net.collect_params().initialize(ctx=mx.current_context())
    outputs1 = net(x)
    net.save_parameters('test_subgraph_backend_gluon_ext2.params')

    # after partitioning
    net = Net()
    net.load_parameters('test_subgraph_backend_gluon_ext2.params',
                        ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.hybridize(backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
Example #18
def _calibrate_quantized_sym(qsym, th_dict):
    """Given a dictionary containing the thresholds for quantizing the layers,
    set the thresholds into the quantized symbol as the params of requantize operators.
    """
    if th_dict is None or len(th_dict) == 0:
        return qsym
    num_layer_outputs = len(th_dict)
    layer_output_names = []
    min_vals = []
    max_vals = []
    for k, v in th_dict.items():
        layer_output_names.append(k)
        min_vals.append(v[0])
        max_vals.append(v[1])

    calibrated_sym = SymbolHandle()
    check_call(
        _LIB.MXSetCalibTableToQuantizedSymbol(
            qsym.handle, mx_uint(num_layer_outputs),
            c_str_array(layer_output_names), c_array(ctypes.c_float, min_vals),
            c_array(ctypes.c_float, max_vals), ctypes.byref(calibrated_sym)))
    return Symbol(calibrated_sym)
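
A small illustrative sketch of the calibration table this helper expects; the layer-output names and min/max values are made up, and qsym is assumed to be a previously quantized Symbol.

# th_dict maps a layer-output name to its observed (min, max) range collected
# during calibration. Names and values below are placeholders for illustration.
th_dict = {
    'fc_output': (-4.2, 5.8),
    'conv_output': (0.0, 12.5),
}
calibrated_qsym = _calibrate_quantized_sym(qsym, th_dict)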
Example #19
def test_subgraph_exe2(sym, subgraph_backend, op_names):
    """Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in _simple_bind
    and compare results of the partitioned sym and the original sym."""
    def get_executor(sym,
                     subgraph_backend=None,
                     op_names=None,
                     original_exec=None):
        exe = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
        input_names = sym.list_inputs()
        for name in input_names:
            if name in exe.arg_dict:
                exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
                    if original_exec is None else original_exec.arg_dict[name]
            else:
                assert name in exe.aux_dict
                exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
                    if original_exec is None else original_exec.aux_dict[name]
        exe.forward()
        return exe

    sym, _, _ = sym

    original_exec = get_executor(sym)
    with environment('MXNET_SUBGRAPH_BACKEND', subgraph_backend):
        check_call(
            _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                              mx_uint(len(op_names)),
                                              c_str_array(op_names)))
        partitioned_exec = get_executor(sym, subgraph_backend, op_names,
                                        original_exec)
        check_call(
            _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
    outputs1 = original_exec.outputs
    outputs2 = partitioned_exec.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
Example #20
def quantize_symbol(sym,
                    excluded_symbols=[],
                    offline_params=[],
                    quantized_dtype='uint8',
                    calib_quantize_op=False):
    """
    Quantize symbol.
    :param sym: mxnet.symbol.Symbol
        The symbol to quantize.
    :param excluded_symbols: list of str
        The names of symbols to exclude.
    :param offline_params: list of str
        The names of parameters to quantize offline.
    :param quantized_dtype: {"int8", "uint8"}
        The data type that you will quantize to.
    :param calib_quantize_op: bool
        Whether to calibrate (only used for online quantization).
    :return: mxnet.symbol.Symbol
        The symbol that has been quantized.
    """
    assert isinstance(excluded_symbols, list)
    num_excluded_symbols = len(excluded_symbols)
    # exclude = [s.handle for s in excluded_symbols]

    assert isinstance(offline_params, list)
    offline = [c_str(k) for k in offline_params]
    num_offline = len(offline)

    out = SymbolHandle()
    check_call(
        _LIB.MXQuantizeSymbol(sym.handle, ctypes.byref(out),
                              mx_uint(num_excluded_symbols),
                              c_str_array(excluded_symbols),
                              mx_uint(num_offline),
                              c_array(ctypes.c_char_p, offline),
                              c_str(quantized_dtype),
                              ctypes.c_bool(calib_quantize_op)))
    return Symbol(out)
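
As a sketch of the online-calibration path, the same wrapper can be invoked with calib_quantize_op=True; fp32_sym and the names below are assumptions, not part of the original code.

# Illustrative call only: requests online calibration of the quantize ops.
# fp32_sym is an assumed FP32 Symbol; the excluded/offline names are placeholders.
qsym = quantize_symbol(fp32_sym,
                       excluded_symbols=['softmax'],
                       offline_params=['fc_weight'],
                       quantized_dtype='int8',
                       calib_quantize_op=True)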