Code Example #1
File: test_subgraph_op.py  Project: modeste2015/mxnet
def get_executor(sym,
                 subgraph_backend=None,
                 op_names=None,
                 original_exec=None):
    if subgraph_backend is not None:
        os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
        check_call(
            _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                              mx_uint(len(op_names)),
                                              c_str_array(op_names)))
    arg_shapes, _, aux_shapes = sym.infer_shape()
    if subgraph_backend is None:
        arg_array = [
            mx.nd.random.uniform(shape=shape) for shape in arg_shapes
        ]
        aux_array = [
            mx.nd.random.uniform(shape=shape) for shape in aux_shapes
        ]
    else:
        arg_array = None
        aux_array = None
    exe = sym.bind(ctx=mx.current_context(),
                   args=arg_array if subgraph_backend is None else
                   original_exec.arg_arrays,
                   aux_states=aux_array if subgraph_backend is None else
                   original_exec.aux_arrays,
                   grad_req='null')
    exe.forward()
    if subgraph_backend is not None:
        check_call(
            _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
        del os.environ['MXNET_SUBGRAPH_BACKEND']
    return exe
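Besides the explicit C-API calls, partitioning can also be driven purely by the MXNET_SUBGRAPH_BACKEND environment variable, which is what this helper relies on when it binds. A minimal sketch of that route, assuming the MXNet 1.x symbol API:

import os
import mxnet as mx

os.environ['MXNET_SUBGRAPH_BACKEND'] = 'default'
sym = mx.sym.FullyConnected(mx.sym.var('data'), num_hidden=2)
# bind/simple_bind reads the env var and partitions the graph internally
exe = sym.simple_bind(ctx=mx.cpu(), data=(1, 4), grad_req='null')
del os.environ['MXNET_SUBGRAPH_BACKEND']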
Code Example #2
def test_subgraph_exe8(sym, subgraph_backend, op_names):
    """Call optimize_for to infer shapes, types and dtypes followed by graph partitioning,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    arg_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(arg_names,arg_shapes)}
    aux_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(aux_names,aux_shapes)}
    exe1 = sym.bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
    exe1.forward()

    # infer shape/type before partitioning, then bind
    check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
                                                   c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
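The set/partition/remove sequence above is the core pattern these tests share. A minimal standalone sketch, assuming an MXNet build that exposes the V2 subgraph-property C API and the ctypes helpers in mxnet.base:

import mxnet as mx
from mxnet.base import _LIB, check_call, c_str, c_str_array, mx_uint

sym = mx.sym.FullyConnected(mx.sym.var('data'), num_hidden=2, name='fc')
subgraph_backend, op_names = 'default', ['FullyConnected']
# restrict the backend to the listed ops, partition, then restore the default
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                               mx_uint(len(op_names)),
                                               c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))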
Code Example #3
def test_subgraph_exe6(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning with shapes/types, then _simple_bind
    and compare results of the partitioned sym and the original sym."""
    # _simple_bind
    sym, _, _ = sym
    exe1 = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    set_random_inputs(exe1, input_names)
    exe1.forward()

    # infer shape/type before partitioning, then _simple_bind
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict, exe1.aux_dict)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
    copy_inputs_between_executors(exe1, exe2, input_names)
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Code Example #4
File: test_subgraph_op.py  Project: modeste2015/mxnet
def check_subgraph_exe9(sym, subgraph_backend, op_names):
    """Call hybridize() to partition the graph, and then compare results of the partitioned
    sym and the original sym. Here do an inference before hybridizing with the subgraph_backend
    which means we'll pass shapes/types"""
    # create Gluon block for given symbol
    inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
    sym_block = nn.SymbolBlock(sym[0], inputs)
    sym_block.initialize(ctx=mx.current_context())
    x = [
        mx.nd.random.uniform(shape=s, ctx=mx.current_context()) for s in sym[2]
    ]
    # hybridize and export to get baseline
    sym_block.hybridize()
    outputs1 = sym_block(*x)
    sym_block.export('check_subgraph_exe9')

    # load model and partition
    sym_block = nn.SymbolBlock.imports('check_subgraph_exe9-symbol.json',
                                       sym[1],
                                       'check_subgraph_exe9-0000.params',
                                       ctx=mx.current_context())
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    sym_block.hybridize(backend=subgraph_backend)
    outputs2 = sym_block(*x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
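The same hybridize(backend=...) flow works on any HybridBlock, not only a re-imported SymbolBlock. A minimal sketch, assuming MXNet 1.6+ where hybridize accepts a backend argument:

import mxnet as mx
from mxnet.gluon import nn

net = nn.Dense(2)
net.initialize()
net.hybridize(backend='default')   # partition with the 'default' backend
out = net(mx.nd.ones((1, 4)))      # the first call triggers partitioning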
Code Example #5
def test_subgraph_exe7(sym, subgraph_backend, op_names):
    """Call optimize_for to trigger graph partitioning without infer shapes/types before,
    then bind and compare results of the partitioned sym and the original sym."""
    # bind
    sym, _, _ = sym
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe1 = sym._bind(ctx=mx.current_context(),
                     args=arg_array,
                     aux_states=aux_array,
                     grad_req='null')
    exe1.forward()

    # partition before bind
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    part_sym = sym.optimize_for(subgraph_backend)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    exe2 = part_sym._bind(ctx=mx.current_context(),
                          args=arg_array,
                          aux_states=aux_array,
                          grad_req='null')
    exe2.forward()

    # compare outputs
    outputs1 = exe1.outputs
    outputs2 = exe2.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Code Example #6
File: test_subgraph_op.py  Project: modeste2015/mxnet
def get_executor(sym,
                 subgraph_backend=None,
                 op_names=None,
                 original_exec=None):
    if subgraph_backend is not None:
        os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
        check_call(
            _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                              mx_uint(len(op_names)),
                                              c_str_array(op_names)))
    exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    for name in input_names:
        if name in exe.arg_dict:
            exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
                if original_exec is None else original_exec.arg_dict[name]
        else:
            assert name in exe.aux_dict
            exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
                if original_exec is None else original_exec.aux_dict[name]
    exe.forward()
    if subgraph_backend is not None:
        check_call(
            _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
        del os.environ['MXNET_SUBGRAPH_BACKEND']
    return exe
Code Example #7
def test_subgraph_exe2(sym, subgraph_backend, op_names):
    def get_executor(sym,
                     subgraph_backend=None,
                     op_names=None,
                     original_exec=None):
        exe = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
        input_names = sym.list_inputs()
        for name in input_names:
            if name in exe.arg_dict:
                exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
                    if original_exec is None else original_exec.arg_dict[name]
            else:
                assert name in exe.aux_dict
                exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
                    if original_exec is None else original_exec.aux_dict[name]
        exe.forward()
        return exe

    sym, _, _ = sym
    original_exec = get_executor(sym)
    check_call(
        _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                          mx_uint(len(op_names)),
                                          c_str_array(op_names)))
    partitioned_exec = get_executor(sym, subgraph_backend, op_names,
                                    original_exec)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
    outputs1 = original_exec.outputs
    outputs2 = partitioned_exec.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Code Example #8
def test_subgraph_backend_gluon_ext1(tmpdir):
    def get_net():
        net = nn.HybridSequential()  # Here we use the class HybridSequential.
        net.add(nn.Dense(256, activation='relu'),
                nn.Dense(128, activation='relu'), nn.Dense(2))
        return net

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = get_net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir),
                              'test_subgraph_backend_gluon_ext1.params')
    net.save_parameters(param_path)

    # after partitioning
    net = get_net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(
            mx.np.abs((outputs1[i] - outputs2[i])).sum().asnumpy(),
            onp.zeros(shape=(1, )))
Code Example #9
def save(fname, data):
    """Saves a list of arrays or a dict of str->array to file.

    Examples of filenames:

    - ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
    - ``hdfs://path/to/file`` (if compiled with HDFS support)

    Parameters
    ----------
    fname : str
        The filename.
    data : list of ``NDArray`` or dict of str to ``NDArray``
        The data to save.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> y = mx.nd.ones((1,4))
    >>> mx.nd.save('my_list', [x,y])
    >>> mx.nd.save('my_dict', {'x':x, 'y':y})
    >>> mx.nd.load('my_list')
    [<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
    >>> mx.nd.load('my_dict')
    {'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
    """
    handles = []
    if isinstance(data, dict):
        keys = []
        for key, val in data.items():
            if not isinstance(key, string_types):
                raise TypeError(
                    'save only accept dict str->NDArray or list of NDArray')
            if not isinstance(val, NDArray):
                raise TypeError(
                    'save only accept dict str->NDArray or list of NDArray')
            keys.append(c_str(key))
            handles.append(val.handle)
        keys = c_array(ctypes.c_char_p, keys)
    else:
        for val in data:
            if not isinstance(val, NDArray):
                raise TypeError(
                    'save only accept dict str->NDArray or list of NDArray')
            handles.append(val.handle)
        keys = None
    check_call(
        _LIB.MXNDArraySave(c_str(fname), mx_uint(len(handles)),
                           c_array(NDArrayHandle, handles), keys))
Code Example #10
def load(fname):
    """Loads an array from file.

    See more details in ``save``.

    Parameters
    ----------
    fname : str
        The filename.

    Returns
    -------
    list of NDArray or dict of str to NDArray
        Loaded data.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname required to be a string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(
        _LIB.MXNDArrayLoad(c_str(fname), ctypes.byref(out_size),
                           ctypes.byref(handles), ctypes.byref(out_name_size),
                           ctypes.byref(names)))
    if out_name_size.value == 0:
        return [
            _ndarray_cls(NDArrayHandle(handles[i]))
            for i in range(out_size.value)
        ]
    else:
        assert out_name_size.value == out_size.value
        return dict((py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
                    for i in range(out_size.value))
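A round-trip usage sketch for save and load, via the public mx.nd API that wraps the two functions above:

import mxnet as mx

x = mx.nd.zeros((2, 3))
y = mx.nd.ones((1, 4))
mx.nd.save('my_dict', {'x': x, 'y': y})   # dict of str -> NDArray
data = mx.nd.load('my_dict')              # comes back as a dict
assert set(data.keys()) == {'x', 'y'}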
Code Example #11
File: op.py  Project: stefanhenneking/mxnet
def _init_symbol_module(root_namespace):
    """List and add all the atomic symbol functions to current module."""
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()

    check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
                                     ctypes.byref(plist)))
    op_names = []
    for i in range(size.value):
        op_names.append(py_str(plist[i]))

    module_obj = _sys.modules["%s.symbol" % root_namespace]
    module_sparse = _sys.modules["%s.symbol.sparse" % root_namespace]
    module_internal = _sys.modules["%s.symbol._internal" % root_namespace]
    module_contrib = _sys.modules["%s.contrib.symbol" % root_namespace]
    for name in op_names:
        hdl = OpHandle()
        check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
        function = _make_atomic_symbol_function(hdl, name)
        if function.__name__.startswith('_contrib_'):
            function.__name__ = function.__name__[9:]
            function.__module__ = 'mxnet.contrib.symbol'
            setattr(module_contrib, function.__name__, function)
        elif function.__name__.startswith('_'):
            setattr(module_internal, function.__name__, function)
        else:
            setattr(module_obj, function.__name__, function)

        # register sparse ops under mxnet.symbol.sparse
        if function.__name__.startswith('_sparse_'):
            function.__name__ = function.__name__[8:]
            function.__module__ = 'mxnet.symbol.sparse'
            setattr(module_sparse, function.__name__, function)
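The effect of this registration loop is that every backend operator becomes an ordinary module attribute. A small illustration (_plus_scalar is one of the underscore-prefixed internal ops):

import mxnet as mx

# public ops are attached to mxnet.symbol
fc = mx.symbol.FullyConnected(mx.symbol.var('data'), num_hidden=10)
# '_'-prefixed ops are attached to mxnet.symbol._internal
assert hasattr(mx.symbol._internal, '_plus_scalar')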
Code Example #12
def allreduce_(tensor, average=True, name=None, priority=0):
    """
    A function that performs in-place averaging or summation of the input
    tensor over all the Horovod processes.

    The reduction operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The reduction will not
    start until all processes are ready to send and receive the tensor.

    Arguments:
        tensor: A tensor to average and sum.
        average: A flag indicating whether to compute average or summation,
                 defaults to average.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, averaged or summed
        across all processes.
    """
    c_in = tensor.handle
    c_out = tensor.handle
    if isinstance(name, string_types):
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
                c_in, c_out, c_str(name), ctypes.c_bool(average),
                ctypes.c_int(priority)))
    else:
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
                c_in, c_out, name, ctypes.c_bool(average),
                ctypes.c_int(priority)))
    return tensor
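A usage sketch for this in-place variant, assuming horovod.mxnet is installed and every participating rank runs the same code (e.g. under horovodrun):

import mxnet as mx
import horovod.mxnet as hvd

hvd.init()
grad = mx.nd.ones((2, 2)) * hvd.rank()
hvd.allreduce_(grad, average=True, name='grad_0')
# grad now holds the mean of the ranks' tensors on every process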
Code Example #13
def scatter_reduce_(tensor, name=None, priority=0):
    """
    A function that performs in-place scatter reduce of the input
    tensor over all the Horovod processes.

    Arguments:
        tensor: A tensor to scatter-reduce.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        The input `tensor`, reduced in place across all processes.
    """
    c_in = tensor.handle
    c_out = tensor.handle
    if isinstance(name, string_types):
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
                c_in, c_out, c_str(name), ctypes.c_int(priority)))
    else:
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
                c_in, c_out, name, ctypes.c_int(priority)))

    return tensor
Code Example #14
File: ops.py  Project: zmxdream/byteps
def byteps_push_pull(tensor, version=0, priority=0, name=None, is_average=True):
    """
    A function that performs pushing and pulling tensors.

    The operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all BytePS processes for a given name. The reduction will not
    start until all processes are ready to send and receive the tensor.

    This acts as a thin wrapper around an autograd function.  If your input
    tensor requires gradients, then calling this function will allow gradients
    to be computed and backpropagated.

    Arguments:
        tensor: A tensor to average and sum.
        is_average: A flag indicating whether to compute average or summation,
                    defaults to average.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        None
    """

    c_in = tensor.handle
    if isinstance(name, string_types):
        check_call(MXNET_LIB_CTYPES.byteps_mxnet_push_pull_async(
            c_in, c_str(name), ctypes.c_int(version),
            ctypes.c_int(priority), ctypes.c_bool(is_average)))
    else:
        check_call(MXNET_LIB_CTYPES.byteps_mxnet_push_pull_async(
            c_in, name, ctypes.c_int(version),
            ctypes.c_int(priority), ctypes.c_bool(is_average)))

    return
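A hedged usage sketch, assuming the BytePS MXNet bindings; byteps_declare_tensor registering the key before the first push_pull is an assumption based on the BytePS trainer code, and since the call is asynchronous, readers must wait on the tensor:

import mxnet as mx
import byteps.mxnet as bps

bps.init()
tensor = mx.nd.ones((2, 2))
bps.byteps_declare_tensor('grad_0')   # register the key once
bps.byteps_push_pull(tensor, name='grad_0', is_average=True)
tensor.wait_to_read()                 # block until the result is visible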
Code Example #15
def scatter_reduce(tensor, name=None, priority=0):
    """
    Arguments:
        tensor: A tensor to average and sum.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, reduced across all
        processes.
    """
    output = mx.nd.zeros(shape=tensor.shape,
                         ctx=tensor.context,
                         dtype=tensor.dtype)
    c_in = tensor.handle
    c_out = output.handle
    if isinstance(name, string_types):
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
                c_in, c_out, c_str(name), ctypes.c_int(priority)))
    else:
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_scatter_reduce_async(
                c_in, c_out, name, ctypes.c_int(priority)))

    return output
Code Example #16
File: test_subgraph_op.py  Project: rena-cleaner/Hele
def _check_subgraph_exe3(sym, subgraph_backend, op_names):
    """Use the partitioned sym to bind an executor and compare the outputs
    with those of the original executor"""
    out = SymbolHandle()
    check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
                                             c_str_array(op_names), ctypes.byref(out)))

    partitioned_sym = Symbol(out)
    input_names = sym.list_inputs()
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    assert partitioned_sym.list_inputs() == input_names
    assert partitioned_sym.list_arguments() == arg_names
    assert partitioned_sym.list_auxiliary_states() == aux_names
    arg_shapes, _, aux_shapes = sym.infer_shape()
    arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
    aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
    exe = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
    partitioned_exe = partitioned_sym.bind(ctx=mx.current_context(), args=arg_array,
                                           aux_states=aux_array, grad_req='null')
    exe.forward()
    partitioned_exe.forward()
    assert len(exe.outputs) == len(partitioned_exe.outputs)
    for i in range(len(exe.outputs)):
        assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1,)))
Code Example #17
File: mpi_ops.py  Project: zyx1213271098/horovod
def broadcast_(tensor, root_rank, name=None, priority=0):
    """
    A function that broadcasts the input tensor on root rank to the same input
    tensor on all other Horovod processes. The operation is performed in-place.

    The broadcast operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The broadcast will not
    start until all processes are ready to send and receive the tensor.

    Arguments:
        tensor: A tensor to broadcast.
        root_rank: The rank to broadcast the value from.
        name: A name of the broadcast operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.

    Returns:
        A tensor of the same shape and type as `tensor`, with the value
        broadcasted from root rank.
    """
    c_in = tensor.handle
    c_out = tensor.handle
    if isinstance(name, string_types):
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
                c_in, c_out, c_str(name), ctypes.c_int(root_rank),
                ctypes.c_int(priority)))
    else:
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
                c_in, c_out, name, ctypes.c_int(root_rank),
                ctypes.c_int(priority)))
    return tensor
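A usage sketch for synchronizing initial state across workers, assuming horovod.mxnet; every rank ends up with rank 0's values, in place:

import mxnet as mx
import horovod.mxnet as hvd

hvd.init()
params = mx.nd.random.uniform(shape=(4,))
hvd.broadcast_(params, root_rank=0, name='init_w')
# params is now identical on all ranks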
Code Example #18
File: test_subgraph_op.py  Project: rena-cleaner/Hele
def _check_subgraph_exe1(sym, subgraph_backend, op_names):
    """Use the partitioned sym to simple_bind an executor and compare the outputs
    with those of the original executor"""
    out = SymbolHandle()
    check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
                                             c_str_array(op_names), ctypes.byref(out)))

    partitioned_sym = Symbol(out)
    assert partitioned_sym.list_inputs() == sym.list_inputs()
    assert partitioned_sym.list_arguments() == sym.list_arguments()
    assert partitioned_sym.list_auxiliary_states() == sym.list_auxiliary_states()
    exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
    partitioned_exe = partitioned_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
    input_names = sym.list_inputs()
    for name in input_names:
        if name in exe.arg_dict:
            exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)
            partitioned_exe.arg_dict[name][:] = exe.arg_dict[name]
        else:
            assert name in exe.aux_dict
            exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)
            partitioned_exe.aux_dict[name][:] = exe.aux_dict[name]
    exe.forward()
    partitioned_exe.forward()
    assert len(exe.outputs) == len(partitioned_exe.outputs)
    for i in range(len(exe.outputs)):
        assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1,)))
Code Example #19
File: mpi_ops.py  Project: lakersdf/horovod
def grouped_allreduce_(tensors,
                       average=None,
                       name=None,
                       priority=0,
                       prescale_factor=1.0,
                       postscale_factor=1.0,
                       process_set=global_process_set,
                       op=None):
    """
    A function that performs in-place averaging or summation of the input
    tensors over all the Horovod processes.

    The reduction operations are keyed by the base name. If a base name is not
    provided, an incremented auto-generated base name is used. Reductions are
    performed across tensors in the same list position. The tensor type and
    shape must be the same on all Horovod processes for tensors sharing
    positions in the input tensor list. The reduction will not start until all
    processes are ready to send and receive the tensors.

    Arguments:
        tensors: A list of tensors to average or sum.
        average:
            .. warning:: .. deprecated:: 0.24.0

                Use `op` instead. Will be removed in v1.0.

        op: The reduction operation to combine tensors across different ranks.
            Can be Average (default) or Sum.
        name: A base name to use for the group reduction operation
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        prescale_factor: Multiplicative factor to scale tensor before allreduce
        postscale_factor: Multiplicative factor to scale tensor after allreduce
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A list containing tensors of the same shape and type as in `tensors`,
        averaged or summed across all processes.
    """
    op = handle_average_backwards_compatibility(op, average)
    assert op in [Average, Sum]

    if not tensors:
        return tensors

    c_in = c_handle_array(tensors)
    c_out = c_handle_array(tensors)
    c_name = c_str(name) if isinstance(name,
                                       string_types) else ctypes.c_char_p(None)

    check_call(
        MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
            c_in, c_out, c_name, ctypes.c_bool(op == Average),
            ctypes.c_int(priority), ctypes.c_double(prescale_factor),
            ctypes.c_double(postscale_factor), ctypes.c_int(len(tensors)),
            ctypes.c_int(process_set.process_set_id)))

    return tensors
Code Example #20
File: mpi_ops.py  Project: lakersdf/horovod
def reducescatter(tensor,
                  op=Average,
                  name=None,
                  priority=0,
                  process_set=global_process_set):
    """
    A function that performs asynchronous averaging or summation of the input tensor
    over all the Horovod processes, then scatters the results across all Horovod
    processes. The input tensor is not modified.

    The reduction operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The reduction will not
    start until all processes are ready to send and receive the tensor.

    This acts as a thin wrapper around an autograd function.  If your input
    tensor requires gradients, then calling this function will allow gradients
    to be computed and backpropagated.

    Arguments:
        tensor: A tensor to average/sum and scatter.
        op: The reduction operation to combine tensors across different ranks.
            Can be Average (default) or Sum.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A tensor of the same rank and type as `tensor` across all processes.
        The shape is identical to the input shape except for the first dimension,
        which will be divided across the different Horovod processes.
    """
    assert (isinstance(tensor, mx.nd.NDArray))
    assert (op in [Average, Sum])
    # Size of output is unknown, create output array that
    # will be resized during Horovod operation
    output = mx.nd.empty(shape=(1, ), ctx=tensor.context, dtype=tensor.dtype)
    c_in = tensor.handle
    c_out = output.handle
    if isinstance(name, string_types):
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_reducescatter_async(
                c_in, c_out, c_str(name), ctypes.c_int(priority),
                ctypes.c_int(process_set.process_set_id)))
    else:
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_reducescatter_async(
                c_in, c_out, name, ctypes.c_int(priority),
                ctypes.c_int(process_set.process_set_id)))

    # Need to block here so changes to output tensor are visible
    output.wait_to_read()

    if op == Average:
        output /= process_set.size()

    return output
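A usage sketch, assuming a Horovod build that ships this MXNet reducescatter and exports hvd.Sum and hvd.Average as the reduction ops; the first dimension is what gets divided across ranks:

import mxnet as mx
import horovod.mxnet as hvd

hvd.init()
x = mx.nd.ones((4 * hvd.size(), 2))
shard = hvd.reducescatter(x, op=hvd.Sum, name='rs_0')
# each rank receives a (4, 2) slice of the elementwise sum over all ranks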
Code Example #21
File: test_subgraph_op.py  Project: MarkMa1990/mxnet
def test_subgraph_exe4(sym, subgraph_backend, op_names):
    """Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in bind
    and compare results of the partitioned sym and the original sym."""
    def get_executor(sym,
                     subgraph_backend=None,
                     op_names=None,
                     original_exec=None):
        arg_shapes, _, aux_shapes = sym.infer_shape()
        if subgraph_backend is None:
            arg_array = [
                mx.nd.random.uniform(shape=shape) for shape in arg_shapes
            ]
            aux_array = [
                mx.nd.random.uniform(shape=shape) for shape in aux_shapes
            ]
        else:
            arg_array = None
            aux_array = None
        exe = sym._bind(ctx=mx.current_context(),
                        args=arg_array if subgraph_backend is None else
                        original_exec.arg_arrays,
                        aux_states=aux_array if subgraph_backend is None else
                        original_exec.aux_arrays,
                        grad_req='null')
        exe.forward()
        return exe

    sym, _, _ = sym
    original_exec = get_executor(sym)
    with environment('MXNET_SUBGRAPH_BACKEND', subgraph_backend):
        check_call(
            _LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend),
                                              mx_uint(len(op_names)),
                                              c_str_array(op_names)))
        partitioned_exec = get_executor(sym, subgraph_backend, op_names,
                                        original_exec)
        check_call(
            _LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
    outputs1 = original_exec.outputs
    outputs2 = partitioned_exec.outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            onp.zeros(shape=(1, )))
Code Example #22
def _quantize_symbol(sym,
                     excluded_symbols=None,
                     offline_params=None,
                     quantized_dtype='int8'):
    """Given a symbol object representing a neural network of data type FP32,
    quantize it into a INT8 network.
    
    Parameters
    ----------
    sym : Symbol
        FP32 neural network symbol.
    excluded_sym_names : list of strings
        A list of strings representing the names of the symbols that users want to excluding
        from being quantized.
    offline_params : list of strs
        Names of the parameters that users want to quantize offline. It's always recommended to
        quantize parameters offline so that quantizing parameters during the inference can be
        avoided.
    quantized_dtype: str
        The quantized destination type for input data.
    """
    num_excluded_symbols = 0
    if excluded_symbols is not None:
        assert isinstance(excluded_symbols, list)
        num_excluded_symbols = len(excluded_symbols)
    else:
        excluded_symbols = []

    num_offline = 0
    offline = []
    if offline_params is not None:
        num_offline = len(offline_params)
        for k in offline_params:
            offline.append(c_str(k))

    out = SymbolHandle()
    check_call(
        _LIB.MXQuantizeSymbol(sym.handle, ctypes.byref(out),
                              mx_uint(num_excluded_symbols),
                              c_str_array(excluded_symbols),
                              mx_uint(num_offline),
                              c_array(ctypes.c_char_p, offline),
                              c_str(quantized_dtype), ctypes.c_bool(True)))
    return Symbol(out)
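A hypothetical call to the helper above, assuming the ctypes symbols it references are already in scope; 'fc_weight' and 'fc_bias' are the auto-generated parameter names of a FullyConnected layer named 'fc':

import mxnet as mx

data = mx.sym.var('data')
fc = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
qsym = _quantize_symbol(fc, offline_params=['fc_weight', 'fc_bias'],
                        quantized_dtype='int8')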
Code Example #23
File: mpi_ops.py  Project: raajay/horovod
def grouped_allreduce(tensors,
                      average=True,
                      name=None,
                      priority=0,
                      prescale_factor=1.0,
                      postscale_factor=1.0,
                      process_set=global_process_set):
    """
    A function that performs averaging or summation of the input
    tensors over all the Horovod processes. The input tensors are not modified.

    The reduction operations are keyed by the base name. If a base name is not
    provided, an incremented auto-generated base name is used. Reductions are
    performed across tensors in the same list position. The tensor type and
    shape must be the same on all Horovod processes for tensors sharing
    positions in the input tensor list. The reduction will not start until all
    processes are ready to send and receive the tensors.

    Arguments:
        tensors: A list of tensors to average or sum.
        average: A flag indicating whether to compute average or summation,
                 defaults to average.
        name: A base name to use for the group reduction operation
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        prescale_factor: Multiplicative factor to scale tensor before allreduce
        postscale_factor: Multiplicative factor to scale tensor after allreduce
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A list containing tensors of the same shape and type as in `tensors`,
        averaged or summed across all processes.
    """

    if not tensors:
        return tensors

    outputs = [
        mx.nd.zeros(shape=tensor.shape, ctx=tensor.context, dtype=tensor.dtype)
        for tensor in tensors
    ]

    c_in = c_handle_array(tensors)
    c_out = c_handle_array(outputs)
    c_name = c_str(name) if isinstance(name,
                                       string_types) else ctypes.c_char_p(None)

    check_call(
        MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
            c_in, c_out, c_name, ctypes.c_bool(average),
            ctypes.c_int(priority), ctypes.c_double(prescale_factor),
            ctypes.c_double(postscale_factor), ctypes.c_int(len(tensors)),
            ctypes.c_int(process_set.process_set_id)))

    return outputs
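A usage sketch, assuming a horovod.mxnet version that provides grouped_allreduce; grouping issues a single collective for several tensors, which cuts launch overhead for many small gradients:

import mxnet as mx
import horovod.mxnet as hvd

hvd.init()
grads = [mx.nd.ones((2,)), mx.nd.ones((3, 3))]
sums = hvd.grouped_allreduce(grads, average=False, name='layer_grads')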
Code Example #24
File: mpi_ops.py  Project: raajay/horovod
def allreduce(tensor,
              average=True,
              name=None,
              priority=0,
              prescale_factor=1.0,
              postscale_factor=1.0,
              process_set=global_process_set):
    """
    A function that performs averaging or summation of the input tensor over
    all the Horovod processes. The input tensor is not modified.

    The reduction operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The reduction will not
    start until all processes are ready to send and receive the tensor.

    This acts as a thin wrapper around an autograd function.  If your input
    tensor requires gradients, then calling this function will allow gradients
    to be computed and backpropagated.

    Arguments:
        tensor: A tensor to average or sum.
        average: A flag indicating whether to compute average or summation,
                 defaults to average.
        name: A name of the reduction operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        prescale_factor: Multiplicative factor to scale tensor before allreduce
        postscale_factor: Multiplicative factor to scale tensor after allreduce
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A tensor of the same shape and type as `tensor`, averaged or summed
        across all processes.
    """
    output = mx.nd.zeros(shape=tensor.shape,
                         ctx=tensor.context,
                         dtype=tensor.dtype)

    c_in = tensor.handle
    c_out = output.handle
    c_name = c_str(name) if isinstance(name,
                                       string_types) else ctypes.c_char_p(None)

    check_call(
        MPI_MXNET_LIB_CTYPES.horovod_mxnet_allreduce_async(
            ctypes.byref(c_in), ctypes.byref(c_out), c_name,
            ctypes.c_bool(average), ctypes.c_int(priority),
            ctypes.c_double(prescale_factor),
            ctypes.c_double(postscale_factor), ctypes.c_int(1),
            ctypes.c_int(process_set.process_set_id)))

    return output
Code Example #26
def _get_op_handles(op_name):
    """Get handle for an operator with given name - op_name.

    Parameters
    ----------
    op_name: str
        Name of operator to get handle for.
    """
    op_handle = OpHandle()
    check_call(_LIB.NNGetOpHandle(c_str(op_name), ctypes.byref(op_handle)))
    return op_handle
Code Example #27
def test_subgraph_backend_gluon_ext2(tmpdir):
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            self.fc1 = nn.Dense(256)
            self.fc2 = nn.Dense(128)
            self.fc3 = nn.Dense(2)

        def forward(self, x):
            x = npx.relu(self.fc1(x))
            x = npx.relu(self.fc2(x))
            return self.fc3(x)

    # regular inference
    x = mx.np.random.normal(size=(1, 512), ctx=mx.current_context())
    net = Net()
    net.initialize(ctx=mx.current_context())
    outputs1 = net(x)
    param_path = os.path.join(str(tmpdir),
                              'test_subgraph_backend_gluon_ext2.params')
    net.save_parameters(param_path)

    # after partitioning
    net = Net()
    net.load_parameters(param_path, ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.optimize_for(x, backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal(
            mx.np.abs(outputs1[i] - outputs2[i]).sum().asnumpy(),
            onp.zeros(shape=(1, )))
Code Example #28
def from_dlpack_old(dlpack):
    PyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
    _c_str_dltensor = c_str('dltensor')
    _c_str_used_dltensor = c_str('used_dltensor')
    handle = NDArrayHandle()
    dlpack = ctypes.py_object(dlpack)
    assert ctypes.pythonapi.PyCapsule_IsValid(
        dlpack, _c_str_dltensor
    ), ValueError(
        'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.'
    )
    dlpack_handle = ctypes.c_void_p(
        ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor))
    check_call(
        _LIB.MXNDArrayFromDLPack(dlpack_handle, ctypes.byref(handle)))
    # Rename the PyCapsule so it cannot be consumed again
    ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor)
    # Detach the destructor of the old capsule; ownership has moved to MXNet
    ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None)
    return mx.nd.NDArray(handle=handle)
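A round-trip sketch, assuming MXNet 1.5+ where the public DLPack helpers exist; note that the capsule is consumed by the import, matching the rename to 'used_dltensor' above:

import mxnet as mx

x = mx.nd.arange(6).reshape((2, 3))
capsule = mx.nd.to_dlpack_for_read(x)   # produces a 'dltensor' PyCapsule
y = mx.nd.from_dlpack(capsule)          # consumes the capsule
assert (x.asnumpy() == y.asnumpy()).all()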
Code Example #30
File: test_subgraph_op.py  Project: modeste2015/mxnet
def test_subgraph_backend_gluon_ext2():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.fc1 = nn.Dense(256)
                self.fc2 = nn.Dense(128)
                self.fc3 = nn.Dense(2)

        def hybrid_forward(self, F, x):
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            return self.fc3(x)

    # regular inference
    x = nd.random.normal(shape=(1, 512), ctx=mx.current_context())
    net = Net()
    net.collect_params().initialize(ctx=mx.current_context())
    outputs1 = net(x)
    net.save_parameters('test_subgraph_backend_gluon_ext2.params')

    # after partitioning
    net = Net()
    net.load_parameters('test_subgraph_backend_gluon_ext2.params',
                        ctx=mx.current_context())
    subgraph_backend = 'default'
    op_names = ['FullyConnected']
    check_call(
        _LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend),
                                            mx_uint(len(op_names)),
                                            c_str_array(op_names)))
    net.hybridize(backend=subgraph_backend)
    outputs2 = net(x)
    check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))

    # compare outputs
    assert len(outputs1) == len(outputs2)
    for i in range(len(outputs1)):
        assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(),
                            np.zeros(shape=(1, )))
Code Example #31
File: mpi_ops.py  Project: raajay/horovod
def broadcast(tensor,
              root_rank,
              name=None,
              priority=0,
              process_set=global_process_set):
    """
    A function that broadcasts the input tensor on root rank to the same input
    tensor on all other Horovod processes. The input tensor is not modified.

    The broadcast operation is keyed by the name. If name is not provided, an
    incremented auto-generated name is used. The tensor type and shape must be
    the same on all Horovod processes for a given name. The broadcast will not
    start until all processes are ready to send and receive the tensor.

    This acts as a thin wrapper around an autograd function.  If your input
    tensor requires gradients, then calling this function will allow gradients
    to be computed and backpropagated.

    Arguments:
        tensor: A tensor to broadcast.
        root_rank: The rank to broadcast the value from.
        name: A name of the broadcast operation.
        priority: The priority of this operation. Higher priority operations
                  are likely to be executed before other operations.
        process_set: Process set object to limit this operation to a subset of
                     Horovod processes. Default is the global process set.

    Returns:
        A tensor of the same shape and type as `tensor`, with the value
        broadcasted from root rank.
    """
    if rank() == root_rank:
        output = tensor.copy()
    else:
        output = mx.nd.zeros(shape=tensor.shape,
                             ctx=tensor.context,
                             dtype=tensor.dtype)
    c_in = tensor.handle
    c_out = output.handle
    if isinstance(name, string_types):
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
                c_in, c_out, c_str(name), ctypes.c_int(root_rank),
                ctypes.c_int(priority),
                ctypes.c_int(process_set.process_set_id)))
    else:
        check_call(
            MPI_MXNET_LIB_CTYPES.horovod_mxnet_broadcast_async(
                c_in, c_out, name, ctypes.c_int(root_rank),
                ctypes.c_int(priority),
                ctypes.c_int(process_set.process_set_id)))
    return output
Code Example #32
def quantize_symbol(sym,
                    excluded_symbols=[],
                    offline_params=[],
                    quantized_dtype='uint8',
                    calib_quantize_op=False):
    """
    Quantize symbol.
    :param sym: mxnet.symbol.Symbol
        The symbol to quantize.
    :param excluded_symbols: list of str
        The names of symbols to exclude.
    :param offline_params: list of str
        The names of parameters to quantize offline.
    :param quantized_dtype: {"int8", "uint8"}
        The data type that you will quantize to.
    :param calib_quantize_op: bool
        Whether to calibrate the quantize op (only applies to online quantization).
    :return: mxnet.symbol.Symbol
        The symbol that has been quantized.
    """
    assert isinstance(excluded_symbols, list)
    num_excluded_symbols = len(excluded_symbols)
    # exclude = [s.handle for s in excluded_symbols]

    assert isinstance(offline_params, list)
    offline = [c_str(k) for k in offline_params]
    num_offline = len(offline)

    out = SymbolHandle()
    check_call(
        _LIB.MXQuantizeSymbol(sym.handle, ctypes.byref(out),
                              mx_uint(num_excluded_symbols),
                              c_str_array(excluded_symbols),
                              mx_uint(num_offline),
                              c_array(ctypes.c_char_p, offline),
                              c_str(quantized_dtype),
                              ctypes.c_bool(calib_quantize_op)))
    return Symbol(out)