Code example #1
0
File: test_ndarray.py  Project: xxradon/mxnet
def test_reduce():
    """Check mx.nd reduce ops (sum/max/min) against a NumPy reference.

    Draws random arrays (ndim 1-7, every dim 1-10), a random subset of
    axes and a random keepdims flag, then compares the MXNet result with
    the NumPy result (`_np_reduce`) via mean squared error.
    """
    sample_num = 200

    def test_reduce_inner(numpy_reduce_func, nd_reduce_func):
        for _ in range(sample_num):
            # Random shape: ndim in [1, 7], each dim in [1, 10].
            ndim = np.random.randint(1, 8)
            shape = np.random.randint(1, 11, size=ndim)
            # Pick a random subset of axes to reduce over.
            axis_flags = np.random.randint(0, 2, size=ndim)
            axes = [axis for axis, flag in enumerate(axis_flags) if flag]
            keepdims = np.random.randint(0, 2)
            dat = np.random.rand(*shape) - 0.5
            # An empty selection means "reduce over every axis".
            axes = tuple(axes) if axes else tuple(range(ndim))
            numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)

            ndarray_ret = nd_reduce_func(arr=mx.nd.array(dat), axis=axes, keepdims=keepdims)
            # isinstance is the idiomatic type check (was `type(...) is`).
            if isinstance(ndarray_ret, mx.ndarray.NDArray):
                ndarray_ret = ndarray_ret.asnumpy()
            assert ndarray_ret.shape == numpy_ret.shape
            err = np.square(ndarray_ret - numpy_ret).mean()
            assert err < 1E-4

    test_reduce_inner(lambda data, axis, keepdims: _np_reduce(data, axis, keepdims, np.sum),
                      mx.nd.sum)
    test_reduce_inner(lambda data, axis, keepdims: _np_reduce(data, axis, keepdims, np.max),
                      mx.nd.max)
    test_reduce_inner(lambda data, axis, keepdims: _np_reduce(data, axis, keepdims, np.min),
                      mx.nd.min)
Code example #2
0
def test_reduce():
    """Check an mx.symbol reduce op (sum) against a NumPy reference.

    Draws random arrays (ndim 1-7, every dim 1-10), a random subset of
    axes and a random keepdims flag, then checks both the forward value
    and the backward gradient against NumPy implementations via reldiff.
    """
    sample_num = 200

    def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func,
                          mx_reduce_sym):
        for _ in range(sample_num):
            # Generate random data that has ndim between 1-7 and all the
            # shape dims between 1-10.
            ndim = np.random.randint(1, 8)
            shape = np.random.randint(1, 11, size=(ndim, ))
            # Pick a random subset of axes, then normalize to the forms
            # the symbol API accepts: None / single int / tuple.
            # (Removed `axis_num`, which was assigned but never used.)
            axis_flags = np.random.randint(0, 2, size=ndim)
            axes = [axis for axis, flag in enumerate(axis_flags) if flag]
            if not axes:
                axes = None
            elif len(axes) == 1:
                axes = axes[0]
            else:
                axes = tuple(axes)
            keepdims = np.random.randint(0, 2)
            a = mx.symbol.Variable('a')
            if axes is None:
                b = mx_reduce_sym(a, keepdims=keepdims)
            else:
                b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
            dat_npy = np.random.rand(*shape)
            sum_groundtruth = np.array(
                numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
            # A full reduction yields a 0-d array; promote it to 1-d so
            # its shape matches the MXNet output.
            if sum_groundtruth.shape == ():
                sum_groundtruth = np.array([sum_groundtruth])
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
            grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy,
                                                      data=dat_npy,
                                                      axis=axes,
                                                      keepdims=keepdims)
            net = b.bind(mx.cpu(),
                         args={'a': mx.nd.array(dat_npy)},
                         args_grad={'a': grad_nd})
            net.forward(is_train=True)

            err_forward = reldiff(net.outputs[0].asnumpy(), sum_groundtruth)
            assert err_forward < 1E-4
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            err_backward = reldiff(grad_nd.asnumpy(), grad_groundtruth)
            assert err_backward < 1E-4

    test_reduce_inner(
        lambda data, axis, keepdims: _np_reduce(data, axis, keepdims, np.sum),
        lambda outgrad, data, axis, keepdims: outgrad.reshape(
            _np_reduce(data, axis, 1, np.sum).shape), mx.symbol.sum)
Code example #3
0
File: test_operator.py  Project: Atry/mxnet
 def test_broadcast_axis():
     """Forward/backward check of mx.symbol.broadcast_axis against NumPy."""
     for _ in range(sample_num):
         # Random target shape with ndim in [1, 7] and each dim in [1, 10].
         n_dims = np.random.randint(1, 8)
         target_shape = np.random.randint(1, 11, size=(n_dims,))
         bcast_axis = np.random.randint(0, n_dims)
         # Input equals the target shape with the chosen axis collapsed to 1.
         input_shape = target_shape.copy()
         bcast_size = input_shape[bcast_axis]
         input_shape[bcast_axis] = 1
         var_a = mx.symbol.Variable('a')
         sym = mx.symbol.broadcast_axis(var_a, axis=bcast_axis, size=bcast_size)
         data_np = np.random.rand(*input_shape)
         expected_out = data_np
         grad_buf = mx.nd.empty(input_shape)
         head_grad = np.random.rand(*target_shape)
         # Gradient of a broadcast is a sum over the broadcast axis.
         expected_grad = _np_reduce(head_grad, axis=bcast_axis, keepdims=True,
                                    numpy_reduce_func=np.sum)
         executor = sym.bind(mx.cpu(), args={'a': mx.nd.array(data_np)},
                             args_grad={'a': grad_buf})
         executor.forward(is_train=True)
         assert (executor.outputs[0].shape == target_shape).all()
         fwd_err = np.square(executor.outputs[0].asnumpy() - expected_out).mean()
         assert fwd_err < 1E-8
         executor.backward(out_grads=mx.nd.array(head_grad))
         bwd_err = np.square(grad_buf.asnumpy() - expected_grad).mean()
         assert bwd_err < 1E-8
Code example #4
0
 def test_broadcast_axis():
     """Verify broadcast_axis forward output and backward gradient vs NumPy."""
     for _ in range(sample_num):
         # Draw a random target shape (ndim 1-7, dims 1-10) and an axis
         # of it to broadcast along.
         rank = np.random.randint(1, 8)
         out_shape = np.random.randint(1, 11, size=(rank, ))
         ax = np.random.randint(0, rank)
         in_shape = out_shape.copy()
         repeat = in_shape[ax]
         in_shape[ax] = 1  # input carries size 1 on the broadcast axis
         sym_in = mx.symbol.Variable('a')
         sym_out = mx.symbol.broadcast_axis(sym_in, axis=ax, size=repeat)
         src = np.random.rand(*in_shape)
         want_fwd = src
         grad_arr = mx.nd.empty(in_shape)
         ograd = np.random.rand(*out_shape)
         # Backward of broadcast = reduce-sum over the broadcast axis.
         want_bwd = _np_reduce(ograd,
                               axis=ax,
                               keepdims=True,
                               numpy_reduce_func=np.sum)
         exe = sym_out.bind(mx.cpu(),
                            args={'a': mx.nd.array(src)},
                            args_grad={'a': grad_arr})
         exe.forward(is_train=True)
         assert (exe.outputs[0].shape == out_shape).all()
         assert np.square(exe.outputs[0].asnumpy() - want_fwd).mean() < 1E-8
         exe.backward(out_grads=mx.nd.array(ograd))
         assert np.square(grad_arr.asnumpy() - want_bwd).mean() < 1E-8
Code example #5
0
File: test_operator.py  Project: Rowl1ng/mxnet
def test_reduce():
    """Check an mx.symbol reduce op (sum) against a NumPy reference.

    Draws random arrays (ndim 1-7, every dim 1-10), a random subset of
    axes and a random keepdims flag, then checks the forward value and
    the backward gradient against NumPy via mean/total squared error.
    """
    sample_num = 200

    def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym):
        for _ in range(sample_num):
            # Generate random data that has ndim between 1-7 and all the
            # shape dims between 1-10.
            ndim = np.random.randint(1, 8)
            shape = np.random.randint(1, 11, size=(ndim,))
            # Pick a random subset of axes, then normalize to the forms
            # the symbol API accepts: None / single int / tuple.
            # (Removed `axis_num`, which was assigned but never used.)
            axis_flags = np.random.randint(0, 2, size=ndim)
            axes = [axis for axis, flag in enumerate(axis_flags) if flag]
            if not axes:
                axes = None
            elif len(axes) == 1:
                axes = axes[0]
            else:
                axes = tuple(axes)
            keepdims = np.random.randint(0, 2)
            a = mx.symbol.Variable("a")
            if axes is None:
                b = mx_reduce_sym(a, keepdims=keepdims)
            else:
                b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
            dat_npy = np.random.rand(*shape)
            sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
            # A full reduction yields a 0-d array; promote it to 1-d so
            # its shape matches the MXNet output.
            if sum_groundtruth.shape == ():
                sum_groundtruth = np.array([sum_groundtruth])
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
            grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy, axis=axes, keepdims=keepdims)
            net = b.bind(mx.cpu(), args={"a": mx.nd.array(dat_npy)}, args_grad={"a": grad_nd})
            net.forward(is_train=True)

            err_forward = np.square(net.outputs[0].asnumpy() - sum_groundtruth).sum() / np.prod(shape)
            assert err_forward < 1e-6
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            err_backward = np.square(grad_nd.asnumpy() - grad_groundtruth).sum()
            assert err_backward < 1e-6

    test_reduce_inner(
        lambda data, axis, keepdims: _np_reduce(data, axis, keepdims, np.sum),
        lambda outgrad, data, axis, keepdims: outgrad.reshape(_np_reduce(data, axis, 1, np.sum).shape),
        mx.symbol.sum,
    )
Code example #6
0
File: test_operator.py  Project: 253681319/mxnet
 def test_broadcasting_ele(sym_bcast):
     """Bind *sym_bcast* and check its forward/backward pass against NumPy.

     Uses `shape`, `target_shape` and `axis` from the enclosing scope.
     """
     data_np = np.random.rand(*shape)
     expected_fwd = data_np
     grad_buf = mx.nd.empty(shape)
     head_grad = np.random.rand(*target_shape)
     # Gradient of a broadcast is a sum over the broadcast axis.
     expected_bwd = _np_reduce(head_grad, axis=axis, keepdims=True,
                               numpy_reduce_func=np.sum)
     executor = sym_bcast.bind(mx.cpu(), args={'a': mx.nd.array(data_np)},
                               args_grad={'a': grad_buf})
     executor.forward(is_train=True)
     assert (executor.outputs[0].shape == target_shape).all()
     fwd_err = reldiff(executor.outputs[0].asnumpy(), expected_fwd)
     assert fwd_err < 1E-4
     executor.backward(out_grads=mx.nd.array(head_grad))
     bwd_err = reldiff(grad_buf.asnumpy(), expected_bwd)
     assert bwd_err < 1E-4
Code example #7
0
File: test_operator.py  Project: zcli/mxnet
 def test_broadcasting_ele(sym_bcast):
     """Run one forward/backward broadcast check on *sym_bcast* vs NumPy.

     Relies on `shape`, `target_shape` and `axis` from the outer scope.
     """
     src = np.random.rand(*shape)
     want_fwd = src
     grad_arr = mx.nd.empty(shape)
     ograd = np.random.rand(*target_shape)
     # Backward of broadcast = reduce-sum over the broadcast axis.
     want_bwd = _np_reduce(ograd, axis=axis, keepdims=True,
                           numpy_reduce_func=np.sum)
     exe = sym_bcast.bind(mx.cpu(), args={'a': mx.nd.array(src)},
                          args_grad={'a': grad_arr})
     exe.forward(is_train=True)
     assert (exe.outputs[0].shape == target_shape).all()
     assert reldiff(exe.outputs[0].asnumpy(), want_fwd) < 1E-4
     exe.backward(out_grads=mx.nd.array(ograd))
     assert reldiff(grad_arr.asnumpy(), want_bwd) < 1E-4