Example #1
def test_np_ndarray_binary_element_wise_ops():
    np_op_map = {
        '+': _np.add,
        '*': _np.multiply,
        '-': _np.subtract,
        '/': _np.divide,
        'mod': _np.mod,
        'pow': _np.power,
    }

    if is_op_runnable():
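        # Comparison ops can only run on CPU, or on GPUs with compute
        # capability >= 53 when MXNet is built with USE_TVM_OP=ON, so they
        # are registered only when is_op_runnable() reports support.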
        np_op_map.update({
            '==': _np.equal,
            '!=': _np.not_equal,
            '>': _np.greater,
            '>=': _np.greater_equal,
            '<': _np.less,
            '<=': _np.less_equal
        })

    def _get_grad_func(op, scalar=None, reverse=False):
        if op == '+':
            if scalar is None:
                return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
                                                   collapse_sum_like(ograd, x2.shape))
            elif not reverse:
                return lambda ograd, x1, x2, out: ograd
            else:
                return lambda ograd, x1, x2, out: ograd
        elif op == '-':
            if scalar is None:
                return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
                                                   -collapse_sum_like(ograd, x2.shape))
            elif not reverse:
                return lambda ograd, x1, x2, out: ograd
            else:
                return lambda ograd, x1, x2, out: -ograd
        elif op == '*':
            if scalar is None:
                return lambda ograd, x1, x2, out: (collapse_sum_like(ograd * x2, x1.shape),
                                                   collapse_sum_like(ograd * x1, x2.shape))
            elif not reverse:
                return lambda ograd, x1, x2, out: ograd * x2
            else:
                return lambda ograd, x1, x2, out: ograd * x1
        elif op == '/':
            if scalar is None:
                return lambda ograd, x1, x2, out: (collapse_sum_like(ograd / x2, x1.shape),
                                                   collapse_sum_like(-x1 * ograd / (x2 * x2), x2.shape))
            elif not reverse:
                return lambda ograd, x1, x2, out: ograd / x2
            else:
                return lambda ograd, x1, x2, out: -x1 * ograd / (x2 * x2)
        elif op == 'mod':
            if scalar is None:
                return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
                                                   collapse_sum_like(-ograd * _np.floor(x1 / x2), x2.shape))
            elif not reverse:
                return lambda ograd, x1, x2, out: ograd
            else:
                return lambda ograd, x1, x2, out: -ograd * _np.floor(x1 / x2)
        elif op == 'pow':
            if scalar is None:
                return lambda ograd, x1, x2, out: (collapse_sum_like(ograd * x2 * _np.power(x1, x2 - 1), x1.shape),
                                                   collapse_sum_like(ograd * out * _np.log(x1), x2.shape))
            elif not reverse:
                return lambda ograd, x1, x2, out: ograd * x2 * _np.power(x1, x2 - 1)
            else:
                return lambda ograd, x1, x2, out: ograd * out * _np.log(x1)
        elif op in ('==', '!=', '<', '<=', '>', '>='):
            if scalar is None:
                return lambda ograd, x1, x2, out: (_np.zeros_like(x1), _np.zeros_like(x2))
            else:
                return lambda ograd, x1, x2, out: _np.zeros_like(ograd)
        return None
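
    # The `collapse_sum_like(grad, shape)` helper used in the lambdas above
    # (imported elsewhere in this test module) reduces a broadcast gradient
    # back to the shape of the original operand.  A rough sketch of the
    # assumed semantics, for reference only (it is not called by the test):
    def _collapse_sum_like_sketch(grad, shape):
        # Sum away the extra leading axes introduced by broadcasting ...
        while grad.ndim > len(shape):
            grad = grad.sum(axis=0)
        # ... then sum over axes that were broadcast from size 1.
        for axis, size in enumerate(shape):
            if size == 1:
                grad = grad.sum(axis=axis, keepdims=True)
        return grad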

    def get_np_ret(x1, x2, op):
        return np_op_map[op](x1, x2)

    @use_np
    class TestBinaryElementWiseOp(HybridBlock):
        def __init__(self, op, scalar=None, reverse=False):
            super(TestBinaryElementWiseOp, self).__init__()
            self._op = op
            self._scalar = scalar
            self._reverse = reverse  # if false, scalar is the right operand.

        def hybrid_forward(self, F, x, *args):
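            # With a scalar operand the block is called with a single ndarray
            # `x`; otherwise the second operand arrives as `args[0]`.
            # `reverse=True` makes that other operand the left-hand side.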
            if self._op == '+':
                if self._scalar is not None:
                    return x + self._scalar if not self._reverse else self._scalar + x
                else:
                    return x + args[0] if not self._reverse else args[0] + x
            elif self._op == '*':
                if self._scalar is not None:
                    return x * self._scalar if not self._reverse else self._scalar * x
                else:
                    return x * args[0] if not self._reverse else args[0] * x
            elif self._op == '-':
                if self._scalar is not None:
                    return x - self._scalar if not self._reverse else self._scalar - x
                else:
                    return x - args[0] if not self._reverse else args[0] - x
            elif self._op == '/':
                if self._scalar is not None:
                    return x / self._scalar if not self._reverse else self._scalar / x
                else:
                    return x / args[0] if not self._reverse else args[0] / x
            elif self._op == 'mod':
                if self._scalar is not None:
                    return x % self._scalar if not self._reverse else self._scalar % x
                else:
                    return x % args[0] if not self._reverse else args[0] % x
            elif self._op == 'pow':
                if self._scalar is not None:
                    return x ** self._scalar if not self._reverse else self._scalar ** x
                else:
                    return x ** args[0] if not self._reverse else args[0] ** x
            elif self._op == '>':
                if self._scalar is not None:
                    return x > self._scalar if not self._reverse else self._scalar > x
                else:
                    return x > args[0]
            elif self._op == '>=':
                if self._scalar is not None:
                    return x >= self._scalar if not self._reverse else self._scalar >= x
                else:
                    return x >= args[0]
            elif self._op == '<':
                if self._scalar is not None:
                    return x < self._scalar if not self._reverse else self._scalar < x
                else:
                    return x < args[0]
            elif self._op == '<=':
                if self._scalar is not None:
                    return x <= self._scalar if not self._reverse else self._scalar <= x
                else:
                    return x <= args[0]
            elif self._op == '==':
                if self._scalar is not None:
                    return x == self._scalar if not self._reverse else self._scalar == x
                else:
                    return x == args[0]
            elif self._op == '!=':
                if self._scalar is not None:
                    return x != self._scalar if not self._reverse else self._scalar != x
                else:
                    return x != args[0]
            else:
                print(self._op)
                assert False

    logic_ops = ['==', '!=', '>', '<', '>=', '<=']
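    # Comparison operators are not differentiable; their gradients above are
    # defined as zeros, and for these ops the output dtype is additionally
    # compared against NumPy.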
    @use_np
    def check_binary_op_result(shape1, shape2, op, dtype=None):
        if shape1 is None:
            mx_input1 = abs(_np.random.uniform()) + 1
            np_input1 = mx_input1
        else:
            mx_input1 = (rand_ndarray(shape1, dtype=dtype).abs() + 1).as_np_ndarray()
            mx_input1.attach_grad()
            np_input1 = mx_input1.asnumpy()
        if shape2 is None:
            mx_input2 = abs(_np.random.uniform()) + 1
            np_input2 = mx_input2
        else:
            mx_input2 = (rand_ndarray(shape2, dtype=dtype).abs() + 1).as_np_ndarray()
            mx_input2.attach_grad()
            np_input2 = mx_input2.asnumpy()

        scalar = None
        reverse = False
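        # If exactly one operand is a Python scalar, it is baked into the
        # block; `reverse=True` means the scalar acts as the left operand.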
        if isinstance(mx_input1, mx.nd.NDArray) and not isinstance(mx_input2, mx.nd.NDArray):
            scalar = mx_input2
            reverse = False
        elif isinstance(mx_input2, mx.nd.NDArray) and not isinstance(mx_input1, mx.nd.NDArray):
            scalar = mx_input1
            reverse = True

        grad_func = _get_grad_func(op, scalar, reverse)
        np_out = get_np_ret(np_input1, np_input2, op)
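        # `backward()` is called without an explicit head gradient, which
        # defaults to all ones, so the NumPy-side head gradient is
        # ones_like(np_out).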
        ograd = _np.ones_like(np_out)
        for hybridize in [True, False]:
            if scalar is None:
                get_mx_ret_np = TestBinaryElementWiseOp(op)
                get_mx_ret_classic = TestBinaryElementWiseOp(op)
                if hybridize:
                    get_mx_ret_np.hybridize()
                    get_mx_ret_classic.hybridize()
                if grad_func is None:
                    mx_out = get_mx_ret_np(mx_input1, mx_input2)
                else:
                    with mx.autograd.record():
                        mx_out = get_mx_ret_np(mx_input1, mx_input2)
                    mx_out.backward()
                assert type(mx_out) == np.ndarray
                if op in logic_ops:
                    assert np_out.dtype == mx_out.dtype
                assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-6, rtol=1e-5, use_broadcast=False)

                if grad_func is not None:
                    x1_grad_expected, x2_grad_expected = grad_func(ograd, np_input1, np_input2, np_out)
                    assert_almost_equal(mx_input1.grad.asnumpy(), x1_grad_expected, atol=1e-5, rtol=1e-3,
                                        use_broadcast=False)
                    assert_almost_equal(mx_input2.grad.asnumpy(), x2_grad_expected, atol=1e-5, rtol=1e-3,
                                        use_broadcast=False)
            else:
                get_mx_ret = TestBinaryElementWiseOp(op, scalar=scalar, reverse=reverse)
                if hybridize:
                    get_mx_ret.hybridize()
                if reverse:
                    mx_input = mx_input2
                else:
                    mx_input = mx_input1

                if grad_func is None:
                    mx_out = get_mx_ret(mx_input)
                else:
                    with mx.autograd.record():
                        mx_out = get_mx_ret(mx_input)
                    mx_out.backward()
                assert type(mx_out) == np.ndarray

                if op in logic_ops:
                    assert np_out.dtype == mx_out.dtype
                assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-6, rtol=1e-5, use_broadcast=False)

                # check grad
                if grad_func is not None:
                    x_grad_expected = grad_func(ograd, np_input1, np_input2, np_out)
                    assert_almost_equal(mx_input.grad.asnumpy(), x_grad_expected, atol=1e-5, rtol=1e-3,
                                        use_broadcast=False)

    dtypes = [_np.float32, _np.float64, None]
    ops = np_op_map.keys()
    for dtype in dtypes:
        for op in ops:
            check_binary_op_result((3, 4), (3, 4), op, dtype)
            check_binary_op_result(None, (3, 4), op, dtype)
            check_binary_op_result((3, 4), None, op, dtype)
            check_binary_op_result((1, 4), (3, 1), op, dtype)
            check_binary_op_result(None, (3, 1), op, dtype)
            check_binary_op_result((1, 4), None, op, dtype)
            check_binary_op_result((1, 4), (3, 5, 4), op, dtype)
            check_binary_op_result((), (3, 5, 4), op, dtype)
            check_binary_op_result((), None, op, dtype)
            check_binary_op_result(None, (), op, dtype)
            check_binary_op_result((0, 2), (1, 1), op, dtype)
            check_binary_op_result((0, 2), None, op, dtype)
            check_binary_op_result(None, (0, 2), op, dtype)
Example #2
def test_np_ndarray_binary_element_wise_ops():
    np_op_map = {
        '+': _np.add,
        '*': _np.multiply,
        '-': _np.subtract,
        '/': _np.divide,
        'mod': _np.mod,
        'pow': _np.power,
    }

    if is_op_runnable():
        np_op_map.update({
            '==': _np.equal,
            '!=': _np.not_equal,
            '>': _np.greater,
            '>=': _np.greater_equal,
            '<': _np.less,
            '<=': _np.less_equal
        })

    def get_np_ret(x1, x2, op):
        return np_op_map[op](x1, x2)

    @use_np
    class TestBinaryElementWiseOp(HybridBlock):
        def __init__(self, op, scalar=None, reverse=False):
            super(TestBinaryElementWiseOp, self).__init__()
            self._op = op
            self._scalar = scalar
            self._reverse = reverse  # if false, scalar is the right operand.

        def hybrid_forward(self, F, x, *args):
            if self._op == '+':
                if self._scalar is not None:
                    return x + self._scalar if not self._reverse else self._scalar + x
                else:
                    return x + args[0] if not self._reverse else args[0] + x
            elif self._op == '*':
                if self._scalar is not None:
                    return x * self._scalar if not self._reverse else self._scalar * x
                else:
                    return x * args[0] if not self._reverse else args[0] * x
            elif self._op == '-':
                if self._scalar is not None:
                    return x - self._scalar if not self._reverse else self._scalar - x
                else:
                    return x - args[0] if not self._reverse else args[0] - x
            elif self._op == '/':
                if self._scalar is not None:
                    return x / self._scalar if not self._reverse else self._scalar / x
                else:
                    return x / args[0] if not self._reverse else args[0] / x
            elif self._op == 'mod':
                if self._scalar is not None:
                    return x % self._scalar if not self._reverse else self._scalar % x
                else:
                    return x % args[0] if not self._reverse else args[0] % x
            elif self._op == 'pow':
                if self._scalar is not None:
                    return x**self._scalar if not self._reverse else self._scalar**x
                else:
                    return x**args[0] if not self._reverse else args[0]**x
            elif self._op == '>':
                if self._scalar is not None:
                    return x > self._scalar if not self._reverse else self._scalar > x
                else:
                    return x > args[0]
            elif self._op == '>=':
                if self._scalar is not None:
                    return x >= self._scalar if not self._reverse else self._scalar >= x
                else:
                    return x >= args[0]
            elif self._op == '<':
                if self._scalar is not None:
                    return x < self._scalar if not self._reverse else self._scalar < x
                else:
                    return x < args[0]
            elif self._op == '<=':
                if self._scalar is not None:
                    return x <= self._scalar if not self._reverse else self._scalar <= x
                else:
                    return x <= args[0]
            elif self._op == '==':
                if self._scalar is not None:
                    return x == self._scalar if not self._reverse else self._scalar == x
                else:
                    return x == args[0]
            elif self._op == '!=':
                if self._scalar is not None:
                    return x != self._scalar if not self._reverse else self._scalar != x
                else:
                    return x != args[0]
            else:
                print(self._op)
                assert False

    logic_ops = ['==', '!=', '>', '<', '>=', '<=']

    @use_np
    def check_binary_op_result(shape1, shape2, op, dtype=None):
        if shape1 is None:
            mx_input1 = abs(_np.random.uniform()) + 1
            np_input1 = mx_input1
        else:
            mx_input1 = rand_ndarray(shape1, dtype=dtype).abs() + 1
            np_input1 = mx_input1.asnumpy()
        if shape2 is None:
            mx_input2 = abs(_np.random.uniform()) + 1
            np_input2 = mx_input2
        else:
            mx_input2 = rand_ndarray(shape2, dtype=dtype).abs() + 1
            np_input2 = mx_input2.asnumpy()

        scalar = None
        reverse = False
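        # If exactly one operand is a Python scalar, it is baked into the
        # block; `reverse=True` means the scalar acts as the left operand.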
        if isinstance(mx_input1, mx.nd.NDArray) and not isinstance(
                mx_input2, mx.nd.NDArray):
            scalar = mx_input2
            reverse = False
        elif isinstance(mx_input2, mx.nd.NDArray) and not isinstance(
                mx_input1, mx.nd.NDArray):
            scalar = mx_input1
            reverse = True

        np_out = get_np_ret(np_input1, np_input2, op)
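        # Forward-only checks in this variant: values, shapes and (for
        # comparison ops) dtypes are compared against NumPy; gradients are
        # not exercised here.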
        for hybridize in [True, False]:
            if scalar is None:
                get_mx_ret_np = TestBinaryElementWiseOp(op)
                get_mx_ret_classic = TestBinaryElementWiseOp(op)
                if hybridize:
                    get_mx_ret_np.hybridize()
                    get_mx_ret_classic.hybridize()
                mx_out = get_mx_ret_np(mx_input1.as_np_ndarray(),
                                       mx_input2.as_np_ndarray())
                assert type(mx_out) == np.ndarray
                assert np_out.shape == mx_out.shape
                if op in logic_ops:
                    assert np_out.dtype == mx_out.dtype
                assert_almost_equal(mx_out.asnumpy(),
                                    np_out,
                                    atol=1e-6,
                                    rtol=1e-5)
            else:
                get_mx_ret = TestBinaryElementWiseOp(op,
                                                     scalar=scalar,
                                                     reverse=reverse)
                if hybridize:
                    get_mx_ret.hybridize()
                if reverse:
                    mx_out = get_mx_ret(mx_input2.as_np_ndarray())
                    assert type(mx_out) == np.ndarray
                else:
                    mx_out = get_mx_ret(mx_input1.as_np_ndarray())
                    assert type(mx_out) == np.ndarray
                assert np_out.shape == mx_out.shape
                if op in logic_ops:
                    assert np_out.dtype == mx_out.dtype
                assert_almost_equal(mx_out.asnumpy(),
                                    np_out,
                                    atol=1e-6,
                                    rtol=1e-5)

    dtypes = [_np.float32, _np.float64, None]
    ops = np_op_map.keys()
    for dtype in dtypes:
        for op in ops:
            check_binary_op_result((3, 4), (3, 4), op, dtype)
            check_binary_op_result(None, (3, 4), op, dtype)
            check_binary_op_result((3, 4), None, op, dtype)
            check_binary_op_result((1, 4), (3, 1), op, dtype)
            check_binary_op_result(None, (3, 1), op, dtype)
            check_binary_op_result((1, 4), None, op, dtype)
            check_binary_op_result((1, 4), (3, 5, 4), op, dtype)
            check_binary_op_result((), (3, 5, 4), op, dtype)
            check_binary_op_result((), None, op, dtype)
            check_binary_op_result(None, (), op, dtype)
            check_binary_op_result((0, 2), (1, 1), op, dtype)
            check_binary_op_result((0, 2), None, op, dtype)
            check_binary_op_result(None, (0, 2), op, dtype)
Example #3
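        # Excerpt: counts from repeated np.random.multinomial draws are
        # accumulated and the resulting frequencies are compared to `pvals`.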
        for pvals in pvals_list:
            if pvals_mx_np_array:
                pvals = mx.np.array(pvals)
            x = np.random.multinomial(small_exp, pvals)
            for i in range(total_exp // small_exp):
                x = x + np.random.multinomial(20, pvals)
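        # Normalize the accumulated counts to frequencies; over `total_exp`
        # draws they should approximate `pvals` within the loose tolerances
        # below.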
        freq = (x.asnumpy() / _np.float32(total_exp)).reshape((-1, len(pvals)))
        for i in range(freq.shape[0]):
            if type(pvals) == np.ndarray:
                mx.test_utils.assert_almost_equal(freq[i, :], pvals.asnumpy(), rtol=0.20, atol=1e-1)
            else:
                mx.test_utils.assert_almost_equal(freq[i, :], pvals, rtol=0.20, atol=1e-1)


@with_seed()
@unittest.skipUnless(is_op_runnable(), "Comparison ops can only run on either CPU instances, or GPU instances with"
                                       " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON")
@use_np
def test_np_ndarray_boolean_indexing():
    def test_single_bool_index():
        # adapted from numpy's test_indexing.py
        # Single boolean index
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=np.int32)
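        # Indexing with a 0-d boolean True is equivalent to `a[None]` (adds a
        # length-1 leading axis); a 0-d False selects nothing along that axis.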
        assert same(a[np.array(True, dtype=np.bool_)].asnumpy(), a[None].asnumpy())
        assert same(a[np.array(False, dtype=np.bool_)].asnumpy(), a[None][0:0].asnumpy())

    def test_boolean_catch_exception():
        # adapted from numpy's test_indexing.py
        arr = np.ones((5, 4, 3))
Example #4
        for i in range(freq.shape[0]):
            if type(pvals) == np.ndarray:
                mx.test_utils.assert_almost_equal(freq[i, :],
                                                  pvals.asnumpy(),
                                                  rtol=0.20,
                                                  atol=1e-1)
            else:
                mx.test_utils.assert_almost_equal(freq[i, :],
                                                  pvals,
                                                  rtol=0.20,
                                                  atol=1e-1)


@with_seed()
@unittest.skipUnless(
    is_op_runnable(),
    "Comparison ops can only run on either CPU instances, or GPU instances with"
    " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON")
@use_np
def test_np_ndarray_boolean_indexing():
    def test_single_bool_index():
        # adapted from numpy's test_indexing.py
        # Single boolean index
        a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
        assert same(a[np.array(True, dtype=np.bool_)].asnumpy(),
                    a[None].asnumpy())
        assert same(a[np.array(False, dtype=np.bool_)].asnumpy(),
                    a[None][0:0].asnumpy())

    def test_boolean_catch_exception():
        # adapted from numpy's test_indexing.py