def test_kvstore_init_aux_keys():
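    # Key 0 ("weight") is updated by the wrapped default sgd optimizer, while key 1
    # ("weight_full") is routed to _SVRGOptimizer's internal AssignmentOptimizer.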
    param_idx2name = {0: "weight", 1: "weight_full"}

    svrg_optimizer = _SVRGOptimizer(default_optimizer='sgd', param_idx2name=param_idx2name, learning_rate=1.0)
    kv = mx.kv.create('local')
    kv.set_optimizer(svrg_optimizer)

    # Use default sgd optimizer
    param_weight_init = mx.nd.array([0, 0, 0])
    param_weight_update = mx.nd.array([1, 1, 1])

    kv.init(0, param_weight_init)
    kv.push(0, param_weight_update)
    kv.pull(0, param_weight_init)

    param_weight_full_init = mx.nd.array([1, 1, 1])
    param_weight_full_update = mx.nd.array([2, 2, 2])

    # Use AssignmentOptimizer
    kv.init(1, param_weight_full_init)
    kv.push(1, param_weight_full_update)
    kv.pull(1, param_weight_full_init)

    # updated weights using default sgd optimizer
    assert same(param_weight_init.asnumpy(), np.array([-1, -1, -1]))
    # updated with AssignmentOptimizer
    assert same(param_weight_full_init.asnumpy(), np.array([2, 2, 2]))
def test_assign_float_value_to_ndarray():
    """Test case from https://github.com/apache/incubator-mxnet/issues/8668"""
    a = np.array([47.844944], dtype=np.float32)
    b = mx.nd.zeros(1, dtype=np.float32)
    b[0] = a
    assert same(a, b.asnumpy())
    b[0] = a[0]
    assert same(a, b.asnumpy())
def test_single_bool_index():
    # adapted from numpy's test_indexing.py
    # Single boolean index
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
    assert same(a[np.array(True, dtype=np.bool_)].asnumpy(),
                a[None].asnumpy())
    assert same(a[np.array(False, dtype=np.bool_)].asnumpy(),
                a[None][0:0].asnumpy())
def test_ndarray_reshape():
    tensor = mx.nd.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
    true_res = mx.nd.arange(8) + 1
    assert same(tensor.reshape((-1, )).asnumpy(), true_res.asnumpy())
    true_res = mx.nd.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    assert same(tensor.reshape((2, -1)).asnumpy(), true_res.asnumpy())
    assert same(tensor.reshape((0, -1)).asnumpy(), true_res.asnumpy())
    true_res = mx.nd.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    assert same(tensor.reshape((-1, 2)).asnumpy(), true_res.asnumpy())
def test_npx_slice():
    class TestSlice(HybridBlock):
        def __init__(self, begin, end, step):
            super(TestSlice, self).__init__()
            self._begin = begin
            self._end = end
            self._step = step

        def hybrid_forward(self, F, a):
            return F.npx.slice(a,
                               begin=self._begin,
                               end=self._end,
                               step=self._step)

    shape = (8, 16, 9, 9)
    np_array = _np.arange(_np.prod(shape), dtype='int32').reshape(shape)
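    # Each config is a (begin, end, step) triple passed to npx.slice; None entries
    # fall back to the operator defaults.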
    configs = [
        ([], [], None),
        ([], [], []),
        ([1], [4], None),
        ([1], [10], [3]),
        ([10], [0], [-2]),
        ([None], [None], [None]),
        ([None], [None], [-1]),
        ([10], [None], [-1]),
        ([1, 0, 3], [-2, 10, -4], [None, 2, 3]),
        ([-2, -3, -5, -6], [1, 3, 4, 5], None),
        ([-2, -3, -5, -6], [1, 3, 4, 5], [-1, -2, -3, -4]),
        ([2, -3, -5, -6], [2, 3, 4, 5], None),
        ([2, -3, -5, 5], [3, 3, 4, 5], None),
    ]

    for hybridize in [True, False]:
        for config in configs:
            start, end, step = config[0], config[1], config[2]
            test_slice = TestSlice(begin=start, end=end, step=step)
            if hybridize:
                test_slice.hybridize()

            a = np.array(np_array, dtype=np_array.dtype)
            a.attach_grad()
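            # Build the equivalent tuple of Python slices so numpy basic indexing
            # produces the reference result for this config.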
            basic_index = tuple([
                slice(start[i], end[i], step[i])
                if step is not None else slice(start[i], end[i])
                for i in range(len(start))
            ])
            expected_ret = np_array[basic_index]
            with mx.autograd.record():
                y = test_slice(a)

            assert same(y.asnumpy(), expected_ret)

            # test backward
            mx.autograd.backward(y)
            expected_grad = _np.zeros(shape)
            expected_grad[basic_index] = 1
            assert same(a.grad.asnumpy(), expected_grad)
def test_np_ndarray_pickle():
    a = np.random.uniform(size=(4, 5))
    a_copy = a.copy()
    import pickle
    with open("np_ndarray_pickle_test_file", 'wb') as f:
        pickle.dump(a_copy, f)
    with open("np_ndarray_pickle_test_file", 'rb') as f:
        a_load = pickle.load(f)
    assert same(a.asnumpy(), a_load.asnumpy())
def test_dense_backward_no_flatten():
    print("2nd order gradient for Fully Connected, flatten=False")
    for x in NDArrayGenerator(5, 3):
        hidden = random.randrange(1, 4)
        net = gluon.nn.Sequential()
        with net.name_scope():
            net.add(gluon.nn.Dense(hidden, flatten=False))
        net.initialize(mxnet.initializer.Constant(.5))
        x.attach_grad()
        with autograd.record():
            y = net.forward(x)
            o_y = arange_shape_like(y)  # head gradient of y
            params = [p.data() for p in net.collect_params().values()]
            w = params[0]
            b = params[1]
            print("Checking y ({}) = x({}) * w^T({}) + b({})".format(
                y.shape, x.shape, w.shape, b.shape))
            x_grad = autograd.grad(heads=y,
                                   variables=x,
                                   head_grads=o_y,
                                   create_graph=True,
                                   retain_graph=True)[0]
            o_x_grad = arange_shape_like(x_grad)
            w_grad_grad = autograd.grad(heads=x_grad,
                                        variables=w,
                                        head_grads=o_x_grad,
                                        create_graph=False)[0]
            w_grad = autograd.grad(heads=y,
                                   variables=w,
                                   head_grads=o_y,
                                   create_graph=True,
                                   retain_graph=True)[0]
            o_w_grad = arange_shape_like(w_grad)
            x_grad_grad = autograd.grad(heads=w_grad,
                                        variables=x,
                                        head_grads=o_w_grad,
                                        create_graph=False)[0]
        # Expected results
        o_y = flatten2d_left(o_y)
        x = flatten2d_left(x)
        o_x_grad = flatten2d_left(o_x_grad)
        o_w_grad = flatten2d_left(o_w_grad)
        w_grad_e = nd.dot(o_y, x, transpose_a=True)
        w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True)
        x_grad_e = nd.dot(o_y, w)
        x_grad_grad_e = nd.dot(o_y, o_w_grad)
        w_grad_check = same(flatten2d_left(w_grad), flatten2d_left(w_grad_e))
        w_grad_grad_check = same(flatten2d_left(w_grad_grad),
                                 flatten2d_left(w_grad_grad_e))
        x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e))
        x_grad_grad_check = same(flatten2d_left(x_grad_grad),
                                 flatten2d_left(x_grad_grad_e))
        ok_(x_grad_check)
        ok_(w_grad_check)
        ok_(x_grad_grad_check)
        ok_(w_grad_grad_check)
def check_quantized_flatten(shape):
    qdata = mx.nd.random.uniform(low=-127, high=127, shape=shape).astype('int8')
    min_data = mx.nd.array([-1023.343], dtype='float32')
    max_data = mx.nd.array([2343.324275], dtype='float32')
    qoutput, min_output, max_output = mx.nd.contrib.quantized_flatten(qdata, min_data, max_data)
    assert qoutput.ndim == 2
    assert qoutput.shape[0] == qdata.shape[0]
    assert qoutput.shape[1] == np.prod(qdata.shape[1:])
    assert same(qdata.asnumpy().flatten(), qoutput.asnumpy().flatten())
    assert same(min_data.asnumpy(), min_output.asnumpy())
    assert same(max_data.asnumpy(), max_output.asnumpy())
def test_np_meshgrid():
    nx, ny = (4, 5)
    x = np.array(_np.linspace(0, 1, nx), dtype=np.float32)
    y = np.array(_np.linspace(0, 1, ny), dtype=np.float32)
    z = np.ones(())
    xv, yv, zv = np.meshgrid(x, y, z)
    xv_expected, yv_expected, zv_expected = _np.meshgrid(
        x.asnumpy(), y.asnumpy(), z.asnumpy())
    assert same(xv.asnumpy(), xv_expected)
    assert same(yv.asnumpy(), yv_expected)
    assert same(zv.asnumpy(), zv_expected)
def test_boolean_indexing_twodim():
    # adapted from numpy's test_indexing.py
    # Indexing a 2-dimensional array with
    # a 2-dimensional boolean array
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
    b = np.array(
        [[True, False, True], [False, True, False], [True, False, True]],
        dtype=np.bool_)
    assert same(a[b].asnumpy(), _np.array([1, 3, 5, 7, 9], dtype=a.dtype))
    assert same(a[b[1]].asnumpy(), _np.array([[4, 5, 6]], dtype=a.dtype))
    assert same(a[b[0]].asnumpy(), a[b[2]].asnumpy())
def check_params(params, qparams, qsym=None):
    if qsym is None:
        assert len(params) == len(qparams)
        for k, v in params.items():
            assert k in qparams
            assert same(v.asnumpy(), qparams[k].asnumpy())
    else:
        qparams_ground_truth = mx.contrib.quant._quantize_params(qsym, params)
        assert len(qparams) == len(qparams_ground_truth)
        for k, v in qparams_ground_truth.items():
            assert k in qparams
            assert same(v.asnumpy(), qparams[k].asnumpy())
def test_np_ndarray_pickle():
    a = np.random.uniform(size=(4, 5))
    a_copy = a.copy()
    import pickle

    with TemporaryDirectory() as work_dir:
        fname = os.path.join(work_dir, 'np_ndarray_pickle_test_file')
        with open(fname, 'wb') as f:
            pickle.dump(a_copy, f)
        with open(fname, 'rb') as f:
            a_load = pickle.load(f)
        assert same(a.asnumpy(), a_load.asnumpy())
def check_quantized_flatten(shape):
    qdata = mx.nd.random.uniform(low=-127, high=127,
                                 shape=shape).astype('int8')
    min_data = mx.nd.array([-1023.343], dtype='float32')
    max_data = mx.nd.array([2343.324275], dtype='float32')
    qoutput, min_output, max_output = mx.nd.contrib.quantized_flatten(
        qdata, min_data, max_data)
    assert qoutput.ndim == 2
    assert qoutput.shape[0] == qdata.shape[0]
    assert qoutput.shape[1] == np.prod(qdata.shape[1:])
    assert same(qdata.asnumpy().flatten(), qoutput.asnumpy().flatten())
    assert same(min_data.asnumpy(), min_output.asnumpy())
    assert same(max_data.asnumpy(), max_output.asnumpy())
def test_contrib_intgemm_maxabsolute(shape):
    if "intgemm_maxabsolute" not in dir(mx.nd.contrib):
        return
    # mx.nd API
    m = mx.nd.random_uniform(low=-100.0, high=100.0, shape=shape)
    fast = mx.nd.contrib.intgemm_maxabsolute(m)
    slow = mx.nd.max(mx.nd.abs(m))
    assert same(fast, slow)
    # np API
    m = np.random.uniform(low=-100.0, high=100.0, size=shape)
    fast = npx.intgemm_maxabsolute(m).reshape(())
    slow = np.max(np.abs(m))
    assert same(fast, slow)
def test_ndarray_reshape():
    tensor  = mx.nd.array([[[1, 2], [3, 4]],
                           [[5, 6], [7, 8]]])
    true_res = mx.nd.arange(8) + 1
    assert same(tensor.reshape((-1, )).asnumpy(), true_res.asnumpy())
    true_res  = mx.nd.array([[1, 2, 3, 4],
                             [5, 6, 7, 8]])
    assert same(tensor.reshape((2, -1)).asnumpy(), true_res.asnumpy())
    assert same(tensor.reshape((0, -1)).asnumpy(), true_res.asnumpy())
    true_res  = mx.nd.array([[1, 2],
                             [3, 4],
                             [5, 6],
                             [7, 8]])
    assert same(tensor.reshape((-1, 2)).asnumpy(), true_res.asnumpy())
def assert_same(np_array, np_index, mx_array, mx_index, mx_value, np_value=None):
    if np_value is not None:
        np_array[np_index] = np_value
    else:
        np_array[np_index] = mx_value
    mx_array[mx_index] = mx_value
    assert same(np_array, mx_array.asnumpy())
def test_getitem(np_array, index):
    np_index = index
    if type(index) == mx.nd.NDArray:  # use of NDArray is prohibited
        assert False
    if isinstance(index, np.ndarray):
        np_index = index.asnumpy()
    if isinstance(index, tuple):
        np_index = tuple([
            idx.asnumpy() if isinstance(idx, mx.nd.NDArray) else idx
            for idx in index]
        )
    np_indexed_array = np_array[np_index]
    mx_np_array = np.array(np_array, dtype=np_array.dtype)
    for autograd in [True, False]:
        try:
            if autograd:
                with mx.autograd.record():
                    mx_indexed_array = mx_np_array[index]
            else:
                mx_indexed_array = mx_np_array[index]
        except Exception as e:
            print('Failed with index = {}'.format(index))
            raise e
        mx_indexed_array = mx_indexed_array.asnumpy()
        assert same(np_indexed_array, mx_indexed_array), 'Failed with index = {}'.format(index)
def check_identity_array_creation(n, dtype):
    np_out = _np.identity(n=n, dtype=dtype)
    mx_out = np.identity(n=n, dtype=dtype)
    assert same(mx_out.asnumpy(), np_out)
    if dtype is None:
        assert mx_out.dtype == _np.float32
        assert np_out.dtype == _np.float64
def test_boolean_indexing_list():
    # adapted from numpy's test_indexing.py
    a = np.array([1, 2, 3], dtype=np.int32)
    b = [True, False, True]
    # Two variants of the test because the first takes a fast path
    assert same(a[b].asnumpy(), _np.array([1, 3], dtype=a.dtype))
    assert same(a[None, b].asnumpy(), _np.array([[1, 3]], dtype=a.dtype))
def test_boolean_indexing_onedim():
    # adapted from numpy's test_indexing.py
    # Indexing a 2-dimensional array with
    # a boolean array of length one
    a = np.array([[0.,  0.,  0.]])
    b = np.array([True], dtype=bool)
    assert same(a[b].asnumpy(), a.asnumpy())
def test_np_transpose():
    # TODO(junwu): Add more test cases
    data = mx.sym.var('a').as_np_ndarray()
    ret = data.transpose()
    assert type(ret) == mx.sym.np._Symbol

    dtypes = ['float32', 'int32']
    for dtype in dtypes:
        for ndim in [0, 1, 2, 3, 4, 5, 6]:
            shape = rand_shape_nd(ndim, dim=5, allow_zero_size=True)
            np_data = _np.random.uniform(low=-100, high=100,
                                         size=shape).astype(dtype)
            mx_data = np.array(np_data, dtype=dtype)
            axes = [None]
            if ndim == 0:
                axes += [()]
            else:
                axis = [i for i in range(ndim)]
                axes.append(tuple(axis))
                random.shuffle(axis)
                axes.append(tuple(axis))
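            # Check every axes option: None reverses all axes, while tuples give
            # explicit permutations (or the empty tuple for 0-d arrays).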
            for axis in axes:
                np_out = _np.transpose(np_data, axes=axis)
                mx_out = np.transpose(mx_data, axes=axis)
                assert np_out.dtype == mx_out.dtype
                assert same(mx_out.asnumpy(), np_out)
    def test_getitem(np_array, index, is_scalar=False):
        """`is_scalar` indicates whether we should expect a scalar for the result.
        If so, the indexed array of NDArray should call asscalar to compare
        with numpy's indexed array."""
        np_index = index
        if isinstance(index, mx.nd.NDArray):
            np_index = index.asnumpy()
        if isinstance(index, tuple):
            np_index = []
            for idx in index:
                if isinstance(idx, mx.nd.NDArray):
                    np_index.append(idx.asnumpy())
                else:
                    np_index.append(idx)
            np_index = tuple(np_index)

        np_indexed_array = np_array[np_index]
        mx_array = mx.nd.array(np_array, dtype=np_array.dtype)
        mx_indexed_array = mx_array[index]
        if is_scalar:
            mx_indexed_array = mx_indexed_array.asscalar()
        else:
            mx_indexed_array = mx_indexed_array.asnumpy()
        assert same(np_indexed_array,
                    mx_indexed_array), 'Failed with index=%s' % str(index)
    def test_horovod_broadcast_deferred_init_parameters(self):
        """Test that the deferred initialized parameters are broadcasted."""
        hvd.init()
        root_rank = 0
        rank = hvd.rank()

        # This test does not apply if there is only one worker.
        if hvd.size() == 1:
            self.skipTest("Only one worker available")

        mx.random.seed(rank)
        layer = mx.gluon.nn.Conv2D(10, 2)
        layer.initialize()
        hvd.broadcast_parameters(layer.collect_params(), root_rank=root_rank)

        x = mx.nd.ones((5, 4, 10, 10))
        layer(x)
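        # The forward pass triggers the deferred parameter initialization; every
        # worker's parameters should then match the values broadcast from the root rank.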
        tensors = [p.data() for _, p in sorted(layer.collect_params().items())]
        root_tensors = []
        for tensor in tensors:
            root_tensors.append(hvd.broadcast(tensor, root_rank=root_rank))

        for tensor, root_tensor in zip(tensors, root_tensors):
            assert same(tensor.asnumpy(), root_tensor.asnumpy()), \
                'horovod did not broadcast deferred initialized parameter correctly'
def check_zero_array_creation(shape, dtype):
    np_out = _np.zeros(shape=shape, dtype=dtype)
    mx_out = np.zeros(shape=shape, dtype=dtype)
    assert same(mx_out.asnumpy(), np_out)
    if dtype is None:
        assert mx_out.dtype == _np.float32
        assert np_out.dtype == _np.float64
def test_moveaxis():
    X = mx.nd.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
    res = mx.nd.moveaxis(X, 0, 3).asnumpy()
    true_res = mx.nd.array([[[1., 7.], [2., 8.], [3., 9.]],
                            [[4., 10.], [5., 11.], [6., 12.]]])
    assert same(res, true_res.asnumpy())
    assert mx.nd.moveaxis(X, 2, 0).shape == (3, 2, 2)
    def test_horovod_broadcast_inplace(self):
        """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()

        # This test does not apply if there is only one worker.
        if size == 1:
            return

        dtypes = ['int32', 'int64', 'float32', 'float64']
        dims = [1, 2, 3]
        ctx = self._current_context()
        count = 0
        shapes = [(), (17), (17, 17), (17, 17, 17)]
        root_ranks = list(range(size))
        for dtype, dim, root_rank in itertools.product(dtypes, dims,
                                                       root_ranks):
            tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
            root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
            tensor = tensor.astype(dtype)
            root_tensor = root_tensor.astype(dtype)

            # Only do broadcasting using and on broadcast_tensor
            broadcast_tensor = tensor.copy()
            hvd.broadcast_(broadcast_tensor,
                           root_rank=root_rank,
                           name=str(count))
            if rank != root_rank:
                if same(tensor.asnumpy(), root_tensor.asnumpy()):
                    print("broadcast", count, dtype, dim,
                          mx.nd.max(tensor == root_tensor))
                    print("tensor", hvd.rank(), tensor)
                    print("root_tensor", hvd.rank(), root_tensor)
                    print("comparison", hvd.rank(), tensor == root_tensor)
                assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
                    'hvd.broadcast modifies source tensor'
            if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
                print("broadcast", count, dtype, dim)
                print("broadcast_tensor", hvd.rank(), broadcast_tensor)
                print("root_tensor", hvd.rank(), root_tensor)
                print("comparison", hvd.rank(),
                      broadcast_tensor == root_tensor)
            broadcast_tensor.wait_to_read()
            tensor.wait_to_read()
            assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
                'hvd.broadcast produces incorrect broadcasted tensor'
def test_iter():
    x = mx.nd.array([1, 2, 3])
    y = []
    for a in x:
        y.append(a)

    for i in range(x.size):
        assert same(y[i].asnumpy(), x[i].asnumpy())
def test_np_sum():
    class TestSum(HybridBlock):
        def __init__(self, axis=None, dtype=None, keepdims=False):
            super(TestSum, self).__init__()
            self._axis = axis
            self._dtype = dtype
            self._keepdims = keepdims

        def hybrid_forward(self, F, a, *args, **kwargs):
            return F.np.sum(a, axis=self._axis, dtype=self._dtype, keepdims=self._keepdims)

    def is_int(dtype):
        return 'int' in dtype

    in_data_dim = random.choice([2, 3, 4])
    shape = rand_shape_nd(in_data_dim, dim=3)
    acc_type = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64',
                'int8': 'int32', 'int32': 'int64', 'int64': 'int64'}
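    # acc_type maps each input dtype to the accumulation dtype numpy uses for the
    # reference sum before casting to the requested output dtype.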
    for hybridize in [False, True]:
        for keepdims in [True, False]:
            for axis in ([i for i in range(in_data_dim)] + [(), None]):
                for itype in ['float16', 'float32', 'float64', 'int8', 'int32', 'int64']:
                    for dtype in ['float16', 'float32', 'float64', 'int8', 'int32', 'int64']:
                        if is_int(dtype) and not is_int(itype):
                            continue
                        # test gluon
                        test_sum = TestSum(axis=axis, dtype=dtype, keepdims=keepdims)
                        if hybridize:
                            test_sum.hybridize()
                        if is_int(itype):
                            x = _np.random.randint(-128, 128, shape, dtype=itype)
                            x = mx.nd.array(x)
                        else:
                            x = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype=itype)
                        x = x.as_np_ndarray()
                        x.attach_grad()
                        expected_ret = _np.sum(x.asnumpy(), axis=axis, dtype=acc_type[itype], keepdims=keepdims)
                        expected_ret = expected_ret.astype(dtype)
                        with mx.autograd.record():
                            y = test_sum(x)
                        assert y.shape == expected_ret.shape
                        assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3 if dtype == 'float16' else 1e-3,
                                            atol=1e-5 if dtype == 'float16' else 1e-5)

                        y.backward()
                        assert same(x.grad.asnumpy(), _np.ones(shape=x.shape, dtype=x.dtype))

                        # test numeric
                        if itype == 'float32' and dtype == 'float32':
                            x_sym = mx.sym.Variable("x").as_np_ndarray()
                            mx_sym = mx.sym.np.sum(x_sym, axis=axis, dtype=dtype, keepdims=keepdims).as_nd_ndarray()
                            check_numeric_gradient(mx_sym, [x.as_nd_ndarray()],
                                                   numeric_eps=1e-3, rtol=1e-3, atol=1e-4, dtype=_np.float32)

                        # test imperative
                        mx_out = np.sum(x, axis=axis, dtype=dtype, keepdims=keepdims)
                        np_out = _np.sum(x.asnumpy(), axis=axis, dtype=acc_type[itype], keepdims=keepdims).astype(dtype)
                        assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)
def check_astype_equal(dtype, copy, expect_zero_copy=False):
    mx_ret = mx_data.astype(dtype=dtype, copy=copy)
    assert type(mx_ret) is np.ndarray
    np_ret = np_data.astype(dtype=dtype, copy=copy)
    assert mx_ret.dtype == np_ret.dtype
    assert same(mx_ret.asnumpy(), np_ret)
    if expect_zero_copy:
        assert id(mx_ret) == id(mx_data)
        assert id(np_ret) == id(np_data)
def test_tvm_broadcast_add():
    if _features.is_enabled("TVM_OP"):
        a_shape = rand_shape_nd(4)
        b_shape = (1, ) + a_shape[1:2] + (1, 1)
        a = mx.nd.normal(shape=a_shape)
        b = mx.nd.normal(shape=b_shape)
        c = mx.nd.contrib.tvm_vadd(a, b)
        c_np = a.asnumpy() + b.asnumpy()
        assert same(c.asnumpy(), c_np)
def test_ndarray_choose():
    shape = (100, 20)
    npy = np.arange(np.prod(shape)).reshape(shape)
    arr = mx.nd.array(npy)
    nrepeat = 3
    for repeat in range(nrepeat):
        indices = np.random.randint(shape[1], size=shape[0])
        assert same(npy[np.arange(shape[0]), indices],
                    mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy())
def test_ndarray_legacy_load():
    data = []
    for i in range(6):
        data.append(mx.nd.arange(128))
    path = os.path.dirname(os.path.realpath(__file__))
    legacy_data = mx.nd.load(os.path.join(path, 'legacy_ndarray.v0'))
    assert len(data) == len(legacy_data)
    for i in range(len(data)):
        assert same(data[i].asnumpy(), legacy_data[i].asnumpy())
def assert_same(np_array, np_index, mx_array, mx_index, mx_value, np_value=None):
    if np_value is not None:
        np_array[np_index] = np_value
    elif isinstance(mx_value, mx.nd.NDArray):
        np_array[np_index] = mx_value.asnumpy()
    else:
        np_array[np_index] = mx_value
    mx_array[mx_index] = mx_value
    assert same(np_array, mx_array.asnumpy())
def test_tvm_broadcast_add():
    if _features.is_enabled("TVM_OP"):
        configs = [
            [[5, 6, 7, 8, 9], [1]],
            [[6, 4, 5, 2, 1], [6, 1, 5, 1, 1]],
            [[3, 5, 6], [1, 6]],
            [[3, 5, 6], [5, 1]],
            [[3, 5, 6], [5, 6]],
            [[4, 3, 2, 1], [2, 1]],
            [[4, 3, 2, 2], [4, 1, 1, 2]],
            [[6, 6], [6, 6]],
        ]
        for config in configs:
            a_shape = config[0]
            b_shape = config[1]
            a = mx.nd.normal(shape=a_shape)
            b = mx.nd.normal(shape=b_shape)
            a.attach_grad()
            b.attach_grad()
            with mx.autograd.record():
                c = mx.nd.contrib.tvm_vadd(a, b)
            c_np = a.asnumpy() + b.asnumpy()
            assert same(c.asnumpy(), c_np)
            # test backward
            c.backward()
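            # For a broadcast add, each input element's gradient equals the number
            # of output elements it was broadcast to, i.e. c_np.size / input.size.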
            expected_grad_a = _np.ones_like(
                a.asnumpy()) * c_np.size / a.asnumpy().size
            expected_grad_b = _np.ones_like(
                b.asnumpy()) * c_np.size / b.asnumpy().size
            assert same(a.grad.asnumpy(), expected_grad_a)
            assert same(b.grad.asnumpy(), expected_grad_b)
            # test kAddTo request
            a = mx.nd.normal(shape=a_shape)
            b = mx.nd.normal(shape=b_shape)
            a.attach_grad()
            b.attach_grad()
            with mx.autograd.record():
                c = mx.nd.contrib.tvm_vadd(a, b)
                d = mx.nd.contrib.tvm_vadd(a, b)
            mx.autograd.backward([c, d])
            expected_grad_a = 2 * _np.ones_like(a.asnumpy()) * c.size / a.size
            expected_grad_b = 2 * _np.ones_like(b.asnumpy()) * c.size / b.size
            assert same(a.grad.asnumpy(), expected_grad_a)
            assert same(b.grad.asnumpy(), expected_grad_b)
def test_getitem_autograd(np_array, index):
    x = mx.nd.array(np_array, dtype=np_array.dtype)
    x.attach_grad()
    with mx.autograd.record():
        y = x[index]
    y.backward()
    value = mx.nd.ones_like(y)
    x_grad = mx.nd.zeros_like(x)
    x_grad[index] = value
    assert same(x_grad.asnumpy(), x.grad.asnumpy())
def test_quantize_float32_to_int8():
    shape = rand_shape_nd(4)
    data = rand_ndarray(shape, 'default', dtype='float32')
    min_range = mx.nd.min(data)
    max_range = mx.nd.max(data)
    qdata, min_val, max_val = mx.nd.contrib.quantize(data, min_range, max_range, out_type='int8')
    data_np = data.asnumpy()
    min_range = min_range.asscalar()
    max_range = max_range.asscalar()
    real_range = np.maximum(np.abs(min_range), np.abs(max_range))
    quantized_range = 127.0
    scale = quantized_range / real_range
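    # Symmetric int8 quantization: scale = 127 / max(|min|, |max|), so the returned
    # min_val/max_val should span exactly [-real_range, real_range].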
    assert qdata.dtype == np.int8
    assert min_val.dtype == np.float32
    assert max_val.dtype == np.float32
    assert same(min_val.asscalar(), -real_range)
    assert same(max_val.asscalar(), real_range)
    qdata_np = (np.sign(data_np) * np.minimum(np.abs(data_np) * scale + 0.5, quantized_range)).astype(np.int8)
    assert same(qdata.asnumpy(), qdata_np)
def test_ndarray_concatenate():
    axis = 1
    shapes = [(2, 3, 4, 2), (2, 2, 4, 2), (2, 1, 4, 2)]
    arrays_np = [np.random.uniform(-10, 10, s).astype(np.float32) for s in shapes]
    arrays_nd = [mx.nd.array(x) for x in arrays_np]

    array_nd = mx.nd.concatenate(arrays_nd, axis=axis)
    array_np = np.concatenate(arrays_np, axis=axis)

    assert same(array_np, array_nd.asnumpy())
def test_ndarray_slice():
    shape = (10,)
    A = mx.nd.array(np.random.uniform(-10, 10, shape))
    A2 = A.asnumpy()
    assert same(A[3:8].asnumpy(), A2[3:8])
    A2[3:8] *= 10
    A[3:8] = A2[3:8]
    assert same(A[3:8].asnumpy(), A2[3:8])

    shape = (3,4,5,6,7)
    A = mx.nd.random.uniform(shape=shape)
    A2 = A.asnumpy()

    assert same(A[1,3:4,:,1:5].asnumpy(), A2[1,3:4,:,1:5])

    assert A[1,2,3,4,5].asscalar() == A2[1,2,3,4,5]

    a = mx.nd.array([[0, 1], [2, 3]])
    assert (a[[1, 1, 0], [0, 1, 0]].asnumpy() == [2, 3, 0]).all()
    assert (a[mx.nd.array([1, 1, 0]), mx.nd.array([0, 1, 0])].asnumpy() == [2, 3, 0]).all()
def test_ndarray_onehot():
    shape = (100, 20)
    npy = np.arange(np.prod(shape)).reshape(shape)
    arr = mx.nd.array(npy)
    nrepeat = 3
    for repeat in range(nrepeat):
        indices = np.random.randint(shape[1], size=shape[0])
        npy[:] = 0.0
        npy[np.arange(shape[0]), indices] = 1.0
        mx.nd.onehot_encode(mx.nd.array(indices), out=arr)
        assert same(npy, arr.asnumpy())
def test_moveaxis():
    X = mx.nd.array([[[1, 2, 3], [4, 5, 6]],
                     [[7, 8, 9], [10, 11, 12]]])
    res = mx.nd.moveaxis(X, 0, 3).asnumpy()
    true_res = mx.nd.array([[[  1.,   7.],
                             [  2.,   8.],
                             [  3.,   9.]],
                            [[  4.,  10.],
                             [  5.,  11.],
                             [  6.,  12.]]])
    assert same(res, true_res.asnumpy())
    assert mx.nd.moveaxis(X, 2, 0).shape == (3, 2, 2)
def test_ndarray_crop():
    # get crop
    x = mx.nd.ones((2, 3, 4))
    y = mx.nd.crop(x, begin=(0, 0, 0), end=(2, 1, 3))
    assert same(y.asnumpy(), np.ones((2, 1, 3), dtype=y.dtype))

    # crop assign
    z = mx.nd.zeros((2, 1, 3))
    mx.nd._internal._crop_assign(x, z, begin=(0, 0, 0),
                                 end=(2, 1, 3), out=x)
    np_x = np.ones(x.shape, dtype=x.dtype)
    np_x[0:2, 0:1, 0:3] = 0
    assert same(x.asnumpy(), np_x)

    # crop assign with scalar
    x = mx.nd.ones((2, 3, 4))
    mx.nd._internal._crop_assign_scalar(x, scalar=5,
                                        begin=(0, 0, 0),
                                        end=(2, 1, 3), out=x)
    np_x = np.ones(x.shape, dtype=x.dtype)
    np_x[0:2, 0:1, 0:3] = 5
    assert same(x.asnumpy(), np_x)
def test_ndarray_fill():
    shape = (100, 20)
    npy = np.arange(np.prod(shape)).reshape(shape)
    arr = mx.nd.array(npy)
    new_npy = npy.copy()
    nrepeat = 3
    for repeat in range(nrepeat):
        indices = np.random.randint(shape[1], size=shape[0])
        val = np.random.randint(shape[1], size=shape[0])
        new_npy[:] = npy
        new_npy[np.arange(shape[0]), indices] = val
        assert same(new_npy,
                    mx.nd.fill_element_0index(arr, mx.nd.array(val), mx.nd.array(indices)).asnumpy())
    def test_getitem(np_array, index, is_scalar=False):
        """`is_scalar` indicates whether we should expect a scalar for the result.
        If so, the indexed array of NDArray should call asscalar to compare
        with numpy's indexed array."""
        np_index = index
        if isinstance(index, mx.nd.NDArray):
            np_index = index.asnumpy()
        if isinstance(index, tuple):
            np_index = []
            for idx in index:
                if isinstance(idx, mx.nd.NDArray):
                    np_index.append(idx.asnumpy())
                else:
                    np_index.append(idx)
            np_index = tuple(np_index)

        np_indexed_array = np_array[np_index]
        mx_array = mx.nd.array(np_array, dtype=np_array.dtype)
        mx_indexed_array = mx_array[index]
        if is_scalar:
            mx_indexed_array = mx_indexed_array.asscalar()
        else:
            mx_indexed_array = mx_indexed_array.asnumpy()
        assert same(np_indexed_array, mx_indexed_array), 'Failed with index=%s' % str(index)
def test_ndarray_setitem():
    shape = (3, 4, 2)

    # scalar assignment
    x = mx.nd.zeros(shape)
    x[:] = 1
    x_np = np.ones(shape, dtype=x.dtype)
    assert same(x.asnumpy(), x_np)

    # ndarray assignment
    x = mx.nd.zeros(shape)
    x[:] = mx.nd.ones(shape)
    x_np = np.ones(shape, dtype=x.dtype)
    assert same(x.asnumpy(), x_np)

    # numpy assignment
    x = mx.nd.zeros(shape)
    x[:] = np.ones(shape)
    x_np = np.ones(shape, dtype=x.dtype)
    assert same(x.asnumpy(), x_np)

    # indexing sub-arrays
    x = mx.nd.zeros(shape)
    x[1] = 1
    x_np = np.zeros(shape, dtype=x.dtype)
    x_np[1] = 1
    assert same(x.asnumpy(), x_np)

    # short all-dim indexing
    x = mx.nd.zeros(shape)
    val = mx.nd.ones((3, 2))
    x[:, 1:3, 1] = val
    x_np = np.zeros(shape, dtype=x.dtype)
    x_np[:, 1:3, 1] = val.asnumpy()
    assert same(x.asnumpy(), x_np)

    x = mx.nd.zeros(shape)
    x[:, 1:3, 1] = 1
    x_np = np.zeros(shape, dtype=x.dtype)
    x_np[:, 1:3, 1:2] = 1
    assert same(x.asnumpy(), x_np)
def test_ndarray_elementwisesum():
    ones = mx.nd.ones((10,), dtype=np.int32)
    res = mx.nd.ElementWiseSum(ones, ones*2, ones*4, ones*8)
    assert same(res.asnumpy(), ones.asnumpy()*15)