Example #1
def test_with_random_seed():
    ctx = mx.context.current_context()
    size = 100
    shape = (size,)

    def check_same(x, y, name):
        assert same(x, y), \
            "%s rng should give the same result with the same seed" % name

    def check_diff(x, y, name):
        assert not same(x, y), \
            "%s rng should give different results with different seeds" % name

    # generate python, numpy and mxnet datasets with the given seed
    def gen_data(seed=None):
        with random_seed(seed):
            python_data = [rnd.random() for _ in range(size)]
            np_data = np.random.rand(size)
            mx_data = mx.nd.random_uniform(shape=shape, ctx=ctx).asnumpy()
        return (seed, python_data, np_data, mx_data)

    # check data, expecting them to be the same or different based on the seeds
    def check_data(a, b):
        seed_a = a[0]
        seed_b = b[0]
        if seed_a == seed_b and seed_a is not None:
            check_same(a[1], b[1], 'python')
            check_same(a[2], b[2], 'numpy')
            check_same(a[3], b[3], 'mxnet')
        else:
            check_diff(a[1], b[1], 'python')
            check_diff(a[2], b[2], 'numpy')
            check_diff(a[3], b[3], 'mxnet')

    # 5 seed settings, including a duplicate of seed 1 and the randomizing seed None
    seeds = [1, 2, 1, None, None]
    data = [gen_data(seed) for seed in seeds]

    # Add more complicated test case scenarios
    with random_seed(1):
        seeds.append(None)
        data.append(gen_data(None))
    with random_seed(2):
        seeds.append(None)
        data.append(gen_data(None))
    with random_seed():
        seeds.append(1)
        data.append(gen_data(1))
    with random_seed():
        seeds.append(2)
        data.append(gen_data(2))
    with random_seed(1):
        seeds.append(2)
        data.append(gen_data(2))

    num_seeds = len(seeds)
    for i in range(0, num_seeds-1):
        for j in range(i+1, num_seeds):
            check_data(data[i], data[j])
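All of these snippets lean on a random_seed context manager from MXNet's test
utilities. A minimal sketch of such a helper, assuming its job is to seed
Python's random, NumPy, and MXNet together (drawing a fresh seed when None is
passed) and to re-randomize the generators on exit:

import random as rnd
from contextlib import contextmanager

import numpy as np
import mxnet as mx

@contextmanager
def random_seed(seed=None):
    # Draw the seed used to re-randomize the generators on exit, so code
    # running after the block does not depend on `seed`.
    next_seed = np.random.randint(0, np.iinfo(np.int32).max)
    if seed is None:
        np.random.seed()
        seed = np.random.randint(0, np.iinfo(np.int32).max)
    try:
        rnd.seed(seed)
        np.random.seed(seed)
        mx.random.seed(seed)
        yield
    finally:
        rnd.seed(next_seed)
        np.random.seed(next_seed)
        mx.random.seed(next_seed)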
Example #2
def test_sparse_nd_random():
    """ test sparse random operator on cpu """
    # gpu random operator doesn't use fixed seed
    if default_context().device_type is 'gpu':
        return
    shape = (100, 100)
    fns = [mx.nd.random.uniform, mx.nd.random.normal, mx.nd.random.gamma]
    for fn in fns:
        rsp_out = mx.nd.zeros(shape=shape, stype='row_sparse')
        dns_out = mx.nd.zeros(shape=shape, stype='default')
        with random_seed(0):
            fn(shape=shape, out=dns_out)
        with random_seed(0):
            fn(shape=shape, out=rsp_out)
        assert_almost_equal(dns_out.asnumpy(), rsp_out.asnumpy())
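A quick usage check for the pattern above (hypothetical values): re-entering
the context with the same seed replays the same RNG stream, so two draws of
the same operator should match exactly on CPU.

with random_seed(42):
    a = mx.nd.random.uniform(shape=(3, 3))
with random_seed(42):
    b = mx.nd.random.uniform(shape=(3, 3))
assert (a.asnumpy() == b.asnumpy()).all()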
Example #4
def data(self):
    # Seeding makes repeated calls return identical data, so metric results
    # computed from this pred/label pair are reproducible across runs.
    with random_seed(0):
        pred = mx.nd.random_uniform(0.0, 1.0, (self.n, self.c),
                                    ctx=self.pred_ctx)
        label = mx.nd.random_uniform(0.0, 1.0, (self.n, self.c),
                                     ctx=self.label_ctx)
        return label, pred
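The method above is an excerpt: it assumes the enclosing class provides n, c,
pred_ctx and label_ctx. A hypothetical fixture showing the minimal context it
needs:

class MetricDataFixture:
    # Hypothetical enclosing class; only the four attributes below are
    # actually used by the data() method above.
    def __init__(self, n=64, c=10, pred_ctx=mx.cpu(), label_ctx=mx.cpu()):
        self.n = n                  # number of samples
        self.c = c                  # number of classes
        self.pred_ctx = pred_ctx    # device holding predictions
        self.label_ctx = label_ctx  # device holding labels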
Example #5
def gen_data(seed=None):
    with random_seed(seed):
        python_data = [rnd.random() for _ in range(size)]
        np_data = np.random.rand(size)
        mx_data = mx.nd.random_uniform(shape=shape, ctx=ctx).asnumpy()
    return (seed, python_data, np_data, mx_data)
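This is the gen_data helper excerpted from Example #1, so it relies on size,
shape and ctx from the enclosing scope. Assuming those are defined, a quick
check of its contract:

# Sketch: identical explicit seeds must yield identical datasets.
_, py_a, np_a, mx_a = gen_data(7)
_, py_b, np_b, mx_b = gen_data(7)
assert py_a == py_b
assert (np_a == np_b).all() and (mx_a == mx_b).all()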
Example #6
def test_cuda_graphs():
    class GraphTester(gluon.HybridBlock):
        def __init__(self, function_to_test, **kwargs):
            super(GraphTester, self).__init__(**kwargs)
            self.f = function_to_test()

        def forward(self, *args):
            # Copy the inputs (and the outputs below) so that the tested
            # operation lies fully inside the captured region, as CUDA graphs
            # require
            copied_args = [mx.np.copy(a) for a in args]
            outputs = self.f(*copied_args)
            if isinstance(outputs, (list, tuple)):
                return [mx.np.copy(o) for o in outputs]
            else:
                return mx.np.copy(outputs)

    class TestDesc:
        def __init__(self, name, f, num_inputs=1, input_dim=4):
            self.name = name
            self.f = f
            self.num_inputs = num_inputs
            self.input_dim = input_dim

        def generate_inputs(self):
            shape = tuple(_np.random.randint(4, 11, size=self.input_dim))
            ret = [mx.np.random.uniform(size=shape) for _ in range(self.num_inputs)]
            for r in ret:
                r.attach_grad()
            return ret

    tested_ops = [
            TestDesc('add', lambda: (lambda x, y: x + y), num_inputs = 2),
            TestDesc('add_scalar', lambda: (lambda x: x + 0.5)),
            TestDesc('Conv', lambda: mx.gluon.nn.Conv2D(channels=32, kernel_size=(1,1))),
            TestDesc('ConvTranspose', lambda: mx.gluon.nn.Conv2DTranspose(channels=32, kernel_size=(1,1))),
            TestDesc('Dense', lambda: mx.gluon.nn.Dense(units=128)),
            TestDesc('Activation', lambda: mx.gluon.nn.Activation('tanh')),
            TestDesc('Dropout', lambda: mx.gluon.nn.Dropout(0.5)),
            TestDesc('Flatten', lambda: mx.gluon.nn.Flatten()),
            TestDesc('MaxPool', lambda: mx.gluon.nn.MaxPool2D()),
            TestDesc('AvgPool', lambda: mx.gluon.nn.AvgPool2D()),
            TestDesc('GlobalMaxPool', lambda: mx.gluon.nn.GlobalMaxPool2D()),
            TestDesc('GlobalAvgPool', lambda: mx.gluon.nn.GlobalAvgPool2D()),
            TestDesc('ReflectionPad2D', lambda: mx.gluon.nn.ReflectionPad2D()),
            TestDesc('BatchNorm', lambda: mx.gluon.nn.BatchNorm()),
            TestDesc('InstanceNorm', lambda: mx.gluon.nn.InstanceNorm()),
            TestDesc('LayerNorm', lambda: mx.gluon.nn.LayerNorm()),
            TestDesc('LeakyReLU', lambda: mx.gluon.nn.LeakyReLU(0.1)),
            TestDesc('PReLU', lambda: mx.gluon.nn.PReLU()),
            TestDesc('ELU', lambda: mx.gluon.nn.ELU()),
            TestDesc('SELU', lambda: mx.gluon.nn.SELU()),
            TestDesc('Swish', lambda: mx.gluon.nn.Swish()),
        ]

    N = 10

    with environment({'MXNET_ENABLE_CUDA_GRAPHS': '1',
                      'MXNET_USE_FUSION': '0'}):
        device = mx.gpu(0)
        for test_desc in tested_ops:
            print("Testing ", test_desc.name)
            inputs = test_desc.generate_inputs()
            inputsg = [i.copy() for i in inputs]
            for i in inputsg:
                i.attach_grad()
            seed = random.randint(0, 10000)
            net = GraphTester(test_desc.f)
            netg = GraphTester(test_desc.f)

            # initialize parameters
            net.initialize(device=device)
            netg.initialize(device=device)

            net(*inputs)

            for p1, p2 in zip(net.collect_params().values(), netg.collect_params().values()):
                p2.set_data(p1.data())

            netg.hybridize(static_alloc=True, static_shape=True)

            print("Testing inference mode")
            with random_seed(seed):
                for _ in range(N):
                    assert_almost_equal(net(*inputs), netg(*inputsg))

            mx.npx.waitall()
            print("Testing training mode")
            for _ in range(N):
                with random_seed(seed):
                    with mx.autograd.record():
                        out = net(*inputs)
                    out.backward()

                with random_seed(seed):
                    with mx.autograd.record():
                        outg = netg(*inputsg)
                    outg.backward()

                assert_almost_equal(out, outg)
                for i, ig in zip(inputs, inputsg):
                    assert_almost_equal(i.grad, ig.grad)

                for p1, p2 in zip(net.collect_params().values(), netg.collect_params().values()):
                    assert_almost_equal(p1.data(), p2.data())
                    if p1.grad_req != 'null':
                        assert_almost_equal(p1.grad(), p2.grad())
            mx.npx.waitall()
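Example #6 additionally assumes an environment context manager for toggling
environment variables (MXNet ships a similar utility). A minimal sketch that
sets the given variables and restores the previous values on exit:

import os
from contextlib import contextmanager

@contextmanager
def environment(env_vars):
    # Remember the old values (None means the variable was unset).
    old = {k: os.environ.get(k) for k in env_vars}
    os.environ.update(env_vars)
    try:
        yield
    finally:
        for k, v in old.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v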