Example #1
def test_fill():
    a = Tensor(np.zeros((2, 3), dtype=np.float32))
    a.fill(3)
    np.testing.assert_allclose(a.numpy(), np.full((2, 3), 3, dtype=np.float32))
    a.fill(124.568)
    np.testing.assert_allclose(a.numpy(),
                               np.full((2, 3), 124.568, dtype=np.float32))
Example #2
def test_ShuffleRNG():
    g = []

    def cb(grad):
        g.append(grad)

    n, m = 6, 3
    arr = np.arange(n * m)
    out0 = Tensor(arr, dtype="float32")
    with Grad() as grad:
        grad.wrt(out0, callback=cb)
        random.shuffle(out0)
        grad(out0, F.ones_like(out0))
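    # RNGs seeded identically should produce the same shuffle even on
    # different devices; a different seed should produce a different order.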
    m1 = RNG(seed=111, device="xpu0")
    m2 = RNG(seed=111, device="xpu1")
    m3 = RNG(seed=222, device="xpu0")
    out1 = Tensor(arr, dtype="float32", device="xpu0")
    out2 = Tensor(arr, dtype="float32", device="xpu1")
    out3 = Tensor(arr, dtype="float32", device="xpu0")
    m1.shuffle(out1)
    m2.shuffle(out2)
    m3.shuffle(out3)

    np.testing.assert_allclose(out1.numpy(), out2.numpy(), atol=1e-6)
    assert out1.device == "xpu0" and out2.device == "xpu1"
    assert not (out1.numpy() == out3.numpy()).all()

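    # shuffling a 2-D tensor keeps its shape; the check below handles both
    # static (tuple) and symbolic (Tensor) shape results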
    out = Tensor(arr, dtype="float32").reshape(n, m)
    m1.shuffle(out)

    out_shp = out.shape
    if isinstance(out_shp, tuple):
        assert out_shp == (n, m)
    else:
        assert all(out.shape.numpy() == np.array([n, m]))
Example #3
def test_tensor_serialization():
    with TemporaryFile() as f:
        data = np.random.randint(low=0, high=7, size=[233])
        a = Tensor(data, device="cpu0", dtype=np.int32)
        mge.save(a, f)
        f.seek(0)
        b = mge.load(f)
        np.testing.assert_equal(b.numpy(), data)
        assert b.device.logical_name == "cpu0:0"
        assert b.dtype == np.int32

    with TemporaryFile() as f:
        a = Parameter(np.random.random(size=(233, 2)).astype(np.float32))
        mge.save(a, f)
        f.seek(0)
        b = mge.load(f)
        assert isinstance(b, Parameter)
        np.testing.assert_equal(a.numpy(), b.numpy())

    with TemporaryFile() as f:
        a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
        mge.save(a, f)
        f.seek(0)
        b = mge.load(f)
        assert type(b) is Tensor
        np.testing.assert_equal(a.numpy(), b.numpy())

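    # map_location="cpux" loads the tensor onto a CPU device regardless of
    # where it was saved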
    with TemporaryFile() as f:
        a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
        mge.save(a, f)
        f.seek(0)
        b = mge.load(f, map_location="cpux")
        assert type(b) is Tensor
        assert "cpu" in str(b.device)
        np.testing.assert_equal(a.numpy(), b.numpy())

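    # a dict-style map_location remaps specific devices ("gpu0" -> "cpu0");
    # this branch only runs when CUDA is available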
    with TemporaryFile() as f:
        if mge.is_cuda_available():
            device_org = mge.get_default_device()
            mge.set_default_device("gpu0")
            a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
            mge.save(a, f)
            f.seek(0)
            mge.set_default_device("cpux")
            b = mge.load(f, map_location={"gpu0": "cpu0"})
            assert type(b) is Tensor
            assert "cpu0" in str(b.device)
            np.testing.assert_equal(a.numpy(), b.numpy())
            mge.set_default_device(device_org)

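    # quantization parameters attached to a Tensor are preserved by save/load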
    with TemporaryFile() as f:
        a = Tensor(0)
        a.qparams.scale = Tensor(1.0)
        mge.save(a, f)
        f.seek(0)
        b = mge.load(f)
        assert isinstance(b.qparams.scale, Tensor)
        np.testing.assert_equal(b.qparams.scale.numpy(), 1.0)
Example #4
def test_tensor_serialization():
    def tensor_eq(a, b):
        assert a.dtype == b.dtype
        assert a.device == b.device
        np.testing.assert_equal(a.numpy(), b.numpy())

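    # Tensor and Parameter also round-trip through plain pickle, not just
    # mge.save / mge.load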
    with TemporaryFile() as f:
        data = np.random.randint(low=0, high=7, size=[233])
        a = Tensor(data, device="xpux", dtype=np.int32)
        pickle.dump(a, f)
        f.seek(0)
        b = pickle.load(f)
        np.testing.assert_equal(a.numpy(), b.numpy())

    with TemporaryFile() as f:
        a = Parameter(np.random.random(size=(233, 2)).astype(np.float32))
        pickle.dump(a, f)
        f.seek(0)
        b = pickle.load(f)
        assert isinstance(b, Parameter)
        np.testing.assert_equal(a.numpy(), b.numpy())

    with TemporaryFile() as f:
        a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
        pickle.dump(a, f)
        f.seek(0)
        b = pickle.load(f)
        assert type(b) is Tensor
        np.testing.assert_equal(a.numpy(), b.numpy())

    with TemporaryFile() as f:
        a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
        mge.save(a, f)
        f.seek(0)
        b = mge.load(f, map_location="cpux")
        assert type(b) is Tensor
        assert "cpu" in str(b.device)
        np.testing.assert_equal(a.numpy(), b.numpy())

    with TemporaryFile() as f:
        if mge.is_cuda_available():
            device_org = mge.get_default_device()
            mge.set_default_device("gpu0")
            a = Tensor(np.random.random(size=(2, 233)).astype(np.float32))
            mge.save(a, f)
            f.seek(0)
            mge.set_default_device("cpux")
            b = mge.load(f, map_location={"gpu0": "cpu0"})
            assert type(b) is Tensor
            assert "cpu0" in str(b.device)
            np.testing.assert_equal(a.numpy(), b.numpy())
            mge.set_default_device(device_org)
Example #5
def test_PoissonRNG():
    m1 = RNG(seed=111, device="xpu0")
    m2 = RNG(seed=111, device="xpu1")
    m3 = RNG(seed=222, device="xpu0")
    lam = Tensor([[2, 3, 4], [9, 10, 11]], dtype=np.float32)
    out1 = m1.poisson(lam.to("xpu0"), size=(100, ))
    out2 = m2.poisson(lam.to("xpu1"), size=(100, ))
    out3 = m3.poisson(lam.to("xpu0"), size=(100, ))

    np.testing.assert_allclose(out1.numpy(), out2.numpy(), atol=1e-6)
    assert out1.device == "xpu0" and out2.device == "xpu1"
    assert not (out1.numpy() == out3.numpy()).all()

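    # the output shape is the requested size prepended to lam's shape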
    out = m1.poisson(lam.to("xpu0"), size=(20, 30))
    out_shp = out.shape
    expected_shape = (20, 30) + lam._tuple_shape
    if isinstance(out_shp, tuple):
        assert out_shp == expected_shape
    else:
        assert all(out.shape.numpy() == np.array(expected_shape))
    lam = lam.numpy()

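    # statistical sanity check: the sample mean should be close to lam and the
    # standard deviation close to sqrt(lam)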
    assert (np.abs(out.mean(axis=(0, 1)).numpy() - lam) /
            np.sqrt(lam)).mean() < 0.1
    assert np.abs(np.std(out.numpy(), axis=(0, 1)) - np.sqrt(lam)).mean() < 0.1
Example #6
def test_training_converge(test_traced_module):
    net = XORNet()
    if test_traced_module:
        inp = Tensor(np.random.random((14, 2)))
        net = trace_module(net, inp)
    opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    gm = ad.GradManager().attach(net.parameters())

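    # traced training step: forward pass, cross-entropy loss, backward pass,
    # then gradient clipping by norm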
    @trace(symbolic=False)
    def train(data, label):
        with gm:
            pred = net(data)
            loss = F.nn.cross_entropy(pred, label)
            gm.backward(loss)
            optim.clip_grad_norm(net.parameters(), max_norm=0.2, ord=2.0)
        return loss

    def infer(data):
        return net(data)

    train_dataset = minibatch_generator()
    losses = []

    for data, label in itertools.islice(train_dataset, 2000):
        data = Tensor(data, dtype=np.float32)
        label = Tensor(label, dtype=np.int32)
        opt.clear_grad()
        loss = train(data, label)
        optim.clip_grad_value(net.parameters(), lower=-0.1, upper=0.1)
        opt.step()
        losses.append(loss.numpy())
    assert (np.mean(losses[-100:]) <
            0.1), "Final training loss must be low enough, got {}".format(
                np.mean(losses[-100:]))

    ngrid = 10
    x = np.linspace(-1.0, 1.0, ngrid)
    xx, yy = np.meshgrid(x, x)
    xx = xx.reshape((ngrid * ngrid, 1))
    yy = yy.reshape((ngrid * ngrid, 1))
    data = mge.tensor(np.concatenate((xx, yy), axis=1).astype(np.float32))
    pred = infer(data)
    precision = calculate_precision(data.numpy(), pred.numpy())
    assert precision == 1.0, "Test precision must be high enough, got {}".format(
        precision)
Example #7
def test_training_converge_with_drop():
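    # "enable_drop" presumably lets the imperative runtime release (and later
    # recompute) intermediate tensors; training should still converge with it on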
    set_option("enable_drop", 1)
    net = XORNet()
    opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
    gm = ad.GradManager().attach(net.parameters())

    def train(data, label):
        with gm:
            pred = net(data)
            loss = F.nn.cross_entropy(pred, label)
            gm.backward(loss)
        return loss

    def infer(data):
        return net(data)

    train_dataset = minibatch_generator()
    losses = []

    for data, label in itertools.islice(train_dataset, 2000):
        data = Tensor(data, dtype=np.float32)
        label = Tensor(label, dtype=np.int32)
        opt.clear_grad()
        loss = train(data, label)
        opt.step()
        losses.append(loss.numpy())

    assert np.mean(
        losses[-100:]) < 0.1, "Final training loss must be low enough"

    ngrid = 10
    x = np.linspace(-1.0, 1.0, ngrid)
    xx, yy = np.meshgrid(x, x)
    xx = xx.reshape((ngrid * ngrid, 1))
    yy = yy.reshape((ngrid * ngrid, 1))
    data = mge.tensor(np.concatenate((xx, yy), axis=1).astype(np.float32))

    pred = infer(Tensor(data)).numpy()
    precision = calculate_precision(data.numpy(), pred)
    assert precision == 1.0, "Test precision must be high enough, got {}".format(
        precision)

    set_option("enable_drop", 0)
Example #8
def test_opdef_loader():
    class MyModule1(Module):
        def forward(self, x, y):
            op = Elemwise("ADD")
            return apply(op, x, y)[0]

    m = MyModule1()
    x = Tensor(np.ones((20)))
    y = Tensor(np.ones((20)))
    traced_module = trace_module(m, x, y)
    orig_loader_dict = S.OPDEF_LOADER
    S.OPDEF_LOADER = {}

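    # custom loader: when the pickled Elemwise opdef is ADD, rewrite it to MUL
    # and insert an astype so the second input matches the first input's dtype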
    @register_opdef_loader(Elemwise)
    def add_opdef_loader(expr):
        if expr.opdef_state["mode"] == "ADD":
            expr.opdef_state["mode"] = "MUL"
            node = expr.inputs[1]
            astype_expr = CallMethod(node, "astype")
            oup = TensorNode(
                astype_expr,
                shape=node.shape,
                dtype=expr.inputs[0].dtype,
                qparams=node.qparams,
            )
            astype_expr.set_args_kwargs(node, expr.inputs[0].dtype)
            astype_expr.return_val = (oup, )
            expr.inputs[1] = oup

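    # the registered loader runs during unpickling, so the reloaded graph
    # contains the inserted astype CallMethod followed by the rewritten MUL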
    obj = pickle.dumps(traced_module)
    new_module = pickle.loads(obj)
    _check_id(new_module)
    _check_expr_users(new_module)
    _check_name(new_module.flatten())
    assert (isinstance(new_module.graph._exprs[0], CallMethod)
            and new_module.graph._exprs[1].opdef.mode == "MUL"
            and len(new_module.graph._exprs) == 2)
    result = new_module(x, y)
    np.testing.assert_equal(result.numpy(), x.numpy())
    S.OPDEF_LOADER = orig_loader_dict