Example #1
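The snippets below are MegEngine test and utility code collected around mge.Tensor. They share a common preamble, sketched here; the exact module paths for Grad, TensorWeakRef, apply, and the elementwise helpers mirror MegEngine's internal test suite and should be treated as assumptions rather than stable public API (individual examples additionally pull in megengine.jit, megengine.traced_module as tm, megengine.autodiff as ad, and megengine.optimizer):

import gc
import weakref

import numpy as np

import megengine
import megengine as mge
import megengine.functional as F
import megengine.module as M
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.core2 import TensorWeakRef, apply
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise, Identity


def save_to(self, name="grad"):
    # gradient callback: store the incoming gradient on the tensor as `.grad`
    def callback(grad):
        setattr(self, name, grad)

    return callback


def _elwise(mode):
    # turn a builtin Elemwise mode into a plain callable
    op = Elemwise(mode)

    def f(*args):
        (result,) = apply(op, *args)
        return result

    return f


mul = _elwise(Elemwise.Mode.MUL)
relu = _elwise(Elemwise.Mode.RELU)
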
def test_release():
    def check(f):
        n = 0
        d = None
        gc.disable()
        try:
            for i in range(3):
                f()
                m = len(gc.get_objects())
                d = m - n
                n = m
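            # after the warm-up iteration, the live-object count must stop growing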
            assert d == 0
        finally:
            gc.enable()

    x = mge.Tensor([0.0])
    dy = mge.Tensor(np.ones_like(x.numpy()))

    @check
    def _():
        g = Grad().wrt(x)
        y = x * x
        g(y, dy)

    @check
    def _():
        with Grad().wrt(x):
            pass

    @check
    def _():
        with Grad().wrt(x):
            y = x * x
Example #2
def test_param_pack_concat():
    a = mge.Tensor(np.ones((1,), np.int32))
    b = mge.Tensor(np.ones((3, 3), np.int32))
    offsets_val = [0, 1, 1, 10]
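    # offsets are [begin, end) element ranges in the packed buffer: a -> [0, 1), b -> [1, 10)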
    offsets = mge.Tensor(offsets_val, np.int32)
    c = param_pack_concat([a, b], offsets, offsets_val)
    assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())
Example #3
def test_elemwise_add():
    x_np = np.random.rand(10).astype("float32")
    y_np = np.random.rand(10, 10).astype("float32")
    dz_np = np.random.rand(10, 10).astype("float32")
    x = mge.Tensor(x_np)
    y = mge.Tensor(y_np)
    dz = mge.Tensor(dz_np)

    refs = {}

    def f(x, y):
        x = x * 2
        refs["x"] = TensorWeakRef(x)
        refs["y"] = TensorWeakRef(y)
        return x + y

    grad = Grad().wrt(x, callback=save_to(x))

    z = f(x, y)
    del y

    # grad bookkeeping must not keep the captured intermediates alive
    for _, r in refs.items():
        assert r() is None

    grad(z, dz)
    np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
Example #4
def test_grad_with_tensor_wrapper():
    x_np = np.random.rand(10).astype("float32")
    x = mge.Tensor(x_np)

    with Grad() as grad:
        grad.wrt(x, callback=save_to(x))
        y = mul(x, x)
        y = mul(y, y)
        grad(y, mge.Tensor(np.ones_like(x_np)))

    np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
Example #5
def test_grad_inplace():
    x_np = np.random.rand(10).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))

    y = mul(x, x)
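    # in-place multiply: autodiff must track the mutated tensor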
    y *= y

    grad(y, mge.Tensor(np.ones_like(x_np)))
    np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np**3, decimal=6)
Example #6
def test_identity():
    x_np = np.random.rand(10).astype("float32")
    x = mge.Tensor(x_np)
    dy_np = np.random.rand(*x.shape).astype("float32")
    dy = mge.Tensor(dy_np)

    grad = Grad().wrt(x, callback=save_to(x))

    (y,) = apply(Identity(), x)
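    # backward of Identity should pass dy through unchanged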

    grad(y, dy)
    np.testing.assert_array_equal(x.grad.numpy(), dy_np)
Example #7
def __init__(self, transpose=False):
    super().__init__()
    self.transpose = transpose
    self.data = np.random.random((10, 100)).astype(np.float32)
    weight = np.random.random((200, 100) if transpose else (100, 200))
    self.linear_weight = mge.Tensor(weight)
    self.bn = M.BatchNorm1d(200)
Example #8
def __init__(self, conv_cls, bn_cls):
    super().__init__()
    self.conv = conv_cls(3, 3, 1, 1, 0)
    self.bn = bn_cls(3)
    self.conv2 = conv_cls(3, 3, 1, 1, 0)
    self.bn2 = bn_cls(3)
    self.scale = mge.Tensor([3, 4])
Example #9
def run():
    # args, inp_dict, data, func and output_dict come from the enclosing scope
    if not args.embed_input:
        for key in inp_dict:
            inp_dict[key].set_value(mge.Tensor(data[key])._dev_tensor())
    func.execute()
    func.wait()
    return [oup_node.get_value().numpy() for oup_node in output_dict.values()]
Example #10
def __init__(self, *args):
    # touch each device so its allocator exists before the baseline is taken
    for d in args:
        mge.Tensor([], device=d)
    gc.collect()
    mge._full_sync()
    self.baseline = {d: mge.device.get_allocated_memory(d) for d in args}
    for d in args:
        mge.device.reset_max_memory_stats(d)
Example #11
def test_resize():
    x_np = np.random.rand(3, 3, 32, 32).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))
    y = F.resize(x, (16, 16))
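    # downsampling 32 -> 16 spreads each unit of output gradient over its source pixels: 1/4 each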

    grad(y, F.ones_like(y))
    np.testing.assert_equal(np.ones(x_np.shape, dtype=np.float32) / 4, x.grad.numpy())
Example #12
def test_Broadcast():
    x_np = np.random.rand(3, 3, 1).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))
    y = F.broadcast_to(x, (3, 3, 10))
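    # broadcast fans each input element out to 10 positions; backward sums them up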

    grad(y, F.ones_like(y))
    np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
Example #13
def test_Reduce_sum():
    x_np = np.random.rand(3, 3).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))
    y = x.sum(axis=0)
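    # backward of sum broadcasts the incoming ones over the reduced axis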

    grad(y, F.ones_like(y))
    np.testing.assert_equal(np.ones((3, 3), dtype=np.float32), x.grad.numpy())
Example #14
def tensor_mge(batch, check_on=True):
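    # check_on=True: wrap numpy arrays as MegEngine tensors; otherwise unwrap tensors back to numpy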
    if check_on:
        for k, v in batch.items():
            if isinstance(v, np.ndarray):
                batch[k] = mge.Tensor(v)
    else:
        for k, v in batch.items():
            batch[k] = v.numpy()
    return batch
Example #15
def test_interpolate_fastpath():
    x_np = np.random.rand(3, 3, 32, 32).astype("float32")
    x = mge.Tensor(x_np)

    with Grad() as grad:
        grad.wrt(x, callback=save_to(x))
        y = F.vision.interpolate(x, size=(16, 16), mode="bilinear")
        grad(y, F.ones_like(y))

    np.testing.assert_equal(np.ones(x_np.shape, dtype=np.float32) / 4, x.grad.numpy())
Example #16
def test_subgraph_jit_backward():
    x_np = np.random.rand(3, 4, 5).astype("float32")
    x1 = megengine.Tensor(x_np)
    x2 = megengine.Tensor(x_np)
    mul = _get_mul_fn(x1.dtype, x1.device)
    gm = GradManager()
    gm.attach([x1, x2])
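    # exercise backward through the builtin mul alone, both muls together, and the subgraph-JIT mul alone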
    with gm:
        y1 = x1 * x1
        y2 = mul(x2, x2)
        gm.backward(y1)
    with gm:
        y1 = x1 * x1
        y2 = mul(x2, x2)
        gm.backward(y1 + y2)
    with gm:
        y1 = x1 * x1
        y2 = mul(x2, x2)
        gm.backward(y2)
Example #17
def test_elemwise_relu():
    x_np = [1.0, -1.0]
    dz_np = [1.0]
    x = mge.Tensor(x_np)
    dz = mge.Tensor(dz_np)

    refs = {}

    def f(x):
        x = x * 2
        refs["x"] = TensorWeakRef(x)
        return relu(x)

    with Grad() as grad:
        grad.wrt(x, callback=save_to(x))
        z = f(x)
        assert refs["x"]() is None
        grad(z, dz)

    np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
Example #18
def test_borrow():
    memstat = MemStat("xpux:0", "xpux:1")

    x_np = np.random.randint(2 ** 30, size=(1 * 1024 * 1024,), dtype="int32")
    unit = x_np.size * 4
    x0 = mge.Tensor(x_np, device="xpux:0")
    x1 = x0.to("xpux:1", _borrow=True)
    y = -x1
    np.testing.assert_equal(-x_np, y.numpy())

    mge._full_sync()
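    # _borrow=True should alias the storage across devices instead of copying it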
    assert memstat.get_max("xpux:0") / unit < 2.1
Example #19
def test_prelu(shape, use_symbolic):
    old_flag = set_symbolic_shape(use_symbolic)
    data = np.random.random(size=shape)

    num_channel = 1 if len(shape) == 1 else shape[1]
    prelu = PReLU(num_parameters=num_channel, init=0.25)
    output = prelu(mge.Tensor(data))

    # numpy reference: PReLU(x) = max(x, 0) + weight * min(x, 0)
    np_output = np.maximum(data, 0) + prelu.weight.numpy() * np.minimum(data, 0)
    set_symbolic_shape(old_flag)

    np.testing.assert_allclose(output.numpy(), np_output, atol=1e-5)
Example #20
def test_backward_fold_scale(conv_cls, bn_cls):
    module = MyModule(conv_cls, bn_cls)
    module.eval()
    inp = mge.Tensor(np.random.random((1, 3, 32, 32)))
    desired = module(inp)
    traced_net = tm.trace_module(module, inp)

    traced_net = traced_net.flatten()
    optimized_net = tm.optimize(traced_net, "BackwardFoldScale")

    actual = optimized_net(inp)
    np.testing.assert_allclose(desired=desired, actual=actual, atol=1e-4)
    # all scale multiplies should have been folded into the convolutions
    mul_list = optimized_net.graph.get_method_by_type("__mul__").as_list()
    assert len(mul_list) == 0
Example #21
def test_dot():
    x = np.random.rand(2, 2).astype("float32")
    x = mge.Tensor(x)
    u = F.ones((2,))
    v = F.ones((2,))

    with Grad() as grad:
        grad.wrt(x, callback=save_to(x))

        def f(x):
            return F.dot(u, F.matmul(x, v))

        y = f(x)
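        # d/dX of u^T (X v) is u v^T, which is all ones here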
        grad(y, F.ones_like(y))

    np.testing.assert_equal(np.ones((2, 2), dtype=np.float32), x.grad.numpy())
Example #22
def test_fuse_bn(conv_cls, bn_cls):
    module = MyModule(conv_cls, bn_cls)
    module.eval()
    inp = mge.Tensor(np.random.random((1, 3, 32, 32)))
    desired = module(inp)
    traced_net = tm.trace_module(module, inp)

    traced_net = traced_net.flatten()
    optimized_net = tm.optimize(traced_net, "FuseConvBn")

    actual = optimized_net(inp)
    np.testing.assert_allclose(desired=desired, actual=actual, atol=1e-4)
    # all batch norms should have been fused into the convolutions
    bn_list = optimized_net.graph.get_function_by_type(F.batch_norm).as_list()
    assert len(bn_list) == 0

    bn_list = optimized_net.graph.get_module_by_type(M.BatchNorm2d).as_list()
    assert len(bn_list) == 0
Example #23
def dump_static_graph(model, graph_name="model.mge"):
    model.eval()
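    # keep box decoding out of the dumped graph; decode the raw outputs in postprocessing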
    model.head.decode_in_inference = False

    data = mge.Tensor(np.random.random((1, 3, 640, 640)))

    @jit.trace(capture_as_const=True)
    def pred_func(data):
        outputs = model(data)
        return outputs

    pred_func(data)
    pred_func.dump(
        graph_name,
        arg_names=["data"],
        optimize_for_inference=True,
        enable_fuse_conv_bias_nonlinearity=True,
    )
Example #24
def get_flow_mge(H_mat_mul, patch_indices, image_size_h=600, image_size_w=800):
    # (N, 6, 3, 3)
    batch_size = H_mat_mul.shape[0]
    divide = H_mat_mul.shape[1]
    H_mat_mul = mge.Tensor(H_mat_mul.reshape(batch_size, divide, 3, 3))

    small_patch_sz = [image_size_h // divide, image_size_w]
    small = 1e-7

    H_mat_pool = F.zeros((batch_size, image_size_h, image_size_w, 3, 3))
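    # fill a per-pixel 3x3 homography map, one horizontal strip at a time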

    for i in range(divide):
        H_mat = H_mat_mul[:, i, :, :]

        if i == divide - 1:
            H_mat = F.broadcast_to(
                F.expand_dims(F.expand_dims(H_mat, 1), 1),
                (batch_size, image_size_h - i * small_patch_sz[0], image_size_w, 3, 3),
            )
            H_mat_pool[:, i * small_patch_sz[0]:, ...] = H_mat
            continue

        H_mat = F.broadcast_to(
            F.expand_dims(F.expand_dims(H_mat, 1), 1),
            (batch_size, small_patch_sz[0], image_size_w, 3, 3),
        )
        H_mat_pool[:, i * small_patch_sz[0]:(i + 1) * small_patch_sz[0], ...] = H_mat

    pred_I2_index_warp = F.expand_dims(patch_indices.transpose(0, 2, 3, 1), 4)
    pred_I2_index_warp = F.matmul(H_mat_pool, pred_I2_index_warp)
    pred_I2_index_warp = pred_I2_index_warp[:, :, :, :, 0].transpose(0, 3, 1, 2)
    T_t = pred_I2_index_warp[:, 2:3, ...]
    smallers = 1e-6
    T_t = T_t + smallers  # keep the perspective divide away from zero
    v1 = pred_I2_index_warp[:, 0:1, ...]
    v2 = pred_I2_index_warp[:, 1:2, ...]
    v1 = v1 / T_t
    v2 = v2 / T_t
    warp_index = F.concat((v1, v2), 1)
    vgrid = patch_indices[:, :2, ...]

    flow = warp_index - vgrid
    return flow
Example #25
def test_addAxis():
    x_np = np.random.rand(3, 3).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))

    refs = {}

    def f(x):
        x = x * 1
        y = F.expand_dims(x, [2, 3])
        refs["x"] = TensorWeakRef(x)
        return y

    y = f(x)
    for _, r in refs.items():
        assert r() is None

    grad(y, F.ones_like(y))
    np.testing.assert_equal(np.ones((3, 3), dtype=np.float32), x.grad.numpy())
Example #26
def test_reshape():
    x_np = np.random.rand(2, 5).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))

    refs = {}

    def f(x):
        x = x * 1
        y = x.reshape(5, 2)
        refs["x"] = TensorWeakRef(x)
        return y

    y = f(x)
    for _, r in refs.items():
        assert r() is None

    grad(y, F.ones_like(y))
    np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
Example #27
def test_removeAxis():
    x_np = np.random.rand(3, 3, 1, 1).astype("float32")
    x = mge.Tensor(x_np)

    with Grad() as grad:
        grad.wrt(x, callback=save_to(x))
        refs = {}

        def f(x):
            x = x * 1
            y = F.squeeze(x, [2, 3])
            refs["x"] = TensorWeakRef(x)
            return y

        y = f(x)
        for _, r in refs.items():
            assert r() is None
        grad(y, F.ones_like(y))

    np.testing.assert_equal(np.ones((3, 3, 1, 1), dtype=np.float32), x.grad.numpy())
Example #28
def test_attach_temporary():
    w = mge.Parameter(2.0)
    gm = GradManager()
    gm.attach(w)

    def cb(x, g):
        assert x is ref()
        cb.called = True

    for i in range(3):
        with gm:
            cb.called = False
            x = mge.Tensor(i, dtype="float32")
            gm.attach(x, callbacks=cb)
            ref = weakref.ref(x)
            y = x * w
            gm.backward(y)
            assert cb.called
        del x
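        # nothing in gm should keep x alive once the recording block ends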
        assert ref() is None
Example #29
def test_AxisAddRemove():
    x_np = np.random.rand(1, 5).astype("float32")
    x = mge.Tensor(x_np)

    grad = Grad().wrt(x, callback=save_to(x))

    refs = {}

    def f(x):
        x = x * 1
        y = F.squeeze(F.expand_dims(x, 2), 0)
        refs["x"] = TensorWeakRef(x)
        return y

    y = f(x)
    for _, r in refs.items():
        assert r() is None

    grad(y, F.ones_like(y))
    np.testing.assert_equal(np.array([[1, 1, 1, 1, 1]], dtype=np.float32),
                            x.grad.numpy())
Example #30
def run_frozen_bn(BNModule, use_trace=False, use_symbolic=False):
    nchannel = 3
    m = BNModule(nchannel, freeze=True)
    var = 4.0
    bias = 1.0
    shape = (1, nchannel, 1, 1)
    m.running_var[...] = var * F.ones(shape)
    m.running_mean[...] = bias * F.ones(shape)

    saved_var = m.running_var.numpy()
    saved_mean = m.running_mean.numpy()
    saved_wt = m.weight.numpy()
    saved_bias = m.bias.numpy()

    gm = ad.GradManager().attach(m.parameters())
    optim = optimizer.SGD(m.parameters(), lr=1.0)
    optim.clear_grad()

    data = np.random.random((6, nchannel, 2, 2)).astype("float32")

    def train_fn(d):
        for _ in range(3):
            with gm:
                loss = m(d).mean()
                gm.backward(loss)
            optim.step()
        return loss

    if use_trace:
        train_fn = trace(train_fn, symbolic=use_symbolic)

    for _ in range(3):
        loss = train_fn(megengine.Tensor(data))
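        # freeze=True: running statistics and affine parameters must stay untouched by training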
        np.testing.assert_equal(m.running_var.numpy(), saved_var)
        np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
        np.testing.assert_equal(m.weight.numpy(), saved_wt)
        np.testing.assert_equal(m.bias.numpy(), saved_bias)
        np.testing.assert_almost_equal(loss.numpy(),
                                       ((data - bias) / np.sqrt(var)).mean(),
                                       5)