def test_almost_equal_shape():
    """A shape mismatch must make assert_almost_equal raise."""
    shape = (2, 2, 3)
    base = np.random.random(shape)
    same = base.copy()
    sliced = base[1]  # drops the leading axis -> different shape
    assert_almost_equal(base, same)
    check_almost_euqal_expection_raise(base, sliced, "No exception raised")
def test_transpose2d():
    """Transpose2D must match NDArray.T for both input layouts."""
    rows, cols = 3, 5
    data = mx.nd.array(np.random.uniform(size=(rows, cols)))
    for contiguous in (False, True):
        transpose = mobula.op.Transpose2D[mx.nd.NDArray]
        result = transpose(data, continuous_input=contiguous)
        assert_almost_equal(result, data.T)
def test_template_1type():
    """Single-type template kernel `maximum` matches mx.nd.maximum per dtype."""
    shape = (2, 3, 4)
    for dtype in (np.int32, np.int64, np.float32, np.float64):
        lhs = mx.nd.random.uniform(0, 100, shape=shape).astype(dtype)
        rhs = mx.nd.random.uniform(0, 100, shape=shape).astype(dtype)
        result = mx.nd.empty(shape, dtype=dtype)
        mobula.func.maximum(lhs.size, lhs, rhs, result)
        assert_almost_equal(mx.nd.maximum(lhs, rhs).asnumpy(), result.asnumpy())
def call_op(i, q):
    """Worker body: run the mobula `add` kernel and report `i` via queue `q`.

    Imports mxnet locally so the function is safe to run in a fresh process.
    """
    import mxnet as mx
    size = 32
    data = mx.random.randint(-100, 100, (size, ))
    result = mx.nd.zeros_like(data)
    mobula.func.add(size, data, i, result)
    assert_almost_equal(result, data + i)
    q.put(i)
def test_infer_type_for_const():
    """A Python scalar constant must be type-inferred to the output dtype."""
    dtypes = [np.int32, np.int64, np.float32, np.float64]
    size = 3
    raw = 39.39
    for dtype in dtypes:
        out = np.empty(size, dtype=dtype)
        # .tolist() yields a native Python scalar already cast to `dtype`
        const = dtype(raw).tolist()
        mobula.func.infer_type_for_const(size, const, out)
        assert_almost_equal(out, const)
def test_template_3type():
    """Three-dtype template kernel: int32 x float32 -> float64 output."""
    shape = (2, 3, 4)
    type_a, type_b, type_c = np.int32, np.float32, np.float64
    lhs = mx.nd.random.uniform(0, 100, shape=shape).astype(type_a)
    rhs = mx.nd.random.uniform(0, 100, shape=shape).astype(type_b)
    out = mx.nd.empty(shape, dtype=type_c)
    mobula.func.maximum_3type(lhs.size, lhs, rhs, out)
    expected = mx.nd.maximum(lhs.astype(type_b), rhs).asnumpy().astype(type_c)
    assert_almost_equal(expected, out.asnumpy())
def test_default_value_op():
    """When the second operand is omitted, the op's `value` default is used."""
    lhs = np.random.random((5, 5))
    rhs = np.random.random((5, 5))
    default = np.random.random((5, 5))
    add = mobula.op.default_add_op[np.ndarray](value=default)
    out = add(lhs, rhs)
    assert_almost_equal(lhs + rhs, out)
    out = add(lhs)  # a+b[default=value]
    assert_almost_equal(lhs + default, out)
def test_mobula_func():
    """`set_and_return` copies its input into the output AND returns it."""
    # skip float temporarily
    dtypes = [np.int32, np.int64]  # , np.float32, np.float64]
    raw = 39.39
    for dtype in dtypes:
        src = np.array([raw], dtype=dtype)
        dst = np.empty(src.shape, dtype=dtype)
        returned = mobula.func.set_and_return(src, dst)
        assert_almost_equal(src, dst)
        assert_almost_equal(src, returned)
def test_softmax1d():
    """1-D Softmax matches both mx.nd.softmax and the textbook formula."""
    size = 20
    data = mx.random.uniform(0, 1, shape=(size, ))
    ours = mobula.op.Softmax(data)
    reference = mx.nd.softmax(data)
    # numerically-stable manual softmax: shift by the max before exp
    shifted_exp = mx.nd.exp(data - data.max())
    manual = shifted_exp / shifted_exp.sum()
    assert_almost_equal(manual, reference, atol=atol)
    assert_almost_equal(ours, reference, atol=atol)
def test_const_template():
    """Const-template kernel fills an array with the given typed constant.

    Fix: removed the dead `value = 3939` initializer — the loop variable
    `value` immediately shadowed it, so it was never read.
    """
    shape = (5, 5)
    cs = [ctypes.c_int, ctypes.c_float, ctypes.c_double]
    vs = [3, 9.9, 3.9]
    atols = [0, 1e-6, 1e-6]  # integers are exact; floats need tolerance
    for ctype, value, atol in zip(cs, vs, atols):
        c_value = ctype(value)
        a = np.empty(shape)
        mobula.func.test_const_template(a.size, c_value, a)
        assert_almost_equal(np.tile(value, shape), a, atol=atol)
def test_atomic_add():
    """GEMM built from atomic adds must match np.dot (float32, atol 1e-3).

    Repeated 10 times with fresh random operands. Fix: removed the unused
    timing variable `tic = time.time()` (never read) and the now-unneeded
    local `import time`. Inputs are rounded to one decimal so accumulated
    atomic-add rounding stays within the tolerance.
    """
    I = U = J = 100
    dtype = np.float32
    for _ in range(10):
        a = np.random.uniform(size=(I, U)).astype(dtype).round(1)
        b = np.random.uniform(size=(U, J)).astype(dtype).round(1)
        out = np.zeros((I, J), dtype=dtype)
        mobula.func.test_atomic_add_by_gemm(U, I, J, a, b, out)
        target = np.dot(a, b)
        assert_almost_equal(out, target, atol=1e-3)
def test_custom_struct():
    """A registered ctypes struct can be passed to a kernel as a plain tuple."""
    class MyStruct(ctypes.Structure):
        _fields_ = [
            ('hello', ctypes.c_int),
            ('mobula', ctypes.c_float),
        ]

    mobula.glue.register_cstruct('MyStruct', MyStruct)
    mobula.op.load('MyStruct', os.path.dirname(__file__))
    result = mobula.func.hello((42, 39))
    assert_almost_equal(result, 42 + 39)
def test_thread():
    """Kernel fills five differently-sized buffers, each with its own stride."""
    n = 300
    # (length, step) pairs for each output buffer
    specs = [(n // 1, 1), (n // 2, 2), (n // 3, 3), (n * 2, 2), (n * 3, 3)]
    buffers = [np.empty(length) for length, _ in specs]
    mobula.func.test_thread(n, *buffers)
    for buf, (length, step) in zip(buffers, specs):
        assert_almost_equal(np.arange(length) * step, buf)
def test_non_c_contiguous():
    """Kernels must handle non-C-contiguous (column-sliced) array views.

    Fix: `flags.c_contiguous == False` comparisons replaced with the
    idiomatic `not ...` form (PEP 8: never compare to booleans with ==).
    """
    a = np.random.random((5, 5))
    b = np.random.random((5, 5))
    c = np.empty((5, 5))
    s = (slice(None), slice(2, 4))  # all rows, columns 2..3 -> strided view
    a_part = a[s]
    b_part = b[s]
    c_part = c[s]
    assert not a_part.flags.c_contiguous
    assert not b_part.flags.c_contiguous
    assert not c_part.flags.c_contiguous
    mobula.func.mul_elemwise(a_part.size, a_part, b_part, c_part)
    assert_almost_equal(a_part * b_part, c_part)
    # the kernel's writes through the view must appear in the base array
    assert_almost_equal(c[s], c_part)
def softmax2d_grad(N, C):
    """Gradient of mobula Softmax matches mx.nd.softmax on an (N, C) batch."""
    x_ours = mx.random.uniform(0, 1, shape=(N, C))
    x_ref = x_ours.copy()
    x_ours.attach_grad()
    x_ref.attach_grad()
    # large out-grad magnifies any gradient discrepancy
    out_grad = mx.random.uniform(0, 1, shape=(N, C)) * 1000
    with mx.autograd.record():
        y_ours = mobula.op.Softmax(x_ours)
    y_ours.backward(out_grad)
    with mx.autograd.record():
        y_ref = mx.nd.softmax(x_ref, axis=-1)
    y_ref.backward(out_grad)
    assert_almost_equal(y_ours, y_ref, atol=atol, rtol=rtol)
    assert_almost_equal(x_ours.grad, x_ref.grad, atol=atol, rtol=rtol)
def test_FocalLoss_mx_cpu():
    """mobula FocalLoss agrees with BCEFocalLoss (value and grad) on CPU."""
    ctx = mx.cpu()
    logits = mx.nd.random.randn(N, N, dtype="float64", ctx=ctx)
    targets = mx.nd.random.randn(N, N, dtype="float64", ctx=ctx)
    logits_m = logits.copy()
    targets_m = targets.copy()
    logits.attach_grad()
    logits_m.attach_grad()
    with ag.record():
        ref = BCEFocalLoss(logits, targets, alpha=.25, gamma=2)
        ours = mobula.op.FocalLoss(
            alpha=.25, gamma=2, logits=logits_m, targets=targets_m)
    ref.backward()
    ours.backward()
    assert_almost_equal(logits.grad.asnumpy(), logits_m.grad.asnumpy())
    assert_almost_equal(ref.asnumpy(), ours.asnumpy())
def test_FocalLoss_mx_cuda():
    """mobula FocalLoss agrees with BCEFocalLoss on GPU (skipped without one)."""
    if len(mobula.utils.list_gpus()) == 0:
        return  # no CUDA device available
    ctx = mx.gpu()
    logits = mx.nd.random.randn(N, N, dtype="float64", ctx=ctx)
    targets = mx.nd.random.randn(N, N, dtype="float64", ctx=ctx)
    logits_m = logits.copy()
    targets_m = targets.copy()
    logits.attach_grad()
    logits_m.attach_grad()
    with ag.record():
        ref = BCEFocalLoss(logits, targets, alpha=.25, gamma=2)
        ours = mobula.op.FocalLoss(
            alpha=.25, gamma=2, logits=logits_m, targets=targets_m)
    ref.backward()
    ours.backward()
    assert_almost_equal(logits.grad, logits_m.grad)
    assert_almost_equal(ref, ours)
def test_custom_ctensor():
    """A custom C struct of (float* data, int size) can receive a tensor."""
    class CTensor(ctypes.Structure):
        _fields_ = [
            ('data', ctypes.POINTER(ctypes.c_float)),
            ('size', ctypes.c_int),
        ]

    def CTensorConstructor(var):
        # Wrap `var`'s underlying buffer in a CTensor without copying.
        glue_mod = mobula.glue.backend.get_var_glue(var)
        tensor = glue_mod.Tensor(var)
        data_ptr = ctypes.cast(tensor.data_ptr,
                               ctypes.POINTER(ctypes.c_float))
        return CTensor(data_ptr, var.size)

    mobula.glue.register_cstruct('CTensor', CTensor, CTensorConstructor)
    mobula.op.load('CTensor', os.path.dirname(__file__))
    import numpy as np
    values = np.array([1, 2, 3], dtype=np.float32)
    expected = values + 1
    mobula.func.ctensor_inc(1, values)  # increments in place
    assert_almost_equal(expected, values)
def test_roi_align_value():
    """ROIAlign forward/backward match the NumPy reference implementation.

    Bug fix: `np.zeros(data.shape, dtype=T)` referenced an undefined name
    `T` (a NameError at runtime); it now uses the local `dtype`.
    """
    dtype = np.float32
    dlen = 224
    N, C, H, W = 5, 3, 16, 16
    assert H == W
    R = 7
    pooled_size = (3, 4)
    spatial_scale = H * 1.0 / dlen
    sampling_ratio = 0
    # deterministic ramp data makes value mismatches easy to localize
    data = mx.nd.array(
        np.arange(N * C * W * H).reshape((N, C, H, W)), dtype=dtype)
    # data = mx.nd.random.uniform(0, 1, (N, C, H, W), dtype = dtype)
    center_xy = mx.nd.random.uniform(0, dlen, (R, 2), dtype=dtype)
    wh = mx.nd.random.uniform(0, dlen, (R, 2), dtype=dtype)
    batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)))
    # rois: (batch_index, x1, y1, x2, y2)
    pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
    rois = mx.nd.concat(batch_ind, pos, dim=1)
    data.attach_grad()
    rois.attach_grad()
    with mx.autograd.record():
        output = mobula.op.ROIAlign(data=data, rois=rois,
                                    pooled_size=pooled_size,
                                    spatial_scale=spatial_scale,
                                    sampling_ratio=sampling_ratio)
    dy = mx.nd.random.uniform(-1, 1, (R, C) + pooled_size, dtype=dtype)
    output.backward(dy)
    real_output, [dx, drois] = roialign_forward_backward(
        data.asnumpy(), rois.asnumpy(), pooled_size, spatial_scale,
        sampling_ratio, dy.asnumpy())
    # the standalone backward must agree with the combined reference
    bottom_diff = np.zeros(data.shape, dtype=dtype)  # was: dtype=T (NameError)
    roialign_backward(bottom_diff, rois.asnumpy(), pooled_size,
                      spatial_scale, sampling_ratio, dy.asnumpy())
    assert_almost_equal(dx, bottom_diff)
    atol = 1e-3
    rtol = 1e-3
    assert_almost_equal(output.asnumpy(), real_output, atol=atol, rtol=rtol)
    assert_almost_equal(data.grad.asnumpy(), dx, atol=atol, rtol=rtol)
    assert_almost_equal(rois.grad.asnumpy(), drois, atol=atol, rtol=rtol)
def test_convolution():
    """mobula Conv2D matches gluon nn.Conv2D forward and backward."""
    N, C, H, W = 2, 2, 3, 4
    channels = 3
    kernel_size = (2, 3)
    strides = (1, 2)
    padding = (0, 1)
    x = mx.random.uniform(0, 1, shape=(N, C, H, W))
    our_x = x.copy()
    block = nn.Conv2D(channels=channels, kernel_size=kernel_size,
                      strides=strides, padding=padding)
    block.initialize()
    y = block(x)  # first call triggers deferred parameter initialization
    out_grad = mx.random.uniform(0, 1, shape=y.shape)
    weight = block.weight.data()
    bias = block.bias.data()
    x.attach_grad()
    with mx.autograd.record():
        mx_y = block(x)
    mx_y.backward(out_grad)
    our_x.attach_grad()
    our_weight = weight.copy()
    our_weight.attach_grad()
    our_bias = bias.copy()
    our_bias.attach_grad()
    with mx.autograd.record():
        our_y = mobula.op.Conv2D(x=our_x, weight=our_weight, bias=our_bias,
                                 channels=channels, kernel_size=kernel_size,
                                 strides=strides, padding=padding)
    our_y.backward(out_grad)
    atol = 1e-6
    pairs = [(mx_y, our_y),
             (x.grad, our_x.grad),
             (weight.grad, our_weight.grad),
             (bias.grad, our_bias.grad)]
    for reference, ours in pairs:
        assert_almost_equal(reference, ours, atol=atol)
def test_almost_equal_value():
    """atol/rtol thresholds of assert_almost_equal pass and fail as expected."""
    shape = (2, 2, 3)
    reference = np.random.random(shape)
    probe = reference.copy()
    atol = 1e-3
    assert_almost_equal(reference, probe, atol=0)
    assert_almost_equal(reference, probe, atol=atol)
    # perturb two entries by exactly atol, one in each direction
    probe[0, 0, 0] += atol
    probe[0, 1, 2] -= atol
    assert_almost_equal(reference, probe, rtol=np.inf, atol=atol * 2.0)
    check_almost_euqal_expection_raise(
        reference, probe, 'Absolute Error Check failed',
        rtol=np.inf, atol=atol / 2.0)
    # tightest rtol that still covers the perturbation (eps avoids div-by-0)
    eps = np.finfo(probe.dtype).eps
    rtol = np.max(np.abs((reference - probe) / (probe + eps)))
    assert_almost_equal(reference, probe, rtol=rtol * 2.0, atol=atol * 2.0)
    check_almost_euqal_expection_raise(
        reference, probe, 'Relative Error Check failed',
        rtol=rtol * 2.0, atol=atol / 2.0)
def test_ctx_np():
    """MulOP numpy backend: forward product and product-rule gradients."""
    shape = (5, 5)
    lhs = np.random.random(shape)
    rhs = np.random.random(shape)
    out_grad = np.random.random(shape)
    op = MulOP[np.ndarray]()
    product = op.forward(lhs, rhs)
    lhs_grad, rhs_grad = op.backward(out_grad)
    assert_almost_equal(lhs_grad, rhs * out_grad)
    assert_almost_equal(rhs_grad, lhs * out_grad)
    assert_almost_equal(lhs * rhs, product)
def test_addition():
    """AdditionOP: forward is a+b; gradient of each input is the out-grad."""
    lhs = mx.nd.array([1, 2, 3])
    rhs = mx.nd.array([4, 5, 6])
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        total = AdditionOP(lhs, rhs)
    out_grad = mx.nd.array([7, 8, 9])
    total.backward(out_grad)
    assert_almost_equal(lhs + rhs, total)
    assert_almost_equal(lhs.grad, out_grad)
    assert_almost_equal(rhs.grad, out_grad)
def test_ctx_torch():
    """MulOP torch backend: product-rule gradients via torch autograd."""
    try:
        import torch
    except ImportError:
        return  # torch not installed; skip silently
    shape = (5, 5)
    lhs_np = np.random.random(shape)
    rhs_np = np.random.random(shape)
    grad_np = np.random.random(shape)
    lhs = torch.tensor(lhs_np, requires_grad=True)
    rhs = torch.tensor(rhs_np, requires_grad=True)
    out_grad = torch.tensor(grad_np)
    product = MulOP(lhs, rhs)
    product.backward(out_grad)
    assert_almost_equal(lhs.grad.data, (rhs * out_grad).data)
    assert_almost_equal(rhs.grad.data, (lhs * out_grad).data)
    assert_almost_equal((lhs * rhs).data, product.data)
def test_ctx_mxnet():
    """MulOP mxnet backend: product-rule gradients via mx.autograd."""
    try:
        import mxnet as mx
    except ImportError:
        return  # mxnet not installed; skip silently
    shape = (5, 5)
    lhs_np = np.random.random(shape)
    rhs_np = np.random.random(shape)
    grad_np = np.random.random(shape)
    lhs = mx.nd.array(lhs_np)
    rhs = mx.nd.array(rhs_np)
    out_grad = mx.nd.array(grad_np)
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        product = MulOP(lhs, rhs)
    product.backward(out_grad)
    assert_almost_equal(lhs.grad, rhs * out_grad)
    assert_almost_equal(rhs.grad, lhs * out_grad)
    assert_almost_equal(lhs * rhs, product)
def test_IoULoss_mx(ctx):
    """mobula IoULoss matches the reference IoULoss() in value and gradient.

    The three near-identical copies of the check (differing only in the
    sampling ranges for the box coordinates) are now driven by one helper.
    NOTE(review): a second `test_IoULoss_mx` defined later in this file
    shadows this one at import time — confirm which definition should
    survive, or rename one of them.
    """
    def _check(x_low, x_high, y_low, y_high):
        # One forward/backward comparison for boxes drawn from the ranges.
        x = mx.nd.random.uniform(x_low, x_high, shape=(N, 4),
                                 dtype="float64", ctx=ctx)
        y = mx.nd.random.uniform(y_low, y_high, shape=(N, 4),
                                 dtype="float64", ctx=ctx)
        x1 = x.copy()
        y1 = y.copy()
        x.attach_grad()
        x1.attach_grad()
        with ag.record():
            loss = IoULoss()(x, y)
            loss_mobula = mobula.op.IoULoss(x1, y1).squeeze()
        loss.backward()
        loss_mobula.backward()
        mx.nd.waitall()
        assert_almost_equal(x.grad.asnumpy(), x1.grad.asnumpy())
        assert_almost_equal(loss.asnumpy(), loss_mobula.asnumpy())

    _check(1, 3, np.exp(4), np.exp(5))
    _check(3, 5, np.exp(1), np.exp(2))
    _check(1, 5, np.exp(1), np.exp(5))
def test_IoULoss_mx(ctx):
    """IoULoss with an explicit `axis` argument agrees with the reference.

    Case 1 additionally checks that axis=0 on transposed inputs yields the
    same gradients as axis=1 on the originals. Cases 2 and 3 (default axis,
    different coordinate ranges) are decomposed into a shared helper.
    NOTE(review): this redefines `test_IoULoss_mx` from earlier in the
    file, silently shadowing it — confirm which definition is intended.
    """
    def _sample_pair(x_low, x_high, y_low, y_high):
        # Fresh (x, y) box tensors drawn from the given ranges.
        x = mx.nd.random.uniform(x_low, x_high, shape=(N, 4),
                                 dtype="float64", ctx=ctx)
        y = mx.nd.random.uniform(y_low, y_high, shape=(N, 4),
                                 dtype="float64", ctx=ctx)
        return x, y

    def _check_default_axis(x, y):
        # Default-axis forward/backward comparison against the reference.
        x1 = x.copy()
        y1 = y.copy()
        x.attach_grad()
        x1.attach_grad()
        with ag.record():
            fl = IoULoss()(x, y)
            fl_mobula = mobula.op.IoULoss(x1, y1).squeeze()
        fl.backward()
        fl_mobula.backward()
        mx.nd.waitall()
        assert_almost_equal(x.grad.asnumpy(), x1.grad.asnumpy())
        assert_almost_equal(fl.asnumpy(), fl_mobula.asnumpy())

    # Case 1: axis=1 and axis=0 (on transposed inputs) must both match.
    x, y = _sample_pair(1, 3, np.exp(4), np.exp(5))
    x1 = x.copy()
    y1 = y.copy()
    x2 = x.copy()
    y2 = y.copy()
    x.attach_grad()
    x1.attach_grad()
    x2.attach_grad()
    with ag.record():
        fl = IoULoss()(x, y)
        fl_mobula = mobula.op.IoULoss(x1, y1, axis=1).squeeze()
        f2_mobula = mobula.op.IoULoss(x2.transpose(), y2.transpose(),
                                      axis=0).squeeze()
    fl.backward()
    fl_mobula.backward()
    f2_mobula.backward()
    mx.nd.waitall()
    assert_almost_equal(fl.asnumpy(), fl_mobula.asnumpy())
    assert_almost_equal(x.grad.asnumpy(), x1.grad.asnumpy())
    assert_almost_equal(x.grad.asnumpy(), x2.grad.asnumpy())

    # Cases 2 and 3: default axis, other coordinate regimes.
    x, y = _sample_pair(3, 5, np.exp(1), np.exp(2))
    _check_default_axis(x, y)
    x, y = _sample_pair(1, 5, np.exp(1), np.exp(5))
    _check_default_axis(x, y)
def check_almost_euqal_expection_raise(a, b, info, rtol=1e-5, atol=1e-8):
    """Assert that assert_almost_equal(a, b) FAILS.

    Raises Exception(info) if the comparison unexpectedly passes; returns
    silently when the expected AssertionError is raised.
    (Misspelled name — "euqal"/"expection" — is kept intentionally: callers
    elsewhere in this file use it verbatim.)
    """
    try:
        assert_almost_equal(a, b, rtol=rtol, atol=atol)
    except AssertionError:
        return  # expected failure: the values really are unequal
    raise Exception(info)
def softmax2d(N, C):
    """Forward-only check: 2-D mobula Softmax equals mx.nd.softmax."""
    batch = mx.random.uniform(0, 1, shape=(N, C))
    ours = mobula.op.Softmax(batch)
    reference = mx.nd.softmax(batch, axis=-1)
    assert_almost_equal(ours, reference, atol=atol)
def test_func_kwargs():
    """mobula kernels accept a fully keyword-argument invocation."""
    lhs = np.random.random((5, 5))
    rhs = np.random.random((5, 5))
    out = np.empty((5, 5))
    mobula.func.mul_elemwise(n=lhs.size, a=lhs, b=rhs, c=out)
    assert_almost_equal(lhs * rhs, out)