Example #1
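# Appends a new input "c" and a new output "o1" = o + c to a dumped
# (a + b) * 2 graph, then verifies the re-dumped model computes (a + b) * 2 + c.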
def test_add_input():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    inp_c = graph.make_input_node((2, ), np.int32, name="c")
    varo = graph.var_filter.name("o").as_unique()

    out = F.add(varo, inp_c)
    out.name = "o1"
    graph.remove_output(varo)
    graph.add_output(out)
    modified_model = io.BytesIO()

    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b, a)
    np.testing.assert_equal(out["o1"], ((a + b) * 2 + a).numpy())
Example #2
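# Replaces the output var of the add opr (the only opr consuming "a") with
# relu(a * b), so the dumped graph computes relu(a * b) * 2 = [6, 16].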
def test_replace_var():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    vara = graph.var_filter.name("a").as_unique()
    varb = graph.var_filter.name("b").as_unique()

    out = F.mul(vara, varb)
    out = F.relu(out)

    opnode = list(graph.opr_filter.has_input(vara))
    repl_dict = {opnode[0].outputs[0]: out}
    graph.replace_vars(repl_dict)

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [6, 16])
Example #3
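# Edits the captured constant in place (2 -> 3), so the dumped graph
# computes (a + b) * 3 = [12, 18].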
def test_modify_params():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    param_const = graph.params_filter.as_unique()
    param_const.set_value(3)

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [12, 18])
Example #4
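# Replaces input var "b" with a constant [0, 0]; the modified graph then
# takes a single runtime input and computes (a + 0) * 2 = [2, 4].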
def test_make_const():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    const_b = graph.make_const(np.array([0.0, 0.0]), name="b")
    varb = graph.var_filter.name("b").as_unique()

    repl_dict = {varb: const_b}
    graph.replace_vars(repl_dict)

    modified_model = io.BytesIO()
    graph.dump(modified_model)
    modified_model.seek(0)
    load_graph = GraphInference(modified_model)

    out = load_graph.run(a)
    np.testing.assert_equal(out["o"], [2, 4])
Example #5
def update_model(model_path):
    """
    Update the dumped model with test cases for new reference values.

    The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and updated net state dict is dumped.

    .. code-block:: python

        from test_correctness import update_model
        update_model('mnist_model_with_test.mge') # for gpu
        update_model('mnist_model_with_test_cpu.mge') # for cpu

    """
    net = MnistNet(has_bn=True)
    checkpoint = mge.load(model_path)
    net.load_state_dict(checkpoint["net_init"])
    lr = checkpoint["sgd_lr"]
    opt = SGD(net.parameters(), lr=lr)
    gm = ad.GradManager().attach(net.parameters())

    data = Tensor(checkpoint["data"], dtype=np.float32)
    label = Tensor(checkpoint["label"], dtype=np.int32)

    opt.clear_grad()
    loss = train(data, label, net, opt, gm)
    opt.step()

    xpu_name = get_xpu_name()

    checkpoint.update(
        {"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
    )
    mge.save(checkpoint, model_path)
Example #6
def test_name():
    x = Tensor(0)
    assert x.name == ""
    x.name = "x"
    assert x.name == "x"
    x = Tensor(0, name="x")
    assert x.name == "x"
Example #7
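# Traces deformable PS-RoI pooling (7x7 output grid, 4 samples per part)
# and checks that the graph survives a dump/reload round trip.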
def test_deformable_ps_roi_pooling():
    inp = Tensor(np.random.random((1, 256, 64, 64)).astype("float32"))
    rois = Tensor(np.random.random((1, 5)).astype("float32"))
    trans = Tensor(np.random.random((24, 2, 7, 7)).astype("float32"))

    pooled_h = 7
    pooled_w = 7
    sample_per_part = 4
    no_trans = False
    part_size = 7
    spatial_scale = 1.0 / 64
    trans_std = 0.1

    @trace(symbolic=True, capture_as_const=True)
    def fwd(inp, rois, trans):
        y = F.deformable_psroi_pooling(
            inp,
            rois,
            trans,
            no_trans,
            part_size,
            pooled_h,
            pooled_w,
            sample_per_part,
            spatial_scale,
            trans_std,
        )
        return y

    result = fwd(inp, rois, trans)
    check_pygraph_dump(fwd, [inp, rois, trans], [result])
Example #8
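# Swaps the add opr for relu(a - b) via replace_oprs; the remaining * 2
# yields relu([-2, -2]) * 2 = [0, 0].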
def test_replace_opr():

    a = Tensor([1, 2])
    b = Tensor([3, 4])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(a, b):
        return (a + b) * 2

    fwd(a, b)
    orig_model = io.BytesIO()
    fwd.dump(orig_model,
             arg_names=["a", "b"],
             output_names="o",
             optimize_for_inference=False)
    orig_model.seek(0)

    graph = Net.load(orig_model)
    vara = graph.var_filter.name("a").as_unique()
    varb = graph.var_filter.name("b").as_unique()

    out1 = F.sub(vara, varb)
    out1 = F.relu(out1)
    out1 = graph.add_dep_oprs(out1)
    orig_opr = graph.opr_filter.has_input(vara).as_unique()

    repl_dict = {orig_opr: out1[0].owner}
    graph.replace_oprs(repl_dict)
    modified_model1 = io.BytesIO()
    graph.dump(modified_model1)
    modified_model1.seek(0)

    load_graph = GraphInference(modified_model1)
    out = load_graph.run(a, b)
    np.testing.assert_equal(out["o"], [0, 0])
Example #9
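    # Distributed worker: gradients are averaged across ranks by the MEAN
    # allreduce callback. Every rank checks the loss; only rank 0 compares
    # the updated parameters against the reference checkpoint.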
    def worker(max_err):
        net = MnistNet(has_bn=True)
        net.load_state_dict(checkpoint["net_init"])
        lr = checkpoint["sgd_lr"]
        opt = SGD(net.parameters(), lr=lr)

        gm = ad.GradManager().attach(
            net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
        )

        # Use the same data and label on all GPUs so that the result
        # does not depend on the number of GPUs.
        data_train = Tensor(data)
        label_train = Tensor(label)

        loss = train(data_train, label_train, net, opt, gm)

        np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)

        if dist.get_rank():
            return
        for param, param_ref in zip(
            net.state_dict().items(), checkpoint["net_updated"].items()
        ):
            assert param[0] == param_ref[0]
            if "bn" in param[0]:
                ref = param_ref[1].reshape(param[1].shape)
                np.testing.assert_allclose(param[1], ref, atol=max_err)
            else:
                np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)
Example #10
def test_set_subtensor():
    x = Tensor([1, 2, 3])
    x[:] = [1, 1, 1]
    np.testing.assert_almost_equal(x.numpy(), [1, 1, 1], decimal=6)
    x[[0, 2]] = [3, 2]
    np.testing.assert_almost_equal(x.numpy(), [3, 1, 2], decimal=6)
    x[1:3] = [4, 5]
    np.testing.assert_almost_equal(x.numpy(), [3, 4, 5], decimal=6)
Example #11
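# copy.copy shares the qparams object between tensors, while copy.deepcopy
# gives the new tensor an independent copy.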
def test_qparams():
    x = Tensor(1)
    assert x.qparams.scale is None
    x.qparams.scale = Tensor(1.0)
    assert x.qparams.scale.numpy() == 1.0
    x2 = copy.copy(x)
    assert x.qparams is x2.qparams and x2.qparams.scale.numpy() == 1.0
    x3 = copy.deepcopy(x)
    assert x.qparams is not x3.qparams and x3.qparams.scale.numpy() == 1.0
Example #12
def test_matmul():
    @trace(symbolic=True, capture_as_const=True)
    def fwd(data1, data2):
        return F.matmul(data1, data2)

    data1 = Tensor(np.random.random((32, 64)))
    data2 = Tensor(np.random.random((64, 16)))
    result = fwd(data1, data2)
    check_pygraph_dump(fwd, [data1, data2], [result])
Example #13
def test_concat():
    @trace(symbolic=True, capture_as_const=True)
    def fwd(data1, data2):
        return F.concat([data1, data2], axis=1)

    x = Tensor(np.random.random((2, 3)))
    y = Tensor(np.random.random((2, 5)))
    result = fwd(x, y)
    check_pygraph_dump(fwd, [x, y], [result])
Example #14
def test_dot():
    @trace(symbolic=True, capture_as_const=True)
    def fwd(x, y):
        return F.dot(x, y)

    x = Tensor([1.0, 2.0, 3.0])
    y = Tensor([3.0, 4.0, 5.0])
    result = fwd(x, y)
    check_pygraph_dump(fwd, [x, y], [result])
Example #15
def test_batchmatmul():
    @trace(symbolic=True, capture_as_const=True)
    def fwd(x, y):
        return F.matmul(x, y)

    x = Tensor(np.random.random((3, 3, 5)))
    y = Tensor(np.random.random((3, 5, 3)))
    result = fwd(x, y)
    check_pygraph_dump(fwd, [x, y], [result])
Example #16
def test_roialign():
    inp = Tensor(np.random.randn(1, 1, 128, 128))
    rois = Tensor(np.random.random((4, 5)))

    @trace(symbolic=True, capture_as_const=True)
    def fwd(inp, rois):
        return F.vision.roi_align(inp, rois, (2, 2))

    output = fwd(inp, rois)
    check_pygraph_dump(fwd, [inp, rois], [output])
Example #17
def test_index_onehot():
    src = Tensor([[1.0, 2.0]])
    index = Tensor([0])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(src, index):
        return F.indexing_one_hot(src, index)

    out = fwd(src, index)
    check_pygraph_dump(fwd, [src, index], [out])
Example #18
def test_condtake():
    mask = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
    x = Tensor(np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32))

    @trace(symbolic=True, capture_as_const=True)
    def fwd(mask, x):
        v, index = F.cond_take(mask, x)
        return v, index

    v, index = fwd(mask, x)
    check_pygraph_dump(fwd, [mask, x], [v, index])
Example #19
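# Applies the low-level ElemwiseMultiType opr directly: a quantized add
# of two qint8 tensors that produces a qint32 result.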
def test_elemwise_multitype():
    op = builtin.ElemwiseMultiType(mode="qadd", dtype=dtype.qint32(2.0))

    @trace(symbolic=True, capture_as_const=True)
    def fwd(x, y):
        return apply(op, x, y)[0]

    x = Tensor(np.random.random(10) * 10, dtype=dtype.qint8(2.0))
    y = Tensor(np.random.random(10) * 10, dtype=dtype.qint8(2.0))
    result = fwd(x, y)
    check_pygraph_dump(fwd, [x, y], [result])
Example #20
def test_warpaffine():
    inp_shape = (1, 3, 3, 3)
    x = Tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
    weightv = Tensor([[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]])

    @trace(symbolic=True, capture_as_const=True)
    def fwd(x, weightv):
        return F.vision.warp_affine(x, weightv, (2, 2), border_mode="wrap")

    outp = fwd(x, weightv)
    check_pygraph_dump(fwd, [x, weightv], [outp])
Example #21
def run_train(
    model_path,
    use_jit,
    use_symbolic,
    sublinear_memory_config=None,
    max_err=None,
    use_adaptive_pooling=False,
):

    """
    Load the model with test cases and run the training for one iter.
    The loss and updated weights are compared with reference value to verify the correctness.

    Dump a new file with updated result by calling update_model
    if you think the test fails due to numerical rounding errors instead of bugs.
    Please think twice before you do so.

    """
    net = MnistNet(has_bn=True, use_adaptive_pooling=use_adaptive_pooling)
    checkpoint = mge.load(model_path)
    net.load_state_dict(checkpoint["net_init"])
    lr = checkpoint["sgd_lr"]
    opt = SGD(net.parameters(), lr=lr)
    gm = ad.GradManager().attach(net.parameters())

    data = Tensor(checkpoint["data"], dtype=np.float32)
    label = Tensor(checkpoint["label"], dtype=np.int32)

    if max_err is None:
        max_err = 1e-5

    train_func = train
    if use_jit:
        train_func = jit.trace(
            train_func,
            symbolic=use_symbolic,
            sublinear_memory_config=sublinear_memory_config,
        )

    opt.clear_grad()
    loss = train_func(data, label, net, opt, gm)
    opt.step()

    np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)

    for param, param_ref in zip(
        net.state_dict().items(), checkpoint["net_updated"].items()
    ):
        assert param[0] == param_ref[0]
        if "bn" in param[0]:
            ref = param_ref[1].reshape(param[1].shape)
            np.testing.assert_allclose(param[1], ref, atol=max_err)
        else:
            np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)
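
# A minimal usage sketch (an assumption, not part of the original module;
# the checkpoint file name and flag combinations below are illustrative):
if __name__ == "__main__":
    run_train("mnist_model_with_test.mge", use_jit=False, use_symbolic=False)
    run_train("mnist_model_with_test.mge", use_jit=True, use_symbolic=True)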
Example #22
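# Builds a raw computing graph by hand: input_callback feeds the device
# tensor in, output_callback fulfills the Future, and the compiled function
# round-trips x unchanged.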
def test_io():
    g = mgb_graph.Graph()
    x = Tensor(np.random.randn(3).astype("float32"), device="xpux")._dev_tensor()
    vx, _ = mgb_graph.input_callback(
        lambda: x, device=x.comp_node, dtype=x.dtype, graph=g
    )
    y = Future()
    v = mgb_graph.output_callback(y.set_result, vx)
    f = g.compile(v)
    f()

    np.testing.assert_equal(x.numpy(), y.result().numpy())
Example #23
def test_as_type():
    x = Tensor([1, 2, 3], dtype=np.float32)
    y = x.astype(qint8(0.1))
    np.testing.assert_almost_equal(get_scale(y.dtype), 0.1)
    z = y.astype(qint8(0.2))
    np.testing.assert_almost_equal(get_scale(z.dtype), 0.2)
    a = z.astype(quint8(0.3, 127))
    np.testing.assert_almost_equal(get_scale(a.dtype), 0.3)
    np.testing.assert_equal(get_zero_point(a.dtype), 127)
    b = a.astype(quint8(0.3, 128))
    np.testing.assert_almost_equal(get_scale(b.dtype), 0.3)
    np.testing.assert_equal(get_zero_point(b.dtype), 128)
Example #24
def test_convbias():
    @trace(symbolic=True, capture_as_const=True)
    def fwd(inp, weight, bias):
        return F.quantized.conv_bias_activation(
            inp, weight, bias, dtype=dtype.qint8(scale=1.0), nonlinear_mode="relu"
        )

    inp = Tensor(np.random.random((1, 3, 64, 64)), dtype=dtype.qint8(scale=1.0))
    weight = Tensor(np.random.random((32, 3, 3, 3)), dtype=dtype.qint8(scale=1.0))
    bias = Tensor(np.random.random((1, 32, 1, 1)), dtype=dtype.qint32(scale=1.0))
    result = fwd(inp, weight, bias)
    check_pygraph_dump(fwd, [inp, weight, bias], [result])
Example #25
def test_elemwise():
    @trace(symbolic=True, capture_as_const=True)
    def fwd(x, y):
        z1 = x * y
        z2 = x + y
        z3 = z1 / z2
        z3 = z3**3
        return z3

    x = Tensor([1.0, 2.0])
    y = Tensor([3.0, 5.0])
    result = fwd(x, y)
    check_pygraph_dump(fwd, [x, y], [result])
Example #26
def test_deformable_conv():
    if not is_cuda_available():
        return
    conv = M.DeformableConv2d(3, 32, 3)

    @trace(symbolic=True, capture_as_const=True)
    def fwd(data, offset, mask):
        return conv(data, offset, mask)

    data = Tensor(np.random.random((1, 3, 32, 32)))
    offset = Tensor(np.ones((32, 3 * 3 * 2, 30, 30)).astype("int32") * 5)
    mask = Tensor(np.ones((32, 3 * 3, 30, 30)).astype("int32"))
    out = fwd(data, offset, mask)
    check_pygraph_dump(fwd, [data, offset, mask], [out])
Example #27
def test_nms():
    x = np.zeros((100, 4))
    np.random.seed(42)
    x[:, :2] = np.random.rand(100, 2) * 20
    x[:, 2:] = np.random.rand(100, 2) * 20 + 100
    scores = Tensor(np.random.rand(100))
    inp = Tensor(x)

    @trace(symbolic=True, capture_as_const=True)
    def fwd(inp, scores):
        return F.nn.nms(inp, scores, iou_thresh=0.7, max_output=3)

    result = fwd(inp, scores)
    check_pygraph_dump(fwd, [inp, scores], [result])
Example #28
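# Same manual-graph setup as test_io, but with an Elemwise NEGATE opr
# applied between the callbacks, so the output equals -x.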
def test_op():
    g = mgb_graph.Graph()
    x = Tensor(np.random.randn(10).astype("float32"), device="xpux")._dev_tensor()
    v, _ = mgb_graph.input_callback(
        lambda: x, device=x.comp_node, dtype=x.dtype, graph=g
    )
    neg = Elemwise(Elemwise.Mode.NEGATE)
    v = mgb_graph.apply_normal_varnode(neg, v)[0]
    y = Future()
    v = mgb_graph.output_callback(y.set_result, v)
    f = g.compile(v)
    f()

    np.testing.assert_equal(x.numpy(), -y.result().numpy())
Example #29
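# map_xy holds, for each output pixel, the (x, y) source coordinates to
# sample from the 4x4 input (cv2.remap convention).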
def test_remap():
    inp_shape = (1, 1, 4, 4)
    inp = Tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    map_xy_shape = (1, 2, 2, 2)
    map_xy = Tensor(
        np.array([[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]],
                 dtype=np.float32).reshape(map_xy_shape))

    @trace(symbolic=True, capture_as_const=True)
    def fwd(inp, map_xy):
        return F.vision.remap(inp, map_xy)

    out = fwd(inp, map_xy)
    check_pygraph_dump(fwd, [inp, map_xy], [out])
Example #30
def test_warpperspective():
    inp_shape = (1, 1, 4, 4)
    x = Tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    M_shape = (1, 3, 3)
    # M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
    M = Tensor(
        np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]],
                 dtype=np.float32).reshape(M_shape))

    @trace(symbolic=True, capture_as_const=True)
    def fwd(x, M):
        return F.vision.warp_perspective(x, M, (2, 2))

    result = fwd(x, M)
    check_pygraph_dump(fwd, [x, M], [result])