Code Example #1
File: test_functional.py  Project: mozre/MegEngine
# Assumed imports (the original excerpt omits them):
import numpy as np
import megengine.amp as amp
import megengine.functional as F
from megengine import tensor


def test_batchnorm2d_autocast():
    """Check that amp's result matches the manually converted result."""
    amp.enabled = True
    tshape = (1, 3, 224, 224)
    pshape = (1, 3, 1, 1)
    inp = tensor(np.random.randn(*tshape), dtype=np.float32)
    weight = tensor(np.ones(pshape, dtype=np.float32))
    bias = tensor(np.zeros(pshape, dtype=np.float32))

    # With amp enabled, batch_norm autocasts its inputs to float16.
    out = F.batch_norm(inp,
                       weight=weight,
                       bias=bias,
                       training=True,
                       inplace=False)

    amp.enabled = False
    # Reference: cast the input to float16 by hand and force float32 compute mode.
    expected = F.batch_norm(
        inp.astype("float16"),
        weight=weight,
        bias=bias,
        training=True,
        inplace=False,
        compute_mode="float32",
    )
    assert out.dtype == np.float16
    assert expected.dtype == np.float16
    np.testing.assert_allclose(out.numpy(), expected.numpy())
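For scoped use, the same switch can be written as a context manager rather than a module-level flag. A minimal sketch, assuming megengine.amp.autocast and reusing the inp, weight, and bias tensors from the test above:

with amp.autocast():  # amp is enabled only inside this block
    out = F.batch_norm(inp, weight=weight, bias=bias, training=True, inplace=False)
assert out.dtype == np.float16  # autocast lowers batch_norm to float16

Unlike assigning amp.enabled directly, the context manager restores the previous state on exit, so amp mode cannot leak into later tests.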
Code Example #2
File: test_module.py  Project: ym593277523/MegEngine
# Assumed imports (the original excerpt omits them):
import numpy as np
import megengine.functional as F
from megengine import Parameter, tensor


def test_module_api_hooks():
    net = MyModule()  # MyModule is defined elsewhere in test_module.py; see the sketch below
    pre_hook_num = 0
    post_hook_num = 0
    hooks = []

    def pre_hook(module, inputs):
        # Every pre-hook call shifts each input by +1 before forward runs.
        nonlocal pre_hook_num
        pre_hook_num += 1
        modified_inputs = tuple(inp + 1 for inp in inputs)
        return modified_inputs

    def post_hook(module, inputs, outputs):
        # Every post-hook call shifts the output by +1 after forward runs.
        nonlocal post_hook_num
        post_hook_num += 1
        outputs += 1
        return outputs

    net.apply(lambda module: hooks.append(
        module.register_forward_pre_hook(pre_hook)))
    net.apply(
        lambda module: hooks.append(module.register_forward_hook(post_hook)))

    shape = (1, 4, 1, 1)
    x = tensor(np.zeros(shape, dtype=np.float32))
    y = net(x)

    assert pre_hook_num == 4  # one call per module: net, net.i, net.i.bn, net.bn
    assert post_hook_num == 4
    # Reproduce the hooked forward by hand: three pre-hooks fire before the
    # first BN, so its input is x + 3.
    mean1 = Parameter(np.zeros(shape), dtype=np.float32)
    bn1 = F.batch_norm(x + 3,
                       mean1,
                       Parameter(np.ones(shape), dtype=np.float32),
                       training=True)
    np.testing.assert_allclose(
        net.i.bn.running_mean.numpy(),
        mean1.numpy(),
    )
    # The second BN sees bn1 + 3: two post-hooks plus one pre-hook in between.
    mean2 = Parameter(np.zeros(shape), dtype=np.float32)
    bn2 = F.batch_norm(bn1 + 3,
                       mean2,
                       Parameter(np.ones(shape), dtype=np.float32),
                       training=True)
    np.testing.assert_allclose(
        net.bn.running_mean.numpy(),
        mean2.numpy(),
    )
    # Two post-hooks fire after the second BN, so the net's output is bn2 + 2.
    np.testing.assert_allclose((bn2 + 2).numpy(), y.numpy())

    assert len(hooks) == 8
    for handler in hooks:
        handler.remove()
    y = net(x)
    assert pre_hook_num == 4  # the hooks were removed, so the counters are unchanged
    assert post_hook_num == 4
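MyModule is defined elsewhere in test_module.py and is not shown in this excerpt. A minimal sketch consistent with the assertions above (four modules visited by apply, with BatchNorm2d layers reachable as net.i.bn and net.bn); this is an illustration, not the project's exact definition:

import megengine.module as M

class InnerModule(M.Module):
    def __init__(self):
        super().__init__()
        self.bn = M.BatchNorm2d(4)  # 4 channels, matching the (1, 4, 1, 1) input

    def forward(self, x):
        return self.bn(x)

class MyModule(M.Module):
    def __init__(self):
        super().__init__()
        self.i = InnerModule()      # provides net.i.bn
        self.bn = M.BatchNorm2d(4)  # provides net.bn

    def forward(self, x):
        return self.bn(self.i(x))

With this structure, apply visits net, net.i, net.i.bn, and net.bn, which is why each hook counter reaches exactly 4 and len(hooks) == 8.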
Code Example #3
# Assumed imports (the original excerpt omits them):
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from megengine.autodiff import GradManager


def test_regression_1762():
    x = F.ones((10, 10, 3, 3))

    conv = M.Conv2d(10, 10, kernel_size=3, padding=1)

    t_shape = (1, 10, 1, 1)
    weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
    bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))

    gm = GradManager()
    gm.attach(list(conv.parameters()) + [weight, bias])

    with gm:
        out1 = conv(x)

        out2 = F.batch_norm(
            out1,
            None,
            None,
            weight,
            bias,
            training=True,
        )

        # The weird error only occurs when this op is placed after the BN call;
        # the op type itself is not relevant. out2 is intentionally unused, so
        # loss depends only on out1.
        loss = out1 + 1
        gm.backward(loss)
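As a hypothetical follow-up (not part of the original test), one way to confirm that the backward pass above populated gradients, continuing the test body at function scope:

    # conv lies on the path from x to loss, so its parameters get gradients;
    # weight and bias do not feed into loss, so no gradient is expected there.
    for p in conv.parameters():
        assert p.grad is not None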