コード例 #1
ファイル: test_module.py プロジェクト: MegEngine/MegEngine
 def __init__(self):
     """Build a module mixing direct layer attributes with containers.

     Registers stand-alone convolution/normalization/pooling layers, a
     depthwise-separable pair wrapped in an OrderedDict-backed
     Sequential, layers held inside builtin list / tuple / dict
     attributes, and a final linear layer.
     """
     super().__init__()
     # Stand-alone layers registered as direct attributes.
     self.conv1 = Conv2d(3, 128, 3, padding=1, bias=False)
     self.conv2 = Conv2d(3, 128, 3, dilation=2, bias=False)
     self.bn1 = BatchNorm1d(128)
     self.bn2 = BatchNorm2d(128)
     self.pooling = MaxPool2d(kernel_size=2, padding=0)
     # Depthwise-separable convolution pair, named via an OrderedDict
     # so the Sequential keeps the insertion order and the key names.
     separable = OrderedDict(
         [
             (
                 "depthwise",
                 Conv2d(256, 256, 3, 1, 0, groups=256, bias=False),
             ),
             (
                 "pointwise",
                 Conv2d(256, 256, kernel_size=1, stride=1, padding=0, bias=True),
             ),
         ]
     )
     self.submodule1 = Sequential(separable)
     # Layers stored inside builtin containers (list / tuple / dict),
     # including one level of nesting.
     self.list1 = [Dropout(drop_prob=0.1), [Softmax(axis=100)]]
     self.tuple1 = (
         Dropout(drop_prob=0.1),
         (Softmax(axis=100), Dropout(drop_prob=0.2)),
     )
     self.dict1 = {"Dropout": Dropout(drop_prob=0.1)}
     self.fc1 = Linear(512, 1024)
コード例 #2
ファイル: test_module.py プロジェクト: wenming2014/MegEngine
 def __init__(self):
     """Build a flat module whose layers are all direct attributes.

     Covers convolutions with stride / padding / dilation options,
     2d and 1d batch norms, dropout, softmax, pooling, a Sequential
     built from positional children, and a final linear layer.
     """
     super().__init__()
     # Three convolutions, each exercising a different constructor option.
     self.conv1 = Conv2d(3, 128, 3, stride=2, bias=False)
     self.conv2 = Conv2d(3, 128, 3, padding=1, bias=False)
     self.conv3 = Conv2d(3, 128, 3, dilation=2, bias=False)
     self.bn1 = BatchNorm2d(128)
     self.bn2 = BatchNorm1d(128)
     self.dropout = Dropout(drop_prob=0.1)
     self.softmax = Softmax(axis=100)
     self.pooling = MaxPool2d(kernel_size=2, padding=0)
     # Sequential built from positional children (contrast with the
     # OrderedDict form used elsewhere in this file).
     self.submodule1 = Sequential(Dropout(drop_prob=0.1), Softmax(axis=100),)
     self.fc1 = Linear(512, 1024)
コード例 #3
def test_batchnorm():
    """Check BatchNorm1d against a NumPy reference, then eval mode.

    Over three training iterations, verifies that (a) the output equals
    ``(x - mean) / sqrt(var_biased + eps)`` and (b) the layer's running
    mean/var follow the exponential-moving-average update with the given
    momentum, fed by the *unbiased* variance.  Afterwards ``training`` is
    set to False and the test checks that inference is deterministic,
    leaves the running statistics untouched, and normalizes with the
    accumulated running statistics.
    """
    nr_chan = 8
    # Input layout is (N, C, L): batch 3, 8 channels, length 4.
    data_shape = (3, nr_chan, 4)
    momentum = 0.9
    bn = BatchNorm1d(nr_chan, momentum=momentum)
    # NumPy mirrors of the layer's running statistics, shaped (1, C, 1).
    running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
    running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
    data = tensor()
    for i in range(3):
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        # Per-channel mean over the batch (axis 0) and length (axis 2) dims.
        mean = np.mean(np.mean(xv, axis=0, keepdims=True),
                       axis=2,
                       keepdims=True)
        # Collapse (N, C, L) -> (N*L, C) so variance is taken per channel.
        xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
            (data_shape[0] * data_shape[2], nr_chan))

        # Biased variance (ddof=0) is what normalizes the forward output.
        var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
        sd = np.sqrt(var_biased + bn.eps)

        # Unbiased variance (ddof=1) is what feeds the running estimate.
        var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape(
            (1, nr_chan, 1))
        running_mean = running_mean * momentum + mean * (1 - momentum)
        running_var = running_var * momentum + var_unbiased * (1 - momentum)

        data.set_value(xv)
        # Forward in training mode; the assertions below confirm it also
        # advanced the layer's running statistics by one EMA step.
        yv = bn(data)
        yv_expect = (xv - mean) / sd

        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
        assertTensorClose(running_mean.reshape(-1),
                          bn.running_mean.numpy().reshape(-1),
                          max_err=5e-6)
        assertTensorClose(running_var.reshape(-1),
                          bn.running_var.numpy().reshape(-1),
                          max_err=5e-6)

    # test set 'training' flag to False
    mean_backup = bn.running_mean.numpy()
    var_backup = bn.running_var.numpy()
    bn.training = False
    xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
    data.set_value(xv)
    yv1 = bn(data)
    yv2 = bn(data)
    # Eval mode must be deterministic and must not touch running stats.
    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
    # Eval-mode output normalizes with the accumulated running statistics.
    yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
コード例 #4
ファイル: test_batchnorm.py プロジェクト: mozre/MegEngine
def test_batchnorm_no_stats():
    """BatchNorm1d with ``track_running_stats=False`` must always
    normalize with the current batch's statistics.

    Iterations 0-1 run in training mode and 2-3 in eval mode; since no
    running statistics are tracked, the expected output is the same
    batch-statistics normalization in both modes.
    """
    nr_chan = 8
    data_shape = (3, nr_chan, 4)
    # Consistency fix: use nr_chan instead of a hard-coded 8 so the
    # channel count is defined in one place (matching test_batchnorm).
    bn = BatchNorm1d(nr_chan, track_running_stats=False)
    for i in range(4):
        if i == 2:
            # Flip to eval mode half-way; output must be unaffected.
            bn.training = False
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        # Per-channel mean over the batch (axis 0) and length (axis 2) dims.
        mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
        # Biased per-channel variance over the collapsed (N*L, C) layout.
        var = np.var(
            np.transpose(xv, [0, 2, 1]).reshape(
                (data_shape[0] * data_shape[2], nr_chan)
            ),
            axis=0,
        ).reshape((1, nr_chan, 1))
        sd = np.sqrt(var + bn.eps)

        yv = bn(Tensor(xv))
        yv_expect = (xv - mean) / sd

        _assert_allclose(yv.numpy(), yv_expect)
コード例 #5
ファイル: test_batchnorm.py プロジェクト: mozre/MegEngine
def test_batchnorm_empty_tensor(dim, is_symbolic):
    """Batch norm over a zero-sized input must return it unchanged.

    ``dim`` selects BatchNorm1d or BatchNorm2d; ``is_symbolic`` (None /
    True / False) optionally wraps the forward in a jit trace and runs
    it three times to exercise retracing.
    """
    if dim == 1:
        m = BatchNorm1d(4, affine=True)
        empty_shape = (0, 4, 0)
    elif dim == 2:
        m = BatchNorm2d(4, affine=True)
        empty_shape = (0, 4, 0, 0)
    else:
        raise NotImplementedError

    inp = mge.tensor(np.random.randn(*empty_shape).astype("float32"))
    m.train()

    def run(x):
        # One-line purpose: forward the (possibly traced) module.
        return m(x)

    if is_symbolic is not None:
        run = jit.trace(symbolic=is_symbolic)(run)
    for _ in range(3):
        out = run(inp)
        # Zero elements in, zero elements out, values untouched.
        np.testing.assert_equal(out.numpy(), inp)
        if is_symbolic is None:
            # Untraced path needs only a single iteration.
            break