Example #1
import megengine.functional as F


def xcorr_depthwise(x, kernel):
    """
        x: [B,C,H,W]
        kernel: [B,C,h,w]
    """
    batch, channel, kH, kW = kernel.shape
    _, _, xH, xW = x.shape
    bc = batch * channel
    x = x.reshape(1, bc, xH, xW)
    kernel = kernel.reshape(bc, 1, 1, kH, kW)
    # split the kernel into four quadrants (2 x 2)
    kernel_lt = kernel[:, :, :, 0:kH // 2, 0:kW // 2]
    kernel_rb = kernel[:, :, :, kH // 2:kH, kW // 2:kW]
    kernel_rt = kernel[:, :, :, 0:kH // 2, kW // 2:kW]
    kernel_lb = kernel[:, :, :, kH // 2:kH, 0:kW // 2]
    # correlate each quadrant with the matching shifted window of x
    out_lt = F.conv2d(x[:, :, 0:(xH - kH // 2), 0:(xW - kW // 2)],
                      kernel_lt,
                      groups=bc)
    out_rb = F.conv2d(x[:, :, (kH // 2):xH, (kW // 2):xW],
                      kernel_rb,
                      groups=bc)
    out_rt = F.conv2d(x[:, :, 0:(xH - kH // 2), (kW // 2):xW],
                      kernel_rt,
                      groups=bc)
    out_lb = F.conv2d(x[:, :, (kH // 2):xH, 0:(xW - kW // 2)],
                      kernel_lb,
                      groups=bc)
    oH, oW = out_lt.shape[2], out_lt.shape[3]
    out = (out_lt + out_rb + out_rt + out_lb).reshape(batch, channel, oH, oW)
    return out
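
A quick shape check for the function above; a minimal sketch assuming MegEngine and an even-sided kernel (the four-quadrant split tiles the kernel exactly only when kH and kW are even):

import numpy as np
from megengine import tensor

x = tensor(np.random.randn(2, 4, 32, 32).astype(np.float32))
k = tensor(np.random.randn(2, 4, 8, 8).astype(np.float32))
out = xcorr_depthwise(x, k)
print(out.shape)  # (2, 4, 25, 25): (32 - 4) - 4 + 1 = 25 per side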
Example #2
import numpy as np
import megengine as mge
import megengine.functional as F


def two_layer_conv(x):
    # (8, 3, 3, 3) means (output channels, input channels, kernel height, kernel width)
    conv_weight = mge.Parameter(np.random.randn(8, 3, 3, 3).astype(np.float32))
    # one bias for each of the 8 convolution kernels
    conv_bias = mge.Parameter(np.zeros((1, 8, 1, 1), dtype=np.float32))
    x = F.conv2d(x, conv_weight, conv_bias)
    x = F.relu(x)
    conv_weight = mge.Parameter(
        np.random.randn(16, 8, 3, 3).astype(np.float32))
    conv_bias = mge.Parameter(np.zeros((1, 16, 1, 1), dtype=np.float32))
    x = F.conv2d(x, conv_weight, conv_bias)
    x = F.relu(x)
    return x
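
A usage sketch for the block above; the input size is illustrative:

x = mge.tensor(np.random.randn(2, 3, 32, 32).astype(np.float32))
y = two_layer_conv(x)
print(y.shape)  # (2, 16, 28, 28): two unpadded 3x3 convs shave 2 pixels each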
Example #3
    def forward(self, x, x_gap):
        x_w = self.wn_fc1(x_gap)
        x_w = self.sigmoid(x_w)
        x_w = self.wn_fc2(x_w)

        if x.shape[0] == 1: # case of batch size = 1
            x_w = x_w.reshape(self.oup, self.inp, self.ksize, self.ksize)
            x = F.conv2d(x, weight=x_w, stride=self.stride, padding=self.pad)
            return x
        
        # batch > 1: fold the batch into channels and run one grouped conv,
        # so each sample is convolved with its own generated weight
        x = x.reshape(1, -1, x.shape[2], x.shape[3])
        x_w = x_w.reshape(-1, self.oup, self.inp, self.ksize, self.ksize)
        x = F.conv2d(x, weight=x_w, stride=self.stride, padding=self.pad, groups=x_w.shape[0])
        x = x.reshape(-1, self.oup, x.shape[2], x.shape[3])
        return x
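
The batch branch above folds the batch into the channel axis so each sample is convolved with its own generated weight. A standalone sketch of the same trick with hypothetical sizes (the 5-D weight layout follows MegEngine's grouped convention):

import numpy as np
import megengine.functional as F
from megengine import tensor

B, inp, oup, ksize = 4, 8, 16, 3
x = tensor(np.random.randn(B, inp, 14, 14).astype(np.float32))
# one generated weight per sample, already in the grouped 5-D layout
w = tensor(np.random.randn(B, oup, inp, ksize, ksize).astype(np.float32))

x = x.reshape(1, B * inp, 14, 14)  # fold the batch into channels
y = F.conv2d(x, w, stride=1, padding=1, groups=B)
y = y.reshape(B, oup, y.shape[2], y.shape[3])
print(y.shape)  # (4, 16, 14, 14)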
Example #4
import numpy as np
import megengine.functional as F
from megengine import tensor


def test_conv2d_zero_stride_numpy_array():
    inp = np.random.randn(3, 224, 224).astype(np.float32)
    # np.newaxis creates a view whose new axis has numpy stride 0
    inp = inp[np.newaxis, :]

    inp = tensor(inp, dtype=np.float32)
    weight = tensor(np.random.randn(16, 3, 3, 3), dtype=np.float32)
    # positional args: stride=(2, 2), padding=(3, 3), dilation=(1, 1), groups=1
    out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
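    # The original test only checks that the call succeeds. With a 224x224 input,
    # a 3x3 kernel, stride 2 and padding 3, conv arithmetic gives
    # floor((224 + 2*3 - 3) / 2) + 1 = 114 per side; a hedged shape check
    # (my addition, not part of the original test):
    assert out.shape == (1, 16, 114, 114)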
Example #5
    def forward(self, inps):
        src = inps[0]
        weight = inps[1]
        try:
            bias = inps[2]
        except IndexError:
            bias = None

        if bias is not None:
            if bias.shape.ndim == 3:
                bias = F.expand_dims(bias, axis=0)
            elif bias.shape.ndim == 1:
                bias = F.expand_dims(bias, axis=[0, 2, 3])
            else:
                raise ValueError(f"invalid Conv2d bias shape {bias.shape}")

        if self.param["groups"] != 1:
            # grouped conv expects a 5-D weight: (groups, OC/groups, IC/groups, FH, FW)
            groups = self.param["groups"]
            IC = src.shape.numpy()[1]
            OC = weight.shape.numpy()[0]
            FH = weight.shape.numpy()[2]
            FW = weight.shape.numpy()[3]
            target_shape = [groups, OC // groups, IC // groups, FH, FW]
            weight = F.reshape(weight, target_shape)

        return F.conv2d(
            src,
            weight,
            bias,
            stride=self.param["stride"],
            padding=self.param["padding"],
            dilation=self.param["dilation"],
            groups=self.param["groups"],
        )
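
A numeric illustration of the grouped-weight reshape above, with hypothetical sizes IC=8, OC=16, groups=4:

import numpy as np

w_flat = np.zeros((16, 8 // 4, 3, 3), dtype=np.float32)  # (OC, IC/groups, FH, FW)
w_grp = w_flat.reshape(4, 16 // 4, 8 // 4, 3, 3)         # (groups, OC/g, IC/g, FH, FW)
print(w_grp.shape)  # (4, 4, 2, 3, 3)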
Example #6
def gaussblur(x, kernel, p=5, chn=3):
    # reflect-pad (p - 1) / 2 pixels on each side so the output keeps x's spatial size
    x_pad = F.pad(x, pad=[
        int((p - 1) / 2),
    ] * 4, mode='reflect')
    # depthwise blur: one group per channel, so channels do not mix
    y = F.conv2d(x_pad, kernel, padding=0, stride=1, groups=chn)

    return y
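
The snippet leaves the kernel construction implicit. A minimal sketch of a normalized p x p Gaussian in the one-group-per-channel layout the call expects; the 5-D shape follows MegEngine's grouped-weight convention, and sigma is an assumption:

import numpy as np

def gauss_kernel(p=5, chn=3, sigma=1.0):
    ax = np.arange(p) - (p - 1) / 2.0
    g = np.exp(-(ax ** 2) / (2.0 * sigma ** 2))
    k2d = np.outer(g, g)
    k2d /= k2d.sum()  # normalize so the blur preserves overall brightness
    # replicate per channel: (groups, out/group, in/group, kh, kw)
    return np.tile(k2d[None, None, None], (chn, 1, 1, 1, 1)).astype(np.float32)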
Example #7
def convf(x, kernel):
    batch = int(kernel.shape[0])
    channel = int(kernel.shape[1])
    bc = batch * channel
    x = x.reshape((1, bc, int(x.shape[2]), int(x.shape[3])))
    kernel = kernel.reshape(bc, 1, 1, int(kernel.shape[2]), int(kernel.shape[3]))
    out = F.conv2d(x, kernel, groups=bc)
    out = out.reshape(batch, channel, int(out.shape[2]), int(out.shape[3]))
    return out
Example #8
    def run(
        N,
        IC,
        OC,
        IH,
        IW,
        KH,
        KW,
        PH,
        PW,
        SH,
        SW,
        has_bias=True,
    ):
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(N, OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)

        inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)

        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)

        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")

        def run_batch_conv_bias(inp, w, b):
            b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
            result = F.quantized.batch_conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                dtype=out_dtype,
            )
            return result.astype("float32")

        expected = F.conv2d(inp_fp32, w_fp32[0],
                            b_fp32 if has_bias else None)[0]
        expected = expected.astype(out_dtype).astype("float32")
        expected = F.flatten(expected)

        result = run_batch_conv_bias(inp_int8, w_int8, b_int32)
        result = F.flatten(result)

        np.testing.assert_allclose(result.numpy(),
                                   expected.numpy(),
                                   atol=outp_scale)
Example #9
def test_conv2d_autocast():
    """check amp's result is equal to manually converted result"""
    amp.enabled = True
    inp = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float32)
    weight = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float32)
    out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
    amp.enabled = False
    expected = F.conv2d(
        inp.astype("float16"),
        weight.astype("float16"),
        None,
        (2, 2),
        (3, 3),
        (1, 1),
        1,
        compute_mode="float32",
    )
    assert out.dtype == np.float16
    assert expected.dtype == np.float16
    np.testing.assert_allclose(out.numpy(), expected.numpy())
Example #10
def run_conv2d(inp, w, b):
    O = F.conv2d(
        inp,
        w,
        b if has_bias else None,
        stride=(SH, SW),
        padding=(PH, PW),
    )
    if nonlinear_mode == "relu":
        return F.relu(O)
    else:
        return O
Example #11
def test_set_conv2d_config():
    """check setting config by contextmanager is equal to manually converted result"""
    config._compute_mode = "float32"
    inp = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float16)
    weight = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float16)
    config_out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
    config._compute_mode = "default"
    with config._override(compute_mode="float32"):
        context_out = F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
    expected = F.conv2d(
        inp,
        weight,
        None,
        (2, 2),
        (3, 3),
        (1, 1),
        1,
        compute_mode="float32",
    )
    np.testing.assert_allclose(config_out.numpy(), expected.numpy())
    np.testing.assert_allclose(context_out.numpy(), expected.numpy())
Example #12
def forward(
        self,
        inp,
        weight,
        bias=None,
        stride=(1, 1),
        padding=(0, 0),
        dilation=(1, 1),
        groups=1,
):
    x = F.conv2d(inp, weight, bias, stride, padding, dilation, groups)
    return x
Example #13
import megengine.functional as F


def xcorr_depthwise(x, kernel):
    """
        x: [B,C,H,W]
        kernel: [B,C,h,w]
    """
    batch = int(kernel.shape[0])
    channel = int(kernel.shape[1])
    bc = batch * channel
    x = x.reshape(1, bc, x.shape[2], x.shape[3])
    kernel = kernel.reshape(bc, 1, 1, kernel.shape[2], kernel.shape[3])
    out = F.conv2d(x, kernel, groups=bc)
    out = out.reshape(batch, channel, out.shape[2], out.shape[3])
    return out
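
A hedged correctness check against a naive per-sample, per-channel loop (small sizes keep the quadruple loop cheap):

import numpy as np
from megengine import tensor

x = tensor(np.random.randn(2, 3, 16, 16).astype(np.float32))
k = tensor(np.random.randn(2, 3, 5, 5).astype(np.float32))
out = xcorr_depthwise(x, k)

ref = np.zeros((2, 3, 12, 12), dtype=np.float32)
xn, kn = x.numpy(), k.numpy()
for b in range(2):
    for c in range(3):
        for i in range(12):
            for j in range(12):
                ref[b, c, i, j] = (xn[b, c, i:i + 5, j:j + 5] * kn[b, c]).sum()
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-4, atol=1e-4)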
Example #14
def forward(self, x, y):
    return F.conv2d(x, y)