Example #1
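The two-branch shuffle used in ShuffleNetV2-style blocks: adjacent channels are interleaved across the whole batch and the result is split into two halves, one per branch.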
    def channel_shuffle(self, x):
        batchsize, num_channels, height, width = x.shape
        # assert (num_channels % 4 == 0)
        # Pair adjacent channels, move the pair axis to the front, then
        # split into two interleaved halves (one per branch).
        x = x.reshape(batchsize * num_channels // 2, 2, height * width)
        x = F.transpose(x, (1, 0, 2))
        x = x.reshape(2, -1, num_channels // 2, height, width)
        return x[0], x[1]
Example #2
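A dataset __getitem__ that loads an image pair, converts a gyroscope-derived homography into a dense flow field via homo_to_flow, and resizes images and flows to 640x512 (w x h) before packing everything into a dict.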
    def __getitem__(self, idx):
        dummy_data = np.zeros([512, 640, 2])

        imgs = [self.samples[idx]["img1"], self.samples[idx]["img2"]]

        gyro_homo = self.samples[idx]["homo"]

        gt_flow = self.samples[idx]["gt_flow"]

        split = self.samples[idx]["split"]

        gyro_field = homo_to_flow(np.expand_dims(gyro_homo, 0), H=600, W=800).squeeze()

        imgs = [cv2.resize(i, (640, 512)) for i in imgs]

        gt_flow = self.resize_flow(gt_flow, dummy_data, True).transpose(2, 0, 1)
        gyro_field = self.resize_flow(gyro_field, dummy_data, True).transpose(2, 0, 1)

        if self.input_transform:
            # HWC -> CHW; without the transform the raw resized images are used
            imgs = [F.transpose(i, (2, 0, 1)) for i in imgs]

        ret = {"img{}".format(i + 1): v for i, v in enumerate(imgs)}

        ret["gyro_field"] = gyro_field
        ret["gt_flow"] = gt_flow
        ret["label"] = split
        ret["rain_label"] = split
        return ret
Example #3
    def forward(self, xin, labels=None, imgs=None):
        outputs = []
        # Inference-only path; the training branch is not part of this example.
        assert not self.training

        for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
                zip(self.cls_convs, self.reg_convs, self.strides, xin)):
            x = self.stems[k](x)
            cls_x = x
            reg_x = x

            cls_feat = cls_conv(cls_x)
            cls_output = self.cls_preds[k](cls_feat)

            reg_feat = reg_conv(reg_x)
            reg_output = self.reg_preds[k](reg_feat)
            obj_output = self.obj_preds[k](reg_feat)
            output = F.concat(
                [reg_output,
                 F.sigmoid(obj_output),
                 F.sigmoid(cls_output)], 1)
            outputs.append(output)

        self.hw = [x.shape[-2:] for x in outputs]
        # [batch, n_anchors_all, 85]
        outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs],
                           axis=2)
        outputs = F.transpose(outputs, (0, 2, 1))
        if self.decode_in_inference:
            return self.decode_outputs(outputs)
        else:
            return outputs
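This appears to be the inference path of a YOLOX-style decoupled detection head: each feature level produces box-regression, objectness, and class maps that are concatenated per level, flattened, and stacked into a [batch, n_anchors_all, num_outputs] tensor, optionally decoded into boxes.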
Example #4
    def channel_shuffle(self, x):
        batchsize, num_channels, height, width = x.shape
        # assert num_channels.numpy() % self.group == 0
        group_channels = num_channels // self.group

        x = x.reshape(batchsize, group_channels, self.group, height, width)
        x = F.transpose(x, (0, 2, 1, 3, 4))
        x = x.reshape(batchsize, num_channels, height, width)
        return x
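Examples #1 and #4 are the same idea at different granularity. Below is a minimal, self-contained sketch of the grouped shuffle; shuffle_channels is a free-function rewrite for illustration, not an API from the examples above, and it reshapes groups-first (the inverse permutation of Example #4). Assumes MegEngine and NumPy are installed.

import numpy as np
import megengine as mge
import megengine.functional as F

def shuffle_channels(x, group):
    # NCHW -> (N, group, C // group, H, W), swap the two channel axes,
    # then flatten back so channels from different groups interleave.
    n, c, h, w = x.shape
    x = x.reshape(n, group, c // group, h, w)
    x = F.transpose(x, (0, 2, 1, 3, 4))
    return x.reshape(n, c, h, w)

x = mge.tensor(np.arange(8, dtype="float32").reshape(1, 8, 1, 1))
print(shuffle_channels(x, 2).reshape(-1).numpy())  # [0. 4. 1. 5. 2. 6. 3. 7.]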
Example #5
def flow_warp(x, flow12):
    B, _, H, W = x.shape

    base_grid = mesh_grid(B, H, W).astype(x)  # B2HW

    # Absolute sampling coordinates: identity grid plus flow, reshaped to
    # B x H x W x 2 (x, y) as expected by F.vision.remap.
    grid_warp = base_grid + flow12
    grid_warp = F.transpose(grid_warp, (0, 2, 3, 1))

    warp_imgs = F.vision.remap(x, grid_warp)
    return warp_imgs
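mesh_grid is referenced above but not shown. Here is a minimal sketch consistent with F.vision.remap's B x H x W x 2 (x, y) coordinate convention; the helper's exact behavior in the original codebase is an assumption.

import numpy as np
import megengine as mge

def mesh_grid(B, H, W):
    # Identity sampling grid: channel 0 holds x (column) coordinates,
    # channel 1 holds y (row) coordinates, replicated across the batch.
    xx, yy = np.meshgrid(np.arange(W), np.arange(H))  # each H x W
    grid = np.stack([xx, yy], axis=0)[None].astype("float32")  # 1 x 2 x H x W
    return mge.tensor(np.tile(grid, (B, 1, 1, 1)))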
Example #6
def convert_to_nchw4(var):
    # Pack channels in groups of 4: NCHW -> (N, C // 4, H, W, 4), the
    # NCHW4 layout used by quantized conv kernels on CUDA.
    var = F.reshape(var, (var.shape[0], var.shape[1] // 4, 4,
                          var.shape[2], var.shape[3]))
    var = F.transpose(var, (0, 1, 3, 4, 2))
    return var
Example #7
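A quantized-convolution correctness check: the same convolution runs once in float32 and once through F.quantized.conv_bias_activation, and the two results must agree to within one output quantization step. Names such as inp_dtype, w_dtype, b_dtype, out_dtype, and outp_scale come from the enclosing test scope and are not shown here.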
    def run(
        N,
        IC,
        OC,
        IH,
        IW,
        KH,
        KW,
        PH,
        PW,
        SH,
        SW,
        has_bias=True,
        nonlinear_mode="identity",
    ):
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)

        inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)

        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)

        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")

        def convert_to_nchw4(var):
            var = F.reshape(var, (var.shape[0], var.shape[1] // 4, 4,
                                  var.shape[2], var.shape[3]))
            var = F.transpose(var, (0, 1, 3, 4, 2))
            return var

        def run_conv2d(inp, w, b):
            O = F.conv2d(
                inp,
                w,
                b if has_bias else None,
                stride=(SH, SW),
                padding=(PH, PW),
            )
            if nonlinear_mode == "relu":
                return F.relu(O)
            else:
                return O

        def run_conv_bias(inp, w, b, format="NCHW"):
            b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
            if format == "NCHW4":
                inp = convert_to_nchw4(inp)
                w = convert_to_nchw4(w)
                b = convert_to_nchw4(b)
            return F.quantized.conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                dtype=out_dtype,
                nonlinear_mode=nonlinear_mode,
            )

        format = "NCHW4" if is_cuda_available() else "NCHW"

        expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
        expected = expected.astype(out_dtype).astype("float32")
        result = run_conv_bias(inp_int8, w_int8, b_int32,
                               format=format).astype("float32")
        if format == "NCHW4":
            result = F.transpose(result, (0, 1, 4, 2, 3))
        expected = F.flatten(expected)
        result = F.flatten(result)
        np.testing.assert_allclose(result.numpy(),
                                   expected.numpy(),
                                   atol=outp_scale)
Example #8
    def forward(self, inps):
        # The permutation arrives as strings in the op's param dict;
        # cast to int before handing it to transpose.
        pattern = [int(i) for i in self.param["perm"]]
        return F.transpose(inps[0], pattern)
Example #9
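A fragment of an op-benchmark table comparing MegEngine (MF) against PyTorch: each entry appears to be (name, megengine fn, torch fn, small shapes, large shapes, flag, iterations). The first four lines are the tail of a preceding entry that was truncated.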
    [(1000, 1000)],
    True,
    1000,
),
(
    "tile",
    lambda x: MF.tile(x, (2,) * len(x.shape)),
    lambda x: torch.tile(x, (2,) * len(x.shape)),
    [(100, 100)],
    [(64, 512, 16, 16)],
    True,
    1000,
),
(
    "transpose",
    lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
    lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
    [(100, 100)],
    [(64, 512, 16, 16)],
    True,
    1000,
),
(
    "where",
    lambda x: MF.where(x > 0.5, x, x),
    lambda x: torch.where(x > 0.5, x, x),
    [(100, 100)],
    [(64, 512, 16, 16)],
    True,
    1000,
Example #10
    def forward(self, x):
        return F.transpose(x, self.perm)
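The same idea as Example #8, but with the permutation fixed at construction time and stored on the module as self.perm.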