Example 1
    def forward(self, x):
        if x.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x.shape[-2:], self.padding,
                                          self.dilation, (3, 3), self.stride)
            ]
            output_shape = [x.shape[0], self.weight.shape[0]
                            ] + output_shape + [4]
            return _NewEmptyTensorOp.apply(x, output_shape)

        x_1 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel,
                           0, self.stride, self.padding, self.dilation).sum(0)
        x_2 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel,
                           2, self.stride, self.padding, self.dilation).sum(0)
        x_3 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel,
                           4, self.stride, self.padding, self.dilation).sum(0)
        x_4 = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel,
                           6, self.stride, self.padding, self.dilation).sum(0)

        if self.norm is not None:
            x_1 = self.norm(x_1)
            x_2 = self.norm(x_2)
            x_3 = self.norm(x_3)
            x_4 = self.norm(x_4)

        x_out = torch.stack([x_1, x_2, x_3, x_4], dim=4)

        if self.activation is not None:
            x_out = self.activation(x_out)

        return x_out
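All of these forwards lean on _NewEmptyTensorOp to produce an empty output with an autograd-friendly shape. For reference, a minimal sketch of such an op, modeled on the detectron2 wrapper that Example 11 imports (treat this as an illustration, not the canonical source):

import torch

class _NewEmptyTensorOp(torch.autograd.Function):
    """Create an empty tensor of a requested shape without breaking autograd."""

    @staticmethod
    def forward(ctx, x, new_shape):
        # Remember the input shape so backward can return a
        # correctly shaped (empty) gradient.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        shape = ctx.shape
        # No gradient w.r.t. new_shape (it is not a tensor).
        return _NewEmptyTensorOp.apply(grad, shape), None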
Example 2
    def forward(self, x, grid_size):
        if x.numel() == 0:
            # When the input is empty, return an empty tensor with the
            # "correct" shape, so that downstream ops that check tensor
            # shapes do not fail. Here the output height and width come
            # directly from grid_size.
            output_shape = [x.shape[0], self.weight.shape[0], grid_size[0], grid_size[1]]
            return _NewEmptyTensorOp.apply(x, output_shape)

        if self.offset_std is not None:
            offset = (self.offset_std *
                      torch.randn([x.shape[0], 2 * x.shape[1], grid_size[1], grid_size[0]],
                                  device=x.device)).clamp(min=-0.5, max=0.5)
        else:
            offset = torch.zeros([x.shape[0], 2 * x.shape[1], grid_size[1], grid_size[0]],
                                 device=x.device)

        x = continuous_conv(
            x,
            self.weight,
            offset,
            grid_size,
            self.shift,
            self.dilation,
            self.groups,
        )
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
Example 3
    def forward(self, x, step=0):
        if x.numel() == 0:
            # When the input is empty, return an empty tensor with the
            # "correct" shape, so that downstream ops that check tensor
            # shapes do not fail.
            # This computes the height and width of the output tensor.
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x.shape[-2:], self.padding,
                                          self.dilation, (3, 3), self.stride)
            ]
            out_channels = self.weight.shape[0]
            if self.spatial:
                out_channels = out_channels * self.num_kernel
            output_shape = [x.shape[0], out_channels] + output_shape
            return _NewEmptyTensorOp.apply(x, output_shape)

        x = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel,
                         step, self.stride, self.padding, self.dilation)
        if self.spatial:
            out = []
            for idx in range(x.size(0)):
                out.append(x[idx, :, :, :, :])
            x = torch.cat(out, dim=1)
        else:
            x = torch.sum(x, dim=0)

        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
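The spatial branch above flattens the leading kernel dimension into channels, while the non-spatial branch sums it away. A minimal shape sketch of the two reductions, assuming the [num_kernel, N, C, H, W] layout implied by the indexing (the sizes below are arbitrary):

import torch

x = torch.randn(4, 2, 8, 5, 5)                      # [num_kernel, N, C, H, W]
flat = torch.cat(list(torch.unbind(x, dim=0)), dim=1)
assert flat.shape == (2, 32, 5, 5)                  # channels become C * num_kernel
summed = torch.sum(x, dim=0)
assert summed.shape == (2, 8, 5, 5)                 # kernel dimension reduced away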
Example 4
    def forward(self, x):
        if x.numel() == 0:
            # When the input is empty, return an empty tensor with the
            # "correct" shape, so that downstream ops that check tensor
            # shapes do not fail.
            # This computes the height and width of the output tensor.
            output_shape = [(i + 2 * p - (di * (k - 1) + 1)) // s + 1
                            for i, p, di, k, s in
                            zip(x.shape[-2:], self.padding, self.dilation,
                                self.kernel_size, self.stride)]
            output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
            empty = _NewEmptyTensorOp.apply(x, output_shape)
            if self.training:
                # https://github.com/pytorch/pytorch/issues/12013
                assert not isinstance(
                    self.norm, torch.nn.SyncBatchNorm
                ), "SyncBatchNorm does not support empty inputs!"

                # This is to make DDP happy.
                # DDP expects all workers to have gradient w.r.t the same set of parameters.
                _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + _dummy
            else:
                return empty

        x = super().forward(x)
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
Example 5
def _empty_tensor(self, x, output_shape):
    empty = _NewEmptyTensorOp.apply(x, output_shape)
    if self.training:
        # This is to make DDP happy.
        # DDP expects all workers to have gradient w.r.t the same set of parameters.
        _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
        return empty + _dummy
    else:
        return empty
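Example 5 factors the empty-input-plus-DDP-dummy path into a reusable helper. A hypothetical module using it might look like the sketch below; EmptySafeConv and the way the helper is attached as a method are illustrative assumptions, not from the source:

import torch.nn as nn

class EmptySafeConv(nn.Conv2d):
    # Hypothetical: reuse the module-level helper as a bound method.
    _empty_tensor = _empty_tensor

    def forward(self, x):
        if x.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x.shape[-2:], self.padding,
                                          self.dilation, self.kernel_size,
                                          self.stride)
            ]
            output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
            return self._empty_tensor(x, output_shape)
        return super().forward(x)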
Example 6
    def forward(self, x_tensor):
        # If x_tensor is empty, return an empty tensor of the right shape.
        if x_tensor.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x_tensor.shape[-2:], self.padding,
                                          self.dilation, (3, 3), self.stride)
            ]
            output_shape = [
                x_tensor.shape[0], self.weight.shape[0], self.kernel_rot
            ] + output_shape
            return _NewEmptyTensorOp.apply(x_tensor, output_shape)

        step = int(8 / self.kernel_rot)

        if not self.rot_1x1_in and not self.is_first:
            x_tensor = torch.cat(
                [x_tensor[:, :, idx, :, :] for idx in range(self.kernel_rot)],
                dim=1)

        rot_out = []
        for rot in range(self.kernel_rot):
            if self.is_first:
                x_rot = x_tensor
                weight = self.weight
            elif self.rot_1x1_in:
                x_rot = x_tensor[:, :, rot, :, :]
                weight = self.weight
            else:
                x_rot = x_tensor
                weight = torch.cat([
                    self.weight[:, :, int((idx - rot) % self.kernel_rot), :]
                    for idx in range(self.kernel_rot)
                ], dim=1)

            x_rot = pr_conv(x_rot, weight, self.kernel_type, self.num_kernel,
                            step * rot, self.stride, self.padding,
                            self.dilation)

            # batch norm is shared across all rotations.
            if self.norm is not None:
                x_rot = self.norm(x_rot)

            rot_out.append(x_rot)

        x_out = torch.stack(rot_out, dim=2)

        if self.activation is not None:
            x_out = self.activation(x_out)
        return x_out
Example 7
    def forward(self, x):
        if x.numel() > 0:
            return super(MaxPool2d, self).forward(x)
        # get output shape
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // s + 1
            for i, p, di, k, s in zip(x.shape[-2:], self.padding,
                                      self.dilation, self.kernel_size,
                                      self.stride)
        ]
        output_shape = [x.shape[0], x.shape[1]] + output_shape
        # This is to make DDP happy.
        # DDP expects all workers to have gradient w.r.t the same set of parameters.
        _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
        return _NewEmptyTensorOp.apply(x, output_shape) + _dummy
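The output_shape comprehension used throughout is the standard convolution/pooling size formula, floor((i + 2p - (d(k - 1) + 1)) / s) + 1 per spatial dimension. A quick numeric check against PyTorch itself (the sizes here are arbitrary):

import torch
import torch.nn as nn

# input 7x7, padding 1, dilation 1, kernel 3, stride 2
i, p, di, k, s = 7, 1, 1, 3, 2
expected = (i + 2 * p - (di * (k - 1) + 1)) // s + 1   # (7 + 2 - 3) // 2 + 1 = 4

pool = nn.MaxPool2d(kernel_size=k, stride=s, padding=p, dilation=di)
out = pool(torch.randn(1, 1, i, i))
assert out.shape[-2:] == (expected, expected)          # 4 x 4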
Example 8
    def forward(self, x, offset, grid_size, shift=0):
        if x.numel() == 0:
            # When the input is empty, return an empty tensor with the
            # "correct" shape, so that downstream ops that check tensor
            # shapes do not fail. Here the output height and width come
            # directly from grid_size.
            output_shape = [x.shape[0], x.shape[1], grid_size[1], grid_size[0]]
            return _NewEmptyTensorOp.apply(x, output_shape)

        x = defem_layer(x, offset, grid_size, _pair(shift))
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
Example 9
    def forward(self, x_tensor):
        if x_tensor.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x_tensor.shape[-2:], self.padding,
                                          self.dilation, (3, 3), self.stride)
            ]
            output_shape = [x_tensor.shape[0], self.weight.shape[0]
                            ] + output_shape + [4]
            return _NewEmptyTensorOp.apply(x_tensor, output_shape)

        if self.with_1x1:
            x_1 = x_tensor[:, :, :, :, 0]
            x_2 = x_tensor[:, :, :, :, 1]
            x_3 = x_tensor[:, :, :, :, 2]
            x_4 = x_tensor[:, :, :, :, 3]
        else:
            x_in = [
                x_tensor[:, :, :, :, 0], x_tensor[:, :, :, :, 1],
                x_tensor[:, :, :, :, 2], x_tensor[:, :, :, :, 3]
            ]
            x_1 = torch.cat([x_in[0], x_in[1], x_in[2], x_in[3]], dim=1)
            x_2 = torch.cat([x_in[1], x_in[2], x_in[3], x_in[0]], dim=1)
            x_3 = torch.cat([x_in[2], x_in[3], x_in[0], x_in[1]], dim=1)
            x_4 = torch.cat([x_in[3], x_in[0], x_in[1], x_in[2]], dim=1)

        x_1 = spatial_conv(x_1, self.weight, self.kernel_type, self.num_kernel,
                           0, self.stride, self.padding, self.dilation).sum(0)
        x_2 = spatial_conv(x_2, self.weight, self.kernel_type, self.num_kernel,
                           2, self.stride, self.padding, self.dilation).sum(0)
        x_3 = spatial_conv(x_3, self.weight, self.kernel_type, self.num_kernel,
                           4, self.stride, self.padding, self.dilation).sum(0)
        x_4 = spatial_conv(x_4, self.weight, self.kernel_type, self.num_kernel,
                           6, self.stride, self.padding, self.dilation).sum(0)

        if self.norm is not None:
            x_1 = self.norm(x_1)
            x_2 = self.norm(x_2)
            x_3 = self.norm(x_3)
            x_4 = self.norm(x_4)

        x_out = torch.stack([x_1, x_2, x_3, x_4], dim=4)

        if self.activation is not None:
            x_out = self.activation(x_out)

        return x_out
Example 10
    def forward(self, x):
        if x.numel() == 0:
            # When the input is empty, return an empty tensor with the
            # "correct" shape, so that downstream ops that check tensor
            # shapes do not fail. The output keeps the input's height
            # and width.
            output_shape = [x.shape[0], self.weight.shape[0], x.shape[2], x.shape[3]]
            return _NewEmptyTensorOp.apply(x, output_shape)

        x = skeleton_conv(
            x,
            self.weight,
            self.dilation,
            self.step
        )
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
Example 11
    def forward(self, x):
        if x.numel() > 0:
            if not self.with_modulated_dcn:
                offset = self.offset(x)
                x = self.conv(x, offset)
            else:
                offset_mask = self.offset(x)
                # 3x3 deformable kernel: 18 offset channels, 9 mask channels.
                offset = offset_mask[:, :18, :, :]
                mask = offset_mask[:, -9:, :, :].sigmoid()
                x = self.conv(x, offset, mask)
            return x
        # get output shape
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // s + 1
            for i, p, di, k, s in zip(x.shape[-2:], self.padding,
                                      self.dilation, self.kernel_size,
                                      self.stride)
        ]
        from detectron2.layers.wrappers import _NewEmptyTensorOp
        output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
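The 18/9 channel split in the modulated branch corresponds to a 3x3 deformable kernel: 2*k*k offset channels (an (x, y) displacement per sampling point) plus k*k modulation-mask channels. A quick sanity check of that arithmetic, assuming a single deformable group:

k = 3                          # kernel size of the deformable conv above
offset_channels = 2 * k * k    # 18: (dx, dy) for each of the 9 sampling points
mask_channels = k * k          # 9: one sigmoid-gated mask per sampling point
assert offset_channels == 18 and mask_channels == 9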
Example 12
    def forward(self, x):
        if x.numel() == 0:
            # When the input is empty, return an empty tensor with the
            # "correct" shape, so that downstream ops that check tensor
            # shapes do not fail. The rotation dimension (8) sits at dim 2,
            # matching the torch.stack(out, dim=2) below.
            output_shape = [x.shape[0], self.weight.shape[0], 8, x.shape[2], x.shape[3]]
            return _NewEmptyTensorOp.apply(x, output_shape)

        out = []
        for idx in range(8):
            x_tmp = skeleton_conv(x, self.weight, self.dilation, idx)
            if self.norm is not None:
                x_tmp = self.norm(x_tmp)
            out.append(x_tmp)

        x_out = torch.stack(out, dim=2)
        if self.activation is not None:
            x_out = self.activation(x_out)

        return x_out
Example 13
    def forward(self, inputs):
        num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
        assert len(inputs) == num_branch

        if inputs[0].numel() == 0:
            output_shape = [(i + 2 * p - (di * (k - 1) + 1)) // s + 1
                            for i, p, di, k, s in
                            zip(inputs[0].shape[-2:], self.padding,
                                self.dilation, self.kernel_size, self.stride)]
            output_shape = [inputs[0].shape[0], self.weight.shape[0]
                            ] + output_shape
            return [
                _NewEmptyTensorOp.apply(input, output_shape)
                for input in inputs
            ]

        if self.training or self.test_branch_idx == -1:
            outputs = [
                F.conv2d(input, self.weight, self.bias, self.stride, padding,
                         dilation, self.groups) for input, dilation, padding in
                zip(inputs, self.dilations, self.paddings)
            ]
        else:
            outputs = [
                F.conv2d(
                    inputs[0],
                    self.weight,
                    self.bias,
                    self.stride,
                    self.paddings[self.test_branch_idx],
                    self.dilations[self.test_branch_idx],
                    self.groups,
                )
            ]

        if self.norm is not None:
            outputs = [self.norm(x) for x in outputs]
        if self.activation is not None:
            outputs = [self.activation(x) for x in outputs]
        return outputs
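Each branch in Example 13 pairs its own padding with its own dilation; for a 3x3 kernel, choosing padding equal to dilation keeps every branch's output at the same spatial size, which is what lets the branches be handled as a parallel list. A minimal check of that property (the sizes here are arbitrary):

import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 16, 16)
w = torch.randn(8, 4, 3, 3)
# padding == dilation keeps 3x3 conv outputs the same size as the input
outs = [F.conv2d(x, w, stride=1, padding=d, dilation=d) for d in (1, 2, 3)]
assert all(o.shape == (1, 8, 16, 16) for o in outs)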
Example 14
    def forward(self, inputs):
        num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1
        assert len(inputs) == num_branch

        if inputs[0].numel() == 0:
            output_shape = [(i + 2 * p - (di * (k - 1) + 1)) // s + 1
                            for i, p, di, k, s in
                            zip(inputs[0].shape[-2:], self.padding,
                                self.dilation, self.kernel_size, self.stride)]
            output_shape = [inputs[0].shape[0], self.weight.shape[0]
                            ] + output_shape
            return [
                _NewEmptyTensorOp.apply(input, output_shape)
                for input in inputs
            ]

        if self.training or self.test_branch_idx == -1:
            outputs = [
                self.gconv(input,
                           weight=self.weight,
                           bias=self.bias,
                           stride=self.stride,
                           padding=padding,
                           dilation=dilation,
                           bn=[self.norm],
                           act=self.activation) for input, dilation, padding in
                zip(inputs, self.dilations, self.paddings)
            ]
        else:
            outputs = [
                self.gconv(inputs[0],
                           weight=self.weight,
                           bias=self.bias,
                           stride=self.stride,
                           padding=self.paddings[self.test_branch_idx],
                           dilation=self.dilations[self.test_branch_idx],
                           bn=[self.norm],
                           act=self.activation)
            ]
        return outputs
Example 15
    def forward(self, x):
        # If x is empty, return an empty tensor of the right shape.
        if x.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x.shape[-2:], self.padding,
                                          self.dilation, (3, 3), self.stride)
            ]
            if self.spatial:
                output_shape = [
                    x.shape[0], self.weight.shape[0] * self.num_kernel
                ] + output_shape
            else:
                output_shape = [x.shape[0], self.weight.shape[0]
                                ] + output_shape
            return _NewEmptyTensorOp.apply(x, output_shape)

        x = spatial_conv(x, self.weight, self.kernel_type, self.num_kernel, 0,
                         self.stride, self.padding, self.dilation)

        # concat or sum.
        if self.spatial:
            spatial_out = []
            for idx in range(x.size(0)):
                spatial_out.append(x[idx, :, :, :, :])
            x = torch.cat(spatial_out, dim=1)
        else:
            x = torch.sum(x, dim=0)

        # batch norm is shared across all rotations.
        if self.norm is not None:
            x = self.norm(x)

        if self.activation is not None:
            x = self.activation(x)
        return x
Example 16
    def forward(self, x_tensor, rot):
        # If x_tensor is empty, return an empty tensor of the right shape.
        if x_tensor.numel() == 0:
            output_shape = [
                (i + 2 * p - (di * (k - 1) + 1)) // s + 1
                for i, p, di, k, s in zip(x_tensor.shape[-2:], self.padding,
                                          self.dilation, (3, 3), self.stride)
            ]
            output_shape = [x_tensor.shape[0], self.weight.shape[0]
                            ] + output_shape
            return _NewEmptyTensorOp.apply(x_tensor, output_shape)

        step = int(8 / self.kernel_rot)

        x_rot = pr_conv(x_tensor, self.weight, self.kernel_type,
                        self.num_kernel, step * rot, self.stride, self.padding,
                        self.dilation)

        # batch norm is shared across all rotations.
        if self.norm is not None:
            x_rot = self.norm(x_rot)
        if self.activation is not None:
            x_rot = self.activation(x_rot)
        return x_rot
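Examples 6 and 16 space kernel_rot rotations evenly over 8 discrete orientations via step = int(8 / self.kernel_rot); with kernel_rot = 4 this yields steps 0, 2, 4, 6, matching the hard-coded rotation arguments in Examples 1 and 9. A one-line check:

kernel_rot = 4
step = int(8 / kernel_rot)
assert [step * rot for rot in range(kernel_rot)] == [0, 2, 4, 6]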