Example #1
    def active_rebuild(self, conv: L.Convolution2D):
        # keep only the output channels whose kernels are not entirely zero
        mask = conv.W.array.sum(axis=(1, 2, 3)) != 0
        self.logger.debug(log_shape(conv.W.array, mask))
        conv.W.array = conv.W.array[mask].copy()

        if conv.b is not None:
            self.logger.debug(log_shape(conv.b.array, mask))
            conv.b.array = conv.b.array[mask].copy()

        return mask
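
A minimal standalone sketch of the same rebuild, assuming Chainer is available; the logger and log_shape plumbing are omitted, and the pruned channels are zeroed by hand:

    import chainer.links as L

    conv = L.Convolution2D(3, 8, ksize=3)
    conv.W.array[[1, 4]] = 0.0  # simulate two pruned output channels

    # same masking logic as active_rebuild above, minus the logging
    mask = conv.W.array.sum(axis=(1, 2, 3)) != 0
    conv.W.array = conv.W.array[mask].copy()
    if conv.b is not None:
        conv.b.array = conv.b.array[mask].copy()

    print(mask.sum(), conv.W.array.shape)  # 6 (6, 3, 3, 3)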
Example #2
    def passive_rebuild(self, linear, mask):
        # affected when the preceding layer is a conv (conv-linear)

        self.logger.debug(log_shape(linear.weight.data, mask))

        input_shape = self.node.input_shape[0]
        if len(input_shape) == 4 and input_shape[1] == len(mask):
            # prev node is conv: conv-fc
            n_out, n_in = linear.weight.shape
            w = linear.weight.data.clone().reshape(n_out, *input_shape[1:])
            w = w[:, mask, :, :]
            w = w.reshape(n_out, -1)
        else:
            # conv-gap-fc, conv-view(flatten)-fc
            n_out, n_in = linear.weight.shape
            pixels_per_channel = input_shape[1] // len(mask)

            assert mask.dim() == 1

            # TODO(tkat0) refactor
            # convert channel mask to pixel-level mask
            flatten_mask = torch.zeros((n_in, ),
                                       dtype=torch.bool,
                                       device=mask.device)
            for i, m in enumerate(mask):
                if bool(m):
                    begin = i * pixels_per_channel
                    end = (i + 1) * pixels_per_channel
                    flatten_mask[begin:end] = True

            w = linear.weight.data[:, flatten_mask].clone()

        linear.weight.data = w
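
The channel-to-pixel expansion in the else branch can also be written without the Python loop; a hedged sketch of an equivalent vectorized form, assuming mask is a 1-D bool tensor:

    import torch

    channel_mask = torch.tensor([True, False, True])  # keep channels 0 and 2
    pixels_per_channel = 4                            # H * W of the conv output

    # repeat each channel entry once per pixel of that channel
    flatten_mask = channel_mask.repeat_interleave(pixels_per_channel)
    print(flatten_mask.tolist())
    # [True, True, True, True, False, False, False, False, True, True, True, True]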
Example #3
    def passive_rebuild(self, conv, mask):
        # for groups == 1 the output channels are unaffected, so the bias stays unchanged
        if conv.groups > 1:
            # Depthwise Convolution: masking dim 0 shrinks the output channels too
            conv.weight.data = conv.weight.data[mask, :, :, :].clone()
            self.logger.debug(log_shape(conv.weight.data, mask))
            in_channels = int(conv.weight.shape[0])
            conv.groups = in_channels
            out_channels = conv.groups * int(conv.weight.shape[1])
            conv.in_channels = in_channels
            conv.out_channels = out_channels
            if conv.bias is not None:
                # the bias must follow the mask here (assumes a channel multiplier of 1)
                conv.bias.data = conv.bias.data[mask].clone()
        else:
            # rebuild ic out of (oc, ic, kh, kw)
            conv.weight.data = conv.weight.data[:, mask, :, :].clone()
            self.logger.debug(log_shape(conv.weight.data, mask))
            in_channels = int(conv.weight.shape[1])
            conv.in_channels = in_channels
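
A hedged usage sketch of the depthwise branch, assuming PyTorch; a groups=8 convolution is shrunk to six channels with a hand-made mask:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(8, 8, kernel_size=3, groups=8)  # depthwise: weight is (8, 1, 3, 3)
    mask = torch.tensor([True] * 6 + [False] * 2)

    conv.weight.data = conv.weight.data[mask, :, :, :].clone()
    conv.groups = conv.in_channels = int(conv.weight.shape[0])
    conv.out_channels = conv.groups * int(conv.weight.shape[1])
    if conv.bias is not None:
        conv.bias.data = conv.bias.data[mask].clone()

    print(conv(torch.randn(1, 6, 8, 8)).shape)  # torch.Size([1, 6, 6, 6])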
Example #4
    def active_rebuild(self, conv: nn.Conv2d):
        assert conv.groups == 1, 'Group Convolution is not supported.'

        # mask_model is not None => special case: prune the conv using the BN of a conv-bn pair
        # drop the output channels whose whole kernel sums to zero and keep the rest
        mask = conv.weight.data.sum(dim=(1, 2, 3)) != 0
        self.logger.debug(log_shape(conv.weight.data, mask))
        conv.weight.data = conv.weight.data[mask].clone()

        if conv.bias is not None:
            self.logger.debug(log_shape(conv.bias.data, mask))
            conv.bias.data = conv.bias.data[mask].clone()

        out_channels = int(conv.weight.shape[0])
        conv.out_channels = out_channels

        return mask
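
A hedged end-to-end sketch, assuming PyTorch, that ties the active and passive rebuilds together: the mask returned for conv1's output channels is reused to shrink conv2's input channels:

    import torch
    import torch.nn as nn

    conv1 = nn.Conv2d(3, 8, kernel_size=3)
    conv2 = nn.Conv2d(8, 16, kernel_size=3)
    with torch.no_grad():
        conv1.weight[[1, 4]] = 0.0  # simulate two pruned output channels

    # active rebuild of conv1 (as above, minus the logging)
    mask = conv1.weight.data.sum(dim=(1, 2, 3)) != 0
    conv1.weight.data = conv1.weight.data[mask].clone()
    conv1.bias.data = conv1.bias.data[mask].clone()
    conv1.out_channels = int(conv1.weight.shape[0])

    # passive rebuild of conv2 with the same mask
    conv2.weight.data = conv2.weight.data[:, mask, :, :].clone()
    conv2.in_channels = int(conv2.weight.shape[1])

    print(conv2(conv1(torch.randn(1, 3, 32, 32))).shape)  # torch.Size([1, 16, 28, 28])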
Example #5
    def _rebuild(self, bn, mask):
        # shrink the BN's scale, shift, and running statistics to the surviving channels
        self.logger.debug(log_shape(bn.gamma.array, mask))
        bn.gamma.array = bn.gamma.array[mask].copy()

        w = bn.beta
        if w is not None:
            bn.beta.array = w.array[mask].copy()

        bn.avg_var = bn.avg_var[mask].copy()
        bn.avg_mean = bn.avg_mean[mask].copy()

        return mask
Example #6
    def _rebuild(self, bn, mask):
        self.logger.debug(log_shape(bn.weight.data, mask))
        bn.weight.data = bn.weight.data[mask].clone()

        w = bn.bias
        if w is not None:
            bn.bias.data = w.data[mask].clone()

        bn.running_mean = bn.running_mean[mask].clone()
        bn.running_var = bn.running_var[mask].clone()

        bn.num_features = len(bn.weight)

        return mask
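
A hedged sketch of driving this rebuild from a conv-bn pair, assuming PyTorch; the channel mask from the convolution is applied to the following BatchNorm in the same way:

    import torch
    import torch.nn as nn

    bn = nn.BatchNorm2d(8)
    mask = torch.tensor([True] * 6 + [False] * 2)

    bn.weight.data = bn.weight.data[mask].clone()
    bn.bias.data = bn.bias.data[mask].clone()
    bn.running_mean = bn.running_mean[mask].clone()
    bn.running_var = bn.running_var[mask].clone()
    bn.num_features = len(bn.weight)

    print(bn.num_features, bn(torch.randn(2, 6, 4, 4)).shape)  # 6 torch.Size([2, 6, 4, 4])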
Example #7
    def passive_rebuild(self, linear, mask):

        self.logger.debug(log_shape(linear.W.array, mask))

        input_shape = self.node.input_shape[0]
        if len(input_shape) == 4 and input_shape[1] == len(mask):
            # conv-fc
            n_out, n_in = linear.W.shape
            w = linear.W.array.copy().reshape(n_out, *input_shape[1:])
            w = w[:, mask, :, :]
            w = w.reshape(n_out, -1)
        else:
            # conv-gap-fc
            w = linear.W.array[:, mask].copy()

        linear.W.array = w
Example #8
    def passive_rebuild(self, conv: L.DepthwiseConvolution2D, mask):
        self.logger.debug(log_shape(conv.W.array, mask))
        conv.W.array = conv.W.array[:, mask].copy()
        if conv.b is not None:
            conv.b.array = conv.b.array[mask].copy()
Example #9
    def passive_rebuild(self, conv, mask):
        # output channels are unaffected, so the bias stays unchanged
        # rebuild ic out of (oc, ic, kh, kw)
        conv.W.array = conv.W.array[:, mask, :, :].copy()
        self.logger.debug(log_shape(conv.W.array, mask))