Example #1
    def forward(self, z, is_training=True, test_local_stats=True):
        batch_norm_args = {'track_running_stats': test_local_stats}

        method = self._method
        # Iterate over the encoder shapes in reverse to build a symmetrical decoder.
        enc_conv_shapes = self._enc_conv_shapes[::-1]
        strides = self._dec_up_strides
        # We store the heights and widths of the encoder feature maps that are
        # unique, i.e., the ones right after a layer with stride != 1. These will be
        # used as a target to potentially crop the upsampled feature maps.
        unique_hw = np.unique([(el[1], el[2]) for el in enc_conv_shapes],
                              axis=0)
        unique_hw = unique_hw.tolist()[::-1]
        unique_hw.pop()  # Drop the initial shape

        layers = [z]

        upsample_mlp_flat = self.dec_mlp(z)
        if self._use_bn:
            upsample_mlp_flat = nn.BatchNorm1d(
                upsample_mlp_flat.shape[1],
                **batch_norm_args)(upsample_mlp_flat)
        layers.append(upsample_mlp_flat)
        upsample = upsample_mlp_flat.view(enc_conv_shapes[0])
        layers.append(upsample)

        for i, (conv_layer,
                stride_i) in enumerate(zip(self._conv_layers, strides), 1):
            if method != 'deconv' and stride_i > 1:
                # Spatially upsample the feature map by the stride before convolving.
                upsample = F.interpolate(upsample,
                                         scale_factor=stride_i,
                                         mode=method)
            padding = get_same_padding(self._kernel_size, upsample.shape,
                                       stride_i)
            upsample_padded = F.pad(upsample, padding, "constant", 0)
            upsample = conv_layer(upsample_padded)
            upsample = self._activation(upsample)
            if self._use_bn:
                upsample = nn.BatchNorm2d(upsample.shape[1],
                                          **batch_norm_args)(upsample)
            if stride_i > 1:
                hw = unique_hw.pop()
                upsample = crop_layer(upsample, hw)
            layers.append(upsample)

        # Final layer, no upsampling.
        padding = get_same_padding(self._kernel_size, upsample.shape, 1)
        upsample_padded = F.pad(upsample, padding, "constant", 0)
        x_logits = self.logits_layer(upsample_padded)
        if self._use_bn:
            x_logits = nn.BatchNorm2d(x_logits.shape[1],
                                      **batch_norm_args)(x_logits)
        layers.append(x_logits)

        logging.info('%s upsampling module layer shapes', self._method_str)
        logging.info('\n'.join([str(v.shape) for v in layers]))

        return x_logits
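This decoder relies on helpers that are not shown on this page, notably get_same_padding(kernel_size, input_shape, stride) and crop_layer. A minimal sketch of what the three-argument padding helper could look like, assuming NCHW tensors and the (left, right, top, bottom) ordering that F.pad expects; the name and signature come from the call sites above, but the body is an assumption, not the original implementation:

import math


def get_same_padding(kernel_size, input_shape, stride):
    # Assumed helper: TensorFlow-style 'SAME' padding for an NCHW tensor,
    # returned in the (left, right, top, bottom) order used by F.pad.
    in_h, in_w = input_shape[2], input_shape[3]
    pad_h = max((math.ceil(in_h / stride) - 1) * stride + kernel_size - in_h, 0)
    pad_w = max((math.ceil(in_w / stride) - 1) * stride + kernel_size - in_w, 0)
    return (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)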
Example #2
def make_model(pool_op, shape, pool_size, strides, padding, dtype, scale,
               zero_point, relu_type):
    """Return a model and any parameters it may have"""
    op = relay.var("input", shape=shape, dtype=dtype)
    pad_ = (0, 0, 0, 0)
    if padding == "SAME":
        dilation = (1, 1)
        pad_ = get_same_padding((shape[1], shape[2]), pool_size, dilation,
                                strides)
        op = relay.nn.pad(
            op,
            pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],
            pad_value=zero_point,
            pad_mode="constant",
        )
    if pool_op == relay.nn.avg_pool2d:
        op = relay.cast(op, "int32")
    op = pool_op(op,
                 pool_size=pool_size,
                 strides=strides,
                 padding=pad_,
                 ceil_mode=True,
                 layout="NHWC")
    if pool_op == relay.nn.avg_pool2d:
        op = relay.cast(op, dtype)
    op = make_qnn_relu(op, relu_type, scale, zero_point, dtype)
    return op
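This model (like Examples #13 and #14 further down) calls a four-argument get_same_padding whose result is consumed by relay.nn.pad as (pad_top, pad_left, pad_bottom, pad_right). A plausible sketch under that assumption; the helper itself is not shown on this page:

def get_same_padding(in_hw, kernel_hw, dilation, strides):
    # Assumed helper: TensorFlow-style 'SAME' padding for NHWC data,
    # returned as (pad_top, pad_left, pad_bottom, pad_right).
    dilated_h = dilation[0] * (kernel_hw[0] - 1) + 1
    dilated_w = dilation[1] * (kernel_hw[1] - 1) + 1
    out_h = (in_hw[0] + strides[0] - 1) // strides[0]  # ceil(in_h / stride_h)
    out_w = (in_hw[1] + strides[1] - 1) // strides[1]  # ceil(in_w / stride_w)
    pad_h = max((out_h - 1) * strides[0] + dilated_h - in_hw[0], 0)
    pad_w = max((out_w - 1) * strides[1] + dilated_w - in_hw[1], 0)
    return (pad_h // 2, pad_w // 2, pad_h - pad_h // 2, pad_w - pad_w // 2)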
Example #3
    def weight_op(self):
        padding = get_same_padding(self.kernel_size)
        if isinstance(padding, int):
            padding *= self.dilation
        else:
            padding[0] *= self.dilation
            padding[1] *= self.dilation

        weight_dict = OrderedDict()
        weight_dict['depth_conv'] = nn.Conv2d(self.in_channels,
                                              self.in_channels,
                                              kernel_size=self.kernel_size,
                                              stride=self.stride,
                                              padding=padding,
                                              dilation=self.dilation,
                                              groups=self.in_channels,
                                              bias=False)
        weight_dict['point_conv'] = nn.Conv2d(self.in_channels,
                                              self.out_channels,
                                              kernel_size=1,
                                              groups=self.groups,
                                              bias=self.bias)
        if self.has_shuffle and self.groups > 1:
            weight_dict['shuffle'] = ShuffleLayer(self.groups)
        return weight_dict
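This layer, like most of the later PyTorch examples, calls a one-argument get_same_padding(kernel_size) and then scales or indexes the result in place, which implies an int return for an int kernel and a mutable [pad_h, pad_w] list for a tuple kernel. A sketch consistent with that usage, assumed rather than copied from the original source:

def get_same_padding(kernel_size):
    # Assumed helper: 'same' padding for a stride-1 convolution.
    # Returns an int for an int kernel size, or a [pad_h, pad_w] list for a
    # tuple, so callers can do `padding[0] *= dilation` in place.
    if isinstance(kernel_size, tuple):
        return [get_same_padding(k) for k in kernel_size]
    assert kernel_size % 2 == 1, 'kernel size should be odd for same padding'
    return kernel_size // 2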
Example #4
    def weight_op(self):
        padding = get_same_padding(self.kernel_size)
        if isinstance(padding, int):
            padding *= self.dilation
        else:
            padding[0] *= self.dilation
            padding[1] *= self.dilation

        weight_dict = OrderedDict()
        # weight_dict['conv'] = nn.Conv2d(
        #     self.in_channels, self.out_channels, kernel_size=self.kernel_size, stride=self.stride, padding=padding,
        #     dilation=self.dilation, groups=self.groups, bias=self.bias
        # )
        weight_dict['conv'] = LsqConv(self.in_channels,
                                      self.out_channels,
                                      kernel_size=self.kernel_size,
                                      quan_name_w='lsq',
                                      quan_name_a='lsq',
                                      nbit_w=self.nbit_w,
                                      nbit_a=self.nbit_a,
                                      stride=self.stride,
                                      padding=padding,
                                      dilation=self.dilation,
                                      groups=self.groups,
                                      bias=self.bias)
        if self.has_shuffle and self.groups > 1:
            weight_dict['shuffle'] = ShuffleLayer(self.groups)

        return weight_dict
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_type,
                 kernel_size=2,
                 stride=2,
                 use_bn=False,
                 act_func=None,
                 dropout_rate=0,
                 ops_order='weight_bn_act'):
        super(PoolingLayer, self).__init__(in_channels, out_channels, use_bn,
                                           act_func, dropout_rate, ops_order)

        self.pool_type = pool_type
        self.kernel_size = kernel_size
        self.stride = stride

        if self.stride == 1:
            # same padding if `stride == 1`
            padding = get_same_padding(self.kernel_size)
        else:
            padding = 0

        if self.pool_type == 'avg':
            self.pool = nn.AvgPool2d(self.kernel_size,
                                     stride=self.stride,
                                     padding=padding,
                                     count_include_pad=False)
        elif self.pool_type == 'max':
            self.pool = nn.MaxPool2d(self.kernel_size,
                                     stride=self.stride,
                                     padding=padding)
        else:
            raise NotImplementedError
Example #6
    def forward(self, X):
        self.N, C, X_n, _ = X.shape
        self.X_shape = X.shape

        if self.same_padding:
            self.padding = utils.get_same_padding(X_n, self.kernel_size,
                                                  self.stride)
        else:
            self.padding = 0

        _, self.new_X_n, Xcol = standfordutils.im2col(X, self.kernel_size,
                                                      self.kernel_size,
                                                      self.padding,
                                                      self.stride)

        self.S = gates.ConvMultGate()
        self.Z = gates.ConvAddGate()
        S_out = self.S.forward(self.W, Xcol)
        Z_out = self.Z.forward(S_out, self.B)
        Z_out = Z_out.reshape(self.N, self.n_kernels, self.new_X_n,
                              self.new_X_n)

        if self.logging:
            print("[+] CONVOLUTION_OUT:", Z_out.shape)

        return Z_out
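Here utils.get_same_padding(X_n, kernel_size, stride) returns a single value that im2col applies symmetrically on both sides of the (square) input. A minimal sketch under that assumption; the formula is inferred from the call site, not taken from the original utils module:

import math


def get_same_padding(input_size, kernel_size, stride):
    # Assumed helper: symmetric padding chosen so the output spatial size is
    # roughly ceil(input_size / stride); exact 'same' behaviour may need an
    # extra asymmetric pixel when the total padding is odd.
    total = max((math.ceil(input_size / stride) - 1) * stride
                + kernel_size - input_size, 0)
    return total // 2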
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=6,
                 *args,
                 **kwargs):
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio

        if self.expand_ratio > 1:
            feature_dim = round(in_channels * self.expand_ratio)
            self.inverted_bottleneck = nn.Sequential(
                OrderedDict({
                    'conv':
                    nn.Conv2d(in_channels, feature_dim, 1, 1, 0, bias=False),
                    'bn':
                    nn.BatchNorm2d(feature_dim),
                    'relu':
                    nn.ReLU6(inplace=True),
                }))
        else:
            feature_dim = in_channels
            self.inverted_bottleneck = None

        # depthwise convolution
        pad = get_same_padding(self.kernel_size)
        self.depth_conv = nn.Sequential(
            OrderedDict({
                'conv':
                nn.Conv2d(feature_dim,
                          feature_dim,
                          kernel_size,
                          stride,
                          pad,
                          groups=feature_dim,
                          bias=False),
                'bn':
                nn.BatchNorm2d(feature_dim),
                'relu':
                nn.ReLU6(inplace=True),
            }))

        # pointwise linear
        self.point_linear = OrderedDict({
            'conv':
            nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False),
            'bn':
            nn.BatchNorm2d(out_channels),
        })

        self.point_linear = nn.Sequential(self.point_linear)
Example #8
    def export(self) -> nn.Module:
        module = getattr(nn, "Conv{}d".format(self._ndim))(
            self.active_in_channels,
            self.active_in_channels,
            self.kernel_size[0],
            stride=self.stride,
            padding=get_same_padding(self.kernel_size[0]) * self.dilation[0],
            dilation=self.dilation,
            groups=self.active_in_channels,
            bias=self.bias is not None,
            padding_mode=self.padding_mode,
        )
        module.load_state_dict(self.active_state_dict())
        return module
Example #9
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.active_in_channels = x.shape[1]
        if self.padding_mode != "zeros":
            raise NotImplementedError
        else:
            active_weight = self.active_weight
            return getattr(F, "conv{}d".format(self._ndim))(
                x,
                active_weight,
                self.active_bias,
                stride=self.stride,
                padding=get_same_padding(int(active_weight.size(2))) * self.dilation[0],
                dilation=self.dilation,
                groups=self.active_in_channels,
            )
Example #10
    def weight_op(self):
        if self.stride == 1:
            # same padding if `stride == 1`
            padding = get_same_padding(self.kernel_size)
        else:
            padding = 0

        weight_dict = OrderedDict()
        if self.pool_type == 'avg':
            weight_dict['pool'] = nn.AvgPool2d(
                self.kernel_size, stride=self.stride, padding=padding, count_include_pad=False
            )
        elif self.pool_type == 'max':
            weight_dict['pool'] = nn.MaxPool2d(self.kernel_size, stride=self.stride, padding=padding)
        else:
            raise NotImplementedError
        return weight_dict
Example #11
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 dilation=1,
                 groups=1,
                 bias=False,
                 has_shuffle=False,
                 use_bn=True,
                 act_func='relu',
                 dropout_rate=0,
                 ops_order='weight_bn_act',
                 *args,
                 **kwargs):
        super(DepthConvLayer, self).__init__(in_channels, out_channels, use_bn,
                                             act_func, dropout_rate, ops_order)

        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.has_shuffle = has_shuffle

        padding = get_same_padding(self.kernel_size)
        if isinstance(padding, int):
            padding *= self.dilation
        else:
            padding[0] *= self.dilation
            padding[1] *= self.dilation
        # `kernel_size`, `stride`, `padding`, and `dilation` can each be an `int` or a `tuple` of ints
        self.depth_conv = nn.Conv2d(in_channels,
                                    in_channels,
                                    kernel_size=self.kernel_size,
                                    stride=self.stride,
                                    padding=padding,
                                    dilation=self.dilation,
                                    groups=in_channels,
                                    bias=False)
        self.point_conv = nn.Conv2d(in_channels,
                                    out_channels,
                                    kernel_size=1,
                                    groups=self.groups,
                                    bias=self.bias)
Example #12
    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False):
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se

        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        if self.expand_ratio == 1:
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        pad = get_same_padding(self.kernel_size)
        depth_conv_modules = [
            ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', build_activation(self.act_func, inplace=True))
        ]
        if self.use_se:
            depth_conv_modules.append(('se', SEModule(feature_dim)))
        self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
        ]))
Example #13
def make_model(
    pool_op,
    shape=(1, 28, 28, 12),
    pool_size=(3, 3),
    strides=(2, 2),
    padding="VALID",
    dtype="int8",
    scale=1,
    zero_point=-33,
    relu_type="RELU",
    layout="NHWC",
):
    """Return a model and any parameters it may have, all parameters are defaulted to known good values"""
    op = relay.var("input", shape=shape, dtype=dtype)
    pad_ = (0, 0, 0, 0)
    if padding == "SAME":
        dilation = (1, 1)
        pad_ = get_same_padding((shape[1], shape[2]), pool_size, dilation,
                                strides)
        op = relay.nn.pad(
            op,
            pad_width=[(0, 0), (pad_[0], pad_[2]), (pad_[1], pad_[3]), (0, 0)],
            pad_value=zero_point,
            pad_mode="constant",
        )
    if pool_op == relay.nn.avg_pool2d:
        op = relay.cast(op, "int32")
    op = pool_op(op,
                 pool_size=pool_size,
                 strides=strides,
                 padding=pad_,
                 ceil_mode=True,
                 layout=layout)
    if pool_op == relay.nn.avg_pool2d:
        op = relay.cast(op, dtype)
    op = make_qnn_relu(op, relu_type, scale, zero_point, dtype)
    return op
Example #14
def make_model(
    shape,
    kernel_shape,
    input_zero_point,
    input_scale,
    kernel_zero_point,
    kernel_scale,
    output_zero_point,
    output_scale,
    padding,
    strides,
    dilation,
    groups,
    dtype,
    kernel_dtype,
    out_channels,
    weight_format,
    enable_bias,
    relu_type,
):
    """Return a model and any parameters it may have"""
    h_index = weight_format.index("H")
    w_index = weight_format.index("W")
    kernel_h = kernel_shape[h_index]
    kernel_w = kernel_shape[w_index]
    a = relay.var("input", shape=shape, dtype=dtype)
    p = (0, 0, 0, 0)
    if padding == "SAME":
        p = get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w),
                             dilation, strides)
        a = relay.nn.pad(
            a,
            pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],
            pad_value=input_zero_point,
            pad_mode="constant",
        )
        shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3],
                 shape[3])

    weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)
    rng = np.random.default_rng(12321)
    w = tvm.nd.array(
        rng.integers(
            np.iinfo(kernel_dtype).min,
            high=np.iinfo(kernel_dtype).max,
            size=weight_shape,
            dtype=kernel_dtype,
        ))
    weight_const = relay.const(w, kernel_dtype)
    conv = relay.qnn.op.conv2d(
        a,
        weight_const,
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        kernel_size=(kernel_h, kernel_w),
        data_layout="NHWC",
        kernel_layout=weight_format,
        dilation=dilation,
        strides=strides,
        groups=groups,
        channels=out_channels,
        padding=p,
        out_dtype="int32",
    )
    b = tvm.nd.array(
        rng.integers(0, high=10, size=(out_channels, ), dtype="int32"))
    bias_const = relay.const(b, "int32")
    last_op = relay.nn.bias_add(conv, bias_const,
                                axis=3) if enable_bias else conv
    requant_input_sc = [sc * input_scale for sc in kernel_scale]
    last_op = relay.qnn.op.requantize(
        last_op,
        relay.const(requant_input_sc, "float32"),
        relay.const(0, "int32"),
        relay.const(output_scale, "float32"),
        relay.const(output_zero_point, "int32"),
        out_dtype=dtype,
    )
    last_op = make_qnn_relu(last_op, relu_type, output_scale,
                            output_zero_point, dtype)
    params = {"w": w, "b": b}
    return last_op, params
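Examples #2, #13 and #14 all finish with make_qnn_relu, which is also not shown on this page. A plausible sketch, assuming it emulates a fused ReLU/ReLU6 on quantized data by clipping to the corresponding integer range; the accepted relu_type strings are assumptions based on the call sites:

import numpy as np
from tvm import relay


def make_qnn_relu(op, relu_type, scale, zero_point, dtype):
    # Assumed helper: clip the quantized tensor to the range a fused
    # activation would allow.
    if relu_type == "NONE":
        return op
    qmin, qmax = np.iinfo(dtype).min, np.iinfo(dtype).max
    if relu_type == "RELU":
        return relay.clip(op, a_min=max(qmin, zero_point), a_max=qmax)
    if relu_type == "RELU6":
        return relay.clip(op, a_min=max(qmin, zero_point),
                          a_max=min(qmax, zero_point + round(6.0 / scale)))
    raise ValueError("Unsupported relu_type: {}".format(relu_type))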