def __init__(self,
             in_c,
             out_c,
             filter_size,
             stride,
             padding,
             dilation=1,
             num_groups=1,
             if_act=True,
             act=None):
    super(ConvBNLayer, self).__init__()
    self.if_act = if_act
    self.act = act
    self.conv = nn.Conv2D(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=filter_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=num_groups,
        bias_attr=False)
    self.bn = layers.SyncBatchNorm(
        num_features=out_c,
        weight_attr=paddle.ParamAttr(
            regularizer=paddle.regularizer.L2Decay(0.0)),
        bias_attr=paddle.ParamAttr(
            regularizer=paddle.regularizer.L2Decay(0.0)))
    # Honor the requested activation; the original hard-coded 'hardswish'
    # here, silently ignoring the `act` argument stored above.
    self._act_op = layers.Activation(act=act)
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             dilation=1,
             groups=1,
             is_vd_mode=False,
             act=None):
    super(ConvBNLayer, self).__init__()
    self.is_vd_mode = is_vd_mode
    self._pool2d_avg = nn.AvgPool2D(
        kernel_size=2, stride=2, padding=0, ceil_mode=True)
    self._conv = nn.Conv2D(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=(kernel_size - 1) // 2 if dilation == 1 else 0,
        dilation=dilation,
        groups=groups,
        bias_attr=False)
    self._batch_norm = layers.SyncBatchNorm(out_channels)
    self._act_op = layers.Activation(act=act)
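# Usage sketch for the layer above (assumptions: the enclosing class is the
# ResNet-vd style ConvBNLayer, and its forward runs the 2x2 average pool when
# is_vd_mode=True before conv -> sync batch norm -> activation; the forward
# itself is not part of this snippet). In vd mode the average pool does the
# downsampling, so the conv can stay at stride=1 instead of using the
# information-discarding stride-2 1x1 conv of the original ResNet shortcut.
import paddle

x = paddle.randn([1, 64, 56, 56])
short = ConvBNLayer(
    in_channels=64, out_channels=256, kernel_size=1, stride=1,
    is_vd_mode=True, act=None)
y = short(x)  # expected shape: [1, 256, 28, 28] under the assumed forward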
def __init__(self, out_channels, **kwargs):
    super().__init__()
    if 'data_format' in kwargs:
        data_format = kwargs['data_format']
    else:
        data_format = 'NCHW'
    self._batch_norm = layers.SyncBatchNorm(
        out_channels, data_format=data_format)
    self._prelu = layers.Activation("prelu")
def __init__(self, in_channels, out_channels):
    super().__init__()
    self.conv = nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        dilation=1)
    self._relu = layers.Activation("relu")
    self._max_pool = nn.MaxPool2D(kernel_size=2, stride=2)
def __init__(self, in_dim, out_dim, expand):
    super().__init__()
    expand_dim = expand * in_dim
    self.conv = nn.Sequential(
        layers.ConvBNReLU(in_dim, in_dim, 3),
        layers.DepthwiseConvBN(in_dim, expand_dim, 3),
        layers.ConvBN(expand_dim, out_dim, 1))
    self.relu = layers.Activation("relu")
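# This matches the gather-and-expansion pattern (BiSeNet V2 style): a 3x3
# ConvBNReLU, a depthwise 3x3 that widens channels by `expand`, and a 1x1
# projection back down. A hedged sketch of the residual forward such a block
# typically uses (not shown in the snippet above); note the skip connection
# requires out_dim == in_dim:
def forward(self, x):  # assumed forward, for illustration only
    return self.relu(self.conv(x) + x)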
def __init__(self,
             input_channels,
             output_channels,
             stride,
             filter,
             dilation=1,
             act=None,
             name=None):
    super(Seperate_Conv, self).__init__()
    # depthwise conv: groups == in_channels
    self._conv1 = nn.Conv2D(
        in_channels=input_channels,
        out_channels=input_channels,
        kernel_size=filter,
        stride=stride,
        groups=input_channels,
        padding=(filter) // 2 * dilation,
        dilation=dilation,
        bias_attr=False)
    self._bn1 = layers.SyncBatchNorm(
        input_channels, epsilon=1e-3, momentum=0.99)
    self._act_op1 = layers.Activation(act=act)
    # pointwise 1x1 conv
    self._conv2 = nn.Conv2D(
        input_channels,
        output_channels,
        1,
        stride=1,
        groups=1,
        padding=0,
        bias_attr=False)
    self._bn2 = layers.SyncBatchNorm(
        output_channels, epsilon=1e-3, momentum=0.99)
    self._act_op2 = layers.Activation(act=act)
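# Worked parameter count for the depthwise-separable split above (a sketch;
# counts assume bias-free convs, matching bias_attr=False, and ignore the BN
# parameters). For input_channels=C_in, output_channels=C_out, filter=k:
#   _conv1 (depthwise, groups=C_in): C_in * k * k weights
#   _conv2 (pointwise 1x1):          C_in * C_out weights
# versus C_in * C_out * k * k for a single dense kxk conv.
C_in, C_out, k = 128, 256, 3
separable = C_in * k * k + C_in * C_out  # 1152 + 32768 = 33920
dense = C_in * C_out * k * k             # 294912
print(dense / separable)                 # ~8.7x fewer conv weights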
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             padding='same',
             **kwargs):
    super().__init__()
    self._conv = nn.Conv2D(
        in_channels, out_channels, kernel_size, padding=padding, **kwargs)
    if 'data_format' in kwargs:
        data_format = kwargs['data_format']
    else:
        data_format = 'NCHW'
    self._batch_norm = SyncBatchNorm(out_channels, data_format=data_format)
    self._relu = layers.Activation("relu")
def __init__(self,
             num_channels,
             num_filters,
             has_se,
             stride=1,
             downsample=False,
             name=None,
             padding_same=True):
    super(BottleneckBlock, self).__init__()
    self.has_se = has_se
    self.downsample = downsample
    self.conv1 = layers.ConvBNReLU(
        in_channels=num_channels,
        out_channels=num_filters,
        kernel_size=1,
        bias_attr=False)
    self.conv2 = layers.ConvBNReLU(
        in_channels=num_filters,
        out_channels=num_filters,
        kernel_size=3,
        stride=stride,
        padding=1 if not padding_same else 'same',
        bias_attr=False)
    self.conv3 = layers.ConvBN(
        in_channels=num_filters,
        out_channels=num_filters * 4,
        kernel_size=1,
        bias_attr=False)
    if self.downsample:
        self.conv_down = layers.ConvBN(
            in_channels=num_channels,
            out_channels=num_filters * 4,
            kernel_size=1,
            bias_attr=False)
    if self.has_se:
        self.se = SELayer(
            num_channels=num_filters * 4,
            num_filters=num_filters * 4,
            reduction_ratio=16,
            name=name + '_fc')
    self.add = layers.Add()
    self.relu = layers.Activation("relu")
def __init__(self,
             in_channels,
             out_channels,
             stride,
             shortcut=True,
             if_first=False,
             dilation=1,
             data_format='NCHW'):
    super(BottleneckBlock, self).__init__()
    self.data_format = data_format
    self.conv0 = ConvBNLayer(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        act='relu',
        data_format=data_format)
    self.dilation = dilation
    self.conv1 = ConvBNLayer(
        in_channels=out_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        act='relu',
        dilation=dilation,
        data_format=data_format)
    self.conv2 = ConvBNLayer(
        in_channels=out_channels,
        out_channels=out_channels * 4,
        kernel_size=1,
        act=None,
        data_format=data_format)
    if not shortcut:
        self.short = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels * 4,
            kernel_size=1,
            stride=1,
            is_vd_mode=False if if_first or stride == 1 else True,
            data_format=data_format)
    self.shortcut = shortcut
    # NOTE: Use the wrap layer for quantization training
    self.add = layers.Add()
    self.relu = layers.Activation(act="relu")
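# Hedged sketch of the residual wiring these attributes imply (the real
# forward is not part of the snippet): a 1x1 reduce, a 3x3 that may be
# strided or dilated, and a 1x1 expand to 4x channels, joined with the
# projection shortcut through the wrapped Add so quantization passes can
# observe the addition.
def forward(self, inputs):  # assumed forward, for illustration only
    y = self.conv0(inputs)
    y = self.conv1(y)
    y = self.conv2(y)
    short = inputs if self.shortcut else self.short(inputs)
    return self.relu(self.add(short, y))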
def __init__(self, out_dim):
    super().__init__()
    self.db_branch_keep = nn.Sequential(
        layers.DepthwiseConvBN(out_dim, out_dim, 3),
        nn.Conv2D(out_dim, out_dim, 1))
    self.db_branch_down = nn.Sequential(
        layers.ConvBN(out_dim, out_dim, 3, stride=2),
        nn.AvgPool2D(kernel_size=3, stride=2, padding=1))
    self.sb_branch_keep = nn.Sequential(
        layers.DepthwiseConvBN(out_dim, out_dim, 3),
        nn.Conv2D(out_dim, out_dim, 1),
        layers.Activation(act='sigmoid'))
    self.sb_branch_up = layers.ConvBN(out_dim, out_dim, 3)
    self.conv = layers.ConvBN(out_dim, out_dim, 3)
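# The attributes above follow the bilateral guided aggregation pattern
# (BiSeNet V2): the detail branch (db_*) keeps full resolution and also
# downsamples, the semantic branch (sb_*) produces sigmoid gates, and each
# branch is modulated by the other before fusion. A hedged sketch of the
# forward this implies (align_corners handling omitted; none of this forward
# appears in the snippet):
import paddle
import paddle.nn.functional as F

def forward(self, dfm, sfm):  # assumed forward, for illustration only
    db_keep = self.db_branch_keep(dfm)   # detail features, full res
    db_down = self.db_branch_down(dfm)   # detail features, downsampled
    sb_keep = self.sb_branch_keep(sfm)   # semantic gate (ends in sigmoid)
    sb_up = F.interpolate(
        self.sb_branch_up(sfm), paddle.shape(db_keep)[2:], mode='bilinear')
    db = db_keep * F.sigmoid(sb_up)      # gate detail by semantics
    sb = db_down * sb_keep               # gate semantics by detail
    sb = F.interpolate(sb, paddle.shape(db)[2:], mode='bilinear')
    return self.conv(db + sb)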
def __init__(self,
             input_channels,
             output_channels,
             filter_size,
             stride=1,
             padding=0,
             act=None,
             name=None):
    super(ConvBNLayer, self).__init__()
    self._conv = nn.Conv2D(
        in_channels=input_channels,
        out_channels=output_channels,
        kernel_size=filter_size,
        stride=stride,
        padding=padding,
        bias_attr=False)
    self._bn = layers.SyncBatchNorm(
        num_features=output_channels, epsilon=1e-3, momentum=0.99)
    self._act_op = layers.Activation(act=act)
def __init__(self, inplanes, out_channels, dilation_series, padding_series,
             num_classes):
    super(edge_branch, self).__init__()
    self.conv_x1 = nn.Conv2D(inplanes[0], 512, kernel_size=3)
    self.conv_x4 = nn.Conv2D(inplanes[1], 512, kernel_size=3)
    self.conv0 = resnet_vd.ConvBNLayer(
        in_channels=512 * 2,
        out_channels=out_channels,
        kernel_size=3,
        act='relu')
    self.conv1 = resnet_vd.ConvBNLayer(
        in_channels=out_channels,
        out_channels=out_channels,
        kernel_size=3,
        act=None)
    self.add = layers.Add()
    self.relu = layers.Activation(act="relu")
    self.conv2d_list = nn.LayerList()
    for dilation, padding in zip(dilation_series, padding_series):
        weight_attr = paddle.ParamAttr(
            initializer=nn.initializer.Normal(std=0.01), learning_rate=10.0)
        bias_attr = paddle.ParamAttr(
            initializer=nn.initializer.Constant(value=0.0),
            learning_rate=10.0)
        self.conv2d_list.append(
            nn.Conv2D(
                out_channels,
                num_classes,
                kernel_size=3,
                stride=1,
                padding=padding,
                dilation=dilation,
                weight_attr=weight_attr,
                bias_attr=bias_attr))
    self.classifier = nn.Conv2D(
        out_channels, num_classes, kernel_size=3, stride=1)
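# The conv2d_list built above is an ASPP-style bank: one 3x3 classifier per
# (dilation, padding) pair, with 10x learning rate on its parameters. A
# hedged sketch of how such a bank is commonly reduced; the helper name and
# reduction-by-sum are assumptions, as the actual forward is not part of
# this snippet:
def _fuse_edge_logits(self, feat):  # hypothetical helper, for illustration
    out = self.conv2d_list[0](feat)
    for branch in self.conv2d_list[1:]:
        out = out + branch(feat)    # sum logits across dilation rates
    return out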
def __init__(self,
             in_channels,
             out_channels,
             stride,
             dilation=1,
             shortcut=True,
             if_first=False,
             data_format='NCHW'):
    super(BasicBlock, self).__init__()
    self.conv0 = ConvBNLayer(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        dilation=dilation,
        act='relu',
        data_format=data_format)
    self.conv1 = ConvBNLayer(
        in_channels=out_channels,
        out_channels=out_channels,
        kernel_size=3,
        dilation=dilation,
        act=None,
        data_format=data_format)
    if not shortcut:
        self.short = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            is_vd_mode=False if if_first or stride == 1 else True,
            data_format=data_format)
    self.shortcut = shortcut
    self.dilation = dilation
    self.data_format = data_format
    self.add = layers.Add()
    self.relu = layers.Activation(act="relu")
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             dilation=1,
             groups=1,
             is_vd_mode=False,
             act=None,
             data_format='NCHW'):
    super(ConvBNLayer, self).__init__()
    if dilation != 1 and kernel_size != 3:
        raise RuntimeError(
            "When the dilation isn't 1, the kernel_size should be 3.")
    self.is_vd_mode = is_vd_mode
    self._pool2d_avg = nn.AvgPool2D(
        kernel_size=2,
        stride=2,
        padding=0,
        ceil_mode=True,
        data_format=data_format)
    self._conv = nn.Conv2D(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=(kernel_size - 1) // 2 if dilation == 1 else dilation,
        dilation=dilation,
        groups=groups,
        bias_attr=False,
        data_format=data_format)
    self._batch_norm = layers.SyncBatchNorm(
        out_channels, data_format=data_format)
    self._act_op = layers.Activation(act=act)
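# Quick check of the padding rule above (a sketch). For dilation == 1, an odd
# kernel k gets "same" padding (k - 1) // 2; for dilation d > 1 the guard
# forces k == 3, and padding = d is again "same" because the effective kernel
# spans k + (k - 1) * (d - 1) = 2 * d + 1 positions.
for k, d in [(1, 1), (3, 1), (3, 2), (3, 4)]:
    pad = (k - 1) // 2 if d == 1 else d
    effective = k + (k - 1) * (d - 1)
    assert 2 * pad == effective - 1  # stride-1 spatial size is preserved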