Example #1
 def __init__(self,
              n_filter=128,
              in_channels=128,
              strides=(1, 1),
              exp_ratio=6,
              data_format="channels_first"):
     super().__init__()
     self.n_filter = n_filter
     self.in_channels = in_channels
     self.strides = strides
     self.exp_ratio = exp_ratio
     self.data_format = data_format
     self.hidden_dim = self.exp_ratio * self.in_channels
     self.identity = False
     if (self.strides == (1, 1) and self.in_channels == self.n_filter):
         self.identity = True
     if (self.exp_ratio == 1):
         self.main_block = LayerList([
             DepthwiseConv2d(in_channels=self.hidden_dim, filter_size=(3, 3), strides=self.strides,
                             b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.hidden_dim, is_train=True, act=tf.nn.relu6, data_format=self.data_format),
             Conv2d(n_filter=self.n_filter, in_channels=self.hidden_dim, filter_size=(1, 1), strides=(1, 1),
                    b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.n_filter, is_train=True, act=None, data_format=self.data_format)
         ])
     else:
         self.main_block = LayerList([
             Conv2d(n_filter=self.hidden_dim, in_channels=self.in_channels, filter_size=(1, 1), strides=(1, 1),
                    b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.hidden_dim, is_train=True, act=tf.nn.relu6, data_format=self.data_format),
             DepthwiseConv2d(in_channels=self.hidden_dim, filter_size=(3, 3), strides=self.strides,
                             b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.hidden_dim, is_train=True, act=tf.nn.relu6, data_format=self.data_format),
             Conv2d(n_filter=self.n_filter, in_channels=self.hidden_dim, filter_size=(1, 1), strides=(1, 1),
                    b_init=None, data_format=self.data_format),
             # linear projection is batch-normalized here as well, mirroring the exp_ratio == 1 branch
             BatchNorm2d(num_features=self.n_filter, is_train=True, act=None, data_format=self.data_format)
         ])
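This __init__ builds what is effectively a MobileNetV2-style inverted-residual block. A minimal forward-pass sketch, not part of the original snippet, assuming the surrounding class is a TensorLayer 2.x Module/Model so that the LayerList is callable:

 def forward(self, x):
     # main branch: (expand ->) depthwise -> linear projection
     out = self.main_block(x)
     # add the skip connection only when stride is 1 and the channel count is unchanged
     if self.identity:
         out = out + x
     return out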
Example #2
def depthwise_conv_block(n, n_filter, strides=(1, 1), name="depth_block"):
    n = DepthwiseConv2d((3, 3), strides, b_init=None,
                        name=name + '.depthwise')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm1')(n)
    n = Conv2d(n_filter, (1, 1), (1, 1), b_init=None, name=name + '.conv')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm2')(n)
    return n
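A hedged usage sketch, not in the original source: these functional-style blocks are chained by passing the running layer through them. The input shape and filter counts below are illustrative:

import tensorlayer as tl

ni = tl.layers.Input([1, 224, 224, 32], name="input")                      # assumed NHWC input
net = depthwise_conv_block(ni, 64, strides=(1, 1), name="depth_block1")
net = depthwise_conv_block(net, 128, strides=(2, 2), name="depth_block2")  # stride-2 block downsamples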
Example #3
 def depthwise_conv_block(n,
                          n_filter,
                          filter_size=(3, 3),
                          strides=(1, 1),
                          name="depth_block"):
     with tf.variable_scope(name):
         n = DepthwiseConv2d(n,
                             filter_size,
                             strides,
                             W_init=W_init,
                             b_init=None,
                             name='depthwise')
         n = BatchNormLayer(n,
                            decay=decay,
                            act=tf.nn.relu6,
                            is_train=train_bn,
                            name='batchnorm1')
         n = Conv2d(n,
                    n_filter, (1, 1), (1, 1),
                    W_init=W_init,
                    b_init=None,
                    name='conv')
         n = BatchNormLayer(n,
                            decay=decay,
                            act=tf.nn.relu6,
                            is_train=train_bn,
                            name='batchnorm2')
     return n
Example #4
 def depthwise_conv_block(cls, n, n_filter, strides=(1, 1), is_train=False, name="depth_block"):
     with tf.variable_scope(name):
         n = DepthwiseConv2d(n, (3, 3), strides, b_init=None, name='depthwise')
         n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm1')
         n = Conv2d(n, n_filter, (1, 1), (1, 1), b_init=None, name='conv')
         n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm2')
     return n
Example #5
def nobn_dw_conv_block(n_filter,
                       in_channels,
                       filter_size=(3, 3),
                       strides=(1, 1),
                       W_init=initializer,
                       b_init=initializer,
                       data_format="channels_first"):
    layer_list = []
    layer_list.append(
        DepthwiseConv2d(filter_size=filter_size,
                        strides=strides,
                        in_channels=in_channels,
                        act=tf.nn.relu,
                        W_init=initializer,
                        b_init=None,
                        data_format=data_format))
    layer_list.append(
        Conv2d(n_filter=n_filter,
               filter_size=(1, 1),
               strides=(1, 1),
               in_channels=in_channels,
               act=tf.nn.relu,
               W_init=initializer,
               b_init=None,
               data_format=data_format))
    return layers.LayerList(layer_list)
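A minimal usage sketch, not part of the original: the returned LayerList is typically stored in a TensorLayer 2.x Model subclass and applied in its forward(). The class name and shapes are illustrative, and the snippet's module-level initializer and layer imports are assumed to be in scope:

import tensorflow as tf
import tensorlayer as tl

class TinyNet(tl.models.Model):
    # hypothetical wrapper showing where a block like this usually lives
    def __init__(self):
        super().__init__()
        self.dw_block = nobn_dw_conv_block(n_filter=64, in_channels=32)

    def forward(self, x):
        return self.dw_block(x)

net = TinyNet()
net.eval()                             # set inference mode before calling
y = net(tf.ones([1, 32, 56, 56]))      # dummy NCHW input ("channels_first")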
Example #6
 def separable_conv_block(self,
                          n_filter=32,
                          in_channels=3,
                          filter_size=(3, 3),
                          strides=(1, 1)):
     layer_list = []
     layer_list.append(
         DepthwiseConv2d(in_channels=in_channels,
                         filter_size=filter_size,
                         strides=strides,
                         data_format=self.data_format))
     layer_list.append(
         BatchNorm2d(num_features=in_channels,
                     is_train=True,
                     act=tf.nn.relu,
                     data_format=self.data_format))
     layer_list.append(
         Conv2d(n_filter=n_filter,
                in_channels=in_channels,
                filter_size=(1, 1),
                strides=(1, 1),
                data_format=self.data_format))
     layer_list.append(
         BatchNorm2d(num_features=n_filter,
                     is_train=True,
                     act=tf.nn.relu,
                     data_format=self.data_format))
     return LayerList(layer_list)
Example #7
def separable_block(n_filter=32, in_channels=3, filter_size=(3, 3), strides=(1, 1),
                    dilation_rate=(1, 1), act=tf.nn.relu, data_format="channels_first"):
    layer_list = []
    layer_list.append(DepthwiseConv2d(filter_size=filter_size, strides=strides, in_channels=in_channels,
                                      dilation_rate=dilation_rate, W_init=initial_w, b_init=None,
                                      data_format=data_format))
    layer_list.append(BatchNorm2d(decay=0.99, act=act, num_features=in_channels,
                                  data_format=data_format, is_train=True))
    layer_list.append(Conv2d(n_filter=n_filter, filter_size=(1, 1), strides=(1, 1), in_channels=in_channels,
                             W_init=initial_w, b_init=None, data_format=data_format))
    layer_list.append(BatchNorm2d(decay=0.99, act=act, num_features=n_filter,
                                  data_format=data_format, is_train=True))
    return layers.LayerList(layer_list)
Example #8
def depthwise_conv_block(n,
                         n_filter,
                         alpha,
                         strides=(1, 1),
                         name="depth_block"):
    if strides != (1, 1):
        n = ZeroPad2d(padding=((1, 1), (1, 1)), name=name + '.pad')(n)
        padding_type = 'VALID'
    else:
        padding_type = 'SAME'
    n_filter = int(n_filter * alpha)
    n = DepthwiseConv2d((3, 3),
                        strides,
                        padding=padding_type,
                        b_init=None,
                        name=name + '.depthwise')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm1')(n)
    n = Conv2d(n_filter, (1, 1), (1, 1), b_init=None, name=name + '.conv')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm2')(n)
    return n
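A hedged usage note, not in the original: alpha is the MobileNet width multiplier, so the pointwise convolution emits int(n_filter * alpha) channels, and a non-unit stride triggers explicit zero-padding followed by a 'VALID' depthwise convolution. An illustrative call on a running tensor net:

net = depthwise_conv_block(net, n_filter=128, alpha=0.75, strides=(2, 2), name="depth_block1")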
 def shufflenet_unit(self, inputs, n_filter, filter_size, strides, groups, stage, bottleneck_ratio=0.25, name='_shufflenetunit'):
     in_channels = inputs.outputs.get_shape()[3]
     bottleneck_channels = int(n_filter * bottleneck_ratio)
     # bottleneck: a plain convolution in stage 2, a 1x1 grouped convolution in later stages
     if stage == 2:
         x = Conv2d(inputs, n_filter=bottleneck_channels, filter_size=filter_size, strides=(1, 1),
                    padding='SAME', name=name + '_Conv2d1')
     else:
         x = self.group_conv(inputs, groups, bottleneck_channels, (1, 1), (1, 1), name=name + '_groupconv1')
     x = BatchNormLayer(x, act=tf.nn.leaky_relu, name=name + '_Batch1')
     # shuffle channels across groups, then apply the depthwise convolution
     x = self.channel_shuffle(x, groups, name=name + '_channelshuffle')
     x = DepthwiseConv2d(x, shape=filter_size, strides=strides, depth_multiplier=1,
                         padding='SAME', name=name + '_DepthwiseConv2d')
     x = BatchNormLayer(x, name=name + '_Batch2')
     if strides == (2, 2):
         # stride-2 unit: the main branch outputs (n_filter - in_channels) channels so that
         # concatenating it with the average-pooled shortcut yields n_filter channels in total
         x = self.group_conv(x, groups, n_filter - in_channels, (1, 1), (1, 1), name=name + '_groupconv2')
         x = BatchNormLayer(x, name=name + '_Batch3')
         avg = MeanPool2d(inputs, filter_size=(3, 3), strides=(2, 2), padding='SAME', name=name + '_AvePool')
         x = ConcatLayer([x, avg], concat_dim=-1, name=name + '_Concat')
     else:
         # stride-1 unit: project back to n_filter channels and add the identity shortcut
         x = self.group_conv(x, groups, n_filter, (1, 1), (1, 1), name=name + '_groupconv3')
         x = BatchNormLayer(x, name=name + '_Batch4')
         if x.outputs.get_shape()[3] != inputs.outputs.get_shape()[3]:
             x = Conv2d(x, n_filter=in_channels, filter_size=filter_size, strides=(1, 1),
                        padding='SAME', name=name + '_Conv2d2')
         x = ElementwiseLayer([x, inputs], combine_fn=tf.add, name=name + '_Elementwise')
     return x
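The shufflenet_unit above depends on a channel_shuffle helper that is not shown. Below is a minimal sketch of the underlying tensor operation, assuming NHWC tensors; the original wraps this as a class method over TensorLayer layers, so the name and signature here are illustrative:

import tensorflow as tf

def channel_shuffle_op(x, groups):
    # interleave channels across groups: reshape to [N, H, W, groups, C // groups],
    # swap the last two axes, then flatten the channel dimension back to C
    n, h, w, c = x.get_shape().as_list()
    x = tf.reshape(x, [-1, h, w, groups, c // groups])
    x = tf.transpose(x, [0, 1, 2, 4, 3])
    return tf.reshape(x, [-1, h, w, c])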