def __resBlock(self,
                x,
                channels=64,
                kernel_size=(3, 3),
                scale=1.0,
                layer=0):
     nn = ReluLayer(x, name='res%d/ru1' % (layer))
     nn = tl.Conv2d(nn,
                    channels - self.prunedlist[layer],
                    kernel_size,
                    act=tf.nn.relu,
                    name='res%d/c1' % (layer))
     self.op.append(nn.outputs)
     #self.out_t = nn.outputs
     # from c1's output, get a random [3, 3, input_channels] sample
     #nn.print_layers()
     nn = tl.Conv2d(nn,
                    channels,
                    kernel_size,
                    act=None,
                    name='res%d/c2' % (layer))
     nn = ScaleLayer(nn, scale, name='res%d/scale' % (layer))
     n = tl.ElementwiseLayer([x, nn],
                             tf.add,
                             name='res%d/res_add' % (layer))
     return n
Example #2
 def _res_block(x, n_features=64, kernel_size=(3, 3), scale=1.0, layer=0):
     """
     a resBlock is defined in the paper as (excuse the ugly ASCII graph)
             x
             |\
             | \
             |  relu
             |  conv2d
             |  relu
             |  conv2d
             | /
             |/
             + (addition here)
             |
             result
     """
     nn = ReluLayer(x, name='res%d/ru1' % layer)
     nn = tl.Conv2d(nn,
                    n_features,
                    kernel_size,
                    act=tf.nn.relu,
                    name='res%d/c1' % layer)
     nn = tl.Conv2d(nn,
                    n_features,
                    kernel_size,
                    act=None,
                    name='res%d/c2' % layer)
     nn = ScaleLayer(nn, scale, name='res%d/scale' % layer)
     n = tl.ElementwiseLayer([x, nn], tf.add, name='res%d/res_add' % layer)
     return n
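A minimal usage sketch for the block above, assuming TensorLayer 1.x with tl aliased to tensorlayer.layers and the project's custom ReluLayer/ScaleLayer importable in the same scope; the placeholder shape, the head conv, and scale=0.1 are illustrative only:

import tensorflow as tf
import tensorlayer.layers as tl

# illustrative input: a batch of 48x48 RGB patches (any NHWC shape works)
t_image = tf.placeholder(tf.float32, [None, 48, 48, 3], name='t_image')
net = tl.InputLayer(t_image, name='input')
net = tl.Conv2d(net, 64, (3, 3), act=None, name='head')   # give the skip path 64 channels
net = _res_block(net, n_features=64, scale=0.1, layer=0)  # one EDSR-style residual block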
Example #3
    def __denseBlock(self,
                     x,
                     growth_rate=16,
                     num_layers=8,
                     kernel_size=[3, 3],
                     layer=0):
        dense_block_output = x
        for i in range(num_layers):
            '''
            In the paper "Densely Connected Convolutional Networks",
            each composite function consists of three consecutive operations:
            batch normalization (BN), followed by a rectified linear unit (ReLU) and a 3x3 convolution (Conv).
            '''
            if self.is_bn:
                x = tl.BatchNormLayer(x, name='denseblock%d/BN%d' % (layer, i))
            x = ReluLayer(x, name='denseblock%d/relu%d' % (layer, i))
            x = tl.Conv2d(x,
                          growth_rate,
                          kernel_size,
                          name='denseblock%d/conv%d' % (layer, i))
            # concatenate this layer's output with all preceding feature maps
            dense_block_output = tl.ConcatLayer([dense_block_output, x],
                                                concat_dim=3,
                                                name='denseblock%d/concat%d' %
                                                (layer, i))
            x = dense_block_output

        return dense_block_output
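Since ConcatLayer stacks along axis 3, every pass through the loop widens the feature dimension by growth_rate. A quick sanity check of the output width, assuming an NHWC input with 64 channels (the input width is an assumption for illustration):

in_channels = 64                 # channels of the block input x
growth_rate = 16
num_layers = 8
out_channels = in_channels + num_layers * growth_rate
print(out_channels)              # 192: each BN-ReLU-Conv composite adds growth_rate feature maps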
Example #4
 def __resBlock(self, x, channels=64, kernel_size=(3, 3), scale=1.0, layer=0):
     nn = ReluLayer(x, name='res%d/ru1' % (layer))
     nn = tl.Conv2d(nn, channels - self.prunedlist[layer], kernel_size, act=tf.nn.relu, name='res%d/c1' % (layer))
     nn = tl.Conv2d(nn, channels, kernel_size, act=None, name='res%d/c2' % (layer))
     nn = ScaleLayer(nn, scale, name='res%d/scale' % (layer))
     n = tl.ElementwiseLayer([x, nn], tf.add, name='res%d/res_add' % (layer))
     # Here we call the dictionary(X, W2, Y) function; note that X is x, i.e. this layer's input,
     # W2 is only the weights of the first conv in this block, and Y = n - Y', where Y' is the
     # output of the previous layer after pruning. In other words, the residual block is adjusted
     # step by step to compensate for the part that the shortcut cannot influence.

     # Record this layer's beta (append it to the model's beta list), and also record the number
     # of pruned channels, since it affects the filter count of the previous layer in the new model
     # (see the bookkeeping sketch after this example).

     # So the code above computes the old (unpruned) block; the new (pruned) one still has to be
     # computed here as well.
     return n
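A hypothetical sketch of the bookkeeping described in the comments above. The helper name record_pruning, the return values of dictionary(), and the exact way prunedlist is updated are assumptions inferred from those comments, not the repository's confirmed API:

def record_pruning(model, layer, X, W2, Y):
    # Hypothetical: solve the channel-selection problem for this block's first conv,
    # then record beta and the pruned-channel count for the rebuilt ("new") model.
    beta, n_pruned = model.dictionary(X, W2, Y)   # assumed return values, per the comments above
    model.beta.append(beta)                       # keep beta so the new model can reuse it
    model.prunedlist[layer] = n_pruned            # res%d/c1 gets this many fewer filters
    return beta, n_pruned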
Example #5
 def __resBlock(self,
                x,
                channels=64,
                kernel_size=(3, 3),
                scale=1.0,
                layer=0):
     nn = ReluLayer(x, name='res%d/ru1' % (layer))
     nn = tl.Conv2d(nn,
                    channels - self.prunedlist[layer],
                    kernel_size,
                    act=tf.nn.relu,
                    name='res%d/c1' % (layer))
     nn = tl.Conv2d(nn,
                    channels,
                    kernel_size,
                    act=None,
                    name='res%d/c2' % (layer))
     nn = ScaleLayer(nn, scale, name='res%d/scale' % (layer))
     n = tl.ElementwiseLayer([x, nn],
                             tf.add,
                             name='res%d/res_add' % (layer))
     return n
Example #6
    def dense_block(self,
                    x,
                    growth_rate=16,
                    n_conv=8,
                    kernel_size=(3, 3),
                    layer=0):
        dense_block_output = x
        for i in range(n_conv):
            x = tl.BatchNormLayer(x, name='dense_%d/bn_%d' % (layer, i))
            x = ReluLayer(x, name='dense_%d/relu_%d' % (layer, i))
            x = tl.Conv2d(x,
                          growth_rate,
                          kernel_size,
                          name='dense_%d/conv_%d' % (layer, i))
            # concatenate this layer's output with all preceding feature maps
            dense_block_output = tl.ConcatLayer([dense_block_output, x],
                                                concat_dim=3,
                                                name='dense_%d/concat_%d' %
                                                (layer, i))
            x = dense_block_output

        return x
Example #7
    def __resBlock2(self, x, channels=64, kernel_size=[3, 3], scale=1, layer=0):
        """
           Creates a convolutional residual block
           as defined in the paper. More on
           this inside model.py

           a resBlock is defined in the paper as
           (excuse the ugly ASCII graph)
               x
               |\
               | \
               |  relu
               |  conv2d
               |  relu
               |  conv2d
               | /
               |/
               + (addition here)
               |
               result

           :param x: input tensor to pass through the residual block
           :param channels: number of channels to compute
           :param kernel_size: filter kernel size
           :param scale: scale for residual skip
           :param layer: layer number
           """

        nn = ReluLayer(x, name='relu%d' % (layer))
        nn = tl.Conv2d(nn, channels, kernel_size, act=tf.nn.relu, name='res%d/c1' % (layer))
        nn = tl.Conv2d(nn, channels, kernel_size, act=None, name='res%d/c2' % (layer))
        nn = ScaleLayer(nn, scale, name='res%d/scale' % (layer))
        n = tl.ElementwiseLayer([x, nn], tf.add, name='res%d/res_add' % (layer))
        return n
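ScaleLayer and ReluLayer are custom layers from the surrounding project and are not shown in these examples. A minimal sketch of what a ScaleLayer could look like under the older TensorLayer 1.x custom-layer convention (an assumption; the project's actual implementation may differ):

import tensorlayer.layers as tl

class ScaleLayer(tl.Layer):
    # Multiplies the previous layer's outputs by a fixed scalar (residual scaling).
    def __init__(self, layer, scale=1.0, name='scale_layer'):
        tl.Layer.__init__(self, name=name)
        self.inputs = layer.outputs
        self.outputs = self.inputs * scale
        # propagate the usual TensorLayer bookkeeping from the previous layer
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.append(self.outputs)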