# All snippets below assume the standard pycaffe NetSpec imports:
#   import caffe
#   from caffe import layers as L, params as P
def wrn_expansion(bottom, ks, nout, stride_first_conv=1, pad_first_conv=1):
    conv_1_1 = L.Convolution(bottom,
                             kernel_size=ks,
                             stride=stride_first_conv,
                             num_output=nout,
                             pad=pad_first_conv,
                             bias_term=False,
                             weight_filler=dict(type='msra'))
    batch_norm = L.BatchNorm(conv_1_1,
                             use_global_stats=False,
                             in_place=True,
                             param=[
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0)
                             ])
    scale = L.Scale(batch_norm, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    conv_2_1 = L.Convolution(relu,
                             kernel_size=ks,
                             stride=1,
                             num_output=nout,
                             pad=1,
                             bias_term=False,
                             weight_filler=dict(type='msra'))
    conv_1_2 = L.Convolution(bottom,
                             kernel_size=1,
                             stride=stride_first_conv,
                             num_output=nout,
                             pad=0,
                             bias_term=False,
                             weight_filler=dict(type='msra'))
    addition = L.Eltwise(conv_2_1, conv_1_2, operation=P.Eltwise.SUM)
    return addition
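
A minimal usage sketch (an illustration, not from the original source): attach the block to a NetSpec input and serialize the result to a prototxt; the file name is arbitrary.

import caffe
from caffe import layers as L, params as P

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 32, 32]))
n.block1 = wrn_expansion(n.data, ks=3, nout=16)
with open('wrn_block.prototxt', 'w') as f:
    f.write(str(n.to_proto()))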
Example #2
def convolution_unit(input_layer, k, pad, planes, lr_mult=1, decay_mult=1):
    """ Generates a convolution unit (conv + batch_norm + ReLU)

    Args:
        input_layer: the layer on which to stack the conv unit
        k (int): the kernel size
        pad (int): the padding size
        planes (int): the number of filters
        lr_mult (int, optional): the learning rate multiplier (defaults to 1)
        decay_mult (int, optional): the weight regularization multiplier

    Returns:
        tuple: the Caffe layer objects (conv, bn, scale, relu)
    """

    conv = L.Convolution(input_layer,
                         kernel_size=k,
                         pad=pad,
                         num_output=planes,
                         weight_filler=dict(type='msra'),
                         param={'lr_mult': lr_mult, 'decay_mult': decay_mult}
                        )
    bn = L.BatchNorm(conv, in_place=True)
    scale = L.Scale(conv, in_place=True, bias_term=True,
                    param=[{'lr_mult': lr_mult}, {'lr_mult': 2 * lr_mult}])
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def add_module(bottom, num_output, stride):
    conv = L.Convolution(bottom,
                         param=[{
                             'lr_mult': 1,
                             'decay_mult': 0
                         }],
                         num_output=num_output,
                         pad=1,
                         kernel_size=3,
                         stride=stride,
                         bias_term=False,
                         weight_filler=dict(type='gaussian', std=0.01))
    bn = L.BatchNorm(conv,
                     moving_average_fraction=0.05,
                     param=[{
                         'lr_mult': 0
                     }, {
                         'lr_mult': 0
                     }, {
                         'lr_mult': 0
                     }],
                     in_place=True)
    scale = L.Scale(conv, bias_term=True, in_place=True)
    return conv, bn, scale
Example #4
def Linear(net,
           from_layer,
           num_output=1,
           kernel_size=1,
           stride=1,
           pad=0,
           num_group=1,
           name=None,
           suffix=""):
    type = "Convolution"
    if num_group != 1:
        type = "DepthwiseConvolution"
    conv = L.Convolution(from_layer,
                         num_output=num_output,
                         kernel_size=kernel_size,
                         group=num_group,
                         stride=stride,
                         pad=pad,
                         bias_term=False,
                         name="%s%s_conv2d" % (name, suffix),
                         type=type,
                         engine=P.Convolution.Engine.CAFFE,
                         **kwargs)
    bn = L.BatchNorm(conv, name="%s%s_batchnormal" % (name, suffix))
    return bn
Example #5
def conv_factory_relu_h_w(bottom,
                          ks_h,
                          ks_w,
                          nout,
                          stride=1,
                          pad_h=0,
                          pad_w=0):
    conv = L.Convolution(bottom,
                         kernel_h=ks_h,
                         kernel_w=ks_w,
                         stride=stride,
                         num_output=nout,
                         pad_h=pad_h,
                         pad_w=pad_w,
                         bias_term=False,
                         weight_filler=dict(type='msra'))
    batch_norm = L.BatchNorm(conv,
                             in_place=True,
                             param=[
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0)
                             ])
    scale = L.Scale(batch_norm, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    return relu
Example #6
def conv_bn(bottom, nout, ks=3, stride=1, pad=0, learn=True):
    if learn:
        param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    else:
        param = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)]

    conv_kwargs = dict(kernel_size=ks,
                       stride=stride,
                       num_output=nout,
                       pad=pad,
                       param=param,
                       weight_filler=dict(type='msra'),
                       bias_filler=dict(type='constant'))
    if isinstance(bottom, str):
        # A string bottom references an existing blob by name.
        conv = L.Convolution(bottom=bottom, **conv_kwargs)
    else:
        conv = L.Convolution(bottom, **conv_kwargs)

    bn = L.BatchNorm(conv)
    # lrn = L.LRN(bn)
    return conv, bn
Example #7
def SingleConv(data,
               num_output,
               kernel_size=3,
               stride=1,
               padding=1,
               is_train=True):
    '''
    3D convolution + BatchNorm + ReLU.
    @data: input blob to convolve
    @num_output: number of output channels
    @kernel_size: kernel size
    @stride: stride
    @padding: padding
    Returns: the ReLU-activated result
    '''
    conv = L.Convolution(data,
                         kernel_size=kernel_size,
                         stride=stride,
                         pad=padding,
                         num_output=num_output,
                         weight_filler=dict(type='xavier'))
    # engine=3 is fork-specific (stock BVLC Caffe only defines DEFAULT=0,
    # CAFFE=1, CUDNN=2); at test time the BN statistics are frozen.
    if is_train:
        kwargs = {'engine': 3}
    else:
        kwargs = {'engine': 3, 'use_global_stats': True}
    norm = L.BatchNorm(conv, **kwargs)
    scale = L.Scale(norm, bias_term=True)
    actv = L.ReLU(scale, engine=3)
    return actv
Example #8
def conv_bn_relu(n, name, top_prev, ks, nout, stride=1, pad=0, loop=1):

    for idx in range(loop):
        n[str(name) + "_" + str(idx)] = L.Convolution(
            top_prev,  # name=name (disabled; layers are named via n[...])
            convolution_param=dict(kernel_size=ks,
                                   stride=stride,
                                   num_output=nout,
                                   pad=pad,
                                   engine=2,
                                   weight_filler=dict(type='xavier'),
                                   bias_term=False),
            param=[dict(lr_mult=1)],
        )
        top_prev = n[str(name) + "_" + str(idx)]
        n[str(name) + '_bn_' + str(idx)] = L.BatchNorm(
            top_prev,
            batch_norm_param=dict(eps=1e-3, moving_average_fraction=0.99))
        top_prev = n[str(name) + '_bn_' + str(idx)]
        n[str(name) + '_sc_' + str(idx)] = L.Scale(
            top_prev, scale_param=dict(bias_term=True))
        top_prev = n[str(name) + '_sc_' + str(idx)]
        n[str(name) + '_relu_' + str(idx)] = L.ReLU(top_prev, in_place=True)
        top_prev = n[str(name) + '_relu_' + str(idx)]

    return top_prev
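
A usage sketch (illustrative, not from the source): the helper mutates the NetSpec in place and returns the last top, so blocks chain naturally.

import caffe
from caffe import layers as L

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 224, 224]))
top = conv_bn_relu(n, 'block1', n.data, ks=3, nout=64, pad=1, loop=2)
top = conv_bn_relu(n, 'block2', top, ks=3, nout=128, stride=2, pad=1)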
def conv_bn(name,
            input,
            output,
            kernel_size=3,
            stride=1,
            pad=1,
            activation=True,
            dilation=1):
    conv = L.Convolution(input,
                         kernel_size=kernel_size,
                         stride=stride,
                         num_output=output,
                         bias_term=False,
                         pad=pad,
                         weight_filler=dict(type='xavier'),
                         dilation=dilation)

    # In-place compute means the input and output share the same memory area,
    # which is more memory-efficient.
    bn = L.BatchNorm(conv, use_global_stats=True, in_place=True)

    # scale = L.Scale(bn,filler=dict(value=1),bias_filler=dict(value=0),bias_term=True, in_place=True)
    out = L.Scale(bn, bias_term=True, in_place=True)

    if activation is True:
        out = L.ReLU(out, in_place=True)
    return out
Example #10
def _block_4in1(major, minor, net, bottom, nout, pad, ks, stride):
    branch_flag = '{}_branch{}'.format(major, minor)
    conv_layer = 'res{}'.format(branch_flag)
    bn_layer = 'bn{}'.format(branch_flag)
    scale_layer = 'scale{}'.format(branch_flag)
    relu_layer = 'res{}_relu'.format(branch_flag)

    if ks == 3:  # bottleneck layer, grouped convolutions
        net[conv_layer] = L.Convolution(bottom,
                                        num_output=nout,
                                        pad=pad,
                                        kernel_size=ks,
                                        stride=stride,
                                        bias_term=False,
                                        group=32)  # Cardinality
    else:
        net[conv_layer] = L.Convolution(bottom,
                                        num_output=nout,
                                        pad=pad,
                                        kernel_size=ks,
                                        stride=stride,
                                        bias_term=False)
    net[bn_layer] = L.BatchNorm(net[conv_layer], in_place=True)
    net[scale_layer] = L.Scale(net[bn_layer], bias_term=True, in_place=True)
    net[relu_layer] = L.ReLU(net[scale_layer], in_place=True)

    return net[relu_layer]
Example #11
def factorization_conv_mxn(bottom,
                           num_output=64,
                           kernel_h=1,
                           kernel_w=7,
                           stride=1,
                           pad_h=3,
                           pad_w=0):
    conv_mxn = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_h=kernel_h,
        kernel_w=kernel_w,
        stride=stride,
        pad_h=pad_h,
        pad_w=pad_w,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='xavier', std=0.01),  # note: the xavier filler ignores std
        bias_filler=dict(type='constant', value=0.2))
    conv_mxn_bn = L.BatchNorm(conv_mxn, use_global_stats=False, in_place=True)
    conv_mxn_scale = L.Scale(conv_mxn,
                             scale_param=dict(bias_term=True),
                             in_place=True)
    conv_mxn_relu = L.ReLU(conv_mxn, in_place=True)

    return conv_mxn, conv_mxn_bn, conv_mxn_scale, conv_mxn_relu
Example #12
def conv_relu(bottom, nout, ks=3, stride=2, pad=1, bn=True, test=False):
    conv = L.Convolution(
        bottom,
        param=[dict(lr_mult=1, decay_mult=1)],
        convolution_param=dict(
            kernel_size=ks,
            stride=stride,
            num_output=nout,
            pad=pad,
            bias_term=False,
            weight_filler=dict(type="msra"),
            engine=1  # DEFAULT = 0; CAFFE = 1; CUDNN = 2;
        ))
    if bn:
        conv = L.BatchNorm(
            conv,
            # param=[dict(lr_mult=0, decay_mult=0),
            #        dict(lr_mult=0, decay_mult=0),
            #        dict(lr_mult=0, decay_mult=0)],
            batch_norm_param=dict(use_global_stats=test, eps=0.00001),
            # in_place=True
        )
        conv = L.Scale(
            conv,
            # param=[dict(lr_mult=0, decay_mult=0),
            #        dict(lr_mult=0, decay_mult=0)
            #        ],
            scale_param=dict(filler=dict(value=1),
                             bias_term=True,
                             bias_filler=dict(value=0)),
            # in_place=True
        )
    relu_conv = L.ReLU(conv, in_place=True)
    return conv, relu_conv
Example #13
def conv_BN_scale_relu(bottom,
                       numout,
                       kernelsize,
                       stride,
                       pad,
                       bias=False,
                       groups=1):
    conv = L.Convolution(bottom,
                         kernel_size=kernelsize,
                         stride=stride,
                         num_output=numout,
                         pad=pad,
                         bias_term=bias,
                         group=groups,
                         weight_filler=dict(type='msra'),
                         bias_filler=dict(type='constant'))

    BN = L.BatchNorm(conv,
                     in_place=True,
                     param=[
                         dict(lr_mult=0, decay_mult=0),
                         dict(lr_mult=0, decay_mult=0),
                         dict(lr_mult=0, decay_mult=0)
                     ])

    scale = L.Scale(BN,
                    in_place=True,
                    bias_term=True,
                    filler=dict(value=1),
                    bias_filler=dict(value=0))
    relu = L.ReLU(scale, in_place=True)
    return scale, relu
Example #14
    def define_model(self):
        n = caffe.NetSpec()
        pylayer = 'ClsDataLayer'
        pydata_params = dict(phase='train',
                             nodule_cubic=self.nodule_cubic,
                             candidate_cubic=self.candidate_cubic,
                             ratio=self.ratio,
                             batch_size=4,
                             crop_size=[40, 40, 40],
                             random=True)
        n.data, n.label = L.Python(module='data.ClsDataLayer', layer=pylayer,
                                   ntop=2, param_str=str(pydata_params))
        n.conv1 = BasicConv(n.data, 32)                # (40, 40, 40)
        n.downsample1 = Inception_v1(n.conv1, 32, 32)  # (20, 20, 20)
        n.conv2 = BasicConv(n.downsample1, 64)
        n.downsample2 = Inception_v1(n.conv2, 64, 64)  # (10, 10, 10)
        n.conv3 = BasicConv(n.downsample2, 64)
        n.downsample3 = Inception_v1(n.conv3, 64, 64)  # (5, 5, 5)
        n.conv4 = BasicConv(n.downsample3, 64)

        n.conv5 = L.Convolution(n.conv4, kernel_size=3, stride=1, pad=0,
                                num_output=16, weight_filler=dict(type='xavier'))
        # `kwargs` (BatchNorm/ReLU settings) is assumed to be defined
        # elsewhere in the class; it is not shown in this excerpt.
        n.bn5 = L.BatchNorm(n.conv5, **kwargs)
        n.re5 = L.ReLU(n.bn5, **kwargs)

        n.fc6 = L.InnerProduct(n.re5, num_output=150,
                               weight_filler=dict(type='xavier'))
        n.re6 = L.ReLU(n.fc6, **kwargs)

        n.fc7 = L.InnerProduct(n.re6, num_output=2,
                               weight_filler=dict(type='xavier'))

        n.loss = L.SoftmaxWithLoss(n.fc7, n.label)
        # Alternative loss (disabled): a custom Dice loss Python layer.
        # n.loss = L.Python(module='DiceLoss', layer='DiceLossLayer',
        #                   ntop=1, bottom=[n.probs, n.label])
        with open(self.model_def, 'w') as f:
            f.write(str(n.to_proto()))
Example #15
def identity_residual(bottom, kernel_size=3, num_out=64, stride=1, pad=0):
    # `weight_filler`, conv_bn_scale_relu, and eltsum_block are assumed to be
    # defined at module level.

    # pre_bn = L.BatchNorm(bottom, in_place=True)
    pre_bn = L.BatchNorm(bottom, in_place=False)
    pre_scale = L.Scale(pre_bn,
                        scale_param=dict(bias_term=True),
                        in_place=True)
    pre_relu = L.ReLU(pre_scale, in_place=True)

    conv1, bn1, scale1, relu1 = conv_bn_scale_relu(pre_relu,
                                                   kernel_size=1,
                                                   num_out=num_out,
                                                   stride=1,
                                                   pad=0)
    conv2, bn2, scale2, relu2 = conv_bn_scale_relu(relu1,
                                                   kernel_size=3,
                                                   num_out=num_out,
                                                   stride=stride,
                                                   pad=1)
    conv3 = L.Convolution(relu2,
                          kernel_size=1,
                          num_output=num_out * 4,
                          stride=1,
                          pad=0,
                          param=[dict(lr_mult=1, decay_mult=1)],
                          bias_term=False,
                          weight_filler=weight_filler)

    eltsum = eltsum_block(bottom, conv3)

    return pre_bn, pre_scale, pre_relu, \
           conv1, bn1, scale1, relu1, \
           conv2, bn2, scale2, relu2, \
           conv3, eltsum
Example #16
def conv_factory(bottom, ks, nout, stride=1, pad=0):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, bias_term=False,
                         weight_filler=dict(type='msra'))
    batch_norm = L.BatchNorm(conv, in_place=True,
                             batch_norm_param=dict(moving_average_fraction=0.9))
    scale = L.Scale(batch_norm, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    return relu
def bn_relu_conv(bottom, ks, nout, stride, pad, dropout):
    conv1 = L.Convolution(bottom,
                          kernel_size=1,
                          stride=1,
                          num_output=nout,
                          pad=0,
                          bias_term=False,
                          weight_filler=dict(type='msra'),
                          bias_filler=dict(type='constant'))
    conv = L.Convolution(conv1,
                         kernel_size=ks,
                         stride=stride,
                         num_output=nout,
                         pad=pad,
                         bias_term=False,
                         weight_filler=dict(type='msra'),
                         bias_filler=dict(type='constant'))
    batch_norm = L.BatchNorm(conv,
                             in_place=True,
                             param=[
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0)
                             ])
    scale = L.Scale(batch_norm,
                    bias_term=True,
                    in_place=True,
                    filler=dict(value=1),
                    bias_filler=dict(value=0))
    relu = L.ReLU(scale, in_place=True)

    # Dropout is currently disabled; re-enable to honor the `dropout` argument:
    # if dropout > 0:
    #     relu = L.Dropout(relu, dropout_ratio=dropout)
    return relu
def conv_factory_relu(bottom, ks, nout, stride=1, pad=0):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, bias_term=True,
                         weight_filler=dict(type='msra'),
                         bias_filler=dict(type='constant'))
    batch_norm = L.BatchNorm(conv, in_place=True,
                             param=[dict(lr_mult=0, decay_mult=0),
                                    dict(lr_mult=0, decay_mult=0),
                                    dict(lr_mult=0, decay_mult=0)])
    scale = L.Scale(batch_norm, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    return relu
def conv_BN_scale_relu(split, bottom, nout, ks, stride,
                       pad):  # apply conv-BN-scale-ReLU to the input
    if bottom == "data":
        conv = L.Convolution(bottom="data",
                             kernel_size=ks,
                             stride=stride,
                             num_output=nout,
                             pad=pad,
                             bias_term=True,
                             weight_filler=dict(type='msra'),
                             bias_filler=dict(type='constant'))
    else:
        conv = L.Convolution(bottom,
                             kernel_size=ks,
                             stride=stride,
                             num_output=nout,
                             pad=pad,
                             bias_term=True,
                             weight_filler=dict(type='msra'),
                             bias_filler=dict(type='constant'))
    if split == 'train':
        use_global_stats = False  # training: accumulate moving averages of the BN statistics
    else:
        use_global_stats = True  # test: use the stored statistics directly
    BN = L.BatchNorm(
        conv,
        batch_norm_param=dict(use_global_stats=use_global_stats),
        in_place=True,
        # BN's learnable parameters can be frozen (lr_mult=0), leaving the
        # learning to the Scale layer:
        #     param = [dict(lr_mult=0, decay_mult=0),
        #              dict(lr_mult=0, decay_mult=0),
        #              dict(lr_mult=0, decay_mult=0)]
    )
    scale = L.Scale(BN, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(scale, in_place=True)
    return scale, relu
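
An illustrative driver (an assumption, not part of the source; `make_net` is a hypothetical name): call the helper once per phase so the generated train prototxt updates BN statistics while the test prototxt freezes them.

import caffe
from caffe import layers as L

def make_net(split):
    n = caffe.NetSpec()
    n.data = L.Input(shape=dict(dim=[1, 3, 32, 32]))
    scale, relu = conv_BN_scale_relu(split, n.data, nout=16, ks=3,
                                     stride=1, pad=1)
    n.block1 = relu
    return n.to_proto()

for split in ('train', 'test'):
    with open('net_%s.prototxt' % split, 'w') as f:
        f.write(str(make_net(split)))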
Example #20
def preactbottleneck(bottom, ks, nout, stride, pad, groups=1):
    batch_norm = L.BatchNorm(bottom, in_place=False, batch_norm_param=dict(use_global_stats=True))
    scale = L.Scale(batch_norm, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    conv = L.Convolution(relu, kernel_size=ks, stride=stride, group=groups,
                         num_output=nout, pad=pad, bias_term=False,
                         weight_filler=dict(type='msra'),
                         bias_filler=dict(type='constant'))
    return conv
Example #21
def after_conv(conv):
    # In-place compute means the input and output share the same memory area,
    # which is more memory-efficient.
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=False)
    # scale = L.Scale(bn, filler=dict(value=1), bias_filler=dict(value=0), bias_term=True, in_place=True)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    return relu
Example #22
def convLayer(prev, lrn=False, param_name=None, bn=False, **kwargs):
    if param_name:
        name1 = param_name + '_kernels'
        name2 = param_name + '_bias'
        conv = L.Convolution(
            prev,
            param=[dict(lr_mult=1, name=name1),
                   dict(lr_mult=2, name=name2)],
            weight_filler=dict(type='msra'),
            **kwargs)
    else:
        conv = L.Convolution(prev,
                             param=[dict(lr_mult=1),
                                    dict(lr_mult=2)],
                             weight_filler=dict(type='msra'),
                             **kwargs)
    if bn:
        bn = L.BatchNorm(conv)
        relu = L.ReLU(bn, in_place=True)
    else:
        relu = L.ReLU(conv, in_place=True)
    if lrn:
        # optional Local Response Normalization; use integer division so
        # local_size stays an int (Caffe expects an odd integer here)
        relu = L.LRN(relu,
                     lrn_param={
                         'local_size': min(kwargs['num_output'] // 3, 5),
                         'alpha': 0.0001,
                         'beta': 0.75
                     })
    return relu
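
A usage sketch (hypothetical values): extra keyword arguments flow straight into the Convolution layer, so the kernel geometry is specified at the call site.

import caffe
from caffe import layers as L

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 227, 227]))
n.relu1 = convLayer(n.data, lrn=True, param_name='conv1',
                    num_output=96, kernel_size=11, stride=4)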
def bn_relu_conv1(bottom, ks, nout, stride, pad, dropout, idx):
    batch_norm = L.BatchNorm(bottom,
                             in_place=False,
                             param=[
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0),
                                 dict(lr_mult=0, decay_mult=0)
                             ])
    scale = L.Scale(batch_norm,
                    bias_term=True,
                    in_place=True,
                    filler=dict(value=1),
                    bias_filler=dict(value=0))
    relu = L.ReLU(scale, in_place=True)
    # `net` is assumed to be a module-level caffe.NetSpec (not shown here).
    net['conv-block' + str(idx)] = L.Convolution(
        relu,
        kernel_size=ks,
        stride=stride,
        num_output=nout,
        pad=pad,
        bias_term=False,
        weight_filler=dict(type='xavier'),
        bias_filler=dict(type='constant'))
    conv = net['conv-block' + str(idx)]
    if dropout > 0:
        conv = L.Dropout(conv, dropout_ratio=dropout)
    return conv
Example #24
    def add_conv_bn_relu(self, bottom, num_output):
        # `msra_constant` is a filler spec assumed to be defined at module level.
        model = self.conv(bottom, num_output, wf=msra_constant)
        model = L.BatchNorm(model, use_global_stats=False, in_place=True)
        model = L.Scale(model, bias_term=True, in_place=True)
        model = L.ReLU(model, in_place=True)

        return model
Example #25
def bn_relu_conv(net, mode, flag, bottom, ks, nout, stride, pad, dropout):
  suffix     = '{}x{}'.format(ks, ks)
  flag_bn    = '{}_{}_bn'.format(flag, suffix)
  flag_scale = '{}_{}_scale'.format(flag, suffix)
  flag_relu  = '{}_{}_relu'.format(flag, suffix)
  flag_conv  = '{}_{}_conv'.format(flag, suffix)
  flag_drop  = '{}_{}_dropout'.format(flag, suffix)
  
  use_global_stats = False
  if mode == 1: # TEST phase
    use_global_stats = True
  
  net[flag_bn] = L.BatchNorm(bottom, in_place=False,
                             batch_norm_param = dict(use_global_stats=use_global_stats),
                             param=[dict(lr_mult=0, decay_mult=0), 
                                    dict(lr_mult=0, decay_mult=0), 
                                    dict(lr_mult=0, decay_mult=0)])
  net[flag_scale] = L.Scale(net[flag_bn], bias_term=True, in_place=True, 
                            filler=dict(value=1), bias_filler=dict(value=0))
  net[flag_relu] = L.PReLU(net[flag_scale], in_place=True)
  
  net[flag_conv] = L.Convolution(net[flag_relu], num_output=nout, 
                                kernel_size=ks, stride=stride, pad=pad, 
                                weight_filler=dict(type='msra'), 
                                bias_term=False)
  if dropout > 0:
    net[flag_drop] = L.Dropout(net[flag_conv], dropout_ratio=dropout)
    return net[flag_drop]
  
  return net[flag_conv]
Example #26
def conv_bn_scale_relu(bottom, kernel_size=3, num_out=64, stride=1, pad=0, params=conv_params):
    # `conv_params` is assumed to be a module-level (weight_filler, bias_filler) pair.
    weight_filler = params[0]
    bias_filler = params[1]
    conv = L.Convolution(bottom, kernel_size=kernel_size, stride=stride, num_output=num_out,
                         pad=pad, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                         weight_filler=weight_filler, bias_filler=bias_filler)
    # Phase-gated BatchNorm: phase 0 (TRAIN) computes batch statistics,
    # phase 1 (TEST) uses the stored global statistics.
    bn_train = L.BatchNorm(conv, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                        dict(lr_mult=0, decay_mult=0)],
                           use_global_stats=False, in_place=True, include=dict(phase=0))
    bn_test = L.BatchNorm(conv, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0),
                                       dict(lr_mult=0, decay_mult=0)],
                          use_global_stats=True, in_place=True, include=dict(phase=1))
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)

    return conv, bn_train, bn_test, scale, relu
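
A consumption sketch (illustrative, assuming the module-level `conv_params` fillers exist): both phase-gated BatchNorm layers land in the same prototxt, and Caffe instantiates only the one whose include.phase matches the running phase.

import caffe
from caffe import layers as L

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 32, 32]))
(n.conv1, n.bn1_train, n.bn1_test,
 n.scale1, n.relu1) = conv_bn_scale_relu(n.data, num_out=32, pad=1)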
Example #27
def batch_norm_net():
    n = caffe.NetSpec()
    n.data = L.DummyData(dummy_data_param=dict(num=64,
                                               channels=1,
                                               height=28,
                                               width=28,
                                               data_filler=dict(
                                                   type="gaussian")))
    n.label = L.DummyData(dummy_data_param=dict(num=64,
                                                channels=1,
                                                height=1,
                                                width=1,
                                                data_filler=dict(
                                                    type="gaussian")))
    n.conv1 = L.Convolution(n.data,
                            kernel_size=7,
                            stride=2,
                            num_output=32,
                            pad=3)
    n.pool1 = L.Pooling(n.conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    n.relu1 = L.ReLU(n.pool1, in_place=True)

    n.norm1 = L.BatchNorm(n.relu1, moving_average_fraction=0.9, in_place=True)
    n.scale1 = L.Scale(n.norm1, bias_term=True, in_place=True)

    n.ip2 = L.InnerProduct(n.scale1,
                           num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
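
A short usage note (illustrative): the function returns a NetParameter, so writing it to disk is all that is needed before training with the standard caffe tooling.

with open('batch_norm_net.prototxt', 'w') as f:
    f.write(str(batch_norm_net()))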
Example #28
def conv_bn_scale(bottom,
                  kernel_size=3,
                  num_out=64,
                  stride=1,
                  pad=0,
                  params=conv_params):
    weight_filler = params[0]
    bias_filler = params[1]
    conv = L.Convolution(bottom,
                         kernel_size=kernel_size,
                         stride=stride,
                         num_output=num_out,
                         bias_term=False,
                         pad=pad,
                         param=[dict(lr_mult=1, decay_mult=1)],
                         weight_filler=weight_filler)
    bn = L.BatchNorm(conv, in_place=True)
    scale = L.Scale(
        conv,
        scale_param=dict(bias_term=True),
        in_place=True,
        param=[dict(lr_mult=1, decay_mult=0),
               dict(lr_mult=1, decay_mult=0)])

    return conv, bn, scale
Example #29
    def add_batchnorm(self, bottom):
        # `cl` is presumably this module's alias for caffe.layers.
        bn = cl.BatchNorm(bottom, in_place=True)
        bn = cl.Scale(bn,
                      bias_term=True,
                      bias_filler=dict(value=0),
                      in_place=True)
        return bn
Example #30
def BR(caffe_net, layer_idx, bottom_blob):

    names = ['bn{}'.format(layer_idx), 'prelu{}'.format(layer_idx)]

    caffe_net[names[0]] = L.BatchNorm(bottom_blob)
    caffe_net[names[1]] = L.PReLU(caffe_net[names[0]])
    return caffe_net[names[1]], layer_idx + 1
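
A usage sketch (hypothetical): BR returns both the new top and the incremented layer index, so the index threads through successive calls to keep layer names unique.

import caffe
from caffe import layers as L

caffe_net = caffe.NetSpec()
caffe_net.data = L.Input(shape=dict(dim=[1, 3, 224, 224]))
top, idx = BR(caffe_net, 1, caffe_net.data)  # creates bn1, prelu1
top, idx = BR(caffe_net, idx, top)           # creates bn2, prelu2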